content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/modify_par.R
\name{verify_par_args}
\alias{verify_par_args}
\title{verify par arguments against a vector of allowed names}
\usage{
verify_par_args(arguments, available.par, on.readonly = c("stop", "skip",
"warning"))
}
\arguments{
\item{arguments}{named parameters for par}
\item{available.par}{par names that are allowed to be modified by this call}
\item{on.readonly}{what to do when an argument (or arguments) can't be modified
"stop", "skip", or "warning" are supported}
}
\description{
verify par arguments against a vector of allowed names
}
\keyword{internal}
| /man/verify_par_args.Rd | permissive | wdwatkins/gsplot | R | false | true | 649 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/modify_par.R
\name{verify_par_args}
\alias{verify_par_args}
\title{verify par arguments against a vector of allowed names}
\usage{
verify_par_args(arguments, available.par, on.readonly = c("stop", "skip",
"warning"))
}
\arguments{
\item{arguments}{named parameters for par}
\item{available.par}{par names that are allowed to be modified by this call}
\item{on.readonly}{what to do when an argument (or arguments) can't be modified
"stop", "skip", or "warning" are supported}
}
\description{
verify par arguments against a vector of allowed names
}
\keyword{internal}
|
# Visualize user contribution vs. quality metrics on a log-log scale.
library(ggplot2)  # library() errors on a missing package, unlike require()
# NOTE(review): hard-coded working directory makes this non-portable.
setwd('~/Google Drive/UTK/Fall 2014/COSC 594/Project 2/')
# header = TRUE spelled out: the original "head=T" only worked through
# partial argument matching and the reassignable T alias.
data <- read.csv('usermetrics.csv', header = TRUE, sep = ';')
# Log-transform the counts, mapping zero counts to 0 to avoid log(0) = -Inf.
data$logContrib <- ifelse(data$numContrib==0, 0, log(data$numContrib))
data$logQuality <- ifelse(data$numQuality==0, 0, log(data$numQuality))
### this is the plot of the data
p <- ggplot(data, aes(logContrib, logQuality))
p <- p + geom_point(size=3, alpha=.2) ## alpha controls the opacity of each point. it takes values from 0 (transparent) to 1 (opaque).
p
### this section displays the names of users for different areas of the plot above
data2 <- data[data$logQuality>=4.1 & data$logContrib>=8,]
data3 <- data[data$logQuality>=1 & data$logContrib>=7.5,]
data4 <- data[data$logQuality>=1 & data$logContrib<=7.5 & data$logContrib>=2.5,]
| /UserMetrics_Viz.R | no_license | fdac/team4.2 | R | false | false | 770 | r | require(ggplot2)
# Visualize user contribution vs. quality metrics on a log-log scale.
# NOTE(review): hard-coded working directory makes this non-portable.
setwd('~/Google Drive/UTK/Fall 2014/COSC 594/Project 2/')
# header = TRUE spelled out: the original "head=T" only worked through
# partial argument matching and the reassignable T alias.
data <- read.csv('usermetrics.csv', header = TRUE, sep = ';')
# Log-transform the counts, mapping zero counts to 0 to avoid log(0) = -Inf.
data$logContrib <- ifelse(data$numContrib==0, 0, log(data$numContrib))
data$logQuality <- ifelse(data$numQuality==0, 0, log(data$numQuality))
### this is the plot of the data
p <- ggplot(data, aes(logContrib, logQuality))
p <- p + geom_point(size=3, alpha=.2) ## alpha controls the opacity of each point. it takes values from 0 (transparent) to 1 (opaque).
p
### this section displays the names of users for different areas of the plot above
data2 <- data[data$logQuality>=4.1 & data$logContrib>=8,]
data3 <- data[data$logQuality>=1 & data$logContrib>=7.5,]
data4 <- data[data$logQuality>=1 & data$logContrib<=7.5 & data$logContrib>=2.5,]
|
compute_variables_file <- function(file, ROM = FALSE) {
# Computes some variables based on isokinetic strength test data from a single
# file
#
# Args:
# file: name of the file containing isokinetic strength test data
# ROM: a numeric sequence corresponding to the desired range of motion
#
# Returns:
# A matrix with the computed variables and identifying the subject ID
# number and repetition
require(stringr)
source("R/functions/select_ROM.R")
source("R/functions/work_integration.R")
source("R/functions/compute_power.R")
# Echo which file is being read. The str_sub() offsets extract the trailing
# file name and assume fixed total path lengths (84 vs. 85 characters for
# "60gs" runs, 86 vs. 85 for "180gs") -- NOTE(review): basename(file) would
# be a sturdier way to recover the file name; confirm the layout assumption.
if (str_detect(file, "60gs")) {
if (str_length(file) == 84) {
print(
str_c(
"Reading file: ",
str_sub(file, str_length(file) - 35, str_length(file))
)
)
} else {
print(
str_c(
"Reading file: ",
str_sub(file, str_length(file) - 34, str_length(file))
)
)
}
} else {
if (str_detect(file, "180gs")) {
if (str_length(file) == 86) {
print(
str_c(
"Reading file: ",
str_sub(file, str_length(file) - 36, str_length(file))
)
)
} else {
print(
str_c(
"Reading file: ",
str_sub(file, str_length(file) - 35, str_length(file))
)
)
}
}
}
# D: test data restricted to the requested range of motion. Column 2 is used
# below as torque and column 4 as joint angle (inferred from usage; confirm
# against select_ROM()).
D <- select_ROM(file, ROM)
B <- read.csv("data/raw/body_composition.csv")
# Detect parameters
# For ID and rep, the chunk of the string to be subset depends on the length
# of the string
# NOTE(review): "rep" shadows base::rep() inside this function.
if (str_detect(file, "180gs")) {
if (str_length(file) == 86) {
ID <- str_sub(file, str_length(file) - 13, str_length(file) - 11)
rep <- str_sub(file, str_length(file) - 9, str_length(file) - 4)
} else {
ID <- str_sub(file, str_length(file) - 12, str_length(file) - 10)
rep <- str_sub(file, str_length(file) - 8, str_length(file) - 4)
}
} else {
ID <- str_sub(file, str_length(file) - 12, str_length(file) - 10)
rep <- str_sub(file, str_length(file) - 8, str_length(file) - 4)
}
# Look up body composition by subject ID (column 1 of B).
BM <- B[which(B[, 1] == as.numeric(ID)), 2] # body mass
LM <- B[which(B[, 1] == as.numeric(ID)), 4] # lower limb mass
# Compute variables
peak_torque <- max(abs(D[, 2]))
peak_torque_BM <- peak_torque / BM
peak_torque_LM <- peak_torque / LM
# Angle at which peak torque occurs; min() picks the smallest angle on ties.
peak_torque_angle <- min(unname(D[which(abs(D[, 2]) == peak_torque), 4]))
total_work <- work_integration(D)
total_work_BM <- total_work / BM
total_work_LM <- total_work / LM
# compute_power() apparently returns c(peak, average) given the [1]/[2]
# indexing here -- TODO(review): confirm against compute_power.R.
average_power <- compute_power(D)[2]
average_power_BM <- average_power / BM
average_power_LM <- average_power / LM
peak_power <- compute_power(D)[1]
peak_power_BM <- peak_power / BM
peak_power_LM <- peak_power / LM
# Assemble data frame
# as.matrix() on a data frame with character ID/rep coerces every cell to
# character.
M <- as.matrix(
data.frame(
ID, rep, peak_torque, peak_torque_BM,
peak_torque_LM, peak_torque_angle,
total_work, total_work_BM, total_work_LM,
average_power, average_power_BM, average_power_LM,
peak_power, peak_power_BM, peak_power_LM
)
)
# NOTE(review): since M is a character matrix, assigning as.numeric() here is
# immediately coerced back to character -- likely not what was intended.
M[, 1] <- as.numeric(M[, 1])
return(M)
} | /code/R/functions/compute_variables_file.R | permissive | verasls/BaSEIB_isokinetic_strength | R | false | false | 3,166 | r | compute_variables_file <- function(file, ROM = FALSE) {
# Computes some variables based on isokinetic strenght test data from a single
# file
#
# Args:
# file: name of the file containing isokinetic strength test data
# ROM: a numeric sequence corresponding the desired range of motion
#
# Returns:
# A matrix with the computed variables and identifying the subject ID
# number and repetition
require(stringr)
source("R/functions/select_ROM.R")
source("R/functions/work_integration.R")
source("R/functions/compute_power.R")
if (str_detect(file, "60gs")) {
if (str_length(file) == 84) {
print(
str_c(
"Reading file: ",
str_sub(file, str_length(file) - 35, str_length(file))
)
)
} else {
print(
str_c(
"Reading file: ",
str_sub(file, str_length(file) - 34, str_length(file))
)
)
}
} else {
if (str_detect(file, "180gs")) {
if (str_length(file) == 86) {
print(
str_c(
"Reading file: ",
str_sub(file, str_length(file) - 36, str_length(file))
)
)
} else {
print(
str_c(
"Reading file: ",
str_sub(file, str_length(file) - 35, str_length(file))
)
)
}
}
}
D <- select_ROM(file, ROM)
B <- read.csv("data/raw/body_composition.csv")
# Detect parameters
# For ID and rep, the chunk of the string to be subset depends on the length
# of the string
if (str_detect(file, "180gs")) {
if (str_length(file) == 86) {
ID <- str_sub(file, str_length(file) - 13, str_length(file) - 11)
rep <- str_sub(file, str_length(file) - 9, str_length(file) - 4)
} else {
ID <- str_sub(file, str_length(file) - 12, str_length(file) - 10)
rep <- str_sub(file, str_length(file) - 8, str_length(file) - 4)
}
} else {
ID <- str_sub(file, str_length(file) - 12, str_length(file) - 10)
rep <- str_sub(file, str_length(file) - 8, str_length(file) - 4)
}
BM <- B[which(B[, 1] == as.numeric(ID)), 2] # body mass
LM <- B[which(B[, 1] == as.numeric(ID)), 4] # lower limb mass
# Compute variables
peak_torque <- max(abs(D[, 2]))
peak_torque_BM <- peak_torque / BM
peak_torque_LM <- peak_torque / LM
peak_torque_angle <- min(unname(D[which(abs(D[, 2]) == peak_torque), 4]))
total_work <- work_integration(D)
total_work_BM <- total_work / BM
total_work_LM <- total_work / LM
average_power <- compute_power(D)[2]
average_power_BM <- average_power / BM
average_power_LM <- average_power / LM
peak_power <- compute_power(D)[1]
peak_power_BM <- peak_power / BM
peak_power_LM <- peak_power / LM
# Assemble data frame
M <- as.matrix(
data.frame(
ID, rep, peak_torque, peak_torque_BM,
peak_torque_LM, peak_torque_angle,
total_work, total_work_BM, total_work_LM,
average_power, average_power_BM, average_power_LM,
peak_power, peak_power_BM, peak_power_LM
)
)
M[, 1] <- as.numeric(M[, 1])
return(M)
} |
# Error-localization examples: a balance rule plus two range checks.
rules <- validator( profit + cost == turnover
, cost >= 0.6 * turnover # cost should be at least 60% of turnover
, turnover >= 0 # can not be negative.
)
data <- data.frame(profit=755, cost=125, turnover=200)
# Find the (weighted) minimal set of fields to change so all rules hold.
le <- locate_errors(data, rules)
print(le)
summary(le)
# Categorical rules: value domains plus a conditional dependency A -> B.
v_categorical <- validator( A %in% c("a1", "a2")
, B %in% c("b1", "b2")
, if (A == "a1") B == "b1"
)
data <- data.frame(A = c("a1", "a2"), B = c("b2", "b2"))
locate_errors(data, v_categorical)$errors
# Logical variables are handled the same way as categorical ones.
v_logical <- validator( A %in% c(TRUE, FALSE)
, B %in% c(TRUE, FALSE)
, if (A == TRUE) B == TRUE
)
data <- data.frame(A = TRUE, B = FALSE)
# Per-variable weights: changing A costs twice as much as changing B.
locate_errors(data, v_logical, weight=c(2,1))$errors
# try a conditional rule
v <- validator( married %in% c(TRUE, FALSE), if (married==TRUE) age >= 17 )
data <- data.frame( married = TRUE, age = 16)
locate_errors(data, v, weight=c(married=1, age=2))$errors
| /examples/locate_errors.R | no_license | edwindj/goweradjust | R | false | false | 1,043 | r | rules <- validator( profit + cost == turnover
, cost >= 0.6 * turnover # cost should be at least 60% of turnover
, turnover >= 0 # can not be negative.
)
data <- data.frame(profit=755, cost=125, turnover=200)
le <- locate_errors(data, rules)
print(le)
summary(le)
v_categorical <- validator( A %in% c("a1", "a2")
, B %in% c("b1", "b2")
, if (A == "a1") B == "b1"
)
data <- data.frame(A = c("a1", "a2"), B = c("b2", "b2"))
locate_errors(data, v_categorical)$errors
v_logical <- validator( A %in% c(TRUE, FALSE)
, B %in% c(TRUE, FALSE)
, if (A == TRUE) B == TRUE
)
data <- data.frame(A = TRUE, B = FALSE)
locate_errors(data, v_logical, weight=c(2,1))$errors
# try a condinational rule
v <- validator( married %in% c(TRUE, FALSE), if (married==TRUE) age >= 17 )
data <- data.frame( married = TRUE, age = 16)
locate_errors(data, v, weight=c(married=1, age=2))$errors
|
\name{vcov.varComp}
\alias{vcov.varComp}
\title{Extracting Variance-Covariance Matrices
}
\description{
Extracting (approximate) variance-covariance matrices for fixed-effect parameters, variance components, ratios of variance components to error variance, or the response variable.
}
\usage{
\method{vcov}{varComp}(object, what = c("fixed", "beta", "random", "varComp",
"var.ratio", "tau", "response", "Y"), drop = TRUE, beta.correction=TRUE, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{object}{
An object of class \code{varComp}.
}
\item{what}{
A character vector (only the first element will be used) specifying what variance-covariance matrices are requested. \code{"fixed"} or \code{"beta"} request approximate variance-covariance of fixed-effect parameters (see details). \code{"random"} or \code{"varComp"} request the approximate variance-covariance matrix of variance components computed from the expected information matrix. \code{"var.ratio"} or \code{"tau"} requests approximate variance-covariance matrix of ratio of variance components to the error variance computed from the observed information matrix. \code{"response"} or \code{"Y"} request the marginal variance of the response variable computed from the plug-in estimate.
}
\item{drop}{
A logical scalar, indicating whether zero variance components should be dropped from the results.
}
\item{beta.correction}{
A logical scalar, only applicable when \code{what='beta'}, indicating whether the variance-covariance matrix for fixed effect estimates is corrected according to Kackar and Harville (1984).
}
\item{\dots}{
Placeholder; currently unused.
}
}
\details{
For fixed-effect parameters, the results is the plug-in estimate variance of generalized least squares estimates when \code{beta.correction=FALSE}; Otherwise, the Kackar and Harville (1984) correction will be used (default). For ratios of variance components to error variance, the result is the Hessian matrix. For response variable, the result is the plug-in estimate of the marginal variance. For variance components, the result is the plug-in estimate of inverse expected information matrix from the restricted likelihood.
}
\value{
A numeric matrix of the requested variance-covariance.
}
\references{
Raghu N. Kackar and David A. Harville (1984) Approximations for standard errors of estimators of fixed and random effect in mixed linear models. \emph{Journal of the American Statistical Association} 79, 853--862
}
\author{
Long Qu
}
\seealso{
\code{\link{varComp}} for the varComp object;
\code{\link{KR.varComp}} for testing fixed effect parameters accounting for uncertainty in variance parameter estimates.
}
%\examples{
%}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ models }
\keyword{ nonlinear }% __ONLY ONE__ keyword per line
| /man/vcov.varComp.Rd | no_license | cran/varComp | R | false | false | 2,878 | rd | \name{vcov.varComp}
\alias{vcov.varComp}
\title{Extracting Variance-Covariance Matrices
}
\description{
Extracting (approximate) variance-covariance matrices for fixed-effect parameters, variance components, ratios of variance components to error variance, or the response variable.
}
\usage{
\method{vcov}{varComp}(object, what = c("fixed", "beta", "random", "varComp",
"var.ratio", "tau", "response", "Y"), drop = TRUE, beta.correction=TRUE, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{object}{
An object of class \code{varComp}.
}
\item{what}{
A character vector (only the first element will be used) specifying what variance-covariance matrices are requested. \code{"fixed"} or \code{"beta"} request approximate variance-covariance of fixed-effect parameters (see details). \code{"random"} or \code{"varComp"} request the approximate variance-covariance matrix of variance components computed from the expected information matrix. \code{"var.ratio"} or \code{"tau"} requests approximate variance-covariance matrix of ratio of variance components to the error variance computed from the observed information matrix. \code{"response"} or \code{"Y"} request the marginal variance of the response variable computed from the plug-in estimate.
}
\item{drop}{
A logical scalar, indicating whether zero variance components should be dropped from the results.
}
\item{beta.correction}{
A logical scalar, only applicable when \code{what='beta'}, indicating whether the variance-covariance matrix for fixed effect estimates is corrected according to Kackar and Harville (1984).
}
\item{\dots}{
Placeholder; currently unused.
}
}
\details{
For fixed-effect parameters, the results is the plug-in estimate variance of generalized least squares estimates when \code{beta.correction=FALSE}; Otherwise, the Kackar and Harville (1984) correction will be used (default). For ratios of variance components to error variance, the result is the Hessian matrix. For response variable, the result is the plug-in estimate of the marginal variance. For variance components, the result is the plug-in estimate of inverse expected information matrix from the restricted likelihood.
}
\value{
A numeric matrix of the requested variance-covariance.
}
\references{
Raghu N. Kackar and David A. Harville (1984) Approximations for standard errors of estimators of fixed and random effect in mixed linear models. \emph{Journal of the American Statistical Association} 79, 853--862
}
\author{
Long Qu
}
\seealso{
\code{\link{varComp}} for the varComp object;
\code{\link{KR.varComp}} for testing fixed effect parameters accounting for uncertainty in variance parameter estimates.
}
%\examples{
%}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ models }
\keyword{ nonlinear }% __ONLY ONE__ keyword per line
|
# Compute the quarterly SREMBI composite (real-estate market) index.
#
# Args:
#   SREMBI: data frame with one row per quarter and columns Date, Price,
#           IPC (percent), Household.Income, Monthly.Rent, Construction, GDP.
#   weigths: one-row data frame of component weights in percent
#            (w.Pr.House, w.HH.income, w.cons, w.ipc).
#   equilibriums: one-row data frame of equilibrium reference levels
#                 (Eq.Pr.House, Eq.HH.income, Eq.cons in percent, Eq.ipc).
#
# Returns:
#   A data frame with columns Quarter and Index, one row per quarter.
calcSREMBI <- function(SREMBI,weigths,equilibriums) {
  n.quarters <- nrow(SREMBI)
  # Convert percentage inputs to proportions.
  SREMBI$IPC <- SREMBI$IPC / 100
  weigths <- weigths / 100
  equilibriums$Eq.cons <- equilibriums$Eq.cons / 100
  # --- House prices --------------------------------------------------------
  # Price of a reference 95 m2 dwelling and its price change over a
  # 3-quarter lag; the first three quarters fall back to general inflation.
  avg.house.price <- 95 * SREMBI$Price
  price.change <- matrix(0, nrow = n.quarters, ncol = 1)
  later <- 4:n.quarters
  price.change[later] <- (SREMBI$Price[later] - SREMBI$Price[later - 3]) / SREMBI$Price[later - 3]
  price.change[1:3] <- SREMBI$IPC[1:3]
  # --- Price-to-income and price-to-rent ratios ----------------------------
  price.to.income <- avg.house.price / SREMBI$Household.Income
  income.component <- (price.to.income / equilibriums$Eq.Pr.House) * weigths$w.Pr.House
  annual.rent <- 12 * SREMBI$Monthly.Rent
  price.to.rent <- avg.house.price / annual.rent
  rent.component <- (price.to.rent / equilibriums$Eq.HH.income) * weigths$w.HH.income
  # --- Construction share of GDP -------------------------------------------
  construction.share <- SREMBI$Construction / SREMBI$GDP
  construction.component <- (construction.share / equilibriums$Eq.cons) * weigths$w.cons
  # --- Real house-price appreciation ---------------------------------------
  real.price.change <- price.change - SREMBI$IPC
  inflation.component <- ((real.price.change / equilibriums$Eq.ipc) + 1) * weigths$w.ipc
  # Weighted sum of the four components gives the composite index.
  index.values <- income.component + rent.component +
    construction.component + inflation.component
  data.frame(Quarter = SREMBI$Date, Index = index.values, stringsAsFactors = FALSE)
}
| /helpers.R | no_license | gabifoix/SREMBIApp | R | false | false | 1,749 | r |
calcSREMBI <- function(SREMBI,weigths,equilibriums) {
nquart <- nrow(SREMBI)
SREMBI$IPC <- SREMBI$IPC / 100
weigths <- weigths/100
equilibriums$Eq.cons <- equilibriums$Eq.cons / 100
# House Prices
Avg.M2.House <- 95 # Average size
Avg.Pr.House <- Avg.M2.House * SREMBI$Price
Ch.Pr.House <- matrix(0,ncol=1, nrow=nquart)
Ch.Pr.House[4:nquart] <- sapply(seq(4,nquart),function(x){(SREMBI$Price[x] - SREMBI$Price[x-3])/ SREMBI$Price[x-3]})
Ch.Pr.House[1:3] <- SREMBI$IPC[1:3]
# Household income / Annual Rent
Pr.HH.Income <- Avg.Pr.House / SREMBI$Household.Income
Var.Equilibrium.Pr.HH.Income <- Pr.HH.Income / equilibriums$Eq.Pr.House
Alloc.Var.Equilibrium.Pr.HH.Income <- Var.Equilibrium.Pr.HH.Income * weigths$w.Pr.House
Annual.Rent <- 12* SREMBI$Monthly.Rent
Pr.Annual.Rent <- Avg.Pr.House / Annual.Rent
Var.Equilibrium.Pr.Annual.Rent <- Pr.Annual.Rent / equilibriums$Eq.HH.income
Alloc.Var.Equilibrium.Pr.Annual.Rent <- Var.Equilibrium.Pr.Annual.Rent * weigths$w.HH.income
# Construction over GDP
Const.GDP <- SREMBI$Construction / SREMBI$GDP
Var.Equilibrium.Const.GDP <- Const.GDP / equilibriums$Eq.cons
Alloc.Var.Equilibrium.Const.GDP <- Var.Equilibrium.Const.GDP *weigths$w.cons
# General price index
Ch.Prices <- Ch.Pr.House - SREMBI$IPC
Var.Equilibrium.Ch.Prices <- (Ch.Prices / equilibriums$Eq.ipc) + 1
Alloc.Var.Equilibrium.Ch.Prices <- Var.Equilibrium.Ch.Prices * weigths$w.ipc
# Index
Index <- Alloc.Var.Equilibrium.Pr.HH.Income +
Alloc.Var.Equilibrium.Pr.Annual.Rent +
Alloc.Var.Equilibrium.Const.GDP +
Alloc.Var.Equilibrium.Ch.Prices
Index.df <- data.frame(Quarter = SREMBI$Date, Index = Index, stringsAsFactors = FALSE)
return(Index.df)
}
|
# ChIP-seq peak profiling setup: collect per-sample model scripts and build a
# named list of MACS summit (peak) files for tag-matrix analysis.
model.file<-dir(pattern="bam_mm_model.r")
#dir.name="/media/H_driver/2016/Morey_project/Peak_chip_seq/Shift75_summits/"
#file.name=dir("/media/H_driver/2016/Morey_project/Peak_chip_seq/Shift75_summits/",pattern="summits")
dir.name="/media/H_driver/2016/Morey_project/Peak_chip_rm_control/"
file.name=dir("/media/H_driver/2016/Morey_project/Peak_chip_rm_control/",pattern="summits")
peak.files<-paste0(dir.name,file.name)
# Sample names = file names with the directory and the MACS suffix stripped.
name.sample<-gsub("__bam_mm_shift_75_2_summits.bed","",gsub(dir.name,"",peak.files))
peak.files.list<-as.list(peak.files)
names(peak.files.list)=name.sample
# NOTE(review): txdb is only assigned later in this script; these calls rely
# on it already being present in the workspace when run top-to-bottom.
promoter <- getPromoters(TxDb=txdb, upstream=3000, downstream=3000)
tagMatrixList <- lapply(peak.files.list, getTagMatrix, windows=promoter)
plotAvgProf(tagMatrixList[[1]], xlim=c(-3000, 3000), conf=0.95,resample=100, facet="row")
tagHeatmap(tagMatrixList[[1]], xlim=c(-3000, 3000), title="7",color="red")
names(peak.files.list[[1]])
# Source each per-sample model script found above. seq_along() adapts to the
# actual number of matched files; the original hard-coded 1:18 errors when
# fewer files are present and silently skips any extras.
for (i in seq_along(model.file)) {
  source(model.file[i])
}
# One-time Bioconductor installs. biocLite is the legacy installer; current
# Bioconductor uses BiocManager::install().
source("http://bioconductor.org/biocLite.R")
biocLite("ChIPseeker")
biocLite("TxDb.Mmusculus.UCSC.mm10.knownGene")
biocLite("clusterProfiler")
require(ChIPseeker)
# NOTE(review): the mouse (mm10) annotation is installed above, but the human
# hg19 TxDb is what gets loaded and used below -- confirm which genome applies.
require(TxDb.Hsapiens.UCSC.hg19.knownGene)
txdb <-TxDb.Hsapiens.UCSC.hg19.knownGene
require(clusterProfiler)
# Walk through the ChIPseeker demo data set.
files <- getSampleFiles()
print(files)
class(files)
names(files)
peak <- readPeakFile(files[[4]])
covplot(peak, weightCol="V5")
# Read-count profile around TSS regions (+/- 3 kb) for the demo peak set.
promoter <- getPromoters(TxDb=txdb, upstream=3000, downstream=3000)
tagMatrix <- getTagMatrix(peak, windows=promoter)
plotAvgProf(tagMatrix, xlim=c(-3000, 3000), xlab="Genomic Region (5'->3')", ylab = "Read Count Frequency")
peakHeatmap(peak, TxDb=txdb, upstream=3000, downstream=3000, color="red")
# Annotate the first project peak file and inspect the annotation object.
peakAnno <- annotatePeak(peak.files[1], tssRegion=c(-3000, 3000), TxDb=txdb)
peakAnno.data<-as.data.frame(peakAnno)
dim(peakAnno.data)
head(peakAnno.data)
class(peakAnno)
csAnno(peakAnno)
plotAnnoPie(peakAnno)
plotAnnoBar(peakAnno)
vennpie(peakAnno)
upsetplot(peakAnno)
upsetplot(peakAnno, vennpie=TRUE)
plotDistToTSS(peakAnno,title="Distribution of transcription factor-binding loci\nrelative to TSS")
as.data.frame(peakAnno)
getGEOspecies()
# Tag matrices for all project samples around promoters.
promoter <- getPromoters(TxDb=txdb, upstream=3000, downstream=3000)
tagMatrixList <- lapply(peak.files.list, getTagMatrix, windows=promoter)
plotAvgProf(tagMatrixList, xlim=c(-3000, 3000))
plotAvgProf(tagMatrixList, xlim=c(-3000, 3000), conf=0.95,resample=100, facet="row")
tagHeatmap(tagMatrixList[[7]], xlim=c(-3000, 3000), title="7",color="red")
tagHeatmap(tagMatrixList[[8]], xlim=c(-3000, 3000), title="8",color="red")
tagHeatmap(tagMatrixList[[13]], xlim=c(-3000, 3000), title="13",color="red")
tagHeatmap(tagMatrixList[[12]], xlim=c(-3000, 3000), title="12",color="red")
# NOTE(review): the identical pair of calls below is repeated seven times --
# this looks like pasted interactive history rather than intentional code.
tagHeatmap(tagMatrixList[[5]], xlim=c(-1000, 1000), title="H3K4me3",color="red")
tagHeatmap(tagMatrixList[[8]], xlim=c(-3000, 3000), title="H3K4me3",color="red")
tagHeatmap(tagMatrixList[[5]], xlim=c(-1000, 1000), title="H3K4me3",color="red")
tagHeatmap(tagMatrixList[[8]], xlim=c(-3000, 3000), title="H3K4me3",color="red")
tagHeatmap(tagMatrixList[[5]], xlim=c(-1000, 1000), title="H3K4me3",color="red")
tagHeatmap(tagMatrixList[[8]], xlim=c(-3000, 3000), title="H3K4me3",color="red")
tagHeatmap(tagMatrixList[[5]], xlim=c(-1000, 1000), title="H3K4me3",color="red")
tagHeatmap(tagMatrixList[[8]], xlim=c(-3000, 3000), title="H3K4me3",color="red")
tagHeatmap(tagMatrixList[[5]], xlim=c(-1000, 1000), title="H3K4me3",color="red")
tagHeatmap(tagMatrixList[[8]], xlim=c(-3000, 3000), title="H3K4me3",color="red")
tagHeatmap(tagMatrixList[[5]], xlim=c(-1000, 1000), title="H3K4me3",color="red")
tagHeatmap(tagMatrixList[[8]], xlim=c(-3000, 3000), title="H3K4me3",color="red")
tagHeatmap(tagMatrixList[[5]], xlim=c(-1000, 1000), title="H3K4me3",color="red")
tagHeatmap(tagMatrixList[[8]], xlim=c(-3000, 3000), title="H3K4me3",color="red")
| /R_from_H_driver/ChipSeq-Data-Analysis.R | no_license | aiminy/SCCC-Code | R | false | false | 3,854 | r | model.file<-dir(pattern="bam_mm_model.r")
#dir.name="/media/H_driver/2016/Morey_project/Peak_chip_seq/Shift75_summits/"
#file.name=dir("/media/H_driver/2016/Morey_project/Peak_chip_seq/Shift75_summits/",pattern="summits")
dir.name="/media/H_driver/2016/Morey_project/Peak_chip_rm_control/"
file.name=dir("/media/H_driver/2016/Morey_project/Peak_chip_rm_control/",pattern="summits")
peak.files<-paste0(dir.name,file.name)
name.sample<-gsub("__bam_mm_shift_75_2_summits.bed","",gsub(dir.name,"",peak.files))
peak.files.list<-as.list(peak.files)
names(peak.files.list)=name.sample
promoter <- getPromoters(TxDb=txdb, upstream=3000, downstream=3000)
tagMatrixList <- lapply(peak.files.list, getTagMatrix, windows=promoter)
plotAvgProf(tagMatrixList[[1]], xlim=c(-3000, 3000), conf=0.95,resample=100, facet="row")
tagHeatmap(tagMatrixList[[1]], xlim=c(-3000, 3000), title="7",color="red")
names(peak.files.list[[1]])
for(i in 1:18)
{
source(model.file[i])
}
source("http://bioconductor.org/biocLite.R")
biocLite("ChIPseeker")
biocLite("TxDb.Mmusculus.UCSC.mm10.knownGene")
biocLite("clusterProfiler")
require(ChIPseeker)
require(TxDb.Hsapiens.UCSC.hg19.knownGene)
txdb <-TxDb.Hsapiens.UCSC.hg19.knownGene
require(clusterProfiler)
files <- getSampleFiles()
print(files)
class(files)
names(files)
peak <- readPeakFile(files[[4]])
covplot(peak, weightCol="V5")
promoter <- getPromoters(TxDb=txdb, upstream=3000, downstream=3000)
tagMatrix <- getTagMatrix(peak, windows=promoter)
plotAvgProf(tagMatrix, xlim=c(-3000, 3000), xlab="Genomic Region (5'->3')", ylab = "Read Count Frequency")
peakHeatmap(peak, TxDb=txdb, upstream=3000, downstream=3000, color="red")
peakAnno <- annotatePeak(peak.files[1], tssRegion=c(-3000, 3000), TxDb=txdb)
peakAnno.data<-as.data.frame(peakAnno)
dim(peakAnno.data)
head(peakAnno.data)
class(peakAnno)
csAnno(peakAnno)
plotAnnoPie(peakAnno)
plotAnnoBar(peakAnno)
vennpie(peakAnno)
upsetplot(peakAnno)
upsetplot(peakAnno, vennpie=TRUE)
plotDistToTSS(peakAnno,title="Distribution of transcription factor-binding loci\nrelative to TSS")
as.data.frame(peakAnno)
getGEOspecies()
promoter <- getPromoters(TxDb=txdb, upstream=3000, downstream=3000)
tagMatrixList <- lapply(peak.files.list, getTagMatrix, windows=promoter)
plotAvgProf(tagMatrixList, xlim=c(-3000, 3000))
plotAvgProf(tagMatrixList, xlim=c(-3000, 3000), conf=0.95,resample=100, facet="row")
tagHeatmap(tagMatrixList[[7]], xlim=c(-3000, 3000), title="7",color="red")
tagHeatmap(tagMatrixList[[8]], xlim=c(-3000, 3000), title="8",color="red")
tagHeatmap(tagMatrixList[[13]], xlim=c(-3000, 3000), title="13",color="red")
tagHeatmap(tagMatrixList[[12]], xlim=c(-3000, 3000), title="12",color="red")
tagHeatmap(tagMatrixList[[5]], xlim=c(-1000, 1000), title="H3K4me3",color="red")
tagHeatmap(tagMatrixList[[8]], xlim=c(-3000, 3000), title="H3K4me3",color="red")
tagHeatmap(tagMatrixList[[5]], xlim=c(-1000, 1000), title="H3K4me3",color="red")
tagHeatmap(tagMatrixList[[8]], xlim=c(-3000, 3000), title="H3K4me3",color="red")
tagHeatmap(tagMatrixList[[5]], xlim=c(-1000, 1000), title="H3K4me3",color="red")
tagHeatmap(tagMatrixList[[8]], xlim=c(-3000, 3000), title="H3K4me3",color="red")
tagHeatmap(tagMatrixList[[5]], xlim=c(-1000, 1000), title="H3K4me3",color="red")
tagHeatmap(tagMatrixList[[8]], xlim=c(-3000, 3000), title="H3K4me3",color="red")
tagHeatmap(tagMatrixList[[5]], xlim=c(-1000, 1000), title="H3K4me3",color="red")
tagHeatmap(tagMatrixList[[8]], xlim=c(-3000, 3000), title="H3K4me3",color="red")
tagHeatmap(tagMatrixList[[5]], xlim=c(-1000, 1000), title="H3K4me3",color="red")
tagHeatmap(tagMatrixList[[8]], xlim=c(-3000, 3000), title="H3K4me3",color="red")
tagHeatmap(tagMatrixList[[5]], xlim=c(-1000, 1000), title="H3K4me3",color="red")
tagHeatmap(tagMatrixList[[8]], xlim=c(-3000, 3000), title="H3K4me3",color="red")
|
# Differential expression analysis of HTSeq count files with DESeq2:
# builds a gene-by-sample count matrix, checks for a flowcell-lane effect,
# contrasts each treatment against control treatment "4", and draws a
# log2-fold-change heatmap of the three contrasts.
library(DESeq2)
library("RColorBrewer")
library("gplots")
library("pheatmap")
library(gdata)
# NOTE(review): setwd() makes this script non-portable; prefer explicit paths.
setwd("data/")
#Importing data
in_dir = dir(, pattern= "_htseq_counts.txt" )
counts_in <- lapply(in_dir, function(x) read.table(x, header=F, nrows=23337))
# 23337 genes x 48 samples; assumes every file lists genes in the same order.
tot_count_matrix <- matrix(unlist(lapply(counts_in, function(x) x$V2)) , ncol=48, nrow=23337)
# Sample metadata is encoded in the file names as underscore-separated fields.
parse_names <- strsplit(in_dir, split="_")
parse_names <- matrix(unlist(parse_names), nrow=48, ncol=8, byrow=T)
col_names_counts <- paste(parse_names[,1], "_", parse_names[,2], "_", parse_names[,3], parse_names[,4], sep="")
colnames(tot_count_matrix) = col_names_counts
rownames(tot_count_matrix) = counts_in[[1]]$V1
#Setting up experimental design
experimental_design = data.frame(
sample_names = col_names_counts, # sample name
age = factor(parse_names[,1]), # old or young
treatment = factor(parse_names[,2]), # treatment plan
lane = factor(parse_names[,4]) # Which lane on the Illumina flowcell.
)
#Testing for lane effect
test_lane <- DESeqDataSetFromMatrix(tot_count_matrix, experimental_design, design = formula(~ lane))
test_lane <- DESeq(test_lane)
test_lane_results <- results(test_lane, pAdjustMethod="BH")
# Sorted with the largest adjusted p-values first (note the minus sign).
test_lane_results <- test_lane_results[order(-test_lane_results$padj),]
head(test_lane_results)
summary(test_lane_results)
hist(na.omit(test_lane_results$pvalue))
plotDispEsts(test_lane, xlab="Mean of Normalized Counts", ylab="Dispersion", main="Mean Dispersion")
plotMA(test_lane, ylim=c(-3,3), main= "Diffential Expression by Lane")
summary(test_lane_results)
# Regularized-log transform of the counts for PCA diagnostics.
pca_analysis <- rlog(test_lane, blind=TRUE)
#Testing for grouping by lane, age, or treatment
plotPCA(pca_analysis, intgroup=c("lane"))
plotPCA(pca_analysis, intgroup=c("age"))
plotPCA(pca_analysis, intgroup=c("treatment"))
#Comparing each group with the control treatment 4
# NOTE(review): the three DESeq_data_*_4 objects below use identical data,
# design and releveling, so the same model is fit three times; fitting once
# and calling results() with different contrasts would be equivalent.
#LPS_LPS vs vec_vec mice
DESeq_data_1_4 <- DESeqDataSetFromMatrix(tot_count_matrix, experimental_design, design = formula(~treatment + age + treatment:age))
DESeq_data_1_4$treatment <- relevel(DESeq_data_1_4$treatment, ref= "4")
DESeq_data_1_4 <- DESeq(DESeq_data_1_4)
resultsNames(DESeq_data_1_4)
treatment_results_1_vs_4 <- results(DESeq_data_1_4, contrast=list("treatment_1_vs_4", "age_Y_vs_O"), pAdjustMethod="BH" , alpha= 0.05)
summary(treatment_results_1_vs_4)
treatment_results_1_vs_4_sorted <- treatment_results_1_vs_4[order(treatment_results_1_vs_4$padj),]
write.csv(treatment_results_1_vs_4_sorted, file="treatment_diff_1_vs_4")
plotDispEsts(DESeq_data_1_4, xlab="Mean of Normalized Counts", ylab="Dispersion", main="Mean Dispersion")
plotMA(treatment_results_1_vs_4, ylim=c(-10,10), main="LPS LPS vs vec vec")
pca_analysis <- rlog(DESeq_data_1_4, blind=TRUE)
plotPCA(pca_analysis, intgroup=c("age"))
#LPS_vec vs vec_vec mice
DESeq_data_2_4 <- DESeqDataSetFromMatrix(tot_count_matrix, experimental_design, design = formula(~treatment + age + treatment:age))
DESeq_data_2_4$treatment <- relevel(DESeq_data_2_4$treatment, ref= "4")
DESeq_data_2_4 <- DESeq(DESeq_data_2_4)
resultsNames(DESeq_data_2_4)
treatment_results_2_vs_4 <- results(DESeq_data_2_4, contrast=list("treatment_2_vs_4", "age_Y_vs_O"), pAdjustMethod="BH" , alpha= 0.05)
summary(treatment_results_2_vs_4)
treatment_results_2_vs_4_sorted <- treatment_results_2_vs_4[order(treatment_results_2_vs_4$padj),]
write.csv(treatment_results_2_vs_4_sorted, file="treatment_diff_2_vs_4")
plotDispEsts(DESeq_data_2_4, xlab="Mean of Normalized Counts", ylab="Dispersion", main="Mean Dispersion")
plotMA(treatment_results_2_vs_4, ylim=c(-10,10), main="LPS vec vs vec vec")
#vec_LPS vs vec_vec mice
DESeq_data_3_4 <- DESeqDataSetFromMatrix(tot_count_matrix, experimental_design, design = formula(~treatment + age + treatment:age))
DESeq_data_3_4$treatment <- relevel(DESeq_data_3_4$treatment, ref= "4")
DESeq_data_3_4 <- DESeq(DESeq_data_3_4)
resultsNames(DESeq_data_3_4)
treatment_results_3_vs_4 <- results(DESeq_data_3_4, contrast=list("treatment_3_vs_4", "age_Y_vs_O"), pAdjustMethod="BH" , alpha= 0.05)
summary(treatment_results_3_vs_4)
treatment_results_3_vs_4_sorted <- treatment_results_3_vs_4[order(treatment_results_3_vs_4$padj),]
write.csv(treatment_results_3_vs_4_sorted, file="treatment_diff_3_vs_4")
plotDispEsts(DESeq_data_3_4, xlab="Mean of Normalized Counts", ylab="Dispersion", main="Mean Dispersion")
plotMA(treatment_results_3_vs_4, ylim=c(-10,10), main="vec LPS vs vec vec")
#Making a heatmap
id<-rownames(tot_count_matrix)
logFC1<-treatment_results_1_vs_4$log2FoldChange
padj1=treatment_results_1_vs_4$padj
logFC2<-treatment_results_2_vs_4$log2FoldChange
padj2=treatment_results_2_vs_4$padj
logFC3<-treatment_results_3_vs_4$log2FoldChange
padj3=treatment_results_3_vs_4$padj
# Drop genes with any NA (padj is NA for filtered / low-count genes).
df<- data.frame(id,logFC1,padj1,logFC2,padj2,logFC3,padj3)
df <- na.omit(df)
dim(df)
# Columns 2, 4, 6 of df are the three log2 fold-change columns.
change_matrix<- as.matrix(df[,c(2,4,6)])
colnames(change_matrix)<-c("treatment 1 vs 4" , "treatment 2 vs 4" , "treatment 3 vs 4")
hmcols<- colorRampPalette(c("blue2","blue","white", "yellow","yellow2"))(256)
# NOTE(review): "hclust=" and "margin=" rely on partial matching of
# heatmap.2's "hclustfun" and "margins" arguments; spell them out.
heatmap.2(change_matrix, col=hmcols, scale="row", hclust=function(x) hclust(x, method='complete'), distfun=dist, trace="none", margin=c(17,15), density.info="none", labRow=NA)
| /differential_expression.R | no_license | inickyap/Bio720 | R | false | false | 5,172 | r | library(DESeq2)
library("RColorBrewer")
library("gplots")
library("pheatmap")
library(gdata)
setwd("data/")
#Importing data
in_dir = dir(, pattern= "_htseq_counts.txt" )
counts_in <- lapply(in_dir, function(x) read.table(x, header=F, nrows=23337))
tot_count_matrix <- matrix(unlist(lapply(counts_in, function(x) x$V2)) , ncol=48, nrow=23337)
parse_names <- strsplit(in_dir, split="_")
parse_names <- matrix(unlist(parse_names), nrow=48, ncol=8, byrow=T)
col_names_counts <- paste(parse_names[,1], "_", parse_names[,2], "_", parse_names[,3], parse_names[,4], sep="")
colnames(tot_count_matrix) = col_names_counts
rownames(tot_count_matrix) = counts_in[[1]]$V1
#Setting up experimental design
experimental_design = data.frame(
sample_names = col_names_counts, # sample name
age = factor(parse_names[,1]), # old or young
treatment = factor(parse_names[,2]), # treatment plan
lane = factor(parse_names[,4]) # Which lane on the Illumina flowcell.
)
#Testing for lane effect
test_lane <- DESeqDataSetFromMatrix(tot_count_matrix, experimental_design, design = formula(~ lane))
test_lane <- DESeq(test_lane)
test_lane_results <- results(test_lane, pAdjustMethod="BH")
test_lane_results <- test_lane_results[order(-test_lane_results$padj),]
head(test_lane_results)
summary(test_lane_results)
hist(na.omit(test_lane_results$pvalue))
plotDispEsts(test_lane, xlab="Mean of Normalized Counts", ylab="Dispersion", main="Mean Dispersion")
plotMA(test_lane, ylim=c(-3,3), main= "Diffential Expression by Lane")
summary(test_lane_results)
pca_analysis <- rlog(test_lane, blind=TRUE)
#Testing for grouping by lane, age, or treatment
plotPCA(pca_analysis, intgroup=c("lane"))
plotPCA(pca_analysis, intgroup=c("age"))
plotPCA(pca_analysis, intgroup=c("treatment"))
#Comparing each group with the control treatment 4
#LPS_LPS vs vec_vec mice
DESeq_data_1_4 <- DESeqDataSetFromMatrix(tot_count_matrix, experimental_design, design = formula(~treatment + age + treatment:age))
DESeq_data_1_4$treatment <- relevel(DESeq_data_1_4$treatment, ref= "4")
DESeq_data_1_4 <- DESeq(DESeq_data_1_4)
resultsNames(DESeq_data_1_4)
treatment_results_1_vs_4 <- results(DESeq_data_1_4, contrast=list("treatment_1_vs_4", "age_Y_vs_O"), pAdjustMethod="BH" , alpha= 0.05)
summary(treatment_results_1_vs_4)
treatment_results_1_vs_4_sorted <- treatment_results_1_vs_4[order(treatment_results_1_vs_4$padj),]
write.csv(treatment_results_1_vs_4_sorted, file="treatment_diff_1_vs_4")
plotDispEsts(DESeq_data_1_4, xlab="Mean of Normalized Counts", ylab="Dispersion", main="Mean Dispersion")
plotMA(treatment_results_1_vs_4, ylim=c(-10,10), main="LPS LPS vs vec vec")
pca_analysis <- rlog(DESeq_data_1_4, blind=TRUE)
plotPCA(pca_analysis, intgroup=c("age"))
#LPS_vec vs vec_vec mice
DESeq_data_2_4 <- DESeqDataSetFromMatrix(tot_count_matrix, experimental_design, design = formula(~treatment + age + treatment:age))
DESeq_data_2_4$treatment <- relevel(DESeq_data_2_4$treatment, ref= "4")
DESeq_data_2_4 <- DESeq(DESeq_data_2_4)
resultsNames(DESeq_data_2_4)
treatment_results_2_vs_4 <- results(DESeq_data_2_4, contrast=list("treatment_2_vs_4", "age_Y_vs_O"), pAdjustMethod="BH" , alpha= 0.05)
summary(treatment_results_2_vs_4)
treatment_results_2_vs_4_sorted <- treatment_results_2_vs_4[order(treatment_results_2_vs_4$padj),]
write.csv(treatment_results_2_vs_4_sorted, file="treatment_diff_2_vs_4")
plotDispEsts(DESeq_data_2_4, xlab="Mean of Normalized Counts", ylab="Dispersion", main="Mean Dispersion")
plotMA(treatment_results_2_vs_4, ylim=c(-10,10), main="LPS vec vs vec vec")
#vec_LPS vs vec_vec mice
DESeq_data_3_4 <- DESeqDataSetFromMatrix(tot_count_matrix, experimental_design, design = formula(~treatment + age + treatment:age))
DESeq_data_3_4$treatment <- relevel(DESeq_data_3_4$treatment, ref= "4")
DESeq_data_3_4 <- DESeq(DESeq_data_3_4)
resultsNames(DESeq_data_3_4)
treatment_results_3_vs_4 <- results(DESeq_data_3_4, contrast=list("treatment_3_vs_4", "age_Y_vs_O"), pAdjustMethod="BH" , alpha= 0.05)
summary(treatment_results_3_vs_4)
treatment_results_3_vs_4_sorted <- treatment_results_3_vs_4[order(treatment_results_3_vs_4$padj),]
write.csv(treatment_results_3_vs_4_sorted, file="treatment_diff_3_vs_4")
plotDispEsts(DESeq_data_3_4, xlab="Mean of Normalized Counts", ylab="Dispersion", main="Mean Dispersion")
plotMA(treatment_results_3_vs_4, ylim=c(-10,10), main="vec LPS vs vec vec")
#Making a heatmap
id<-rownames(tot_count_matrix)
logFC1<-treatment_results_1_vs_4$log2FoldChange
padj1=treatment_results_1_vs_4$padj
logFC2<-treatment_results_2_vs_4$log2FoldChange
padj2=treatment_results_2_vs_4$padj
logFC3<-treatment_results_3_vs_4$log2FoldChange
padj3=treatment_results_3_vs_4$padj
df<- data.frame(id,logFC1,padj1,logFC2,padj2,logFC3,padj3)
df <- na.omit(df)
dim(df)
change_matrix<- as.matrix(df[,c(2,4,6)])
colnames(change_matrix)<-c("treatment 1 vs 4" , "treatment 2 vs 4" , "treatment 3 vs 4")
hmcols<- colorRampPalette(c("blue2","blue","white", "yellow","yellow2"))(256)
heatmap.2(change_matrix, col=hmcols, scale="row", hclust=function(x) hclust(x, method='complete'), distfun=dist, trace="none", margin=c(17,15), density.info="none", labRow=NA)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/readDepends.R
\name{readDepends}
\alias{readDepends}
\alias{readDepends.character}
\alias{readDepends.list}
\alias{readDepends.viz}
\title{read multiple dependency datasets into environment}
\usage{
readDepends(viz)
\method{readDepends}{character}(viz)
\method{readDepends}{list}(viz)
\method{readDepends}{viz}(viz)
}
\arguments{
\item{viz}{vizlab object, list, or vizlab identifier}
}
\value{
a list of data objects that are named according to depends
}
\description{
This function should be called from the generic, \code{readDepends()}. Reads
all dependency data from files into R format.
}
\examples{
wd <- getwd()
setwd(system.file(package = 'vizlab','testviz'))
#Read dependencies from list or viz object:
viz.data <- readDepends(list(depends = 'mayfly_nymph'))
viz.data[["mayfly_nymph"]]
setwd(wd)
}
\seealso{
readData
}
| /man/readDepends.Rd | permissive | USGS-VIZLAB/vizlab | R | false | true | 909 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/readDepends.R
\name{readDepends}
\alias{readDepends}
\alias{readDepends.character}
\alias{readDepends.list}
\alias{readDepends.viz}
\title{read multiple dependency datasets into environment}
\usage{
readDepends(viz)
\method{readDepends}{character}(viz)
\method{readDepends}{list}(viz)
\method{readDepends}{viz}(viz)
}
\arguments{
\item{viz}{vizlab object, list, or vizlab identifier}
}
\value{
a list of data objects that are named according to depends
}
\description{
This function should be called from the generic, \code{readDepends()}. Reads
all dependency data from files into R format.
}
\examples{
wd <- getwd()
setwd(system.file(package = 'vizlab','testviz'))
#Read dependencies from list or viz object:
viz.data <- readDepends(list(depends = 'mayfly_nymph'))
viz.data[["mayfly_nymph"]]
setwd(wd)
}
\seealso{
readData
}
|
#' @importFrom jsonlite fromJSON
#' @importFrom stats setNames
updateColumnName <- function(df_name, prev_name, new_name) {
new_names <- names(get(df_name, mstrio_temp_env))
new_names[new_names == prev_name] <- new_name
assign(df_name, stats::setNames(get(df_name, mstrio_temp_env), new_names), mstrio_temp_env)
}
reorderColumns <- function(df_name, cols_for_reorder, start_index) {
cols <- jsonlite::fromJSON(cols_for_reorder)
df <- get(df_name, mstrio_temp_env)
instr <- c((start_index):(length(cols) + (start_index - 1)))
names(instr) <- cols
assign(df_name, arrange.col(df, instr), mstrio_temp_env)
}
applyDataModeling <- function(steps, selected_objects) {
tryCatch({
clearTemporaryEnv();
parsed_steps <- jsonlite::parse_json(steps)
parsed_sel_objs <- jsonlite::parse_json(selected_objects)
for (step in parsed_steps) {
if (step$type == 'RENAME_DF') {
renameDataframe(step$oldName, step$newName)
} else if (step$type == 'RENAME_OBJ') {
updateColumnName(step$dfName, step$oldName, step$newName)
}
}
for (selected_df in parsed_sel_objs) {
cropDataframe(selected_df$dfName, selected_df$selectedObjects)
}
finishDataModeling(1)
},
error = function(e) {
print(e$message)
finishDataModeling(0)
});
}
renameDataframe <- function(oldName, newName) {
oldDf <- getDfFromTempEnv(oldName)
assign(
x = newName,
value = oldDf,
envir = mstrio_temp_env
)
remove(list = c(oldName), envir = mstrio_temp_env)
}
clearTemporaryEnv <- function() {
rm(list = ls(all.names = TRUE, envir = mstrio_temp_env), envir = mstrio_temp_env)
}
cloneDataframe <- function(dataframeToClone) {
originalDataframe <- mstrio_env[[dataframeToClone]]
assign(
x = dataframeToClone,
value = originalDataframe,
envir = mstrio_temp_env
)
}
cropDataframe <- function(df_name, selected_objects) {
df <- getDfFromTempEnv(df_name)
if (length(selected_objects) == 1) {
croppedDf <- data.frame(df[selected_objects[[1]]])
names(croppedDf) <- c(selected_objects[[1]])
} else {
croppedDf <- data.frame(df[, unlist(selected_objects)])
names(croppedDf) <- unlist(selected_objects)
}
assign(
x = df_name,
value = croppedDf,
envir = mstrio_temp_env
)
}
getListOfDataframes <- function(envir) {
unlisted <- unlist(eapply(mstrio_temp_env, function(x) is.data.frame(x) & nrow(x) > 0))
names <- names(which(unlisted))
names
}
getDfFromTempEnv <- function(dfName) {
existsInTempEnv <- !is.null(mstrio_temp_env[[dfName]])
if (!existsInTempEnv) {
cloneDataframe(dfName)
}
df <- mstrio_temp_env[[dfName]]
df
}
updateCube <- function(base_url, project_id, identity_token, cube_id, cube_name, update_policies) {
tryCatch({
displayUpdateLoadingMessage(cube_name)
connection <- mstrio::Connection$new(base_url, project_id = project_id, identity_token = identity_token, verbose = FALSE)
dataset <- Dataset$new(connection, dataset_id = cube_id)
parsed_update_policies <- jsonlite::fromJSON(update_policies)
for (i in 1:nrow(parsed_update_policies)) {
table_name = parsed_update_policies[i,]$tableName
update_policy = parsed_update_policies[i,]$updatePolicy
df <- getDfFromTempEnv(table_name)
dataset$add_table(table_name, df, update_policy)
}
dataset$update(auto_publish = FALSE)
displayPublishLoadingMessage(cube_name)
dataset$publish()
clearTemporaryEnv()
finishCubeUpdate(1, cube_name)
},
error = function(error) {
print(error$message)
finishCubeUpdate(0, cube_name)
});
}
exportDataframes <- function(base_url, project_id, identity_token, save_as_name, description, selected_dataframes_json, folder_id, certify) {
displayExportStartMessage(save_as_name);
tryCatch({
connection <- mstrio::Connection$new(base_url, project_id = project_id, identity_token = identity_token, verbose = FALSE)
new_dataset <- mstrio::Dataset$new(connection, save_as_name, description);
selected_dataframes <- jsonlite::fromJSON(selected_dataframes_json)
for (i in 1:nrow(selected_dataframes)) {
df_name = selected_dataframes[i, 'name']
df <- getDfFromTempEnv(df_name)
metrics = unlist(selected_dataframes[i, 'metrics'])
attributes = unlist(selected_dataframes[i, 'attributes'])
new_dataset$add_table(df_name, df, "replace", metrics, attributes)
}
new_dataset$create(folder_id)
if (certify) {
new_dataset$certify();
}
reloadCurrentFolder();
displayExportSuccessMessage(save_as_name)
}, error = function(error) {
print(error$message)
if (stringIntersects('Cannot overwrite a non-cube report with a cube report', error$message)) {
displayErrorMessage('RreportOverwriteError', error$message)
}
else if (stringIntersects('The object with the given identifier is not an object of the expected type', error$message)) {
displayErrorMessage('RexportUnexpectedObjectTypeError', error$message)
}
else {
displayErrorMessage('RexportError', error$message)
}
});
}
| /R/utils-export.R | permissive | MicroStrategy/mstrio | R | false | false | 5,133 | r | #' @importFrom jsonlite fromJSON
#' @importFrom stats setNames
updateColumnName <- function(df_name, prev_name, new_name) {
new_names <- names(get(df_name, mstrio_temp_env))
new_names[new_names == prev_name] <- new_name
assign(df_name, stats::setNames(get(df_name, mstrio_temp_env), new_names), mstrio_temp_env)
}
reorderColumns <- function(df_name, cols_for_reorder, start_index) {
cols <- jsonlite::fromJSON(cols_for_reorder)
df <- get(df_name, mstrio_temp_env)
instr <- c((start_index):(length(cols) + (start_index - 1)))
names(instr) <- cols
assign(df_name, arrange.col(df, instr), mstrio_temp_env)
}
applyDataModeling <- function(steps, selected_objects) {
tryCatch({
clearTemporaryEnv();
parsed_steps <- jsonlite::parse_json(steps)
parsed_sel_objs <- jsonlite::parse_json(selected_objects)
for (step in parsed_steps) {
if (step$type == 'RENAME_DF') {
renameDataframe(step$oldName, step$newName)
} else if (step$type == 'RENAME_OBJ') {
updateColumnName(step$dfName, step$oldName, step$newName)
}
}
for (selected_df in parsed_sel_objs) {
cropDataframe(selected_df$dfName, selected_df$selectedObjects)
}
finishDataModeling(1)
},
error = function(e) {
print(e$message)
finishDataModeling(0)
});
}
renameDataframe <- function(oldName, newName) {
oldDf <- getDfFromTempEnv(oldName)
assign(
x = newName,
value = oldDf,
envir = mstrio_temp_env
)
remove(list = c(oldName), envir = mstrio_temp_env)
}
clearTemporaryEnv <- function() {
rm(list = ls(all.names = TRUE, envir = mstrio_temp_env), envir = mstrio_temp_env)
}
cloneDataframe <- function(dataframeToClone) {
originalDataframe <- mstrio_env[[dataframeToClone]]
assign(
x = dataframeToClone,
value = originalDataframe,
envir = mstrio_temp_env
)
}
cropDataframe <- function(df_name, selected_objects) {
df <- getDfFromTempEnv(df_name)
if (length(selected_objects) == 1) {
croppedDf <- data.frame(df[selected_objects[[1]]])
names(croppedDf) <- c(selected_objects[[1]])
} else {
croppedDf <- data.frame(df[, unlist(selected_objects)])
names(croppedDf) <- unlist(selected_objects)
}
assign(
x = df_name,
value = croppedDf,
envir = mstrio_temp_env
)
}
getListOfDataframes <- function(envir) {
unlisted <- unlist(eapply(mstrio_temp_env, function(x) is.data.frame(x) & nrow(x) > 0))
names <- names(which(unlisted))
names
}
getDfFromTempEnv <- function(dfName) {
existsInTempEnv <- !is.null(mstrio_temp_env[[dfName]])
if (!existsInTempEnv) {
cloneDataframe(dfName)
}
df <- mstrio_temp_env[[dfName]]
df
}
updateCube <- function(base_url, project_id, identity_token, cube_id, cube_name, update_policies) {
tryCatch({
displayUpdateLoadingMessage(cube_name)
connection <- mstrio::Connection$new(base_url, project_id = project_id, identity_token = identity_token, verbose = FALSE)
dataset <- Dataset$new(connection, dataset_id = cube_id)
parsed_update_policies <- jsonlite::fromJSON(update_policies)
for (i in 1:nrow(parsed_update_policies)) {
table_name = parsed_update_policies[i,]$tableName
update_policy = parsed_update_policies[i,]$updatePolicy
df <- getDfFromTempEnv(table_name)
dataset$add_table(table_name, df, update_policy)
}
dataset$update(auto_publish = FALSE)
displayPublishLoadingMessage(cube_name)
dataset$publish()
clearTemporaryEnv()
finishCubeUpdate(1, cube_name)
},
error = function(error) {
print(error$message)
finishCubeUpdate(0, cube_name)
});
}
exportDataframes <- function(base_url, project_id, identity_token, save_as_name, description, selected_dataframes_json, folder_id, certify) {
displayExportStartMessage(save_as_name);
tryCatch({
connection <- mstrio::Connection$new(base_url, project_id = project_id, identity_token = identity_token, verbose = FALSE)
new_dataset <- mstrio::Dataset$new(connection, save_as_name, description);
selected_dataframes <- jsonlite::fromJSON(selected_dataframes_json)
for (i in 1:nrow(selected_dataframes)) {
df_name = selected_dataframes[i, 'name']
df <- getDfFromTempEnv(df_name)
metrics = unlist(selected_dataframes[i, 'metrics'])
attributes = unlist(selected_dataframes[i, 'attributes'])
new_dataset$add_table(df_name, df, "replace", metrics, attributes)
}
new_dataset$create(folder_id)
if (certify) {
new_dataset$certify();
}
reloadCurrentFolder();
displayExportSuccessMessage(save_as_name)
}, error = function(error) {
print(error$message)
if (stringIntersects('Cannot overwrite a non-cube report with a cube report', error$message)) {
displayErrorMessage('RreportOverwriteError', error$message)
}
else if (stringIntersects('The object with the given identifier is not an object of the expected type', error$message)) {
displayErrorMessage('RexportUnexpectedObjectTypeError', error$message)
}
else {
displayErrorMessage('RexportError', error$message)
}
});
}
|
## Put comments here that give an overall description of what your
## functions do
## makeCacheMatrix: This function creates a special "matrix" object that can cache its inverse.
## cacheSolve: This function computes the inverse of the special "matrix" returned by makeCacheMatrix above. If the inverse has already been calculated (and the matrix has not changed), then cacheSolve should retrieve the inverse from the cache.
## Write a short comment describing this function
## Creates a matrix object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y){
x << - y
m <<- NULL
}
get <- function() x
setmatrix <- function(solve) m <<- solve
getmatrix <- function() m
list(set = set, get = get,
setmatrix = setmatrix,
getmatrix = getmatrix)
}
## Write a short comment describing this function
## Computes matrix's inverse, if inverse is already caculated & not changed gets inverse from cache.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getmatrix()
if(!is.null(m){
message("getting cached data")
return(m)
}
matrix <- x$get()
m <- solve(matrix, ...)
x$setmatrix(m)
m
}
| /cachematrix.R | no_license | jarecalde/ProgrammingAssignment2 | R | false | false | 1,196 | r | ## Put comments here that give an overall description of what your
## functions do
## makeCacheMatrix: This function creates a special "matrix" object that can cache its inverse.
## cacheSolve: This function computes the inverse of the special "matrix" returned by makeCacheMatrix above. If the inverse has already been calculated (and the matrix has not changed), then cacheSolve should retrieve the inverse from the cache.
## Write a short comment describing this function
## Creates a matrix object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y){
x << - y
m <<- NULL
}
get <- function() x
setmatrix <- function(solve) m <<- solve
getmatrix <- function() m
list(set = set, get = get,
setmatrix = setmatrix,
getmatrix = getmatrix)
}
## Write a short comment describing this function
## Computes matrix's inverse, if inverse is already caculated & not changed gets inverse from cache.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getmatrix()
if(!is.null(m){
message("getting cached data")
return(m)
}
matrix <- x$get()
m <- solve(matrix, ...)
x$setmatrix(m)
m
}
|
##' @export
`manifest` <-
function(x,...) UseMethod("manifest")
##' @export
`manifest.lvm` <-
function(x,...) {
if (length(vars(x))>0)
setdiff(vars(x),latent(x))
else
NULL
}
##' @export
`manifest.lvmfit` <-
function(x,...) {
manifest(Model(x))
}
##' @export
manifest.list <- function(x,...) {
manifestlist <- c()
for (i in seq_along(x)) {
manifestlist <- c(manifestlist, manifest(x[[i]]))
}
endolist <- unique(manifestlist)
return(manifestlist)
}
##' @export
`manifest.multigroup` <-
function(x,...) {
manifest(Model(x))
}
| /lava/R/manifest.R | no_license | ingted/R-Examples | R | false | false | 557 | r | ##' @export
`manifest` <-
function(x,...) UseMethod("manifest")
##' @export
`manifest.lvm` <-
function(x,...) {
if (length(vars(x))>0)
setdiff(vars(x),latent(x))
else
NULL
}
##' @export
`manifest.lvmfit` <-
function(x,...) {
manifest(Model(x))
}
##' @export
manifest.list <- function(x,...) {
manifestlist <- c()
for (i in seq_along(x)) {
manifestlist <- c(manifestlist, manifest(x[[i]]))
}
endolist <- unique(manifestlist)
return(manifestlist)
}
##' @export
`manifest.multigroup` <-
function(x,...) {
manifest(Model(x))
}
|
## This functions accepts the following arguments:
## y: univariate outcome
## z: endogenous predictors
## w: instruments
## x: exogenous predictors
## zeval: optional evaluation data for the endogenous predictors
## weval: optional evaluation data for the instruments
## xeval: optional evaluation data for the exogenous predictors
## ... optional arguments for crs()
## This function returns a list with the following elements:
## phi: the IV estimator of phi(z) corresponding to the estimated
## derivative phihat(z)
## phi.prime: the IV derivative estimator
## phi.mat: the matrix with colums phi_1, phi_2 etc. over all iterations
## phi.prime.mat: the matrix with colums phi'_1, phi'_2 etc. over all iterations
## num.iterations: number of iterations taken by Landweber-Fridman
## norm.stop: the stopping rule for each Landweber-Fridman iteration
## norm.value: the norm not multiplied by the number of iterations
## convergence: a character string indicating whether/why iteration terminated
crsivderiv <- function(y,
z,
w,
x=NULL,
zeval=NULL,
weval=NULL,
xeval=NULL,
iterate.max=1000,
iterate.diff.tol=1.0e-08,
constant=0.5,
penalize.iteration=TRUE,
start.from=c("Eyz","EEywz"),
starting.values=NULL,
stop.on.increase=TRUE,
smooth.residuals=TRUE,
opts=list("MAX_BB_EVAL"=10000,
"EPSILON"=.Machine$double.eps,
"INITIAL_MESH_SIZE"="r1.0e-01",
"MIN_MESH_SIZE"=paste("r",sqrt(.Machine$double.eps),sep=""),
"MIN_POLL_SIZE"=paste("r",1,sep=""),
"DISPLAY_DEGREE"=0),
...) {
crs.messages <- getOption("crs.messages")
console <- newLineConsole()
## Basic error checking
if(!is.logical(stop.on.increase)) stop("stop.on.increase must be logical (TRUE/FALSE)")
if(!is.logical(smooth.residuals)) stop("smooth.residuals must be logical (TRUE/FALSE)")
start.from <- match.arg(start.from)
if(constant <= 0 || constant >=1) stop("constant must lie in (0,1)")
if(missing(y)) stop("You must provide y")
if(missing(z)) stop("You must provide z")
if(missing(w)) stop("You must provide w")
if(NCOL(y) > 1) stop("y must be univariate")
if(NROW(y) != NROW(z) || NROW(y) != NROW(w)) stop("y, z, and w have differing numbers of rows")
if(!is.null(x) && NROW(y) != NROW(x)) stop("y and x have differing numbers of rows")
if(iterate.max < 2) stop("iterate.max must be at least 2")
if(iterate.diff.tol < 0) stop("iterate.diff.tol must be non-negative")
## Cast as data frames
w <- data.frame(w)
z <- data.frame(z)
if(!is.null(x)) x <- data.frame(x)
## Check for evaluation data
if(is.null(zeval)) zeval <- z
if(is.null(weval)) weval <- w
if(!is.null(x) && is.null(xeval)) xeval <- x
## Set up formulas for multivariate w, z, and x if provided
wnames <- names(w)
znames <- names(z)
names(weval) <- wnames
names(zeval) <- znames
## If there exist exogenous regressors X, append these to the
## formulas involving Z (can be manually added to W by the user if
## desired)
if(!is.null(x)) {
xnames <- names(x)
names(xeval) <- xnames
}
## Now create evaluation data
if(is.null(x)) {
traindata <- data.frame(y,z,w)
evaldata <- data.frame(zeval,weval)
} else {
traindata <- data.frame(y,z,w,x)
evaldata <- data.frame(zeval,weval,xeval)
}
if(!is.null(starting.values) && (NROW(starting.values) != NROW(evaldata))) stop(paste("starting.values must be of length",NROW(evaldata)))
## Formulae for derivative estimation
formula.muw <- as.formula(paste("mu ~ ", paste(wnames, collapse= "+")))
formula.yw <- as.formula(paste("y ~ ", paste(wnames, collapse= "+")))
formula.phiw <- as.formula(paste("phi ~ ", paste(wnames, collapse= "+")))
if(is.null(x)) {
formula.yz <- as.formula(paste("y ~ ", paste(znames, collapse= "+")))
formula.Eywz <- as.formula(paste("E.y.w ~ ", paste(znames, collapse= "+")))
} else {
formula.yz <- as.formula(paste("y ~ ", paste(znames, collapse= "+"), " + ", paste(xnames, collapse= "+")))
formula.Eywz <- as.formula(paste("E.y.w ~ ", paste(znames, collapse= "+"), " + ", paste(xnames, collapse= "+")))
}
## Landweber-Fridman
## We begin the iteration computing phi.prime.0
console <- printClear(console)
console <- printPop(console)
if(is.null(x)) {
console <- printPush(paste("Computing optimal smoothing for f(z) and S(z) for iteration 1...",sep=""),console)
} else {
console <- printPush(paste("Computing optimal smoothing f(z) and S(z) for iteration 1...",sep=""),console)
}
## Note - here I am only treating the univariate case, so let's
## throw a stop with warning for now...
if(NCOL(z) > 1) stop(" This version supports univariate z only")
## For all results we need the density function for Z and the
## survivor function for Z (1-CDF of Z)
# require(np)
cat(paste("\rIteration ", 1, " of at most ", iterate.max,sep=""))
## Let's compute the bandwidth object for the unconditional density
## for the moment. Use the normal-reference rule for speed
## considerations (sensitivity analysis indicates this is not
## problematic).
bw <- npudensbw(dat=z,
bwmethod="normal-reference",
...)
model.fz <- npudens(tdat=z,
bws=bw$bw,
...)
f.z <- predict(model.fz,newdata=evaldata)
model.Sz <- npudist(tdat=z,
bws=bw$bw,
...)
S.z <- 1-predict(model.Sz,newdata=evaldata)
if(is.null(starting.values)) {
console <- printClear(console)
console <- printPop(console)
if(is.null(x)) {
console <- printPush(paste("Computing optimal smoothing for E(y|z) for iteration 1...",sep=""),console)
} else {
console <- printPush(paste("Computing optimal smoothing for E(y|z,x) for iteration 1...",sep=""),console)
}
if(start.from == "Eyz") {
## Start from E(Y|z)
if(crs.messages) options(crs.messages=FALSE)
model.E.y.z <- crs(formula.yz,
opts=opts,
data=traindata,
deriv=1,
...)
if(crs.messages) options(crs.messages=TRUE)
E.y.z <- predict(model.E.y.z,newdata=evaldata)
phi.prime <- attr(E.y.z,"deriv.mat")[,1]
} else {
## Start from E(E(Y|w)|z)
E.y.w <- fitted(crs(formula.yw,opts=opts,data=traindata,...))
model.E.E.y.w.z <- crs(formula.Eywz,opts=opts,data=traindata,deriv=1,...)
E.E.y.w.z <- predict(model.E.E.y.w.z,newdata=evaldata,...)
phi.prime <- attr(E.E.y.w.z,"deriv.mat")[,1]
}
} else {
phi.prime <- starting.values
}
## Step 1 - begin iteration - for this we require \varphi_0. To
## compute \varphi_{0,i}, we require \mu_{0,i}. For j=0 (first
## term in the series), \mu_{0,i} is Y_i.
console <- printClear(console)
console <- printPop(console)
if(is.null(x)) {
console <- printPush(paste("Computing optimal smoothing for E(y|w) (stopping rule) for iteration 1...",sep=""),console)
} else {
console <- printPush(paste("Computing optimal smoothing for E(y|w) (stopping rule) for iteration 1...",sep=""),console)
}
## NOTE - this presumes univariate z case... in general this would
## be a continuous variable's index
phi <- integrate.trapezoidal(z[,1],phi.prime)
## In the definition of phi we have the integral minus the mean of
## the integral with respect to z, so subtract the mean here
phi <- phi - mean(phi) + mean(y)
starting.values.phi <- phi
starting.values.phi.prime <- phi.prime
## For stopping rule...
if(crs.messages) options(crs.messages=FALSE)
model.E.y.w <- crs(formula.yw,
opts=opts,
data=traindata,
...)
if(crs.messages) options(crs.messages=TRUE)
E.y.w <- predict(model.E.y.w,newdata=evaldata)
norm.stop <- numeric()
## For the stopping rule, we require E.phi.w
if(crs.messages) options(crs.messages=FALSE)
model.E.phi.w <- crs(formula.phiw,
opts=opts,
data=traindata,
...)
if(crs.messages) options(crs.messages=TRUE)
E.phi.w <- predict(model.E.phi.w,newdata=evaldata)
## Now we compute mu.0 (a residual of sorts)
mu <- y - phi
## Now we repeat this entire process using mu = y - phi.0 rather
## than y
mean.mu <- mean(mu)
if(smooth.residuals) {
## Smooth residuals (smooth of (y-phi) on w)
if(crs.messages) options(crs.messages=FALSE)
model.E.mu.w <- crs(formula.muw,
opts=opts,
data=traindata,
...)
## We require the fitted values...
predicted.model.E.mu.w <- predict(model.E.mu.w,newdata=evaldata)
if(crs.messages) options(crs.messages=TRUE)
## We again require the mean of the fitted values
mean.predicted.model.E.mu.w <- mean(predicted.model.E.mu.w)
} else {
## Not smoothing residuals (difference of E(Y|w) and smooth of phi
## on w)
if(crs.messages) options(crs.messages=FALSE)
model.E.phi.w <- crs(formula.phiw,
opts=opts,
data=traindata,
...)
## We require the fitted values...
predicted.model.E.mu.w <- E.y.w - predict(model.E.phi.w,newdata=evaldata)
if(crs.messages) options(crs.messages=TRUE)
## We again require the mean of the fitted values
mean.predicted.model.E.mu.w <- mean(E.y.w) - mean(predicted.model.E.mu.w)
}
norm.stop[1] <- sum(predicted.model.E.mu.w^2)/sum(E.y.w^2)
## Now we compute T^* applied to mu
cdf.weighted.average <- npksum(txdat=z,
exdat=zeval,
tydat=as.matrix(predicted.model.E.mu.w),
operator="integral",
bws=bw$bw)$ksum/nrow(traindata)
survivor.weighted.average <- mean.predicted.model.E.mu.w - cdf.weighted.average
T.star.mu <- (survivor.weighted.average-S.z*mean.mu)/f.z
## Now we update phi.prime.0, this provides phi.prime.1, and now
## we can iterate until convergence... note we replace phi.prime.0
## with phi.prime.1 (i.e. overwrite phi.prime)
phi.prime <- phi.prime + constant*T.star.mu
phi.prime.mat <- phi.prime
phi.mat <- phi
## This we iterate...
for(j in 2:iterate.max) {
## Save previous run in case stop norm increases
cat(paste("\rIteration ", j, " of at most ", iterate.max,sep=""))
console <- printClear(console)
console <- printPop(console)
if(is.null(x)) {
console <- printPush(paste("Computing optimal smoothing and phi(z) for iteration ", j,"...",sep=""),console)
} else {
console <- printPush(paste("Computing optimal smoothing and phi(z,x) for iteration ", j,"...",sep=""),console)
}
## NOTE - this presumes univariate z case... in general this would
## be a continuous variable's index
phi <- integrate.trapezoidal(z[,1],phi.prime)
## In the definition of phi we have the integral minus the mean of
## the integral with respect to z, so subtract the mean here
phi <- phi - mean(phi) + mean(y)
## Now we compute mu.0 (a residual of sorts)
mu <- y - phi
## Now we repeat this entire process using mu = y = phi.0 rather than y
## Next, we regress require \mu_{0,i} W
if(smooth.residuals) {
## Smooth residuals (smooth of (y-phi) on w)
if(crs.messages) options(crs.messages=FALSE)
model.E.mu.w <- crs(formula.muw,
opts=opts,
data=traindata,
...)
## We require the fitted values...
predicted.model.E.mu.w <- predict(model.E.mu.w,newdata=evaldata)
if(crs.messages) options(crs.messages=TRUE)
## We again require the mean of the fitted values
mean.predicted.model.E.mu.w <- mean(predicted.model.E.mu.w)
} else {
## Not smoothing residuals (difference of E(Y|w) and smooth of
## phi on w)
if(crs.messages) options(crs.messages=FALSE)
model.E.phi.w <- crs(formula.phiw,
opts=opts,
data=traindata,
...)
## We require the fitted values...
predicted.model.E.mu.w <- E.y.w - predict(model.E.phi.w,newdata=evaldata)
if(crs.messages) options(crs.messages=TRUE)
## We again require the mean of the fitted values
mean.predicted.model.E.mu.w <- mean(E.y.w) - mean(predicted.model.E.mu.w)
}
norm.stop[j] <- ifelse(penalize.iteration,j*sum(predicted.model.E.mu.w^2)/sum(E.y.w^2),sum(predicted.model.E.mu.w^2)/sum(E.y.w^2))
## Now we compute T^* applied to mu
cdf.weighted.average <- npksum(txdat=z,
exdat=zeval,
tydat=as.matrix(predicted.model.E.mu.w),
operator="integral",
bws=bw$bw)$ksum/nrow(traindata)
survivor.weighted.average <- mean.predicted.model.E.mu.w - cdf.weighted.average
T.star.mu <- (survivor.weighted.average-S.z*mean.predicted.model.E.mu.w)/f.z
## Now we update, this provides phi.prime.1, and now we can iterate until convergence...
phi.prime <- phi.prime + constant*T.star.mu
phi.prime.mat <- cbind(phi.prime.mat,phi.prime)
phi.mat <- cbind(phi.mat,phi)
## The number of iterations in LF is asymptotically equivalent to
## 1/alpha (where alpha is the regularization parameter in
## Tikhonov). Plus the criterion function we use is increasing
## for very small number of iterations. So we need a threshold
## after which we can pretty much confidently say that the
## stopping criterion is decreasing. In Darolles et al. (2011)
## \alpha ~ O(N^(-1/(min(beta,2)+2)), where beta is the so called
## qualification of your regularization method. Take the worst
## case in which beta = 0 and then the number of iterations is ~
## N^0.5. Note that derivative estimation seems to require more
## iterations hence the heuristic sqrt(N)
if(j > round(sqrt(nrow(traindata))) && !is.monotone.increasing(norm.stop)) {
## If stopping rule criterion increases or we are below stopping
## tolerance then break
if(stop.on.increase && norm.stop[j] > norm.stop[j-1]) {
convergence <- "STOP_ON_INCREASE"
break()
}
if(abs(norm.stop[j-1]-norm.stop[j]) < iterate.diff.tol) {
convergence <- "ITERATE_DIFF_TOL"
break()
}
}
convergence <- "ITERATE_MAX"
}
## Extract minimum, and check for monotone increasing function and
## issue warning in that case. Otherwise allow for an increasing
## then decreasing (and potentially increasing thereafter) portion
## of the stopping function, ignore the initial increasing portion,
## and take the min from where the initial inflection point occurs
## to the length of norm.stop
norm.value <- norm.stop/(1:length(norm.stop))
if(which.min(norm.stop) == 1 && is.monotone.increasing(norm.stop)) {
warning("Stopping rule increases monotonically (consult model$norm.stop):\nThis could be the result of an inspired initial value (unlikely)\nNote: we suggest manually choosing phi.0 and restarting (e.g. instead set `starting.values' to E[E(Y|w)|z])")
convergence <- "FAILURE_MONOTONE_INCREASING"
# phi <- starting.values.phi
# phi.prime <- starting.values.phi.prime
j <- 1
while(norm.value[j+1] > norm.value[j]) j <- j + 1
j <- j-1 + which.min(norm.value[j:length(norm.value)])
phi <- phi.mat[,j]
phi.prime <- phi.prime.mat[,j]
} else {
## Ignore the initial increasing portion, take the min to the
## right of where the initial inflection point occurs
j <- 1
while(norm.stop[j+1] > norm.stop[j]) j <- j + 1
j <- j-1 + which.min(norm.stop[j:length(norm.stop)])
phi <- phi.mat[,j]
phi.prime <- phi.prime.mat[,j]
}
console <- printClear(console)
console <- printPop(console)
if(j == iterate.max) warning(" iterate.max reached: increase iterate.max or inspect norm.stop vector")
return(list(phi=phi,
phi.prime=phi.prime,
phi.mat=phi.mat,
phi.prime.mat=phi.prime.mat,
num.iterations=j,
norm.stop=norm.stop,
norm.value=norm.value,
convergence=convergence,
starting.values.phi=starting.values.phi,
starting.values.phi.prime=starting.values.phi.prime))
}
| /R/crsivderiv.R | no_license | JeffreyRacine/R-Package-crs | R | false | false | 17,354 | r | ## This functions accepts the following arguments:
## y: univariate outcome
## z: endogenous predictors
## w: instruments
## x: exogenous predictors
## zeval: optional evaluation data for the endogenous predictors
## weval: optional evaluation data for the instruments
## xeval: optional evaluation data for the exogenous predictors
## ... optional arguments for crs()
## This function returns a list with the following elements:
## phi: the IV estimator of phi(z) corresponding to the estimated
## derivative phihat(z)
## phi.prime: the IV derivative estimator
## phi.mat: the matrix with colums phi_1, phi_2 etc. over all iterations
## phi.prime.mat: the matrix with colums phi'_1, phi'_2 etc. over all iterations
## num.iterations: number of iterations taken by Landweber-Fridman
## norm.stop: the stopping rule for each Landweber-Fridman iteration
## norm.value: the norm not multiplied by the number of iterations
## convergence: a character string indicating whether/why iteration terminated
crsivderiv <- function(y,
z,
w,
x=NULL,
zeval=NULL,
weval=NULL,
xeval=NULL,
iterate.max=1000,
iterate.diff.tol=1.0e-08,
constant=0.5,
penalize.iteration=TRUE,
start.from=c("Eyz","EEywz"),
starting.values=NULL,
stop.on.increase=TRUE,
smooth.residuals=TRUE,
opts=list("MAX_BB_EVAL"=10000,
"EPSILON"=.Machine$double.eps,
"INITIAL_MESH_SIZE"="r1.0e-01",
"MIN_MESH_SIZE"=paste("r",sqrt(.Machine$double.eps),sep=""),
"MIN_POLL_SIZE"=paste("r",1,sep=""),
"DISPLAY_DEGREE"=0),
...) {
crs.messages <- getOption("crs.messages")
console <- newLineConsole()
## Basic error checking
if(!is.logical(stop.on.increase)) stop("stop.on.increase must be logical (TRUE/FALSE)")
if(!is.logical(smooth.residuals)) stop("smooth.residuals must be logical (TRUE/FALSE)")
start.from <- match.arg(start.from)
if(constant <= 0 || constant >=1) stop("constant must lie in (0,1)")
if(missing(y)) stop("You must provide y")
if(missing(z)) stop("You must provide z")
if(missing(w)) stop("You must provide w")
if(NCOL(y) > 1) stop("y must be univariate")
if(NROW(y) != NROW(z) || NROW(y) != NROW(w)) stop("y, z, and w have differing numbers of rows")
if(!is.null(x) && NROW(y) != NROW(x)) stop("y and x have differing numbers of rows")
if(iterate.max < 2) stop("iterate.max must be at least 2")
if(iterate.diff.tol < 0) stop("iterate.diff.tol must be non-negative")
## Cast as data frames
w <- data.frame(w)
z <- data.frame(z)
if(!is.null(x)) x <- data.frame(x)
## Check for evaluation data
if(is.null(zeval)) zeval <- z
if(is.null(weval)) weval <- w
if(!is.null(x) && is.null(xeval)) xeval <- x
## Set up formulas for multivariate w, z, and x if provided
wnames <- names(w)
znames <- names(z)
names(weval) <- wnames
names(zeval) <- znames
## If there exist exogenous regressors X, append these to the
## formulas involving Z (can be manually added to W by the user if
## desired)
if(!is.null(x)) {
xnames <- names(x)
names(xeval) <- xnames
}
## Now create evaluation data
if(is.null(x)) {
traindata <- data.frame(y,z,w)
evaldata <- data.frame(zeval,weval)
} else {
traindata <- data.frame(y,z,w,x)
evaldata <- data.frame(zeval,weval,xeval)
}
if(!is.null(starting.values) && (NROW(starting.values) != NROW(evaldata))) stop(paste("starting.values must be of length",NROW(evaldata)))
## Formulae for derivative estimation
formula.muw <- as.formula(paste("mu ~ ", paste(wnames, collapse= "+")))
formula.yw <- as.formula(paste("y ~ ", paste(wnames, collapse= "+")))
formula.phiw <- as.formula(paste("phi ~ ", paste(wnames, collapse= "+")))
if(is.null(x)) {
formula.yz <- as.formula(paste("y ~ ", paste(znames, collapse= "+")))
formula.Eywz <- as.formula(paste("E.y.w ~ ", paste(znames, collapse= "+")))
} else {
formula.yz <- as.formula(paste("y ~ ", paste(znames, collapse= "+"), " + ", paste(xnames, collapse= "+")))
formula.Eywz <- as.formula(paste("E.y.w ~ ", paste(znames, collapse= "+"), " + ", paste(xnames, collapse= "+")))
}
## Landweber-Fridman
## We begin the iteration computing phi.prime.0
console <- printClear(console)
console <- printPop(console)
if(is.null(x)) {
console <- printPush(paste("Computing optimal smoothing for f(z) and S(z) for iteration 1...",sep=""),console)
} else {
console <- printPush(paste("Computing optimal smoothing f(z) and S(z) for iteration 1...",sep=""),console)
}
## Note - here I am only treating the univariate case, so let's
## throw a stop with warning for now...
if(NCOL(z) > 1) stop(" This version supports univariate z only")
## For all results we need the density function for Z and the
## survivor function for Z (1-CDF of Z)
# require(np)
cat(paste("\rIteration ", 1, " of at most ", iterate.max,sep=""))
## Let's compute the bandwidth object for the unconditional density
## for the moment. Use the normal-reference rule for speed
## considerations (sensitivity analysis indicates this is not
## problematic).
bw <- npudensbw(dat=z,
bwmethod="normal-reference",
...)
model.fz <- npudens(tdat=z,
bws=bw$bw,
...)
f.z <- predict(model.fz,newdata=evaldata)
model.Sz <- npudist(tdat=z,
bws=bw$bw,
...)
S.z <- 1-predict(model.Sz,newdata=evaldata)
if(is.null(starting.values)) {
console <- printClear(console)
console <- printPop(console)
if(is.null(x)) {
console <- printPush(paste("Computing optimal smoothing for E(y|z) for iteration 1...",sep=""),console)
} else {
console <- printPush(paste("Computing optimal smoothing for E(y|z,x) for iteration 1...",sep=""),console)
}
if(start.from == "Eyz") {
## Start from E(Y|z)
if(crs.messages) options(crs.messages=FALSE)
model.E.y.z <- crs(formula.yz,
opts=opts,
data=traindata,
deriv=1,
...)
if(crs.messages) options(crs.messages=TRUE)
E.y.z <- predict(model.E.y.z,newdata=evaldata)
phi.prime <- attr(E.y.z,"deriv.mat")[,1]
} else {
## Start from E(E(Y|w)|z)
E.y.w <- fitted(crs(formula.yw,opts=opts,data=traindata,...))
model.E.E.y.w.z <- crs(formula.Eywz,opts=opts,data=traindata,deriv=1,...)
E.E.y.w.z <- predict(model.E.E.y.w.z,newdata=evaldata,...)
phi.prime <- attr(E.E.y.w.z,"deriv.mat")[,1]
}
} else {
phi.prime <- starting.values
}
## Step 1 - begin iteration - for this we require \varphi_0. To
## compute \varphi_{0,i}, we require \mu_{0,i}. For j=0 (first
## term in the series), \mu_{0,i} is Y_i.
console <- printClear(console)
console <- printPop(console)
if(is.null(x)) {
console <- printPush(paste("Computing optimal smoothing for E(y|w) (stopping rule) for iteration 1...",sep=""),console)
} else {
console <- printPush(paste("Computing optimal smoothing for E(y|w) (stopping rule) for iteration 1...",sep=""),console)
}
## NOTE - this presumes univariate z case... in general this would
## be a continuous variable's index
phi <- integrate.trapezoidal(z[,1],phi.prime)
## In the definition of phi we have the integral minus the mean of
## the integral with respect to z, so subtract the mean here
phi <- phi - mean(phi) + mean(y)
starting.values.phi <- phi
starting.values.phi.prime <- phi.prime
## For stopping rule...
if(crs.messages) options(crs.messages=FALSE)
model.E.y.w <- crs(formula.yw,
opts=opts,
data=traindata,
...)
if(crs.messages) options(crs.messages=TRUE)
E.y.w <- predict(model.E.y.w,newdata=evaldata)
norm.stop <- numeric()
## For the stopping rule, we require E.phi.w
if(crs.messages) options(crs.messages=FALSE)
model.E.phi.w <- crs(formula.phiw,
opts=opts,
data=traindata,
...)
if(crs.messages) options(crs.messages=TRUE)
E.phi.w <- predict(model.E.phi.w,newdata=evaldata)
## Now we compute mu.0 (a residual of sorts)
mu <- y - phi
## Now we repeat this entire process using mu = y - phi.0 rather
## than y
mean.mu <- mean(mu)
if(smooth.residuals) {
## Smooth residuals (smooth of (y-phi) on w)
if(crs.messages) options(crs.messages=FALSE)
model.E.mu.w <- crs(formula.muw,
opts=opts,
data=traindata,
...)
## We require the fitted values...
predicted.model.E.mu.w <- predict(model.E.mu.w,newdata=evaldata)
if(crs.messages) options(crs.messages=TRUE)
## We again require the mean of the fitted values
mean.predicted.model.E.mu.w <- mean(predicted.model.E.mu.w)
} else {
## Not smoothing residuals (difference of E(Y|w) and smooth of phi
## on w)
if(crs.messages) options(crs.messages=FALSE)
model.E.phi.w <- crs(formula.phiw,
opts=opts,
data=traindata,
...)
## We require the fitted values...
predicted.model.E.mu.w <- E.y.w - predict(model.E.phi.w,newdata=evaldata)
if(crs.messages) options(crs.messages=TRUE)
## We again require the mean of the fitted values
mean.predicted.model.E.mu.w <- mean(E.y.w) - mean(predicted.model.E.mu.w)
}
norm.stop[1] <- sum(predicted.model.E.mu.w^2)/sum(E.y.w^2)
## Now we compute T^* applied to mu
cdf.weighted.average <- npksum(txdat=z,
exdat=zeval,
tydat=as.matrix(predicted.model.E.mu.w),
operator="integral",
bws=bw$bw)$ksum/nrow(traindata)
survivor.weighted.average <- mean.predicted.model.E.mu.w - cdf.weighted.average
T.star.mu <- (survivor.weighted.average-S.z*mean.mu)/f.z
## Now we update phi.prime.0, this provides phi.prime.1, and now
## we can iterate until convergence... note we replace phi.prime.0
## with phi.prime.1 (i.e. overwrite phi.prime)
phi.prime <- phi.prime + constant*T.star.mu
phi.prime.mat <- phi.prime
phi.mat <- phi
## This we iterate...
for(j in 2:iterate.max) {
## Save previous run in case stop norm increases
cat(paste("\rIteration ", j, " of at most ", iterate.max,sep=""))
console <- printClear(console)
console <- printPop(console)
if(is.null(x)) {
console <- printPush(paste("Computing optimal smoothing and phi(z) for iteration ", j,"...",sep=""),console)
} else {
console <- printPush(paste("Computing optimal smoothing and phi(z,x) for iteration ", j,"...",sep=""),console)
}
## NOTE - this presumes univariate z case... in general this would
## be a continuous variable's index
phi <- integrate.trapezoidal(z[,1],phi.prime)
## In the definition of phi we have the integral minus the mean of
## the integral with respect to z, so subtract the mean here
phi <- phi - mean(phi) + mean(y)
## Now we compute mu.0 (a residual of sorts)
mu <- y - phi
## Now we repeat this entire process using mu = y = phi.0 rather than y
## Next, we regress require \mu_{0,i} W
if(smooth.residuals) {
## Smooth residuals (smooth of (y-phi) on w)
if(crs.messages) options(crs.messages=FALSE)
model.E.mu.w <- crs(formula.muw,
opts=opts,
data=traindata,
...)
## We require the fitted values...
predicted.model.E.mu.w <- predict(model.E.mu.w,newdata=evaldata)
if(crs.messages) options(crs.messages=TRUE)
## We again require the mean of the fitted values
mean.predicted.model.E.mu.w <- mean(predicted.model.E.mu.w)
} else {
## Not smoothing residuals (difference of E(Y|w) and smooth of
## phi on w)
if(crs.messages) options(crs.messages=FALSE)
model.E.phi.w <- crs(formula.phiw,
opts=opts,
data=traindata,
...)
## We require the fitted values...
predicted.model.E.mu.w <- E.y.w - predict(model.E.phi.w,newdata=evaldata)
if(crs.messages) options(crs.messages=TRUE)
## We again require the mean of the fitted values
mean.predicted.model.E.mu.w <- mean(E.y.w) - mean(predicted.model.E.mu.w)
}
norm.stop[j] <- ifelse(penalize.iteration,j*sum(predicted.model.E.mu.w^2)/sum(E.y.w^2),sum(predicted.model.E.mu.w^2)/sum(E.y.w^2))
## Now we compute T^* applied to mu
cdf.weighted.average <- npksum(txdat=z,
exdat=zeval,
tydat=as.matrix(predicted.model.E.mu.w),
operator="integral",
bws=bw$bw)$ksum/nrow(traindata)
survivor.weighted.average <- mean.predicted.model.E.mu.w - cdf.weighted.average
T.star.mu <- (survivor.weighted.average-S.z*mean.predicted.model.E.mu.w)/f.z
## Now we update, this provides phi.prime.1, and now we can iterate until convergence...
phi.prime <- phi.prime + constant*T.star.mu
phi.prime.mat <- cbind(phi.prime.mat,phi.prime)
phi.mat <- cbind(phi.mat,phi)
## The number of iterations in LF is asymptotically equivalent to
## 1/alpha (where alpha is the regularization parameter in
## Tikhonov). Plus the criterion function we use is increasing
## for very small number of iterations. So we need a threshold
## after which we can pretty much confidently say that the
## stopping criterion is decreasing. In Darolles et al. (2011)
## \alpha ~ O(N^(-1/(min(beta,2)+2)), where beta is the so called
## qualification of your regularization method. Take the worst
## case in which beta = 0 and then the number of iterations is ~
## N^0.5. Note that derivative estimation seems to require more
## iterations hence the heuristic sqrt(N)
if(j > round(sqrt(nrow(traindata))) && !is.monotone.increasing(norm.stop)) {
## If stopping rule criterion increases or we are below stopping
## tolerance then break
if(stop.on.increase && norm.stop[j] > norm.stop[j-1]) {
convergence <- "STOP_ON_INCREASE"
break()
}
if(abs(norm.stop[j-1]-norm.stop[j]) < iterate.diff.tol) {
convergence <- "ITERATE_DIFF_TOL"
break()
}
}
convergence <- "ITERATE_MAX"
}
## Extract minimum, and check for monotone increasing function and
## issue warning in that case. Otherwise allow for an increasing
## then decreasing (and potentially increasing thereafter) portion
## of the stopping function, ignore the initial increasing portion,
## and take the min from where the initial inflection point occurs
## to the length of norm.stop
norm.value <- norm.stop/(1:length(norm.stop))
if(which.min(norm.stop) == 1 && is.monotone.increasing(norm.stop)) {
warning("Stopping rule increases monotonically (consult model$norm.stop):\nThis could be the result of an inspired initial value (unlikely)\nNote: we suggest manually choosing phi.0 and restarting (e.g. instead set `starting.values' to E[E(Y|w)|z])")
convergence <- "FAILURE_MONOTONE_INCREASING"
# phi <- starting.values.phi
# phi.prime <- starting.values.phi.prime
j <- 1
while(norm.value[j+1] > norm.value[j]) j <- j + 1
j <- j-1 + which.min(norm.value[j:length(norm.value)])
phi <- phi.mat[,j]
phi.prime <- phi.prime.mat[,j]
} else {
## Ignore the initial increasing portion, take the min to the
## right of where the initial inflection point occurs
j <- 1
while(norm.stop[j+1] > norm.stop[j]) j <- j + 1
j <- j-1 + which.min(norm.stop[j:length(norm.stop)])
phi <- phi.mat[,j]
phi.prime <- phi.prime.mat[,j]
}
console <- printClear(console)
console <- printPop(console)
if(j == iterate.max) warning(" iterate.max reached: increase iterate.max or inspect norm.stop vector")
return(list(phi=phi,
phi.prime=phi.prime,
phi.mat=phi.mat,
phi.prime.mat=phi.prime.mat,
num.iterations=j,
norm.stop=norm.stop,
norm.value=norm.value,
convergence=convergence,
starting.values.phi=starting.values.phi,
starting.values.phi.prime=starting.values.phi.prime))
}
|
## ---- setup, echo=FALSE-------------------------------------------------------
## NOTE(review): this file appears to be knitr-extracted (purled) vignette
## code -- the "## ----" lines are chunk headers and the "# " lines are
## eval=FALSE chunks. Prefer editing the vignette source rather than this
## file. IS_GITHUB toggles image-path handling for the GitHub README vs the
## built vignette (see the cat()/gsub() calls below).
IS_GITHUB <- Sys.getenv("IS_GITHUB") != ""
## ----results='asis', echo=FALSE, eval=IS_GITHUB-------------------------------
# cat('
# [![R-CMD-check](https://github.com/traversc/glow/workflows/R-CMD-check/badge.svg)](https://github.com/traversc/glow/actions)
# [![CRAN-Status-Badge](http://www.r-pkg.org/badges/version/glow)](https://cran.r-project.org/package=glow)
# [![CRAN-Downloads-Badge](https://cranlogs.r-pkg.org/badges/glow)](https://cran.r-project.org/package=glow)
# [![CRAN-Downloads-Total-Badge](https://cranlogs.r-pkg.org/badges/grand-total/glow)](https://cran.r-project.org/package=glow)
# ')
## ----results='asis', echo=FALSE-----------------------------------------------
output <- '
<center>
|Methylation 450K Volcano Plot |Diamonds |
|-|-|
|![](vignettes/volcano_plot.png "methylation volcano plot"){height=240px} |![](vignettes/diamonds_plot.png "diamonds"){height=240px} |
| Milky Way Galaxy (6.1 million stars) |
|-|
| ![](vignettes/GAIA_galaxy_pseudocolor.png "Milky Way"){height=300px} |
| OpenStreetMap GPS traces (2.8 billion points) |
|-|
| ![](vignettes/OSM_gps_traces.png "OSM GPS traces"){height=300px} |
| Clifford strange attractor (1 billion points) |
|-|
| ![](vignettes/clifford_attractor.png "Clifford attractor"){height=300px} |
| Airline Dataset (145 million points) | Glow-y Spiral |
|-|-|
| ![](vignettes/airline_plot.png "Airline dataset"){height=240px} | ![](vignettes/glow_spiral.png "Glow-y spiral"){height=240px} |
| U.S. Coronavirus Cases (2021) |
|-|
| ![](vignettes/US_coronavirus_2021.png "US coronavirus cases"){height=300px} |
</center>
'
# On GitHub the image links point into vignettes/; otherwise the prefix is
# stripped (presumably the rendered vignette sits next to the images --
# confirm in the vignette source).
if(IS_GITHUB) {
  cat(output)
} else {
  cat(gsub("vignettes/", "", output))
}
## ----eval=FALSE---------------------------------------------------------------
# remotes::install_github("traversc/glow")
## ----eval=FALSE---------------------------------------------------------------
# library(glow)
# library(ggplot2)
# library(viridisLite) # Magma color scale
#
# # Number of threads
# nt <- 4
#
# data(diamonds)
# gm <- GlowMapper$new(xdim=800, ydim = 640, blend_mode = "screen", nthreads=nt)
# gm$map(x=diamonds$carat, y=diamonds$price, intensity=1, radius = .1)
# pd <- gm$output_dataframe(saturation = 1)
#
# # Dark color theme
# ggplot() +
#   geom_raster(data = pd, aes(x = pd$x, y = pd$y, fill = pd$value), show.legend = FALSE) +
#   scale_fill_gradientn(colors = additive_alpha(magma(12))) +
#   coord_fixed(gm$aspect(), xlim = gm$xlim(), ylim = gm$ylim()) +
#   labs(x = "carat", y = "price") +
#   theme_night(bgcolor = magma(12)[1])
## ----results='asis', echo=FALSE-----------------------------------------------
if(IS_GITHUB) {
  cat('![](vignettes/diamonds_dark.png "diamonds dark"){height=240px}')
} else {
  cat('![](diamonds_dark.png "diamonds dark"){height=240px}')
}
## ----eval=FALSE---------------------------------------------------------------
# # light color theme
# light_colors <- light_heat_colors(144)
# ggplot() +
#   geom_raster(data = pd, aes(x = pd$x, y = pd$y, fill = pd$value), show.legend = FALSE) +
#   scale_fill_gradientn(colors = additive_alpha(light_colors)) +
#   coord_fixed(gm$aspect(), xlim = gm$xlim(), ylim = gm$ylim()) +
#   labs(x = "carat", y = "price") +
#   theme_bw(base_size = 14)
## ----results='asis', echo=FALSE-----------------------------------------------
if(IS_GITHUB) {
  cat('![](vignettes/diamonds_light.png "diamonds light"){height=240px}')
} else {
  cat('![](diamonds_light.png "diamonds light"){height=240px}')
}
## ----eval=FALSE---------------------------------------------------------------
# library(EBImage)
#
# # Generate data
# cliff_points <- clifford_attractor(1e6, 1.886,-2.357,-0.328, 0.918, 0.1, 0)
# color_pal <- circular_palette(n=144, pal_function=rainbow)
# cliff_points$color <- map_colors(color_pal, cliff_points$angle, min_limit=-pi, max_limit=pi)
#
# # Create raster
# gm <- GlowMapper4$new(xdim=480, ydim = 270, blend_mode = "additive", nthreads=4)
# gm$map(x=cliff_points$x, y=cliff_points$y, radius=0.05, color=cliff_points$color)
# pd <- gm$output_raw(saturation = 1)
#
# # Output raster with EBImage
# image_array <- array(1, dim=c(480, 270, 3))
# image_array[,,1] <- pd[[1]]*pd[[4]]
# image_array[,,2] <- pd[[2]]*pd[[4]]
# image_array[,,3] <- pd[[3]]*pd[[4]]
# img <- EBImage::Image(image_array, colormode='Color')
# plot(img)
# writeImage(img, "plots/clifford_vignette.png")
## ----results='asis', echo=FALSE-----------------------------------------------
if(IS_GITHUB) {
  cat('![](vignettes/clifford_vignette.png "clifford attractor"){height=240px}')
} else {
  cat('![](clifford_vignette.png "clifford attractor"){height=240px}')
}
| /inst/doc/vignette.R | no_license | cran/glow | R | false | false | 4,934 | r | ## ---- setup, echo=FALSE-------------------------------------------------------
IS_GITHUB <- Sys.getenv("IS_GITHUB") != ""
## ----results='asis', echo=FALSE, eval=IS_GITHUB-------------------------------
# cat('
# [](https://github.com/traversc/glow/actions)
# [](https://cran.r-project.org/package=glow)
# [](https://cran.r-project.org/package=glow)
# [](https://cran.r-project.org/package=glow)
# ')
## ----results='asis', echo=FALSE-----------------------------------------------
output <- '
<center>
|Methylation 450K Volcano Plot |Diamonds |
|-|-|
|{height=240px} |{height=240px} |
| Milky Way Galaxy (6.1 million stars) |
|-|
| {height=300px} |
| OpenStreetMap GPS traces (2.8 billion points) |
|-|
| {height=300px} |
| Clifford strange attractor (1 billion points) |
|-|
| {height=300px} |
| Airline Dataset (145 million points) | Glow-y Spiral |
|-|-|
| {height=240px} | {height=240px} |
| U.S. Coronavirus Cases (2021) |
|-|
| {height=300px} |
</center>
'
if(IS_GITHUB) {
cat(output)
} else {
cat(gsub("vignettes/", "", output))
}
## ----eval=FALSE---------------------------------------------------------------
# remotes::install_github("traversc/glow")
## ----eval=FALSE---------------------------------------------------------------
# library(glow)
# library(ggplot2)
# library(viridisLite) # Magma color scale
#
# # Number of threads
# nt <- 4
#
# data(diamonds)
# gm <- GlowMapper$new(xdim=800, ydim = 640, blend_mode = "screen", nthreads=nt)
# gm$map(x=diamonds$carat, y=diamonds$price, intensity=1, radius = .1)
# pd <- gm$output_dataframe(saturation = 1)
#
# # Dark color theme
# ggplot() +
# geom_raster(data = pd, aes(x = pd$x, y = pd$y, fill = pd$value), show.legend = FALSE) +
# scale_fill_gradientn(colors = additive_alpha(magma(12))) +
# coord_fixed(gm$aspect(), xlim = gm$xlim(), ylim = gm$ylim()) +
# labs(x = "carat", y = "price") +
# theme_night(bgcolor = magma(12)[1])
## ----results='asis', echo=FALSE-----------------------------------------------
if(IS_GITHUB) {
cat('{height=240px}')
} else {
cat('{height=240px}')
}
## ----eval=FALSE---------------------------------------------------------------
# # light color theme
# light_colors <- light_heat_colors(144)
# ggplot() +
# geom_raster(data = pd, aes(x = pd$x, y = pd$y, fill = pd$value), show.legend = FALSE) +
# scale_fill_gradientn(colors = additive_alpha(light_colors)) +
# coord_fixed(gm$aspect(), xlim = gm$xlim(), ylim = gm$ylim()) +
# labs(x = "carat", y = "price") +
# theme_bw(base_size = 14)
## ----results='asis', echo=FALSE-----------------------------------------------
if(IS_GITHUB) {
cat('{height=240px}')
} else {
cat('{height=240px}')
}
## ----eval=FALSE---------------------------------------------------------------
# library(EBImage)
#
# # Generate data
# cliff_points <- clifford_attractor(1e6, 1.886,-2.357,-0.328, 0.918, 0.1, 0)
# color_pal <- circular_palette(n=144, pal_function=rainbow)
# cliff_points$color <- map_colors(color_pal, cliff_points$angle, min_limit=-pi, max_limit=pi)
#
# # Create raster
# gm <- GlowMapper4$new(xdim=480, ydim = 270, blend_mode = "additive", nthreads=4)
# gm$map(x=cliff_points$x, y=cliff_points$y, radius=0.05, color=cliff_points$color)
# pd <- gm$output_raw(saturation = 1)
#
# # Output raster with EBImage
# image_array <- array(1, dim=c(480, 270, 3))
# image_array[,,1] <- pd[[1]]*pd[[4]]
# image_array[,,2] <- pd[[2]]*pd[[4]]
# image_array[,,3] <- pd[[3]]*pd[[4]]
# img <- EBImage::Image(image_array, colormode='Color')
# plot(img)
# writeImage(img, "plots/clifford_vignette.png")
## ----results='asis', echo=FALSE-----------------------------------------------
if(IS_GITHUB) {
cat('{height=240px}')
} else {
cat('{height=240px}')
}
|
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Title: Inundated Well Users
#Date: 6/26/2019
#Coder: C. Nathan Jones (cnjones7@ua.edu)
#Purpose: Estimate the number of well users impacted by Hurricane Harvey.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Setup workspace----------------------------------------------------------------
#Clear memory
# NOTE(review): rm(list = ls()) wipes the user's entire workspace; acceptable
# in a stand-alone batch script but discouraged in sourced/shared code.
rm(list=ls(all=TRUE))
#Load Required Packages
library(tidyverse)
library(raster)
library(sf)
library(fasterize)
library(tmap)
#Define working directory and database location
# NOTE(review): machine-specific hard-coded paths (Windows Box Sync drive and
# an NFS mount); these must be edited before running anywhere else.
spatial_data_dir<-"C:\\Users/cnjones7/Box Sync/My Folders/Research Projects/Private Wells/Harvey/spatial_data/"
working_dir<-"//nfs/njones-data/Research Projects/Private Wells/Harvey/inundated_wells/"
#Download well raster; its CRS ("p") becomes the common projection for all
#vector layers read below
wells<-raster(paste0(spatial_data_dir, "Private_Wells/REM_map1990.tif"))
p<-wells@crs
#Download Municipal Boundaries (source: http://gis-txdot.opendata.arcgis.com/datasets/09cd5b6811c54857bd3856b5549e34f0_0)
cities<-st_read(paste0(spatial_data_dir, "TxDOT_City_Boundaries/TxDOT_City_Boundaries.shp")) %>%
  st_transform(., crs=p)
zip_codes<-st_read(paste0(spatial_data_dir, "zip_codes/tl_2015_us_zcta510.shp")) %>%
  st_transform(., crs=p)
counties<-st_read(paste0(spatial_data_dir, "counties_tx/counties_texas.shp")) %>%
  st_transform(., crs=p)
#Define counties sampled in this study------------------------------------------
#Make list of counties in the study: unique, non-missing county names from the
#field-sampling location table, renamed to NAME to match the counties layer
county_names<-read_csv("//nfs/njones-data/Research Projects/Private Wells/Harvey/geolocation_data/locations.csv") %>%
  dplyr::select(Sample_County) %>%
  distinct(.) %>% na.omit() %>%
  rename(NAME = Sample_County)
#Limit Counties to those that were sampled
counties_sampled<-counties %>% right_join(.,county_names)
#sf spatial subset: keep county polygons intersecting the sampled set
counties<-counties[counties_sampled,]
remove(county_names)
remove(counties_sampled)
#Crop the wells raster to the sampled-county extent
wells<-crop(wells, counties)
#Create raster of inundation extent---------------------------------------------
#Build a binary inundation raster by unioning every Dartmouth Flood
#Observatory (DFO) flood-extent shapefile onto the wells raster grid.
#List shape files from Dartmouth Flood Observatory.
#FIX: the original pattern ".shp" was an unanchored regex, so it also matched
#sidecar files such as "*.shp.xml" (and "." matched any character), which
#would break st_read(). Anchor the extension and let base list.files() do the
#filtering directly.
files<-list.files(paste0(spatial_data_dir, "DFO_Inundation"), pattern = "\\.shp$")
#Create blank (all-zero) raster on the same grid as the wells raster
inundation<-wells*0
inundation[is.na(inundation)]<-0
#Rasterize each flood-extent layer and accumulate overlap counts
for(i in seq_along(files)){
  #Read file and reproject to the wells raster CRS
  temp<-st_read(paste0(spatial_data_dir, "DFO_Inundation/", files[i])) %>%
    st_transform(., crs=p)
  #rasterize (1 inside polygons, 0 elsewhere)
  temp<-fasterize(sf=temp, raster= inundation, background=0)
  #Add to inundation running total
  inundation<-inundation+temp
  #Remove temp
  remove(temp)
}
#Make inundation raster binary: 1 = inundated at least once, NA = dry
inundation[inundation==0]<-NA
inundation<-inundation*0+1
#Create Summary Stats (County)--------------------------------------------------
#Create function to summarise inundation and well users for the n-th county.
#NOTE(review): reads `counties`, `wells`, and `inundation` from the global
#environment; `n` is a row index into `counties`.
fun<-function(n){
  #Select county
  county<-counties[n,]
  #crop inundation and wells to county (crop = bounding box, then mask = the
  #polygon itself; order matters for raster size)
  wells <- crop(wells, county)
  inundation <- crop(inundation, county)
  wells <- mask(wells, county)
  inundation <- mask(inundation, county)
  #Create one-row output tibble (areas in CRS units -- presumably m^2 given
  #the projected CRS; confirm against the wells raster projection)
  output<-tibble(
    NAME = county$NAME,
    total_area = st_area(county),
    inun_area = cellStats(inundation, sum)*(res(inundation)[1]^2),
    prop_inun_area = inun_area/total_area,
    total_well_users = cellStats(wells, sum),
    inun_well_users = cellStats(wells*inundation, sum),
    prop_inun_wells = inun_well_users/total_well_users)
  #Export
  output
}
#apply function to every sampled county and stack the one-row tibbles
output<-lapply(seq(1, nrow(counties)), fun) %>% bind_rows()
#Left Join the summary stats back onto the counties sf object (by NAME)
counties<-counties %>%
  dplyr::select(NAME) %>%
  dplyr::left_join(., output)
#Create Summary Stats (Zip Code)------------------------------------------------
#Limit zip codes to those intersecting the sampled counties, then keep only
#the zip-code identifier column (renamed from the Census ZCTA5CE10 field)
zip_codes<-zip_codes[counties,]
#BUG FIX: the original used magrittr's compound pipe `%<>%`, which is NOT
#attached by library(tidyverse) (only `%>%` is re-exported), so the script
#failed with "could not find function '%<>%'". Use plain assignment instead.
zip_codes<-zip_codes %>%
  dplyr::rename(zip = ZCTA5CE10) %>%
  dplyr::select(zip)
#Create function to summarise inundation and well users for the n-th zip code.
#NOTE(review): redefines `fun` from the county section above; reads
#`zip_codes`, `wells`, and `inundation` from the global environment.
fun<-function(n){
  #Select zip code polygon
  zip<-zip_codes[n,]
  #crop inundation and wells to the zip code (crop to bounding box, then mask
  #to the polygon)
  wells <- crop(wells, zip)
  inundation <- crop(inundation, zip)
  wells <- mask(wells, zip)
  inundation <- mask(inundation, zip)
  #Create one-row output tibble (column named zip_code here; renamed to zip
  #after bind_rows below)
  output<-tibble(
    zip_code = zip$zip,
    total_area = st_area(zip),
    inun_area = cellStats(inundation, sum)*(res(inundation)[1]^2),
    prop_inun_area = inun_area/total_area,
    total_well_users = cellStats(wells, sum),
    inun_well_users = cellStats(wells*inundation, sum),
    prop_inun_wells = inun_well_users/total_well_users)
  #Export
  output
}
#apply function to every zip code, stack rows, and rename the key to `zip`
#so it matches the zip_codes sf column for the join below
output<-lapply(seq(1, nrow(zip_codes)), fun) %>% bind_rows() %>% rename(zip = zip_code)
#Left Join the summary stats back onto the zip_codes sf object
zip_codes<-zip_codes %>%
  dplyr::left_join(., output)
#Create Initial County Plots----------------------------------------------------
#Open PNG plotting device (written into working_dir alongside the CSV export)
png(paste0(working_dir, "inundated_wells_county.png"), width=7,height=7, units = "in", res=300)
tmap_mode("plot")
#Create choropleths. FIX: replaced T/F shorthand with TRUE/FALSE -- T and F
#are ordinary, reassignable bindings in R and unsafe in scripts.
#NOTE(review): `breaks=10` with style='quantile' looks like it was meant to
#be `n=10` (number of classes); confirm against the tmap tm_polygons docs.
tm1<-tm_shape(counties) +
  tm_polygons("total_well_users", palette = "BuGn", style = 'quantile', breaks=10) +
  tm_layout(frame=FALSE, legend.show = TRUE)
tm2<-tm_shape(counties) +
  tm_polygons("inun_well_users", palette = 'PuRd', style = 'quantile', breaks=10) +
  tm_layout(frame=FALSE, legend.show = TRUE)
tm3<-tm_shape(counties) +
  tm_polygons("prop_inun_wells", palette = "YlOrBr", style = 'quantile', breaks=10) +
  tm_layout(frame=FALSE, legend.show = TRUE)
tm4<-tm_shape(counties) +
  tm_polygons("prop_inun_area", palette = 'PuBu', style = 'quantile', breaks=10) +
  tm_layout(frame=FALSE, legend.show = TRUE)
#plot the four panels in a 2x2 arrangement
tmap_arrange(tm4, tm1, tm2, tm3)
#Turn plotting device off
dev.off()
#Export csv for good measure (drop geometry via as_tibble first)
output<-counties %>%
  as_tibble() %>%
  dplyr::select(NAME, total_area, inun_area, prop_inun_area, total_well_users,
                inun_well_users, prop_inun_wells)
write_csv(output, paste0(working_dir, "inundated_wells_county.csv"))
#Create Initial Zip Code Plots--------------------------------------------------
#Open PNG plotting device (written into working_dir alongside the CSV export)
png(paste0(working_dir, "inundated_wells_zip.png"), width=7,height=7, units = "in", res=300)
tmap_mode("plot")
#Create choropleths. FIX: replaced T/F shorthand with TRUE/FALSE -- T and F
#are ordinary, reassignable bindings in R and unsafe in scripts.
tm1<-tm_shape(zip_codes) +
  tm_polygons("total_well_users", palette = "BuGn", style = 'quantile', breaks=10) +
  tm_layout(frame=FALSE, legend.show = TRUE)
tm2<-tm_shape(zip_codes) +
  tm_polygons("inun_well_users", palette = 'PuRd', style = 'quantile', breaks=10) +
  tm_layout(frame=FALSE, legend.show = TRUE)
tm3<-tm_shape(zip_codes) +
  tm_polygons("prop_inun_wells", palette = "YlOrBr", style = 'quantile', breaks=10) +
  tm_layout(frame=FALSE, legend.show = TRUE)
tm4<-tm_shape(zip_codes) +
  tm_polygons("prop_inun_area", palette = 'PuBu', style = 'quantile', breaks=10) +
  tm_layout(frame=FALSE, legend.show = TRUE)
#plot the four panels in a 2x2 arrangement
tmap_arrange(tm4, tm1, tm2, tm3)
#Turn plotting device off
dev.off()
#Export csv for good measure (drop geometry via as_tibble first)
output<-zip_codes %>%
  as_tibble() %>%
  dplyr::select(zip,total_area,inun_area,prop_inun_area,total_well_users,inun_well_users,prop_inun_wells)
write_csv(output, paste0(working_dir, "inundated_wells_zip.csv"))
| /RScripts/archive/4_Inundated_Well_Users.R | no_license | asummerfield28/HurricaneHarvey | R | false | false | 7,181 | r | #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Title: Inundated Well Users
#Date: 6/26/2019
#Coder: C. Nathan Jones (cnjones7@ua.edu)
#Purpose: Estimate the number of well user impacted by Hurricane Harvey.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Setup workspace----------------------------------------------------------------
#Clear memory
rm(list=ls(all=TRUE))
#Load Required Packages
library(tidyverse)
library(raster)
library(sf)
library(fasterize)
library(tmap)
#Define working directory and database location
spatial_data_dir<-"C:\\Users/cnjones7/Box Sync/My Folders/Research Projects/Private Wells/Harvey/spatial_data/"
working_dir<-"//nfs/njones-data/Research Projects/Private Wells/Harvey/inundated_wells/"
#Download well and reproject relevant data
wells<-raster(paste0(spatial_data_dir, "Private_Wells/REM_map1990.tif"))
p<-wells@crs
#Download Municipal Boundaries (source: http://gis-txdot.opendata.arcgis.com/datasets/09cd5b6811c54857bd3856b5549e34f0_0)
cities<-st_read(paste0(spatial_data_dir, "TxDOT_City_Boundaries/TxDOT_City_Boundaries.shp")) %>%
st_transform(., crs=p)
zip_codes<-st_read(paste0(spatial_data_dir, "zip_codes/tl_2015_us_zcta510.shp")) %>%
st_transform(., crs=p)
counties<-st_read(paste0(spatial_data_dir, "counties_tx/counties_texas.shp")) %>%
st_transform(., crs=p)
#Define counties sampled in this study------------------------------------------
#Make list of counties in the study
county_names<-read_csv("//nfs/njones-data/Research Projects/Private Wells/Harvey/geolocation_data/locations.csv") %>%
dplyr::select(Sample_County) %>%
distinct(.) %>% na.omit() %>%
rename(NAME = Sample_County)
#Limit Counties
counties_sampled<-counties %>% right_join(.,county_names)
counties<-counties[counties_sampled,]
remove(county_names)
remove(counties_sampled)
#Crop wells
wells<-crop(wells, counties)
#Create raster of inundation extent---------------------------------------------
#List shape files from Dartmouth Flood Observatory
files<-list.files(paste0(spatial_data_dir, "DFO_Inundation")) %>%
tibble::enframe(name = NULL) %>%
filter(str_detect(value,".shp")) %>%
as_vector()
#Create blank inundation raster
inundation<-wells*0
inundation[is.na(inundation)]<-0
#Create loop to download and rasterize each
for(i in 1:length(files)){
#Read file
temp<-st_read(paste0(spatial_data_dir, "DFO_Inundation/", files[i])) %>%
st_transform(., crs=p)
#rasterize
temp<-fasterize(sf=temp, raster= inundation, background=0)
#Add to inundation
inundation<-inundation+temp
#Remove temp
remove(temp)
}
#Make inundation raster bianary
inundation[inundation==0]<-NA
inundation<-inundation*0+1
#Create Summary Stats (County)--------------------------------------------------
#Summarise one county (row n of `counties`): total/inundated area and
#total/inundated well users. Relies on the globals counties, wells and
#inundation; the crop/mask assignments below only shadow them locally.
fun<-function(n){
  #Select county
  county<-counties[n,]
  #crop and mask the inundation and wells rasters to the county boundary
  wells <- crop(wells, county)
  inundation <- crop(inundation, county)
  wells <- mask(wells, county)
  inundation <- mask(inundation, county)
  #Create output tibble (inun_area = inundated cell count * cell area)
  output<-tibble(
    NAME = county$NAME,
    total_area = st_area(county),
    inun_area = cellStats(inundation, sum)*(res(inundation)[1]^2),
    prop_inun_area = inun_area/total_area,
    total_well_users = cellStats(wells, sum),
    inun_well_users = cellStats(wells*inundation, sum),
    prop_inun_wells = inun_well_users/total_well_users)
  #Export
  output
}
#apply function to every county row
#(seq_len is safe for zero-row inputs, unlike seq(1, nrow(counties)))
output<-lapply(seq_len(nrow(counties)), fun) %>% bind_rows()
#Left Join the summary back onto the counties sf
counties<-counties %>%
  dplyr::select(NAME) %>%
  dplyr::left_join(., output)
#Create Summary Stats (Zip Code)------------------------------------------------
#Limit zip codes to county extent (sf spatial subset)
zip_codes<-zip_codes[counties,]
#Bug fix: the original used magrittr's compound pipe `%<>%`, but magrittr is
#never attached (library(tidyverse) only provides `%>%`), so the script would
#stop here with "could not find function". Use explicit reassignment instead.
zip_codes <- zip_codes %>%
  dplyr::rename(zip = ZCTA5CE10) %>%
  dplyr::select(zip)
#Summarise one zip code (row n of `zip_codes`): total/inundated area and
#total/inundated well users; mirrors the county summary above.
fun<-function(n){
  #Select zip code
  zip<-zip_codes[n,]
  #crop and mask the inundation and wells rasters to the zip boundary
  wells <- crop(wells, zip)
  inundation <- crop(inundation, zip)
  wells <- mask(wells, zip)
  inundation <- mask(inundation, zip)
  #Create output tibble (inun_area = inundated cell count * cell area)
  output<-tibble(
    zip_code = zip$zip,
    total_area = st_area(zip),
    inun_area = cellStats(inundation, sum)*(res(inundation)[1]^2),
    prop_inun_area = inun_area/total_area,
    total_well_users = cellStats(wells, sum),
    inun_well_users = cellStats(wells*inundation, sum),
    prop_inun_wells = inun_well_users/total_well_users)
  #Export
  output
}
#apply function to every zip row (seq_len is safe for zero rows)
output<-lapply(seq_len(nrow(zip_codes)), fun) %>% bind_rows() %>% rename(zip = zip_code)
#Left Join the summary back onto the zip_codes sf
zip_codes<-zip_codes %>%
  dplyr::left_join(., output)
#Create Initial County Plots----------------------------------------------------
#Open the 7x7-inch, 300-dpi PNG device
png(paste0(working_dir, "inundated_wells_county.png"), width=7,height=7, units = "in", res=300)
tmap_mode("plot")
#Helper: quantile choropleth of one county attribute with a shared layout
county_panel <- function(attribute, colors) {
  tm_shape(counties) +
    tm_polygons(attribute, palette = colors, style = "quantile", breaks = 10) +
    tm_layout(frame = FALSE, legend.show = TRUE)
}
tm1 <- county_panel("total_well_users", "BuGn")
tm2 <- county_panel("inun_well_users", "PuRd")
tm3 <- county_panel("prop_inun_wells", "YlOrBr")
tm4 <- county_panel("prop_inun_area", "PuBu")
#Draw the four panels on one page
tmap_arrange(tm4, tm1, tm2, tm3)
#Close the plotting device
dev.off()
#Export the same county summary as csv for good measure
output <- counties %>%
  as_tibble() %>%
  dplyr::select(NAME, total_area, inun_area, prop_inun_area, total_well_users,
                inun_well_users, prop_inun_wells)
write_csv(output, paste0(working_dir, "inundated_wells_county.csv"))
#Create Initial Zip Code Plots--------------------------------------------------
#Open the 7x7-inch, 300-dpi PNG device
png(paste0(working_dir, "inundated_wells_zip.png"), width=7,height=7, units = "in", res=300)
tmap_mode("plot")
#Helper: quantile choropleth of one zip-code attribute with a shared layout
zip_panel <- function(attribute, colors) {
  tm_shape(zip_codes) +
    tm_polygons(attribute, palette = colors, style = "quantile", breaks = 10) +
    tm_layout(frame = FALSE, legend.show = TRUE)
}
tm1 <- zip_panel("total_well_users", "BuGn")
tm2 <- zip_panel("inun_well_users", "PuRd")
tm3 <- zip_panel("prop_inun_wells", "YlOrBr")
tm4 <- zip_panel("prop_inun_area", "PuBu")
#Draw the four panels on one page
tmap_arrange(tm4, tm1, tm2, tm3)
#Close the plotting device
dev.off()
#Export the same zip-code summary as csv for good measure
output <- zip_codes %>%
  as_tibble() %>%
  dplyr::select(zip,total_area,inun_area,prop_inun_area,total_well_users,inun_well_users,prop_inun_wells)
write_csv(output, paste0(working_dir, "inundated_wells_zip.csv"))
|
#Session setup: clear the workspace and point at the RStudio Cloud project
rm(list = ls())
setwd("/cloud/project")
library(ggplot2)
library(reshape2)
library(ggpubr)
library(Hotelling)
library(knitr)
library(HDtest)
library(kableExtra)
#setwd("~/zebrafish/analysis1218")
#NOTE(review): current1.lightoff and baseline1 are used below without being
#defined here -- presumably supplied by old60.RData; normal.diy() and
#plot.diy() come from function_new.R. Confirm before refactoring.
load("/cloud/project/environment/old60.RData")
source("/cloud/project/code/function_new.R")
# average value from -30 to 59 seconds
# folder 1, 2 and old, including normalized data
folder <- list()
#add offset
offset = 0.13
t_interval = 30
range_time = c(-t_interval:(2*t_interval-1))
plot.range = c(-0.03, 0.43)
#analysis window: 30 s before light-off through 60 s after
workingData = subset(current1.lightoff, current1.lightoff$time >= - t_interval & current1.lightoff$time < 2*t_interval)
folder$data = normal.diy(workingData = workingData, baseline = baseline1, current.lightoff = current1.lightoff)
# plot the two genotypes of interest
chosen1 = c('Q344X','Rho')
#NOTE(review): `plot` shadows graphics::plot() from here on; harmless in this
#script (base plot() is not called afterwards) but worth renaming eventually.
plot = list()
plot$fig = plot.diy(workingData = folder$data, plot.range = plot.range, chosen = chosen1, OnOff = 'Light-Off', rep = 18)
#2x2 panel of the raw and the three normalized mean traces
ggarrange(plot$fig$mean, plot$fig$mean_light_normalized,
          plot$fig$mean_baseline_normalized, plot$fig$mean_int_normalized,
          ncol = 2, nrow = 2, labels = c("a)", "b)","c)","d)"))
#Split the normalized data by genotype
Rho <- folder$data[folder$data$genotype == "Rho", ]
Q344X <- folder$data[folder$data$genotype == "Q344X", ]
### Rho: per-time-point mean and spread of the normalized activity
meanDrugTime = tapply(as.numeric(unlist(Rho$mean_int_normalized)),
                      list(Rho$genotype, Rho$time), mean)
#NOTE(review): the row labelled "SE" is computed with sd(), i.e. it is the
#sample SD at each time point; "SEM" below divides it by sqrt(n). The csv
#row names are kept as-is for downstream compatibility.
SE = tapply(as.numeric(unlist(Rho$mean_int_normalized)),
            list(Rho$genotype, Rho$time), sd)
#assumes 48 fish per replicate -- TODO confirm against the experiment design
SEM = SE/sqrt(48*max(Rho$rep))
result_Rho <- as.data.frame(rbind(meanDrugTime,SE,SEM))
rownames(result_Rho) <- c("mean", "SE", "SEM")
write.csv(result_Rho, file = "result_Rho.csv")
### Q344X: same summary for the mutant genotype
meanDrugTime = tapply(as.numeric(unlist(Q344X$mean_int_normalized)),
                      list(Q344X$genotype, Q344X$time), mean)
SE = tapply(as.numeric(unlist(Q344X$mean_int_normalized)),
            list(Q344X$genotype, Q344X$time), sd)
SEM = SE/sqrt(48*max(Q344X$rep))
result_Q344X <- as.data.frame(rbind(meanDrugTime,SE,SEM))
rownames(result_Q344X) <- c("mean", "SE", "SEM")
write.csv(result_Q344X, file = "result_Q344X.csv")
#### 1 second after light onset ##
# 1 second analysis: repeat the pipeline on a tight (-1 s, +1 s) window
folder1 = list()
offset = 0.13
t_interval = 1
range_time = c(-t_interval:(t_interval-1))
workingData = subset(current1.lightoff, current1.lightoff$time >= - t_interval & current1.lightoff$time < t_interval)
folder1$off30data = normal.diy(workingData = workingData, baseline = baseline1, current.lightoff = current1.lightoff)
plot.range = c(-0.03, 0.43)
chosen1 = c('Q344X','Rho')
plot = list()
plot$fig = plot.diy(workingData = folder1$off30data, plot.range = plot.range, chosen = chosen1, OnOff = 'Light-Off', rep = 18)
#2x2 panel of the raw and the three normalized mean traces
ggarrange(plot$fig$mean, plot$fig$mean_light_normalized,
          plot$fig$mean_baseline_normalized, plot$fig$mean_int_normalized,
          ncol = 2, nrow = 2, labels = c("a)", "b)","c)","d)"))
# Mean normalized activity at time 0 for each genotype in each replicate.
# Output: 2 x 18 data frame (rows Rho/Q344X, columns rep1..rep18) written to
# csv. Preallocated instead of growing a data.frame inside the loop, and the
# column names are generated rather than typed out by hand.
n_reps <- 18
result <- as.data.frame(matrix(NA_real_, nrow = 2, ncol = n_reps))
for (i in seq_len(n_reps)) {
  # one replicate at light-off time 0 (renamed to avoid shadowing base::rep)
  rep_dat <- folder1$off30data[folder1$off30data$rep == i, ]
  rho <- rep_dat[rep_dat$genotype == "Rho" & rep_dat$time == 0, ]
  q344 <- rep_dat[rep_dat$genotype == "Q344X" & rep_dat$time == 0, ]
  result[1, i] <- mean(rho$mean_int_normalized)
  result[2, i] <- mean(q344$mean_int_normalized)
  print(i)  # progress indicator
}
colnames(result) <- paste0("rep", seq_len(n_reps))
rownames(result) <- c("Rho", "Q344X")
write.csv(result, "Time_0_original_Rho_Q344X.csv")
| /data_original.R | permissive | samcom12/Zebrafish_Model | R | false | false | 3,509 | r | rm(list = ls())
setwd("/cloud/project")
library(ggplot2)
library(reshape2)
library(ggpubr)
library(Hotelling)
library(knitr)
library(HDtest)
library(kableExtra)
#setwd("~/zebrafish/analysis1218")
load("/cloud/project/environment/old60.RData")
source("/cloud/project/code/function_new.R")
# average value from -30 to 59 seconds
# folder 1, 2 and old, including normalized data
folder <- list()
#add offset
offset = 0.13
t_interval = 30
range_time = c(-t_interval:(2*t_interval-1))
plot.range = c(-0.03, 0.43)
workingData = subset(current1.lightoff, current1.lightoff$time >= - t_interval & current1.lightoff$time < 2*t_interval)
folder$data = normal.diy(workingData = workingData, baseline = baseline1, current.lightoff = current1.lightoff)
# plot
chosen1 = c('Q344X','Rho')
plot = list()
plot$fig = plot.diy(workingData = folder$data, plot.range = plot.range, chosen = chosen1, OnOff = 'Light-Off', rep = 18)
ggarrange(plot$fig$mean, plot$fig$mean_light_normalized,
plot$fig$mean_baseline_normalized, plot$fig$mean_int_normalized,
ncol = 2, nrow = 2, labels = c("a)", "b)","c)","d)"))
Rho <- folder$data[folder$data$genotype == "Rho", ]
Q344X <- folder$data[folder$data$genotype == "Q344X", ]
### Rho
meanDrugTime = tapply(as.numeric(unlist(Rho$mean_int_normalized)),
list(Rho$genotype, Rho$time), mean)
SE = tapply(as.numeric(unlist(Rho$mean_int_normalized)),
list(Rho$genotype, Rho$time), sd)
SEM = SE/sqrt(48*max(Rho$rep))
result_Rho <- as.data.frame(rbind(meanDrugTime,SE,SEM))
rownames(result_Rho) <- c("mean", "SE", "SEM")
write.csv(result_Rho, file = "result_Rho.csv")
### Q344X
meanDrugTime = tapply(as.numeric(unlist(Q344X$mean_int_normalized)),
list(Q344X$genotype, Q344X$time), mean)
SE = tapply(as.numeric(unlist(Q344X$mean_int_normalized)),
list(Q344X$genotype, Q344X$time), sd)
SEM = SE/sqrt(48*max(Q344X$rep))
result_Q344X <- as.data.frame(rbind(meanDrugTime,SE,SEM))
rownames(result_Q344X) <- c("mean", "SE", "SEM")
write.csv(result_Q344X, file = "result_Q344X.csv")
#### 1 second after light onset ##
# 1 second analysis
folder1 = list()
offset = 0.13
t_interval = 1
range_time = c(-t_interval:(t_interval-1))
workingData = subset(current1.lightoff, current1.lightoff$time >= - t_interval & current1.lightoff$time < t_interval)
folder1$off30data = normal.diy(workingData = workingData, baseline = baseline1, current.lightoff = current1.lightoff)
plot.range = c(-0.03, 0.43)
chosen1 = c('Q344X','Rho')
plot = list()
plot$fig = plot.diy(workingData = folder1$off30data, plot.range = plot.range, chosen = chosen1, OnOff = 'Light-Off', rep = 18)
ggarrange(plot$fig$mean, plot$fig$mean_light_normalized,
plot$fig$mean_baseline_normalized, plot$fig$mean_int_normalized,
ncol = 2, nrow = 2, labels = c("a)", "b)","c)","d)"))
result = data.frame()
for (i in 1:18){
rep <- folder1$off30data[folder1$off30data$rep == i,]
rho = rep[rep$genotype == "Rho" & rep$time == 0,]
q344 = rep[rep$genotype == "Q344X" & rep$time == 0,]
ave_rho = mean(rho$mean_int_normalized)
ave_q344 = mean(q344$mean_int_normalized)
result[1,i] = ave_rho
result[2,i] = ave_q344
print(i)
}
colnames(result) = c("rep1","rep2","rep3","rep4","rep5","rep6","rep7","rep8","rep9",
"rep10","rep11","rep12","rep13","rep14","rep15","rep16","rep17","rep18")
rownames(result) = c("Rho", "Q344X")
write.csv(result,"Time_0_original_Rho_Q344X.csv")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/distances.R
\name{cluster_distance}
\alias{cluster_distance}
\title{Matrix of Ward's distances between clusters}
\usage{
cluster_distance(table, clusters, dimension = 1)
}
\arguments{
\item{table}{object of class "table".}
\item{clusters}{list of integer vectors. Each vector should define a cluster
by specifying row or column indices of its members. Clusters must not overlap.}
\item{dimension}{integer. Whether to use rows (1) or columns (2). Default is 1.}
}
\value{
Matrix of size length(clusters) x length(clusters) containing distances between selected clusters of
rows or columns.
}
\description{
Calculates chi-square distances between selected rows or columns of the contingency table.
}
\examples{
data(israeli_survey)
cluster_distance(israeli_survey, as.list(seq_len(nrow(israeli_survey))), 1)
cluster_distance(israeli_survey, list(1, 2, c(3, 5), c(4, 6, 7), 8), 1)
}
| /man/cluster_distance.Rd | permissive | aczepielik/CrossTabCluster | R | false | true | 961 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/distances.R
\name{cluster_distance}
\alias{cluster_distance}
\title{Matrix of Ward's distances between clusters}
\usage{
cluster_distance(table, clusters, dimension = 1)
}
\arguments{
\item{table}{object of class "table".}
\item{clusters}{list of integer vectors. Each vector should define a cluster
by specifying row or column indices of its members. Clusters must not overlap.}
\item{dimension}{integer. Whether to use rows (1) or columns (2). Default is 1.}
}
\value{
Matrix of size length(clusters) x length(clusters) containing distances between selected clusters of
rows or columns.
}
\description{
Calculates chi-square distances between selected rows or columns of the contingency table.
}
\examples{
data(israeli_survey)
cluster_distance(israeli_survey, as.list(seq_len(nrow(israeli_survey))), 1)
cluster_distance(israeli_survey, list(1, 2, c(3, 5), c(4, 6, 7), 8), 1)
}
|
library(shiny)
# Build a Shiny app object rendered under Bootstrap 2.
# Bug fix: the original had a trailing comma after mainPanel(...) inside
# fluidPage(), which creates an empty argument and errors at runtime
# ("argument is missing/empty") when the UI is evaluated.
bs2appObj <- function() {
  shinybootstrap2::withBootstrap2({
    shinyApp(
      ui = fluidPage(
        sidebarPanel(selectInput("n", "n", c(1, 5, 10))),
        mainPanel(plotOutput("plot"))
        # <p class="muted">Fusce dapibus, tellus ac cursus commodo, tortor mauris nibh.</p>
      ),
      server = function(input, output) {
        # Plot the first n rows of the built-in cars data set
        output$plot <- renderPlot({
          plot(head(cars, as.numeric(input$n)))
        })
      }
    )
  })
}
} | /shiny.R | no_license | roxanaivan95/Driving-School | R | false | false | 500 | r | library(shiny)
bs2appObj <- function() {
shinybootstrap2::withBootstrap2({
shinyApp(
ui = fluidPage(
sidebarPanel(selectInput("n", "n", c(1, 5, 10))),
mainPanel(plotOutput("plot")),
# <p class="muted">Fusce dapibus, tellus ac cursus commodo, tortor mauris nibh.</p>
),
server = function(input, output) {
output$plot <- renderPlot({
plot(head(cars, as.numeric(input$n)))
})
}
)
})
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Fn.inv.Bernshtein.R
\name{Fn.inv.Bernshtein}
\alias{Fn.inv.Bernshtein}
\title{Bernstein polynomial fitting to a quasi-inverse.}
\usage{
Fn.inv.Bernshtein(u, valores.emp)
}
\arguments{
\item{u}{A point where the fitted Bernstein polynomial is to be evaluated at. \eqn{u \in [0,1]}}
\item{valores.emp}{Observed values.}
}
\description{
Bernstein polynomial fitting to a quasi-inverse.
}
\details{
See also equation 4 in Hernandez-Maldonado, Diaz-Viera and Erdely, 2012,
A joint stochastic simulation method using the Bernstein copula as a flexible...
This is the same function as lmomco:::dat2bernqua
}
\references{
Munoz-Perez and Fernandez-Palacin, 1987. Bernstein-Kantorovich polynomial
}
\author{
Arturo Erdely (\email{arturo.erdely@comunidad.unam.mx})
}
| /man/Fn.inv.Bernshtein.Rd | no_license | mathphysmx/bernstein | R | false | true | 836 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Fn.inv.Bernshtein.R
\name{Fn.inv.Bernshtein}
\alias{Fn.inv.Bernshtein}
\title{Bernstein polynomial fitting to a quasi-inverse.}
\usage{
Fn.inv.Bernshtein(u, valores.emp)
}
\arguments{
\item{u}{A point where the fitted Bernstein polynomial is to be evaluated at. \eqn{u \in [0,1]}}
\item{valores.emp}{Observed values.}
}
\description{
Bernstein polynomial fitting to a quasi-inverse.
}
\details{
See also equation 4 in Hernandez-Maldonado, Diaz-Viera and Erdely, 2012,
A joint stochastic simulation method using the Bernstein copula as a flexible...
This is the same function as lmomco:::dat2bernqua
}
\references{
Munoz-Perez and Fernandez-Palacin, 1987. Bernstein-Kantorovich polynomial
}
\author{
Arturo Erdely (\email{arturo.erdely@comunidad.unam.mx})
}
|
library(igraph)
# Count how many times vertex "one" participates in each 3-node motif of a
# random directed graph (the same approach works for undirected graphs).
g <- barabasi.game(10, m = 5, power = 0.6, out.pref = TRUE,
                   zero.appeal = 0.5, directed = TRUE)
plot(g, vertex.size = 20, edge.width = 2, edge.arrow.size = 0.5,
     edge.color = "black")
# Name the vertices so subsets/deletions are easy to follow
V(g)$name <- c("one", "two", "three", "four", "five",
               "six", "seven", "eight", "nine", "ten")
# 1-step ego network around vertex "one"
ego_g <- graph.neighborhood(g, order = 1, V(g)[1], mode = "all")[[1]]
# Triad census (length-16 vector) with and without vertex "one"; the
# difference is the number of triads that "one" takes part in
census_all <- triad.census(ego_g)
census_without <- triad.census(delete.vertices(ego_g, "one"))
node1Motifs <- census_all - census_without
# Keep the 13 connected triad classes (drop the disconnected ones)
final_node1Motifs <- node1Motifs[c(3, 5, 6:16)]
# Idea from https://stackoverflow.com/questions/12374534/how-to-mine-for-motifs-in-r-with-igraph #
# Idea from https://stackoverflow.com/questions/12374534/how-to-mine-for-motifs-in-r-with-igraph # | /network-metrics/example-mine_numtimes_motifs_for_individual_vertex.R | no_license | doboateng1/tda-ps | R | false | false | 1,155 | r | library(igraph)
# test (dataset here is directed graph, but we can also apply it to undirected graph) #
# Below procedure is to calculate -> the number of times that node "one" appears in all 3-node motifs #
testGraph = barabasi.game(10,
m = 5,
power = 0.6,
out.pref = TRUE,
zero.appeal = 0.5,
directed = TRUE)
plot(testGraph,vertex.size = 20, edge.width =2, edge.arrow.size = 0.5, edge.color = "black")
# Label nodes to more easily keep track during subsets/deletions
V(testGraph)$name = c('one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'ten')
subGraph = graph.neighborhood(testGraph, order = 1, V(testGraph)[1], mode = 'all')[[1]]
allMotifs = triad.census(subGraph)
removeNode = delete.vertices(subGraph, 'one')
node1Motifs = allMotifs - triad.census(removeNode) # the length of output here is 16
final_node1Motifs = node1Motifs[c(3,5,6:16)] # the length of final version is 13
# test over #
# Idea from https://stackoverflow.com/questions/12374534/how-to-mine-for-motifs-in-r-with-igraph # |
# Right now just using a fixed total system 2020 demand time series.
# I think I created this with PRRISM without too much thought.
# Need to re-do, making sure I turn off restrictions!
#
# Demands have been imported into the dataframe, demands.daily.df
# where
# date_time = date
# demands_total_unrestricted = total system demands without restrictions
#
# Add restriction level and restricted demands:
| /code/server/demands_maybediscard.R | no_license | ShyGuyPy/drought_ops_test_app | R | false | false | 420 | r | # Right now just using a fixed total system 2020 demand time series.
# I think I created this with PRRISM without too much thought.
# Need to re-do, making sure I turn off restrictions!
#
# Demands have been imported into the dataframe, demands.daily.df
# where
# date_time = date
# demands_total_unrestricted = total system demands without restrictions
#
# Add restriction level and restricted demands:
|
install.packages(c("rvest","XML","magrittr","xml2"))
library(rvest)
library(XML)
library(xml2)
library(magrittr)
# Amazon Reviews #############################
# Scrape the review text from the first 10 pages of reviews for this product.
aurl <- "https://amazon.in/Apple-MacBook-Air-13-3-inch-Integrated/product-reviews/B073Q5R6VR/ref=cm_cr_arp_d_paging_btm_3?showViewpoints=1&pageNumber"
# Collect each page's reviews in a preallocated list and flatten once at the
# end; growing a vector with c() inside the loop copies it on every pass.
pages <- vector("list", 10)
for (i in seq_len(10)) {
  # paste() already returns character, so the original as.character() wrapper
  # was redundant and has been dropped
  murl <- read_html(paste(aurl, i, sep = "="))
  pages[[i]] <- murl %>% html_nodes(".review-text") %>% html_text()
}
amazon_reviews <- unlist(pages)
length(amazon_reviews)
write.table(amazon_reviews,"apple.txt",row.names = F)
getwd()
install.packages("tm") # for text mining
install.packages(c("SnowballC","textstem")) # for text stemming
install.packages("wordcloud") # word-cloud generator
install.packages("RColorBrewer") # color palettes
library('tm')
library("SnowballC")
library("wordcloud")
library("RColorBrewer")
library('textstem')
# Importing apple reviews data
#apple <- read.csv("/Volumes/Data/Course Content/DS content/Text Mining/apple.txt", sep="")
# Importing apple reviews data
x <- as.character(amazon_reviews)
x <- iconv(x, "UTF-8") #Unicode Transformation Format. The '8' means it uses 8-bit blocks to represent a character
# Load the data as a corpus (one document per review)
x <- Corpus(VectorSource(x))
inspect(x[1])
# Convert the text to lower case
x1 <- tm_map(x, tolower)
inspect(x1[1])
# Remove numbers
x1 <- tm_map(x1, removeNumbers)
# Remove punctuations
x1 <- tm_map(x1, removePunctuation)
# Remove english common stopwords
x1 <- tm_map(x1, removeWords, stopwords('english'))
# Remove your own stop word
# specify your stopwords as a character vector
x1 <- tm_map(x1, removeWords, c("apple", "mac","the","will"))
#striping white spaces (collapse the gaps left by the removals above)
x1 <- tm_map(x1, stripWhitespace)
inspect(x1[1])
# Text stemming
#NOTE(review): textstem::lemmatize_words() expects a character vector of
#words, not a tm Corpus; applied to a corpus it likely leaves the text
#unchanged. Consider tm_map(x1, lemmatize_strings) instead -- confirm.
x1<-lemmatize_words(x1)
#x1 <- tm_map(x1, stemDocument)
# Term document matrix
# converting unstructured data to structured format using TDM
tdm <- TermDocumentMatrix(x1)
tdm <- as.matrix(tdm)
tdm
# Frequency table of terms, most common first
v <- sort(rowSums(tdm),decreasing=TRUE)
d <- data.frame(word = names(v),freq=v)
head(d, 10)
# Bar plot of terms appearing at least 10 times
w <- rowSums(tdm)
w_sub <- subset(w, w >= 10)
barplot(w_sub, las=3, col = rainbow(20))
# Product terms repeat in almost all documents; drop them and rebuild the TDM
x1 <- tm_map(x1, removeWords, c('apple','air',"laptop",'can','will',"amazon",'mac','macbook','product'))
x1 <- tm_map(x1, stripWhitespace)
tdm <- TermDocumentMatrix(x1)
tdm <- as.matrix(tdm)
# Bar plot after removing the product terms
w <- rowSums(tdm)
w_sub <- subset(w, w >= 10)
barplot(w_sub, las=3, col = rainbow(20))
# (The original script repeated the removeWords/stripWhitespace/TDM rebuild a
# second time; that pass was an exact copy-paste duplicate and a no-op --
# removing already-removed words changes nothing -- so it has been deleted.)
w1 <- rowSums(tdm)
# Word cloud
#with all the remaining words
wordcloud(words = names(w1), freq = w1,
          random.order = F, colors = rainbow(20),
          scale=c(2,.4), rot.per = 0.3)
# Loading +ve and -ve sentiment dictionaries (interactive file pickers)
pos.words = scan(file.choose(), what="character", comment.char=";")# read-in positive-words.txt
neg.words = scan(file.choose(), what="character", comment.char=";") # read-in negative-words.txt
pos.words = c(pos.words,"wow", "kudos", "hurray") # including our own positive words to the existing list
# Positive wordcloud
# Bug fix: match() returns NA for words that are NOT in the dictionary, so
# the original `is.na(...)` kept the non-positive words; negate it to keep
# the words that actually match the positive dictionary.
pos.matches = match(names(w), c(pos.words))
pos.matches = !is.na(pos.matches)
freq_pos <- w[pos.matches]
p_names <- names(freq_pos)
wordcloud(p_names,freq_pos,scale=c(3.5,.5),colors = rainbow(20))
# Negative wordcloud (same fix as above)
neg.matches = match(names(w), c(neg.words))
neg.matches = !is.na(neg.matches)
freq_neg <- w[neg.matches]
n_names <- names(freq_neg)
wordcloud(n_names,freq_neg,scale=c(3.5,.5),colors = brewer.pal(8,"Dark2"))
# Association between words: terms correlated with "problems" at r >= 0.3
tdm <- TermDocumentMatrix(x1)
findAssocs(tdm, c("problems"),corlimit = 0.3)
| /amazon.R | no_license | monicamurugesan/Text-Mining-DS | R | false | false | 3,956 | r | install.packages(c("rvest","XML","magrittr","xml2"))
library(rvest)
library(XML)
library(xml2)
library(magrittr)
# Amazon Reviews #############################
aurl <- "https://amazon.in/Apple-MacBook-Air-13-3-inch-Integrated/product-reviews/B073Q5R6VR/ref=cm_cr_arp_d_paging_btm_3?showViewpoints=1&pageNumber"
amazon_reviews <- NULL
for (i in 1:10){
murl <- read_html(as.character(paste(aurl,i,sep="=")))
rev <- murl %>% html_nodes(".review-text") %>% html_text()
amazon_reviews <- c(amazon_reviews,rev)
}
length(amazon_reviews)
write.table(amazon_reviews,"apple.txt",row.names = F)
getwd()
install.packages("tm") # for text mining
install.packages(c("SnowballC","textstem")) # for text stemming
install.packages("wordcloud") # word-cloud generator
install.packages("RColorBrewer") # color palettes
library('tm')
library("SnowballC")
library("wordcloud")
library("RColorBrewer")
library('textstem')
# Importing apple reviews data
#apple <- read.csv("/Volumes/Data/Course Content/DS content/Text Mining/apple.txt", sep="")
# Importing apple reviews data
x <- as.character(amazon_reviews)
x <- iconv(x, "UTF-8") #Unicode Transformation Format. The '8' means it uses 8-bit blocks to represent a character
# Load the data as a corpus
x <- Corpus(VectorSource(x))
inspect(x[1])
# Convert the text to lower case
x1 <- tm_map(x, tolower)
inspect(x1[1])
# Remove numbers
x1 <- tm_map(x1, removeNumbers)
# Remove punctuations
x1 <- tm_map(x1, removePunctuation)
# Remove english common stopwords
x1 <- tm_map(x1, removeWords, stopwords('english'))
# Remove your own stop word
# specify your stopwords as a character vector
x1 <- tm_map(x1, removeWords, c("apple", "mac","the","will"))
#striping white spaces
x1 <- tm_map(x1, stripWhitespace)
inspect(x1[1])
# Text stemming
x1<-lemmatize_words(x1)
#x1 <- tm_map(x1, stemDocument)
# Term document matrix
# converting unstructured data to structured format using TDM
tdm <- TermDocumentMatrix(x1)
tdm <- as.matrix(tdm)
tdm
#Frequency
v <- sort(rowSums(tdm),decreasing=TRUE)
d <- data.frame(word = names(v),freq=v)
head(d, 10)
# Bar plot
w <- rowSums(tdm)
w_sub <- subset(w, w >= 10)
barplot(w_sub, las=3, col = rainbow(20))
# Term laptop repeats in all most all documents
x1 <- tm_map(x1, removeWords, c('apple','air',"laptop",'can','will',"amazon",'mac','macbook','product'))
x1 <- tm_map(x1, stripWhitespace)
tdm <- TermDocumentMatrix(x1)
tdm <- as.matrix(tdm)
# Bar plot
w <- rowSums(tdm)
w_sub <- subset(w, w >= 10)
barplot(w_sub, las=3, col = rainbow(20))
# Term laptop repeats in all most all documents
x1 <- tm_map(x1, removeWords, c('apple','air',"laptop",'can','will',"amazon",'mac','macbook','product'))
x1 <- tm_map(x1, stripWhitespace)
tdm <- TermDocumentMatrix(x1)
tdm <- as.matrix(tdm)
w1 <- rowSums(tdm)
# Word cloud
#with all the words
wordcloud(words = names(w1), freq = w1,
random.order = F, colors = rainbow(20),
scale=c(2,.4), rot.per = 0.3)
# lOADING +VE AND -VE dictonaries
pos.words = scan(file.choose(), what="character", comment.char=";")# read-in positive-words.txt
neg.words = scan(file.choose(), what="character", comment.char=";") # read-in negative-words.txt
pos.words = c(pos.words,"wow", "kudos", "hurray") # including our own positive words to the existing list
# Positive wordcloud
pos.matches = match(names(w), c(pos.words))
pos.matches = is.na(pos.matches)
freq_pos <- w[pos.matches]
p_names <- names(freq_pos)
wordcloud(p_names,freq_pos,scale=c(3.5,.5),colors = rainbow(20))
# Negative wordcloud
neg.matches = match(names(w), c(neg.words))
neg.matches = is.na(neg.matches)
freq_neg <- w[neg.matches]
n_names <- names(freq_neg)
wordcloud(n_names,freq_neg,scale=c(3.5,.5),colors = brewer.pal(8,"Dark2"))
#Association between words
tdm <- TermDocumentMatrix(x1)
findAssocs(tdm, c("problems"),corlimit = 0.3)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kdensity_helpers.R
\name{support_compatible}
\alias{support_compatible}
\title{Checks compatibility between supports.}
\usage{
support_compatible(kernel, start, support)
}
\arguments{
\item{kernel, start, support}{The kernel, start and support to check.}
}
\value{
None.
}
\description{
The supplied support must never be larger than the support of
the parametric start / kernel.
}
\keyword{internal}
| /man/support_compatible.Rd | no_license | cran/kdensity | R | false | true | 498 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kdensity_helpers.R
\name{support_compatible}
\alias{support_compatible}
\title{Checks compatibility between supports.}
\usage{
support_compatible(kernel, start, support)
}
\arguments{
\item{kernel, start, support}{The kernel, start and support to check.}
}
\value{
None.
}
\description{
The supplied support must never be larger than the support of
the parametric start / kernel.
}
\keyword{internal}
|
# HW3
# 1
# a) simulate n=100 points from a quadratic model with N(0,1) noise
set.seed(1)
x <- rnorm(100)
y <- x-2*x^2+rnorm(100)
# n=100; p=2; model: y=x-2x^2+e (the error term)
# b)
plot(x,y)
# We see a negative parabolic curve. As we expect from
# normal distributions, the center is at 0 and there are
# more data points clustered around the center than the
# sparse few on each end of the tails.
# c) LOOCV errors for polynomial fits of degree 1-4
# (cv.glm with no K argument defaults to K = n, i.e. leave-one-out;
#  glm() with the default gaussian family is equivalent to lm() here)
library(boot)
mat1 <- matrix(data = NA, nrow = 4, ncol=2) # record errors
set.seed(1)
dat1 <- data.frame(x,y)
glm.1 <- glm(y~x)
cv.1 <- cv.glm(dat1, glm.1)
mat1[1,1] <- cv.1$delta[1]
glm.2 <- glm(y~poly(x,2))
cv.2 <- cv.glm(dat1, glm.2)
mat1[2,1] <- cv.2$delta[1]
glm.3 <- glm(y~poly(x,3))
cv.3 <- cv.glm(dat1, glm.3)
mat1[3,1] <- cv.3$delta[1]
glm.4 <- glm(y~poly(x,4))
cv.4 <- cv.glm(dat1, glm.4)
mat1[4,1] <- cv.4$delta[1]
# d) repeat with a different seed (LOOCV is deterministic, so same results)
set.seed(3000)
glm.1 <- glm(y~x)
cv.1 <- cv.glm(dat1, glm.1)
mat1[1,2] <- cv.1$delta[1]
glm.2 <- glm(y~poly(x,2))
cv.2 <- cv.glm(dat1, glm.2)
mat1[2,2] <- cv.2$delta[1]
glm.3 <- glm(y~poly(x,3))
cv.3 <- cv.glm(dat1, glm.3)
mat1[3,2] <- cv.3$delta[1]
glm.4 <- glm(y~poly(x,4))
cv.4 <- cv.glm(dat1, glm.4)
mat1[4,2] <- cv.4$delta[1]
mat1 # display errors
# The results are the same from each of the seeds. Results
# are identical because LOOCV uses the same MSE calculation
# process on all observations with a set n value.
# I.e. every single observation is evaluated n folds.
# e)
# The model that goes up to the 2nd power has the smallest
# LOOCV error. This can be what I expected because the
# original data had a clear quadratic shape. But I expected
# the 4th power model to do as well or better because, as
# the direct square of a quadratic, although it may overfit,
# the errors could have been smaller.
# f) coefficient significance for each fitted model
summary(glm.1)
# The coefficients do not mean much when we are fitting
# a quadratic with just the intercept and linear slope.
# The 1st power is significant at the 0.01 level.
summary(glm.2)
# This shows that all coefficients up to the 2nd power are
# statistically significant.
summary(glm.3)
summary(glm.4)
# These two show that the coefficients up to the 2nd power
# are statistically significant. The 3rd and 4th power are
# insignificant, which agrees with our conclusions from the
# cross-validation results.
# 2
# a) point estimate of the population mean of medv (Boston median home value)
library(MASS)
attach(Boston) # used in lecture; every name is like a vars
mu.hat <- mean(medv)
mu.hat
# b)
# standard error of the sample mean =
# sd(sample) / sqrt(observations count)
sd(medv)/sqrt(nrow(Boston))
# c)
# bootstrap for mu
# output should include SE of sample mean
# statistic function for boot(): mean of the resampled observations
fun <- function(data, index) {
  mu <- mean(data[index])
  return (mu)
}
library(boot)
boot(medv, fun, R = 1000)
# SE = 0.4033299
# The bootstrap estimated standard error of the sample mean
# is very close to the calculated SE from the previous.
# d)
# approx 95% confidence interval using
# [mu.hat-2SE(mu.hat) , mu.hat+2SE(mu.hat)].
c(mu.hat-2*0.4033299 , mu.hat+2*0.4033299)
t.test(medv)$conf.int
# The 95% confidence intervals from bootstrapping and
# the t.test() method are very close.
# e) point estimate of the median of medv
med.hat <- median(medv)
med.hat
# f) bootstrap SE of the median (no simple closed-form SE exists)
fun <- function(data, index) {
  med <- median(data[index])
  return (med)
}
boot(medv, fun, R = 1000)
# The estimated standard error of the median using bootstrap
# is reasonably small. The median is equal to the value we
# calculated previously.
# g) point estimate of the 10th percentile of medv
quant.hat <- quantile(medv, 0.1)
quant.hat
# h) bootstrap SE of the 10th percentile
fun <- function(data, index) {
  quant <- quantile(data[index], 0.1)
  return (quant)
}
boot(medv, fun, 1000)
# The estimated standard error of the 10th percentile using
# bootstrap is again reasonably small. The 10th percentile
# is equal to the value we calculated previously.
# 3
# a) split College into an 80% training set and a 20% test set
library(ISLR)
data("College")
head(College)
attach(College)
# split data to training and testing
collegeTrain <- College[1:(0.8*nrow(College)),] # 80% for train
collegeTest <- College[(0.8*nrow(College)+1):nrow(College),] # 20% for test
# b) least squares fit of Apps on all predictors, and its test MSE
lm.1 <- lm(Apps~., data = collegeTrain)
summary(lm.1)
mse.1 <- mean((predict(lm.1, collegeTest)-collegeTest$Apps)^2)
mse.1 # test error
# c)
# ridge regression with CV choosing lambda
x <- model.matrix(Apps~.,data=College)[,-1] # take out intercept
xtrain <- model.matrix(Apps~.,data=collegeTrain)[,-1]
xtest <- model.matrix(Apps~.,data=collegeTest)[,-1]
ytrain <- Apps[1:(0.8*nrow(College))]
# Bug fix: ytest was commented out in the original but is used below when
# computing mse.2 and mse.3, which would stop the script with
# "object 'ytest' not found".
ytest <- Apps[(0.8*nrow(College)+1):nrow(College)]
library(glmnet)
#set.seed(1)
cv.ridge <- cv.glmnet(xtrain, ytrain, alpha = 0) # 0 for ridge
cv.lambda <- cv.ridge$lambda.min # get smallest lambda (tuning param)
# plot(cv.ridge)
ridge <- glmnet(xtrain, ytrain, alpha = 0, lambda = cv.lambda)
summary(ridge)
pred <- predict(ridge, s = cv.lambda, newx = xtest)
mse.2 <- mean((pred-ytest)^2)
mse.2 # test error
# d) lasso with CV-chosen lambda
set.seed(1)
cv.lasso <- cv.glmnet(xtrain, ytrain, alpha = 1) # 1 for lasso
cv.lambda <- cv.lasso$lambda.min # get smallest lambda (tuning param)
# plot(cv.lasso)
lasso <- glmnet(xtrain, ytrain, alpha = 1, lambda = cv.lambda)
summary(lasso)
pred <- predict(lasso, s = cv.lambda, newx = xtest)
mse.3 <- mean((pred-ytest)^2)
mse.3 # test error
lassocoeffs <- predict(lasso, s = cv.lambda, type = "coefficients")
summary(lassocoeffs)
lassocoeffs[lassocoeffs!=0] # nonzero lasso coeffs
# g)
# In terms of test error there is not much of a huge
# difference. Although we do see that the error from the
# ridge regression is smaller than the least squares' and
# lasso's. That is, it is better at prediction that other
# models; however, we know that, like the LASSO, the
# coefficients shrink to zero due to regularization and
# it is "impossible" to interpret our results. The problem
# with the lasso is that it works for low dimension models.
# The College data we dealt with will not be considered
# low dimensional data, but it is not high dimensional.
| /ECON484/hw3.R | no_license | BrianKang98/UW_ECON | R | false | false | 5,779 | r | # HW3
# 1
# a)
set.seed(1)
x <- rnorm(100)
y <- x-2*x^2+rnorm(100)
# n=100; p=2; model: y=x-2x^2+e (the error term)
# b)
plot(x,y)
# We see a negative parabolic curve. As we expect from
# normal distributions, the center is at 0 and there are
# more data points clustered around the center than the
# sparse few on each end of the tails.
# c)
library(boot)
mat1 <- matrix(data = NA, nrow = 4, ncol=2) # record errors
set.seed(1)
dat1 <- data.frame(x,y)
glm.1 <- glm(y~x)
cv.1 <- cv.glm(dat1, glm.1)
mat1[1,1] <- cv.1$delta[1]
glm.2 <- glm(y~poly(x,2))
cv.2 <- cv.glm(dat1, glm.2)
mat1[2,1] <- cv.2$delta[1]
glm.3 <- glm(y~poly(x,3))
cv.3 <- cv.glm(dat1, glm.3)
mat1[3,1] <- cv.3$delta[1]
glm.4 <- glm(y~poly(x,4))
cv.4 <- cv.glm(dat1, glm.4)
mat1[4,1] <- cv.4$delta[1]
# d)
set.seed(3000)
glm.1 <- glm(y~x)
cv.1 <- cv.glm(dat1, glm.1)
mat1[1,2] <- cv.1$delta[1]
glm.2 <- glm(y~poly(x,2))
cv.2 <- cv.glm(dat1, glm.2)
mat1[2,2] <- cv.2$delta[1]
glm.3 <- glm(y~poly(x,3))
cv.3 <- cv.glm(dat1, glm.3)
mat1[3,2] <- cv.3$delta[1]
glm.4 <- glm(y~poly(x,4))
cv.4 <- cv.glm(dat1, glm.4)
mat1[4,2] <- cv.4$delta[1]
mat1 # display errors
# The results are the same from each of the seeds. Results
# are identical because LOOCV uses the same MSE calculation
# process on all observations with a set n value.
# I.e. every single observation is evaluated n folds.
# e)
# The model that goes up to the 2nd power has the smallest
# LOOCV error. This can be what I expected because the
# original data had a clear quadratic shape. But I expected
# the 4th power model to do as well or better because, as
# the direct square of a quadratic, although it may overfit,
# the errors could have been smaller.
# f)
summary(glm.1)
# The coefficients do not mean much when we are fitting
# a quadratic with just the intercept and linear slope.
# The 1st power is significant at the 0.01 level.
summary(glm.2)
# This shows that all coefficients up to the 2nd power are
# statistically significant.
summary(glm.3)
summary(glm.4)
# These two show that the coefficients up to the 2nd power
# are statistically significant. The 3rd and 4th power are
# insignificant, which agrees with our conclusions from the
# cross-validation results.
# 2
# a)
library(MASS)
attach(Boston) # used in lecture; every name is like a vars
mu.hat <- mean(medv)
mu.hat
# b)
# standard error of the sample mean =
# sd(sample) / sqrt(observations count)
sd(medv)/sqrt(nrow(Boston))
# c)
# bootstrap for mu
# output should incluse SE of sample mean
fun <- function(data, index) {
mu <- mean(data[index])
return (mu)
}
library(boot)
boot(medv, fun, R = 1000)
# SE = 0.4033299
# The bootstrap estimated standard error of the samle mean
# is very close to the calculated SE from the previous.
# d)
# approx 95% confidence interval using
# [mu.hat-2SE(mu.hat) , mu.hat+2SE(mu.hat)].
c(mu.hat-2*0.4033299 , mu.hat+2*0.4033299)
t.test(medv)$conf.int
# The 95% confidence intervals from bootstrapping and
# the t.test() method are very close.
# e)
med.hat <- median(medv)
med.hat
# f)
fun <- function(data, index) {
med <- median(data[index])
return (med)
}
boot(medv, fun, R = 1000)
# The estimated standard error of the median using bootstrap
# is reasonably small. The median is equal to the value we
# calculated previously.
# g)
quant.hat <- quantile(medv, 0.1)
quant.hat
# h)
fun <- function(data, index) {
quant <- quantile(data[index], 0.1)
return (quant)
}
boot(medv, fun, 1000)
# The estimated standard error of the 10th percentile using
# bootstrap is again reasonably small. The 10th percentile
# is equal to the value we calculated preciously.
# 3
# a)
library(ISLR)
data("College")
head(College)
attach(College)
# split data to training and testing
collegeTrain <- College[1:(0.8*nrow(College)),] # 80% for train
collegeTest <- College[(0.8*nrow(College)+1):nrow(College),] # 20% for test
# b)
lm.1 <- lm(Apps~., data = collegeTrain)
summary(lm.1)
mse.1 <- mean((predict(lm.1, collegeTest)-collegeTest$Apps)^2)
mse.1 # test error
# c)
# ridge regression with CV choosing lambda
x <- model.matrix(Apps~.,data=College)[,-1] # take out intercept
xtrain <- model.matrix(Apps~.,data=collegeTrain)[,-1]
xtest <- model.matrix(Apps~.,data=collegeTest)[,-1]
ytrain <- Apps[1:(0.8*nrow(College))]
# ytest <- Apps[(0.8*nrow(College)+1):nrow(College)]
library(glmnet)
#set.seed(1)
cv.ridge <- cv.glmnet(xtrain, ytrain, alpha = 0) # 0 for ridge
cv.lambda <- cv.ridge$lambda.min # get smallest lambda (tuning param)
# plot(cv.ridge)
ridge <- glmnet(xtrain, ytrain, alpha = 0, lambda = cv.lambda)
summary(ridge)
pred <- predict(ridge, s = cv.lambda, newx = xtest)
mse.2 <- mean((pred-ytest)^2)
mse.2 # test error
# d)
set.seed(1)
cv.lasso <- cv.glmnet(xtrain, ytrain, alpha = 1) # 1 for lasso
cv.lambda <- cv.lasso$lambda.min # get smallest lambda (tuning param)
# plot(cv.lasso)
lasso <- glmnet(xtrain, ytrain, alpha = 1, lambda = cv.lambda)
summary(lasso)
pred <- predict(lasso, s = cv.lambda, newx = xtest)
mse.3 <- mean((pred-ytest)^2)
mse.3 # test error
lassocoeffs <- predict(lasso, s = cv.lambda, type = "coefficients")
summary(lassocoeffs)
lassocoeffs[lassocoeffs!=0] # nonzero lasso coeffs
# g)
# In terms of test error there is not much of a huge
# difference. Although we do see that the error from the
# ridge regression is smaller than the least squares' and
# lasso's. That is, it is better at prediction that other
# models; however, we know that, like the LASSO, the
# coefficients shrink to zero due to regularization and
# it is "impossible" to interpret our results. The problem
# with the lasso is that it works for low dimension models.
# The College data we dealt with will not be considered
# low dimensional data, but it is not high dimensional.
|
for(k in 1:30) {
iter <- 1
tic <- proc.time()
shortcomp1 <- comp.Dists(LDAlist.ss[[k]],LDAlist.sl[[k]])
maxtomin.shortshortgini <- order(LDAlist.ss[[k]]$gtunif.gini,decreasing=T)
maxtomin.shortlonggini <- order(LDAlist.sl[[k]]$gtunif.gini, decreasing=T)
shortshortindex <- indexs[which(LDAlist.ss[[k]]$gtunif.gini > 0.40)]
shortlongindex <- indexs[which(LDAlist.sl[[k]]$gtunif.gini > 0.52)]
for(i in 1:20) {
tic1 <- proc.time()
for(j in 1:20) {
######################################################################
# Match on taub
saveTAUBshort[k,iter,] <- c(i,j,ktaubprob(30,shortcomp1$d1[shortcomp1$d1$topic==i,],
shortcomp1$d2[shortcomp1$d2$topic==j,])$taub)
######################################################################
######################################################################
# Match on JS divergence
saveJSshort[k,iter,] <- c(i,j,JSdiverge(shortcomp1$d1$beta[shortcomp1$d1$topic==i],
shortcomp1$d2$beta[shortcomp1$d2$topic==j]))
######################################################################
######################################################################
# Match on JS divergence, remove non-informative topics
if(is.element(i,shortshortindex) && is.element(j,shortlongindex)) {
saveJSRemove[k,iter,] <- saveJSshort[k,iter,]
}
######################################################################
iter <- iter + 1
#print(iter)
}
toc1 <- proc.time()
print(paste0("April ",k," topic ",i," Time:",round(toc1[3]-tic1[3],2)))
# Match on gini
matchGINI[k,i,] <- c(maxtomin.shortshortgini[i],maxtomin.shortlonggini[i],JSdiverge(shortcomp1$d1$beta[shortcomp1$d1$topic==maxtomin.shortshortgini[i]],shortcomp1$d2$beta[shortcomp1$d2$topic==maxtomin.shortlonggini[i]]))
}
matchTAUB[k,,] <- matchJS(saveTAUBshort[k,,],min=F)
matchJSdiverge[k,,] <- matchJS(saveJSshort[k,,],min=T)
#inter <- saveJSRemove[k,1:(min(which(is.na(saveJSRemove[k,,])))-1),]
matchJSdivergeRemove[[k]] <- matchJS(saveJSRemove[k,!is.na(saveJSRemove[k,,1]),],min=T)
toc <- proc.time()
print(paste0("One loop has finished running, loop:",k))
print(round(toc[3]-tic[3],2))
timesfordays[k] <- toc[3]-tic[3]
}
# This is returning me an error right now, need to just present something
# Takes around an hour to run
# Now I need to match these kendall taus
# double checking probabilities on articles
# Look up the articles from April 15th
april15th
a <- which(april15th$url=="https://www.nytimes.com/2019/04/15/podcasts/the-daily/julian-assange-wikileaks-arrest.html")
posterior(LDAlist.ss[[15]]$LDAmodel)$topics[a,]
posterior(LDAlist.sl[[15]]$LDAmodel)$topics[a,]*100
b <- which(april15th$url=="https://www.cnn.com/2019/04/15/us/micah-herndon-boston-marathon-crawl-finish-trnd/index.html")
posterior(LDAlist.ss[[15]]$LDAmodel)$topics[b,]*100
posterior(LDAlist.sl[[15]]$LDAmodel)$topics[b,]*100
c <- which(april15th$url == "http://www.msnbc.com/rachel-maddow-show/sanders-congress-not-smart-enough-understand-trumps-tax-returns")
posterior(LDAlist.ss[[15]]$LDAmodel)$topics[c,]*100
posterior(LDAlist.sl[[15]]$LDAmodel)$topics[c,]*100
d <- which(april15th$url == "https://www.breitbart.com/economy/2019/04/14/feds-company-faked-white-collar-jobs-1900-chinese-migrants/")
posterior(LDAlist.ss[[15]]$LDAmodel)$topics[d,]*100
posterior(LDAlist.sl[[15]]$LDAmodel)$topics[d,]*100
e <- which(april15th$url == "https://www.foxnews.com/world/american-witness-describes-notre-dame-burn-all-my-insides-just-fell-apart")
posterior(LDAlist.ss[[15]]$LDAmodel)$topics[e,]*100
posterior(LDAlist.sl[[15]]$LDAmodel)$topics[e,]*100
f <- which(april15th$url == "https://www.nytimes.com/2019/04/15/nyregion/newyorktoday/nyc-news-city-hall-station.html")
sort(posterior(LDAlist.ss[[15]]$LDAmodel)$topics[f,]*100,decreasing = T)
sort(posterior(LDAlist.sl[[15]]$LDAmodel)$topics[f,]*100,decreasing = T)
# Try to get the matches printed out, get efficient tables
# Now lists all of the arrays together
allmatches <- list(taub=matchTAUB,gini=matchGINI,jsd=matchJSdiverge,jsdr=matchJSdivergeRemove)
save(allmatches,file="~/Documents/ATD group/LDAmodelsApril/allmatches.Rdata")
######################################
# Creating a 2x2 contingency table
######################################
TAUB.contin <- array(dim=c(30,20,2,2))
GINI.contin <- array(dim=c(30,20,2,2))
JSd.contin <- array(dim=c(30,20,2,2))
TAUB.day.contin <- array(dim=c(30,2,2))
GINI.day.contin <- array(dim=c(30,2,2))
JSd.day.contin <- array(dim=c(30,2,2))
totarticles <- c()
for(d in 1:30) { # For each day
doctopic1 <- posterior(LDAlist.ss[[d]]$LDAmodel)$topics
doctopic2 <- posterior(LDAlist.sl[[d]]$LDAmodel)$topics
totarticles <- dim(doctopic1)[1]
tic <- proc.time()
for(i in 1:20) { # For each combination of topics
# Find out the which statement
TAUB.contin[d,i,,] <- matrix(c(0,0,0,0),ncol=2,nrow=2)
GINI.contin[d,i,,] <- TAUB.contin[d,i,,]
JSd.contin[d,i,,] <- TAUB.contin[d,i,,]
TAUB.contin[d,i,1,1] <- length(which(doctopic1[,allmatches$taub[d,i,1]] >= 0.5 & doctopic2[,allmatches$taub[d,i,2]] >= 0.5))
TAUB.contin[d,i,1,2] <- length(which(doctopic1[,allmatches$taub[d,i,1]] < 0.5 & doctopic2[,allmatches$taub[d,i,2]] >= 0.5))
TAUB.contin[d,i,2,1] <- length(which(doctopic1[,allmatches$taub[d,i,1]] >= 0.5 & doctopic2[,allmatches$taub[d,i,2]] < 0.5))
TAUB.contin[d,i,2,2] <- length(which(doctopic1[,allmatches$taub[d,i,1]] < 0.5 & doctopic2[,allmatches$taub[d,i,2]] < 0.5))
GINI.contin[d,i,1,1] <- length(which(doctopic1[,allmatches$gini[d,i,1]] >= 0.5 & doctopic2[,allmatches$gini[d,i,2]] >= 0.5))
GINI.contin[d,i,1,2] <- length(which(doctopic1[,allmatches$gini[d,i,1]] < 0.5 & doctopic2[,allmatches$gini[d,i,2]] >= 0.5))
GINI.contin[d,i,2,1] <- length(which(doctopic1[,allmatches$gini[d,i,1]] >= 0.5 & doctopic2[,allmatches$gini[d,i,2]] < 0.5))
GINI.contin[d,i,2,2] <- length(which(doctopic1[,allmatches$gini[d,i,1]] < 0.5 & doctopic2[,allmatches$gini[d,i,2]] < 0.5))
JSd.contin[d,i,1,1] <- length(which(doctopic1[,allmatches$jsd[d,i,1]] >= 0.5 & doctopic2[,allmatches$jsd[d,i,2]] >= 0.5))
JSd.contin[d,i,1,2] <- length(which(doctopic1[,allmatches$jsd[d,i,1]] < 0.5 & doctopic2[,allmatches$jsd[d,i,2]] >= 0.5))
JSd.contin[d,i,2,1] <- length(which(doctopic1[,allmatches$jsd[d,i,1]] >= 0.5 & doctopic2[,allmatches$jsd[d,i,2]] < 0.5))
JSd.contin[d,i,2,2] <- length(which(doctopic1[,allmatches$jsd[d,i,1]] < 0.5 & doctopic2[,allmatches$jsd[d,i,2]] < 0.5))
}
toc <- proc.time()
TAUB.day.contin[d,,] <- apply(TAUB.contin[d,,,],c(2,3),sum)
GINI.day.contin[d,,] <- apply(GINI.contin[d,,,],c(2,3),sum)
JSd.day.contin[d,,] <- apply(JSd.contin[d,,,],c(2,3),sum)
print(paste("Day",d,(toc[3]-tic[3])))
}
# Time to build a function for creating a contingency table for these days
build.contin <- function(matched.df, p1.doctopic, p2.doctopic, cutoff=FALSE, cutoff.num=10, cutoff.prop=FALSE, cutoff.prop.val=0.5, cutoff.value.bool=FALSE, cutoff.value=0.5,threshold=0.1, plurality=FALSE, plurality.tol=0.2) {
# Purpose:
# This function takes in a matched data frame of 2 topic distributions and outputs a contingency table of correctly matched topics to incorrect matches
# Inputs:
# matched.df: A matched (ordered) data frame (K by 3) with the first two columns representing matches and the third column representing the probability
# p1.doctopic, p2.doctopic: A document-topic data frame (N by K) with the rows representing the topic distribution for a certain document
# cutoff, cutoff.num: Cutoff is a boolean representing of a number top matched from the K topics (less than K of course)
# cutoff.value: a value based on the third column of the data frame where it is higher than a certain value
# cutoff.prop, cutoff.prop.val: Cutoff.prop is a boolean representing the proportion of top matched K topics to be accounted for
# threshold: A value between 0 and 1 that represents the candidate set of topics to be considered when calculating numbers
# plurality, plurality.tol: Plurality allows for the maximum topic to be chosen in addition to a small tolerance around the maximum to be considered
contin <- matrix(NA,nrow=2,ncol=2)
K <- nrow(matched.df)
N <- nrow(p1.doctopic)
# Do I want a cutoff based on the distribution of the values in the third column? Like greater than the mean?
if(cutoff) { topmatch <- cutoff.num }
else if(cutoff.prop){ topmatch <- round(K*cutoff.prop.val,0) }
else if(cutoff.value.bool) {topmatch <- length(which(matched.df[,3] >= cutoff.value))}
else{ topmatch <- K }
contin <- replicate(topmatch,contin)
contin.percent <- contin
# Instead I'll loop through the top topic matches
# I'm probably going to have to go by row through row
max1 <- apply(p1.doctopic,1,which.max)
max2 <- apply(p2.doctopic,1,which.max)
matches <- matrix(rep(0,N*topmatch),nrow=N, ncol=topmatch)
if(plurality==T) {
# Make a new matrix of N by k indicating larger number
# So find numbers that are close
ll1 <- apply(p1.doctopic,1,max)
ll2 <- apply(p2.doctopic,1,max)
plu.max1 <- which(p1.doctopic >= ll1*(1-plurality.tol),arr.ind = T)
plu.max2 <- which(p2.doctopic >= ll2*(1-plurality.tol),arr.ind = T)
for(k in 1:topmatch) {
# Filter out rows that are less than the threshold for this combination of topics
# Need to threshold later
# Find all the rows in each set that respect the threshold
# The ones that are combined to hold the threshold
# Can be double counting here
# Need to determine a new max list
thres <- which(p1.doctopic[,matched.df[k,1]] >= threshold | p2.doctopic[,matched.df[k,2]] >= threshold)
matchvec <- intersect(plu.max1[plu.max1[,2]==matched.df[k,1],1],plu.max2[plu.max2[,2]==matched.df[k,2],1])
contin[1,1,k] <- length(matchvec)
#print(contin[1,1,k])
contin[1,2,k] <- length(plu.max2[plu.max2[,2]==matched.df[k,2],1])-contin[1,1,k]
#print(contin[1,2,k])
contin[2,1,k] <- length(plu.max1[plu.max1[,2]==matched.df[k,1],1])-contin[1,1,k]
#print(contin[2,1,k])
contin[2,2,k] <- length(thres)-contin[1,1,k]-contin[1,2,k]-contin[2,1,k]
#print(contin[2,2,k])
matches[matchvec,k] <- rep(1,contin[1,1,k])
contin.percent[,,k] <- contin[,,k]/sum(contin[,,k])
}
}
else {
for(k in 1:topmatch) {
thres <- which(p1.doctopic[,matched.df[k,1]] > threshold | p2.doctopic[,matched.df[k,2]] > threshold )
contin[1,1,k] <- length(which(matched.df[k,1] == max1[thres] & matched.df[k,2] == max2[thres]))
contin[1,2,k] <- length(which(matched.df[k,1] == max1[thres] & matched.df[k,2] != max2[thres]))
contin[2,1,k] <- length(which(matched.df[k,1] != max1[thres] & matched.df[k,2] == max2[thres]))
contin[2,2,k] <- length(which(matched.df[k,1] != max1[thres] & matched.df[k,2] != max2[thres]))
contin.percent[,,k] <- contin[,,k]/sum(contin[,,k])
}
}
return (list(matches=matches,table=contin,table.percent=contin.percent))
}
hi<- build.contin(matched.df=allmatches$taub[1,,], p1.doctopic=doctopic1, p2.doctopic=doctopic2, cutoff=TRUE, cutoff.num=10, cutoff.prop=FALSE, cutoff.prop.val=0.5, cutoff.value.bool=FALSE, cutoff.value=0.5,threshold=0.1, plurality=TRUE, plurality.tol=0.2)
which(rowSums(hi$matches)>1) # no dupes
# Lets go through all the days and see how much there is
save.cutoff10.plural0.2 <- array(dim=c(6,30,2,2,10))
numdoublecount <- matrix(NA,nrow=30,ncol=6)
for(i in 1:30) {
doctopic1 <- posterior(LDAlist.ss[[i]]$LDAmodel)$topics
doctopic2 <- posterior(LDAlist.sl[[i]]$LDAmodel)$topics
f <- build.contin(matched.df = allmatches$taub[i,,],p1.doctopic = doctopic1, p2.doctopic = doctopic2, cutoff=TRUE, cutoff.num = 10, plurality=TRUE)
save.cutoff10.plural0.2[1,i,,,] <- f$table
numdoublecount[i,1] <- length(f$matches[rowSums(f$matches) > 1,])
# Taub w/o plurality
f2 <- build.contin(matched.df = allmatches$taub[i,,],p1.doctopic = doctopic1, p2.doctopic = doctopic2, cutoff=TRUE, cutoff.num = 10, plurality = FALSE)
save.cutoff10.plural0.2[2,i,,,] <- f2$table
numdoublecount[i,2] <- length(f2$matches[rowSums(f2$matches) > 1,])
# Gini w/ plurality
f3 <- build.contin(matched.df = allmatches$gini[i,,],p1.doctopic = doctopic1, p2.doctopic = doctopic2, cutoff=TRUE, cutoff.num = 10, plurality = TRUE)
save.cutoff10.plural0.2[3,i,,,] <- f3$table
numdoublecount[i,3] <- length(f3$matches[rowSums(f3$matches) > 1,])
# Gini w/o plurality
f4 <- build.contin(matched.df = allmatches$gini[i,,],p1.doctopic = doctopic1, p2.doctopic = doctopic2, cutoff=TRUE, cutoff.num = 10, plurality = FALSE)
save.cutoff10.plural0.2[4,i,,,] <- f4$table
numdoublecount[i,4] <- length(f4$matches[rowSums(f4$matches) > 1,])
# Jsd w/ plurality
f5 <- build.contin(matched.df = allmatches$jsd[i,,],p1.doctopic = doctopic1, p2.doctopic = doctopic2, cutoff=TRUE, cutoff.num = 10, plurality = TRUE)
save.cutoff10.plural0.2[5,i,,,] <- f5$table
numdoublecount[i,5] <- length(f5$matches[rowSums(f5$matches) > 1,])
# Jsd w/o plurality
f6 <- build.contin(matched.df = allmatches$jsd[i,,],p1.doctopic = doctopic1, p2.doctopic = doctopic2, cutoff=TRUE, cutoff.num = 10, plurality = FALSE)
save.cutoff10.plural0.2[6,i,,,] <- f6$table
numdoublecount[i,6] <- length(f6$matches[rowSums(f6$matches) > 1,])
}
apply(save.cutoff10.plural0.2[1,,,,],c(2,3),sum)
apply(save.cutoff10.plural0.2[2,,,,],c(2,3),sum)
apply(save.cutoff10.plural0.2[3,,,,],c(2,3),sum)
apply(save.cutoff10.plural0.2[4,,,,],c(2,3),sum)
apply(save.cutoff10.plural0.2[5,,,,],c(2,3),sum)
apply(save.cutoff10.plural0.2[6,,,,],c(2,3),sum) | /Methods/Matching_with_contingency_tables.R | no_license | VT-ATD/Modeling | R | false | false | 13,935 | r | for(k in 1:30) {
iter <- 1
tic <- proc.time()
shortcomp1 <- comp.Dists(LDAlist.ss[[k]],LDAlist.sl[[k]])
maxtomin.shortshortgini <- order(LDAlist.ss[[k]]$gtunif.gini,decreasing=T)
maxtomin.shortlonggini <- order(LDAlist.sl[[k]]$gtunif.gini, decreasing=T)
shortshortindex <- indexs[which(LDAlist.ss[[k]]$gtunif.gini > 0.40)]
shortlongindex <- indexs[which(LDAlist.sl[[k]]$gtunif.gini > 0.52)]
for(i in 1:20) {
tic1 <- proc.time()
for(j in 1:20) {
######################################################################
# Match on taub
saveTAUBshort[k,iter,] <- c(i,j,ktaubprob(30,shortcomp1$d1[shortcomp1$d1$topic==i,],
shortcomp1$d2[shortcomp1$d2$topic==j,])$taub)
######################################################################
######################################################################
# Match on JS divergence
saveJSshort[k,iter,] <- c(i,j,JSdiverge(shortcomp1$d1$beta[shortcomp1$d1$topic==i],
shortcomp1$d2$beta[shortcomp1$d2$topic==j]))
######################################################################
######################################################################
# Match on JS divergence, remove non-informative topics
if(is.element(i,shortshortindex) && is.element(j,shortlongindex)) {
saveJSRemove[k,iter,] <- saveJSshort[k,iter,]
}
######################################################################
iter <- iter + 1
#print(iter)
}
toc1 <- proc.time()
print(paste0("April ",k," topic ",i," Time:",round(toc1[3]-tic1[3],2)))
# Match on gini
matchGINI[k,i,] <- c(maxtomin.shortshortgini[i],maxtomin.shortlonggini[i],JSdiverge(shortcomp1$d1$beta[shortcomp1$d1$topic==maxtomin.shortshortgini[i]],shortcomp1$d2$beta[shortcomp1$d2$topic==maxtomin.shortlonggini[i]]))
}
matchTAUB[k,,] <- matchJS(saveTAUBshort[k,,],min=F)
matchJSdiverge[k,,] <- matchJS(saveJSshort[k,,],min=T)
#inter <- saveJSRemove[k,1:(min(which(is.na(saveJSRemove[k,,])))-1),]
matchJSdivergeRemove[[k]] <- matchJS(saveJSRemove[k,!is.na(saveJSRemove[k,,1]),],min=T)
toc <- proc.time()
print(paste0("One loop has finished running, loop:",k))
print(round(toc[3]-tic[3],2))
timesfordays[k] <- toc[3]-tic[3]
}
# This is returning me an error right now, need to just present something
# Takes around an hour to run
# Now I need to match these kendall taus
# double checking probabilities on articles
# Look up the articles from April 15th
april15th
a <- which(april15th$url=="https://www.nytimes.com/2019/04/15/podcasts/the-daily/julian-assange-wikileaks-arrest.html")
posterior(LDAlist.ss[[15]]$LDAmodel)$topics[a,]
posterior(LDAlist.sl[[15]]$LDAmodel)$topics[a,]*100
b <- which(april15th$url=="https://www.cnn.com/2019/04/15/us/micah-herndon-boston-marathon-crawl-finish-trnd/index.html")
posterior(LDAlist.ss[[15]]$LDAmodel)$topics[b,]*100
posterior(LDAlist.sl[[15]]$LDAmodel)$topics[b,]*100
c <- which(april15th$url == "http://www.msnbc.com/rachel-maddow-show/sanders-congress-not-smart-enough-understand-trumps-tax-returns")
posterior(LDAlist.ss[[15]]$LDAmodel)$topics[c,]*100
posterior(LDAlist.sl[[15]]$LDAmodel)$topics[c,]*100
d <- which(april15th$url == "https://www.breitbart.com/economy/2019/04/14/feds-company-faked-white-collar-jobs-1900-chinese-migrants/")
posterior(LDAlist.ss[[15]]$LDAmodel)$topics[d,]*100
posterior(LDAlist.sl[[15]]$LDAmodel)$topics[d,]*100
e <- which(april15th$url == "https://www.foxnews.com/world/american-witness-describes-notre-dame-burn-all-my-insides-just-fell-apart")
posterior(LDAlist.ss[[15]]$LDAmodel)$topics[e,]*100
posterior(LDAlist.sl[[15]]$LDAmodel)$topics[e,]*100
f <- which(april15th$url == "https://www.nytimes.com/2019/04/15/nyregion/newyorktoday/nyc-news-city-hall-station.html")
sort(posterior(LDAlist.ss[[15]]$LDAmodel)$topics[f,]*100,decreasing = T)
sort(posterior(LDAlist.sl[[15]]$LDAmodel)$topics[f,]*100,decreasing = T)
# Try to get the matches printed out, get efficient tables
# Now lists all of the arrays together
allmatches <- list(taub=matchTAUB,gini=matchGINI,jsd=matchJSdiverge,jsdr=matchJSdivergeRemove)
save(allmatches,file="~/Documents/ATD group/LDAmodelsApril/allmatches.Rdata")
######################################
# Creating a 2x2 contingency table
######################################
TAUB.contin <- array(dim=c(30,20,2,2))
GINI.contin <- array(dim=c(30,20,2,2))
JSd.contin <- array(dim=c(30,20,2,2))
TAUB.day.contin <- array(dim=c(30,2,2))
GINI.day.contin <- array(dim=c(30,2,2))
JSd.day.contin <- array(dim=c(30,2,2))
totarticles <- c()
for(d in 1:30) { # For each day
doctopic1 <- posterior(LDAlist.ss[[d]]$LDAmodel)$topics
doctopic2 <- posterior(LDAlist.sl[[d]]$LDAmodel)$topics
totarticles <- dim(doctopic1)[1]
tic <- proc.time()
for(i in 1:20) { # For each combination of topics
# Find out the which statement
TAUB.contin[d,i,,] <- matrix(c(0,0,0,0),ncol=2,nrow=2)
GINI.contin[d,i,,] <- TAUB.contin[d,i,,]
JSd.contin[d,i,,] <- TAUB.contin[d,i,,]
TAUB.contin[d,i,1,1] <- length(which(doctopic1[,allmatches$taub[d,i,1]] >= 0.5 & doctopic2[,allmatches$taub[d,i,2]] >= 0.5))
TAUB.contin[d,i,1,2] <- length(which(doctopic1[,allmatches$taub[d,i,1]] < 0.5 & doctopic2[,allmatches$taub[d,i,2]] >= 0.5))
TAUB.contin[d,i,2,1] <- length(which(doctopic1[,allmatches$taub[d,i,1]] >= 0.5 & doctopic2[,allmatches$taub[d,i,2]] < 0.5))
TAUB.contin[d,i,2,2] <- length(which(doctopic1[,allmatches$taub[d,i,1]] < 0.5 & doctopic2[,allmatches$taub[d,i,2]] < 0.5))
GINI.contin[d,i,1,1] <- length(which(doctopic1[,allmatches$gini[d,i,1]] >= 0.5 & doctopic2[,allmatches$gini[d,i,2]] >= 0.5))
GINI.contin[d,i,1,2] <- length(which(doctopic1[,allmatches$gini[d,i,1]] < 0.5 & doctopic2[,allmatches$gini[d,i,2]] >= 0.5))
GINI.contin[d,i,2,1] <- length(which(doctopic1[,allmatches$gini[d,i,1]] >= 0.5 & doctopic2[,allmatches$gini[d,i,2]] < 0.5))
GINI.contin[d,i,2,2] <- length(which(doctopic1[,allmatches$gini[d,i,1]] < 0.5 & doctopic2[,allmatches$gini[d,i,2]] < 0.5))
JSd.contin[d,i,1,1] <- length(which(doctopic1[,allmatches$jsd[d,i,1]] >= 0.5 & doctopic2[,allmatches$jsd[d,i,2]] >= 0.5))
JSd.contin[d,i,1,2] <- length(which(doctopic1[,allmatches$jsd[d,i,1]] < 0.5 & doctopic2[,allmatches$jsd[d,i,2]] >= 0.5))
JSd.contin[d,i,2,1] <- length(which(doctopic1[,allmatches$jsd[d,i,1]] >= 0.5 & doctopic2[,allmatches$jsd[d,i,2]] < 0.5))
JSd.contin[d,i,2,2] <- length(which(doctopic1[,allmatches$jsd[d,i,1]] < 0.5 & doctopic2[,allmatches$jsd[d,i,2]] < 0.5))
}
toc <- proc.time()
TAUB.day.contin[d,,] <- apply(TAUB.contin[d,,,],c(2,3),sum)
GINI.day.contin[d,,] <- apply(GINI.contin[d,,,],c(2,3),sum)
JSd.day.contin[d,,] <- apply(JSd.contin[d,,,],c(2,3),sum)
print(paste("Day",d,(toc[3]-tic[3])))
}
# Time to build a function for creating a contingency table for these days
build.contin <- function(matched.df, p1.doctopic, p2.doctopic, cutoff=FALSE, cutoff.num=10, cutoff.prop=FALSE, cutoff.prop.val=0.5, cutoff.value.bool=FALSE, cutoff.value=0.5,threshold=0.1, plurality=FALSE, plurality.tol=0.2) {
# Purpose:
# This function takes in a matched data frame of 2 topic distributions and outputs a contingency table of correctly matched topics to incorrect matches
# Inputs:
# matched.df: A matched (ordered) data frame (K by 3) with the first two columns representing matches and the third column representing the probability
# p1.doctopic, p2.doctopic: A document-topic data frame (N by K) with the rows representing the topic distribution for a certain document
# cutoff, cutoff.num: Cutoff is a boolean representing of a number top matched from the K topics (less than K of course)
# cutoff.value: a value based on the third column of the data frame where it is higher than a certain value
# cutoff.prop, cutoff.prop.val: Cutoff.prop is a boolean representing the proportion of top matched K topics to be accounted for
# threshold: A value between 0 and 1 that represents the candidate set of topics to be considered when calculating numbers
# plurality, plurality.tol: Plurality allows for the maximum topic to be chosen in addition to a small tolerance around the maximum to be considered
contin <- matrix(NA,nrow=2,ncol=2)
K <- nrow(matched.df)
N <- nrow(p1.doctopic)
# Do I want a cutoff based on the distribution of the values in the third column? Like greater than the mean?
if(cutoff) { topmatch <- cutoff.num }
else if(cutoff.prop){ topmatch <- round(K*cutoff.prop.val,0) }
else if(cutoff.value.bool) {topmatch <- length(which(matched.df[,3] >= cutoff.value))}
else{ topmatch <- K }
contin <- replicate(topmatch,contin)
contin.percent <- contin
# Instead I'll loop through the top topic matches
# I'm probably going to have to go by row through row
max1 <- apply(p1.doctopic,1,which.max)
max2 <- apply(p2.doctopic,1,which.max)
matches <- matrix(rep(0,N*topmatch),nrow=N, ncol=topmatch)
if(plurality==T) {
# Make a new matrix of N by k indicating larger number
# So find numbers that are close
ll1 <- apply(p1.doctopic,1,max)
ll2 <- apply(p2.doctopic,1,max)
plu.max1 <- which(p1.doctopic >= ll1*(1-plurality.tol),arr.ind = T)
plu.max2 <- which(p2.doctopic >= ll2*(1-plurality.tol),arr.ind = T)
for(k in 1:topmatch) {
# Filter out rows that are less than the threshold for this combination of topics
# Need to threshold later
# Find all the rows in each set that respect the threshold
# The ones that are combined to hold the threshold
# Can be double counting here
# Need to determine a new max list
thres <- which(p1.doctopic[,matched.df[k,1]] >= threshold | p2.doctopic[,matched.df[k,2]] >= threshold)
matchvec <- intersect(plu.max1[plu.max1[,2]==matched.df[k,1],1],plu.max2[plu.max2[,2]==matched.df[k,2],1])
contin[1,1,k] <- length(matchvec)
#print(contin[1,1,k])
contin[1,2,k] <- length(plu.max2[plu.max2[,2]==matched.df[k,2],1])-contin[1,1,k]
#print(contin[1,2,k])
contin[2,1,k] <- length(plu.max1[plu.max1[,2]==matched.df[k,1],1])-contin[1,1,k]
#print(contin[2,1,k])
contin[2,2,k] <- length(thres)-contin[1,1,k]-contin[1,2,k]-contin[2,1,k]
#print(contin[2,2,k])
matches[matchvec,k] <- rep(1,contin[1,1,k])
contin.percent[,,k] <- contin[,,k]/sum(contin[,,k])
}
}
else {
for(k in 1:topmatch) {
thres <- which(p1.doctopic[,matched.df[k,1]] > threshold | p2.doctopic[,matched.df[k,2]] > threshold )
contin[1,1,k] <- length(which(matched.df[k,1] == max1[thres] & matched.df[k,2] == max2[thres]))
contin[1,2,k] <- length(which(matched.df[k,1] == max1[thres] & matched.df[k,2] != max2[thres]))
contin[2,1,k] <- length(which(matched.df[k,1] != max1[thres] & matched.df[k,2] == max2[thres]))
contin[2,2,k] <- length(which(matched.df[k,1] != max1[thres] & matched.df[k,2] != max2[thres]))
contin.percent[,,k] <- contin[,,k]/sum(contin[,,k])
}
}
return (list(matches=matches,table=contin,table.percent=contin.percent))
}
## Sanity check on a single day: with plurality=TRUE a document can be
## assigned to more than one matched topic pair, so look for duplicates.
hi<- build.contin(matched.df=allmatches$taub[1,,], p1.doctopic=doctopic1, p2.doctopic=doctopic2, cutoff=TRUE, cutoff.num=10, cutoff.prop=FALSE, cutoff.prop.val=0.5, cutoff.value.bool=FALSE, cutoff.value=0.5,threshold=0.1, plurality=TRUE, plurality.tol=0.2)
which(rowSums(hi$matches)>1) # no dupes
# Number of documents credited to more than one retained topic match.
# FIX: the original used length(m[rowSums(m) > 1, ]), which counts *cells*
# of the subset matrix (and collapses to a bare vector when exactly one row
# matches), not the number of double-counted rows.
count_double_counted <- function(m) sum(rowSums(m) > 1)
# Lets go through all the days and see how much there is
# Dimensions: 6 (similarity measure x plurality) combinations x 30 days x
# 2x2 contingency table x 10 retained topic matches.
save.cutoff10.plural0.2 <- array(dim=c(6,30,2,2,10))
numdoublecount <- matrix(NA,nrow=30,ncol=6)
for(i in 1:30) {
  doctopic1 <- posterior(LDAlist.ss[[i]]$LDAmodel)$topics
  doctopic2 <- posterior(LDAlist.sl[[i]]$LDAmodel)$topics
  # Taub w/ plurality
  f <- build.contin(matched.df = allmatches$taub[i,,],p1.doctopic = doctopic1, p2.doctopic = doctopic2, cutoff=TRUE, cutoff.num = 10, plurality=TRUE)
  save.cutoff10.plural0.2[1,i,,,] <- f$table
  numdoublecount[i,1] <- count_double_counted(f$matches)
  # Taub w/o plurality
  f2 <- build.contin(matched.df = allmatches$taub[i,,],p1.doctopic = doctopic1, p2.doctopic = doctopic2, cutoff=TRUE, cutoff.num = 10, plurality = FALSE)
  save.cutoff10.plural0.2[2,i,,,] <- f2$table
  numdoublecount[i,2] <- count_double_counted(f2$matches)
  # Gini w/ plurality
  f3 <- build.contin(matched.df = allmatches$gini[i,,],p1.doctopic = doctopic1, p2.doctopic = doctopic2, cutoff=TRUE, cutoff.num = 10, plurality = TRUE)
  save.cutoff10.plural0.2[3,i,,,] <- f3$table
  numdoublecount[i,3] <- count_double_counted(f3$matches)
  # Gini w/o plurality
  f4 <- build.contin(matched.df = allmatches$gini[i,,],p1.doctopic = doctopic1, p2.doctopic = doctopic2, cutoff=TRUE, cutoff.num = 10, plurality = FALSE)
  save.cutoff10.plural0.2[4,i,,,] <- f4$table
  numdoublecount[i,4] <- count_double_counted(f4$matches)
  # Jsd w/ plurality
  f5 <- build.contin(matched.df = allmatches$jsd[i,,],p1.doctopic = doctopic1, p2.doctopic = doctopic2, cutoff=TRUE, cutoff.num = 10, plurality = TRUE)
  save.cutoff10.plural0.2[5,i,,,] <- f5$table
  numdoublecount[i,5] <- count_double_counted(f5$matches)
  # Jsd w/o plurality
  f6 <- build.contin(matched.df = allmatches$jsd[i,,],p1.doctopic = doctopic1, p2.doctopic = doctopic2, cutoff=TRUE, cutoff.num = 10, plurality = FALSE)
  save.cutoff10.plural0.2[6,i,,,] <- f6$table
  numdoublecount[i,6] <- count_double_counted(f6$matches)
}
# Summed 2x2 contingency tables across days and retained matches,
# one per measure/plurality combination.
apply(save.cutoff10.plural0.2[1,,,,],c(2,3),sum)
apply(save.cutoff10.plural0.2[2,,,,],c(2,3),sum)
apply(save.cutoff10.plural0.2[3,,,,],c(2,3),sum)
apply(save.cutoff10.plural0.2[4,,,,],c(2,3),sum)
apply(save.cutoff10.plural0.2[5,,,,],c(2,3),sum)
apply(save.cutoff10.plural0.2[6,,,,],c(2,3),sum)
########################################################################
############ 28 March 2018: derive Kaesmann's expression levels for orthologous clusters from OrthoDB
########################################################################
### SPECIES:
# Pan paniscus = bonobo
# Pan troglodytes = chimp
# Pongo abelii = orangutan
# Papio anubis = baboon
# Monodelphis domestica = opossum
# Ornithorhynchus anatinus = platypus
#### 1 read "OrthoDB.Mammals.EOG090A0320.txt" and save only species from Kaesmann paper: "OrthoDB.Mammals.EOG090A0320.Kaesmann.txt"
# NOTE(review): clears the entire workspace; this section is meant to run stand-alone.
rm(list=ls(all=TRUE))
# Tab-separated OrthoDB export for orthologous group EOG090A0320.
Orthologs <- read.table('/hdd/SCIENCE_PROJECTS_BODY/Hsp/1_RAW/FromOrthoDB/OrthoDB.Mammals.EOG090A0320.txt', sep = '\t', header = TRUE)
# Trailing bare name prints the species vector when run interactively.
VecOfSpecies = unique(Orthologs$organism_name); VecOfSpecies
# Species analysed in the Kaessmann (Brawand et al.) expression paper.
KaesmannSpecies = c('Macaca mulatta', 'Pan paniscus', 'Pan troglodytes', 'Pongo abelii', 'Homo sapiens', 'Papio anubis', 'Mus musculus', 'Monodelphis domestica', 'Ornithorhynchus anatinus')
Orthologs = as.data.frame(Orthologs)
str(Orthologs)
# Keep only rows for the species of interest.
Orthologs = Orthologs[Orthologs$organism_name %in% KaesmannSpecies,]
# Strip the gene symbol after ';', keeping only the Ensembl gene id.
Orthologs$pub_gene_id = gsub("\\;(.*)",'',Orthologs$pub_gene_id) # ENSMUSG00000023944;Hsp90ab1 => ENSMUSG00000023944
# NOTE(review): written with write.table defaults (space-separated, quoted);
# the file is hand-edited before being re-read in step 3 below.
write.table(Orthologs,'/hdd/SCIENCE_PROJECTS_BODY/Hsp/1_RAW/FromOrthoDB/OrthoDB.Mammals.EOG090A0320.Kaesmann.txt')
# pan paniscus - there are no Ensembl ID!!! keasman derived them from P trogl
#### 2 add by hand EnsemblID to "OrthoDB.Mammals.EOG090A0320.Kaesmann.txt" and save it as "OrthoDB.Mammals.EOG090A0320.Kaesmann.Edited.txt"
#### 3 For each gene from "OrthoDB.Mammals.EOG090A0320.Kaesmann.Edited.txt" look for expression level from Kaesmenn Supplementry data
rm(list=ls(all=TRUE))
# Hand-edited ortholog table produced by step 2 above.
Orthologs <- read.table('/hdd/SCIENCE_PROJECTS_BODY/Hsp/1_RAW/FromOrthoDB/OrthoDB.Mammals.EOG090A0320.Kaesmann.Edited.txt', sep = '\t', header = TRUE)
# Mouse expression table, restricted to the orthologous genes.
KaesmannMusMus<-read.table('/hdd/SCIENCE_PROJECTS_BODY/Hsp/1_RAW/Brawand_SM/Supplementary_Data2/Mouse_Ensembl57_TopHat_UniqueReads.txt', sep = '\t', header = TRUE)
# FIX: the original filtered with the undefined object `KaesmannMouse`,
# which stops the script with "object 'KaesmannMouse' not found".
KaesmannMusMus = KaesmannMusMus[KaesmannMusMus$GeneID %in% Orthologs$pub_gene_id,]
# Opossum expression table, same filtering.
KaesmannMonDom<-read.table('/hdd/SCIENCE_PROJECTS_BODY/Hsp/1_RAW/Brawand_SM/Supplementary_Data2/Opossum_Ensembl57_TopHat_UniqueReads.txt', sep = '\t', header = TRUE)
KaesmannMonDom = KaesmannMonDom[KaesmannMonDom$GeneID %in% Orthologs$pub_gene_id,]
# Platypus expression table, same filtering.
KaesmannOrnAna<-read.table('/hdd/SCIENCE_PROJECTS_BODY/Hsp/1_RAW/Brawand_SM/Supplementary_Data2/Platypus_Ensembl57_TopHat_UniqueReads.txt', sep = '\t', header = TRUE)
KaesmannOrnAna = KaesmannOrnAna[KaesmannOrnAna$GeneID %in% Orthologs$pub_gene_id,]
########################################################################
############ boxplots
########################################################################
# Compare HSP expression (RPKM) between non-'mml' samples and 'mml'
# samples, one boxplot pair per tissue, on a single multi-panel PDF.
# NOTE(review): in the Brawand data 'mml' conventionally denotes Macaca
# mulatta (macaque), not mouse ('mmu') -- confirm the species code used in
# Expr.txt, since the labels and the file name say "mouse".
rm(list = ls(all = TRUE))
setwd('/hdd/SCIENCE_PROJECTS_BODY/Hsp/1_RAW/Brawand_SM')
hsp <- read.table("Expr.txt", header = TRUE)
head(hsp)
table(hsp$Species)
table(hsp$Tissue)
VecOfTissues <- unique(hsp$Tissue)
length(VecOfTissues)
setwd('/hdd/SCIENCE_PROJECTS_BODY/Hsp/4_FIGURES/')
pdf('HspPrimatesVsMouse.pdf')
par(mfrow = c(2, 3))
for (i in seq_along(VecOfTissues)) {
  tissue <- VecOfTissues[i]
  tissue_dat <- hsp[hsp$Tissue == tissue, ]
  boxplot(tissue_dat[tissue_dat$Species != 'mml', ]$RPKM,
          tissue_dat[tissue_dat$Species == 'mml', ]$RPKM,
          names = c('primates', 'mouse'),
          outline = FALSE, main = tissue)
}
dev.off()
########################################################################
############ 16 Aug 2018, extract Kn/Ks from ensembl, merge with GT and correlate Kn/Ks vs GT
########################################################################
rm(list=ls(all=TRUE))
# read Kn/Ks and GT data
# Ensembl Compara ortholog export for ENSG00000096384 (dN/dS per species)
# plus a generation-length (GT, in days) table for mammals.
KnKs = read.csv('/media/konstantinpopadin/ac45df81-e084-4d30-9653-5c57cc9b58fd/konstantinpopadin/SCIENCE_PROJECTS_BODY/Hsp/1_RAW/ComparaOrthologs/orthologues-ComparaOrthologs-Homo_sapiens_Gene_Compara_Ortholog_ENSG00000096384.csv')
GT = read.table('/media/konstantinpopadin/ac45df81-e084-4d30-9653-5c57cc9b58fd/konstantinpopadin/SCIENCE_PROJECTS_BODY/Hsp/1_RAW/GenerationLenghtforAllMammals/GenerationLenghtforMammals.xlsx.txt', sep = '\t', header = TRUE)
### keep only 1 to 1 orthologs
nrow(KnKs)
table(KnKs$Type)
# The Ensembl export fuses the "View Gene Tree" link text into the Type field.
KnKs = KnKs[KnKs$Type == '1-to-1View Gene Tree',]
nrow(KnKs)
### filter out raws with NA dn.ds
KnKs = KnKs[KnKs$dN.dS != 'n/a',]
str(KnKs)
# dN.dS was read as text because of the 'n/a' entries; convert to numeric.
KnKs$dN.dS = as.numeric(as.character(KnKs$dN.dS))
nrow(KnKs)
### edit name
# Species looks like "Dog (Canis lupus familiaris)": keep only the Latin
# name, then normalise subspecies names to binomials so they match GT.
KnKs$Scientific_name = gsub("(.*)\\(",'',KnKs$Species)
KnKs$Scientific_name = gsub("\\)",'',KnKs$Scientific_name)
KnKs$Scientific_name = gsub('Canis lupus familiaris','Canis lupus',KnKs$Scientific_name)
KnKs$Scientific_name = gsub('Colobus angolensis palliatus','Colobus angolensis',KnKs$Scientific_name)
KnKs$Scientific_name = gsub('Gorilla gorilla gorilla','Gorilla gorilla',KnKs$Scientific_name)
KnKs$Scientific_name = gsub('Mustela putorius furo','Mustela putorius',KnKs$Scientific_name)
KnKs$Scientific_name = gsub('Peromyscus maniculatus bairdii','Peromyscus maniculatus',KnKs$Scientific_name)
KnKs$Scientific_name = gsub('Panthera tigris altaica','Panthera tigris',KnKs$Scientific_name)
### take subset of columns
KnKs = KnKs[,grepl("Scientific_name|dN.dS", names(KnKs))]
# Average dN/dS per species (a species may appear more than once).
KnKs = aggregate(KnKs$dN.dS, by = list(KnKs$Scientific_name), FUN = mean)
names(KnKs)=c('Scientific_name','dN.dS')
### keep subset of columns from GT
GT = GT[,grepl("Scientific_name|GenerationLength_d", names(GT)),]
### merge by Scientific_name
ALL = merge(KnKs,GT, by = 'Scientific_name')
# Rank correlation between dN/dS and generation length.
cor.test(ALL$dN.dS,ALL$GenerationLength_d, method = 'spearman')
pdf('/media/konstantinpopadin/ac45df81-e084-4d30-9653-5c57cc9b58fd/konstantinpopadin/SCIENCE_PROJECTS_BODY/Hsp/4_FIGURES/KnKsVsGtMammalsHsp.pdf')
plot(log2(ALL$GenerationLength_d),log2(ALL$dN.dS), cex = 2, pch = 16, col = 'black')
A<-lm(log2(ALL$dN.dS)~log2(ALL$GenerationLength_d))
summary(A) # intercept doesn't differ from zero => make it from the zero
abline(A, col = 'red', lwd = 3)
dev.off()
## my regression from Popadin (MBE) is very similar (at least not steeper): Kn/Ks = 0.094 + 1.18*10^(-5)*GT
A<-lm(ALL$dN.dS~ALL$GenerationLength_d)
summary(A) # intercept doesn't differ from zero => make it from the zero
# Refit through the origin since the intercept is not significant.
B<-lm(ALL$dN.dS~0+ALL$GenerationLength_d)
summary(B)
#Call:
# lm(formula = ALL$dN.dS ~ 0 + ALL$GenerationLength_d)
#
#Residuals:
# Min 1Q Median 3Q Max
#-0.19021 -0.07294 -0.03678 -0.01115 0.49727
#
#Coefficients:
# Estimate Std. Error t value Pr(>|t|)
#ALL$GenerationLength_d 2.158e-05 5.452e-06 3.958 0.00033 ***
# ---
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
#
#Residual standard error: 0.1403 on 37 degrees of freedom
#Multiple R-squared: 0.2975, Adjusted R-squared: 0.2785
#F-statistic: 15.67 on 1 and 37 DF, p-value: 0.0003297
| /Head/2Scripts/ApHsp90 (1).R | no_license | Anastasia3sokol/HSP | R | false | false | 6,759 | r | ########################################################################
############ 28 March 2018: derive Kaesmann's expression levels for orthologous clusters from OrthoDB
########################################################################
### SPECIES:
# Pan paniscus = bonobo
# Pan troglodytes = chimp
# Pongo abelii = orangutan
# Papio anubis = baboon
# Monodelphis domestica = opossum
# Ornithorhynchus anatinus = platypus
#### 1 read "OrthoDB.Mammals.EOG090A0320.txt" and save only species from Kaesmann paper: "OrthoDB.Mammals.EOG090A0320.Kaesmann.txt"
# NOTE(review): clears the entire workspace; this section is meant to run stand-alone.
rm(list=ls(all=TRUE))
# Tab-separated OrthoDB export for orthologous group EOG090A0320.
Orthologs <- read.table('/hdd/SCIENCE_PROJECTS_BODY/Hsp/1_RAW/FromOrthoDB/OrthoDB.Mammals.EOG090A0320.txt', sep = '\t', header = TRUE)
# Trailing bare name prints the species vector when run interactively.
VecOfSpecies = unique(Orthologs$organism_name); VecOfSpecies
# Species analysed in the Kaessmann (Brawand et al.) expression paper.
KaesmannSpecies = c('Macaca mulatta', 'Pan paniscus', 'Pan troglodytes', 'Pongo abelii', 'Homo sapiens', 'Papio anubis', 'Mus musculus', 'Monodelphis domestica', 'Ornithorhynchus anatinus')
Orthologs = as.data.frame(Orthologs)
str(Orthologs)
# Keep only rows for the species of interest.
Orthologs = Orthologs[Orthologs$organism_name %in% KaesmannSpecies,]
# Strip the gene symbol after ';', keeping only the Ensembl gene id.
Orthologs$pub_gene_id = gsub("\\;(.*)",'',Orthologs$pub_gene_id) # ENSMUSG00000023944;Hsp90ab1 => ENSMUSG00000023944
# NOTE(review): written with write.table defaults (space-separated, quoted);
# the file is hand-edited before being re-read in step 3 below.
write.table(Orthologs,'/hdd/SCIENCE_PROJECTS_BODY/Hsp/1_RAW/FromOrthoDB/OrthoDB.Mammals.EOG090A0320.Kaesmann.txt')
# pan paniscus - there are no Ensembl ID!!! keasman derived them from P trogl
#### 2 add by hand EnsemblID to "OrthoDB.Mammals.EOG090A0320.Kaesmann.txt" and save it as "OrthoDB.Mammals.EOG090A0320.Kaesmann.Edited.txt"
#### 3 For each gene from "OrthoDB.Mammals.EOG090A0320.Kaesmann.Edited.txt" look for expression level from Kaesmenn Supplementry data
rm(list=ls(all=TRUE))
# Hand-edited ortholog table produced by step 2 above.
Orthologs <- read.table('/hdd/SCIENCE_PROJECTS_BODY/Hsp/1_RAW/FromOrthoDB/OrthoDB.Mammals.EOG090A0320.Kaesmann.Edited.txt', sep = '\t', header = TRUE)
# Mouse expression table, restricted to the orthologous genes.
KaesmannMusMus<-read.table('/hdd/SCIENCE_PROJECTS_BODY/Hsp/1_RAW/Brawand_SM/Supplementary_Data2/Mouse_Ensembl57_TopHat_UniqueReads.txt', sep = '\t', header = TRUE)
# FIX: the original filtered with the undefined object `KaesmannMouse`,
# which stops the script with "object 'KaesmannMouse' not found".
KaesmannMusMus = KaesmannMusMus[KaesmannMusMus$GeneID %in% Orthologs$pub_gene_id,]
# Opossum expression table, same filtering.
KaesmannMonDom<-read.table('/hdd/SCIENCE_PROJECTS_BODY/Hsp/1_RAW/Brawand_SM/Supplementary_Data2/Opossum_Ensembl57_TopHat_UniqueReads.txt', sep = '\t', header = TRUE)
KaesmannMonDom = KaesmannMonDom[KaesmannMonDom$GeneID %in% Orthologs$pub_gene_id,]
# Platypus expression table, same filtering.
KaesmannOrnAna<-read.table('/hdd/SCIENCE_PROJECTS_BODY/Hsp/1_RAW/Brawand_SM/Supplementary_Data2/Platypus_Ensembl57_TopHat_UniqueReads.txt', sep = '\t', header = TRUE)
KaesmannOrnAna = KaesmannOrnAna[KaesmannOrnAna$GeneID %in% Orthologs$pub_gene_id,]
########################################################################
############ boxplots
########################################################################
# Compare HSP expression (RPKM) between non-'mml' samples and 'mml'
# samples, one boxplot pair per tissue, on a single multi-panel PDF.
# NOTE(review): in the Brawand data 'mml' conventionally denotes Macaca
# mulatta (macaque), not mouse ('mmu') -- confirm the species code used in
# Expr.txt, since the labels and the file name say "mouse".
rm(list = ls(all = TRUE))
setwd('/hdd/SCIENCE_PROJECTS_BODY/Hsp/1_RAW/Brawand_SM')
hsp <- read.table("Expr.txt", header = TRUE)
head(hsp)
table(hsp$Species)
table(hsp$Tissue)
VecOfTissues <- unique(hsp$Tissue)
length(VecOfTissues)
setwd('/hdd/SCIENCE_PROJECTS_BODY/Hsp/4_FIGURES/')
pdf('HspPrimatesVsMouse.pdf')
par(mfrow = c(2, 3))
for (i in seq_along(VecOfTissues)) {
  tissue <- VecOfTissues[i]
  tissue_dat <- hsp[hsp$Tissue == tissue, ]
  boxplot(tissue_dat[tissue_dat$Species != 'mml', ]$RPKM,
          tissue_dat[tissue_dat$Species == 'mml', ]$RPKM,
          names = c('primates', 'mouse'),
          outline = FALSE, main = tissue)
}
dev.off()
########################################################################
############ 16 Aug 2018, extract Kn/Ks from ensembl, merge with GT and correlate Kn/Ks vs GT
########################################################################
rm(list=ls(all=TRUE))
# read Kn/Ks and GT data
# Ensembl Compara ortholog export for ENSG00000096384 (dN/dS per species)
# plus a generation-length (GT, in days) table for mammals.
KnKs = read.csv('/media/konstantinpopadin/ac45df81-e084-4d30-9653-5c57cc9b58fd/konstantinpopadin/SCIENCE_PROJECTS_BODY/Hsp/1_RAW/ComparaOrthologs/orthologues-ComparaOrthologs-Homo_sapiens_Gene_Compara_Ortholog_ENSG00000096384.csv')
GT = read.table('/media/konstantinpopadin/ac45df81-e084-4d30-9653-5c57cc9b58fd/konstantinpopadin/SCIENCE_PROJECTS_BODY/Hsp/1_RAW/GenerationLenghtforAllMammals/GenerationLenghtforMammals.xlsx.txt', sep = '\t', header = TRUE)
### keep only 1 to 1 orthologs
nrow(KnKs)
table(KnKs$Type)
# The Ensembl export fuses the "View Gene Tree" link text into the Type field.
KnKs = KnKs[KnKs$Type == '1-to-1View Gene Tree',]
nrow(KnKs)
### filter out raws with NA dn.ds
KnKs = KnKs[KnKs$dN.dS != 'n/a',]
str(KnKs)
# dN.dS was read as text because of the 'n/a' entries; convert to numeric.
KnKs$dN.dS = as.numeric(as.character(KnKs$dN.dS))
nrow(KnKs)
### edit name
# Species looks like "Dog (Canis lupus familiaris)": keep only the Latin
# name, then normalise subspecies names to binomials so they match GT.
KnKs$Scientific_name = gsub("(.*)\\(",'',KnKs$Species)
KnKs$Scientific_name = gsub("\\)",'',KnKs$Scientific_name)
KnKs$Scientific_name = gsub('Canis lupus familiaris','Canis lupus',KnKs$Scientific_name)
KnKs$Scientific_name = gsub('Colobus angolensis palliatus','Colobus angolensis',KnKs$Scientific_name)
KnKs$Scientific_name = gsub('Gorilla gorilla gorilla','Gorilla gorilla',KnKs$Scientific_name)
KnKs$Scientific_name = gsub('Mustela putorius furo','Mustela putorius',KnKs$Scientific_name)
KnKs$Scientific_name = gsub('Peromyscus maniculatus bairdii','Peromyscus maniculatus',KnKs$Scientific_name)
KnKs$Scientific_name = gsub('Panthera tigris altaica','Panthera tigris',KnKs$Scientific_name)
### take subset of columns
KnKs = KnKs[,grepl("Scientific_name|dN.dS", names(KnKs))]
# Average dN/dS per species (a species may appear more than once).
KnKs = aggregate(KnKs$dN.dS, by = list(KnKs$Scientific_name), FUN = mean)
names(KnKs)=c('Scientific_name','dN.dS')
### keep subset of columns from GT
GT = GT[,grepl("Scientific_name|GenerationLength_d", names(GT)),]
### merge by Scientific_name
ALL = merge(KnKs,GT, by = 'Scientific_name')
# Rank correlation between dN/dS and generation length.
cor.test(ALL$dN.dS,ALL$GenerationLength_d, method = 'spearman')
pdf('/media/konstantinpopadin/ac45df81-e084-4d30-9653-5c57cc9b58fd/konstantinpopadin/SCIENCE_PROJECTS_BODY/Hsp/4_FIGURES/KnKsVsGtMammalsHsp.pdf')
plot(log2(ALL$GenerationLength_d),log2(ALL$dN.dS), cex = 2, pch = 16, col = 'black')
A<-lm(log2(ALL$dN.dS)~log2(ALL$GenerationLength_d))
summary(A) # intercept doesn't differ from zero => make it from the zero
abline(A, col = 'red', lwd = 3)
dev.off()
## my regression from Popadin (MBE) is very similar (at least not steeper): Kn/Ks = 0.094 + 1.18*10^(-5)*GT
A<-lm(ALL$dN.dS~ALL$GenerationLength_d)
summary(A) # intercept doesn't differ from zero => make it from the zero
# Refit through the origin since the intercept is not significant.
B<-lm(ALL$dN.dS~0+ALL$GenerationLength_d)
summary(B)
#Call:
# lm(formula = ALL$dN.dS ~ 0 + ALL$GenerationLength_d)
#
#Residuals:
# Min 1Q Median 3Q Max
#-0.19021 -0.07294 -0.03678 -0.01115 0.49727
#
#Coefficients:
# Estimate Std. Error t value Pr(>|t|)
#ALL$GenerationLength_d 2.158e-05 5.452e-06 3.958 0.00033 ***
# ---
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
#
#Residual standard error: 0.1403 on 37 degrees of freedom
#Multiple R-squared: 0.2975, Adjusted R-squared: 0.2785
#F-statistic: 15.67 on 1 and 37 DF, p-value: 0.0003297
|
library(tidyverse)
library(gganimate)
# GISTEMP monthly temperature anomalies: first line is a title row and
# "***" marks missing values.  Reshape to one row per (year, month).
t_diff <- read_csv("data/GLB.Ts+dSST.csv", skip = 1, na = "***") %>%
  # FIX: selecting with a bare external vector (month.abb) is deprecated
  # tidyselect behaviour and ambiguous against column names; use all_of().
  select(year = Year, all_of(month.abb)) %>%
  pivot_longer(-year, names_to="month", values_to="t_diff") %>%
  drop_na()
# last_dec <- t_diff %>%
#   filter(month == "Dec") %>%
#   mutate(year = year + 1,
#          month = "last_Dec")
# Duplicate each January as "next_Jan" of the previous year so every
# year's line closes the loop in the polar plot.
next_jan <- t_diff %>%
  filter(month == "Jan") %>%
  mutate(year = year - 1,
         month = "next_Jan")
t_data <- bind_rows(t_diff, next_jan) %>%
  mutate(month = factor(month, levels = c(month.abb, "next_Jan")),
         month_number = as.numeric(month)) %>%
  arrange(year, month) %>%
  # Drop the artificial year 1879 created by shifting 1880's January back.
  filter(year != 1879) %>%
  # FIX: row_number() instead of 1:nrow(.), which leaned on the magrittr
  # dot and would break silently under the native |> pipe.
  mutate(step_number = row_number())
# Most recent (year, month) point; kept for optional labelling.
annotation <- t_data %>%
  slice_max(year) %>%
  slice_max(month_number)
# Reference circles for the 1.5C / 2.0C thresholds.
temp_lines <- tibble(
  x = 12,
  y = c(1.5, 2.0),
  labels = c("1.5\u00B0C", "2.0\u00B0C")
)
# Month labels drawn around the outer ring.
month_labels <- tibble(
  x = 1:12,
  labels = month.abb,
  y = 2.7
)
# Polar "climate spiral": one ring per year, coloured by year, built up
# cumulatively frame by frame.
a <- t_data %>%
  ggplot(aes(x=month_number, y=t_diff, group=year, color=year)) +
  # Black disc drawn behind the spiral.
  geom_rect(aes(xmin=1, xmax=13, ymin=-2, ymax=2.4),
            color="black", fill="black",
            inherit.aes = FALSE) +
  geom_hline(yintercept = c(1.5, 2.0), color="red") +
  geom_label(data = temp_lines, aes(x=x, y=y, label=labels),
             color = "red", fill = "black", label.size = 0,
             inherit.aes=FALSE) +
  # Month names around the rim, rotated to follow the circle.
  geom_text(data = month_labels, aes(x=x, y=y, label = labels),
            inherit.aes = FALSE, color="white",
            angle = seq(360 - 360/12, 0, length.out = 12)) +
  # Year counter in the centre of the disc.
  geom_label(aes(x = 1, y=-1.3, label = year),
             color="white", fill="black",
             label.padding = unit(50, "pt"), label.size = 0,
             size=6) +
  geom_line() +
  scale_x_continuous(breaks=1:12,
                     labels=month.abb, expand = c(0,0),
                     sec.axis = dup_axis(name = NULL, labels=NULL)) +
  scale_y_continuous(breaks = seq(-2, 2, 0.2),
                     limits = c(-2, 2.7), expand = c(0, -0.7),
                     sec.axis = dup_axis(name = NULL, labels=NULL)) +
  scale_color_viridis_c(breaks = seq(1880, 2020, 20),
                        guide = "none") +
  coord_polar(start = 2*pi/12) +
  labs(x = NULL,
       y = NULL,
       title = "Global temperature change (1880-2022)") +
  theme(
    # NOTE(review): `size` in element_rect() is deprecated in favour of
    # `linewidth` from ggplot2 3.4 -- left as-is pending a version check.
    panel.background = element_rect(fill="#444444", size=1),
    plot.background = element_rect(fill = "#444444", color="#444444"),
    panel.grid = element_blank(),
    axis.text.x = element_blank(),
    axis.text.y = element_blank(),
    axis.title.y = element_blank(),
    axis.ticks = element_blank(),
    axis.title = element_text(color="white", size=13),
    plot.title = element_text(color="white", hjust = 0.5,size = 15)
  ) +
  transition_manual(frames = year, cumulative = TRUE)
# FIX: removed the trailing comma in the first animate() call, which
# passed an empty argument into `...`.
animate(a, width=4.155, height=4.5, unit="in", res=300)
anim_save("figures/climate_spiral.gif")
# Same animation rendered to mp4 via the av renderer.
animate(a, width=4.155, height=4.5, unit="in", res=300,
        renderer = av_renderer("figures/climate_spiral.mp4")
)
| /scripts/global_temp_lines_spiral_animated.R | no_license | zabdi8/climate | R | false | false | 2,993 | r | library(tidyverse)
library(gganimate)
# GISTEMP monthly temperature anomalies: first line is a title row and
# "***" marks missing values.  Reshape to one row per (year, month).
t_diff <- read_csv("data/GLB.Ts+dSST.csv", skip = 1, na = "***") %>%
  # FIX: selecting with a bare external vector (month.abb) is deprecated
  # tidyselect behaviour and ambiguous against column names; use all_of().
  select(year = Year, all_of(month.abb)) %>%
  pivot_longer(-year, names_to="month", values_to="t_diff") %>%
  drop_na()
# last_dec <- t_diff %>%
#   filter(month == "Dec") %>%
#   mutate(year = year + 1,
#          month = "last_Dec")
# Duplicate each January as "next_Jan" of the previous year so every
# year's line closes the loop in the polar plot.
next_jan <- t_diff %>%
  filter(month == "Jan") %>%
  mutate(year = year - 1,
         month = "next_Jan")
t_data <- bind_rows(t_diff, next_jan) %>%
  mutate(month = factor(month, levels = c(month.abb, "next_Jan")),
         month_number = as.numeric(month)) %>%
  arrange(year, month) %>%
  # Drop the artificial year 1879 created by shifting 1880's January back.
  filter(year != 1879) %>%
  # FIX: row_number() instead of 1:nrow(.), which leaned on the magrittr
  # dot and would break silently under the native |> pipe.
  mutate(step_number = row_number())
# Most recent (year, month) point; kept for optional labelling.
annotation <- t_data %>%
  slice_max(year) %>%
  slice_max(month_number)
# Reference circles for the 1.5C / 2.0C thresholds.
temp_lines <- tibble(
  x = 12,
  y = c(1.5, 2.0),
  labels = c("1.5\u00B0C", "2.0\u00B0C")
)
# Month labels drawn around the outer ring.
month_labels <- tibble(
  x = 1:12,
  labels = month.abb,
  y = 2.7
)
# Polar "climate spiral": one ring per year, coloured by year, built up
# cumulatively frame by frame.
a <- t_data %>%
  ggplot(aes(x=month_number, y=t_diff, group=year, color=year)) +
  # Black disc drawn behind the spiral.
  geom_rect(aes(xmin=1, xmax=13, ymin=-2, ymax=2.4),
            color="black", fill="black",
            inherit.aes = FALSE) +
  geom_hline(yintercept = c(1.5, 2.0), color="red") +
  geom_label(data = temp_lines, aes(x=x, y=y, label=labels),
             color = "red", fill = "black", label.size = 0,
             inherit.aes=FALSE) +
  # Month names around the rim, rotated to follow the circle.
  geom_text(data = month_labels, aes(x=x, y=y, label = labels),
            inherit.aes = FALSE, color="white",
            angle = seq(360 - 360/12, 0, length.out = 12)) +
  # Year counter in the centre of the disc.
  geom_label(aes(x = 1, y=-1.3, label = year),
             color="white", fill="black",
             label.padding = unit(50, "pt"), label.size = 0,
             size=6) +
  geom_line() +
  scale_x_continuous(breaks=1:12,
                     labels=month.abb, expand = c(0,0),
                     sec.axis = dup_axis(name = NULL, labels=NULL)) +
  scale_y_continuous(breaks = seq(-2, 2, 0.2),
                     limits = c(-2, 2.7), expand = c(0, -0.7),
                     sec.axis = dup_axis(name = NULL, labels=NULL)) +
  scale_color_viridis_c(breaks = seq(1880, 2020, 20),
                        guide = "none") +
  coord_polar(start = 2*pi/12) +
  labs(x = NULL,
       y = NULL,
       title = "Global temperature change (1880-2022)") +
  theme(
    # NOTE(review): `size` in element_rect() is deprecated in favour of
    # `linewidth` from ggplot2 3.4 -- left as-is pending a version check.
    panel.background = element_rect(fill="#444444", size=1),
    plot.background = element_rect(fill = "#444444", color="#444444"),
    panel.grid = element_blank(),
    axis.text.x = element_blank(),
    axis.text.y = element_blank(),
    axis.title.y = element_blank(),
    axis.ticks = element_blank(),
    axis.title = element_text(color="white", size=13),
    plot.title = element_text(color="white", hjust = 0.5,size = 15)
  ) +
  transition_manual(frames = year, cumulative = TRUE)
# FIX: removed the trailing comma in the first animate() call, which
# passed an empty argument into `...`.
animate(a, width=4.155, height=4.5, unit="in", res=300)
anim_save("figures/climate_spiral.gif")
# Same animation rendered to mp4 via the av renderer.
animate(a, width=4.155, height=4.5, unit="in", res=300,
        renderer = av_renderer("figures/climate_spiral.mp4")
)
|
# Update recovered ("removed") individuals in the SIR simulation.
#' Update removed
#'
#' Randomly update removed infected nodes and return all the removed nodes.
#'
#' @param Var_Param List, saved important var and params
#' @param wait_removed Vector, the infected nodes wait to be removed
#' @param day Int, the time at now
#'
#' @return Var_Param$Removed_list List, shallow copy
#' @export
#'
#' @examples NULL
update_removed = function(Var_Param, wait_removed, day) {
  # Probability vector: removed with RemovedRate, stays infected otherwise.
  Rem_prob = c(Var_Param$RemovedRate, 1-Var_Param$RemovedRate)
  # Tabulate the candidate nodes (column 1: node id, column 2: how many
  # times the node appears in wait_removed).
  wait_removed = data.frame(table(unlist(igraph::V(Var_Param$Network$graph)[wait_removed])))
  # One recovery draw per candidate via the package's random-selection
  # helper.  NOTE(review): `push >= 1` below suggests push counts successful
  # removal draws per node -- confirm against random_selection().
  push = unlist(lapply(wait_removed[,2], SIRInNetwork::random_selection, vec_prob = Rem_prob))
  # Newly recovered node ids; factor -> character -> numeric conversion.
  new_removed = as.numeric(as.character(wait_removed[,1][push >= 1]))
  # Append today's recoveries to day+1's removed list.  The empty/empty
  # case is special-cased so day+1 holds an empty igraph vertex sequence
  # rather than the result of concatenating two empty objects.
  if (length(Var_Param$Removed_list[[day]]) == 0 && length(new_removed) == 0) {
    Var_Param$Removed_list[[day+1]] = igraph::V(Var_Param$Network$graph)[0]
  }else {
    Var_Param$Removed_list[[day+1]] = c(Var_Param$Removed_list[[day]], igraph::V(Var_Param$Network$graph)[new_removed])
  }
  return(Var_Param$Removed_list)
}
| /R/update_removed.R | permissive | AllToBeNice/SIR_In_Network | R | false | false | 1,249 | r | # 更新治愈者
#' Update removed
#'
#' Randomly update removed infected nodes and return all the removed nodes.
#'
#' @param Var_Param List, saved important var and params
#' @param wait_removed Vector, the infected nodes wait to be removed
#' @param day Int, the time at now
#'
#' @return Var_Param$Removed_list List, shallow copy
#' @export
#'
#' @examples NULL
update_removed = function(Var_Param, wait_removed, day) {
  # Probability vector: removed with RemovedRate, stays infected otherwise.
  Rem_prob = c(Var_Param$RemovedRate, 1-Var_Param$RemovedRate)
  # Tabulate the candidate nodes (column 1: node id, column 2: how many
  # times the node appears in wait_removed).
  wait_removed = data.frame(table(unlist(igraph::V(Var_Param$Network$graph)[wait_removed])))
  # One recovery draw per candidate via the package's random-selection
  # helper.  NOTE(review): `push >= 1` below suggests push counts successful
  # removal draws per node -- confirm against random_selection().
  push = unlist(lapply(wait_removed[,2], SIRInNetwork::random_selection, vec_prob = Rem_prob))
  # Newly recovered node ids; factor -> character -> numeric conversion.
  new_removed = as.numeric(as.character(wait_removed[,1][push >= 1]))
  # Append today's recoveries to day+1's removed list.  The empty/empty
  # case is special-cased so day+1 holds an empty igraph vertex sequence
  # rather than the result of concatenating two empty objects.
  if (length(Var_Param$Removed_list[[day]]) == 0 && length(new_removed) == 0) {
    Var_Param$Removed_list[[day+1]] = igraph::V(Var_Param$Network$graph)[0]
  }else {
    Var_Param$Removed_list[[day+1]] = c(Var_Param$Removed_list[[day]], igraph::V(Var_Param$Network$graph)[new_removed])
  }
  return(Var_Param$Removed_list)
}
|
#context("2D check by hand")
# Check that RANN is available
got_RANN <- requireNamespace("RANN", quietly = TRUE)
# Consider cases where method 1 (on its own) gives the wrong nearest neighbours
# because an observation, or pair of observations, contribute more than once.
# Check that an additional call using method 2 inside nnt() corrects this.
if (got_RANN) {
  # Five hand-picked points on the [0, 10] x [0, 10] square.
  x1 <- c(5, 5)
  x2 <- c(9, 3)
  x3 <- c(8, 9)
  x4 <- c(0, 2)
  x5 <- c(1, 10)
  x <- rbind(x1, x2, x3, x4, x5)
  # plot(x, xlim = c(0, 10), ylim = c(0, 10))
  # Plain Euclidean distance between two points.
  dfn <- function(x, y) sqrt(sum((x - y) ^ 2))
  # Only wrap on variable 1
  # Expected distances/indices derived by hand: selected points are shifted
  # by +/- 10 in coordinate 1 ("d" suffix) to emulate the torus wrap, then
  # all five distances from each query point are sorted.
  n1d <- n1n <- matrix(NA, 5, 5)
  temp <- apply(cbind(x1, x2, x3, x4, x5), 2, dfn, y = x1)
  n1d[1, ] <- sort(temp)
  n1n[1, ] <- order(temp)
  x4d <- c(10, 2)
  x5d <- c(11, 10)
  temp <- apply(cbind(x1, x2, x3, x4d, x5d), 2, dfn, y = x2)
  n1d[2, ] <- sort(temp)
  n1n[2, ] <- order(temp)
  temp <- apply(cbind(x1, x2, x3, x4d, x5d), 2, dfn, y = x3)
  n1d[3, ] <- sort(temp)
  n1n[3, ] <- order(temp)
  x2d <- c(-1, 3)
  x3d <- c(-2, 9)
  temp <- apply(cbind(x1, x2d, x3d, x4, x5), 2, dfn, y = x4)
  n1d[4, ] <- sort(temp)
  n1n[4, ] <- order(temp)
  temp <- apply(cbind(x1, x2d, x3d, x4, x5), 2, dfn, y = x5)
  n1d[5, ] <- sort(temp)
  n1n[5, ] <- order(temp)
  # Both nnt() methods must reproduce the hand-computed answers.
  res1 <- nnt(x, x, torus = 1, ranges = c(0, 10), method = 1)
  res2 <- nnt(x, x, torus = 1, ranges = c(0, 10), method = 2)
  test_that("Wrap on variable 1: indices vs method 1", {
    testthat::expect_equal(res1$nn.idx, n1n)
  })
  test_that("Wrap on variable 1: indices vs method 2", {
    testthat::expect_equal(res2$nn.idx, n1n)
  })
  test_that("Wrap on variable 1: distances vs method 1", {
    testthat::expect_equal(res1$nn.dists, n1d)
  })
  test_that("Wrap on variable 1: distances vs method 2", {
    testthat::expect_equal(res2$nn.dists, n1d)
  })
  # Only wrap on variable 2
  # Same construction as above, but shifts are +/- 10 in coordinate 2.
  # NOTE: n2d/n2n are reused (overwritten) by the next section.
  n2d <- n2n <- matrix(NA, 5, 5)
  temp <- apply(cbind(x1, x2, x3, x4, x5), 2, dfn, y = x1)
  n2d[1, ] <- sort(temp)
  n2n[1, ] <- order(temp)
  x3d <- c(8, -1)
  x5d <- c(1, 0)
  temp <- apply(cbind(x1, x2, x3d, x4, x5d), 2, dfn, y = x2)
  n2d[2, ] <- sort(temp)
  n2n[2, ] <- order(temp)
  x2d <- c(9, 13)
  x4d <- c(0, 12)
  temp <- apply(cbind(x1, x2d, x3, x4d, x5), 2, dfn, y = x3)
  n2d[3, ] <- sort(temp)
  n2n[3, ] <- order(temp)
  x3d <- c(8, -1)
  x5d <- c(1, 0)
  temp <- apply(cbind(x1, x2, x3d, x4, x5d), 2, dfn, y = x4)
  n2d[4, ] <- sort(temp)
  n2n[4, ] <- order(temp)
  temp <- apply(cbind(x1, x2d, x3, x4d, x5), 2, dfn, y = x5)
  n2d[5, ] <- sort(temp)
  n2n[5, ] <- order(temp)
  res1 <- nnt(x, x, torus = 2, ranges = c(0, 10), method = 1)
  res2 <- nnt(x, x, torus = 2, ranges = c(0, 10), method = 2)
  test_that("Wrap on variable 2: indices vs method 1", {
    testthat::expect_equal(res1$nn.idx, n2n)
  })
  test_that("Wrap on variable 2: indices vs method 2", {
    testthat::expect_equal(res2$nn.idx, n2n)
  })
  test_that("Wrap on variable 2: distances vs method 1", {
    testthat::expect_equal(res1$nn.dists, n2d)
  })
  test_that("Wrap on variable 2: distances vs method 2", {
    testthat::expect_equal(res2$nn.dists, n2d)
  })
# Wrap on variables 1 and 2
n2d <- n2n <- matrix(NA, 5, 5)
temp <- apply(cbind(x1, x2, x3, x4, x5), 2, dfn, y = x1)
n2d[1, ] <- sort(temp)
n2n[1, ] <- order(temp)
x3d <- c(8, -1)
x4d <- c(10, 2)
x5d <- c(11, 0)
temp <- apply(cbind(x1, x2, x3d, x4d, x5d), 2, dfn, y = x2)
n2d[2, ] <- sort(temp)
n2n[2, ] <- order(temp)
x2d <- c(9, 13)
x4d <- c(10, 12)
x5d <- c(11, 10)
temp <- apply(cbind(x1, x2d, x3, x4d, x5d), 2, dfn, y = x3)
n2d[3, ] <- sort(temp)
n2n[3, ] <- order(temp)
x2d <- c(-1, 3)
x3d <- c(-2, -1)
x5d <- c(1, 0)
temp <- apply(cbind(x1, x2d, x3d, x4, x5d), 2, dfn, y = x4)
n2d[4, ] <- sort(temp)
n2n[4, ] <- order(temp)
x2d <- c(-1, 13)
x3d <- c(-2, 9)
x4d <- c(0, 12)
temp <- apply(cbind(x1, x2d, x3d, x4d, x5), 2, dfn, y = x5)
n2d[5, ] <- sort(temp)
n2n[5, ] <- order(temp)
ranges <- rbind(c(0, 10), c(0, 10))
res1 <- nnt(x, x, torus = 1:2, ranges = ranges, method = 1)
res2 <- nnt(x, x, torus = 1:2, ranges = ranges, method = 2)
test_that("Wrap on variable 2: indices vs method 1", {
testthat::expect_equal(res1$nn.idx, n2n)
})
test_that("Wrap on variable 2: indices vs method 2", {
testthat::expect_equal(res2$nn.idx, n2n)
})
test_that("Wrap on variable 2: distances vs method 1", {
testthat::expect_equal(res1$nn.dists, n2d)
})
test_that("Wrap on variable 2: distances vs method 2", {
testthat::expect_equal(res2$nn.dists, n2d)
})
}
| /tests/testthat/test-2D.R | no_license | paulnorthrop/donut | R | false | false | 4,557 | r | #context("2D check by hand")
# Check that RANN is available
got_RANN <- requireNamespace("RANN", quietly = TRUE)
# Consider cases where method 1 (on its own) gives the wrong nearest neighbours
# because an observation, or pair of observations, contribute more than once.
# Check that an additional call using method 2 inside nnt() corrects this.
if (got_RANN) {
  # Five hand-picked points on the [0, 10] x [0, 10] square.
  x1 <- c(5, 5)
  x2 <- c(9, 3)
  x3 <- c(8, 9)
  x4 <- c(0, 2)
  x5 <- c(1, 10)
  x <- rbind(x1, x2, x3, x4, x5)
  # plot(x, xlim = c(0, 10), ylim = c(0, 10))
  # Plain Euclidean distance between two points.
  dfn <- function(x, y) sqrt(sum((x - y) ^ 2))
  # Only wrap on variable 1
  # Expected distances/indices derived by hand: selected points are shifted
  # by +/- 10 in coordinate 1 ("d" suffix) to emulate the torus wrap, then
  # all five distances from each query point are sorted.
  n1d <- n1n <- matrix(NA, 5, 5)
  temp <- apply(cbind(x1, x2, x3, x4, x5), 2, dfn, y = x1)
  n1d[1, ] <- sort(temp)
  n1n[1, ] <- order(temp)
  x4d <- c(10, 2)
  x5d <- c(11, 10)
  temp <- apply(cbind(x1, x2, x3, x4d, x5d), 2, dfn, y = x2)
  n1d[2, ] <- sort(temp)
  n1n[2, ] <- order(temp)
  temp <- apply(cbind(x1, x2, x3, x4d, x5d), 2, dfn, y = x3)
  n1d[3, ] <- sort(temp)
  n1n[3, ] <- order(temp)
  x2d <- c(-1, 3)
  x3d <- c(-2, 9)
  temp <- apply(cbind(x1, x2d, x3d, x4, x5), 2, dfn, y = x4)
  n1d[4, ] <- sort(temp)
  n1n[4, ] <- order(temp)
  temp <- apply(cbind(x1, x2d, x3d, x4, x5), 2, dfn, y = x5)
  n1d[5, ] <- sort(temp)
  n1n[5, ] <- order(temp)
  # Both nnt() methods must reproduce the hand-computed answers.
  res1 <- nnt(x, x, torus = 1, ranges = c(0, 10), method = 1)
  res2 <- nnt(x, x, torus = 1, ranges = c(0, 10), method = 2)
  test_that("Wrap on variable 1: indices vs method 1", {
    testthat::expect_equal(res1$nn.idx, n1n)
  })
  test_that("Wrap on variable 1: indices vs method 2", {
    testthat::expect_equal(res2$nn.idx, n1n)
  })
  test_that("Wrap on variable 1: distances vs method 1", {
    testthat::expect_equal(res1$nn.dists, n1d)
  })
  test_that("Wrap on variable 1: distances vs method 2", {
    testthat::expect_equal(res2$nn.dists, n1d)
  })
  # Only wrap on variable 2
  # Same construction as above, but shifts are +/- 10 in coordinate 2.
  # NOTE: n2d/n2n are reused (overwritten) by the next section.
  n2d <- n2n <- matrix(NA, 5, 5)
  temp <- apply(cbind(x1, x2, x3, x4, x5), 2, dfn, y = x1)
  n2d[1, ] <- sort(temp)
  n2n[1, ] <- order(temp)
  x3d <- c(8, -1)
  x5d <- c(1, 0)
  temp <- apply(cbind(x1, x2, x3d, x4, x5d), 2, dfn, y = x2)
  n2d[2, ] <- sort(temp)
  n2n[2, ] <- order(temp)
  x2d <- c(9, 13)
  x4d <- c(0, 12)
  temp <- apply(cbind(x1, x2d, x3, x4d, x5), 2, dfn, y = x3)
  n2d[3, ] <- sort(temp)
  n2n[3, ] <- order(temp)
  x3d <- c(8, -1)
  x5d <- c(1, 0)
  temp <- apply(cbind(x1, x2, x3d, x4, x5d), 2, dfn, y = x4)
  n2d[4, ] <- sort(temp)
  n2n[4, ] <- order(temp)
  temp <- apply(cbind(x1, x2d, x3, x4d, x5), 2, dfn, y = x5)
  n2d[5, ] <- sort(temp)
  n2n[5, ] <- order(temp)
  res1 <- nnt(x, x, torus = 2, ranges = c(0, 10), method = 1)
  res2 <- nnt(x, x, torus = 2, ranges = c(0, 10), method = 2)
  test_that("Wrap on variable 2: indices vs method 1", {
    testthat::expect_equal(res1$nn.idx, n2n)
  })
  test_that("Wrap on variable 2: indices vs method 2", {
    testthat::expect_equal(res2$nn.idx, n2n)
  })
  test_that("Wrap on variable 2: distances vs method 1", {
    testthat::expect_equal(res1$nn.dists, n2d)
  })
  test_that("Wrap on variable 2: distances vs method 2", {
    testthat::expect_equal(res2$nn.dists, n2d)
  })
# Wrap on variables 1 and 2
n2d <- n2n <- matrix(NA, 5, 5)
temp <- apply(cbind(x1, x2, x3, x4, x5), 2, dfn, y = x1)
n2d[1, ] <- sort(temp)
n2n[1, ] <- order(temp)
x3d <- c(8, -1)
x4d <- c(10, 2)
x5d <- c(11, 0)
temp <- apply(cbind(x1, x2, x3d, x4d, x5d), 2, dfn, y = x2)
n2d[2, ] <- sort(temp)
n2n[2, ] <- order(temp)
x2d <- c(9, 13)
x4d <- c(10, 12)
x5d <- c(11, 10)
temp <- apply(cbind(x1, x2d, x3, x4d, x5d), 2, dfn, y = x3)
n2d[3, ] <- sort(temp)
n2n[3, ] <- order(temp)
x2d <- c(-1, 3)
x3d <- c(-2, -1)
x5d <- c(1, 0)
temp <- apply(cbind(x1, x2d, x3d, x4, x5d), 2, dfn, y = x4)
n2d[4, ] <- sort(temp)
n2n[4, ] <- order(temp)
x2d <- c(-1, 13)
x3d <- c(-2, 9)
x4d <- c(0, 12)
temp <- apply(cbind(x1, x2d, x3d, x4d, x5), 2, dfn, y = x5)
n2d[5, ] <- sort(temp)
n2n[5, ] <- order(temp)
ranges <- rbind(c(0, 10), c(0, 10))
res1 <- nnt(x, x, torus = 1:2, ranges = ranges, method = 1)
res2 <- nnt(x, x, torus = 1:2, ranges = ranges, method = 2)
test_that("Wrap on variable 2: indices vs method 1", {
testthat::expect_equal(res1$nn.idx, n2n)
})
test_that("Wrap on variable 2: indices vs method 2", {
testthat::expect_equal(res2$nn.idx, n2n)
})
test_that("Wrap on variable 2: distances vs method 1", {
testthat::expect_equal(res1$nn.dists, n2d)
})
test_that("Wrap on variable 2: distances vs method 2", {
testthat::expect_equal(res2$nn.dists, n2d)
})
}
|
# Shiny app: show the ten most popular baby names for a chosen sex and year.
library("shiny")
library("babynames")
library("dplyr")

# UI: sex selector, year slider, and a table of the top-10 names.
ui <- fluidPage(
  titlePanel("What's in a Name?"),
  selectInput("sex", "Select Sex", choices = c("F", "M")),
  sliderInput("year", "Select Year", min = 1900, max = 2010, value = 1900),
  tableOutput("table_top_10_names")
)

# Server: the table re-renders whenever either input changes.
server <- function(input, output, session) {
  # Top ten names (by proportion) for the currently selected sex/year;
  # `year` is converted to character so renderTable prints it plainly.
  fetch_top_names <- function() {
    babynames %>%
      filter(sex == input$sex, year == input$year) %>%
      top_n(10, prop) %>%
      mutate(year = paste(year))
  }
  output$table_top_10_names <- renderTable(fetch_top_names())
}

shinyApp(ui = ui, server = server)
# Shiny app: show the ten most popular baby names for a chosen sex and year.
library("babynames")
library("dplyr")

# UI: sex selector, year slider, and a table of the top-10 names.
ui <- fluidPage(
  titlePanel("What's in a Name?"),
  selectInput("sex", "Select Sex", choices = c("F", "M")),
  sliderInput("year", "Select Year", min = 1900, max = 2010, value = 1900),
  tableOutput("table_top_10_names")
)

# Server: the table re-renders whenever either input changes.
server <- function(input, output, session) {
  # Top ten names (by proportion) for the currently selected sex/year;
  # `year` is converted to character so renderTable prints it plainly.
  fetch_top_names <- function() {
    babynames %>%
      filter(sex == input$sex, year == input$year) %>%
      top_n(10, prop) %>%
      mutate(year = paste(year))
  }
  output$table_top_10_names <- renderTable(fetch_top_names())
}

shinyApp(ui = ui, server = server)
# Advent-of-Code style register-instruction interpreter: setup.
# NOTE(review): rm(list = ls()) wipes the caller's entire workspace --
# fine for a standalone script, avoid if this file is ever sourced.
rm(list = ls())
# One instruction per line; the loop below splits each line on " if ",
# so the expected shape is "<reg> inc|dec <n> if <reg> <op> <n>".
dat <- readr::read_lines(file.path("day 8", "input.txt"))
#dat <- readr::read_lines(file.path("day 8", "test.txt")) # nolint
library("dplyr")
library("stringr")
modFunction <- function(x) {
  # Translate an instruction's operation token into an arithmetic
  # operator so the whole string can later be eval'd,
  # e.g. "b inc 5" -> "b + 5".
  #
  # Fix: the original str_replace_all(x, c("inc" = "+", "dec" = "-"))
  # substituted "inc"/"dec" anywhere in the string, which would corrupt
  # register names containing those substrings (e.g. "winc inc 5" became
  # "w+ + 5"). Replacing only the space-delimited middle token is safe,
  # and base sub() with fixed = TRUE also drops the stringr dependency
  # for this helper. Still vectorised over x, like the original.
  x <- sub(" inc ", " + ", x, fixed = TRUE)
  x <- sub(" dec ", " - ", x, fixed = TRUE)
  return(x)
}
charExtract <- function(x) {
  # Extract every run of lowercase letters from x as a flat character
  # vector; used to pull register names out of expressions such as
  # "b + 5" or "a > 1". Base regmatches()/gregexpr() replace the
  # original stringr::str_extract_all() call, removing the third-party
  # dependency while returning the same unlisted result (including
  # character(0) when nothing matches).
  return(unlist(regmatches(x, gregexpr("[a-z]+", x))))
}
# Interpreter state. Registers are created lazily as variables in the
# global environment via assign(); registerVec records their names so all
# register values can be collected later.
# NOTE(review): `c <- q <- NA` pre-creates registers named "c" and "q",
# presumably because exists("c")/exists("q") would otherwise resolve to
# base::c()/base::q() -- confirm against the puzzle input.
c <- q <- NA
registerVec <- character()
maxVal <- 0   # highest value held by any register at any point in time
# NOTE(review): prefer seq_along(dat) over 1:length(dat) (safe on empty
# input).
for (i in 1:length(dat)) {
  # Split "<reg> inc|dec <n> if <reg> <op> <n>" into the modification
  # part (rewritten to "<reg> + <n>" / "<reg> - <n>") and the condition.
  instruction <- dat[i]
  inSplit <- unlist(strsplit(instruction, split = " if "))
  mod <- modFunction(inSplit[1])
  cond <- inSplit[2]
  # Ensure the modified register exists and is not NA; default value 0.
  if (!exists(charExtract(mod))) {
    assign(charExtract(mod), 0)
  }
  if (is.na(eval(parse(text = charExtract(mod))))) {
    assign(charExtract(mod), 0)
  }
  # Same initialisation for the register referenced by the condition.
  if (!exists(charExtract(cond))) {
    assign(charExtract(cond), 0)
  }
  if (is.na(eval(parse(text = charExtract(cond))))) {
    assign(charExtract(cond), 0)
  }
  # Track every register name seen so far.
  if (!charExtract(mod) %in% registerVec) {
    registerVec <- c(registerVec, charExtract(mod))
  }
  if (!charExtract(cond) %in% registerVec) {
    registerVec <- c(registerVec, charExtract(cond))
  }
  # process instructions
  # Apply the modification only when the condition evaluates TRUE.
  # NOTE(review): eval(parse(...)) executes file contents as code --
  # acceptable for a puzzle script, never for untrusted input.
  condEval <- eval(parse(text = cond))
  if (condEval) {
    eval(parse(text = paste0(charExtract(mod), " <- ", mod)))
  }
  # Running maximum over all registers after each instruction.
  # NOTE(review): ifelse() on a scalar -- plain if/else would be clearer.
  curVal <- max(eval(parse(text = paste0("c(",
    paste(registerVec, collapse = ", "), ")"))))
  maxVal <- ifelse(curVal > maxVal, curVal, maxVal)
}
# Name of the register holding the largest final value.
registerVec[which.max(eval(parse(text = paste0("c(", paste(registerVec, collapse = ", "), ")"))))]
# Highest value any register ever held during execution.
maxVal
| /day 8/day8.R | permissive | johnlocker/adventofcode2017 | R | false | false | 1,529 | r | rm(list = ls())
dat <- readr::read_lines(file.path("day 8", "input.txt"))
#dat <- readr::read_lines(file.path("day 8", "test.txt")) # nolint
library("dplyr")
library("stringr")
modFunction <- function(x) {
  # Translate an instruction's operation token into an arithmetic
  # operator so the whole string can later be eval'd,
  # e.g. "b inc 5" -> "b + 5".
  #
  # Fix: the original str_replace_all(x, c("inc" = "+", "dec" = "-"))
  # substituted "inc"/"dec" anywhere in the string, which would corrupt
  # register names containing those substrings (e.g. "winc inc 5" became
  # "w+ + 5"). Replacing only the space-delimited middle token is safe,
  # and base sub() with fixed = TRUE also drops the stringr dependency
  # for this helper. Still vectorised over x, like the original.
  x <- sub(" inc ", " + ", x, fixed = TRUE)
  x <- sub(" dec ", " - ", x, fixed = TRUE)
  return(x)
}
charExtract <- function(x) {
  # Extract every run of lowercase letters from x as a flat character
  # vector; used to pull register names out of expressions such as
  # "b + 5" or "a > 1". Base regmatches()/gregexpr() replace the
  # original stringr::str_extract_all() call, removing the third-party
  # dependency while returning the same unlisted result (including
  # character(0) when nothing matches).
  return(unlist(regmatches(x, gregexpr("[a-z]+", x))))
}
# Interpreter state. Registers are created lazily as variables in the
# global environment via assign(); registerVec records their names so all
# register values can be collected later.
# NOTE(review): `c <- q <- NA` pre-creates registers named "c" and "q",
# presumably because exists("c")/exists("q") would otherwise resolve to
# base::c()/base::q() -- confirm against the puzzle input.
c <- q <- NA
registerVec <- character()
maxVal <- 0   # highest value held by any register at any point in time
# NOTE(review): prefer seq_along(dat) over 1:length(dat) (safe on empty
# input).
for (i in 1:length(dat)) {
  # Split "<reg> inc|dec <n> if <reg> <op> <n>" into the modification
  # part (rewritten to "<reg> + <n>" / "<reg> - <n>") and the condition.
  instruction <- dat[i]
  inSplit <- unlist(strsplit(instruction, split = " if "))
  mod <- modFunction(inSplit[1])
  cond <- inSplit[2]
  # Ensure the modified register exists and is not NA; default value 0.
  if (!exists(charExtract(mod))) {
    assign(charExtract(mod), 0)
  }
  if (is.na(eval(parse(text = charExtract(mod))))) {
    assign(charExtract(mod), 0)
  }
  # Same initialisation for the register referenced by the condition.
  if (!exists(charExtract(cond))) {
    assign(charExtract(cond), 0)
  }
  if (is.na(eval(parse(text = charExtract(cond))))) {
    assign(charExtract(cond), 0)
  }
  # Track every register name seen so far.
  if (!charExtract(mod) %in% registerVec) {
    registerVec <- c(registerVec, charExtract(mod))
  }
  if (!charExtract(cond) %in% registerVec) {
    registerVec <- c(registerVec, charExtract(cond))
  }
  # process instructions
  # Apply the modification only when the condition evaluates TRUE.
  # NOTE(review): eval(parse(...)) executes file contents as code --
  # acceptable for a puzzle script, never for untrusted input.
  condEval <- eval(parse(text = cond))
  if (condEval) {
    eval(parse(text = paste0(charExtract(mod), " <- ", mod)))
  }
  # Running maximum over all registers after each instruction.
  # NOTE(review): ifelse() on a scalar -- plain if/else would be clearer.
  curVal <- max(eval(parse(text = paste0("c(",
    paste(registerVec, collapse = ", "), ")"))))
  maxVal <- ifelse(curVal > maxVal, curVal, maxVal)
}
# Name of the register holding the largest final value.
registerVec[which.max(eval(parse(text = paste0("c(", paste(registerVec, collapse = ", "), ")"))))]
# Highest value any register ever held during execution.
maxVal
|
library(dataRetrieval)
# Query the Water Quality Portal (WQP) "Station/search" service with the
# filter arguments supplied via ... and return the matching sites as a
# data.frame, or NULL (after emitting warnings) when no sites come back.
#
# Expected arguments are the WQP query parameters listed in `options`
# below. Every listed filter is assumed to be present: unset filters
# arrive at sentinel defaults (0, " ", FALSE, "0", Sys.Date()) and are
# stripped before the query is built.
# Depends on dataRetrieval being attached: checkWQPdates() and
# getWebServiceData() come from that package -- TODO confirm they are
# accessible in the installed version.
whatWQPsites_app <- function(...){
  matchReturn <- list(...)
  # Added from constructWQPurl()
  # Whitelist of recognised WQP query parameter names; anything else
  # triggers a warning (but is still passed through below).
  options <- c("bBox", "lat", "long", "within", "countrycode",
               "statecode", "countycode", "siteType", "organization",
               "siteid", "huc", "sampleMedia", "characteristicType",
               "characteristicName", "pCode", "activityId", "startDateLo",
               "startDateHi", "mimeType", "Zip", "providers")
  if (!all(names(matchReturn) %in% options))
    warning(matchReturn[!(names(matchReturn) %in% options)],
            "is not a valid query parameter to the Water Quality Portal")
  # Checks for user input filters - removes null or default filters
  # NOTE(review): each comparison below assumes the element exists and is
  # a scalar; an absent element yields `if (logical(0))`, which errors.
  if (0 %in% matchReturn$bBox)
    matchReturn$bBox<-NULL
  if (matchReturn$lat== 0 | matchReturn$lat== FALSE)
    matchReturn$lat<-NULL
  if (matchReturn$long== 0 | matchReturn$long== FALSE)
    matchReturn$long<-NULL
  if (matchReturn$within== 0 | matchReturn$within== FALSE)
    matchReturn$within<-NULL
  if (matchReturn$statecode[1] == " "| matchReturn$statecode== FALSE | matchReturn$statecode == "0")
    matchReturn$statecode<-NULL
  if (matchReturn$countycode == " "| matchReturn$countycode== FALSE)
    matchReturn$countycode<-NULL
  if (matchReturn$siteType == " "| matchReturn$siteType== FALSE)
    matchReturn$siteType<-NULL
  if (matchReturn$organization == " "| matchReturn$organization== FALSE)
    matchReturn$organization<-NULL
  if (matchReturn$siteid == " "| matchReturn$siteid== FALSE)
    matchReturn$siteid<-NULL
  if (matchReturn$huc==" "| matchReturn$huc== FALSE)
    matchReturn$huc<-NULL
  if (matchReturn$sampleMedia==" "| matchReturn$sampleMedia== FALSE)
    matchReturn$sampleMedia<-NULL
  if (matchReturn$characteristicType==" "| matchReturn$characteristicType== FALSE)
    matchReturn$characteristicType<-NULL
  if (matchReturn$characteristicName==" "| matchReturn$characteristicName== FALSE | is.null(matchReturn$characteristicName))
    matchReturn$characteristicName<-NULL
  # Both dates still equal to today's date is treated as "no date filter".
  if (matchReturn$startDateLo==Sys.Date() & matchReturn$startDateHi == Sys.Date()) {
    matchReturn$startDateLo<-NULL
    matchReturn$startDateHi<-NULL
  }
  # Collapse each remaining filter into one ";"-separated, URL-encoded
  # string. NOTE(review): eval(x) on user-supplied values looks
  # unnecessary (and unsafe for untrusted input) -- confirm.
  values <- sapply(matchReturn, function(x) URLencode(as.character(paste(eval(x),collapse=";",sep=""))))
  if("bBox" %in% names(values)){
    # bBox coordinates are comma-separated in the WQP API, not ";".
    values['bBox'] <- gsub(pattern = ";", replacement = ",", x = values['bBox'])
  }
  values <- checkWQPdates(values)
  # Accept siteNumber/siteNumbers as aliases for the WQP "siteid" key.
  names(values)[names(values) == "siteNumber"] <- "siteid"
  names(values)[names(values) == "siteNumbers"] <- "siteid"
  # if("statecode" %in% names(values)){
  #   stCd <- values["statecode"]
  #   if(!grepl("US:",stCd)){
  #     values["statecode"] <- paste0("US:",stateCdLookup(stCd, "id"))
  #   }
  # }
  # if("statecode" %in% names(values)){
  #   values["statecode"] <- as.list(values["statecode"] )
  # }
  #
  # if("stateCd" %in% names(values)){
  #   stCd <- values["stateCd"]
  #   if(!grepl("US:",stCd)){
  #     values["stateCd"] <- paste0("US:",stateCdLookup(stCd, "id"))
  #   }
  #   names(values)[names(values) == "stateCd"] <- "statecode"
  # }
  # Validate an optional tz argument against the supported zones, then
  # drop it from the query string. NOTE(review): `tz` is never used
  # after this block -- looks vestigial; confirm before removing.
  if("tz" %in% names(values)){
    tz <- values["tz"]
    if(tz != ""){
      rTZ <- c("America/New_York","America/Chicago",
               "America/Denver","America/Los_Angeles",
               "America/Anchorage","America/Honolulu",
               "America/Jamaica","America/Managua",
               "America/Phoenix","America/Metlakatla","UTC")
      tz <- match.arg(tz, rTZ)
      if("UTC" == tz) tz <- ""
    }
    values <- values[!(names(values) %in% "tz")]
  } else {
    tz <- ""
  }
  # Final manual URL-encoding fix-ups on the assembled values.
  # NOTE(review): fixed="TRUE" passes a string where gsub() expects a
  # logical; it happens to behave like TRUE but should read fixed=TRUE.
  values <- gsub(",","%2C",values)
  values <- gsub("%20","+",values)
  values <- gsub(":","%3A",values)
  values <- gsub("c(","",values, fixed="TRUE")
  values <- gsub('""',"",values, fixed="TRUE")
  # NOTE(review): checkWQPdates() was already applied above -- duplicate
  # call; confirm it is idempotent or drop one of the two.
  values <- checkWQPdates(values)
  # Assemble the final GET URL, requesting tab-separated output.
  urlCall <- paste(paste(names(values),values,sep="="),collapse="&")
  baseURL <- "http://www.waterqualitydata.us/Station/search?"
  urlCall <- paste(baseURL,
                   urlCall,
                   "&mimeType=tsv&sorted=no",sep = "")
  doc <- getWebServiceData(urlCall)
  headerInfo <- attr(doc, "headerInfo")
  numToBeReturned <- as.numeric(headerInfo["Total-Site-Count"])
  if (!is.na(numToBeReturned) & numToBeReturned != 0){
    # Parse the TSV body: everything read as character, then the two
    # coordinate columns converted to numeric below.
    retval <- read.delim(textConnection(doc), header = TRUE,
                         dec=".", sep='\t', quote="",
                         colClasses=c('character'),
                         fill = TRUE)
    actualNumReturned <- nrow(retval)
    if(actualNumReturned != numToBeReturned) warning(numToBeReturned, " sites were expected, ", actualNumReturned, " were returned")
    if("LatitudeMeasure" %in% names(retval)){
      retval$LatitudeMeasure <- as.numeric(retval$LatitudeMeasure)
    }
    if("LongitudeMeasure" %in% names(retval)){
      retval$LongitudeMeasure <- as.numeric(retval$LongitudeMeasure)
    }
    retval$queryTime <- Sys.time()
    return(retval)
  } else {
    # No usable site count: surface server-side warnings and fall
    # through, returning NULL invisibly.
    if(headerInfo['Total-Site-Count'] == "0"){
      warning("No data returned")
    }
    for(i in grep("Warning",names(headerInfo))){
      warning(headerInfo[i])
    }
  }
} | /external/whatWQPsites_app.R | no_license | USEPA/Water-Quality-Data-Discovery-Tool | R | false | false | 5,380 | r | library(dataRetrieval)
# Query the Water Quality Portal (WQP) "Station/search" service with the
# filter arguments supplied via ... and return the matching sites as a
# data.frame, or NULL (after emitting warnings) when no sites come back.
#
# Expected arguments are the WQP query parameters listed in `options`
# below. Every listed filter is assumed to be present: unset filters
# arrive at sentinel defaults (0, " ", FALSE, "0", Sys.Date()) and are
# stripped before the query is built.
# Depends on dataRetrieval being attached: checkWQPdates() and
# getWebServiceData() come from that package -- TODO confirm they are
# accessible in the installed version.
whatWQPsites_app <- function(...){
  matchReturn <- list(...)
  # Added from constructWQPurl()
  # Whitelist of recognised WQP query parameter names; anything else
  # triggers a warning (but is still passed through below).
  options <- c("bBox", "lat", "long", "within", "countrycode",
               "statecode", "countycode", "siteType", "organization",
               "siteid", "huc", "sampleMedia", "characteristicType",
               "characteristicName", "pCode", "activityId", "startDateLo",
               "startDateHi", "mimeType", "Zip", "providers")
  if (!all(names(matchReturn) %in% options))
    warning(matchReturn[!(names(matchReturn) %in% options)],
            "is not a valid query parameter to the Water Quality Portal")
  # Checks for user input filters - removes null or default filters
  # NOTE(review): each comparison below assumes the element exists and is
  # a scalar; an absent element yields `if (logical(0))`, which errors.
  if (0 %in% matchReturn$bBox)
    matchReturn$bBox<-NULL
  if (matchReturn$lat== 0 | matchReturn$lat== FALSE)
    matchReturn$lat<-NULL
  if (matchReturn$long== 0 | matchReturn$long== FALSE)
    matchReturn$long<-NULL
  if (matchReturn$within== 0 | matchReturn$within== FALSE)
    matchReturn$within<-NULL
  if (matchReturn$statecode[1] == " "| matchReturn$statecode== FALSE | matchReturn$statecode == "0")
    matchReturn$statecode<-NULL
  if (matchReturn$countycode == " "| matchReturn$countycode== FALSE)
    matchReturn$countycode<-NULL
  if (matchReturn$siteType == " "| matchReturn$siteType== FALSE)
    matchReturn$siteType<-NULL
  if (matchReturn$organization == " "| matchReturn$organization== FALSE)
    matchReturn$organization<-NULL
  if (matchReturn$siteid == " "| matchReturn$siteid== FALSE)
    matchReturn$siteid<-NULL
  if (matchReturn$huc==" "| matchReturn$huc== FALSE)
    matchReturn$huc<-NULL
  if (matchReturn$sampleMedia==" "| matchReturn$sampleMedia== FALSE)
    matchReturn$sampleMedia<-NULL
  if (matchReturn$characteristicType==" "| matchReturn$characteristicType== FALSE)
    matchReturn$characteristicType<-NULL
  if (matchReturn$characteristicName==" "| matchReturn$characteristicName== FALSE | is.null(matchReturn$characteristicName))
    matchReturn$characteristicName<-NULL
  # Both dates still equal to today's date is treated as "no date filter".
  if (matchReturn$startDateLo==Sys.Date() & matchReturn$startDateHi == Sys.Date()) {
    matchReturn$startDateLo<-NULL
    matchReturn$startDateHi<-NULL
  }
  # Collapse each remaining filter into one ";"-separated, URL-encoded
  # string. NOTE(review): eval(x) on user-supplied values looks
  # unnecessary (and unsafe for untrusted input) -- confirm.
  values <- sapply(matchReturn, function(x) URLencode(as.character(paste(eval(x),collapse=";",sep=""))))
  if("bBox" %in% names(values)){
    # bBox coordinates are comma-separated in the WQP API, not ";".
    values['bBox'] <- gsub(pattern = ";", replacement = ",", x = values['bBox'])
  }
  values <- checkWQPdates(values)
  # Accept siteNumber/siteNumbers as aliases for the WQP "siteid" key.
  names(values)[names(values) == "siteNumber"] <- "siteid"
  names(values)[names(values) == "siteNumbers"] <- "siteid"
  # if("statecode" %in% names(values)){
  #   stCd <- values["statecode"]
  #   if(!grepl("US:",stCd)){
  #     values["statecode"] <- paste0("US:",stateCdLookup(stCd, "id"))
  #   }
  # }
  # if("statecode" %in% names(values)){
  #   values["statecode"] <- as.list(values["statecode"] )
  # }
  #
  # if("stateCd" %in% names(values)){
  #   stCd <- values["stateCd"]
  #   if(!grepl("US:",stCd)){
  #     values["stateCd"] <- paste0("US:",stateCdLookup(stCd, "id"))
  #   }
  #   names(values)[names(values) == "stateCd"] <- "statecode"
  # }
  # Validate an optional tz argument against the supported zones, then
  # drop it from the query string. NOTE(review): `tz` is never used
  # after this block -- looks vestigial; confirm before removing.
  if("tz" %in% names(values)){
    tz <- values["tz"]
    if(tz != ""){
      rTZ <- c("America/New_York","America/Chicago",
               "America/Denver","America/Los_Angeles",
               "America/Anchorage","America/Honolulu",
               "America/Jamaica","America/Managua",
               "America/Phoenix","America/Metlakatla","UTC")
      tz <- match.arg(tz, rTZ)
      if("UTC" == tz) tz <- ""
    }
    values <- values[!(names(values) %in% "tz")]
  } else {
    tz <- ""
  }
  # Final manual URL-encoding fix-ups on the assembled values.
  # NOTE(review): fixed="TRUE" passes a string where gsub() expects a
  # logical; it happens to behave like TRUE but should read fixed=TRUE.
  values <- gsub(",","%2C",values)
  values <- gsub("%20","+",values)
  values <- gsub(":","%3A",values)
  values <- gsub("c(","",values, fixed="TRUE")
  values <- gsub('""',"",values, fixed="TRUE")
  # NOTE(review): checkWQPdates() was already applied above -- duplicate
  # call; confirm it is idempotent or drop one of the two.
  values <- checkWQPdates(values)
  # Assemble the final GET URL, requesting tab-separated output.
  urlCall <- paste(paste(names(values),values,sep="="),collapse="&")
  baseURL <- "http://www.waterqualitydata.us/Station/search?"
  urlCall <- paste(baseURL,
                   urlCall,
                   "&mimeType=tsv&sorted=no",sep = "")
  doc <- getWebServiceData(urlCall)
  headerInfo <- attr(doc, "headerInfo")
  numToBeReturned <- as.numeric(headerInfo["Total-Site-Count"])
  if (!is.na(numToBeReturned) & numToBeReturned != 0){
    # Parse the TSV body: everything read as character, then the two
    # coordinate columns converted to numeric below.
    retval <- read.delim(textConnection(doc), header = TRUE,
                         dec=".", sep='\t', quote="",
                         colClasses=c('character'),
                         fill = TRUE)
    actualNumReturned <- nrow(retval)
    if(actualNumReturned != numToBeReturned) warning(numToBeReturned, " sites were expected, ", actualNumReturned, " were returned")
    if("LatitudeMeasure" %in% names(retval)){
      retval$LatitudeMeasure <- as.numeric(retval$LatitudeMeasure)
    }
    if("LongitudeMeasure" %in% names(retval)){
      retval$LongitudeMeasure <- as.numeric(retval$LongitudeMeasure)
    }
    retval$queryTime <- Sys.time()
    return(retval)
  } else {
    # No usable site count: surface server-side warnings and fall
    # through, returning NULL invisibly.
    if(headerInfo['Total-Site-Count'] == "0"){
      warning("No data returned")
    }
    for(i in grep("Warning",names(headerInfo))){
      warning(headerInfo[i])
    }
  }
} |
testlist <- list(phi = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), x = c(1.36656528938164e-311, -1.65791256519293e+82, 1.29418168595419e-228, -1.85353502606261e+293, 8.08855267383463e-84, -4.03929894096111e-178, 6.04817943207006e-103, -1.66738461804717e-220, -8.8217241872956e-21, -7.84828807007467e-146, -7.48864562038427e+21, -1.00905374512e-187, 5.22970923741951e-218, 2.77992264324548e-197, -5.29147138122706e+140, -1.71332436886848e-93, -1.52261021137076e-52, 2.0627472502345e-21, 1.07149136185465e+184, 4.41748962512848e+47, -4.05885894997926e-142))
result <- do.call(dcurver:::ddc,testlist)
str(result) | /dcurver/inst/testfiles/ddc/AFL_ddc/ddc_valgrind_files/1609866987-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 831 | r | testlist <- list(phi = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), x = c(1.36656528938164e-311, -1.65791256519293e+82, 1.29418168595419e-228, -1.85353502606261e+293, 8.08855267383463e-84, -4.03929894096111e-178, 6.04817943207006e-103, -1.66738461804717e-220, -8.8217241872956e-21, -7.84828807007467e-146, -7.48864562038427e+21, -1.00905374512e-187, 5.22970923741951e-218, 2.77992264324548e-197, -5.29147138122706e+140, -1.71332436886848e-93, -1.52261021137076e-52, 2.0627472502345e-21, 1.07149136185465e+184, 4.41748962512848e+47, -4.05885894997926e-142))
result <- do.call(dcurver:::ddc,testlist)
str(result) |
# Create a 1/null "presence" mask raster in GRASS: cells where raster
# `x` has data become 1; elsewhere the expression evaluates log(-1),
# which presumably yields null in r.mapcalc map algebra -- TODO confirm.
#
# x         name of the input raster
# o         name of the output raster to create
# overwrite allow r.mapcalc to overwrite an existing output raster
#
# Side effect only: runs r.mapcalc through execGRASS (GRASS interface).
unary <- function(x, o, overwrite = FALSE){
  # Map-algebra expression, e.g. "out = int(if(!isnull(in), 1, log(-1)))"
  expr_calc <- paste0(o, " = int(if(!isnull(", x, "), 1, log(-1)))")
  flags <- "quiet"
  if(overwrite) flags <- c(flags, "overwrite")
  execGRASS(
    "r.mapcalc",
    flags = flags,
    parameters = list(expression = expr_calc)
  )
} | /R/unary.R | no_license | cran/rdwplus | R | false | false | 288 | r | unary <- function(x, o, overwrite = FALSE){
  # Map-algebra expression "<o> = int(if(!isnull(<x>), 1, log(-1)))":
  # 1 where x has data; log(-1) presumably evaluates to null in
  # r.mapcalc -- TODO confirm.
  expr_calc <- paste0(o, " = int(if(!isnull(", x, "), 1, log(-1)))")
  flags <- "quiet"
  # Add the overwrite flag so r.mapcalc may clobber an existing output.
  if(overwrite) flags <- c(flags, "overwrite")
  execGRASS(
    "r.mapcalc",
    flags = flags,
    parameters = list(expression = expr_calc)
  )
} |
#' wrangle.TablesList
#' Import data table list into R (according source_file description) and wrangle into new data table list following wrangle_parameter_file description.
#'
#'@param wrangle_parameter file of parameter data frame (see format)
#'@param sources_file name of data frame on which foreign key constraint is checked
#'@return list of data table
#'
#'@import data.table
#'@import lubridate
#'@import stringr
#'@import readxl
#'
#' @export
#'
#'
wrangle.TablesList<-function(wrangler_parameter , table_list, call_customFunction){
if(is.list(table_list)){
raw<-table_list
}else{
stop("[wrangle.TablesList] table_list argument isn't type of list")
}
wrangler<-importWranglerParameter(wrangler_parameter)
if(nrow(wrangler)==0){
stop("[wrangle.Files] empty wrangler parameters")
}
#--- check before wrangler execution
source_table_name<-unique(wrangler$source_table)
target_table_name<-unique(wrangler$target_table)
if(length(which(source_table_name%in%names(raw)))>length(names(raw))){
stop("[wrangle.Files] all source table not identified in source_file")
}else{
for(source in source_table_name){
attSource<-colnames(raw[[source]])
attWParamSource<-unique(unlist( str_split(
wrangler$source_field[which(wrangler$source_table == source)],pattern='[|]')))
if(length(which(!attWParamSource%in%attSource))>0){
stop(paste("[wrangle.Files] table source",paste(source,
sep = " doesn't contains some column used in wrangler parameter ",
attWParamSource[which(!attWParamSource%in%attSource)])))
}
}
}
#--- check implementation of necessary function
necessaryFunction<-unlist(unique(c(wrangler[,c("mapper","check_fail","ref_fail")])))
necessaryCheck<-unlist(lapply(necessaryFunction, FUN = function(x){
if(is.na(x)){ return(TRUE)}else{
return(exists(x,mode='function'))}
}))
stopifnot(all(necessaryCheck))
#--- wrangle
tables<-list()
##--- call universally function
if(!is.null(call_customFunction)&& is.data.frame(call_customFunction)&& length(
which(c("table_name","function_name")%in%colnames(call_customFunction)))==2){
for(i in nrow(call_customFunction)){
f_name<-call_customFunction$function_name[i]
if(exists(x=f_name,mode='function') && !is.na(call_customFunction$table_name[i])){
raw[[call_customFunction$table_name[i]]]<-get(call_customFunction$function_name[i])(raw)
tables[[call_customFunction$table_name[i]]]<-get(call_customFunction$function_name[i])(raw)
}else{
print(paste("[Wrangle Pre-process] error in custom_call_data at line",i))
}
}
}
for(t in target_table_name){
i<-which(wrangler$target_table == t)
map <- wrangler[i,]
id_field <- map$target_field[which(map$check=="as.ID")]
if(length(id_field)==1){
id_map<-map[which(map$check=="as.ID"),]
} else{
#### split error
if(length(id_field)==0){
error_check<-"no"} else
{error_check<-"multiple"}
stop(paste("[wrangle.Files]",paste(error_check,
sep=" id specified for same source table (as.ID in check field) ", t)))
}
#--- gère des mapper TODO ----
if(is.na(id_map$source_field)) {
# Multi-column mapper
x <- get(id_map$mapper)(raw[[id_map$source_table]])
} else {
# Direct mapper
fieldSel<-unlist(str_split(id_map$source_field,pattern="[|]"))
if(length(fieldSel)>1){
x <- get(id_map$mapper)(raw[[id_map$source_table]][,fieldSel, with=FALSE])
tables[[id_map$target_table]] <- x
}else{
x <- get(id_map$mapper)(raw[[id_map$source_table]][[id_map$source_field]])
tables[[id_map$target_table]] <- data.table(id = x)
setnames(tables[[id_map$target_table]],c(id_field))
}
}
for(att in which(map$target_field != id_field)) {
att_map<-map[att,]
if(is.na(att_map$source_field)) {
# Multi-column mapper
x <- get(att_map$mapper)(raw[[att_map$source_table]])#[ft])
} else if(!att_map$source_field%in% colnames(raw[[att_map$source_table]])){
atts<-unlist(str_split(att_map$source_field,pattern="|"))
x <- get(att_map$mapper)(raw[[att_map$source_table]][[atts]])#[ft])
}else if(!is.na(att_map$ref_table)){
# Direct mapper #
x <- get(att_map$mapper)(x=raw[[att_map$source_table]][[att_map$source_field]]#[ft]
,table = tables[[att_map$ref_table]])
}else{
#### Nomenclature table management
x <- get(att_map$mapper)(raw[[att_map$source_table]][[att_map$source_field]])#[ft])
}
if(length(x)>dim(tables[[att_map$target_table]])[1]){
tables[[att_map$target_table]][, c(att_map$target_field) := unique(x)]
}else{
tables[[att_map$target_table]][, c(att_map$target_field) := x]
}
if(!is.na(att_map$ref_field)&& !is.na(att_map$ref_table)){
foreignKeyConstraint(tables,att_map$target_field, att_map$target_table, att_map$ref_field, att_map$ref_table, att_map$`ref_fail`)
}else if (!is.na(att_map$ref_field) || !is.na(att_map$ref_table)) {
warnings(paste("[Foreign constraint missing required data]",sep=" ",
paste(att_map$target_table,sep="$",att_map$target_field)))
}else{
print(paste("[No foreign constraint]",sep=" ",
paste(att_map$target_table,sep="$",att_map$target_field)))
}
}
}
print("##### End #####")
return(tables)
}
#' wrangle.Files
#' Import data table list into R (according source_file description) and wrangle into new data table list following wrangle_parameter_file description.
#'
#'@param wrangle_parameter file of parameter data frame (see format)
#'@param sources_file name of data frame on which foreign key constraint is checked
#'@return list of data table
#'
#'@import data.table
#'@import lubridate
#'@import stringr
#'@import readxl
#'
#' @export
#'
#'
wrangle.Files<-function(wrangler_parameter , sources_file, call_customFunction){
raw<-importTableFromSource(sources_file)
tables<- wrangle.TablesList(wrangler_parameter , table_list=raw, call_customFunction)
return(tables)
}
#' importTableFromSource
#' Import data table list into R
#'
#'@param sources_file parameter file to source data (see source_format)
#'@return table list
#'@import data.table
#'@import readxl
#'
#'
#' @export
#'
importTableFromSource<-function(parameter, useAll = FALSE){
source_data<-read.csv(parameter,stringsAsFactors = FALSE)
source_data$sourcePath<-str_squish(source_data$sourcePath)
print(source_data$sourcePath)
col_source<-colnames(get_sources_file())
if(length(which(colnames(source_data)%in%col_source))<length(col_source)){
stop("[importTableFromSource] bad source file format")
}
files<-source_data$sourcePath[which(file_test("-f",source_data$sourcePath))]
raw_data<-vector("list", length(unique(source_data$tableName)))
names(raw_data)<-unique(source_data$tableName)
if(useAll && length(files)<nrow(source_data)){
stop("[importTableFromSource] file missing error ", sep=" : ",
source_data$sourcePath[!which(file_test("-f",source_data$sourcePath))])
}
else if(length(files)<nrow(source_data) ){
warning("[importTableFromSource] file missing warning, some following table will not be filled in return list()", sep=" : ",
source_data$tableName[!which(file_test("-f",source_data$sourcePath))])
}
for(f in files){
tableName<-source_data$tableName[which(source_data$sourcePath == f)]
extension<-str_extract(f, "[.][a-zA-Z]*$")
if(extension == ".csv"){
data<-read.csv(f)
}else if(extension %in% c(".xls",".xlsx")){
data<-as.data.frame(read_excel( f))
}else{
warning(paste("[importTableFromSource] unreconized extension",
paste(extension, sep=" for file ",f)))
}
if(is.null(raw_data[[tableName]])){
raw_data[[tableName]]<-data
}else if(is.data.frame(raw_data[[tableName]])) {
colnamesRD<-colnames(raw_data[[tableName]])
if(length(which(colnames(data)%in%colnamesRD)) == length(data)){
raw_data[[tableName]]<-rbind(raw_data[[tableName]], data)
}else{
raw_data[[tableName]]<-rbind(raw_data[[tableName]], data[,which(colnames(data)%in%colnamesRD)])
warning(paste("[importTableFromSource] data from ",
paste(f, sep=" limited extraction due to column missing in previous file used to fill same table. \n Column insert limitation to : ",colnamesRD)))
}
}
}
raw_data<-lapply(raw_data, FUN = function(x){
return(as.data.table(x))
})
return(raw_data)
}
#' importWranglerParameter
#' Get wrangler parameter data from source file
#'
#'@param wrangler_parameter wrangler parameter file to transform data (excel or csv-comma)
#'@return wrangler data frame
#'@import readxl
#'
#'
#' @export
#'
importWranglerParameter<-function(wrangler_parameter){
wrangler<-data.frame()
if(! file_test("-f",wrangler_parameter)){
stop(paste("[importWranglerParameter] wrangler parameter file doesn't exist",wrangler_parameter))
}
extension<-str_extract(wrangler_parameter, "[.][a-zA-Z]*$")
if(extension == ".csv"){
wrangler<-read.csv(wrangler_parameter)
}else if(extension %in% c(".xls",".xlsx")){
wrangler<-as.data.frame(read_excel(wrangler_parameter),stringsAsFactors=FALSE)
}else{
warning(paste("[importWranglerParameter] unreconized extension",
paste(extension, sep=" for file ",wrangler_parameter)))
}
col_wrangler<-colnames(get_wrangler_parameter_file())
if(length(which(colnames(wrangler)%in%col_wrangler))<length(col_wrangler)){
stop("[importWranglerParameter] bad source file format")
}
return(wrangler)
}
| /project/R/DataWrangle.R | permissive | melissachamary/RDataWrangler | R | false | false | 10,040 | r | #' wrangle.TablesList
#' Import data table list into R (according source_file description) and wrangle into new data table list following wrangle_parameter_file description.
#'
#'@param wrangle_parameter file of parameter data frame (see format)
#'@param sources_file name of data frame on which foreign key constraint is checked
#'@return list of data table
#'
#'@import data.table
#'@import lubridate
#'@import stringr
#'@import readxl
#'
#' @export
#'
#'
wrangle.TablesList<-function(wrangler_parameter , table_list, call_customFunction){
if(is.list(table_list)){
raw<-table_list
}else{
stop("[wrangle.TablesList] table_list argument isn't type of list")
}
wrangler<-importWranglerParameter(wrangler_parameter)
if(nrow(wrangler)==0){
stop("[wrangle.Files] empty wrangler parameters")
}
#--- check before wrangler execution
source_table_name<-unique(wrangler$source_table)
target_table_name<-unique(wrangler$target_table)
if(length(which(source_table_name%in%names(raw)))>length(names(raw))){
stop("[wrangle.Files] all source table not identified in source_file")
}else{
for(source in source_table_name){
attSource<-colnames(raw[[source]])
attWParamSource<-unique(unlist( str_split(
wrangler$source_field[which(wrangler$source_table == source)],pattern='[|]')))
if(length(which(!attWParamSource%in%attSource))>0){
stop(paste("[wrangle.Files] table source",paste(source,
sep = " doesn't contains some column used in wrangler parameter ",
attWParamSource[which(!attWParamSource%in%attSource)])))
}
}
}
#--- check implementation of necessary function
necessaryFunction<-unlist(unique(c(wrangler[,c("mapper","check_fail","ref_fail")])))
necessaryCheck<-unlist(lapply(necessaryFunction, FUN = function(x){
if(is.na(x)){ return(TRUE)}else{
return(exists(x,mode='function'))}
}))
stopifnot(all(necessaryCheck))
#--- wrangle
tables<-list()
##--- call universally function
if(!is.null(call_customFunction)&& is.data.frame(call_customFunction)&& length(
which(c("table_name","function_name")%in%colnames(call_customFunction)))==2){
for(i in nrow(call_customFunction)){
f_name<-call_customFunction$function_name[i]
if(exists(x=f_name,mode='function') && !is.na(call_customFunction$table_name[i])){
raw[[call_customFunction$table_name[i]]]<-get(call_customFunction$function_name[i])(raw)
tables[[call_customFunction$table_name[i]]]<-get(call_customFunction$function_name[i])(raw)
}else{
print(paste("[Wrangle Pre-process] error in custom_call_data at line",i))
}
}
}
for(t in target_table_name){
i<-which(wrangler$target_table == t)
map <- wrangler[i,]
id_field <- map$target_field[which(map$check=="as.ID")]
if(length(id_field)==1){
id_map<-map[which(map$check=="as.ID"),]
} else{
#### split error
if(length(id_field)==0){
error_check<-"no"} else
{error_check<-"multiple"}
stop(paste("[wrangle.Files]",paste(error_check,
sep=" id specified for same source table (as.ID in check field) ", t)))
}
#--- gère des mapper TODO ----
if(is.na(id_map$source_field)) {
# Multi-column mapper
x <- get(id_map$mapper)(raw[[id_map$source_table]])
} else {
# Direct mapper
fieldSel<-unlist(str_split(id_map$source_field,pattern="[|]"))
if(length(fieldSel)>1){
x <- get(id_map$mapper)(raw[[id_map$source_table]][,fieldSel, with=FALSE])
tables[[id_map$target_table]] <- x
}else{
x <- get(id_map$mapper)(raw[[id_map$source_table]][[id_map$source_field]])
tables[[id_map$target_table]] <- data.table(id = x)
setnames(tables[[id_map$target_table]],c(id_field))
}
}
for(att in which(map$target_field != id_field)) {
att_map<-map[att,]
if(is.na(att_map$source_field)) {
# Multi-column mapper
x <- get(att_map$mapper)(raw[[att_map$source_table]])#[ft])
} else if(!att_map$source_field%in% colnames(raw[[att_map$source_table]])){
atts<-unlist(str_split(att_map$source_field,pattern="|"))
x <- get(att_map$mapper)(raw[[att_map$source_table]][[atts]])#[ft])
}else if(!is.na(att_map$ref_table)){
# Direct mapper #
x <- get(att_map$mapper)(x=raw[[att_map$source_table]][[att_map$source_field]]#[ft]
,table = tables[[att_map$ref_table]])
}else{
#### Nomenclature table management
x <- get(att_map$mapper)(raw[[att_map$source_table]][[att_map$source_field]])#[ft])
}
if(length(x)>dim(tables[[att_map$target_table]])[1]){
tables[[att_map$target_table]][, c(att_map$target_field) := unique(x)]
}else{
tables[[att_map$target_table]][, c(att_map$target_field) := x]
}
if(!is.na(att_map$ref_field)&& !is.na(att_map$ref_table)){
foreignKeyConstraint(tables,att_map$target_field, att_map$target_table, att_map$ref_field, att_map$ref_table, att_map$`ref_fail`)
}else if (!is.na(att_map$ref_field) || !is.na(att_map$ref_table)) {
warnings(paste("[Foreign constraint missing required data]",sep=" ",
paste(att_map$target_table,sep="$",att_map$target_field)))
}else{
print(paste("[No foreign constraint]",sep=" ",
paste(att_map$target_table,sep="$",att_map$target_field)))
}
}
}
print("##### End #####")
return(tables)
}
#' wrangle.Files
#' Import data table list into R (according source_file description) and wrangle into new data table list following wrangle_parameter_file description.
#'
#'@param wrangle_parameter file of parameter data frame (see format)
#'@param sources_file name of data frame on which foreign key constraint is checked
#'@return list of data table
#'
#'@import data.table
#'@import lubridate
#'@import stringr
#'@import readxl
#'
#' @export
#'
#'
wrangle.Files<-function(wrangler_parameter , sources_file, call_customFunction){
raw<-importTableFromSource(sources_file)
tables<- wrangle.TablesList(wrangler_parameter , table_list=raw, call_customFunction)
return(tables)
}
#' importTableFromSource
#' Import data table list into R
#'
#'@param sources_file parameter file to source data (see source_format)
#'@return table list
#'@import data.table
#'@import readxl
#'
#'
#' @export
#'
importTableFromSource<-function(parameter, useAll = FALSE){
source_data<-read.csv(parameter,stringsAsFactors = FALSE)
source_data$sourcePath<-str_squish(source_data$sourcePath)
print(source_data$sourcePath)
col_source<-colnames(get_sources_file())
if(length(which(colnames(source_data)%in%col_source))<length(col_source)){
stop("[importTableFromSource] bad source file format")
}
files<-source_data$sourcePath[which(file_test("-f",source_data$sourcePath))]
raw_data<-vector("list", length(unique(source_data$tableName)))
names(raw_data)<-unique(source_data$tableName)
if(useAll && length(files)<nrow(source_data)){
stop("[importTableFromSource] file missing error ", sep=" : ",
source_data$sourcePath[!which(file_test("-f",source_data$sourcePath))])
}
else if(length(files)<nrow(source_data) ){
warning("[importTableFromSource] file missing warning, some following table will not be filled in return list()", sep=" : ",
source_data$tableName[!which(file_test("-f",source_data$sourcePath))])
}
for(f in files){
tableName<-source_data$tableName[which(source_data$sourcePath == f)]
extension<-str_extract(f, "[.][a-zA-Z]*$")
if(extension == ".csv"){
data<-read.csv(f)
}else if(extension %in% c(".xls",".xlsx")){
data<-as.data.frame(read_excel( f))
}else{
warning(paste("[importTableFromSource] unreconized extension",
paste(extension, sep=" for file ",f)))
}
if(is.null(raw_data[[tableName]])){
raw_data[[tableName]]<-data
}else if(is.data.frame(raw_data[[tableName]])) {
colnamesRD<-colnames(raw_data[[tableName]])
if(length(which(colnames(data)%in%colnamesRD)) == length(data)){
raw_data[[tableName]]<-rbind(raw_data[[tableName]], data)
}else{
raw_data[[tableName]]<-rbind(raw_data[[tableName]], data[,which(colnames(data)%in%colnamesRD)])
warning(paste("[importTableFromSource] data from ",
paste(f, sep=" limited extraction due to column missing in previous file used to fill same table. \n Column insert limitation to : ",colnamesRD)))
}
}
}
raw_data<-lapply(raw_data, FUN = function(x){
return(as.data.table(x))
})
return(raw_data)
}
#' importWranglerParameter
#' Get wrangler parameter data from source file
#'
#'@param wrangler_parameter wrangler parameter file to transform data (excel or csv-comma)
#'@return wrangler data frame
#'@import readxl
#'
#'
#' @export
#'
importWranglerParameter<-function(wrangler_parameter){
wrangler<-data.frame()
if(! file_test("-f",wrangler_parameter)){
stop(paste("[importWranglerParameter] wrangler parameter file doesn't exist",wrangler_parameter))
}
extension<-str_extract(wrangler_parameter, "[.][a-zA-Z]*$")
if(extension == ".csv"){
wrangler<-read.csv(wrangler_parameter)
}else if(extension %in% c(".xls",".xlsx")){
wrangler<-as.data.frame(read_excel(wrangler_parameter),stringsAsFactors=FALSE)
}else{
warning(paste("[importWranglerParameter] unreconized extension",
paste(extension, sep=" for file ",wrangler_parameter)))
}
col_wrangler<-colnames(get_wrangler_parameter_file())
if(length(which(colnames(wrangler)%in%col_wrangler))<length(col_wrangler)){
stop("[importWranglerParameter] bad source file format")
}
return(wrangler)
}
|
\name{fitin.ppm}
\alias{fitin}
\alias{fitin.ppm}
\alias{fitin.profilepl}
\title{Extract the Interaction from a Fitted Point Process Model}
\description{
Given a point process model that has been fitted to point pattern
data, this function extracts the interpoint interaction part of the
model as a separate object.
}
\usage{
fitin(object)
\method{fitin}{ppm}(object)
\method{fitin}{profilepl}(object)
}
\arguments{
\item{object}{A fitted point process model (object of class
\code{"ppm"} or \code{"profilepl"}).
}
}
\details{
An object of class \code{"ppm"} describes a fitted point process
model. It contains information about the original data to which the
model was fitted, the spatial trend that was fitted, the
interpoint interaction that was fitted, and other data.
See \code{\link{ppm.object}}) for details of this class.
The function \code{fitin} extracts from this model the information about the
fitted interpoint interaction only.
The information is organised as an object of class \code{"fii"}
(fitted interpoint interaction).
This object can be printed or plotted.
Users may find this a convenient way to plot the
fitted interpoint interaction term, as shown in the Examples.
For a pairwise interaction, the plot of the fitted interaction
shows the pair interaction function (the contribution to the
probability density from a pair of points as a function of the
distance between them). For a higher-order interaction, the plot shows
the strongest interaction (the value most different from 1)
that could ever arise at the given distance.
The fitted interaction coefficients can also be extracted
from this object using \code{\link{coef}}.
}
\value{
An object of class \code{"fii"} representing the fitted
interpoint interaction. This object can be printed and plotted.
}
\author{
\spatstatAuthors.
}
\seealso{
Methods for handling fitted interactions:
\code{\link{methods.fii}}, \code{\link{reach.fii}},
\code{\link{as.interact.fii}}.
Background:
\code{\link{ppm}},
\code{\link{ppm.object}}.
}
\examples{
# unmarked
model <- ppm(swedishpines ~1, PairPiece(seq(3,19,by=4)))
f <- fitin(model)
f
plot(f)
# extract fitted interaction coefficients
coef(f)
# multitype
# fit the stationary multitype Strauss process to `amacrine'
r <- 0.02 * matrix(c(1,2,2,1), nrow=2,ncol=2)
model <- ppm(amacrine ~1, MultiStrauss(r))
f <- fitin(model)
f
plot(f)
}
\keyword{spatial}
\keyword{models}
| /man/fitin.Rd | no_license | spatstat/spatstat.core | R | false | false | 2,515 | rd | \name{fitin.ppm}
\alias{fitin}
\alias{fitin.ppm}
\alias{fitin.profilepl}
\title{Extract the Interaction from a Fitted Point Process Model}
\description{
Given a point process model that has been fitted to point pattern
data, this function extracts the interpoint interaction part of the
model as a separate object.
}
\usage{
fitin(object)
\method{fitin}{ppm}(object)
\method{fitin}{profilepl}(object)
}
\arguments{
\item{object}{A fitted point process model (object of class
\code{"ppm"} or \code{"profilepl"}).
}
}
\details{
An object of class \code{"ppm"} describes a fitted point process
model. It contains information about the original data to which the
model was fitted, the spatial trend that was fitted, the
interpoint interaction that was fitted, and other data.
See \code{\link{ppm.object}} for details of this class.
The function \code{fitin} extracts from this model the information about the
fitted interpoint interaction only.
The information is organised as an object of class \code{"fii"}
(fitted interpoint interaction).
This object can be printed or plotted.
Users may find this a convenient way to plot the
fitted interpoint interaction term, as shown in the Examples.
For a pairwise interaction, the plot of the fitted interaction
shows the pair interaction function (the contribution to the
probability density from a pair of points as a function of the
distance between them). For a higher-order interaction, the plot shows
the strongest interaction (the value most different from 1)
that could ever arise at the given distance.
The fitted interaction coefficients can also be extracted
from this object using \code{\link{coef}}.
}
\value{
An object of class \code{"fii"} representing the fitted
interpoint interaction. This object can be printed and plotted.
}
\author{
\spatstatAuthors.
}
\seealso{
Methods for handling fitted interactions:
\code{\link{methods.fii}}, \code{\link{reach.fii}},
\code{\link{as.interact.fii}}.
Background:
\code{\link{ppm}},
\code{\link{ppm.object}}.
}
\examples{
# unmarked
model <- ppm(swedishpines ~1, PairPiece(seq(3,19,by=4)))
f <- fitin(model)
f
plot(f)
# extract fitted interaction coefficients
coef(f)
# multitype
# fit the stationary multitype Strauss process to `amacrine'
r <- 0.02 * matrix(c(1,2,2,1), nrow=2,ncol=2)
model <- ppm(amacrine ~1, MultiStrauss(r))
f <- fitin(model)
f
plot(f)
}
\keyword{spatial}
\keyword{models}
|
## makeCacheMatrix: wrap a matrix in a list of accessor closures that can
## also store (cache) the matrix's inverse alongside the data.
## Returns a list with set/get/setinverse/getinverse functions; replacing
## the matrix via set() invalidates any previously cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(new_matrix) {
    # Store the new matrix and drop the now-stale cached inverse.
    x <<- new_matrix
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    cached_inverse
  }
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## cacheSolve: return the inverse of the special "matrix" created by
## makeCacheMatrix(). A cached inverse is returned directly (with a
## message); otherwise the inverse is computed with solve(), stored in
## the cache, and returned. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: compute the inverse once and remember it for next time.
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setinverse <- function(inv) m <<- inv
getinverse <- function() m
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## Write a short comment describing this function
cacheSolve <- function(x, ...) {
m <- x$getinverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setinverse(m)
m
} |
\name{parseSubType}
\alias{parseSubType}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Parse KGML relation subtype }
\description{
The function parses KGML relation subtype, called internally and not
intended to be used by end users.
}
\usage{
parseSubType(subtype)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{subtype}{ KGML subtype node}
}
\value{
An object of \code{\link{KEGGEdgeSubType-class}}
}
\author{ Jitao David Zhang \email{jitao_david.zhang@roche.com} }
| /man/parseSubType.Rd | no_license | Accio/KEGGgraph | R | false | false | 536 | rd | \name{parseSubType}
\alias{parseSubType}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Parse KGML relation subtype }
\description{
The function parses KGML relation subtype, called internally and not
intended to be used by end users.
}
\usage{
parseSubType(subtype)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{subtype}{ KGML subtype node}
}
\value{
An object of \code{\link{KEGGEdgeSubType-class}}
}
\author{ Jitao David Zhang \email{jitao_david.zhang@roche.com} }
|
#' pFSA: Pareto Feasible Solution Algorithm
#'
#' @description A function using a Feasible Solution Algorithm to estimate a set of models which are on the Pareto frontiers for chosen criteria
#'
#'
#' @param numFronts integer number of estimated frontiers to return
#' @param pselExpr expression used by function psel to estimate pareto frontiers. help(psel).
#' @param plot.it TRUE/FALSE for whether to plot the pareto frontiers
#' @param formula an object of class "formula" (or one that can be coerced to that class): a symbolic description of the model to be fitted.
#' @param data a data frame, list or environment (or object coercible by as.data.frame to a data frame) containing the variables in the model.
#' @param fitfunc the method that should be used to fit the model. For Example: lm, glm, or other methods that rely on formula, data, and other inputs.
#' @param fixvar variable(s) to fix in the model. Usually a covariate that should always be included (Example: Age, Sex). Will still consider it with interactions. Default is NULL.
#' @param quad Include quadratic terms or not. Logical.
#' @param m order of terms to include. If interactions is set to TRUE then m is the order of interactions to be considered. For Subset selection (interaction=F), m is the size of the subset to examine. Defaults to 2.
#' @param numrs number of random starts to perform.
#' @param cores number of cores to use while running. Note: Windows can only use 1 core. See mclapply for details. If function detects a Windows user it will automatically set cores=1.
#' @param interactions whether to include interactions in model. Defaults to TRUE.
#' @param criterion which criterion function to either maximize or minimize. For linear models one can use: r.squared, adj.r.squared, cv5.lmFSA (5 Fold Cross Validation error), cv10.lmFSA (10 Fold Cross Validation error), apress (Allen's Press Statistic), int.p.val (Interaction P-value), AIC, BIC.
#' @param minmax whether to minimize or maximize the criterion function
#' @param checkfeas vector of variables that could be a feasible solution. These variables will be used as the last random start.
#' @param var4int specification of which variables to check for marginal feasibility. Default is NULL
#' @param min.nonmissing the combination of predictors will be ignored unless this many of observations are not missing
#' @param return.models bool value to specify whether return all the fitted models which have been checked
#' @param fix.formula ...
#' @param ... see arguments taken by function FSA or other functions. help(FSA).
#'
#' @import hash
#' @importFrom parallel mclapply
#' @importFrom graphics legend
#' @import tibble
#' @import rPref
#' @import tidyr
#' @return list of a matrix of all models obtained from FSA (fits) and their criteria. Also a matrix of the estimated frontiers that were requested. The Key column in fits, and pbound refers to the column number of the variables contained in the model fit. For instance, Key="42,96" would refer to the model which contains the variable in the 42nd column and 96th column of the designated dataset.
#'
#' @export
#'
#' @examples
#'\donttest{
#'N <- 1000 #number of obs
#'P <- 100 #number of variables
#'data <- data.frame(matrix(rnorm(N*(P+1)), nrow = N, ncol = P+1))
#'sln <- pFSA(formula = "X101~1", data = data, m = 2, criterion = c(max_abs_resid,r.squared),
#' minmax = c("min","max"),numrs = 10,numFronts = 2,
#' pselExpr =rPref::low(max_abs_resid)*rPref::high(r.squared),plot.it = TRUE)
#' }
pFSA <- function(numFronts=2,pselExpr=NULL,plot.it=TRUE,formula, data, fitfunc=lm, fixvar = NULL, quad = FALSE,
                 m = 2, numrs = 1, cores=1, interactions = T,
                 criterion = AIC, minmax="min", checkfeas=NULL, var4int=NULL,
                 min.nonmissing=1, return.models=FALSE, fix.formula=NULL,...)
{
  # Pareto optimality needs at least two competing criteria.
  if (length(criterion)<2) {
    stop("for Pareto Optimality you need atleast two criteria functions")
  }
  # Dummy binding for the non-standard-evaluation column name used by spread().
  k<- NULL
  # First pass: explore the model space with the caller's settings.
  fsaFit<-FSA(formula, data, fitfunc=fitfunc, fixvar=fixvar, quad=quad,
              m=m, numrs=numrs, cores=cores, interactions=interactions,
              criterion=criterion, minmax=minmax, checkfeas=checkfeas,
              var4int=var4int, min.nonmissing=min.nonmissing,
              return.models=return.models, fix.formula=fix.formula,...)
  # Reshape criterion values: one row per visited model (Key), one column
  # per criterion.
  fits<-spread(fsaFit$criData,key = "k",value = "Values")
  fits2<-fits
  # Refit every visited model and evaluate each criterion on it. The Key
  # column stores predictor column indices as a string, so the formula is
  # rebuilt via eval(parse(...)) on those indices.
  # NOTE(review): ncol = 2 hard-codes exactly two criteria even though the
  # guard above only requires "at least" two -- confirm intended limit.
  ans<-matrix(data = unlist(mclapply(X = 1:dim(fits2)[1],mc.cores = cores,FUN = function(x){
    if(interactions==TRUE){int="*"} else {int="+"}
    form<-as.formula(paste(all.vars(as.formula(formula))[1],"~",paste(colnames(data)[eval(parse(text=paste0("c(", fits[x,1], ")")))],collapse= int)))
    fit_tmp<-fitfunc(formula=form, data=data,...)
    tmp<-NULL
    for(i in 1:(length(criterion))){
      tmp<-c(tmp,criterion[[i]](fit_tmp))
    }
    tmp
  })),byrow = TRUE,ncol = 2)
  fits2[,-1]<-ans
  # Second pass: re-run FSA as subset selection (interactions=FALSE).
  # NOTE(review): this call hard-codes fitfunc=lm, fixvar=NULL, quad=FALSE,
  # cores=1, etc., silently ignoring the caller's corresponding arguments --
  # verify whether this is leftover development code.
  fsaFit<-FSA(formula, data, fitfunc=lm, fixvar=NULL, quad=FALSE,
              m=m, numrs=numrs, cores=1, interactions=FALSE,
              criterion=criterion, minmax=minmax, checkfeas=NULL,
              var4int=NULL,
              return.models=FALSE, fix.formula=NULL)
  fits<-spread(fsaFit$criData,key ="k",value = "Values")
  # NOTE(review): `<<-` writes fits2 into the enclosing/global environment
  # and discards the locally computed fits2 above -- side effect to confirm.
  fits2<<-fits
  # Fill in criterion values for any rows the second pass left as NA.
  # NOTE(review): the `<<-` inside this mclapply cannot update the parent's
  # fits2 when mc.cores > 1 (forked children work on copies), so these
  # results may be lost on multi-core runs -- confirm.
  l<-mclapply(X = which(apply(X = fits2, MARGIN = 1, function(x){any(is.na(x))})),
              mc.cores = cores,
              FUN = function(x,...){
                if(interactions==TRUE){int="*"} else {int="+"}
                form<-as.formula(paste(all.vars(as.formula(formula))[1],"~",
                                       paste(colnames(data)[eval(parse(text=paste0("c(", fits2[x,1], ")")))],
                                             collapse= int))
                )
                fit_tmp<-fitfunc(formula=form, data=data)
                for(i in 1:(length(criterion))){
                  fits2[x,-1][,i]<<-criterion[[i]](fit_tmp)
                }
              }
  )
  fits3<-fits2
  # Recover readable criterion names from the preference expression, e.g.
  # low(AIC)*high(r.squared) -> c("AIC", "r.squared").
  cname<-gsub(pattern = "high|low|[(]|[])]| ",replacement = "",x = as.character(pselExpr))
  cname<-unlist(strsplit(cname,split = "[*]"))
  colnames(fits3)<-c("Key",cname)
  # Pareto selection: keep the first `numFronts` frontiers.
  pbound<-psel(df = fits3,pselExpr,top_level=numFronts)
  # 2-D scatter of the frontiers; only drawable for exactly two criteria.
  # NOTE(review): par() is changed here without restoring the old settings.
  if(length(criterion)>2 & plot.it==TRUE){
    "Sorry, plots cannot be made for more than 2 criteria."
  } else{if(plot.it==TRUE){
    par(mar=c(5.1, 4.1, 4.1, 8.1), xpd=TRUE)
    plot(x = pbound[,2],y = pbound[,3],col=pbound$.level,xlab=cname[1],ylab=cname[2],pch=20,
         main = "Estimated Pareto Frontier Graph for \nChosen Criteria and Number of Fronts")
    legend("bottomright", legend=c("Front 1","Front 2","Not Pareto"), col=c(1,2,3),pch=20, title="Est Pareto Front")
  }
  }
  return(list(fits=fits2,pbound=pbound))
}
| /R/pFSA.R | no_license | joshuawlambert/rFSA | R | false | false | 6,729 | r | #' pFSA: Pareto Feasible Solution Algorithm
#'
#' @description A function using a Feasible Solution Algorithm to estimate a set of models which are on the Pareto frontiers for chosen criteria
#'
#'
#' @param numFronts integer number of estimated frontiers to return
#' @param pselExpr expression used by function psel to estimate pareto frontiers. help(psel).
#' @param plot.it TRUE/FALSE for whether to plot the pareto frontiers
#' @param formula an object of class "formula" (or one that can be coerced to that class): a symbolic description of the model to be fitted.
#' @param data a data frame, list or environment (or object coercible by as.data.frame to a data frame) containing the variables in the model.
#' @param fitfunc the method that should be used to fit the model. For Example: lm, glm, or other methods that rely on formula, data, and other inputs.
#' @param fixvar variable(s) to fix in the model. Usually a covariate that should always be included (Example: Age, Sex). Will still consider it with interactions. Default is NULL.
#' @param quad Include quadratic terms or not. Logical.
#' @param m order of terms to include. If interactions is set to TRUE then m is the order of interactions to be considered. For Subset selection (interaction=F), m is the size of the subset to examine. Defaults to 2.
#' @param numrs number of random starts to perform.
#' @param cores number of cores to use while running. Note: Windows can only use 1 core. See mclapply for details. If function detects a Windows user it will automatically set cores=1.
#' @param interactions whether to include interactions in model. Defaults to TRUE.
#' @param criterion which criterion function to either maximize or minimize. For linear models one can use: r.squared, adj.r.squared, cv5.lmFSA (5 Fold Cross Validation error), cv10.lmFSA (10 Fold Cross Validation error), apress (Allen's Press Statistic), int.p.val (Interaction P-value), AIC, BIC.
#' @param minmax whether to minimize or maximize the criterion function
#' @param checkfeas vector of variables that could be a feasible solution. These variables will be used as the last random start.
#' @param var4int specification of which variables to check for marginal feasibility. Default is NULL
#' @param min.nonmissing the combination of predictors will be ignored unless this many of observations are not missing
#' @param return.models bool value to specify whether return all the fitted models which have been checked
#' @param fix.formula ...
#' @param ... see arguments taken by function FSA or other functions. help(FSA).
#'
#' @import hash
#' @importFrom parallel mclapply
#' @importFrom graphics legend
#' @import tibble
#' @import rPref
#' @import tidyr
#' @return list of a matrix of all models obtained from FSA (fits) and their criteria. Also a matrix of the estimated frontiers that were requested. The Key column in fits, and pbound refers to the column number of the variables contained in the model fit. For instance, Key="42,96" would refer to the model which contains the variable in the 42nd column and 96th column of the designated dataset.
#'
#' @export
#'
#' @examples
#'\donttest{
#'N <- 1000 #number of obs
#'P <- 100 #number of variables
#'data <- data.frame(matrix(rnorm(N*(P+1)), nrow = N, ncol = P+1))
#'sln <- pFSA(formula = "X101~1", data = data, m = 2, criterion = c(max_abs_resid,r.squared),
#' minmax = c("min","max"),numrs = 10,numFronts = 2,
#' pselExpr =rPref::low(max_abs_resid)*rPref::high(r.squared),plot.it = TRUE)
#' }
pFSA <- function(numFronts=2,pselExpr=NULL,plot.it=TRUE,formula, data, fitfunc=lm, fixvar = NULL, quad = FALSE,
                 m = 2, numrs = 1, cores=1, interactions = T,
                 criterion = AIC, minmax="min", checkfeas=NULL, var4int=NULL,
                 min.nonmissing=1, return.models=FALSE, fix.formula=NULL,...)
{
  # Pareto optimality needs at least two competing criteria.
  if (length(criterion)<2) {
    stop("for Pareto Optimality you need atleast two criteria functions")
  }
  # Dummy binding for the non-standard-evaluation column name used by spread().
  k<- NULL
  # First pass: explore the model space with the caller's settings.
  fsaFit<-FSA(formula, data, fitfunc=fitfunc, fixvar=fixvar, quad=quad,
              m=m, numrs=numrs, cores=cores, interactions=interactions,
              criterion=criterion, minmax=minmax, checkfeas=checkfeas,
              var4int=var4int, min.nonmissing=min.nonmissing,
              return.models=return.models, fix.formula=fix.formula,...)
  # Reshape criterion values: one row per visited model (Key), one column
  # per criterion.
  fits<-spread(fsaFit$criData,key = "k",value = "Values")
  fits2<-fits
  # Refit every visited model and evaluate each criterion on it. The Key
  # column stores predictor column indices as a string, so the formula is
  # rebuilt via eval(parse(...)) on those indices.
  # NOTE(review): ncol = 2 hard-codes exactly two criteria even though the
  # guard above only requires "at least" two -- confirm intended limit.
  ans<-matrix(data = unlist(mclapply(X = 1:dim(fits2)[1],mc.cores = cores,FUN = function(x){
    if(interactions==TRUE){int="*"} else {int="+"}
    form<-as.formula(paste(all.vars(as.formula(formula))[1],"~",paste(colnames(data)[eval(parse(text=paste0("c(", fits[x,1], ")")))],collapse= int)))
    fit_tmp<-fitfunc(formula=form, data=data,...)
    tmp<-NULL
    for(i in 1:(length(criterion))){
      tmp<-c(tmp,criterion[[i]](fit_tmp))
    }
    tmp
  })),byrow = TRUE,ncol = 2)
  fits2[,-1]<-ans
  # Second pass: re-run FSA as subset selection (interactions=FALSE).
  # NOTE(review): this call hard-codes fitfunc=lm, fixvar=NULL, quad=FALSE,
  # cores=1, etc., silently ignoring the caller's corresponding arguments --
  # verify whether this is leftover development code.
  fsaFit<-FSA(formula, data, fitfunc=lm, fixvar=NULL, quad=FALSE,
              m=m, numrs=numrs, cores=1, interactions=FALSE,
              criterion=criterion, minmax=minmax, checkfeas=NULL,
              var4int=NULL,
              return.models=FALSE, fix.formula=NULL)
  fits<-spread(fsaFit$criData,key ="k",value = "Values")
  # NOTE(review): `<<-` writes fits2 into the enclosing/global environment
  # and discards the locally computed fits2 above -- side effect to confirm.
  fits2<<-fits
  # Fill in criterion values for any rows the second pass left as NA.
  # NOTE(review): the `<<-` inside this mclapply cannot update the parent's
  # fits2 when mc.cores > 1 (forked children work on copies), so these
  # results may be lost on multi-core runs -- confirm.
  l<-mclapply(X = which(apply(X = fits2, MARGIN = 1, function(x){any(is.na(x))})),
              mc.cores = cores,
              FUN = function(x,...){
                if(interactions==TRUE){int="*"} else {int="+"}
                form<-as.formula(paste(all.vars(as.formula(formula))[1],"~",
                                       paste(colnames(data)[eval(parse(text=paste0("c(", fits2[x,1], ")")))],
                                             collapse= int))
                )
                fit_tmp<-fitfunc(formula=form, data=data)
                for(i in 1:(length(criterion))){
                  fits2[x,-1][,i]<<-criterion[[i]](fit_tmp)
                }
              }
  )
  fits3<-fits2
  # Recover readable criterion names from the preference expression, e.g.
  # low(AIC)*high(r.squared) -> c("AIC", "r.squared").
  cname<-gsub(pattern = "high|low|[(]|[])]| ",replacement = "",x = as.character(pselExpr))
  cname<-unlist(strsplit(cname,split = "[*]"))
  colnames(fits3)<-c("Key",cname)
  # Pareto selection: keep the first `numFronts` frontiers.
  pbound<-psel(df = fits3,pselExpr,top_level=numFronts)
  # 2-D scatter of the frontiers; only drawable for exactly two criteria.
  # NOTE(review): par() is changed here without restoring the old settings.
  if(length(criterion)>2 & plot.it==TRUE){
    "Sorry, plots cannot be made for more than 2 criteria."
  } else{if(plot.it==TRUE){
    par(mar=c(5.1, 4.1, 4.1, 8.1), xpd=TRUE)
    plot(x = pbound[,2],y = pbound[,3],col=pbound$.level,xlab=cname[1],ylab=cname[2],pch=20,
         main = "Estimated Pareto Frontier Graph for \nChosen Criteria and Number of Fronts")
    legend("bottomright", legend=c("Front 1","Front 2","Not Pareto"), col=c(1,2,3),pch=20, title="Est Pareto Front")
  }
  }
  return(list(fits=fits2,pbound=pbound))
}
|
# Interactive sanity-check script for FLO1K / PCR-GLOBWB / CRU NetCDF files:
# opens the files, inspects attributes and the time axis, and pulls the qav
# array into memory. Paths are hard-coded to local D:/ and F:/ drives, so
# this is meant for manual, machine-specific use only.
library(raster); library(ncdf4)
# One band of the qav variable as a raster layer.
r = raster('D:/FLO1K.1.1.ts.1961.2015.qavnew.nc', varname='qav', band = 1)
nc <- nc_open('D:/FLO1K.1.1.ts.1961.2015.qav.nc')
nc2 <- nc_open('F:/pcrglobwb_CRU_30min_qcMONAVG_NC3.nc')
# NOTE(review): this handle is neither assigned nor closed.
nc_open('F:/CRU_TS_3.24/cru_ts3.24.1901.2015.pre.dat.nc')
# Global (file-level) attributes.
nc_atts <- ncatt_get(nc, 0)
names(nc_atts)
# Time coordinate and its units string.
t <- ncvar_get(nc, varid='time',verbose = F)
tunits <- ncatt_get(nc,"time","units")
nt <- dim(t)
nt
# Convert the numeric time axis to dates, assuming a 1900-01-01 origin.
time_d <- as.Date(t, format="%j", origin=as.Date("1900-01-01"))
# Full discharge array (can be large).
qav_array <- ncvar_get(nc,'qav')
date_time_start <- as.POSIXct(tunits$value, format = "%Y%m%dT%H%M%SZ", tz = "UTC")
nc_close(nc) | /scripts/netcdf.creation/check.R | no_license | vbarbarossa/flo1k | R | false | false | 645 | r | library(raster); library(ncdf4)
# Interactive sanity-check for FLO1K / PCR-GLOBWB / CRU NetCDF files with
# hard-coded local D:/ and F:/ paths; meant for manual, machine-specific use.
# One band of the qav variable as a raster layer.
r = raster('D:/FLO1K.1.1.ts.1961.2015.qavnew.nc', varname='qav', band = 1)
nc <- nc_open('D:/FLO1K.1.1.ts.1961.2015.qav.nc')
nc2 <- nc_open('F:/pcrglobwb_CRU_30min_qcMONAVG_NC3.nc')
# NOTE(review): this handle is neither assigned nor closed.
nc_open('F:/CRU_TS_3.24/cru_ts3.24.1901.2015.pre.dat.nc')
# Global (file-level) attributes.
nc_atts <- ncatt_get(nc, 0)
names(nc_atts)
# Time coordinate and its units string.
t <- ncvar_get(nc, varid='time',verbose = F)
tunits <- ncatt_get(nc,"time","units")
nt <- dim(t)
nt
# Convert the numeric time axis to dates, assuming a 1900-01-01 origin.
time_d <- as.Date(t, format="%j", origin=as.Date("1900-01-01"))
# Full discharge array (can be large).
qav_array <- ncvar_get(nc,'qav')
date_time_start <- as.POSIXct(tunits$value, format = "%Y%m%dT%H%M%SZ", tz = "UTC")
nc_close(nc) |
# Single page (using the rvest package): scrape one page of Naver movie
# reviews (title / score / review text) and save it to CSV.
library(rvest)
text<- NULL; title<-NULL; point<-NULL; review<-NULL; page=NULL
url<- "http://movie.naver.com/movie/point/af/list.nhn?page=1"
# The listing is served in CP949 (Korean Windows) encoding.
text <- read_html(url, encoding="CP949")
text
# Movie title
nodes <- html_nodes(text, ".movie")
title <- html_text(nodes)
title
# Movie score
nodes <- html_nodes(text, ".title em")
point <- html_text(nodes)
point
# Review text: bare text nodes of each row's second cell; drop empty ones.
nodes <- html_nodes(text, xpath="//*[@id='old_content']/table/tbody/tr/td[2]/text()")
nodes <- html_text(nodes, trim=TRUE)
nodes
review <- nodes[nchar(nodes) > 0]
review
page <- data.frame(title, point, review)
write.csv(page, "movie_reviews.csv")
# Same page again, but selecting row by row (tr:nth-child) instead of all
# rows at once.
text<- NULL; vtitle<-NULL; vpoint<-NULL; vreview<-NULL; page=NULL
url<- "http://movie.naver.com/movie/point/af/list.nhn?page=1"
text <- read_html(url, encoding="CP949")
text
for (index in 1:10) {
  # Movie title
  node <- html_nodes(text, paste0("#old_content > table > tbody > tr:nth-child(", index, ") > td.title > a.movie.color_b"))
  title <- html_text(node)
  vtitle[index] <- title
  # Movie score
  node <- html_nodes(text, paste0("#old_content > table > tbody > tr:nth-child(", index,") > td.title > div > em"))
  point <- html_text(node)
  vpoint <- c(vpoint, point)
  # Review text
  node <- html_nodes(text, xpath=paste0('//*[@id="old_content"]/table/tbody/tr[', index,"]/td[2]/text()"))
  node <- html_text(node, trim=TRUE)
  review = node[4] # assumes the review is the 4th text node of the cell -- TODO confirm
  vreview <- append(vreview, review)
}
page <- data.frame(vtitle, vpoint, vreview)
write.csv(page, "movie_reviews1.csv")
# Multiple pages: loop over the first 100 listing pages; a page is skipped
# when fewer than 10 non-empty review texts survive (some review omitted).
site<- "http://movie.naver.com/movie/point/af/list.nhn?page="
text <- NULL
movie.review <- NULL
for(i in 1: 100) {
  url <- paste(site, i, sep="")
  text <- read_html(url, encoding="CP949")
  nodes <- html_nodes(text, ".movie")
  title <- html_text(nodes)
  nodes <- html_nodes(text, ".title em")
  point <- html_text(nodes)
  nodes <- html_nodes(text, xpath="//*[@id='old_content']/table/tbody/tr/td[2]/text()")
  imsi <- html_text(nodes, trim=TRUE)
  review <- imsi[nchar(imsi) > 0]
  if(length(review) == 10) {
    page <- data.frame(title, point, review)
    movie.review <- rbind(movie.review, page)
  } else {
    # Skipped-page notice (user-facing Korean message; left untranslated).
    cat(paste(i," 페이지에는 리뷰글이 생략된 데이터가 있어서 수집하지 않습니다.ㅜㅜ\n"))
  }
}
write.csv(movie.review, "movie_reviews2.csv")
| /day6.R | no_license | RyuJelly/R-TIL | R | false | false | 2,363 | r | # 단일 페이지(rvest 패키지 사용)
# Scrape Naver movie reviews (title / score / review text) with rvest.
# Section 1: single page, whole-page selectors.
library(rvest)
text<- NULL; title<-NULL; point<-NULL; review<-NULL; page=NULL
url<- "http://movie.naver.com/movie/point/af/list.nhn?page=1"
# The listing is served in CP949 (Korean Windows) encoding.
text <- read_html(url, encoding="CP949")
text
# Movie title
nodes <- html_nodes(text, ".movie")
title <- html_text(nodes)
title
# Movie score
nodes <- html_nodes(text, ".title em")
point <- html_text(nodes)
point
# Review text: bare text nodes of each row's second cell; drop empty ones.
nodes <- html_nodes(text, xpath="//*[@id='old_content']/table/tbody/tr/td[2]/text()")
nodes <- html_text(nodes, trim=TRUE)
nodes
review <- nodes[nchar(nodes) > 0]
review
page <- data.frame(title, point, review)
write.csv(page, "movie_reviews.csv")
# Same page again, but selecting row by row (tr:nth-child) instead of all
# rows at once.
text<- NULL; vtitle<-NULL; vpoint<-NULL; vreview<-NULL; page=NULL
url<- "http://movie.naver.com/movie/point/af/list.nhn?page=1"
text <- read_html(url, encoding="CP949")
text
for (index in 1:10) {
  # Movie title
  node <- html_nodes(text, paste0("#old_content > table > tbody > tr:nth-child(", index, ") > td.title > a.movie.color_b"))
  title <- html_text(node)
  vtitle[index] <- title
  # Movie score
  node <- html_nodes(text, paste0("#old_content > table > tbody > tr:nth-child(", index,") > td.title > div > em"))
  point <- html_text(node)
  vpoint <- c(vpoint, point)
  # Review text
  node <- html_nodes(text, xpath=paste0('//*[@id="old_content"]/table/tbody/tr[', index,"]/td[2]/text()"))
  node <- html_text(node, trim=TRUE)
  review = node[4] # assumes the review is the 4th text node of the cell -- TODO confirm
  vreview <- append(vreview, review)
}
page <- data.frame(vtitle, vpoint, vreview)
write.csv(page, "movie_reviews1.csv")
# Multiple pages: loop over the first 100 listing pages; a page is skipped
# when fewer than 10 non-empty review texts survive (some review omitted).
site<- "http://movie.naver.com/movie/point/af/list.nhn?page="
text <- NULL
movie.review <- NULL
for(i in 1: 100) {
  url <- paste(site, i, sep="")
  text <- read_html(url, encoding="CP949")
  nodes <- html_nodes(text, ".movie")
  title <- html_text(nodes)
  nodes <- html_nodes(text, ".title em")
  point <- html_text(nodes)
  nodes <- html_nodes(text, xpath="//*[@id='old_content']/table/tbody/tr/td[2]/text()")
  imsi <- html_text(nodes, trim=TRUE)
  review <- imsi[nchar(imsi) > 0]
  if(length(review) == 10) {
    page <- data.frame(title, point, review)
    movie.review <- rbind(movie.review, page)
  } else {
    # Skipped-page notice (user-facing Korean message; left untranslated).
    cat(paste(i," 페이지에는 리뷰글이 생략된 데이터가 있어서 수집하지 않습니다.ㅜㅜ\n"))
  }
}
write.csv(movie.review, "movie_reviews2.csv")
|
#' @param x string deseq_dir
get_align_stat3 <- function(x) {
  # Collect alignment stats for a deseq_single dir across the gene, te and
  # piRNA_cluster features, and derive per-sample totals in wide format.
  x_type <- hiseqr::is_hiseq_dir(x)
  # Only deseq_single directories are supported.
  if(! x_type == "deseq_single") {
    warning(paste0("deseq_single expected, ", x_type, " got"))
    return(NULL)
  }
  # Per-feature stats from every control and experiment directory.
  tmp <- lapply(c("gene", "te", "piRNA_cluster"), function(f){
    args <- hiseqr::deseq_single_dir(x, f)
    if(is.null(args)) {
      return(NULL)
    }
    tmp2 <- lapply(c(args$dirs_ctl, args$dirs_exp), function(i){
      get_align_stat2(i, f)
    })
    dplyr::bind_rows(tmp2)
  })
  ## total mapping: all features stacked long
  df <- dplyr::bind_rows(tmp)
  ## total reads: taken from the "1.rRNA" index rows
  df2 <- df %>%
    dplyr::filter(index_name == "1.rRNA") %>%
    dplyr::select(fqname, total)
  ## mapped reads: one column per alignment index
  df_tmp <- df %>%
    dplyr::select(fqname, index_name, map) %>%
    tidyr::spread("index_name", "map")
  # unmap = total * 2 - row sum of the numeric columns; the factor 2
  # presumably reflects paired-end counting -- TODO confirm.
  df_table <- merge(df2, df_tmp, by = "fqname") %>%
    dplyr::mutate(unmap = total * 2 - select_if(., is.numeric) %>% rowSums())
  df_table
}
#' @param x string deseq_dir
get_align_stat <- function(x, feature = "gene") {
  # Gather alignment statistics for one feature from every control and
  # experiment sample directory of a deseq_single dir, row-bound into a
  # single data frame. Warns and returns NULL for any other dir type.
  dir_type <- hiseqr::is_hiseq_dir(x)
  if(dir_type != "deseq_single") {
    warning(paste0("deseq_single expected, ", dir_type, " got"))
    return(NULL)
  }
  cfg <- hiseqr::deseq_single_dir(x)
  sample_dirs <- c(cfg$dirs_ctl, cfg$dirs_exp)
  per_dir <- lapply(sample_dirs, function(d) get_align_stat2(d, feature))
  dplyr::bind_rows(per_dir)
}
#' @param x string, rnaseq_single dir
get_align_stat2 <- function(x, feature = "gene") {
  # Read every *.json stats file under <x>/<feature>/align/ of an
  # rnaseq_single directory into tibbles and row-bind them. Warns and
  # returns NULL for any other dir type.
  #
  # Fixes: list.files() interprets its pattern as a regular expression, so
  # the previous glob "*.json" also matched names merely containing ".json"
  # (e.g. "x.json.bak"); it is now the anchored regex "\\.json$". The
  # positional T/T/T logicals are spelled out as named TRUE arguments.
  x_type <- hiseqr::is_hiseq_dir(x)
  if(! x_type == "rnaseq_single") {
    warning(paste0("rnaseq_single expected, ", x_type, " got"))
    return(NULL)
  }
  # x is rnaseq_single; stats live under <x>/<feature>/align/
  align_dir <- file.path(x, feature, "align")
  stat_list <- list.files(align_dir, pattern = "\\.json$", all.files = TRUE,
                          full.names = TRUE, recursive = TRUE)
  tmp <- lapply(stat_list, function(f){
    d <- jsonlite::read_json(f)
    as_tibble(d)
  })
  dplyr::bind_rows(tmp)
}
| /R/deseq_tools.R | permissive | bakerwm/hiseqr | R | false | false | 1,836 | r |
#' @param x string deseq_dir
get_align_stat3 <- function(x) {
  # Collect alignment stats for a deseq_single dir across the gene, te and
  # piRNA_cluster features, and derive per-sample totals in wide format.
  x_type <- hiseqr::is_hiseq_dir(x)
  # Only deseq_single directories are supported.
  if(! x_type == "deseq_single") {
    warning(paste0("deseq_single expected, ", x_type, " got"))
    return(NULL)
  }
  # Per-feature stats from every control and experiment directory.
  tmp <- lapply(c("gene", "te", "piRNA_cluster"), function(f){
    args <- hiseqr::deseq_single_dir(x, f)
    if(is.null(args)) {
      return(NULL)
    }
    tmp2 <- lapply(c(args$dirs_ctl, args$dirs_exp), function(i){
      get_align_stat2(i, f)
    })
    dplyr::bind_rows(tmp2)
  })
  ## total mapping: all features stacked long
  df <- dplyr::bind_rows(tmp)
  ## total reads: taken from the "1.rRNA" index rows
  df2 <- df %>%
    dplyr::filter(index_name == "1.rRNA") %>%
    dplyr::select(fqname, total)
  ## mapped reads: one column per alignment index
  df_tmp <- df %>%
    dplyr::select(fqname, index_name, map) %>%
    tidyr::spread("index_name", "map")
  # unmap = total * 2 - row sum of the numeric columns; the factor 2
  # presumably reflects paired-end counting -- TODO confirm.
  df_table <- merge(df2, df_tmp, by = "fqname") %>%
    dplyr::mutate(unmap = total * 2 - select_if(., is.numeric) %>% rowSums())
  df_table
}
#' @param x string deseq_dir
get_align_stat <- function(x, feature = "gene") {
  # Gather alignment statistics for one feature from every control and
  # experiment sample directory of a deseq_single dir, row-bound into a
  # single data frame. Warns and returns NULL for any other dir type.
  dir_type <- hiseqr::is_hiseq_dir(x)
  if(dir_type != "deseq_single") {
    warning(paste0("deseq_single expected, ", dir_type, " got"))
    return(NULL)
  }
  cfg <- hiseqr::deseq_single_dir(x)
  sample_dirs <- c(cfg$dirs_ctl, cfg$dirs_exp)
  per_dir <- lapply(sample_dirs, function(d) get_align_stat2(d, feature))
  dplyr::bind_rows(per_dir)
}
#' @param x string, rnaseq_single dir
get_align_stat2 <- function(x, feature = "gene") {
  # Read every *.json stats file under <x>/<feature>/align/ of an
  # rnaseq_single directory into tibbles and row-bind them. Warns and
  # returns NULL for any other dir type.
  #
  # Fixes: list.files() interprets its pattern as a regular expression, so
  # the previous glob "*.json" also matched names merely containing ".json"
  # (e.g. "x.json.bak"); it is now the anchored regex "\\.json$". The
  # positional T/T/T logicals are spelled out as named TRUE arguments.
  x_type <- hiseqr::is_hiseq_dir(x)
  if(! x_type == "rnaseq_single") {
    warning(paste0("rnaseq_single expected, ", x_type, " got"))
    return(NULL)
  }
  # x is rnaseq_single; stats live under <x>/<feature>/align/
  align_dir <- file.path(x, feature, "align")
  stat_list <- list.files(align_dir, pattern = "\\.json$", all.files = TRUE,
                          full.names = TRUE, recursive = TRUE)
  tmp <- lapply(stat_list, function(f){
    d <- jsonlite::read_json(f)
    as_tibble(d)
  })
  dplyr::bind_rows(tmp)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/autoGLM.R
\name{logistic}
\alias{logistic}
\title{The logistic function.}
\usage{
logistic(theta, data)
}
\arguments{
\item{theta}{A vector of coefficients.}
\item{data}{A dataframe of multiple exogenous regressors.}
}
\value{
A vector of values produced by a logistic formula under specified parameters and data.
}
\description{
Called by various routines in the package. May also be used to predict fitted conditional probabilities by supplying a set of variables and corresponding coefficients estimated from a logit model.
}
\examples{
logitdata <- simulateLogit(1000, c(1,0.5,-0.5,-0.3))
model <- logit(logitdata)
pars <- coef(model)
# predict with logistic function
predicted <- logistic(pars, cbind(1,logitdata[,-1]))
# compare with data
describe(logitdata[,1])
# compare with predict function from R
describe(fitted(model))
}
| /man/logistic.Rd | no_license | BPJandree/AutoGLM | R | false | true | 945 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/autoGLM.R
\name{logistic}
\alias{logistic}
\title{The logistic function.}
\usage{
logistic(theta, data)
}
\arguments{
\item{theta}{A vector of coefficients.}
\item{data}{A dataframe of multiple exogenous regressors.}
}
\value{
A vector of values produced by a logistic formula under specified parameters and data.
}
\description{
Called by various routines in the package. May also be used to predict fitted conditional probabilities by supplying a set of variables and corresponding coefficients estimated from a logit model.
}
\examples{
logitdata <- simulateLogit(1000, c(1,0.5,-0.5,-0.3))
model <- logit(logitdata)
pars <- coef(model)
# predict with logistic function
predicted <- logistic(pars, cbind(1,logitdata[,-1]))
# compare with data
describe(logitdata[,1])
# compare with predict function from R
describe(fitted(model))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sagemaker_operations.R
\name{sagemaker_delete_workteam}
\alias{sagemaker_delete_workteam}
\title{Deletes an existing work team}
\usage{
sagemaker_delete_workteam(WorkteamName)
}
\arguments{
\item{WorkteamName}{[required] The name of the work team to delete.}
}
\description{
Deletes an existing work team. This operation can't be undone.
}
\section{Request syntax}{
\preformatted{svc$delete_workteam(
WorkteamName = "string"
)
}
}
\keyword{internal}
| /cran/paws.machine.learning/man/sagemaker_delete_workteam.Rd | permissive | sanchezvivi/paws | R | false | true | 531 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sagemaker_operations.R
\name{sagemaker_delete_workteam}
\alias{sagemaker_delete_workteam}
\title{Deletes an existing work team}
\usage{
sagemaker_delete_workteam(WorkteamName)
}
\arguments{
\item{WorkteamName}{[required] The name of the work team to delete.}
}
\description{
Deletes an existing work team. This operation can't be undone.
}
\section{Request syntax}{
\preformatted{svc$delete_workteam(
WorkteamName = "string"
)
}
}
\keyword{internal}
|
#' A data set created by merging 1) "actual" data from a "gold standard"
#' survey (A1, A2), and 2) data from another survey (Q1, Q2), including
#' weights columns for that data (W1, W2). A1/Q1 and A2/Q2 are responses
#' to the same two questions, asked to the same 10 respondents (ID), along
#' the same 1-99 response scale.
#'
#' @format A data frame with 10 rows and 7 variables:
#' \describe{
#'   \item{ID}{respondent identifier}
#'   \item{A1, A2}{"actual" (gold standard) responses, 1-99 scale}
#'   \item{Q1, Q2}{survey responses to the same two questions, 1-99 scale}
#'   \item{W1, W2}{survey weights for the Q1/Q2 data}
#' }
#'
#' @source Example data generated by author
"TESTWGT"
| /R/TESTWGT.R | no_license | cran/TSEwgt | R | false | false | 553 | r | #' A data set created by merging 1) "actual" data from a "gold standard" survey (A1, A2), and 2) data from another survey (Q1, Q2), including weights columns for that data (W1, W2). A1/Q1 and A2/Q2 are responses to the same two questions, asked to the same 10 respondents (ID), along the same 1-99 response scale.
#'
#' @format A data frame with 10 rows and 7 variables:
#' \describe{
#'   \item{ID}{respondent identifier}
#'   \item{A1, A2}{"actual" (gold standard) responses, 1-99 scale}
#'   \item{Q1, Q2}{survey responses to the same two questions, 1-99 scale}
#'   \item{W1, W2}{survey weights for the Q1/Q2 data}
#' }
#'
#' @source Example data generated by author
"TESTWGT"
|
linRegClass <- R6::R6Class(
"linRegClass",
inherit = linRegBase,
private = list(
#### Member variables ----
terms = NULL,
coefTerms = list(),
emMeans = list(),
#### Init + run functions ----
.init = function() {
private$.modelTerms()
private$.initModelFitTable()
private$.initModelCompTable()
private$.initModelSpec()
private$.initAnovaTables()
private$.initCoefTable()
private$.initCollinearityTable()
private$.initResPlots()
private$.initEmm()
private$.initEmmTable()
},
.run = function() {
ready <- TRUE
if (is.null(self$options$dep) || length(self$options$blocks) < 1 || length(self$options$blocks[[1]]) == 0)
ready <- FALSE
if (ready) {
data <- private$.cleanData()
results <- private$.compute(data)
private$.populateModelFitTable(results)
private$.populateModelCompTable(results)
private$.populateAnovaTables(results)
private$.populateCoefTables(results)
private$.populateCooksTable(results)
private$.populateCollinearityTable(results)
private$.populateDurbinWatsonTable(results)
private$.populateNormality(results)
private$.prepareQQPlot(results)
private$.prepareResPlots(data, results)
private$.prepareEmmPlots(results$models, data=data)
private$.populateEmmTables()
# private$.prepareCoefPlot(results)
}
},
#### Compute results ----
.compute = function(data) {
formulas <- private$.formulas()
scaledData <- private$.scaleData(data)
models <- list(); modelsScaled <- list(); anovaTerms <- list()
for (i in seq_along(formulas)) {
models[[i]] <- lm(formulas[[i]], data=data)
anovaTerms[[i]] <- car::Anova(models[[i]], type=3, singular.ok=TRUE)
modelsScaled[[i]] <- lm(formulas[[i]], data=scaledData)
}
ANOVA <- do.call(stats::anova, models)
AIC <- list(); BIC <- list(); CI <- list();
CIScaled <- list(); dwTest <- list(); VIF <- list(); cooks <- list()
for (i in seq_along(models)) {
AIC[[i]] <- stats::AIC(models[[i]])
BIC[[i]] <- stats::BIC(models[[i]])
# betas[[i]] <- private$.stdEst(models[[i]])
CI[[i]] <- stats::confint(models[[i]], level = self$options$ciWidth / 100)
CIScaled[[i]] <- stats::confint(modelsScaled[[i]], level = self$options$ciWidth / 100)
cooks[[i]] <- stats::cooks.distance(models[[i]])
if (length(private$terms[[i]]) > 1)
VIF[[i]] <- car::vif(models[[i]])
else
VIF[[i]] <- NULL
if (self$options$durbin)
dwTest[[i]] <- car::durbinWatsonTest(models[[i]])
else
dwTest[[i]] <- NULL
}
return(list(models=models, modelsScaled=modelsScaled, ANOVA=ANOVA,
anovaTerms=anovaTerms, AIC=AIC, BIC=BIC, CI=CI, CIScaled=CIScaled,
dwTest=dwTest, VIF=VIF, cooks=cooks))
},
#### Init tables/plots functions ----
.initModelFitTable = function() {
table <- self$results$modelFit
blocks <- self$options$blocks
for (i in seq_along(self$options$blocks))
table$addRow(rowKey=i, values=list(model = i))
},
.initModelCompTable = function() {
table <- self$results$modelComp
blocks <- self$options$blocks
if (length(blocks) <= 1) {
table$setVisible(visible = FALSE)
return()
}
for (i in 1:(length(blocks)-1))
table$addRow(rowKey=i, values=list(model1 = i, model2 = as.integer(i+1)))
},
.initModelSpec = function() {
groups <- self$results$models
for (i in seq_along(self$options$blocks)) {
groups$addItem(key=i)
group <- groups$get(key=i)
group$setTitle(paste("Model",i))
}
},
.initAnovaTables = function() {
groups <- self$results$models
termsAll <- private$terms
for (i in seq_along(termsAll)) {
table <- groups$get(key=i)$anova
terms <- termsAll[[i]]
for (j in seq_along(terms))
table$addRow(rowKey=paste0(terms[[j]]), values=list(term = jmvcore::stringifyTerm(terms[j])))
table$addRow(rowKey='.RES', values=list(term = 'Residuals'))
table$addFormat(col=1, rowKey='.RES', format=Cell.BEGIN_GROUP)
table$setNote("ss", "Type 3 sum of squares")
}
},
.initCoefTable = function() {
groups <- self$results$models
termsAll <- private$terms
data <- self$data
factors <- self$options$factors
dep <- self$options$dep
for (i in seq_along(termsAll)) {
table <- groups$get(key=i)$coef
ciWidth <- self$options$ciWidth
table$getColumn('lower')$setSuperTitle(jmvcore::format('{}% Confidence Interval', ciWidth))
table$getColumn('upper')$setSuperTitle(jmvcore::format('{}% Confidence Interval', ciWidth))
ciWidthStdEst <- self$options$ciWidthStdEst
table$getColumn('stdEstLower')$setSuperTitle(jmvcore::format('{}% Confidence Interval', ciWidthStdEst))
table$getColumn('stdEstUpper')$setSuperTitle(jmvcore::format('{}% Confidence Interval', ciWidthStdEst))
coefTerms <- list()
table$addRow(rowKey="`(Intercept)`", values=list(term = "Intercept"))
coefTerms[[1]] <- "(Intercept)"
if ( ! is.null(factors)) {
note <- ifelse(self$options$intercept == 'refLevel', 'Represents reference level',
'Represents grand mean')
table$addFootnote(rowKey="`(Intercept)`", 'term', note)
}
terms <- termsAll[[i]]
for (j in seq_along(terms)) {
if (any(terms[[j]] %in% factors)) { # check if there are factors in the term
table$addRow(rowKey=terms[[j]], values=list(term = paste0(jmvcore::stringifyTerm(terms[[j]]), ':'),
est='', se='', t='', p='',
lower='', upper='', stdEst='',
stdEstLower='', stdEstUpper=''))
coefs <- private$.coefTerms(terms[[j]])
coefNames <- coefs$coefNames
for (k in seq_along(coefNames)) {
rowKey <- jmvcore::composeTerm(coefs$coefTerms[[k]])
table$addRow(rowKey=rowKey, values=list(term = coefNames[[k]]))
table$addFormat(rowKey=rowKey, col=1, Cell.INDENTED)
}
coefTerms <- c(coefTerms, coefs$coefTerms)
} else {
rowKey <- jmvcore::composeTerm(jmvcore::toB64(terms[[j]]))
table$addRow(rowKey=rowKey, values=list(term = jmvcore::stringifyTerm(terms[[j]])))
coefTerms[[length(coefTerms) + 1]] <- jmvcore::toB64(terms[[j]])
}
}
private$coefTerms[[i]] <- coefTerms
}
},
.initCollinearityTable = function() {
groups <- self$results$models
termsAll <- private$terms
for (i in seq_along(termsAll)) {
table <- groups$get(key=i)$assump$collin
terms <- termsAll[[i]]
if (length(terms) < 1)
terms <- ''
for (i in seq_along(terms))
table$addRow(rowKey=i, values=list(term = jmvcore::stringifyTerm(terms[i])))
}
},
.initResPlots=function() {
groups <- self$results$models
termsAll <- private$terms
covs <- self$options$covs
for (i in seq_along(termsAll)) {
modelTerms <- termsAll[[i]]
if (length(modelTerms) < 1) {
terms <- ''
} else {
terms <- c('Fitted', self$options$dep)
for (term in modelTerms) {
if (length(term) == 1 && term %in% covs)
terms <- c(terms, term)
}
}
images <- groups$get(key=i)$assump$resPlots
for (term in terms)
images$addItem(term)
}
},
.initEmm = function() {
groups <- self$results$models
termsAll <- private$terms
emMeans <- self$options$emMeans
for (i in seq_along(termsAll)) {
group <- groups$get(key=i)$emm
terms <- unique(unlist(termsAll[[i]]))
for (j in seq_along(emMeans)) {
emm <- emMeans[[j]]
if ( ! is.null(emm) && all(emm %in% terms)) {
group$addItem(key=j)
emmGroup <- group$get(key=j)
emmGroup$setTitle(jmvcore::stringifyTerm(emm))
image <- emmGroup$emmPlot
size <- private$.plotSize(emm)
image$setSize(size[1], size[2])
}
}
}
},
.initEmmTable = function() {
groups <- self$results$models
termsAll <- private$terms
emMeans <- self$options$emMeans
factors <- self$options$factors
for (i in seq_along(termsAll)) {
group <- groups$get(key=i)$emm
terms <- unique(unlist(termsAll[[i]]))
for (j in seq_along(emMeans)) {
emm <- emMeans[[j]]
if ( ! is.null(emm) && all(emm %in% terms)) {
emmGroup <- group$get(key=j)
table <- emmGroup$emmTable
table$setTitle(paste0('Estimated Marginal Means - ', jmvcore::stringifyTerm(emm)))
nLevels <- numeric(length(emm))
for (k in rev(seq_along(emm))) {
if (emm[k] %in% factors) {
table$addColumn(name=emm[k], title=emm[k], type='text', combineBelow=TRUE)
nLevels[k] <- length(levels(self$data[[ emm[k] ]]))
} else {
table$addColumn(name=emm[k], title=emm[k], type='number', combineBelow=TRUE)
nLevels[k] <- 3
}
}
table$addColumn(name='emmean', title='Marginal Mean', type='number')
table$addColumn(name='se', title='SE', type='number')
table$addColumn(name='lower', title='Lower', type='number', superTitle=paste0(self$options$ciWidthEmm, '% Confidence Interval'), visibl="(ciEmm)")
table$addColumn(name='upper', title='Upper', type='number', superTitle=paste0(self$options$ciWidthEmm, '% Confidence Interval'), visibl="(ciEmm)")
nRows <- prod(nLevels)
for (k in 1:nRows) {
row <- list()
table$addRow(rowKey=k, row)
}
}
}
}
},
#### Populate tables functions ----
.populateModelFitTable = function(results) {
table <- self$results$modelFit
models <- results$models
# modelsBF <- results$modelsBF
AIC <- results$AIC
BIC <- results$BIC
for (i in seq_along(models)) {
row <- list()
row[["aic"]] <- AIC[[i]]
row[["bic"]] <- BIC[[i]]
row[["r"]] <- sqrt(summary(models[[i]])$r.squared)
row[["r2"]] <- summary(models[[i]])$r.squared
row[["r2Adj"]] <- summary(models[[i]])$adj.r.squared
row[["rmse"]] <- sqrt(mean(models[[i]]$residuals^2))
# row[["bf"]] <- exp(modelsBF[[i]]@bayesFactor$bf)
# row[["err"]] <- modelsBF[[i]]@bayesFactor$error
F <- summary(models[[i]])$fstatistic
if ( ! is.null(F)) {
row[["f"]] <- as.numeric(F[1])
row[["df1"]] <- as.numeric(F[2])
row[["df2"]] <- as.numeric(F[3])
row[["p"]] <- stats::pf(F[1], F[2], F[3], lower.tail=FALSE)
} else {
row[["f"]] <- ""
row[["df1"]] <- ""
row[["df2"]] <- ""
row[["p"]] <- ""
}
table$setRow(rowNo=i, values = row)
}
},
.populateModelCompTable = function(results) {
table <- self$results$modelComp
models <- results$models
# modelsBF <- results$modelsBF
ANOVA <- results$ANOVA
r <- ANOVA[-1,]
if (length(models) <= 1)
return()
for (i in 1:(length(models)-1)) {
row <- list()
row[["r2"]] <- abs(summary(models[[i]])$r.squared - summary(models[[i+1]])$r.squared)
row[["f"]] <- (r[i,4] / r[i,3]) / (r[i,2] / r[i,1])
row[["df1"]] <- r[i,3]
row[["df2"]] <- r[i,1]
row[["p"]] <- stats::pf(row[["f"]], row[["df1"]], row[["df2"]], lower.tail=FALSE)
# BF <- modelsBF[[i+1]]/modelsBF[[i]]
# row[["bf"]] <- exp(BF@bayesFactor$bf)
# row[["err"]] <- BF@bayesFactor$error
table$setRow(rowNo=i, values = row)
}
},
.populateAnovaTables = function(results) {
groups <- self$results$models
termsAll <- private$terms
anova <- results$anovaTerms
for (i in seq_along(termsAll)) {
table <- groups$get(key=i)$anova
terms <- termsAll[[i]]
termsB64 <- lapply(terms, jmvcore::toB64)
r <- anova[[i]]
rowTerms <- jmvcore::decomposeTerms(rownames(r))
resIndex <- length(rowTerms)
for (j in seq_along(terms)) {
term <- termsB64[[j]]
# check which rows have the same length + same terms
index <- which(length(term) == sapply(rowTerms, length) &
sapply(rowTerms, function(x) all(term %in% x)))
ss <- r[index,'Sum Sq']
df <- r[index,'Df']
ms <- ss / df
F <- r[index,'F value']
p <- r[index,'Pr(>F)']
if ( ! is.finite(ss))
ss <- 0
if ( ! is.finite(ms))
ms <- ''
if ( ! is.finite(F))
F <- ''
if ( ! is.finite(p))
p <- ''
row <- list(ss=ss, df=df, ms=ms, F=F, p=p)
table$setRow(rowKey=paste0(terms[[j]]), values = row)
}
ss <- r[resIndex,'Sum Sq']
df <- r[resIndex,'Df']
ms <- ss / df
row <- list(ss=ss, df=df, ms=ms, F='', p='')
table$setRow(rowKey='.RES', values = row)
}
},
.populateCoefTables = function(results) {
groups <- self$results$models
termsAll <- private$coefTerms
models <- results$models
modelsScaled <- results$modelsScaled
for (i in seq_along(termsAll)) {
table <- groups$get(key=i)$coef
model <- summary(models[[i]])
modelScaled <- summary(modelsScaled[[i]])
CI <- results$CI[[i]]
CIScaled <- results$CIScaled[[i]]
coef<- model$coef
coefScaled <- modelScaled$coef
# stdEst <- results$betas[[i]]
terms <- termsAll[[i]]
rowTerms <- jmvcore::decomposeTerms(rownames(coef))
for (j in seq_along(terms)) {
term <- terms[[j]]
# check which rows have the same length + same terms
index <- which(length(term) == sapply(rowTerms, length) &
sapply(rowTerms, function(x) all(term %in% x)))
row <- list()
row[["est"]] <- coef[index, 1]
row[["se"]] <- coef[index, 2]
row[["t"]] <- coef[index, 3]
row[["p"]] <- coef[index, 4]
row[["lower"]] <- CI[index, 1]
row[["upper"]] <- CI[index, 2]
if (rowTerms[index] == "(Intercept)") {
row[["stdEst"]] <- ""
row[["stdEstLower"]] <- ""
row[["stdEstUpper"]] <- ""
} else {
row[["stdEst"]] <- coefScaled[index, 1]
row[["stdEstLower"]] <- CIScaled[index, 1]
row[["stdEstUpper"]] <- CIScaled[index, 2]
}
table$setRow(rowKey=jmvcore::composeTerm(term), values = row)
}
}
},
        .populateCooksTable = function(results) {
            # Fill the one-row summary of Cook's distances (influence
            # diagnostics) for each model; distances are computed in .compute().
            groups <- self$results$models
            termsAll <- private$terms
            for (i in seq_along(termsAll)) {
                table <- groups$get(key=i)$dataSummary$cooks
                cooks <- results$cooks[[i]]
                row <- list()
                row[['mean']] <- mean(cooks)
                row[['median']] <- median(cooks)
                row[['sd']] <- sd(cooks)
                row[['min']] <- min(cooks)
                row[['max']] <- max(cooks)
                table$setRow(rowNo=1, values=row)
            }
        },
        .populateDurbinWatsonTable = function(results) {
            # Fill the Durbin-Watson autocorrelation table for each model.
            # dwTest is empty when the 'durbin' option is off (see .compute()),
            # in which case there is nothing to populate.
            if (length(results$dwTest) == 0)
                return()
            groups <- self$results$models
            termsAll <- private$terms
            for (i in seq_along(termsAll)) {
                table <- groups$get(key=i)$assump$durbin
                dwTest <- results$dwTest[[i]]
                row <- list()
                # car::durbinWatsonTest result: [1] autocorrelation, [2] DW
                # statistic, [3] p-value
                row[["autoCor"]] <- as.numeric(dwTest[1])
                row[["dw"]] <- as.numeric(dwTest[2])
                row[["p"]] <- as.numeric(dwTest[3])
                table$setRow(rowNo=1, values=row)
            }
        },
.populateCollinearityTable = function(results) {
groups <- self$results$models
termsAll <- private$terms
for (i in seq_along(termsAll)) {
table <- groups$get(key=i)$assump$collin
terms <- lapply(termsAll[[i]], jmvcore::toB64)
if (length(results$VIF) == 0)
VIF <- NULL
else
VIF <- results$VIF[[i]]
if (length(dim(VIF)) > 1) {
names <- rownames(VIF)
VIF <- VIF[,3]
names(VIF) <- names
}
rowTerms <- jmvcore::decomposeTerms(names(VIF))
for (i in seq_along(terms)) {
row <- list()
if (length(terms) <= 1) {
row[["tol"]] <- 1
row[["vif"]] <- 1
} else {
# check which rows have the same length + same terms
index <- which(length(terms[[i]]) == sapply(rowTerms, length) &
sapply(rowTerms, function(x) all(terms[[i]] %in% x)))
row[["tol"]] <- 1 / as.numeric(VIF[index])
row[["vif"]] <- as.numeric(VIF[index])
}
table$setRow(rowNo=i, values=row)
}
}
},
.populateEmmTables = function() {
groups <- self$results$models
termsAll <- private$terms
emMeans <- self$options$emMeans
factors <- self$options$factors
covs <- self$options$covs
emmTables <- private$emMeans
for (i in seq_along(termsAll)) {
group <- groups$get(key=i)$emm
terms <- unique(unlist(termsAll[[i]]))
for (j in seq_along(emMeans)) {
emm <- emMeans[[j]]
if ( ! is.null(emm) && all(emm %in% terms)) {
emmGroup <- group$get(key=j)
table <- emmGroup$emmTable
emmTable <- emmTables[[i]][[j]]
covValues <- list()
for (k in seq_along(emm)) {
if (emm[k] %in% covs)
covValues[[ emm[k] ]] <- sort(unique(emmTable[, jmvcore::toB64(emm[k])]))
}
for (k in 1:nrow(emmTable)) {
row <- list()
sign <- list()
for (l in seq_along(emm)) {
value <- emmTable[k, jmvcore::toB64(emm[l])]
if (emm[l] %in% factors) {
row[[emm[l]]] <- jmvcore::fromB64(value)
} else {
row[[emm[l]]] <- value
if (value == covValues[[ emm[l] ]][1])
sign[[ emm[l] ]] <- '\u207B'
else if (value == covValues[[ emm[l] ]][3])
sign[[ emm[l] ]] <- '\u207A'
else
sign[[ emm[l] ]] <- '<sup>\u03BC</sup>'
}
}
row[['emmean']] <- emmTable[k, 'emmean']
row[['se']] <- emmTable[k, 'SE']
row[['lower']] <- emmTable[k, 'lower.CL']
row[['upper']] <- emmTable[k, 'upper.CL']
table$setRow(rowNo=k, values=row)
if (length(covValues) > 0) {
table$setNote("sub", "\u207B mean - 1SD, <sup>\u03BC</sup> mean, \u207A mean + 1SD")
for (l in seq_along(emm)) {
if (emm[l] %in% covs)
table$addSymbol(rowNo=k, emm[l], sign[[ emm[l] ]])
}
}
}
}
}
}
},
        .populateNormality = function(results) {
            # Run the Shapiro-Wilk normality test on each model's residuals and
            # fill the assumption-check row; shapiro.test() errors (e.g. too
            # few or too many residuals) are caught and shown as NaN.
            groups <- self$results$models
            termsAll <- private$terms
            for (i in seq_along(termsAll)) {
                model <- results$models[[i]]
                table <- groups$get(key=i)$assump$get('norm')
                res <- try(shapiro.test(model$residuals), silent=TRUE)
                if (jmvcore::isError(res)) {
                    values <- list(`s[sw]`=NaN, `p[sw]`='')
                } else {
                    values <- list(`s[sw]`=res$statistic, `p[sw]`=res$p.value)
                }
                table$setRow(rowNo=1, values)
            }
        },
#### Plot functions ----
        .prepareQQPlot = function(results) {
            # Precompute Q-Q plot coordinates (theoretical vs standardized
            # residual quantiles) for each model and stash them as image state;
            # the actual rendering happens later in .qqPlot().
            groups <- self$results$models
            termsAll <- private$terms
            for (i in seq_along(termsAll)) {
                model <- results$models[[i]]
                image <- groups$get(key=i)$assump$get('qqPlot')
                df <- as.data.frame(qqnorm(scale(model$residuals), plot.it=FALSE))
                image$setState(df)
            }
        },
        .qqPlot = function(image, ggtheme, theme, ...) {
            # Render the Q-Q plot from the state prepared in .prepareQQPlot();
            # returns FALSE (no plot) when no state is available yet.
            if (is.null(image$state))
                return(FALSE)
            p <- ggplot(data=image$state, aes(x=x, y=y)) +
                geom_abline(slope=1, intercept=0, colour=theme$color[1]) +
                geom_point(aes(x=x,y=y), size=2, colour=theme$color[1]) +
                xlab("Theoretical Quantiles") +
                ylab("Standardized Residuals") +
                ggtheme
            return(p)
        },
.prepareResPlots = function(data, results) {
groups <- self$results$models
termsAll <- private$terms
for (i in seq_along(termsAll)) {
model <- results$models[[i]]
res <- model$residuals
images <- groups$get(key=i)$assump$resPlots
for (term in images$itemKeys) {
if (term == 'Fitted') {
x <- model$fitted.values
} else {
x <- data[[jmvcore::toB64(term)]]
}
df <- data.frame(y=res, x=x)
image <- images$get(key=term)
image$setState(list(df=df, xlab=term))
}
}
},
        .resPlot = function(image, ggtheme, theme, ...) {
            # Render a residuals scatter plot (residuals vs fitted values, the
            # dependent variable, or a covariate) from state prepared in
            # .prepareResPlots(); returns FALSE when no state is available.
            if (is.null(image$state))
                return(FALSE)
            p <- ggplot(data=image$state$df, aes(y=y, x=x)) +
                geom_point(aes(x=x,y=y), colour=theme$color[1]) +
                xlab(image$state$xlab) +
                ylab("Residuals") +
                ggtheme
            return(p)
        },
.prepareCoefPlot = function(results) {
image <- self$results$coefPlot
betas <- results$betas[[private$modelSelected]]
df <- data.frame(
term = jmvcore::fromB64(names(betas$beta)),
estimate = as.numeric(betas$beta),
conf.low = as.numeric(betas$lower),
conf.high = as.numeric(betas$upper),
group = rep('CI', length(betas$beta))
)
df$term <- factor(df$term, rev(df$term))
image$setState(df)
},
.coefPlot = function(image, ggtheme, theme, ...) {
if (is.null(image$state))
return(FALSE)
themeSpec <- theme(
legend.position = 'right',
legend.background = element_rect("transparent"),
legend.title = element_blank(),
legend.key = element_blank(),
legend.text = element_text(size=16, colour='#333333'))
errorType <- paste0(self$options$ciWidth, '% CI')
p <- ggplot(data=image$state) +
geom_hline(yintercept=0, linetype="dotted", colour=theme$color[1], size=1.2) +
geom_errorbar(aes(x=term, ymin=conf.low, ymax=conf.high, width=.1, colour='colour'), size=.8) +
geom_point(aes(x=term, y=estimate, colour='colour'), shape=21, fill=theme$fill[1], size=3) +
scale_colour_manual(name='', values=c(colour=theme$color[1]), labels=paste("", errorType)) +
labs(x="Predictor", y="Standardized Estimate") +
coord_flip() +
ggtheme + themeSpec
return(p)
},
.prepareEmmPlots = function(models, data) {
covs <- self$options$covs
factors <- self$options$factors
dep <- self$options$dep
groups <- self$results$models
termsAll <- private$terms
emMeans <- self$options$emMeans
emmTables <- list()
for (i in seq_along(termsAll)) {
group <- groups$get(key=i)$emm
terms <- unique(unlist(termsAll[[i]]))
model <- models[[i]]
emmTable <- list()
for (j in seq_along(emMeans)) {
term <- emMeans[[j]]
if ( ! is.null(term) && all(term %in% terms)) {
image <- group$get(key=j)$emmPlot
termB64 <- jmvcore::toB64(term)
FUN <- list(); FUN2 <- list()
cont <- FALSE
for(k in seq_along(termB64)) {
if (term[k] %in% covs) {
if (k == 1) {
FUN[[termB64[k]]] <- function(x) pretty(x, 25)
cont <- TRUE
} else {
FUN[[termB64[k]]] <- function(x) c(mean(x)-sd(x), mean(x), mean(x)+sd(x))
}
FUN2[[termB64[[k]]]] <- function(x) c(mean(x)-sd(x), mean(x), mean(x)+sd(x))
}
}
formula <- formula(paste('~', jmvcore::composeTerm(termB64)))
if (self$options$emmWeights)
weights <- 'equal'
else
weights <- 'cells'
suppressMessages({
mm <- try(
emmeans::emmeans(model, formula, cov.reduce=FUN, options=list(level=self$options$ciWidthEmm / 100), weights = weights, data=data),
silent = TRUE
)
emmTable[[ j ]] <- try(
as.data.frame(summary(emmeans::emmeans(model, formula, cov.reduce=FUN2, options=list(level=self$options$ciWidthEmm / 100), weights = weights, data=data))),
silent = TRUE
)
})
# if (class(mm) == 'try-error')
# jmvcore::reject('No variable named rank in the reference grid')
d <- as.data.frame(summary(mm))
for (k in 1:3) {
if ( ! is.na(termB64[k])) {
if (term[k] %in% covs) {
if (k > 1) {
d[[ termB64[k] ]] <- factor(d[[ termB64[k] ]])
levels(d[[ termB64[k] ]]) <- c('-1SD', 'Mean', '+1SD')
}
} else {
d[[ termB64[k] ]] <- factor(jmvcore::fromB64(d[[ termB64[k] ]]),
jmvcore::fromB64(levels(d[[ termB64[k] ]])))
}
}
}
names <- list('x'=termB64[1], 'y'='emmean', 'lines'=termB64[2], 'plots'=termB64[3], 'lower'='lower.CL', 'upper'='upper.CL')
names <- lapply(names, function(x) if (is.na(x)) NULL else x)
labels <- list('x'=term[1], 'y'=dep, 'lines'=term[2], 'plots'=term[3])
labels <- lapply(labels, function(x) if (is.na(x)) NULL else x)
image$setState(list(data=d, names=names, labels=labels, cont=cont))
}
}
emmTables[[i]] <- emmTable
}
private$emMeans <- emmTables
},
.emmPlot = function(image, ggtheme, theme, ...) {
if (is.null(image$state))
return(FALSE)
data <- image$state$data
names <- image$state$names
labels <- image$state$labels
cont <- image$state$cont
dodge <- position_dodge(0.4)
p <- ggplot(data=data, aes_string(x=names$x, y=names$y, color=names$lines, fill=names$lines), inherit.aes = FALSE)
if (cont) {
p <- p + geom_line()
if (self$options$ciEmm && is.null(names$plots) && is.null(names$lines))
p <- p + geom_ribbon(aes_string(x=names$x, ymin=names$lower, ymax=names$upper), show.legend=TRUE, alpha=.3)
} else {
p <- p + geom_point(position = dodge)
if (self$options$ciEmm)
p <- p + geom_errorbar(aes_string(x=names$x, ymin=names$lower, ymax=names$upper), width=.1, size=.8, position=dodge)
}
if ( ! is.null(names$plots)) {
formula <- as.formula(paste(". ~", names$plots))
p <- p + facet_grid(formula)
}
p <- p +
labs(x=labels$x, y=labels$y, fill=labels$lines, color=labels$lines) +
ggtheme + theme(panel.spacing = unit(2, "lines"))
return(p)
},
#### Helper functions ----
.modelTerms = function() {
blocks <- self$options$blocks
terms <- list()
if (is.null(blocks)) {
terms[[1]] <- c(self$options$covs, self$options$factors)
} else {
for (i in seq_along(blocks)) {
terms[[i]] <- unlist(blocks[1:i], recursive = FALSE)
}
}
private$terms <- terms
},
.coefTerms = function(terms) {
covs <- self$options$covs
factors <- self$options$factors
refLevels <- self$options$refLevels
refVars <- sapply(refLevels, function(x) x$var)
levels <- list()
for (factor in factors)
levels[[factor]] <- levels(self$data[[factor]])
contrLevels <- list(); refLevel <- list(); contr <- list(); rContr <- list()
for (term in terms) {
if (term %in% factors) {
ref <- refLevels[[which(term == refVars)]][['ref']]
refNo <- which(ref == levels[[term]])
contrLevels[[term]] <- levels[[term]][-refNo]
refLevel[[term]] <- levels[[term]][refNo]
if (length(terms) > 1)
contr[[term]] <- paste0('(', paste(contrLevels[[term]], refLevel[[term]], sep = ' \u2013 '), ')')
else
contr[[term]] <- paste(contrLevels[[term]], refLevel[[term]], sep = ' \u2013 ')
rContr[[term]] <- paste0(jmvcore::toB64(term), 1:length(contrLevels[[term]]))
} else {
contr[[term]] <- term
rContr[[term]] <- jmvcore::toB64(term)
}
}
grid <- expand.grid(contr)
coefNames <- apply(grid, 1, jmvcore::stringifyTerm)
grid2 <- expand.grid(rContr)
coefTerms <- list()
for (i in 1:nrow(grid2))
coefTerms[[i]] <- as.character(unlist(grid2[i,]))
return(list(coefNames=coefNames, coefTerms=coefTerms))
},
.formulas = function() {
dep <- self$options$dep
depB64 <- jmvcore::toB64(dep)
terms <- private$terms
formulas <- list();
for (i in seq_along(terms)) {
termsB64 <- lapply(terms[[i]], jmvcore::toB64)
composedTerms <- jmvcore::composeTerms(termsB64)
formulas[[i]] <- as.formula(paste(depB64, paste0(composedTerms, collapse ="+"), sep="~"))
}
return(formulas)
},
.cleanData = function() {
dep <- self$options$dep
covs <- self$options$covs
factors <- self$options$factors
refLevels <- self$options$refLevels
dataRaw <- self$data
data <- list()
refVars <- sapply(refLevels, function(x) x$var)
for (factor in factors) {
ref <- refLevels[[which(factor == refVars)]][['ref']]
rows <- jmvcore::toB64(as.character(dataRaw[[factor]]))
levels <- jmvcore::toB64(levels(dataRaw[[factor]]))
column <- factor(rows, levels=levels)
column <- relevel(column, ref = jmvcore::toB64(ref))
data[[jmvcore::toB64(factor)]] <- column
stats::contrasts(data[[jmvcore::toB64(factor)]]) <- private$.createContrasts(levels)
}
for (cov in c(dep, covs))
data[[jmvcore::toB64(cov)]] <- jmvcore::toNumeric(dataRaw[[cov]])
attr(data, 'row.names') <- seq_len(length(data[[1]]))
attr(data, 'class') <- 'data.frame'
data <- jmvcore::naOmit(data)
return(data)
},
.plotSize = function(emm) {
data <- self$data
covs <- self$options$covs
factors <- self$options$factors
levels <- list()
for (i in seq_along(emm)) {
column <- data[[ emm[i] ]]
if (emm[i] %in% factors) {
levels[[ emm[i] ]] <- levels(column)
} else {
if (i == 1)
levels[[ emm[i] ]] <- ''
else
levels[[ emm[i] ]] <- c('-1SD', 'Mean', '+1SD')
}
}
nLevels <- as.numeric(sapply(levels, length))
nLevels <- ifelse(is.na(nLevels[1:3]), 1, nLevels[1:3])
nCharLevels <- as.numeric(sapply(lapply(levels, nchar), max))
nCharLevels <- ifelse(is.na(nCharLevels[1:3]), 0, nCharLevels[1:3])
nCharNames <- as.numeric(nchar(names(levels)))
nCharNames <- ifelse(is.na(nCharNames[1:3]), 0, nCharNames[1:3])
xAxis <- 30 + 20
yAxis <- 30 + 20
if (emm[1] %in% factors) {
width <- max(350, 25 * nLevels[1] * nLevels[2] * nLevels[3])
height <- 300 + ifelse(nLevels[3] > 1, 20, 0)
} else {
width <- max(350, 300 * nLevels[3])
height <- 300 + ifelse(nLevels[3] > 1, 20, 0)
}
legend <- max(25 + 21 + 3.5 + 8.3 * nCharLevels[2] + 28, 25 + 10 * nCharNames[2] + 28)
width <- yAxis + width + ifelse(nLevels[2] > 1, legend, 0)
height <- xAxis + height
return(c(width, height))
},
        .createContrasts=function(levels) {
            # Build the contrast matrix for a factor according to the intercept
            # option: treatment (dummy) coding when the intercept represents the
            # reference level, otherwise a centred coding so the intercept
            # represents the grand mean.
            if (self$options$intercept == 'refLevel') {
                contrast <- contr.treatment(levels)
                dimnames(contrast) <- NULL
            } else {
                # subtracting 1/nLevels from every dummy code centres the
                # coding, shifting the intercept to the grand mean
                nLevels <- length(levels)
                dummy <- contr.treatment(levels)
                dimnames(dummy) <- NULL
                coding <- matrix(rep(1/nLevels, prod(dim(dummy))), ncol=nLevels-1)
                contrast <- (dummy - coding)
            }
            return(contrast)
        },
        .stdEst = function(model) {
            # Standardized coefficient estimates (betas) with confidence
            # intervals, computed by rescaling unstandardized estimates.
            # From 'QuantPsyc' R package
            # NOTE(review): currently unused -- its call sites in .compute()
            # are commented out; standardized estimates are instead obtained by
            # refitting the model on scaled data. Retained for reference.
            b <- summary(model)$coef[-1,1]
            sx <- sapply(model$model[-1], sd)
            sy <- sapply(model$model[1], sd)
            beta <- b * sx / sy
            CI <- stats::confint(model, level = self$options$ciWidthStdEst / 100)[-1,]
            betaCI <- CI * sx / sy
            # with a single predictor confint() returns a vector, not a matrix
            if (is.matrix(betaCI))
                r <- list(beta=beta, lower=betaCI[,1], upper=betaCI[,2])
            else
                r <- list(beta=beta, lower=betaCI[1], upper=betaCI[2])
            return(r)
        },
.scaleData = function(data) {
for (col in names(data)) {
if ( ! is.factor(data[[col]]))
data[[col]] <- scale(data[[col]])
}
return(data)
})
)
| /R/linreg.b.R | no_license | jonathon-love/jmv | R | false | false | 41,079 | r |
linRegClass <- R6::R6Class(
"linRegClass",
inherit = linRegBase,
private = list(
#### Member variables ----
terms = NULL,
coefTerms = list(),
emMeans = list(),
#### Init + run functions ----
# Set up all result tables / plot containers before any computation
# (jamovi renders the empty scaffolding from .init, then fills it in .run).
.init = function() {
private$.modelTerms()  # must run first: the .init* helpers below read private$terms
private$.initModelFitTable()
private$.initModelCompTable()
private$.initModelSpec()
private$.initAnovaTables()
private$.initCoefTable()
private$.initCollinearityTable()
private$.initResPlots()
private$.initEmm()
private$.initEmmTable()
},
# Main analysis entry point: bail out until a dependent variable and at
# least one non-empty predictor block are specified, then clean the data,
# fit all models and populate every table/plot.
.run = function() {
ready <- TRUE
# not ready until dep is set and the first block contains a term
if (is.null(self$options$dep) || length(self$options$blocks) < 1 || length(self$options$blocks[[1]]) == 0)
ready <- FALSE
if (ready) {
data <- private$.cleanData()
results <- private$.compute(data)
private$.populateModelFitTable(results)
private$.populateModelCompTable(results)
private$.populateAnovaTables(results)
private$.populateCoefTables(results)
private$.populateCooksTable(results)
private$.populateCollinearityTable(results)
private$.populateDurbinWatsonTable(results)
private$.populateNormality(results)
private$.prepareQQPlot(results)
private$.prepareResPlots(data, results)
private$.prepareEmmPlots(results$models, data=data)
private$.populateEmmTables()
# private$.prepareCoefPlot(results)
}
},
#### Compute results ----
# Fit one lm() per model block (raw and standardized data) and compute all
# derived statistics. Returns a list consumed by the .populate* methods.
.compute = function(data) {
formulas <- private$.formulas()
scaledData <- private$.scaleData(data)
models <- list(); modelsScaled <- list(); anovaTerms <- list()
for (i in seq_along(formulas)) {
models[[i]] <- lm(formulas[[i]], data=data)
# Type-3 term-wise ANOVA; singular.ok so aliased terms don't error
anovaTerms[[i]] <- car::Anova(models[[i]], type=3, singular.ok=TRUE)
# same model on scaled data yields standardized coefficients
modelsScaled[[i]] <- lm(formulas[[i]], data=scaledData)
}
# sequential model comparison (block 1 vs 2 vs 3 ...)
ANOVA <- do.call(stats::anova, models)
AIC <- list(); BIC <- list(); CI <- list();
CIScaled <- list(); dwTest <- list(); VIF <- list(); cooks <- list()
for (i in seq_along(models)) {
AIC[[i]] <- stats::AIC(models[[i]])
BIC[[i]] <- stats::BIC(models[[i]])
# betas[[i]] <- private$.stdEst(models[[i]])
CI[[i]] <- stats::confint(models[[i]], level = self$options$ciWidth / 100)
CIScaled[[i]] <- stats::confint(modelsScaled[[i]], level = self$options$ciWidth / 100)
cooks[[i]] <- stats::cooks.distance(models[[i]])
# VIF only defined with >= 2 terms.
# NOTE(review): assigning NULL into a list removes/omits the element,
# so VIF may end up shorter than models; downstream code checks
# length(results$VIF) == 0 — confirm this is intended.
if (length(private$terms[[i]]) > 1)
VIF[[i]] <- car::vif(models[[i]])
else
VIF[[i]] <- NULL
if (self$options$durbin)
dwTest[[i]] <- car::durbinWatsonTest(models[[i]])
else
dwTest[[i]] <- NULL
}
return(list(models=models, modelsScaled=modelsScaled, ANOVA=ANOVA,
anovaTerms=anovaTerms, AIC=AIC, BIC=BIC, CI=CI, CIScaled=CIScaled,
dwTest=dwTest, VIF=VIF, cooks=cooks))
},
#### Init tables/plots functions ----
# Add one (empty) row per model block to the 'Model Fit Measures' table.
.initModelFitTable = function() {
    table <- self$results$modelFit
    blocks <- self$options$blocks
    # iterate over the local copy (it was previously fetched but unused)
    for (i in seq_along(blocks))
        table$addRow(rowKey=i, values=list(model = i))
},
# Add one row per adjacent pair of models to the 'Model Comparisons'
# table; the table is hidden entirely when only a single block exists.
.initModelCompTable = function() {
    table <- self$results$modelComp
    blocks <- self$options$blocks
    if (length(blocks) <= 1) {
        table$setVisible(visible = FALSE)
        return()
    }
    # seq_len() instead of 1:(n-1); safe because the guard above
    # guarantees length(blocks) >= 2
    for (i in seq_len(length(blocks) - 1))
        table$addRow(rowKey=i, values=list(model1 = i, model2 = as.integer(i+1)))
},
# Create one results group per model block, titled "Model 1", "Model 2", ...
.initModelSpec = function() {
    modelGroups <- self$results$models
    nBlocks <- length(self$options$blocks)
    for (idx in seq_len(nBlocks)) {
        modelGroups$addItem(key=idx)
        modelGroups$get(key=idx)$setTitle(paste("Model", idx))
    }
},
# For each model, add one omnibus-ANOVA row per term plus a trailing
# 'Residuals' row.
.initAnovaTables = function() {
groups <- self$results$models
termsAll <- private$terms
for (i in seq_along(termsAll)) {
table <- groups$get(key=i)$anova
terms <- termsAll[[i]]
for (j in seq_along(terms))
table$addRow(rowKey=paste0(terms[[j]]), values=list(term = jmvcore::stringifyTerm(terms[j])))
# residuals row, visually separated from the term rows
table$addRow(rowKey='.RES', values=list(term = 'Residuals'))
table$addFormat(col=1, rowKey='.RES', format=Cell.BEGIN_GROUP)
table$setNote("ss", "Type 3 sum of squares")
}
},
# Build each model's coefficients table: CI super-titles, an intercept row,
# one row per covariate term, and (for factor terms) a header row followed
# by one indented row per contrast. Also caches the coefficient-term keys
# in private$coefTerms for .populateCoefTables to match against lm output.
.initCoefTable = function() {
groups <- self$results$models
termsAll <- private$terms
data <- self$data
factors <- self$options$factors
dep <- self$options$dep
for (i in seq_along(termsAll)) {
table <- groups$get(key=i)$coef
# CI headers reflect the user-chosen interval widths
ciWidth <- self$options$ciWidth
table$getColumn('lower')$setSuperTitle(jmvcore::format('{}% Confidence Interval', ciWidth))
table$getColumn('upper')$setSuperTitle(jmvcore::format('{}% Confidence Interval', ciWidth))
ciWidthStdEst <- self$options$ciWidthStdEst
table$getColumn('stdEstLower')$setSuperTitle(jmvcore::format('{}% Confidence Interval', ciWidthStdEst))
table$getColumn('stdEstUpper')$setSuperTitle(jmvcore::format('{}% Confidence Interval', ciWidthStdEst))
coefTerms <- list()
table$addRow(rowKey="`(Intercept)`", values=list(term = "Intercept"))
coefTerms[[1]] <- "(Intercept)"
if ( ! is.null(factors)) {
# intercept meaning depends on the factor coding chosen by the user
note <- ifelse(self$options$intercept == 'refLevel', 'Represents reference level',
'Represents grand mean')
table$addFootnote(rowKey="`(Intercept)`", 'term', note)
}
terms <- termsAll[[i]]
for (j in seq_along(terms)) {
if (any(terms[[j]] %in% factors)) { # check if there are factors in the term
# header row for the factor term (no statistics of its own)
table$addRow(rowKey=terms[[j]], values=list(term = paste0(jmvcore::stringifyTerm(terms[[j]]), ':'),
est='', se='', t='', p='',
lower='', upper='', stdEst='',
stdEstLower='', stdEstUpper=''))
coefs <- private$.coefTerms(terms[[j]])
coefNames <- coefs$coefNames
for (k in seq_along(coefNames)) {
rowKey <- jmvcore::composeTerm(coefs$coefTerms[[k]])
table$addRow(rowKey=rowKey, values=list(term = coefNames[[k]]))
table$addFormat(rowKey=rowKey, col=1, Cell.INDENTED)
}
coefTerms <- c(coefTerms, coefs$coefTerms)
} else {
# plain covariate term: a single coefficient row
rowKey <- jmvcore::composeTerm(jmvcore::toB64(terms[[j]]))
table$addRow(rowKey=rowKey, values=list(term = jmvcore::stringifyTerm(terms[[j]])))
coefTerms[[length(coefTerms) + 1]] <- jmvcore::toB64(terms[[j]])
}
}
private$coefTerms[[i]] <- coefTerms
}
},
# Add one row per predictor term to each model's collinearity table.
.initCollinearityTable = function() {
    groups <- self$results$models
    termsAll <- private$terms
    for (i in seq_along(termsAll)) {
        table <- groups$get(key=i)$assump$collin
        terms <- termsAll[[i]]
        if (length(terms) < 1)
            terms <- ''
        # distinct inner index: the original reused 'i' for both loops,
        # shadowing the model index inside the term loop
        for (j in seq_along(terms))
            table$addRow(rowKey=j, values=list(term = jmvcore::stringifyTerm(terms[j])))
    }
},
# Create one residual-plot image per model: residuals vs fitted values,
# vs the dependent variable, and vs each single covariate term.
.initResPlots=function() {
groups <- self$results$models
termsAll <- private$terms
covs <- self$options$covs
for (i in seq_along(termsAll)) {
modelTerms <- termsAll[[i]]
if (length(modelTerms) < 1) {
terms <- ''
} else {
terms <- c('Fitted', self$options$dep)
# only main-effect covariates get their own residual plot
for (term in modelTerms) {
if (length(term) == 1 && term %in% covs)
terms <- c(terms, term)
}
}
images <- groups$get(key=i)$assump$resPlots
for (term in terms)
images$addItem(term)
}
},
# Create one estimated-marginal-means plot group per requested EMM term
# combination, but only for models that contain all variables involved.
.initEmm = function() {
groups <- self$results$models
termsAll <- private$terms
emMeans <- self$options$emMeans
for (i in seq_along(termsAll)) {
group <- groups$get(key=i)$emm
terms <- unique(unlist(termsAll[[i]]))
for (j in seq_along(emMeans)) {
emm <- emMeans[[j]]
# skip EMM requests referencing variables absent from this model
if ( ! is.null(emm) && all(emm %in% terms)) {
group$addItem(key=j)
emmGroup <- group$get(key=j)
emmGroup$setTitle(jmvcore::stringifyTerm(emm))
image <- emmGroup$emmPlot
# plot size depends on the number/length of levels involved
size <- private$.plotSize(emm)
image$setSize(size[1], size[2])
}
}
}
},
# Build the estimated-marginal-means table for every requested EMM term
# combination present in each model: one grouping column per EMM variable
# (covariates are summarized at -1SD / mean / +1SD, hence 3 levels), then
# the marginal mean, SE and CI columns, plus one empty row per level combo.
.initEmmTable = function() {
    groups <- self$results$models
    termsAll <- private$terms
    emMeans <- self$options$emMeans
    factors <- self$options$factors
    for (i in seq_along(termsAll)) {
        group <- groups$get(key=i)$emm
        terms <- unique(unlist(termsAll[[i]]))
        for (j in seq_along(emMeans)) {
            emm <- emMeans[[j]]
            if ( ! is.null(emm) && all(emm %in% terms)) {
                emmGroup <- group$get(key=j)
                table <- emmGroup$emmTable
                table$setTitle(paste0('Estimated Marginal Means - ', jmvcore::stringifyTerm(emm)))
                nLevels <- numeric(length(emm))
                # columns are added in reverse so the first EMM variable
                # ends up left-most in the table
                for (k in rev(seq_along(emm))) {
                    if (emm[k] %in% factors) {
                        table$addColumn(name=emm[k], title=emm[k], type='text', combineBelow=TRUE)
                        nLevels[k] <- length(levels(self$data[[ emm[k] ]]))
                    } else {
                        table$addColumn(name=emm[k], title=emm[k], type='number', combineBelow=TRUE)
                        nLevels[k] <- 3  # covariates: -1SD, mean, +1SD
                    }
                }
                table$addColumn(name='emmean', title='Marginal Mean', type='number')
                table$addColumn(name='se', title='SE', type='number')
                # 'visible' was misspelled 'visibl' and only worked via R's
                # partial argument matching -- spelled out here
                table$addColumn(name='lower', title='Lower', type='number', superTitle=paste0(self$options$ciWidthEmm, '% Confidence Interval'), visible="(ciEmm)")
                table$addColumn(name='upper', title='Upper', type='number', superTitle=paste0(self$options$ciWidthEmm, '% Confidence Interval'), visible="(ciEmm)")
                nRows <- prod(nLevels)
                for (k in seq_len(nRows)) {
                    row <- list()
                    table$addRow(rowKey=k, row)
                }
            }
        }
    }
},
#### Populate tables functions ----
# Fill the 'Model Fit Measures' table: AIC/BIC, R, R^2, adjusted R^2,
# RMSE and the overall F test for each fitted model.
.populateModelFitTable = function(results) {
table <- self$results$modelFit
models <- results$models
# modelsBF <- results$modelsBF
AIC <- results$AIC
BIC <- results$BIC
for (i in seq_along(models)) {
row <- list()
row[["aic"]] <- AIC[[i]]
row[["bic"]] <- BIC[[i]]
row[["r"]] <- sqrt(summary(models[[i]])$r.squared)
row[["r2"]] <- summary(models[[i]])$r.squared
row[["r2Adj"]] <- summary(models[[i]])$adj.r.squared
row[["rmse"]] <- sqrt(mean(models[[i]]$residuals^2))
# row[["bf"]] <- exp(modelsBF[[i]]@bayesFactor$bf)
# row[["err"]] <- modelsBF[[i]]@bayesFactor$error
# fstatistic is NULL for an intercept-only model: leave F cells blank
F <- summary(models[[i]])$fstatistic
if ( ! is.null(F)) {
row[["f"]] <- as.numeric(F[1])
row[["df1"]] <- as.numeric(F[2])
row[["df2"]] <- as.numeric(F[3])
row[["p"]] <- stats::pf(F[1], F[2], F[3], lower.tail=FALSE)
} else {
row[["f"]] <- ""
row[["df1"]] <- ""
row[["df2"]] <- ""
row[["p"]] <- ""
}
table$setRow(rowNo=i, values = row)
}
},
# Fill the 'Model Comparisons' table from the sequential anova() result.
# r columns (stats::anova for lm): 1=Res.Df, 2=RSS, 3=Df, 4=Sum of Sq;
# F = (Sum of Sq / Df) / (RSS / Res.Df).
.populateModelCompTable = function(results) {
table <- self$results$modelComp
models <- results$models
# modelsBF <- results$modelsBF
ANOVA <- results$ANOVA
r <- ANOVA[-1,]  # drop the first row (baseline model: no comparison)
if (length(models) <= 1)
return()
for (i in 1:(length(models)-1)) {
row <- list()
# change in R^2 between adjacent models
row[["r2"]] <- abs(summary(models[[i]])$r.squared - summary(models[[i+1]])$r.squared)
row[["f"]] <- (r[i,4] / r[i,3]) / (r[i,2] / r[i,1])
row[["df1"]] <- r[i,3]
row[["df2"]] <- r[i,1]
row[["p"]] <- stats::pf(row[["f"]], row[["df1"]], row[["df2"]], lower.tail=FALSE)
# BF <- modelsBF[[i+1]]/modelsBF[[i]]
# row[["bf"]] <- exp(BF@bayesFactor$bf)
# row[["err"]] <- BF@bayesFactor$error
table$setRow(rowNo=i, values = row)
}
},
# Fill each model's omnibus ANOVA table from the car::Anova (type 3)
# results, matching table rows to ANOVA rows by term composition.
.populateAnovaTables = function(results) {
groups <- self$results$models
termsAll <- private$terms
anova <- results$anovaTerms
for (i in seq_along(termsAll)) {
table <- groups$get(key=i)$anova
terms <- termsAll[[i]]
termsB64 <- lapply(terms, jmvcore::toB64)
r <- anova[[i]]
rowTerms <- jmvcore::decomposeTerms(rownames(r))
resIndex <- length(rowTerms)  # residuals are the last ANOVA row
for (j in seq_along(terms)) {
term <- termsB64[[j]]
# check which rows have the same length + same terms
index <- which(length(term) == sapply(rowTerms, length) &
sapply(rowTerms, function(x) all(term %in% x)))
ss <- r[index,'Sum Sq']
df <- r[index,'Df']
ms <- ss / df
F <- r[index,'F value']
p <- r[index,'Pr(>F)']
# blank out non-finite cells (e.g. aliased terms) rather than show NaN
if ( ! is.finite(ss))
ss <- 0
if ( ! is.finite(ms))
ms <- ''
if ( ! is.finite(F))
F <- ''
if ( ! is.finite(p))
p <- ''
row <- list(ss=ss, df=df, ms=ms, F=F, p=p)
table$setRow(rowKey=paste0(terms[[j]]), values = row)
}
# residuals row has no F/p by definition
ss <- r[resIndex,'Sum Sq']
df <- r[resIndex,'Df']
ms <- ss / df
row <- list(ss=ss, df=df, ms=ms, F='', p='')
table$setRow(rowKey='.RES', values = row)
}
},
# Fill each model's coefficients table: raw estimates/SE/t/p and CI from
# the raw-data fit, standardized estimates and CI from the scaled-data fit.
# Rows are matched to lm coefficient rows by term composition, using the
# keys cached in private$coefTerms by .initCoefTable.
.populateCoefTables = function(results) {
groups <- self$results$models
termsAll <- private$coefTerms
models <- results$models
modelsScaled <- results$modelsScaled
for (i in seq_along(termsAll)) {
table <- groups$get(key=i)$coef
model <- summary(models[[i]])
modelScaled <- summary(modelsScaled[[i]])
CI <- results$CI[[i]]
CIScaled <- results$CIScaled[[i]]
coef<- model$coef
coefScaled <- modelScaled$coef
# stdEst <- results$betas[[i]]
terms <- termsAll[[i]]
rowTerms <- jmvcore::decomposeTerms(rownames(coef))
for (j in seq_along(terms)) {
term <- terms[[j]]
# check which rows have the same length + same terms
index <- which(length(term) == sapply(rowTerms, length) &
sapply(rowTerms, function(x) all(term %in% x)))
row <- list()
row[["est"]] <- coef[index, 1]
row[["se"]] <- coef[index, 2]
row[["t"]] <- coef[index, 3]
row[["p"]] <- coef[index, 4]
row[["lower"]] <- CI[index, 1]
row[["upper"]] <- CI[index, 2]
# the intercept has no meaningful standardized estimate
if (rowTerms[index] == "(Intercept)") {
row[["stdEst"]] <- ""
row[["stdEstLower"]] <- ""
row[["stdEstUpper"]] <- ""
} else {
row[["stdEst"]] <- coefScaled[index, 1]
row[["stdEstLower"]] <- CIScaled[index, 1]
row[["stdEstUpper"]] <- CIScaled[index, 2]
}
table$setRow(rowKey=jmvcore::composeTerm(term), values = row)
}
}
},
# Fill the Cook's distance summary (mean/median/sd/min/max) for each model.
.populateCooksTable = function(results) {
    groups <- self$results$models
    for (i in seq_along(private$terms)) {
        cooks <- results$cooks[[i]]
        summaryRow <- list(
            mean = mean(cooks),
            median = median(cooks),
            sd = sd(cooks),
            min = min(cooks),
            max = max(cooks))
        groups$get(key=i)$dataSummary$cooks$setRow(rowNo=1, values=summaryRow)
    }
},
# Fill the Durbin-Watson autocorrelation row for each model; a no-op when
# the option was disabled and no tests were computed.
.populateDurbinWatsonTable = function(results) {
    if (length(results$dwTest) == 0)
        return()
    groups <- self$results$models
    for (i in seq_along(private$terms)) {
        dw <- results$dwTest[[i]]
        # car::durbinWatsonTest components: [1] autocorrelation,
        # [2] DW statistic, [3] p-value
        groups$get(key=i)$assump$durbin$setRow(rowNo=1, values=list(
            autoCor = as.numeric(dw[1]),
            dw = as.numeric(dw[2]),
            p = as.numeric(dw[3])))
    }
},
# Populate tolerance / VIF values per term for each model.
# car::vif() returns a matrix when factor terms are present; in that case
# the third column (GVIF^(1/(2*Df))) is used. A single-predictor model has
# no collinearity by definition, so tol = vif = 1.
.populateCollinearityTable = function(results) {
    groups <- self$results$models
    termsAll <- private$terms
    for (i in seq_along(termsAll)) {
        table <- groups$get(key=i)$assump$collin
        terms <- lapply(termsAll[[i]], jmvcore::toB64)
        if (length(results$VIF) == 0)
            VIF <- NULL
        else
            VIF <- results$VIF[[i]]
        if (length(dim(VIF)) > 1) {
            names <- rownames(VIF)
            VIF <- VIF[,3]
            names(VIF) <- names
        }
        rowTerms <- jmvcore::decomposeTerms(names(VIF))
        # distinct inner index: the original reused 'i' for both loops,
        # shadowing the model index inside the term loop
        for (j in seq_along(terms)) {
            row <- list()
            if (length(terms) <= 1) {
                row[["tol"]] <- 1
                row[["vif"]] <- 1
            } else {
                # match VIF entries by term composition (length + members)
                index <- which(length(terms[[j]]) == sapply(rowTerms, length) &
                               sapply(rowTerms, function(x) all(terms[[j]] %in% x)))
                row[["tol"]] <- 1 / as.numeric(VIF[index])
                row[["vif"]] <- as.numeric(VIF[index])
            }
            table$setRow(rowNo=j, values=row)
        }
    }
},
# Fill the estimated-marginal-means tables from the emmeans summaries
# cached in private$emMeans by .prepareEmmPlots. Covariate rows are
# annotated with superscript markers for -1SD / mean / +1SD.
.populateEmmTables = function() {
groups <- self$results$models
termsAll <- private$terms
emMeans <- self$options$emMeans
factors <- self$options$factors
covs <- self$options$covs
emmTables <- private$emMeans
for (i in seq_along(termsAll)) {
group <- groups$get(key=i)$emm
terms <- unique(unlist(termsAll[[i]]))
for (j in seq_along(emMeans)) {
emm <- emMeans[[j]]
if ( ! is.null(emm) && all(emm %in% terms)) {
emmGroup <- group$get(key=j)
table <- emmGroup$emmTable
emmTable <- emmTables[[i]][[j]]
# sorted unique covariate values: [1]=-1SD, [2]=mean, [3]=+1SD
covValues <- list()
for (k in seq_along(emm)) {
if (emm[k] %in% covs)
covValues[[ emm[k] ]] <- sort(unique(emmTable[, jmvcore::toB64(emm[k])]))
}
for (k in 1:nrow(emmTable)) {
row <- list()
sign <- list()
for (l in seq_along(emm)) {
value <- emmTable[k, jmvcore::toB64(emm[l])]
if (emm[l] %in% factors) {
row[[emm[l]]] <- jmvcore::fromB64(value)
} else {
row[[emm[l]]] <- value
# pick the superscript marker for this covariate value
if (value == covValues[[ emm[l] ]][1])
sign[[ emm[l] ]] <- '\u207B'
else if (value == covValues[[ emm[l] ]][3])
sign[[ emm[l] ]] <- '\u207A'
else
sign[[ emm[l] ]] <- '<sup>\u03BC</sup>'
}
}
row[['emmean']] <- emmTable[k, 'emmean']
row[['se']] <- emmTable[k, 'SE']
row[['lower']] <- emmTable[k, 'lower.CL']
row[['upper']] <- emmTable[k, 'upper.CL']
table$setRow(rowNo=k, values=row)
# legend + per-row markers only needed when covariates are involved
if (length(covValues) > 0) {
table$setNote("sub", "\u207B mean - 1SD, <sup>\u03BC</sup> mean, \u207A mean + 1SD")
for (l in seq_along(emm)) {
if (emm[l] %in% covs)
table$addSymbol(rowNo=k, emm[l], sign[[ emm[l] ]])
}
}
}
}
}
}
},
# Fill the Shapiro-Wilk normality-of-residuals test for each model;
# falls back to blank cells when the test itself errors (e.g. too few
# or too many residuals for shapiro.test).
.populateNormality = function(results) {
groups <- self$results$models
termsAll <- private$terms
for (i in seq_along(termsAll)) {
model <- results$models[[i]]
table <- groups$get(key=i)$assump$get('norm')
res <- try(shapiro.test(model$residuals), silent=TRUE)
if (jmvcore::isError(res)) {
values <- list(`s[sw]`=NaN, `p[sw]`='')
} else {
values <- list(`s[sw]`=res$statistic, `p[sw]`=res$p.value)
}
table$setRow(rowNo=1, values)
}
},
#### Plot functions ----
# Store theoretical vs. standardized-residual quantiles as plot state for
# each model's Q-Q plot (rendered later by .qqPlot).
.prepareQQPlot = function(results) {
    groups <- self$results$models
    for (i in seq_along(private$terms)) {
        stdRes <- scale(results$models[[i]]$residuals)
        qq <- as.data.frame(qqnorm(stdRes, plot.it=FALSE))
        groups$get(key=i)$assump$get('qqPlot')$setState(qq)
    }
},
# Render the Q-Q plot from the state prepared by .prepareQQPlot;
# returns FALSE when there is nothing to draw yet.
.qqPlot = function(image, ggtheme, theme, ...) {
if (is.null(image$state))
return(FALSE)
p <- ggplot(data=image$state, aes(x=x, y=y)) +
geom_abline(slope=1, intercept=0, colour=theme$color[1]) +
geom_point(aes(x=x,y=y), size=2, colour=theme$color[1]) +
xlab("Theoretical Quantiles") +
ylab("Standardized Residuals") +
ggtheme
return(p)
},
# Store the (x, residual) data frame for every residual plot created by
# .initResPlots: x is the fitted values for 'Fitted', otherwise the
# (base64-encoded) column for the dep/covariate term.
.prepareResPlots = function(data, results) {
groups <- self$results$models
termsAll <- private$terms
for (i in seq_along(termsAll)) {
model <- results$models[[i]]
res <- model$residuals
images <- groups$get(key=i)$assump$resPlots
for (term in images$itemKeys) {
if (term == 'Fitted') {
x <- model$fitted.values
} else {
x <- data[[jmvcore::toB64(term)]]
}
df <- data.frame(y=res, x=x)
image <- images$get(key=term)
image$setState(list(df=df, xlab=term))
}
}
},
# Render a single residuals scatter plot from the state prepared by
# .prepareResPlots; returns FALSE when there is nothing to draw yet.
.resPlot = function(image, ggtheme, theme, ...) {
if (is.null(image$state))
return(FALSE)
p <- ggplot(data=image$state$df, aes(y=y, x=x)) +
geom_point(aes(x=x,y=y), colour=theme$color[1]) +
xlab(image$state$xlab) +
ylab("Residuals") +
ggtheme
return(p)
},
# Build the data frame backing the standardized-coefficient plot.
# NOTE(review): this appears to be dead code -- its only call site in
# .run is commented out, and it reads results$betas (not produced by
# .compute any more) and private$modelSelected (not declared above);
# confirm before re-enabling.
.prepareCoefPlot = function(results) {
image <- self$results$coefPlot
betas <- results$betas[[private$modelSelected]]
df <- data.frame(
term = jmvcore::fromB64(names(betas$beta)),
estimate = as.numeric(betas$beta),
conf.low = as.numeric(betas$lower),
conf.high = as.numeric(betas$upper),
group = rep('CI', length(betas$beta))
)
# reversed factor order so the first term is plotted at the top
df$term <- factor(df$term, rev(df$term))
image$setState(df)
},
# Render the standardized-coefficient forest plot (point estimates with
# error bars, flipped coordinates) from the state set by .prepareCoefPlot.
.coefPlot = function(image, ggtheme, theme, ...) {
if (is.null(image$state))
return(FALSE)
themeSpec <- theme(
legend.position = 'right',
legend.background = element_rect("transparent"),
legend.title = element_blank(),
legend.key = element_blank(),
legend.text = element_text(size=16, colour='#333333'))
errorType <- paste0(self$options$ciWidth, '% CI')
p <- ggplot(data=image$state) +
geom_hline(yintercept=0, linetype="dotted", colour=theme$color[1], size=1.2) +
geom_errorbar(aes(x=term, ymin=conf.low, ymax=conf.high, width=.1, colour='colour'), size=.8) +
geom_point(aes(x=term, y=estimate, colour='colour'), shape=21, fill=theme$fill[1], size=3) +
scale_colour_manual(name='', values=c(colour=theme$color[1]), labels=paste("", errorType)) +
labs(x="Predictor", y="Standardized Estimate") +
coord_flip() +
ggtheme + themeSpec
return(p)
},
# Compute estimated marginal means (via emmeans) for every requested EMM
# term combination of every model, store the plot state, and cache the
# table-oriented summaries in private$emMeans for .populateEmmTables.
# Covariates on the x axis are evaluated over a fine grid (pretty, 25);
# covariates used as line/plot facets are reduced to -1SD / mean / +1SD.
.prepareEmmPlots = function(models, data) {
covs <- self$options$covs
factors <- self$options$factors
dep <- self$options$dep
groups <- self$results$models
termsAll <- private$terms
emMeans <- self$options$emMeans
emmTables <- list()
for (i in seq_along(termsAll)) {
group <- groups$get(key=i)$emm
terms <- unique(unlist(termsAll[[i]]))
model <- models[[i]]
emmTable <- list()
for (j in seq_along(emMeans)) {
term <- emMeans[[j]]
if ( ! is.null(term) && all(term %in% terms)) {
image <- group$get(key=j)$emmPlot
termB64 <- jmvcore::toB64(term)
# FUN: covariate reduction for the plot; FUN2: for the table
FUN <- list(); FUN2 <- list()
cont <- FALSE  # TRUE when the x-axis variable is continuous
for(k in seq_along(termB64)) {
if (term[k] %in% covs) {
if (k == 1) {
FUN[[termB64[k]]] <- function(x) pretty(x, 25)
cont <- TRUE
} else {
FUN[[termB64[k]]] <- function(x) c(mean(x)-sd(x), mean(x), mean(x)+sd(x))
}
FUN2[[termB64[[k]]]] <- function(x) c(mean(x)-sd(x), mean(x), mean(x)+sd(x))
}
}
formula <- formula(paste('~', jmvcore::composeTerm(termB64)))
if (self$options$emmWeights)
weights <- 'equal'
else
weights <- 'cells'
suppressMessages({
mm <- try(
emmeans::emmeans(model, formula, cov.reduce=FUN, options=list(level=self$options$ciWidthEmm / 100), weights = weights, data=data),
silent = TRUE
)
emmTable[[ j ]] <- try(
as.data.frame(summary(emmeans::emmeans(model, formula, cov.reduce=FUN2, options=list(level=self$options$ciWidthEmm / 100), weights = weights, data=data))),
silent = TRUE
)
})
# if (class(mm) == 'try-error')
# jmvcore::reject('No variable named rank in the reference grid')
d <- as.data.frame(summary(mm))
# up to 3 variables: x, lines, plots -- relabel for display
for (k in 1:3) {
if ( ! is.na(termB64[k])) {
if (term[k] %in% covs) {
if (k > 1) {
d[[ termB64[k] ]] <- factor(d[[ termB64[k] ]])
levels(d[[ termB64[k] ]]) <- c('-1SD', 'Mean', '+1SD')
}
} else {
d[[ termB64[k] ]] <- factor(jmvcore::fromB64(d[[ termB64[k] ]]),
jmvcore::fromB64(levels(d[[ termB64[k] ]])))
}
}
}
names <- list('x'=termB64[1], 'y'='emmean', 'lines'=termB64[2], 'plots'=termB64[3], 'lower'='lower.CL', 'upper'='upper.CL')
names <- lapply(names, function(x) if (is.na(x)) NULL else x)
labels <- list('x'=term[1], 'y'=dep, 'lines'=term[2], 'plots'=term[3])
labels <- lapply(labels, function(x) if (is.na(x)) NULL else x)
image$setState(list(data=d, names=names, labels=labels, cont=cont))
}
}
emmTables[[i]] <- emmTable
}
private$emMeans <- emmTables
},
# Render an estimated-marginal-means plot from the state set by
# .prepareEmmPlots: lines + CI ribbon for a continuous x, points +
# error bars for a categorical x, faceted by the optional 'plots' term.
.emmPlot = function(image, ggtheme, theme, ...) {
if (is.null(image$state))
return(FALSE)
data <- image$state$data
names <- image$state$names
labels <- image$state$labels
cont <- image$state$cont
dodge <- position_dodge(0.4)
p <- ggplot(data=data, aes_string(x=names$x, y=names$y, color=names$lines, fill=names$lines), inherit.aes = FALSE)
if (cont) {
p <- p + geom_line()
# ribbon only when there is a single series (no line/plot grouping)
if (self$options$ciEmm && is.null(names$plots) && is.null(names$lines))
p <- p + geom_ribbon(aes_string(x=names$x, ymin=names$lower, ymax=names$upper), show.legend=TRUE, alpha=.3)
} else {
p <- p + geom_point(position = dodge)
if (self$options$ciEmm)
p <- p + geom_errorbar(aes_string(x=names$x, ymin=names$lower, ymax=names$upper), width=.1, size=.8, position=dodge)
}
if ( ! is.null(names$plots)) {
formula <- as.formula(paste(". ~", names$plots))
p <- p + facet_grid(formula)
}
p <- p +
labs(x=labels$x, y=labels$y, fill=labels$lines, color=labels$lines) +
ggtheme + theme(panel.spacing = unit(2, "lines"))
return(p)
},
#### Helper functions ----
# Cache the cumulative term list per model block in private$terms:
# model i contains every term from blocks 1..i. With no blocks defined,
# a single model holding all covariates and factors is used.
.modelTerms = function() {
    blocks <- self$options$blocks
    if (is.null(blocks)) {
        termList <- list(c(self$options$covs, self$options$factors))
    } else {
        termList <- lapply(seq_along(blocks), function(k)
            unlist(blocks[1:k], recursive = FALSE))
    }
    private$terms <- termList
},
# For a single model term, derive the display names and the base64 keys
# of its individual coefficients. Factor variables expand into one entry
# per non-reference level ("level – reference"); interaction terms yield
# the cartesian product of their components via expand.grid().
.coefTerms = function(terms) {
    covs <- self$options$covs
    factors <- self$options$factors
    refLevels <- self$options$refLevels
    refVars <- sapply(refLevels, function(x) x$var)
    levels <- list()
    for (factor in factors)
        levels[[factor]] <- levels(self$data[[factor]])
    contrLevels <- list(); refLevel <- list(); contr <- list(); rContr <- list()
    for (term in terms) {
        if (term %in% factors) {
            ref <- refLevels[[which(term == refVars)]][['ref']]
            refNo <- which(ref == levels[[term]])
            contrLevels[[term]] <- levels[[term]][-refNo]
            refLevel[[term]] <- levels[[term]][refNo]
            # parenthesize contrast labels inside interaction terms
            if (length(terms) > 1)
                contr[[term]] <- paste0('(', paste(contrLevels[[term]], refLevel[[term]], sep = ' \u2013 '), ')')
            else
                contr[[term]] <- paste(contrLevels[[term]], refLevel[[term]], sep = ' \u2013 ')
            # seq_along() instead of 1:length(): a single-level factor
            # (zero contrasts) must yield no keys, not c("x1", "x0")
            rContr[[term]] <- paste0(jmvcore::toB64(term), seq_along(contrLevels[[term]]))
        } else {
            contr[[term]] <- term
            rContr[[term]] <- jmvcore::toB64(term)
        }
    }
    grid <- expand.grid(contr)
    coefNames <- apply(grid, 1, jmvcore::stringifyTerm)
    grid2 <- expand.grid(rContr)
    coefTerms <- list()
    # seq_len() instead of 1:nrow(): safe when the grid has zero rows
    for (i in seq_len(nrow(grid2)))
        coefTerms[[i]] <- as.character(unlist(grid2[i,]))
    return(list(coefNames=coefNames, coefTerms=coefTerms))
},
# Build one model formula (dep ~ term1 + term2 + ...) per block, with all
# names base64-encoded so non-syntactic variable names survive lm().
.formulas = function() {
    depB64 <- jmvcore::toB64(self$options$dep)
    formulas <- lapply(private$terms, function(blockTerms) {
        rhs <- jmvcore::composeTerms(lapply(blockTerms, jmvcore::toB64))
        as.formula(paste(depB64, paste0(rhs, collapse ="+"), sep="~"))
    })
    return(formulas)
},
# Build the analysis data frame: base64-encode all column names, relevel
# each factor to its user-chosen reference level, attach the contrast
# coding from .createContrasts, coerce dep/covariates to numeric, and
# drop incomplete rows.
.cleanData = function() {
dep <- self$options$dep
covs <- self$options$covs
factors <- self$options$factors
refLevels <- self$options$refLevels
dataRaw <- self$data
data <- list()
refVars <- sapply(refLevels, function(x) x$var)
for (factor in factors) {
ref <- refLevels[[which(factor == refVars)]][['ref']]
rows <- jmvcore::toB64(as.character(dataRaw[[factor]]))
levels <- jmvcore::toB64(levels(dataRaw[[factor]]))
column <- factor(rows, levels=levels)
# the reference level becomes the first level for treatment coding
column <- relevel(column, ref = jmvcore::toB64(ref))
data[[jmvcore::toB64(factor)]] <- column
stats::contrasts(data[[jmvcore::toB64(factor)]]) <- private$.createContrasts(levels)
}
for (cov in c(dep, covs))
data[[jmvcore::toB64(cov)]] <- jmvcore::toNumeric(dataRaw[[cov]])
# turn the plain list into a data.frame without copying columns
attr(data, 'row.names') <- seq_len(length(data[[1]]))
attr(data, 'class') <- 'data.frame'
data <- jmvcore::naOmit(data)
return(data)
},
# Heuristic pixel size (width, height) for an EMM plot, based on how many
# level combinations and how long the level/variable names are. emm holds
# up to three variables: x axis, lines, and plot facets. The numeric
# constants are empirically tuned axis/legend allowances.
.plotSize = function(emm) {
data <- self$data
covs <- self$options$covs
factors <- self$options$factors
levels <- list()
for (i in seq_along(emm)) {
column <- data[[ emm[i] ]]
if (emm[i] %in% factors) {
levels[[ emm[i] ]] <- levels(column)
} else {
# a continuous x axis has no discrete levels; elsewhere covariates
# are shown at -1SD / Mean / +1SD
if (i == 1)
levels[[ emm[i] ]] <- ''
else
levels[[ emm[i] ]] <- c('-1SD', 'Mean', '+1SD')
}
}
# pad every quantity out to the 3 possible variables (NA -> neutral value)
nLevels <- as.numeric(sapply(levels, length))
nLevels <- ifelse(is.na(nLevels[1:3]), 1, nLevels[1:3])
nCharLevels <- as.numeric(sapply(lapply(levels, nchar), max))
nCharLevels <- ifelse(is.na(nCharLevels[1:3]), 0, nCharLevels[1:3])
nCharNames <- as.numeric(nchar(names(levels)))
nCharNames <- ifelse(is.na(nCharNames[1:3]), 0, nCharNames[1:3])
xAxis <- 30 + 20
yAxis <- 30 + 20
if (emm[1] %in% factors) {
width <- max(350, 25 * nLevels[1] * nLevels[2] * nLevels[3])
height <- 300 + ifelse(nLevels[3] > 1, 20, 0)
} else {
width <- max(350, 300 * nLevels[3])
height <- 300 + ifelse(nLevels[3] > 1, 20, 0)
}
# legend width driven by the longer of level labels vs variable name
legend <- max(25 + 21 + 3.5 + 8.3 * nCharLevels[2] + 28, 25 + 10 * nCharNames[2] + 28)
width <- yAxis + width + ifelse(nLevels[2] > 1, legend, 0)
height <- xAxis + height
return(c(width, height))
},
# Build the contrast matrix used to code a factor's levels.
# With a 'refLevel' intercept plain treatment (dummy) coding is used;
# otherwise the dummy coding is shifted by 1/k so the intercept
# represents the grand mean (deviation-style coding).
.createContrasts=function(levels) {
    treat <- contr.treatment(levels)
    dimnames(treat) <- NULL
    if (self$options$intercept == 'refLevel') {
        contrast <- treat
    } else {
        k <- length(levels)
        # subtract 1/k from every cell of the treatment coding
        contrast <- treat - matrix(rep(1/k, prod(dim(treat))), ncol=k-1)
    }
    return(contrast)
},
# Standardized coefficients (betas) with confidence intervals:
# beta = b * sd(x) / sd(y), computed on the fitted model frame.
.stdEst = function(model) {
# From 'QuantPsyc' R package
b <- summary(model)$coef[-1,1]  # raw slopes, intercept row dropped
sx <- sapply(model$model[-1], sd)  # SDs of the predictors
sy <- sapply(model$model[1], sd)   # SD of the response
beta <- b * sx / sy
CI <- stats::confint(model, level = self$options$ciWidthStdEst / 100)[-1,]
betaCI <- CI * sx / sy
# with a single predictor, confint()[-1,] collapses to a plain vector
if (is.matrix(betaCI))
r <- list(beta=beta, lower=betaCI[,1], upper=betaCI[,2])
else
r <- list(beta=beta, lower=betaCI[1], upper=betaCI[2])
return(r)
},
# Centre and scale every non-factor column; factor columns pass through.
.scaleData = function(data) {
    nonFactor <- names(data)[ ! sapply(data, is.factor)]
    for (nm in nonFactor)
        data[[nm]] <- scale(data[[nm]])
    return(data)
})
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.emr_operations.R
\name{cancel_steps}
\alias{cancel_steps}
\title{Cancels a pending step or steps in a running cluster}
\usage{
cancel_steps(ClusterId = NULL, StepIds = NULL)
}
\arguments{
\item{ClusterId}{The \code{ClusterID} for which specified steps will be canceled. Use RunJobFlow and ListClusters to get ClusterIDs.}
\item{StepIds}{The list of \code{StepIDs} to cancel. Use ListSteps to get steps and their states for the specified cluster.}
}
\description{
Cancels a pending step or steps in a running cluster. Available only in Amazon EMR versions 4.8.0 and later, excluding version 5.0.0. A maximum of 256 steps are allowed in each CancelSteps request. CancelSteps is idempotent but asynchronous; it does not guarantee a step will be canceled, even if the request is successfully submitted. You can only cancel steps that are in a \code{PENDING} state.
}
\section{Accepted Parameters}{
\preformatted{cancel_steps(
ClusterId = "string",
StepIds = list(
"string"
)
)
}
}
| /service/paws.emr/man/cancel_steps.Rd | permissive | CR-Mercado/paws | R | false | true | 1,072 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.emr_operations.R
\name{cancel_steps}
\alias{cancel_steps}
\title{Cancels a pending step or steps in a running cluster}
\usage{
cancel_steps(ClusterId = NULL, StepIds = NULL)
}
\arguments{
\item{ClusterId}{The \code{ClusterID} for which specified steps will be canceled. Use RunJobFlow and ListClusters to get ClusterIDs.}
\item{StepIds}{The list of \code{StepIDs} to cancel. Use ListSteps to get steps and their states for the specified cluster.}
}
\description{
Cancels a pending step or steps in a running cluster. Available only in Amazon EMR versions 4.8.0 and later, excluding version 5.0.0. A maximum of 256 steps are allowed in each CancelSteps request. CancelSteps is idempotent but asynchronous; it does not guarantee a step will be canceled, even if the request is successfully submitted. You can only cancel steps that are in a \code{PENDING} state.
}
\section{Accepted Parameters}{
\preformatted{cancel_steps(
ClusterId = "string",
StepIds = list(
"string"
)
)
}
}
|
# test_taro_recommender.R
# testthat tests for Taro.Recommender: builds a user x item purchase matrix
# from transaction data and checks a simple recommendation for one user.
# When run from inside tests/, resolve file_dir to the project root
# (temporarily stepping up one directory), then restore the working dir.
file_dir = getwd(); if( grepl('tests', getwd()) ){ wd <- getwd(); setwd(".."); file_dir <- getwd(); setwd(wd) }
source(paste(file_dir,"/tests/test_helper.R",sep=""))
source(paste(file_dir,"/lib/taro_mining/taro_recommender.R",sep=""))
context("Taro Recommender")
context(" prepare data for recommender by product x person")
# Taro.Recommender.productByPerson
test_that(" group by products and order by quantity", {
# toy transaction log: customer, product, quantity, date
# (note: 'avacado' is intentionally spelled the same in data and assertion)
cust <- c('jack','jack','daniel','park','park','jack','adam','jessica','jordan')
product <- c('banana','apple','peach','apple','apple','banana','peach','avacado','banana')
sales <- c(2,3,1,3,3,2,1,2,3)
date <- as.Date(c("2014-01-02","2014-02-02","2014-01-02","2014-04-02","2014-05-09","2014-06-02","2014-04-02","2014-05-09","2014-06-02"))
test_data <- data.frame(cust, date, sales, product)
mat <- Taro.Recommender.productByPerson(test_data)
# dimnames preserve first-appearance order of users and items
expect_equivalent(dimnames(mat)$user, c("jack", "daniel", "park", "adam", "jessica", "jordan") )
expect_equivalent(dimnames(mat)$item, c("banana", "peach", "apple", 'avacado') )
# binary purchase indicators, spot-checked per (user, item) cell
expect_equivalent(as.numeric(mat[1,1]), 1) # jack bought banana?
expect_equivalent(as.numeric(mat[1,2]), 0) # jack bought peach?
expect_equivalent(as.numeric(mat[2,3]), 0) # daniel bought apple?
expect_equivalent(as.numeric(mat[2,2]), 1) # daniel bought peach?
### prediction
cust_recom <- Taro.Recommender.build(mat)
recomFor <- function(person) {
cust_recom[cust_recom$cust == person,]
}
recoms <- recomFor('jack')
# top recommendation for jack should be the item he hasn't bought yet
expect_equivalent(as.character(recoms$recom[1]), 'peach')
})
file_dir = getwd(); if( grepl('tests', getwd()) ){ wd <- getwd(); setwd(".."); file_dir <- getwd(); setwd(wd) }
source(paste(file_dir,"/tests/test_helper.R",sep=""))
source(paste(file_dir,"/lib/taro_mining/taro_recommender.R",sep=""))
context("Taro Recommender")
context(" prepare data for recommender by product x person")
# Taro.Recommender.productByPerson
test_that(" group by products and order by quantity", {
cust <- c('jack','jack','daniel','park','park','jack','adam','jessica','jordan')
product <- c('banana','apple','peach','apple','apple','banana','peach','avacado','banana')
sales <- c(2,3,1,3,3,2,1,2,3)
date <- as.Date(c("2014-01-02","2014-02-02","2014-01-02","2014-04-02","2014-05-09","2014-06-02","2014-04-02","2014-05-09","2014-06-02"))
test_data <- data.frame(cust, date, sales, product)
mat <- Taro.Recommender.productByPerson(test_data)
expect_equivalent(dimnames(mat)$user, c("jack", "daniel", "park", "adam", "jessica", "jordan") )
expect_equivalent(dimnames(mat)$item, c("banana", "peach", "apple", 'avacado') )
expect_equivalent(as.numeric(mat[1,1]), 1) # jack bought banana?
expect_equivalent(as.numeric(mat[1,2]), 0) # jack bought peach?
expect_equivalent(as.numeric(mat[2,3]), 0) # daniel bought apple?
expect_equivalent(as.numeric(mat[2,2]), 1) # daniel bought peach?
### prediction
cust_recom <- Taro.Recommender.build(mat)
recomFor <- function(person) {
cust_recom[cust_recom$cust == person,]
}
recoms <- recomFor('jack')
expect_equivalent(as.character(recoms$recom[1]), 'peach')
}) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create_query.R
\name{create.gquery.graph}
\alias{create.gquery.graph}
\title{Create a Griffin query.}
\usage{
create.gquery.graph(df.graph, nodes)
}
\arguments{
\item{df.graph}{dataframe with source nodes, target nodes and the type of interactions}
\item{nodes}{vector with all node names}
}
\value{
query java query to run Griffin
}
\description{
This function takes an interaction regulatory graph and creates a griffin query.
}
\details{
The graph must include: source, target and type of interaction
Valid types of interactions are:
false: Contradiction
MA: Mandatory, ambiguous
MPU (or +): Mandatory, positive, unambiguous
MPPA: Mandatory, positive, possibly ambiguous
MNU (or -): Mandatory, negative, unambiguous
MNPA: Mandatory, negative, possibly ambiguous
MUSU: Mandatory, unknown sign, unambiguous
MUSPA: Mandatory, unknown sign, possibly ambiguous
NR: No regulation
OA: Optional, ambiguous
OPU: Optional, positive, unambiguous
OPPA: Optional, positive, possibly ambiguous
ONU: Optional, negative, unambiguous
ONPA: Optional, negative, possibly ambiguous
OUSU: Optional, unknown sign, unambiguous
true: Tautology
}
\examples{
> genes = c('a','b','c')
> inter = data.frame(source=c('a','b','b','c','c'),
target=c('b','b','c','b','c'),
type=c('+','+','+','-','+'),
stringsAsFactors = F )
> create.gquery.graph(inter, genes)
}
| /man/create.gquery.graph.Rd | permissive | gsc0107/rgriffin | R | false | true | 1,529 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create_query.R
\name{create.gquery.graph}
\alias{create.gquery.graph}
\title{Create a Griffin query.}
\usage{
create.gquery.graph(df.graph, nodes)
}
\arguments{
\item{df.graph}{dataframe with source nodes, target nodes and the type of interactions}
\item{nodes}{vector with all node names}
}
\value{
query java query to run Griffin
}
\description{
This function takes an interaction regulatory graph and creates a griffin query.
}
\details{
The graph must include: source, target and type of interaction
Valid types of interctions are:
false: Contradiction
MA: Mandatory, ambiguous
MPU (or +): Mandatory, positive, unambiguous
MPPA: Mandatory, positive, possibly ambiguous
MNU (or -): Mandatory, negative, unambiguous
MNPA: Mandatory, negative, possibly ambiguous
MUSU: Mandatory, unknown sign, unambiguous
MUSPA: Mandatory, unknown sign, possibly ambiguous
NR: No regulation
OA: Optional, ambiguous
OPU: Optional, positive, unambiguous
OPPA: Optional, positive, possibly ambiguous
ONU: Optional, negative, unambiguous
ONPA: Optional, negative, possibly ambiguous
OUSU: Optional, unknown sign, unambiguous
true: Tautology
}
\examples{
> genes = c('a','b','c')
> inter = data.frame(source=c('a','b','b','c','c'),
target=c('b','b','c','b','c'),
type=c('+','+','+','-','+'),
stringsAsFactors = F )
> create.gquery.graph(inter, genes)
}
|
# Batch driver: repeatedly evaluate checkplot_inf() on one
# species-abundance distribution (SAD #8 from SADs_list) and write each
# replicate's result to scratch as its own CSV.
# checkplot_initials.R presumably defines SADs_list and loads
# purrr/future -- confirm; checkplot_inf() comes from checkplot_inf.R.
source("/home/mr984/diversity_metrics/scripts/checkplot_initials.R")
source("/home/mr984/diversity_metrics/scripts/checkplot_inf.R")
# reps: replicates per checkplot_inf() call; outerreps: number of
# independent outer runs (one output CSV per run).
reps<-50
outerreps<-1000
# Sample size: the 13th element of the log-spaced sizes 10^5..10^2
# (descending because of rev()).
size<-rev(round(10^seq(2, 5, 0.25)))[
13
]
nc<-12  # requested worker count for the multisession plan
plan(strategy=multisession, workers=nc)
# NOTE(review): purrr::map() runs sequentially; the multisession plan
# above has no effect unless a future-aware map (e.g. furrr::future_map)
# is used -- confirm whether parallel execution was intended.
map(rev(1:outerreps), function(x){
  start<-Sys.time()
  out<-checkplot_inf(flatten(flatten(SADs_list))[[8]], l=1, inds=size, reps=reps)
  # One CSV per outer replicate, keyed by size and replicate index x.
  write.csv(out, paste("/scratch/mr984/SAD8","l",1,"inds", size, "outernew", x, ".csv", sep="_"), row.names=F)
  rm(out)
  print(Sys.time()-start)  # report wall time for this replicate
})
| /scripts/checkplots_for_parallel_amarel/asy_1031.R | no_license | dushoff/diversity_metrics | R | false | false | 534 | r | source("/home/mr984/diversity_metrics/scripts/checkplot_initials.R")
source("/home/mr984/diversity_metrics/scripts/checkplot_inf.R")
reps<-50
outerreps<-1000
size<-rev(round(10^seq(2, 5, 0.25)))[
13
]
nc<-12
plan(strategy=multisession, workers=nc)
map(rev(1:outerreps), function(x){
start<-Sys.time()
out<-checkplot_inf(flatten(flatten(SADs_list))[[8]], l=1, inds=size, reps=reps)
write.csv(out, paste("/scratch/mr984/SAD8","l",1,"inds", size, "outernew", x, ".csv", sep="_"), row.names=F)
rm(out)
print(Sys.time()-start)
})
|
# Pipeline of per-column transformation functions consumed by
# pipeline_data_transformation() below. Each element is named after an
# output column and receives the corresponding source column as input.
column_code <- list(
  # Pass tags through unchanged (species suffixing is disabled).
  tag = function(tag) {
    # return(paste(tag, species, sep='-'))
    return(tag)
  },
  # Parse "month/day/year hour:minute" strings into POSIXct timestamps.
  # Two-digit years can land in the wrong century; any parsed timestamp
  # later than the current time is shifted back 100 years.
  detection_date = function(earliest_detection_date_time) {
    # library() fails loudly if lubridate is missing; require() only warns.
    library(lubridate)
    detection_date <- parse_date_time(x=earliest_detection_date_time, orders='mdyhm')
    # Evaluate now() once and reuse the mask: the original evaluated the
    # comparison (and now()) twice, so the two masks could differ if the
    # clock ticked between evaluations. The is.na() guard also prevents
    # an error from NA entries (failed parses) in a logical subassignment.
    future_mask <- !is.na(detection_date) & detection_date > now()
    detection_date[future_mask] <- detection_date[future_mask] - years(100)
    return(detection_date)
  },
  # Remaining columns are copied through unchanged.
  river = function(river) return(river),
  area = function(area) return(area),
  section = function(section) return(section),
  survey = function(survey) return(survey),
  sample_name = function(sample_name) return(sample_name),
  reader_id = function(reader_id) return(reader_id)
)
# Extract -> transform -> load: read the raw detections table, apply the
# column_code transformations, and (re)write the cleaned table.
source_data <- dbGetQuery(link$conn, "SELECT * FROM tags_detected;")
source_data <- pipeline_data_transformation(
  data=source_data, pipeline=column_code)
# overwrite=TRUE / append=FALSE: any existing data_detections table is
# replaced wholesale on each run.
dbWriteTable(link$conn, 'data_detections', source_data, row.names=FALSE,
  overwrite=TRUE, append=FALSE)
| /data_table_stage/form_data_detections.R | no_license | evanchildress/westbrook-data | R | false | false | 950 | r | column_code <- list(
tag = function(tag) {
# return(paste(tag, species, sep='-'))
return(tag)
},
detection_date = function(earliest_detection_date_time) {
require(lubridate)
detection_date <- parse_date_time(x=earliest_detection_date_time, orders='mdyhm')
detection_date[detection_date > now()] <-
detection_date[detection_date > now()] - years(100)
return(detection_date)
},
river = function(river) return(river),
area = function(area) return(area),
section = function(section) return(section),
survey = function(survey) return(survey),
sample_name = function(sample_name) return(sample_name),
reader_id = function(reader_id) return(reader_id)
)
source_data <- dbGetQuery(link$conn, "SELECT * FROM tags_detected;")
source_data <- pipeline_data_transformation(
data=source_data, pipeline=column_code)
dbWriteTable(link$conn, 'data_detections', source_data, row.names=FALSE,
overwrite=TRUE, append=FALSE)
|
# Scrape the bike model links from a Naver Auto manufacturer page and
# save them to links.csv.
# install.packages('rvest')
library(rvest)

# Reference pages:
# 'https://auto.naver.com/bike/lineup.nhn?bikeNo=5134'
# 'https://auto.naver.com/bike/mnfcoMain.nhn?mnfcoNo=1'

# Build the manufacturer page URL from its base, path, and number.
auto_naver <- 'https://auto.naver.com'
mnfco_main <- '/bike/mnfcoMain.nhn?mnfcoNo='
x <- 17  # manufacturer number to scrape
mnfco_x <- paste0(auto_naver, mnfco_main, x)

# Fetch the page and drill down: .mkr_group container -> .bike_lst
# listings -> their anchor tags -> each anchor's href attribute.
page <- read_html(mnfco_x)
bike_anchors <- html_nodes(html_nodes(html_node(page, '.mkr_group'), '.bike_lst'), 'a')
links <- html_attr(bike_anchors, 'href')

write.csv(links, "links.csv")
| /ex02/ex02.R | no_license | the-books/r_for_kini | R | false | false | 579 | r | # install.packages('rvest')
library(rvest)
# 'https://auto.naver.com/bike/lineup.nhn?bikeNo=5134'
# 'https://auto.naver.com/bike/mnfcoMain.nhn?mnfcoNo=1'
auto_naver <- 'https://auto.naver.com'
mnfco_main <- '/bike/mnfcoMain.nhn?mnfcoNo='
x <- 17
mnfco_x <- paste0(auto_naver, mnfco_main, x)
html.mnfco_no <- read_html(mnfco_x)
html.mkr_group <- html_node(html.mnfco_no, '.mkr_group')
html.bike_lst <- html_nodes(html.mkr_group, '.bike_lst')
html.bike_a <- html_nodes(html.bike_lst, 'a')
links <- html_attr(html.bike_a, 'href')
write.csv(links, "links.csv")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getIncidence.R
\name{AggFunc}
\alias{AggFunc}
\title{Function for making the AggByYear and AggByAge functions.}
\usage{
AggFunc(RHS)
}
\arguments{
\item{RHS}{The variable name as string that you want to aggregate by.}
}
\value{
function
}
\description{
A function factory for creating specific AggByFuncs, see for
example \code{\link{AggByYear}}. This function is a closure and so returns
another function.
}
\examples{
AggByYear <- AggFunc("Year")
}
| /man/AggFunc.Rd | no_license | vando026/ahri | R | false | true | 529 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getIncidence.R
\name{AggFunc}
\alias{AggFunc}
\title{Function for making the AggByYear and AggByAge functions.}
\usage{
AggFunc(RHS)
}
\arguments{
\item{RHS}{The variable name as string that you want to aggregate by.}
}
\value{
function
}
\description{
A function factory for creating specific AggByFuncs, see for
example \code{\link{AggByYear}}. This function is a closure and so returns
another function.
}
\examples{
AggByYear <- AggFunc("Year")
}
|
# Logistic Regression: predict customer churn from demographics.
# Import the dataset
setwd("E:\\HHges - Mkt Anyts\\BITS Pilani Bussiness Anallytics\\04.Logistic Regression")
dataset <- read.csv('Customer Churn.csv')
summary(dataset)

# Split the dataset into Training and Test sets.
# install.packages('caTools')
library(caTools)
set.seed(2000)  # fixed seed so the split is reproducible
# sample.split() partitions on the label vector while preserving the
# relative class ratios of Churn in both subsets.
split <- sample.split(dataset$Churn, SplitRatio = 0.75)
tail(dataset)
training_set <- subset(dataset, split == TRUE)
test_set <- subset(dataset, split == FALSE)
names(training_set)

# Fit logistic regression on the training set (full model).
classifier <- glm(Churn ~ as.factor(Gender) + Age + EstimatedSalary,
                  family = binomial,
                  data = training_set)
summary(classifier)

# Gender is not significant, so refit without it.
classifier <- glm(Churn ~ Age + EstimatedSalary,
                  family = binomial,
                  data = training_set)
# Use the full name 'coefficients' (the original relied on $ partial
# matching with $coefficient).
summary(classifier)$coefficients

# Predict on the test set; type = 'response' returns predicted
# probabilities P(Churn = 1) rather than log-odds.
prob_pred <- predict(classifier, type = 'response', newdata = test_set)
df_prob_pred <- as.data.frame(prob_pred)
head(df_prob_pred)
y_pred <- ifelse(prob_pred > 0.5, 1, 0)  # classify at the 0.5 threshold
y_pred

# Confusion matrix: reference the outcome by name (the original used
# test_set[,5], which breaks if the CSV column order changes).
cm <- table(test_set$Churn, y_pred)
cm
# Classification accuracy: proportion of correct predictions.
# (forecast::accuracy() is intended for time-series forecasts, not
# binary classification, so it was replaced.)
sum(diag(cm)) / sum(cm)
forecast::con | /87-FA/logistic_regression.R | no_license | DUanalytics/rAnalytics | R | false | false | 1,647 | r | # Logistic Regression
# Import the dataset
setwd("E:\\HHges - Mkt Anyts\\BITS Pilani Bussiness Anallytics\\04.Logistic Regression")
dataset = read.csv('Customer Churn.csv')
summary(dataset)
# Split the dataset into the Training set and Test set
#install.packages('caTools')
library(caTools)
set.seed(2000)
#Split data from vector Y into two sets in predefined ratio
#while preserving relative ratios of different labels in Y.
#Used to split the data used during classification into train and test subsets
split = sample.split(dataset$Churn, SplitRatio = 0.75)
tail(dataset)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
names(training_set)
# Fitting Logistic Regression to the Training set
classifier = glm(formula = Churn ~
as.factor(Gender)+
Age+
EstimatedSalary,
family = binomial,
data = training_set)
summary(classifier)
# Fitting Logistic Regression to the Training set - Gender is being insignificant is dropped here
classifier = glm(formula = Churn ~ Age+EstimatedSalary,
family = binomial,
data = training_set)
summary(classifier)$coefficient
# Predicting the Test set results
# type=reponse is used to give predicted probibilites
prob_pred = predict(classifier, type = 'response',
newdata = test_set)
df_prob_pred = as.data.frame(prob_pred)
head(df_prob_pred)
y_pred = ifelse(prob_pred > 0.5, 1, 0)
y_pred
# Making the Confusion Matrix
cm = table(test_set[,5], y_pred)
cm
forecast::accuracy(test_set[,5], y_pred)
forecast::con |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/install.R
\name{install_exiftool}
\alias{install_exiftool}
\title{Install ExifTool, downloading (by default) the current version}
\usage{
install_exiftool(
install_location = NULL,
win_exe = NULL,
local_exiftool = NULL,
quiet = FALSE
)
}
\arguments{
\item{install_location}{Path to the directory into which ExifTool
should be installed. If \code{NULL} (the default),
installation will be into the (initially empty)
\code{exiftool} folder in the \pkg{exiftoolr} package's
directory tree.}
\item{win_exe}{Logical, only used on Windows machines. Should we
install the standalone ExifTool Windows executable or the
ExifTool Perl library? (The latter relies, for its execution,
on an existing installation of Perl being present on the
user's machine.) If set to \code{NULL} (the default), the
function installs the Windows executable on Windows machines
and the Perl library on other operating systems.}
\item{local_exiftool}{If installing ExifTool from a local "*.zip"
or ".tar.gz", supply the path to that file as a character
string. With default value, `NULL`, the function downloads
ExifTool from \url{https://exiftool.org}
and then installs it.}
\item{quiet}{Logical. Should the function be chatty?}
}
\value{
Called for its side effect
}
\description{
Install the current version of ExifTool
}
| /man/install_exiftool.Rd | no_license | barreto91/exiftoolr | R | false | true | 1,390 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/install.R
\name{install_exiftool}
\alias{install_exiftool}
\title{Install ExifTool, downloading (by default) the current version}
\usage{
install_exiftool(
install_location = NULL,
win_exe = NULL,
local_exiftool = NULL,
quiet = FALSE
)
}
\arguments{
\item{install_location}{Path to the directory into which ExifTool
should be installed. If \code{NULL} (the default),
installation will be into the (initially empty)
\code{exiftool} folder in the \pkg{exiftoolr} package's
directory tree.}
\item{win_exe}{Logical, only used on Windows machines. Should we
install the standalone ExifTool Windows executable or the
ExifTool Perl library? (The latter relies, for its execution,
on an existing installation of Perl being present on the
user's machine.) If set to \code{NULL} (the default), the
function installs the Windows executable on Windows machines
and the Perl library on other operating systems.}
\item{local_exiftool}{If installing ExifTool from a local "*.zip"
or ".tar.gz", supply the path to that file as a character
string. With default value, `NULL`, the function downloads
ExifTool from \url{https://exiftool.org}
and then installs it.}
\item{quiet}{Logical. Should function should be chatty?}
}
\value{
Called for its side effect
}
\description{
Install the current version of ExifTool
}
|
# Load shared simulation helpers (simulate_data / simulate_data_g, etc.).
source("sim_EM.R")

# Simulation study for EM-based model-order selection on RNA-seq-like
# count data. For each of `nsims` replicates this function:
#   1. derives size factors from a template count matrix `init_y` via
#      DESeq2 (NOTE(review): `init_y` is read before it is defined here,
#      so it is presumably created by a source()d script or the global
#      environment -- confirm);
#   2. simulates counts with `true.K` clusters, where the first
#      floor(num.disc * g) genes are discriminatory (cluster means
#      offset by `fold.change`);
#   3. pre-filters genes by a NB goodness-of-fit p-value
#      (filt_method = "pval") or by MAD of log normalized counts
#      (filt_method = "mad");
#   4. fits the unpenalized EM for K = 1..7 and selects the K that
#      minimizes BIC, writing diagnostics under Diagnostics/<dir_name>
#      and saving the true-K fit under Filtering/.
#
# Arguments:
#   true.K       true number of clusters used to simulate the data
#   fold.change  spacing between consecutive cluster means for
#                discriminatory genes
#   num.disc     fraction of genes simulated as discriminatory
#   g, n         number of genes and subjects to simulate
#   distrib      "poisson" or "nb"; selects which EM script is sourced
#   method       passed through to EM()
#   pval_thresh  intended p-value cutoff (NOTE(review): currently unused;
#                the "pval" branch filters on the 25% quantile instead)
#   filt_method  "pval" or "mad" gene pre-filtering
#   disp         "gene" or "cluster" dispersion parameterization
#   fixed_parms  if TRUE, use fixed_coef/fixed_phi rather than an
#                initial EM fit
#   nsims        number of simulated datasets
#
# Returns a vector of length nsims with the BIC-selected K per dataset.
# Side effects: creates directories, writes text/PDF/.out files, prints
# progress.
sim.EM<-function(true.K, fold.change, num.disc, g, n,
                 distrib,method="EM",pval_thresh=0.4,filt_method=c("pval","mad"),
                 disp="gene",fixed_parms=T, fixed_coef=6.5,fixed_phi=0.35,
                 nsims=10){
  # disp: "gene" or "cluster"
  # low: coef 3.75-3.84, phi 0.13-0.15
  # med: coef 6.59-6.62, phi 0.32-0.38
  # high: coef 7.84-7.85, phi 1.00-1.32
  # Fixed phi: scalar for gene-wise, vector of length K for cluster-wise
  sim = nsims # number of sims (set eq to number of cores for now)
  # Output directory name encodes the full simulation configuration.
  dir_name = sprintf("Sim_%d_%d_%d_%f_%f_%s_fixed_%f_%f_%s",n,g,true.K,fold.change,num.disc,distrib,fixed_coef,fixed_phi,filt_method)
  dir.create(sprintf("Diagnostics/%s",dir_name))
  # max n = 100, max #
  # Source the EM implementation matching the requested distribution.
  if(distrib=="poisson"){
    source("Pan EM.R")
  } else if(distrib=="nb"){
    source("NB Pan EM par.R")
  } else{
    print("no distrib input. Defaulting to Poisson")
    source("Pan EM.R")
  }
  true_clusters<-NA # TRUE clusters not known for real data
  # Trim the template matrix to g genes x n subjects and estimate size
  # factors / normalized counts with DESeq2 under an intercept-only design.
  init_y<-init_y[1:g,1:n]
  row_names<-paste("gene",seq(g))
  col_names<-paste("subj",seq(n))
  cts<-as.matrix(init_y)
  rownames(cts)<-row_names
  colnames(cts)<-col_names
  coldata<-data.frame(matrix(paste("cl",true_clusters,sep=""),nrow=n))
  rownames(coldata)<-colnames(cts)
  colnames(coldata)<-"cluster"
  dds<-DESeqDataSetFromMatrix(countData = cts,
                              colData = coldata,
                              design = ~ 1)
  dds<-DESeq(dds)
  init_size_factors<-sizeFactors(dds)
  init_norm_y<-counts(dds,normalized=TRUE)
  # Unpenalized run to find initial cluster estimates based on K=k
  k=true.K
  if(!fixed_parms){
    X_init<-EM(y=init_y,k=k,lambda1=0,lambda2=0,tau=0,size_factors=init_size_factors,norm_y=init_norm_y,true_clusters=true_clusters,prefix="init",dir=dir_name,method=method,disp=disp)
    init_coefs<-X_init$coefs # save init estimates for coefs & pi
    init_phi<-X_init$phi
  } else{
    # fixed coefs and phi
    init_coefs <- matrix(fixed_coef,nrow=g,ncol=k)
    if(disp=="gene"){
      init_phi <- rep(fixed_phi,g)
    } else{ init_phi <- matrix(fixed_phi,nrow=g,ncol=k,byrow=T) }
  }
  size_factors<-init_size_factors # use this for all simulations
  # Ground-truth coefficients: every cluster starts at the gene-wise mean
  # coefficient; the first tt genes then get cluster-specific offsets
  # spaced fold_change apart and centered at zero.
  sim_coefs<-matrix(rep(rowSums(init_coefs)/k,times=k),ncol=k)
  fold_change<-fold.change
  nondisc_fold_change<-0 # fixed nondisc fold change
  tt<-floor(num.disc*g)
  sim_coefs[1:tt,]<-matrix(rep( fold_change*(c(0:(k-1))+rep((1-k)/2,times=k)) ,times=tt),nrow=tt,byrow=TRUE)+sim_coefs[1:tt,]
  #sim_coefs[(tt+1):g,]<-matrix(rep( nondisc_fold_change*(c(0:(k-1))+rep((1-k)/2,times=k)) ,times=(g-tt)),nrow=(g-tt),byrow=TRUE)+sim_coefs[(tt+1):g,] # nondisc fold change = 0 so this doesn't get changed
  sim_pi<-rep(1/true.K,times=true.K) # equal mixing proportions
  # Record the simulation ground truth to a text file.
  sink(file=sprintf("Diagnostics/%s/sim_parms_%s_%s.txt",dir_name,method,disp))
  cat("SIMULATED CLUSTER PROPORTIONS:\n")
  cat(sim_pi)
  cat("\n==========================================")
  cat("SIZE FACTORS:\n")
  cat(size_factors)
  cat("\n==========================================")
  cat("SIMULATED COEFFICIENTS:\n")
  write.table(sim_coefs,quote=F)
  cat("\n==========================================")
  cat("SIMULATED DISPERSION PARMS:\n")
  write.table(init_phi,quote=F)
  cat("\n==========================================")
  sink()
  #### SIMULATIONS ####
  # Simulations to find K (Order Selection)
  # Generate all simulated datasets up front, stored in all_data.
  all_data <- list(list())
  for(ii in 1:sim){
    # Simulate data based on initial estimates/estimate size factors
    ## to simulate phi to be very small (fixed +10 extrapoisson variation)
    # ncol() is non-NULL only for the matrix-valued (cluster-wise) phi.
    if(!is.null(ncol(init_phi))){ # check for whether init_phi is of dimension 1
      sim.dat<-simulate_data(n=n,k=true.K,g=g,init_pi=sim_pi,b=sim_coefs,size_factors=size_factors,distrib=distrib,phi=init_phi) # cluster-wise disp param
    } else{
      sim.dat<-simulate_data_g(n=n,k=true.K,g=g,init_pi=sim_pi,b=sim_coefs,size_factors=size_factors,distrib=distrib,phi=init_phi) # gene-specific disp param
    }
    y<-sim.dat$y
    z<-sim.dat$z
    # Decode cluster membership from the one-hot indicator matrix z.
    true_clusters<-rep(0,times=n)
    for(i in 1:n){
      true_clusters[i]<-which(z[,i]==1)
    }
    # Size-factor-normalized counts, column by column.
    norm_y = y
    for(i in 1:n){
      norm_y[,i] = y[,i]/size_factors[i]
    }
    # First tt genes are the discriminatory ones by construction.
    true_disc=c(rep(TRUE,tt),rep(FALSE,(g-tt)))
    all_data[[ii]]<-list(y=y,
                         true_clusters=true_clusters,
                         size_factors=size_factors,
                         norm_y=norm_y,
                         true_disc=true_disc
    )
  }
  final_Ks = rep(NA,sim) # BIC-selected K for each dataset
  # Function to run simulation in parallel
  # NOTE(review): despite the comment above, this loop runs sequentially.
  for(ii in 1:sim){
    y = all_data[[ii]]$y
    true_clusters = all_data[[ii]]$true_clusters
    norm_y = all_data[[ii]]$norm_y
    true_disc = all_data[[ii]]$true_disc
    # Gene pre-filtering: keep the best quarter of genes by either
    # goodness-of-fit p-value or MAD of log normalized counts.
    if(filt_method=="pval"){
      pvals = NB.GOF(y=y,size_factors=size_factors,nsim=1000)
      # NOTE(review): FDR_pvals is computed but never used -- confirm
      # whether filtering was meant to use FDR-adjusted p-values.
      FDR_pvals = p.adjust(pvals,"fdr")
      # pre-filtering by pval
      #filt_ids = (pvals <= pval_thresh)
      filt_ids = pvals <= quantile(pvals,0.25)
    } else if(filt_method=="mad"){
      mads = rep(0,g)
      for(j in 1:g){
        mads[j] = mad(log(norm_y[j,]+0.1))
      }
      filt_ids = mads >= quantile(mads,0.75)
    }
    # Subset data and the ground-truth parameters to the retained genes.
    y=y[filt_ids,]
    norm_y=norm_y[filt_ids,]
    true_disc=true_disc[filt_ids]
    subs_sim_coefs=sim_coefs[filt_ids,]
    if(disp=="gene"){
      subs_init_phi=init_phi[filt_ids]
    } else if(disp=="cluster"){
      subs_init_phi=init_phi[filt_ids,]
    }
    # Order selection
    # Fit the unpenalized EM for each candidate K and record its BIC.
    K_search=c(1:7)
    list_BIC=matrix(0,nrow=length(K_search),ncol=2)
    list_BIC[,1]=K_search
    print(paste("Dataset",ii,"Order Selection:"))
    for(aa in 1:nrow(list_BIC)){
      pref = sprintf("order%d",ii)
      X<-EM(y=y,k=list_BIC[aa,1],lambda1=0,lambda2=0,tau=0,size_factors=size_factors,norm_y=norm_y,true_clusters=true_clusters,true_disc=true_disc,prefix=pref,dir=dir_name,method=method,disp=disp) # no penalty
      list_BIC[aa,2]<-X$BIC
      # Keep the fit at the true K for the diagnostics below.
      # NOTE(review): compare_X is only assigned when true.K is in
      # K_search (1..7); larger true.K would error downstream.
      if(list_BIC[aa,1]==true.K){
        compare_X = X
      }
      print(list_BIC[aa,])
      print(paste("Time:",X$time_elap,"seconds"))
    }
    # Write per-dataset diagnostics for the true-K fit.
    sink(file=sprintf("Diagnostics/%s/%s_%s_final%d_order.txt",dir_name,method,disp,ii))
    max_k=list_BIC[which.min(list_BIC[,2]),1] # K minimizing BIC
    cat(paste("True order:",true.K,"\n"))
    cat(paste("Optimal order selected:",max_k,"\n"))
    cat("RUN WITH CORRECT ORDER:\n")
    MSE_coefs = sum((compare_X$coefs - subs_sim_coefs)^2)/(sum(filt_ids)*true.K)
    MSE_phi = sum((subs_init_phi-compare_X$phi)^2)/(sum(filt_ids)*true.K) # test
    cat(paste("ARI:",adjustedRandIndex(compare_X$final_clusters,true_clusters),"\n"))
    cat(paste("MSE of true vs discovered coefs:",MSE_coefs,"\n"))
    cat(paste("MSE of true vs discovered phi:",MSE_phi,"\n"))
    cat(paste("% of correctly ID'ed disc genes:",sum(!compare_X$nondiscriminatory==true_disc)/sum(true_disc),"\n"))
    cat(paste("PPs (n x k):\n"))
    write.table(t(compare_X$wts),quote=F,col.names=F)
    sink()
    # PDF diagnostics: posterior-probability boxplots per (true, derived)
    # cluster pair, then a heatmap of log normalized counts ordered by
    # the derived clustering.
    pdf(file=sprintf("Diagnostics/%s/%s_%s_final%d_order.pdf",dir_name,method,disp,ii))
    for(c in 1:true.K){
      cl_ids = true_clusters==c
      for(cc in 1:true.K){
        boxplot(compare_X$wts[cc,cl_ids],main=sprintf("Boxplot of PP for subjects of true cl%d being in cl%d",c,cc))
      }
    }
    annotation_col = data.frame(cbind(true_clusters,compare_X$final_clusters))
    colnames(annotation_col)=c("True","Derived")
    rownames(annotation_col)=c(1:ncol(norm_y))
    colnames(norm_y)=c(1:ncol(norm_y))
    annotation_col2 = annotation_col[order(true_clusters),]
    pheatmap(log(norm_y[,order(compare_X$final_clusters)]+0.1),cluster_cols = F,scale="row",annotation_col = annotation_col2)
    dev.off()
    # Persist the true-K fit for later comparison across filter methods.
    save(compare_X,file=sprintf("Filtering/true_K_%s_%s_run_%d.out",filt_method,disp,ii))
    final_Ks[ii] = max_k
  }
  return(final_Ks)
}
# Run the order-selection study under each combination of gene
# pre-filtering method (MAD vs. GOF p-value) and dispersion
# parameterization (gene-wise vs. cluster-wise). Each call returns the
# vector of BIC-selected K values across the 10 simulated datasets.
# (Idiom fixes: `<-` for assignment, TRUE instead of the reassignable T.)
mad_gene_Ks <- sim.EM(true.K = 4, fold.change = 1, num.disc = 0.1, g = 1000, n = 160,
                      distrib = "nb", method = "EM", pval_thresh = 0.4, filt_method = "mad",
                      disp = "gene", fixed_parms = TRUE, fixed_coef = 6.5, fixed_phi = 0.35,
                      nsims = 10)
mad_cl_Ks <- sim.EM(true.K = 4, fold.change = 1, num.disc = 0.1, g = 1000, n = 160,
                    distrib = "nb", method = "EM", pval_thresh = 0.4, filt_method = "mad",
                    disp = "cluster", fixed_parms = TRUE, fixed_coef = 6.5, fixed_phi = 0.35,
                    nsims = 10)
pval_gene_Ks <- sim.EM(true.K = 4, fold.change = 1, num.disc = 0.1, g = 1000, n = 160,
                       distrib = "nb", method = "EM", pval_thresh = 0.4, filt_method = "pval",
                       disp = "gene", fixed_parms = TRUE, fixed_coef = 6.5, fixed_phi = 0.35,
                       nsims = 10)
pval_cl_Ks <- sim.EM(true.K = 4, fold.change = 1, num.disc = 0.1, g = 1000, n = 160,
                     distrib = "nb", method = "EM", pval_thresh = 0.4, filt_method = "pval",
                     disp = "cluster", fixed_parms = TRUE, fixed_coef = 6.5, fixed_phi = 0.35,
                     nsims = 10)
| /order_select_comp.R | no_license | DavidKLim/EM | R | false | false | 8,876 | r | source("sim_EM.R")
sim.EM<-function(true.K, fold.change, num.disc, g, n,
distrib,method="EM",pval_thresh=0.4,filt_method=c("pval","mad"),
disp="gene",fixed_parms=T, fixed_coef=6.5,fixed_phi=0.35,
nsims=10){
# disp: "gene" or "cluster"
# low: coef 3.75-3.84, phi 0.13-0.15
# med: coef 6.59-6.62, phi 0.32-0.38
# high: coef 7.84-7.85, phi 1.00-1.32
# Fixed phi: scalar for gene-wise, vector of length K for cluster-wise
sim = nsims # number of sims (set eq to number of cores for now)
dir_name = sprintf("Sim_%d_%d_%d_%f_%f_%s_fixed_%f_%f_%s",n,g,true.K,fold.change,num.disc,distrib,fixed_coef,fixed_phi,filt_method)
dir.create(sprintf("Diagnostics/%s",dir_name))
# max n = 100, max #
if(distrib=="poisson"){
source("Pan EM.R")
} else if(distrib=="nb"){
source("NB Pan EM par.R")
} else{
print("no distrib input. Defaulting to Poisson")
source("Pan EM.R")
}
true_clusters<-NA # TRUE clusters not known for real data
init_y<-init_y[1:g,1:n]
row_names<-paste("gene",seq(g))
col_names<-paste("subj",seq(n))
cts<-as.matrix(init_y)
rownames(cts)<-row_names
colnames(cts)<-col_names
coldata<-data.frame(matrix(paste("cl",true_clusters,sep=""),nrow=n))
rownames(coldata)<-colnames(cts)
colnames(coldata)<-"cluster"
dds<-DESeqDataSetFromMatrix(countData = cts,
colData = coldata,
design = ~ 1)
dds<-DESeq(dds)
init_size_factors<-sizeFactors(dds)
init_norm_y<-counts(dds,normalized=TRUE)
# Unpenalized run to find initial cluster estimates based on K=k
k=true.K
if(!fixed_parms){
X_init<-EM(y=init_y,k=k,lambda1=0,lambda2=0,tau=0,size_factors=init_size_factors,norm_y=init_norm_y,true_clusters=true_clusters,prefix="init",dir=dir_name,method=method,disp=disp)
init_coefs<-X_init$coefs # save init estimates for coefs & pi
init_phi<-X_init$phi
} else{
# fixed coefs and phi
init_coefs <- matrix(fixed_coef,nrow=g,ncol=k)
if(disp=="gene"){
init_phi <- rep(fixed_phi,g)
} else{ init_phi <- matrix(fixed_phi,nrow=g,ncol=k,byrow=T) }
}
size_factors<-init_size_factors # use this for all simulations
sim_coefs<-matrix(rep(rowSums(init_coefs)/k,times=k),ncol=k)
fold_change<-fold.change
nondisc_fold_change<-0 # fixed nondisc fold change
tt<-floor(num.disc*g)
sim_coefs[1:tt,]<-matrix(rep( fold_change*(c(0:(k-1))+rep((1-k)/2,times=k)) ,times=tt),nrow=tt,byrow=TRUE)+sim_coefs[1:tt,]
#sim_coefs[(tt+1):g,]<-matrix(rep( nondisc_fold_change*(c(0:(k-1))+rep((1-k)/2,times=k)) ,times=(g-tt)),nrow=(g-tt),byrow=TRUE)+sim_coefs[(tt+1):g,] # nondisc fold change = 0 so this doesn't get changed
sim_pi<-rep(1/true.K,times=true.K)
sink(file=sprintf("Diagnostics/%s/sim_parms_%s_%s.txt",dir_name,method,disp))
cat("SIMULATED CLUSTER PROPORTIONS:\n")
cat(sim_pi)
cat("\n==========================================")
cat("SIZE FACTORS:\n")
cat(size_factors)
cat("\n==========================================")
cat("SIMULATED COEFFICIENTS:\n")
write.table(sim_coefs,quote=F)
cat("\n==========================================")
cat("SIMULATED DISPERSION PARMS:\n")
write.table(init_phi,quote=F)
cat("\n==========================================")
sink()
#### SIMULATIONS ####
# Simulations to find K (Order Selection)
all_data <- list(list())
for(ii in 1:sim){
# Simulate data based on initial estimates/estimate size factors
## to simulate phi to be very small (fixed +10 extrapoisson variation)
if(!is.null(ncol(init_phi))){ # check for whether init_phi is of dimension 1
sim.dat<-simulate_data(n=n,k=true.K,g=g,init_pi=sim_pi,b=sim_coefs,size_factors=size_factors,distrib=distrib,phi=init_phi) # cluster-wise disp param
} else{
sim.dat<-simulate_data_g(n=n,k=true.K,g=g,init_pi=sim_pi,b=sim_coefs,size_factors=size_factors,distrib=distrib,phi=init_phi) # gene-specific disp param
}
y<-sim.dat$y
z<-sim.dat$z
true_clusters<-rep(0,times=n)
for(i in 1:n){
true_clusters[i]<-which(z[,i]==1)
}
norm_y = y
for(i in 1:n){
norm_y[,i] = y[,i]/size_factors[i]
}
true_disc=c(rep(TRUE,tt),rep(FALSE,(g-tt)))
all_data[[ii]]<-list(y=y,
true_clusters=true_clusters,
size_factors=size_factors,
norm_y=norm_y,
true_disc=true_disc
)
}
final_Ks = rep(NA,sim)
# Function to run simulation in parallel
for(ii in 1:sim){
y = all_data[[ii]]$y
true_clusters = all_data[[ii]]$true_clusters
norm_y = all_data[[ii]]$norm_y
true_disc = all_data[[ii]]$true_disc
if(filt_method=="pval"){
pvals = NB.GOF(y=y,size_factors=size_factors,nsim=1000)
FDR_pvals = p.adjust(pvals,"fdr")
# pre-filtering by pval
#filt_ids = (pvals <= pval_thresh)
filt_ids = pvals <= quantile(pvals,0.25)
} else if(filt_method=="mad"){
mads = rep(0,g)
for(j in 1:g){
mads[j] = mad(log(norm_y[j,]+0.1))
}
filt_ids = mads >= quantile(mads,0.75)
}
y=y[filt_ids,]
norm_y=norm_y[filt_ids,]
true_disc=true_disc[filt_ids]
subs_sim_coefs=sim_coefs[filt_ids,]
if(disp=="gene"){
subs_init_phi=init_phi[filt_ids]
} else if(disp=="cluster"){
subs_init_phi=init_phi[filt_ids,]
}
# Order selection
K_search=c(1:7)
list_BIC=matrix(0,nrow=length(K_search),ncol=2)
list_BIC[,1]=K_search
print(paste("Dataset",ii,"Order Selection:"))
for(aa in 1:nrow(list_BIC)){
pref = sprintf("order%d",ii)
X<-EM(y=y,k=list_BIC[aa,1],lambda1=0,lambda2=0,tau=0,size_factors=size_factors,norm_y=norm_y,true_clusters=true_clusters,true_disc=true_disc,prefix=pref,dir=dir_name,method=method,disp=disp) # no penalty
list_BIC[aa,2]<-X$BIC
if(list_BIC[aa,1]==true.K){
compare_X = X
}
print(list_BIC[aa,])
print(paste("Time:",X$time_elap,"seconds"))
}
sink(file=sprintf("Diagnostics/%s/%s_%s_final%d_order.txt",dir_name,method,disp,ii))
max_k=list_BIC[which.min(list_BIC[,2]),1]
cat(paste("True order:",true.K,"\n"))
cat(paste("Optimal order selected:",max_k,"\n"))
cat("RUN WITH CORRECT ORDER:\n")
MSE_coefs = sum((compare_X$coefs - subs_sim_coefs)^2)/(sum(filt_ids)*true.K)
MSE_phi = sum((subs_init_phi-compare_X$phi)^2)/(sum(filt_ids)*true.K) # test
cat(paste("ARI:",adjustedRandIndex(compare_X$final_clusters,true_clusters),"\n"))
cat(paste("MSE of true vs discovered coefs:",MSE_coefs,"\n"))
cat(paste("MSE of true vs discovered phi:",MSE_phi,"\n"))
cat(paste("% of correctly ID'ed disc genes:",sum(!compare_X$nondiscriminatory==true_disc)/sum(true_disc),"\n"))
cat(paste("PPs (n x k):\n"))
write.table(t(compare_X$wts),quote=F,col.names=F)
sink()
pdf(file=sprintf("Diagnostics/%s/%s_%s_final%d_order.pdf",dir_name,method,disp,ii))
for(c in 1:true.K){
cl_ids = true_clusters==c
for(cc in 1:true.K){
boxplot(compare_X$wts[cc,cl_ids],main=sprintf("Boxplot of PP for subjects of true cl%d being in cl%d",c,cc))
}
}
annotation_col = data.frame(cbind(true_clusters,compare_X$final_clusters))
colnames(annotation_col)=c("True","Derived")
rownames(annotation_col)=c(1:ncol(norm_y))
colnames(norm_y)=c(1:ncol(norm_y))
annotation_col2 = annotation_col[order(true_clusters),]
pheatmap(log(norm_y[,order(compare_X$final_clusters)]+0.1),cluster_cols = F,scale="row",annotation_col = annotation_col2)
dev.off()
save(compare_X,file=sprintf("Filtering/true_K_%s_%s_run_%d.out",filt_method,disp,ii))
final_Ks[ii] = max_k
}
return(final_Ks)
}
mad_gene_Ks=sim.EM(true.K=4, fold.change=1, num.disc=0.1, g=1000, n=160,
distrib="nb",method="EM",pval_thresh=0.4,filt_method="mad",
disp="gene",fixed_parms=T, fixed_coef=6.5,fixed_phi=0.35,
nsims=10)
mad_cl_Ks=sim.EM(true.K=4, fold.change=1, num.disc=0.1, g=1000, n=160,
distrib="nb",method="EM",pval_thresh=0.4,filt_method="mad",
disp="cluster",fixed_parms=T, fixed_coef=6.5,fixed_phi=0.35,
nsims=10)
pval_gene_Ks=sim.EM(true.K=4, fold.change=1, num.disc=0.1, g=1000, n=160,
distrib="nb",method="EM",pval_thresh=0.4,filt_method="pval",
disp="gene",fixed_parms=T, fixed_coef=6.5,fixed_phi=0.35,
nsims=10)
pval_cl_Ks=sim.EM(true.K=4, fold.change=1, num.disc=0.1, g=1000, n=160,
distrib="nb",method="EM",pval_thresh=0.4,filt_method="pval",
disp="cluster",fixed_parms=T, fixed_coef=6.5,fixed_phi=0.35,
nsims=10)
|
% Generated by roxygen2 (4.1.1.9000): do not edit by hand
% Please edit documentation in R/geo_functions.R
\name{freegeoip}
\alias{freegeoip}
\title{Geolocate IP addresses in R}
\source{
\url{http://heuristically.wordpress.com/2013/05/20/geolocate-ip-addresses-in-r/}. \url{http://freegeoip.net/json/}
}
\usage{
freegeoip(ip = myip(), format = ifelse(length(ip) == 1, "list",
"dataframe"), ...)
}
\arguments{
\item{ip}{a character vector of ips (default is the output from \link{myip})}
\item{format}{format of the output. Either "list" (default) or "data.frame"}
\item{...}{not in use}
}
\value{
a list or data.frame with details on your geo location based on the freegeoip.net service.
}
\description{
This R function uses the free freegeoip.net geocoding service to resolve an IP address (or a vector of them) into country, region, city, zip, latitude, longitude, area and metro codes.
The function requires the rjson package.
}
\examples{
\dontrun{
freegeoip()
## http://www.students.ncl.ac.uk/keith.newman/r/maps-in-r
# install.packages("maps")
# install.packages("mapdata")
library(maps)
library(mapdata) # Contains the hi-resolution points that mark out the countries.
map('worldHires')
require(installr)
myip_details <- freegeoip(myip())
my_lati <- myip_details$latitude
my_long <- myip_details$longitude
points(my_lati,my_long,col=2,pch=18, cex = 1)
# lines(c(my_lati,0) ,c(my_long, 50), col = 2)#'
}
}
\author{
Heuristic Andrew (see source for details)
}
\seealso{
\link{freegeoip}, \link{myip}, \link{cranometer}
}
| /man/freegeoip.Rd | no_license | absolutoslo/installr | R | false | false | 1,524 | rd | % Generated by roxygen2 (4.1.1.9000): do not edit by hand
% Please edit documentation in R/geo_functions.R
\name{freegeoip}
\alias{freegeoip}
\title{Geolocate IP addresses in R}
\source{
\url{http://heuristically.wordpress.com/2013/05/20/geolocate-ip-addresses-in-r/}. \url{http://freegeoip.net/json/}
}
\usage{
freegeoip(ip = myip(), format = ifelse(length(ip) == 1, "list",
"dataframe"), ...)
}
\arguments{
\item{ip}{a character vector of ips (default is the output from \link{myip})}
\item{format}{format of the output. Either "list" (default) or "data.frame"}
\item{...}{not in use}
}
\value{
a list or data.frame with details on your geo location based on the freegeoip.net service.
}
\description{
This R function uses the free freegeoip.net geocoding service to resolve an IP address (or a vector of them) into country, region, city, zip, latitude, longitude, area and metro codes.
The function require rjson.
}
\examples{
\dontrun{
freegeoip()
## http://www.students.ncl.ac.uk/keith.newman/r/maps-in-r
# install.packages("maps")
# install.packages("mapdata")
library(maps)
library(mapdata) # Contains the hi-resolution points that mark out the countries.
map('worldHires')
require(installr)
myip_details <- freegeoip(myip())
my_lati <- myip_details$latitude
my_long <- myip_details$longitude
points(my_lati,my_long,col=2,pch=18, cex = 1)
# lines(c(my_lati,0) ,c(my_long, 50), col = 2)#'
}
}
\author{
Heuristic Andrew (see source for details)
}
\seealso{
\link{freegeoip}, \link{myip}, \link{cranometer}
}
|
# Plot average speed by hour of day and road type for one MOVES source
# (vehicle) type in a given county, save the figure as a PNG under the
# state's output directory, and return the plot grob.
#
# Arguments:
#   county_id      county FIPS code as a number; codes below 10000 are
#                  left-padded with a single "0" (assumes 4- or 5-digit codes)
#   source_type_id MOVES source type ID as a number, e.g. c(11,21,31,32,...)
#
# Depends on objects prepared elsewhere in the session: avgspeeddistribution,
# sourcetypetable, countyxref, state_lowercase, ggsave_statepath, plus the
# ggplot2 / gridExtra / grid packages.
Average_Speed_by_Hour_Roadtype_sourcetype <- function (county_id, source_type_id) {
  # Normalise the county ID to a zero-padded character code.
  if (county_id < 10000) {
    county_id <- paste("0", toString(county_id), sep="")
  } else {
    county_id <- toString(county_id)
  }
  if (county_id %in% avgspeeddistribution$countyID) {
    # Representative speed (mph) of each of the 16 MOVES average-speed bins.
    binspeed <- c(2,5,10,15,20,25,30,35,40,45,50,55,60,65,70,73)
    # hourDayID codes for the 24 weekday hours (hour*10 + 5).
    hourlist <- c(15,25,35,45,55,65,75,85,95,105,115,125,135,145,155,165,175,185,195,205,215,225,235,245)
    # Restrict to the requested county, source type and weekday hours.
    newdata <- avgspeeddistribution[avgspeeddistribution$countyID == county_id &
                                    avgspeeddistribution$hourDayID %in% hourlist &
                                    avgspeeddistribution$sourceTypeID == source_type_id, ]
    # Fraction-weighted speed contribution of each bin (vectorised; the
    # original filled this in with a row-by-row loop).
    newdata$intcol <- newdata$avgSpeedFraction * binspeed[newdata$avgSpeedBinID]
    # Rows come in consecutive runs of 16 (one per speed bin); the average
    # speed for a run is the sum of its weighted contributions.
    # seq_len() also makes the loop a no-op for an empty subset.
    newdata$avgspeed <- 0
    for (n in seq_len(nrow(newdata) / 16)) {
      n1 <- n * 16 - 15
      n2 <- n * 16
      newdata$avgspeed[n1:n2] <- sum(newdata$intcol[n1:n2])
    }
    # Keep one row per hour/road type (bin 1 is an arbitrary representative).
    newdata <- newdata[newdata$avgSpeedBinID == 1, ]
    # Recode hourDayID (15, 25, ..., 245) to plain hours 1..24; replaces the
    # 24 hand-written reassignment statements of the original.
    newdata$hourDayID <- (newdata$hourDayID - 5) / 10
    sourcetypeID <- toString(sourcetypetable[sourcetypetable$typeID == source_type_id, ]$sourcetype)
    sourcetype2 <- paste(sourcetypeID, toString(county_id), sep=" ")
    final <- ggplot(data=newdata,
                    aes(x=factor(hourDayID), y=avgspeed,
                        colour=factor(roadTypeID), shape=factor(roadTypeID))) +
      geom_line(aes(group=roadTypeID), size=1) +
      geom_point(size=7) +
      scale_shape_identity() +
      labs(title=sprintf("Average Speed by Hour and Roadtype %s", sourcetype2),
           x="Hour", y="Average Speed (mph)") +
      theme(plot.background = element_rect(fill = '#FF0033'),
            axis.text=element_text(color="black"),
            axis.text=element_text(size=12),
            axis.title=element_text(size=15),
            title=element_text(size=20)) +
      annotate("text", x=20, y=20, label=c("2=rr 3=ru 4=ur 5=uu"))
    # Add the LADCO footer beneath the plot.
    final <- arrangeGrob(final,
                         sub=textGrob("LADCO Moves Evaluation Software 2015",
                                      x=0, hjust=-0.1, vjust=0.4,
                                      gp=gpar(fontface="italic", fontsize=18)))
    # Work out the state subdirectory and write the PNG.
    # NOTE(review): paths are built with "\\" separators, so this is
    # Windows-specific, matching the original behaviour.
    county_id2 <- as.numeric(county_id)
    stateid <- countyxref[countyxref$indfullname == county_id2, ]$statefips
    stateid <- state_lowercase[state_lowercase$stateID == stateid, ]$states
    gg_path <- paste(ggsave_statepath, paste("\\", stateid, sep=""), sep="")
    gg_path <- paste(gg_path, paste("\\Average_Speed_by_Hour_Roadtype_sourcetype_", county_id, sep=""), sep="")
    gg_path <- paste(gg_path, sourcetypeID, sep="_")
    gg_path <- paste(gg_path, ".png", sep="")
    ggsave(plot = final, file=gg_path, type = "cairo-png")
    return(final)
  } else {
    print(sprintf("county %s data not found", county_id))
  }
}
| /report_scripts/Reference_County_Plots/Average_Speed_by_Hour_Roadtype_sourcetype.R | no_license | m-skiles/LADCO-SMOKE-MOVES-Input-QA-Tool | R | false | false | 3,989 | r |
# Plot average speed by hour of day and road type for one MOVES source
# (vehicle) type in a given county, save the figure as a PNG under the
# state's output directory, and return the plot grob.
#
# Arguments:
#   county_id      county FIPS code as a number; codes below 10000 are
#                  left-padded with a single "0" (assumes 4- or 5-digit codes)
#   source_type_id MOVES source type ID as a number, e.g. c(11,21,31,32,...)
#
# Depends on objects prepared elsewhere in the session: avgspeeddistribution,
# sourcetypetable, countyxref, state_lowercase, ggsave_statepath, plus the
# ggplot2 / gridExtra / grid packages.
Average_Speed_by_Hour_Roadtype_sourcetype <- function (county_id, source_type_id) {
  # Normalise the county ID to a zero-padded character code.
  if (county_id < 10000) {
    county_id <- paste("0", toString(county_id), sep="")
  } else {
    county_id <- toString(county_id)
  }
  if (county_id %in% avgspeeddistribution$countyID) {
    # Representative speed (mph) of each of the 16 MOVES average-speed bins.
    binspeed <- c(2,5,10,15,20,25,30,35,40,45,50,55,60,65,70,73)
    # hourDayID codes for the 24 weekday hours (hour*10 + 5).
    hourlist <- c(15,25,35,45,55,65,75,85,95,105,115,125,135,145,155,165,175,185,195,205,215,225,235,245)
    # Restrict to the requested county, source type and weekday hours.
    newdata <- avgspeeddistribution[avgspeeddistribution$countyID == county_id &
                                    avgspeeddistribution$hourDayID %in% hourlist &
                                    avgspeeddistribution$sourceTypeID == source_type_id, ]
    # Fraction-weighted speed contribution of each bin (vectorised; the
    # original filled this in with a row-by-row loop).
    newdata$intcol <- newdata$avgSpeedFraction * binspeed[newdata$avgSpeedBinID]
    # Rows come in consecutive runs of 16 (one per speed bin); the average
    # speed for a run is the sum of its weighted contributions.
    # seq_len() also makes the loop a no-op for an empty subset.
    newdata$avgspeed <- 0
    for (n in seq_len(nrow(newdata) / 16)) {
      n1 <- n * 16 - 15
      n2 <- n * 16
      newdata$avgspeed[n1:n2] <- sum(newdata$intcol[n1:n2])
    }
    # Keep one row per hour/road type (bin 1 is an arbitrary representative).
    newdata <- newdata[newdata$avgSpeedBinID == 1, ]
    # Recode hourDayID (15, 25, ..., 245) to plain hours 1..24; replaces the
    # 24 hand-written reassignment statements of the original.
    newdata$hourDayID <- (newdata$hourDayID - 5) / 10
    sourcetypeID <- toString(sourcetypetable[sourcetypetable$typeID == source_type_id, ]$sourcetype)
    sourcetype2 <- paste(sourcetypeID, toString(county_id), sep=" ")
    final <- ggplot(data=newdata,
                    aes(x=factor(hourDayID), y=avgspeed,
                        colour=factor(roadTypeID), shape=factor(roadTypeID))) +
      geom_line(aes(group=roadTypeID), size=1) +
      geom_point(size=7) +
      scale_shape_identity() +
      labs(title=sprintf("Average Speed by Hour and Roadtype %s", sourcetype2),
           x="Hour", y="Average Speed (mph)") +
      theme(plot.background = element_rect(fill = '#FF0033'),
            axis.text=element_text(color="black"),
            axis.text=element_text(size=12),
            axis.title=element_text(size=15),
            title=element_text(size=20)) +
      annotate("text", x=20, y=20, label=c("2=rr 3=ru 4=ur 5=uu"))
    # Add the LADCO footer beneath the plot.
    final <- arrangeGrob(final,
                         sub=textGrob("LADCO Moves Evaluation Software 2015",
                                      x=0, hjust=-0.1, vjust=0.4,
                                      gp=gpar(fontface="italic", fontsize=18)))
    # Work out the state subdirectory and write the PNG.
    # NOTE(review): paths are built with "\\" separators, so this is
    # Windows-specific, matching the original behaviour.
    county_id2 <- as.numeric(county_id)
    stateid <- countyxref[countyxref$indfullname == county_id2, ]$statefips
    stateid <- state_lowercase[state_lowercase$stateID == stateid, ]$states
    gg_path <- paste(ggsave_statepath, paste("\\", stateid, sep=""), sep="")
    gg_path <- paste(gg_path, paste("\\Average_Speed_by_Hour_Roadtype_sourcetype_", county_id, sep=""), sep="")
    gg_path <- paste(gg_path, sourcetypeID, sep="_")
    gg_path <- paste(gg_path, ".png", sep="")
    ggsave(plot = final, file=gg_path, type = "cairo-png")
    return(final)
  } else {
    print(sprintf("county %s data not found", county_id))
  }
}
|
# ===== DATA PREPARATION STEP 2 =====
# ===== Preparation of data for use in SiroSOM software.
#
# Before anything else, de-spike using a running median filter (stats::runmed).
#
# Several data sets are required:
# 1.
# First method is a complete-cases analysis with cross-validation, so requires
# a complete-cases data set.
# The main issue is selecting a balance between a large enough set of variables,
# and a large enough set of boreholes containing those variables.
# 2.
# A SOM imputation data set - the same as the first, except with a few extra
# rows containing no TC data for which we can 'predict' a TC value. How to
# select the extra rows? I don't know, but possibly look at the graphical
# well logs from BR Thompson's thesis to decide, or Pechnig key wells. Should
# contain representatives from each of the main strat units I want to examine.
# 3.
# An alternative SOM data set, where each borehole will be treated individually,
# to produce data-driven clusters that will be then be assigned TC values that
# relate to identified lithological clusters. This one needs lith and strat
# columns
# 4.
# A set of any of the above that also contains temperature gradient (regardless
# of whether it has been affected by non-conductive transients). It will be
# interesting to see whether and how the linear regression and SOM techniques
# are affected.
library(sqldf)
library(signal)
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# Data processing for single well-log files.
# Target data frames to hold compiled log data.
# tc.only holds data rows with TC measurements,
# tc.neighbours holds tc data plus a few rows either side.
tc.only <- NULL # can add rows using rbind to NULL.
tc.neighbours <- NULL
filename <- paste("merged_log_lab_data", "B15005_merged.csv", sep="/")
borename <- sub("_merged.csv", "", basename(filename))
logs <- read.csv(filename, na.strings="NA")
# Skip these columns in both complete.cases filter, and running median:
skip.colnames <- c("DEPTH", "MEAS_TC", "MEAS_DEN", "MEAS_PORO", "STRAT")
# logs.clean <- ImPartialCompleteCases(logs, c("DEPTH", "DT", "GR", "LN", "NEUT", "RES", "SN", "SP", "TEMP", "STRAT"))
# NOTE(review): PartialCompleteCases() is defined elsewhere in this project;
# presumably it keeps rows that are complete on all columns EXCEPT
# skip.colnames -- confirm against its definition.
logs.clean <- PartialCompleteCases(logs, skip.colnames)
logs.clean[is.na(logs.clean)] <- 0 # replace NAs with zero.
# De-spike the logs using a running median. Window only needs to be big enough
# to encompass any spikes, with a little extra for wiggle room. If typical
# spike is 3 points, then a filter with k=5 is enough.
# Perform SMOOTHING / UPSCALING using a different procedure, maybe with a
# Savitzky-Golay (polynomial fitting) filter, or running mean.
# Apply filter to some of the columns:
despike <- logs.clean
skip.indices <- which(colnames(despike) %in% skip.colnames)
despike[, -skip.indices] <-
sapply(
despike[, -skip.indices],
runmed, k=5)
# Firstly create a df with only TC result rows
logs.tc.only <- sqldf("
SELECT *
FROM logs l
WHERE l.MEAS_TC IS NOT NULL
ORDER BY DEPTH
")
# Append to tc.only data frame
# NOTE(review): on the first run tc.only is NULL here -- confirm that
# merge(df, NULL, all=TRUE) returns logs.tc.only unchanged as intended.
tc.only <- merge(logs.tc.only, tc.only, all=TRUE)
# Select depths for only those rows with TC data
depths <- sqldf("
SELECT l.DEPTH
FROM logs l
WHERE l.MEAS_TC IS NOT NULL
ORDER BY DEPTH
")
# Secondly create a df with some neighbouring rows as well
# Filter neighbouring log depths of TC values
# (keeps every despiked row within +/- 5 depth units of a TC measurement)
neighbours <- sqldf("
SELECT DISTINCT l.*
FROM despike l, depths d
WHERE l.DEPTH >= d.DEPTH - 5
AND l.DEPTH <= d.DEPTH + 5
ORDER BY DEPTH
")
# Write neighbours to file (CSV)
filename <- "B15005.csv"
write.csv(neighbours, file=paste("subsets", filename, sep="/"), row.names=FALSE)
# Create a new label variable containing the bore name
# NOTE(review): `bore.name` is not defined anywhere in this script --
# `borename` (derived from the file name above) looks like the intended
# variable; confirm before running, otherwise the next three lines error.
bore <- rep(bore.name, nrow(despike)) # create a borename attribute
new.logs <- cbind(bore, despike)
new.logs$borename <- rep(bore.name, nrow(despike))
# Add tc subset to compilation df
# (style note: prefer all=TRUE over the reassignable shorthand T)
tc.only <- merge(tc.only, new.logs, all=T)
# The two statements below are generic reference snippets (`var` and `df` are
# placeholders, not objects created by this script) -- not runnable as-is.
# Replace missing values in a vector:
var[is.na(var)] <- mean(var, na.rm = TRUE)
# Replace missing values in a data frame's vector:
df$var[is.na(df$var)] <- mean(df$var, na.rm = TRUE)
| /subset_tc_data_single_well.R | no_license | ottadini/som-project | R | false | false | 4,140 | r | # ===== DATA PREPARATION STEP 2 =====
# ===== Preparation of data for use in SiroSOM software.
#
# Before anything else, de-spike using a running median filter (stats::runmed).
#
# Several data sets are required:
# 1.
# First method is a complete-cases analysis with cross-validation, so requires
# a complete-cases data set.
# The main issue is selecting a balance between a large enough set of variables,
# and a large enough set of boreholes containing those variables.
# 2.
# A SOM imputation data set - the same as the first, except with a few extra
# rows containing no TC data for which we can 'predict' a TC value. How to
# select the extra rows? I don't know, but possibly look at the graphical
# well logs from BR Thompson's thesis to decide, or Pechnig key wells. Should
# contain representatives from each of the main strat units I want to examine.
# 3.
# An alternative SOM data set, where each borehole will be treated individually,
# to produce data-driven clusters that will be then be assigned TC values that
# relate to identified lithological clusters. This one needs lith and strat
# columns
# 4.
# A set of any of the above that also contains temperature gradient (regardless
# of whether it has been affected by non-conductive transients). It will be
# interesting to see whether and how the linear regression and SOM techniques
# are affected.
library(sqldf)
library(signal)
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# Data processing for single well-log files.
# Target data frames to hold compiled log data.
# tc.only holds data rows with TC measurements,
# tc.neighbours holds tc data plus a few rows either side.
tc.only <- NULL # can add rows using rbind to NULL.
tc.neighbours <- NULL
filename <- paste("merged_log_lab_data", "B15005_merged.csv", sep="/")
borename <- sub("_merged.csv", "", basename(filename))
logs <- read.csv(filename, na.strings="NA")
# Skip these columns in both complete.cases filter, and running median:
skip.colnames <- c("DEPTH", "MEAS_TC", "MEAS_DEN", "MEAS_PORO", "STRAT")
# logs.clean <- ImPartialCompleteCases(logs, c("DEPTH", "DT", "GR", "LN", "NEUT", "RES", "SN", "SP", "TEMP", "STRAT"))
logs.clean <- PartialCompleteCases(logs, skip.colnames)
logs.clean[is.na(logs.clean)] <- 0 # replace NAs with zero.
# De-spike the logs using a running median. Window only needs to be big enough
# to encompass any spikes, with a little extra for wiggle room. If typical
# spike is 3 points, then a filter with k=5 is enough.
# Perform SMOOTHING / UPSCALING using a different procedure, maybe with a
# Savitzky-Golay (polynomial fitting) filter, or running mean.
# Apply filter to some of the columns:
despike <- logs.clean
skip.indices <- which(colnames(despike) %in% skip.colnames)
despike[, -skip.indices] <-
sapply(
despike[, -skip.indices],
runmed, k=5)
# Firstly create a df with only TC result rows
logs.tc.only <- sqldf("
SELECT *
FROM logs l
WHERE l.MEAS_TC IS NOT NULL
ORDER BY DEPTH
")
# Append to tc.only data frame
tc.only <- merge(logs.tc.only, tc.only, all=TRUE)
# Select depths for only those rows with TC data
depths <- sqldf("
SELECT l.DEPTH
FROM logs l
WHERE l.MEAS_TC IS NOT NULL
ORDER BY DEPTH
")
# Secondly create a df with some neighbouring rows as well
# Filter neighbouring log depths of TC values
neighbours <- sqldf("
SELECT DISTINCT l.*
FROM despike l, depths d
WHERE l.DEPTH >= d.DEPTH - 5
AND l.DEPTH <= d.DEPTH + 5
ORDER BY DEPTH
")
# Write neighbours to file (CSV)
filename <- "B15005.csv"
write.csv(neighbours, file=paste("subsets", filename, sep="/"), row.names=FALSE)
# Create a new label variable containing the bore name
bore <- rep(bore.name, nrow(despike)) # create a borename attribute
new.logs <- cbind(bore, despike)
new.logs$borename <- rep(bore.name, nrow(despike))
# Add tc subset to compilation df
tc.only <- merge(tc.only, new.logs, all=T)
# Replace missing values in a vector:
var[is.na(var)] <- mean(var, na.rm = TRUE)
# Replace missing values in a data frame's vector:
df$var[is.na(df$var)] <- mean(df$var, na.rm = TRUE)
|
# Decision Tree Regression: fit a single regression tree to the
# position-level / salary data and visualise its stepwise predictions.

# Load the data set; keep only the numeric Level and Salary columns.
dataset <- read.csv('Position_Salaries.csv')
dataset <- dataset[2:3]

# No train/test split or feature scaling here: the data set is tiny, and
# tree-based models do not need scaled inputs.

# Fit the regression tree; minsplit = 1 allows a split at every observation.
# install.packages('rpart')
library(rpart)
regressor <- rpart(formula = Salary ~ .,
                   data = dataset,
                   control = rpart.control(minsplit = 1))

# Predict the salary for an intermediate position level of 6.5.
y_pred <- predict(regressor, data.frame(Level = 6.5))

# Plot the observations together with the model's piecewise-constant
# predictions, evaluated on a fine grid so the steps render cleanly.
library(ggplot2)
x_grid <- seq(min(dataset$Level), max(dataset$Level), 0.01)
ggplot() +
  geom_point(aes(x = dataset$Level, y = dataset$Salary),
             colour = 'red') +
  geom_line(aes(x = x_grid, y = predict(regressor, newdata = data.frame(Level = x_grid))),
            colour = 'blue') +
  ggtitle('Truth or Bluff (Decision Tree Regression Model)') +
  xlab('Level') +
  ylab('Salary')
| /decision_tree_regression.R | no_license | rko1985/python_r-machine_learning-decision_tree_regression | R | false | false | 1,321 | r | # Decision Tree Regression
# Importing the dataset
dataset = read.csv('Position_Salaries.csv')
dataset = dataset[2:3]
# Splitting the dataset into the Training set and Test set
# # install.packages('caTools')
# library(caTools)
# set.seed(123)
# split = sample.split(dataset$Salary, SplitRatio = 2/3)
# training_set = subset(dataset, split == TRUE)
# test_set = subset(dataset, split == FALSE)
# Feature Scaling
# training_set = scale(training_set)
# test_set = scale(test_set)
# Fitting the Decision Tree Regression Model to the dataset
# Create your regressor here
# install.packages('rpart')
library(rpart)
regressor = rpart(formula = Salary ~ .,
data = dataset,
control = rpart.control(minsplit = 1))
# Predicting a new result
y_pred = predict(regressor, data.frame(Level = 6.5))
#Visuaslizing the Decision Tree Regression Model results (for higher resolution and smoother curve)
library(ggplot2)
x_grid = seq(min(dataset$Level), max(dataset$Level), 0.01)
ggplot() +
geom_point(aes(x = dataset$Level, y = dataset$Salary),
colour = 'red') +
geom_line(aes(x = x_grid, y = predict(regressor, newdata = data.frame(Level = x_grid))),
colour = 'blue') +
ggtitle('Truth or Bluff (Decision Tree Regression Model)') +
xlab('Level') +
ylab('Salary')
|
context(" CVIs")
# =================================================================================================
# setup
# =================================================================================================
## Original objects in env
# Snapshot the workspace so everything created while this file runs can be
# removed again at the end.
ols <- ls()
# =================================================================================================
# centroids
# =================================================================================================
# Regression tests: `persistent` is an environment holding CVI results
# precomputed elsewhere in the test suite; expect_known_value() compares each
# against its stored reference file (file_name() is a project-local helper
# mapping an object to its reference path).
with(persistent, {
test_that("CVIs give the same results as references.", {
skip_on_cran()
expect_known_value(base_cvis, file_name(base_cvis))
expect_known_value(internal_fcvis, file_name(internal_fcvis))
expect_known_value(external_fcvis, file_name(external_fcvis))
expect_known_value(cvis_tadp, file_name(cvis_tadp))
expect_known_value(cvis_hc, file_name(cvis_hc))
expect_known_value(cvis_tadp_cent, file_name(cvis_tadp_cent))
expect_known_value(cvis_hc_cent, file_name(cvis_hc_cent))
})
})
# =================================================================================================
# clean
# =================================================================================================
# Drop everything created during this file, restoring the original workspace.
rm(list = setdiff(ls(), ols))
| /fuzzedpackages/dtwclust/tests/testthat/regression/cvis.R | no_license | akhikolla/testpackages | R | false | false | 1,319 | r | context(" CVIs")
# =================================================================================================
# setup
# =================================================================================================
## Original objects in env
ols <- ls()
# =================================================================================================
# centroids
# =================================================================================================
with(persistent, {
test_that("CVIs give the same results as references.", {
skip_on_cran()
expect_known_value(base_cvis, file_name(base_cvis))
expect_known_value(internal_fcvis, file_name(internal_fcvis))
expect_known_value(external_fcvis, file_name(external_fcvis))
expect_known_value(cvis_tadp, file_name(cvis_tadp))
expect_known_value(cvis_hc, file_name(cvis_hc))
expect_known_value(cvis_tadp_cent, file_name(cvis_tadp_cent))
expect_known_value(cvis_hc_cent, file_name(cvis_hc_cent))
})
})
# =================================================================================================
# clean
# =================================================================================================
rm(list = setdiff(ls(), ols))
|
# Read the sample names (one per line) and keep only the part before the
# first underscore as each individual's ID.
name=read.table('fq.txt') ############names of all individuals ####################
name=as.matrix(name)
for(j in 1:67) ########67 baboons
{
tmp=strsplit(name[j],'_')
name[j]=(tmp[[1]][1])
}
# For each chromosome: parse every individual's .asm file, build an index of
# the unique SNP-CpG pairs, fill per-individual read-count and genotype
# matrices, and save the chromosome's objects to data_chr<i>.RData.
for(i in 1:21) #####21 chromosomes, we call asm for each chromosome.
{
chr=numeric()
SNP=numeric()
CpG=numeric()
SNPlist=numeric()
CpGlist=numeric()
count=1
# Pass 1: collect every (SNP position, CpG position) observation across all
# individuals for this chromosome.
for(j in 1:67)
{
str=paste(name[j],'_part',as.character(i),'.asm',sep='')
if(file.exists(str))
{
data=read.table(str)
data=as.matrix(data)
N=nrow(data)
}else{
next
}
# Rows start at 2: row 1 is assumed to be a header line.
for(k in 2:N)
{
tmp=strsplit(data[k,1],'_')
# NOTE(review): flips the coordinate from column 1 against the constant
# 390124480 -- presumably a genome/assembly offset; confirm its origin.
chr[count]=390124480-as.numeric(tmp[[1]][2])+1
SNP[count]=as.numeric(data[k,2])
CpG[count]=as.numeric(data[k,6])
count=count+1
}
}
# Build the ordered index of unique SNP-CpG pairs; SNPlist and CpGlist run
# in parallel, one entry per pair.
sSNP=unique(sort(SNP))
N=length(sSNP)
for(k in 1:N)
{
idx=which(SNP==sSNP[k])
CpGtmp=unique(sort(CpG[idx]))
n1=length(CpGtmp)
n2=length(SNPlist)
SNPlist[(n2+1):(n2+n1)]=sSNP[k]
CpGlist[(n2+1):(n2+n1)]=CpGtmp
}
# Pass 2: per-pair x per-individual matrices -- y1/y2 (methylated reads per
# allele), r1/r2 (total reads per allele) and genotype dosage (0/1/2).
r1=matrix(0,ncol=67,nrow=length(CpGlist))
r2=r1
y1=r1
y2=r1
genotype=r1
for(j in 1:67)
{
cat(j)
str=paste(name[j],'_part',as.character(i),'.asm',sep='')
if(file.exists(str))
{
data=read.table(str)
data=as.matrix(data)
N=nrow(data)
}else{
next
}
for(k in 2:N)
{
snp=as.numeric(data[k,2])
cpg=as.numeric(data[k,6])
# Locate this SNP-CpG pair's row in the pair index built above.
idx=which(SNPlist==snp)
idx1=which(CpGlist[idx]==cpg)
# Columns 3-5 appear to hold the reference base and the two called
# alleles; columns 7/8 hold "methylated-unmethylated" count strings
# split on '-'.  TODO confirm against the .asm file format.
if(data[k,3]==data[k,4]&&data[k,3]==data[k,5])
{
# Homozygous reference: all reads attributed to allele 1; dosage 0.
tmp=strsplit(data[k,7],'-')
y1[idx[idx1],j]=as.numeric(tmp[[1]][1])
y2[idx[idx1],j]=0
r1[idx[idx1],j]=as.numeric(tmp[[1]][1])+as.numeric(tmp[[1]][2])
r2[idx[idx1],j]=0
genotype[idx[idx1],j]=0
}else if(data[k,3]!=data[k,4]&&data[k,3]!=data[k,5]){
# Homozygous alternative: same layout, dosage 2.
tmp=strsplit(data[k,7],'-')
y1[idx[idx1],j]=as.numeric(tmp[[1]][1])
y2[idx[idx1],j]=0
r1[idx[idx1],j]=as.numeric(tmp[[1]][1])+as.numeric(tmp[[1]][2])
r2[idx[idx1],j]=0
genotype[idx[idx1],j]=2
}else{
# Heterozygous: column 7 is allele 1, column 8 is allele 2; dosage 1.
tmp=strsplit(data[k,7],'-')
tmp2=strsplit(data[k,8],'-')
y1[idx[idx1],j]=as.numeric(tmp[[1]][1])
y2[idx[idx1],j]=as.numeric(tmp2[[1]][1])
r1[idx[idx1],j]=as.numeric(tmp[[1]][1])+as.numeric(tmp[[1]][2])
r2[idx[idx1],j]=as.numeric(tmp2[[1]][1])+as.numeric(tmp2[[1]][2])
genotype[idx[idx1],j]=1
}
}
}
# Replace chr with a constant chromosome label, one entry per pair.
# NOTE(review): chr is NOT included in the save() call below, so the merge
# step later cannot restore it from the .RData file -- likely an oversight.
chr=numeric(length=length(CpGlist))+i
str2=paste('data_chr',as.character(i),'.RData',sep='')
save(SNPlist,CpGlist,y1,y2,r1,r2,genotype,file=str2)
}
# Combine the per-chromosome .RData files into genome-wide objects.
CpGList=numeric()
SNPList=numeric()
Chr=numeric()
# Start from 0-row, 67-column matrices so rbind() simply stacks chromosomes.
r1m=matrix(0,ncol=67,nrow=0)
r2m=r1m
y1m=r1m
y2m=r1m
genotypem=r1m
for(i in 1:21)
{
str=paste('data_chr',as.character(i),'.RData',sep='')
# load() restores SNPlist, CpGlist, y1, y2, r1, r2, genotype for chromosome i.
# NOTE(review): it does NOT restore `chr` (chr was never saved), so the line
# below appends whatever `chr` is currently in the session -- only correct if
# this script runs in the same session as the loop above, and even then `chr`
# holds the LAST chromosome's labels. Consider saving/loading chr explicitly.
load(str)
Chr=c(Chr,chr)
CpGList=c(CpGList,CpGlist)
SNPList=c(SNPList,SNPlist)
r1m=rbind(r1m,r1)
r2m=rbind(r2m,r2)
y1m=rbind(y1m,y1)
y2m=rbind(y2m,y2)
genotypem=rbind(genotypem,genotype)
}
chr=Chr
# Totals over the two alleles: ym = methylated reads, rm = total reads.
ym=y1m+y2m
rm=r1m+r2m
CpGlist=CpGList
SNPlist=SNPList
N=nrow(genotypem)
############Filtering i#####################
# Filter i: keep only sites covered (total read count > 0) in at least 20 of
# the 67 individuals.
# Per-site count of covered individuals (vectorised; replaces the row loop,
# na.rm=TRUE matches the which()-based counting which ignored NAs).
num=rowSums(rm>0, na.rm=TRUE)
idx=which(num<20)
# Guard: when idx is empty, x[-idx,] selects ZERO rows and silently wipes the
# whole data set (the bug in the original) -- only subset when rows were
# actually flagged.
if(length(idx)>0)
{
y1m=y1m[-idx,]
y2m=y2m[-idx,]
r1m=r1m[-idx,]
r2m=r2m[-idx,]
genotypem=genotypem[-idx,]
ym=ym[-idx,]
rm=rm[-idx,]
CpGlist=CpGlist[-idx]
SNPlist=SNPlist[-idx]
chr=chr[-idx]
}
N=nrow(genotypem)
############Filtering ii#####################
# Filter ii: drop sites whose methylation ratio (ym/rm) is extreme (<0.1 or
# >0.9) in more than 90% of the covered individuals -- such sites are
# essentially fully un/methylated everywhere and carry no signal.
# Vectorised equivalents of the original per-row loop; on covered entries
# (rm>0), ym/rm < 0.1 is the same test as ym < 0.1*rm, which avoids 0/0.
num=rowSums(rm>0, na.rm=TRUE)
num1=rowSums(rm>0 & ym<0.1*rm, na.rm=TRUE)
num2=rowSums(rm>0 & ym>0.9*rm, na.rm=TRUE)
ratio1=num1/num
ratio2=num2/num
idx1=which(ratio1>0.9)
idx2=which(ratio2>0.9)
idx=union(idx1,idx2)
# Guard: with an empty idx, x[-idx,] selects zero rows and would silently
# wipe the whole data set (the bug in the original code).
if(length(idx)>0)
{
y1m=y1m[-idx,]
y2m=y2m[-idx,]
r1m=r1m[-idx,]
r2m=r2m[-idx,]
genotypem=genotypem[-idx,]
ym=ym[-idx,]
rm=rm[-idx,]
CpGlist=CpGlist[-idx]
SNPlist=SNPlist[-idx]
chr=chr[-idx]
}
N=nrow(genotypem)
############Filtering iii#####################
# Filter iii: drop sites whose mean read depth across covered individuals is
# below 5. Summing the whole row equals summing the covered entries, since
# uncovered entries are zero.
a_rdp=rowSums(rm)/rowSums(rm>0, na.rm=TRUE)
idx=which(a_rdp<5)
# Guard against the empty-idx case: x[-integer(0),] selects zero rows and
# would delete everything (the bug in the original code).
if(length(idx)>0)
{
y1m=y1m[-idx,]
y2m=y2m[-idx,]
r1m=r1m[-idx,]
r2m=r2m[-idx,]
genotypem=genotypem[-idx,]
ym=ym[-idx,]
rm=rm[-idx,]
CpGlist=CpGlist[-idx]
SNPlist=SNPlist[-idx]
chr=chr[-idx]
}
N=nrow(genotypem)
############Filtering iv#####################
# Filter iv: drop sites with minor allele frequency below 0.05 (or above
# 0.95, i.e. the alternative allele nearly fixed), computed from genotype
# dosage (hets count once, homozygous-alternative twice) over the covered
# individuals.
maf=numeric()
for(i in 1:N)
{
idx=which(rm[i,]>0)
maf[i]=(length(which(genotypem[i,idx]==1))+length(which(genotypem[i,idx]==2))*2)/length(idx)/2
}
idx=union(which(maf<0.05),which(maf>0.95))
# Guard: an empty idx would otherwise delete every row, because
# x[-integer(0),] selects zero rows (the bug in the original code).
if(length(idx)>0)
{
y1m=y1m[-idx,]
y2m=y2m[-idx,]
r1m=r1m[-idx,]
r2m=r2m[-idx,]
genotypem=genotypem[-idx,]
ym=ym[-idx,]
rm=rm[-idx,]
CpGlist=CpGlist[-idx]
SNPlist=SNPlist[-idx]
chr=chr[-idx]
}
N=nrow(genotypem)
# Build haplotype indicator matrices (67 individuals x N sites) from the
# genotype dosage: hom-ref -> (0,0), het -> (0,1), hom-alt -> (1,1).
geno<-list()
geno[[1]]<-matrix(0,ncol=N,nrow=67)
geno[[2]]<-matrix(0,ncol=N,nrow=67)
for(i in 1:N)
{
for(j in 1:67)
{
if(is.na(genotypem[i,j]))
{
# 0/0 evaluates to NaN, which is.na() also treats as missing.
geno[[2]][j,i]=0/0
geno[[1]][j,i]=0/0
}else if(genotypem[i,j]==1){
geno[[2]][j,i]=1
}else if(genotypem[i,j]==2){
geno[[2]][j,i]=1
geno[[1]][j,i]=1
}
}
}
names(geno) <- c('hap1', 'hap2')
# Collect the read-count matrices, transposed to individuals x sites.
# (The six matrix() pre-allocations below are immediately overwritten by the
# t(...) assignments -- harmless, but redundant.)
data<-list()
data[[1]]<-matrix(0,ncol=N,nrow=67)
data[[2]]<-matrix(0,ncol=N,nrow=67)
data[[3]]<-matrix(0,ncol=N,nrow=67)
data[[4]]<-matrix(0,ncol=N,nrow=67)
data[[5]]<-matrix(0,ncol=N,nrow=67)
data[[6]]<-matrix(0,ncol=N,nrow=67)
data[[1]]<-t(rm)
data[[2]]<-t(ym)
data[[3]]<-t(r1m)
data[[4]]<-t(r2m)
data[[5]]<-t(y1m)
data[[6]]<-t(y2m)
names(data) <- c('r', 'y', 'r1', 'r2', 'y1', 'y2')
| /Realdata/asm_merge.R | no_license | fanyue322/IMAGEreproduce | R | false | false | 5,558 | r | name=read.table('fq.txt') ############names of all individuals ####################
name=as.matrix(name)
for(j in 1:67) ########67 baboons
{
tmp=strsplit(name[j],'_')
name[j]=(tmp[[1]][1])
}
for(i in 1:21) #####21 chromosomes, we call asm for each chromosome.
{
chr=numeric()
SNP=numeric()
CpG=numeric()
SNPlist=numeric()
CpGlist=numeric()
count=1
for(j in 1:67)
{
str=paste(name[j],'_part',as.character(i),'.asm',sep='')
if(file.exists(str))
{
data=read.table(str)
data=as.matrix(data)
N=nrow(data)
}else{
next
}
for(k in 2:N)
{
tmp=strsplit(data[k,1],'_')
chr[count]=390124480-as.numeric(tmp[[1]][2])+1
SNP[count]=as.numeric(data[k,2])
CpG[count]=as.numeric(data[k,6])
count=count+1
}
}
sSNP=unique(sort(SNP))
N=length(sSNP)
for(k in 1:N)
{
idx=which(SNP==sSNP[k])
CpGtmp=unique(sort(CpG[idx]))
n1=length(CpGtmp)
n2=length(SNPlist)
SNPlist[(n2+1):(n2+n1)]=sSNP[k]
CpGlist[(n2+1):(n2+n1)]=CpGtmp
}
r1=matrix(0,ncol=67,nrow=length(CpGlist))
r2=r1
y1=r1
y2=r1
genotype=r1
for(j in 1:67)
{
cat(j)
str=paste(name[j],'_part',as.character(i),'.asm',sep='')
if(file.exists(str))
{
data=read.table(str)
data=as.matrix(data)
N=nrow(data)
}else{
next
}
for(k in 2:N)
{
snp=as.numeric(data[k,2])
cpg=as.numeric(data[k,6])
idx=which(SNPlist==snp)
idx1=which(CpGlist[idx]==cpg)
if(data[k,3]==data[k,4]&&data[k,3]==data[k,5])
{
tmp=strsplit(data[k,7],'-')
y1[idx[idx1],j]=as.numeric(tmp[[1]][1])
y2[idx[idx1],j]=0
r1[idx[idx1],j]=as.numeric(tmp[[1]][1])+as.numeric(tmp[[1]][2])
r2[idx[idx1],j]=0
genotype[idx[idx1],j]=0
}else if(data[k,3]!=data[k,4]&&data[k,3]!=data[k,5]){
tmp=strsplit(data[k,7],'-')
y1[idx[idx1],j]=as.numeric(tmp[[1]][1])
y2[idx[idx1],j]=0
r1[idx[idx1],j]=as.numeric(tmp[[1]][1])+as.numeric(tmp[[1]][2])
r2[idx[idx1],j]=0
genotype[idx[idx1],j]=2
}else{
tmp=strsplit(data[k,7],'-')
tmp2=strsplit(data[k,8],'-')
y1[idx[idx1],j]=as.numeric(tmp[[1]][1])
y2[idx[idx1],j]=as.numeric(tmp2[[1]][1])
r1[idx[idx1],j]=as.numeric(tmp[[1]][1])+as.numeric(tmp[[1]][2])
r2[idx[idx1],j]=as.numeric(tmp2[[1]][1])+as.numeric(tmp2[[1]][2])
genotype[idx[idx1],j]=1
}
}
}
chr=numeric(length=length(CpGlist))+i
str2=paste('data_chr',as.character(i),'.RData',sep='')
save(SNPlist,CpGlist,y1,y2,r1,r2,genotype,file=str2)
}
CpGList=numeric()
SNPList=numeric()
Chr=numeric()
r1m=matrix(0,ncol=67,nrow=0)
r2m=r1m
y1m=r1m
y2m=r1m
genotypem=r1m
for(i in 1:21)
{
str=paste('data_chr',as.character(i),'.RData',sep='')
load(str)
Chr=c(Chr,chr)
CpGList=c(CpGList,CpGlist)
SNPList=c(SNPList,SNPlist)
r1m=rbind(r1m,r1)
r2m=rbind(r2m,r2)
y1m=rbind(y1m,y1)
y2m=rbind(y2m,y2)
genotypem=rbind(genotypem,genotype)
}
chr=Chr
ym=y1m+y2m
rm=r1m+r2m
CpGlist=CpGList
SNPlist=SNPList
N=nrow(genotypem)
############Filtering i#####################
num=numeric()
for(i in 1:N)
{
num[i]=length(which(rm[i,]>0))
}
idx=which(num<20)
y1m=y1m[-idx,]
y2m=y2m[-idx,]
r1m=r1m[-idx,]
r2m=r2m[-idx,]
genotypem=genotypem[-idx,]
ym=ym[-idx,]
rm=rm[-idx,]
CpGlist=CpGlist[-idx]
SNPlist=SNPlist[-idx]
chr=chr[-idx]
N=nrow(genotypem)
############Filtering ii#####################
num=numeric()
num1=numeric()
num2=numeric()
for(i in 1:N)
{
idx=which(rm[i,]>0)
ratio=ym[i,idx]/rm[i,idx]
num[i]=length(idx)
num1[i]=length(which(ratio<0.1))
num2[i]=length(which(ratio>0.9))
}
ratio1=num1/num
ratio2=num2/num
idx1=which(ratio1>0.9)
idx2=which(ratio2>0.9)
idx=union(idx1,idx2)
y1m=y1m[-idx,]
y2m=y2m[-idx,]
r1m=r1m[-idx,]
r2m=r2m[-idx,]
genotypem=genotypem[-idx,]
ym=ym[-idx,]
rm=rm[-idx,]
CpGlist=CpGlist[-idx]
SNPlist=SNPlist[-idx]
chr=chr[-idx]
N=nrow(genotypem)
############Filtering iii#####################
a_rdp=numeric()
for(i in 1:N)
{
a_rdp[i]=sum(rm[i,])/length(which(rm[i,]>0))
}
idx=which(a_rdp<5)
y1m=y1m[-idx,]
y2m=y2m[-idx,]
r1m=r1m[-idx,]
r2m=r2m[-idx,]
genotypem=genotypem[-idx,]
ym=ym[-idx,]
rm=rm[-idx,]
CpGlist=CpGlist[-idx]
SNPlist=SNPlist[-idx]
chr=chr[-idx]
N=nrow(genotypem)
############Filtering iv#####################
maf=numeric()
for(i in 1:N)
{
idx=which(rm[i,]>0)
maf[i]=(length(which(genotypem[i,idx]==1))+length(which(genotypem[i,idx]==2))*2)/length(idx)/2
}
idx=union(which(maf<0.05),which(maf>0.95))
y1m=y1m[-idx,]
y2m=y2m[-idx,]
r1m=r1m[-idx,]
r2m=r2m[-idx,]
genotypem=genotypem[-idx,]
ym=ym[-idx,]
rm=rm[-idx,]
CpGlist=CpGlist[-idx]
SNPlist=SNPlist[-idx]
chr=chr[-idx]
N=nrow(genotypem)
geno<-list()
geno[[1]]<-matrix(0,ncol=N,nrow=67)
geno[[2]]<-matrix(0,ncol=N,nrow=67)
for(i in 1:N)
{
for(j in 1:67)
{
if(is.na(genotypem[i,j]))
{
geno[[2]][j,i]=0/0
geno[[1]][j,i]=0/0
}else if(genotypem[i,j]==1){
geno[[2]][j,i]=1
}else if(genotypem[i,j]==2){
geno[[2]][j,i]=1
geno[[1]][j,i]=1
}
}
}
names(geno) <- c('hap1', 'hap2')
data<-list()
data[[1]]<-matrix(0,ncol=N,nrow=67)
data[[2]]<-matrix(0,ncol=N,nrow=67)
data[[3]]<-matrix(0,ncol=N,nrow=67)
data[[4]]<-matrix(0,ncol=N,nrow=67)
data[[5]]<-matrix(0,ncol=N,nrow=67)
data[[6]]<-matrix(0,ncol=N,nrow=67)
data[[1]]<-t(rm)
data[[2]]<-t(ym)
data[[3]]<-t(r1m)
data[[4]]<-t(r2m)
data[[5]]<-t(y1m)
data[[6]]<-t(y2m)
names(data) <- c('r', 'y', 'r1', 'r2', 'y1', 'y2')
|
require(Rweibo)
# register application
registerApp(app_name = "mytest", "GDdmIQH6jh", "MCD8BKwGdgPHv")
# create OAuth object
roauth <- createOAuth("mytest", "rweibo")
# return the latest public weibos
res1 <- statuses.public_timeline(roauth, count = 5)
res1
# return the latest weibos of the authenticating user and his friends
res2 <- statuses.friends_timeline(roauth, count = 5)
res2
# post a new weibo
res3 <- statuses.update(roauth, status = "hello world*!@#$&=+")
# repost a weibo
res4 <- statuses.repost(roauth, id = res3$idstr, status = "test repost")
# post a comment to a weibo
res5 <- comments.create(roauth, id = res4$idstr, comment = "test comment")
# search content
res6 <- web.search.content("Rweibo", page = 3, combinewith = NULL, sleepsd = 0)
res7 <- web.search.content("Rweibo", page = 5, combinewith = res6, sleepsd = 0)
| /demo/demo.R | no_license | sjhfx/Rweibo | R | false | false | 855 | r |
## Demo walkthrough for the Rweibo package: OAuth setup, reading timelines,
## posting/reposting/commenting, and web search.
## NOTE(review): the app key/secret below are hardcoded API credentials --
## they should be revoked/replaced and never committed to source control.
require(Rweibo)
# register the application with Sina Weibo (app name, app key, app secret)
registerApp(app_name = "mytest", "GDdmIQH6jh", "MCD8BKwGdgPHv")
# create OAuth object for the registered app
roauth <- createOAuth("mytest", "rweibo")
# return the latest public weibos
res1 <- statuses.public_timeline(roauth, count = 5)
res1
# return the latest weibos of the authenticating user and his friends
res2 <- statuses.friends_timeline(roauth, count = 5)
res2
# post a new weibo
res3 <- statuses.update(roauth, status = "hello world*!@#$&=+")
# repost the weibo created above (res3$idstr is its id)
res4 <- statuses.repost(roauth, id = res3$idstr, status = "test repost")
# post a comment to the reposted weibo
res5 <- comments.create(roauth, id = res4$idstr, comment = "test comment")
# web search for "Rweibo"; combinewith appends to a previous result set --
# res7 extends res6; sleepsd presumably throttles requests (TODO confirm)
res6 <- web.search.content("Rweibo", page = 3, combinewith = NULL, sleepsd = 0)
res7 <- web.search.content("Rweibo", page = 5, combinewith = res6, sleepsd = 0)
|
#####---------------------------------------------------------------------------
## implement recycling rule for function arguments
#####---------------------------------------------------------------------------
## Recycle the `...` arguments to a common length (the longest one),
## mimicking R's vector recycling rule for an arbitrary argument list.
recycle <-
function(...) {
    args   <- list(...)
    target <- max(lengths(args))
    lapply(args, rep, length=target)
}
#####---------------------------------------------------------------------------
## Hoyt / Nakagami-q distribution
## correlated bivariate normal distribution rewritten in polar coordinates
## pdf, cdf, and inverse cdf of the distribution of the radius
#####---------------------------------------------------------------------------
## determine parameters for Hoyt distribution
## S3 generic: dispatches on the class of `x` -- a data frame of coordinates,
## a list of covariance matrices, a single covariance matrix, or a 2-vector
## of eigenvalues. All methods return list(q=<Hoyt shape>, omega=<Hoyt scale>).
getHoytParam <-
function(x) {
    UseMethod("getHoytParam")
}
## based on data frame with (x,y)-coords
## getHoytParam() method for a data frame of shot coordinates.
## Extracts the (x,y)-matrix (getXYmat() is a package-internal helper -- not
## visible here), estimates the covariance matrix, and forwards its
## eigenvalues to getHoytParam.default() via NextMethod().
getHoytParam.data.frame <-
function(x) {
    sigma <- cov(getXYmat(x)) # covariance matrix of the coordinates
    x <- eigen(sigma)$values # eigenvalues replace x for the default method
    NextMethod("getHoytParam")
}
## based on list of covariance matrices
## getHoytParam() method for a list of (2 x 2) covariance matrices.
## Validates every matrix, takes its eigenvalues, and converts each
## eigenvalue pair to the Hoyt shape q and scale omega (vectorized result).
getHoytParam.list <-
function(x) {
    okMat <- vapply(x, is.matrix, logical(1))
    if(!all(okMat)) { stop("x must be a matrix") }
    okNum <- vapply(x, is.numeric, logical(1))
    if(!all(okNum)) { stop("x must be numeric") }
    if(!all(vapply(x, dim, integer(2)) == 2L)) { stop("x must be (2 x 2)-matrix") }

    ## eigenvalues of one covariance matrix, with symmetry and
    ## positive-definiteness checks (small negative values tolerated)
    getEV <- function(sigma) {
        if(!isTRUE(all.equal(sigma, t(sigma)))) {
            stop("x must be symmetric")
        }
        lambda <- eigen(sigma)$values
        if(!all(lambda >= -sqrt(.Machine$double.eps) * abs(lambda[1]))) {
            stop("x is numerically not positive definite")
        }
        lambda
    }

    ev  <- lapply(x, getEV)                            # one pair per matrix
    ev1 <- vapply(ev, function(l) l[1], numeric(1))    # larger eigenvalues
    ev2 <- vapply(ev, function(l) l[2], numeric(1))    # smaller eigenvalues
    list(q     = 1/sqrt(((ev1+ev2)/ev2) - 1),          # Hoyt q
         omega = ev1+ev2)                              # Hoyt omega
}
## based on covariance matrix
## getHoytParam() method for a single (2 x 2) covariance matrix.
## Replaces `x` by its eigenvalues and delegates to getHoytParam.default()
## via NextMethod().
getHoytParam.matrix <-
function(x) {
    if(any(dim(x) != 2L)) { stop("x must be a (2 x 2)-matrix") }
    if(!isTRUE(all.equal(x, t(x)))) { stop("x must be symmetric") }
    x <- eigen(x)$values # eigenvalues (decreasing) replace x for the default method
    NextMethod("getHoytParam")
}
## based on 2-vector of eigenvalues
## not vectorized
## getHoytParam() method for a 2-vector of eigenvalues (not vectorized).
## Converts the eigenvalue pair of a (2 x 2) covariance matrix to the
## Hoyt shape parameter q and scale parameter omega.
getHoytParam.default <-
function(x) {
    if(!is.numeric(x)) { stop("x must be numeric") }
    if(any(x < 0)) { stop("x must be >= 0") }
    if(length(x) != 2L) { stop("x must have length 2") }
    if(!all(x >= -sqrt(.Machine$double.eps) * abs(max(x)))) {
        stop("x is numerically not positive definite")
    }

    lambda <- sort(x, decreasing=TRUE)       # larger eigenvalue first
    total  <- lambda[1] + lambda[2]          # Hoyt omega = ev1 + ev2
    list(q     = 1 / sqrt((total / lambda[2]) - 1),  # Hoyt q
         omega = total)
}
# determine eigenvalues from Hoyt parameters
## Recover the eigenvalues of the underlying covariance matrix from the
## Hoyt parameters q (shape, in (0,1)) and omega (scale, > 0).
## Vectorized; NA inputs propagate to NA eigenvalues.
getEVfromHoyt <-
function(qpar, omega) {
    qObs <- qpar[!is.na(qpar)]
    oObs <- omega[!is.na(omega)]
    stopifnot(all(qObs > 0), all(qObs < 1), all(oObs > 0))

    lambda2 <- omega / ((1/qpar^2) + 1)   # 2nd (smaller) eigenvalue
    lambda1 <- omega - lambda2            # 1st (larger) eigenvalue
    ## guarantee descending order within each pair
    list(ev1 = pmax(lambda1, lambda2),
         ev2 = pmin(lambda1, lambda2))
}
#####---------------------------------------------------------------------------
## pdf Hoyt distribution
## http://reference.wolfram.com/mathematica/ref/HoytDistribution.html
## Density of the Hoyt (Nakagami-q) distribution of the radius, with shape
## qpar in (0,1) and scale omega > 0. Vectorized; arguments are recycled to
## a common length. Out-of-range parameters become NA; density is 0 for
## negative x.
dHoyt <-
function(x, qpar, omega) {
    is.na(x) <- is.nan(x) # replace NaN with NA
    ## invalidate out-of-range parameters instead of stopping
    is.na(qpar) <- (qpar <= 0) | (qpar >= 1) | !is.finite(qpar)
    is.na(omega) <- (omega <= 0) | !is.finite(omega)
    argL <- recycle(x, qpar, omega) # recycle args to a common length
    x <- argL[[1]]
    qpar <- argL[[2]]
    omega <- argL[[3]]
    dens <- numeric(length(x)) # initialize density to 0
    keep <- which((x >= 0) | !is.finite(x)) # keep non-negative x, NA, -Inf, Inf
    if(length(keep) < 1L) { return(dens) } # nothing to do
    ## work on the log scale for numerical stability; besselI() is
    ## exponentially scaled to avoid overflow, and the scaling factor
    ## exp(bArg) is added back as +bArg on the log scale
    lfac1 <- log(x[keep]) + log(1 + qpar[keep]^2) - log(qpar[keep]*omega[keep])
    lfac2 <- -x[keep]^2*(1+qpar[keep]^2)^2/(4*qpar[keep]^2*omega[keep])
    bArg <- (x[keep]^2*(1-qpar[keep]^4) /(4*qpar[keep]^2*omega[keep]))
    lfac3 <- log(besselI(bArg, nu=0, expon.scaled=TRUE)) + bArg
    res <- exp(lfac1+lfac2+lfac3) # this may be NaN (e.g. x == Inf)
    dens[keep] <- ifelse(is.nan(res), 0, res) # if so, set to 0
    return(dens)
}
## equivalent
## Hoyt, RS. 1947. Probability functions for the modulus and angle of the
## normal complex variate. Bell System Technical Journal, 26(2). 318-359.
## Hoyt pdf is for scaled variables with S := 1/sqrt(Su^2+Sv^2), u=U/S, v=V/S
## -> set r to r/S and pdf to pdf/S
# dCNhoyt <- function(r, sigma) {
# ev <- eigen(sigma)$values
# b <- abs(diff(ev)) / sum(ev)
# S <- sqrt(sum(ev))
# r <- r/S
#
# fac1 <- (2*r/sqrt(1-b^2)) * exp(-r^2/(1-b^2))
# bArg <- (b*r^2/(1-b^2))
# fac2 <- exp(log(besselI(bArg, nu=0, expon.scaled=TRUE)) + bArg)
# dens <- fac1*fac2 / S
#
# return(dens)
# }
## equivalent
## Greenwalt, CR & Shultz, ME. 1968.
## Principles of Error Theory and Cartographic Applications
## ACIC TR-96, Appendix D-3, eq. 3
# dGreenwalt <- function(r, sigma) {
# ev <- eigen(sigma)$values
# fac1 <- 1/prod(sqrt(ev))
# fac2 <- r*exp(-(r^2/(4*ev[1])) * (1 + (ev[1]/ev[2])))
# bArg <- (r^2/(4*ev[1])) * ((ev[1]/ev[2]) - 1)
# fac3 <- exp(log(besselI(bArg, nu=0, expon.scaled=TRUE)) + bArg)
# dens <- fac1*fac2*fac3
#
# return(dens)
# }
#####---------------------------------------------------------------------------
## generalized Marcum Q-function from non-central chi^2 distribution
## Nuttall, AH. (1975). Some integrals involving the Q-M function.
## IEEE Transactions on Information Theory, 21 (1), 95-96
## Generalized Marcum Q-function expressed through the noncentral chi^2
## distribution: Q_nu(a, b) = P(X > b^2) with X ~ chi^2(df = 2*nu, ncp = a^2).
## With lower.tail=TRUE (the default) the complement 1 - Q_nu(a, b) is
## returned; pass lower.tail=FALSE for Q_nu(a, b) itself.
marcumQ <-
function(a, b, nu, lower.tail=TRUE) {
    pchisq(b^2, df=2*nu, ncp=a^2, lower.tail=lower.tail)
}
#####---------------------------------------------------------------------------
## cdf Hoyt distribution in closed form
## Paris, JF. 2009. Nakagami-q (Hoyt) distribution function with applications.
## Electronics Letters, 45(4). 210-211. Erratum: doi:10.1049/el.2009.0828
## cdf of the Hoyt distribution in closed form (Paris 2009): a difference of
## two first-order Marcum Q-functions. Vectorized; arguments are recycled;
## out-of-range parameters become NA.
pHoyt <-
function(q, qpar, omega, lower.tail=TRUE) {
    ## invalidate out-of-range parameters instead of stopping
    is.na(qpar) <- (qpar <= 0) | (qpar >= 1) | !is.finite(qpar)
    is.na(omega) <- (omega <= 0) | !is.finite(omega)
    argL <- recycle(q, qpar, omega) # recycle args to a common length
    q <- argL[[1]]
    qpar <- argL[[2]]
    omega <- argL[[3]]
    pp <- numeric(length(q)) # initialize probabilities to 0
    keep <- which((q >= 0) | !is.finite(q)) # keep non-negative q, NA, NaN, -Inf, Inf
    ## alpha/beta coefficients of the Paris closed form
    alphaQ <- (sqrt((1 - qpar[keep]^4))/(2*qpar[keep])) * sqrt((1 + qpar[keep])/(1 - qpar[keep]))
    betaQ <- (sqrt((1 - qpar[keep]^4))/(2*qpar[keep])) * sqrt((1 - qpar[keep])/(1 + qpar[keep]))
    y <- q[keep] / sqrt(omega[keep]) # quantile standardized by the scale
    if(lower.tail) {
        pp[keep] <- marcumQ( betaQ*y, alphaQ*y, nu=1, lower.tail=lower.tail) -
            marcumQ(alphaQ*y, betaQ*y, nu=1, lower.tail=lower.tail)
        ## special cases not caught so far
        pp[which(q == -Inf)] <- 0
        pp[which(q == Inf)] <- 1
    } else {
        pp[keep] <- 1 + marcumQ( betaQ*y, alphaQ*y, nu=1, lower.tail=lower.tail) -
            marcumQ(alphaQ*y, betaQ*y, nu=1, lower.tail=lower.tail)
        ## special cases not caught so far (negative finite q were excluded
        ## from keep, so their upper-tail probability is fixed up here)
        pp[which(q < 0)] <- 1
        pp[which(q == Inf)] <- 0
    }
    return(pp)
}
## equivalent
## Hoyt, RS. 1947. Probability functions for the modulus and angle of the
## normal complex variate. Bell System Technical Journal, 26(2). 318-359.
# pCNhoyt <- function(qq, sigma) {
# ev <- eigen(sigma)$values
# b <- abs(diff(ev)) / sum(ev)
# S <- sqrt(sum(ev))
# qq <- qq/S # rescale
#
# intFun <- function(r, b) {
# fac1 <- r*exp(-(r^2/(1-b^2)))
# bArg <- (b*r^2/(1-b^2))
# fac2 <- exp(log(besselI(bArg, nu=0, expon.scaled=TRUE)) + bArg)
# res <- fac1*fac2 # this may be NaN
# ifelse(is.finite(res), res, 0) # if so, return 0
# }
#
# pp <- (1/sqrt(1-b^2)) * sapply(qq, function(x) 2*integrate(intFun, 0, x, b=b)$value)
# return(pp)
# }
## equivalent
## Greenwalt, CR & Shultz, ME. 1968.
## Principles of Error Theory and Cartographic Applications
## ACIC TR-96, Appendix D-3, eq3
# pCNgreenwalt <- function(qq, sigma) {
# intFun <- function(r, ev) {
# fac1 <- r*exp(-(r^2/(4*ev[1])) * (1 + (ev[1]/ev[2])))
# ## modified Bessel function of first kind and order 0
# bArg <- (r^2/(4*ev[1])) * ((ev[1]/ev[2]) - 1)
# fac2 <- exp(log(besselI(bArg, nu=0, expon.scaled=TRUE)) + bArg)
# res <- fac1*fac2 # this may be NaN
# return(ifelse(is.finite(res), res, 0)) # if so, return 0
# }
#
# ev <- eigen(sigma)$values
# pp <- (1/prod(sqrt(ev))) * sapply(qq, function(x) integrate(intFun, 0, x, ev=ev)$value)
# return(pp)
# }
## equivalent
## Hoover, WE. 1984. Algorithms For Confidence Circles, and Ellipses.
## Washington, D.C., National Oceanic and Atmospheric Administration.
## NOAA Technical Report NOS 107 C&GS 3, 1-29. p. 9.
# pCNhoover <- function(qq, sigma) {
# ev <- eigen(sigma)$values
# Hk <- qq / sqrt(ev[1])
# Hc <- sqrt(ev[2] / ev[1])
# Hbeta <- 2*Hc / pi
# Hgamma <- (Hk/(2*Hc))^2
#
# Hw <- function(phi, Hc) {
# (Hc^2 - 1)*cos(phi) - (Hc^2 + 1)
# }
#
# Hf <- function(phi, Hc, Hgamma) {
# (exp(Hgamma*Hw(phi, Hc)) - 1) / Hw(phi, Hc)
# }
#
# Hbeta * integrate(Hf, 0, pi, Hc=Hc, Hgamma=Hgamma)$value
# }
#####---------------------------------------------------------------------------
## Hoyt quantile function through root finding of cdf
## Hoyt quantile function via root finding on pHoyt(). Returns NA for
## probabilities outside [0, 1) and when uniroot() fails in the bracket.
## loUp may supply the search bracket(s); otherwise a Grubbs-Liu chi^2
## approximation is used to build one (getGPfromHP()/qChisqGrubbs() are
## package-internal helpers -- not visible here).
qHoyt <-
function(p, qpar, omega, lower.tail=TRUE, loUp=NULL) {
    ## invalidate out-of-range parameters instead of stopping
    is.na(qpar) <- (qpar <= 0) | (qpar >= 1) | !is.finite(qpar)
    is.na(omega) <- (omega <= 0) | !is.finite(omega)
    argL <- recycle(p, qpar, omega) # recycle args to a common length
    p <- argL[[1]]
    qpar <- argL[[2]]
    omega <- argL[[3]]
    qq <- rep(NA_real_, length(p)) # quantiles default to NA
    keep <- which((p >= 0) & (p < 1)) # only valid probabilities are processed
    if(length(keep) < 1) { return(qq) }
    if(is.null(loUp)) { # no search interval given
        ## use Grubbs chi^2 quantile for setting root finding interval
        ## Grubbs-Liu chi^2 and Hoyt can diverge
        GP <- getGPfromHP(qpar, omega) # Grubbs parameters
        qGrubbs <- qChisqGrubbs(p[keep], m=GP$m, v=GP$v, muX=GP$muX,
                                varX=GP$varX, l=GP$l, delta=GP$delta,
                                lower.tail=lower.tail, type="Liu")
        qGrubbs.6 <- qChisqGrubbs(0.6, m=GP$m, v=GP$v, muX=GP$muX,
                                  varX=GP$varX, l=GP$l, delta=GP$delta,
                                  lower.tail=lower.tail, type="Liu")
        ## heuristic bracket around the approximate quantile
        qLo <- ifelse(p[keep] <= 0.5, 0, 0.25*qGrubbs)
        qUp <- ifelse(p[keep] <= 0.5, qGrubbs.6, 1.75*qGrubbs)
        loUp <- split(cbind(qLo, qUp), seq_along(p))
    } else {
        ## normalize a user-supplied bracket to a list of 2-vectors
        if(is.matrix(loUp)) {
            loUp <- split(loUp, seq_len(nrow(loUp)))
        } else if(is.vector(loUp)) {
            loUp <- list(loUp)
        } else if(!is.list(loUp)) {
            stop("loUp must be a list, a matrix, a vector, or missing entirely")
        }
    }
    ## cdf shifted by p; its root is the requested quantile
    cdf <- function(x, p, qpar, omega, lower.tail) {
        pHoyt(x, qpar=qpar, omega=omega, lower.tail=lower.tail) - p
    }
    ## uniroot() per element, NA on failure (e.g. no sign change in bracket)
    getQ <- function(p, qpar, omega, loUp, lower.tail) {
        tryCatch(uniroot(cdf, interval=loUp, p=p, qpar=qpar, omega=omega,
                         lower.tail=lower.tail)$root,
                 error=function(e) return(NA_real_))
    }
    qq[keep] <- unlist(Map(getQ, p=p[keep], qpar=qpar[keep], omega=omega[keep],
                           loUp=loUp[keep], lower.tail=lower.tail[1]))
    return(qq)
}
#####---------------------------------------------------------------------------
## random numbers from Hoyt distribution
## Random numbers from the Hoyt distribution. Only the first element of
## qpar/omega is used. Methods:
##   "eigen"/"chol": simulate centered bivariate normal vectors with the
##                   matching covariance and return their lengths
##   "cdf":          inverse-cdf sampling by root finding on pHoyt()
rHoyt <-
function(n, qpar, omega, method=c("eigen", "chol", "cdf"), loUp=NULL) {
    ## invalidate out-of-range parameters instead of stopping
    is.na(qpar) <- (qpar <= 0) | (qpar >= 1) | !is.finite(qpar)
    is.na(omega) <- (omega <= 0) | !is.finite(omega)
    method <- match.arg(method)
    ## if n is a vector, its length determines number of random variates
    n <- if(length(n) > 1L) { length(n) } else { n }
    qpar <- qpar[1] # only first shape parameter is used
    omega <- omega[1] # only first scale parameter is used
    rn <- if(method == "eigen") {
        ## scale iid normal coordinates by sqrt(eigenvalue) each
        lambda <- unlist(getEVfromHoyt(qpar, omega)) # eigenvalues
        ## simulated 2D normal vectors with mean 0
        X <- matrix(rnorm(n*length(lambda)), nrow=n) # with identity cov-mat
        xy <- X %*% diag(sqrt(lambda), length(lambda))
        sqrt(rowSums(xy^2)) # distances to center
    } else if(method == "chol") {
        ## transform iid normal vectors with the Cholesky factor of the
        ## diagonal covariance matrix built from the eigenvalues
        lambda <- getEVfromHoyt(qpar, omega)
        sigma <- cbind(c(lambda$ev1, 0), c(0, lambda$ev2))
        CF <- chol(sigma, pivot=TRUE) # Cholesky-factor
        idx <- order(attr(CF, "pivot")) # undo pivoting
        CFord <- CF[, idx]
        ## simulated 2D normal vectors with mean 0
        xy <- matrix(rnorm(n*ncol(sigma)), nrow=n) %*% CFord
        sqrt(rowSums(xy^2)) # distances to center
    } else if(method == "cdf") {
        ## root finding of pHoyt() given uniform random probabilities:
        ## find x such that F(x) - U = 0
        cdf <- function(x, u, qpar, omega) {
            pHoyt(x, qpar=qpar, omega=omega) - u
        }
        ## find quantile via uniroot() with error handling (NA on failure)
        getQ <- function(u, qpar, omega, loUp) {
            tryCatch(uniroot(cdf, interval=loUp, u=u, qpar=qpar, omega=omega)$root,
                     error=function(e) return(NA_real_))
        }
        u <- runif(n) # uniform random numbers
        ## determine search interval(s) for uniroot()
        if(is.null(loUp)) { # no search interval given
            ## use Grubbs chi^2 quantile for setting root finding interval
            ## Grubbs-Liu chi^2 and Hoyt can diverge
            ## (getGPfromHP()/qChisqGrubbs() are package-internal helpers)
            GP <- getGPfromHP(qpar, omega) # Grubbs parameters and quantiles
            qGrubbs <- qChisqGrubbs(u, m=GP$m, v=GP$v, muX=GP$muX,
                                    varX=GP$varX, l=GP$l, delta=GP$delta, type="Liu")
            qGrubbs.6 <- qChisqGrubbs(0.6, m=GP$m, v=GP$v, muX=GP$muX,
                                      varX=GP$varX, l=GP$l, delta=GP$delta, type="Liu")
            qLo <- ifelse(u <= 0.5, 0, 0.25*qGrubbs)
            qUp <- ifelse(u <= 0.5, qGrubbs.6, 1.75*qGrubbs)
            loUp <- split(cbind(qLo, qUp), seq_along(u))
        } else {
            ## normalize a user-supplied bracket to a list of 2-vectors
            if(is.matrix(loUp)) {
                loUp <- split(loUp, seq_len(nrow(loUp)))
            } else if(is.vector(loUp)) {
                loUp <- list(loUp)
            } else if(!is.list(loUp)) {
                stop("loUp must be a list, a matrix, a vector, or missing entirely")
            }
        }
        unlist(Map(getQ, u=u, qpar=qpar, omega=omega, loUp=loUp))
    }
    return(rn)
}
| /shotGroups/R/hoyt.R | no_license | ingted/R-Examples | R | false | false | 15,879 | r | #####---------------------------------------------------------------------------
## implement recycling rule for function arguments
#####---------------------------------------------------------------------------
## Recycle the `...` arguments to a common length (the longest one),
## mimicking R's vector recycling rule for an arbitrary argument list.
recycle <-
function(...) {
    args   <- list(...)
    target <- max(lengths(args))
    lapply(args, rep, length=target)
}
#####---------------------------------------------------------------------------
## Hoyt / Nakagami-q distribution
## correlated bivariate normal distribution rewritten in polar coordinates
## pdf, cdf, and inverse cdf of the distribution of the radius
#####---------------------------------------------------------------------------
## determine parameters for Hoyt distribution
## S3 generic: dispatches on the class of `x` -- a data frame of coordinates,
## a list of covariance matrices, a single covariance matrix, or a 2-vector
## of eigenvalues. All methods return list(q=<Hoyt shape>, omega=<Hoyt scale>).
getHoytParam <-
function(x) {
    UseMethod("getHoytParam")
}
## based on data frame with (x,y)-coords
## getHoytParam() method for a data frame of shot coordinates.
## Extracts the (x,y)-matrix (getXYmat() is a package-internal helper -- not
## visible here), estimates the covariance matrix, and forwards its
## eigenvalues to getHoytParam.default() via NextMethod().
getHoytParam.data.frame <-
function(x) {
    sigma <- cov(getXYmat(x)) # covariance matrix of the coordinates
    x <- eigen(sigma)$values # eigenvalues replace x for the default method
    NextMethod("getHoytParam")
}
## based on list of covariance matrices
## getHoytParam() method for a list of (2 x 2) covariance matrices.
## Validates every matrix, takes its eigenvalues, and converts each
## eigenvalue pair to the Hoyt shape q and scale omega (vectorized result).
getHoytParam.list <-
function(x) {
    okMat <- vapply(x, is.matrix, logical(1))
    if(!all(okMat)) { stop("x must be a matrix") }
    okNum <- vapply(x, is.numeric, logical(1))
    if(!all(okNum)) { stop("x must be numeric") }
    if(!all(vapply(x, dim, integer(2)) == 2L)) { stop("x must be (2 x 2)-matrix") }

    ## eigenvalues of one covariance matrix, with symmetry and
    ## positive-definiteness checks (small negative values tolerated)
    getEV <- function(sigma) {
        if(!isTRUE(all.equal(sigma, t(sigma)))) {
            stop("x must be symmetric")
        }
        lambda <- eigen(sigma)$values
        if(!all(lambda >= -sqrt(.Machine$double.eps) * abs(lambda[1]))) {
            stop("x is numerically not positive definite")
        }
        lambda
    }

    ev  <- lapply(x, getEV)                            # one pair per matrix
    ev1 <- vapply(ev, function(l) l[1], numeric(1))    # larger eigenvalues
    ev2 <- vapply(ev, function(l) l[2], numeric(1))    # smaller eigenvalues
    list(q     = 1/sqrt(((ev1+ev2)/ev2) - 1),          # Hoyt q
         omega = ev1+ev2)                              # Hoyt omega
}
## based on covariance matrix
## getHoytParam() method for a single (2 x 2) covariance matrix.
## Replaces `x` by its eigenvalues and delegates to getHoytParam.default()
## via NextMethod().
getHoytParam.matrix <-
function(x) {
    if(any(dim(x) != 2L)) { stop("x must be a (2 x 2)-matrix") }
    if(!isTRUE(all.equal(x, t(x)))) { stop("x must be symmetric") }
    x <- eigen(x)$values # eigenvalues (decreasing) replace x for the default method
    NextMethod("getHoytParam")
}
## based on 2-vector of eigenvalues
## not vectorized
## getHoytParam() method for a 2-vector of eigenvalues (not vectorized).
## Converts the eigenvalue pair of a (2 x 2) covariance matrix to the
## Hoyt shape parameter q and scale parameter omega.
getHoytParam.default <-
function(x) {
    if(!is.numeric(x)) { stop("x must be numeric") }
    if(any(x < 0)) { stop("x must be >= 0") }
    if(length(x) != 2L) { stop("x must have length 2") }
    if(!all(x >= -sqrt(.Machine$double.eps) * abs(max(x)))) {
        stop("x is numerically not positive definite")
    }

    lambda <- sort(x, decreasing=TRUE)       # larger eigenvalue first
    total  <- lambda[1] + lambda[2]          # Hoyt omega = ev1 + ev2
    list(q     = 1 / sqrt((total / lambda[2]) - 1),  # Hoyt q
         omega = total)
}
# determine eigenvalues from Hoyt parameters
## Recover the eigenvalues of the underlying covariance matrix from the
## Hoyt parameters q (shape, in (0,1)) and omega (scale, > 0).
## Vectorized; NA inputs propagate to NA eigenvalues.
getEVfromHoyt <-
function(qpar, omega) {
    qObs <- qpar[!is.na(qpar)]
    oObs <- omega[!is.na(omega)]
    stopifnot(all(qObs > 0), all(qObs < 1), all(oObs > 0))

    lambda2 <- omega / ((1/qpar^2) + 1)   # 2nd (smaller) eigenvalue
    lambda1 <- omega - lambda2            # 1st (larger) eigenvalue
    ## guarantee descending order within each pair
    list(ev1 = pmax(lambda1, lambda2),
         ev2 = pmin(lambda1, lambda2))
}
#####---------------------------------------------------------------------------
## pdf Hoyt distribution
## http://reference.wolfram.com/mathematica/ref/HoytDistribution.html
## Density of the Hoyt (Nakagami-q) distribution of the radius, with shape
## qpar in (0,1) and scale omega > 0. Vectorized; arguments are recycled to
## a common length. Out-of-range parameters become NA; density is 0 for
## negative x.
dHoyt <-
function(x, qpar, omega) {
    is.na(x) <- is.nan(x) # replace NaN with NA
    ## invalidate out-of-range parameters instead of stopping
    is.na(qpar) <- (qpar <= 0) | (qpar >= 1) | !is.finite(qpar)
    is.na(omega) <- (omega <= 0) | !is.finite(omega)
    argL <- recycle(x, qpar, omega) # recycle args to a common length
    x <- argL[[1]]
    qpar <- argL[[2]]
    omega <- argL[[3]]
    dens <- numeric(length(x)) # initialize density to 0
    keep <- which((x >= 0) | !is.finite(x)) # keep non-negative x, NA, -Inf, Inf
    if(length(keep) < 1L) { return(dens) } # nothing to do
    ## work on the log scale for numerical stability; besselI() is
    ## exponentially scaled to avoid overflow, and the scaling factor
    ## exp(bArg) is added back as +bArg on the log scale
    lfac1 <- log(x[keep]) + log(1 + qpar[keep]^2) - log(qpar[keep]*omega[keep])
    lfac2 <- -x[keep]^2*(1+qpar[keep]^2)^2/(4*qpar[keep]^2*omega[keep])
    bArg <- (x[keep]^2*(1-qpar[keep]^4) /(4*qpar[keep]^2*omega[keep]))
    lfac3 <- log(besselI(bArg, nu=0, expon.scaled=TRUE)) + bArg
    res <- exp(lfac1+lfac2+lfac3) # this may be NaN (e.g. x == Inf)
    dens[keep] <- ifelse(is.nan(res), 0, res) # if so, set to 0
    return(dens)
}
## equivalent
## Hoyt, RS. 1947. Probability functions for the modulus and angle of the
## normal complex variate. Bell System Technical Journal, 26(2). 318-359.
## Hoyt pdf is for scaled variables with S := 1/sqrt(Su^2+Sv^2), u=U/S, v=V/S
## -> set r to r/S and pdf to pdf/S
# dCNhoyt <- function(r, sigma) {
# ev <- eigen(sigma)$values
# b <- abs(diff(ev)) / sum(ev)
# S <- sqrt(sum(ev))
# r <- r/S
#
# fac1 <- (2*r/sqrt(1-b^2)) * exp(-r^2/(1-b^2))
# bArg <- (b*r^2/(1-b^2))
# fac2 <- exp(log(besselI(bArg, nu=0, expon.scaled=TRUE)) + bArg)
# dens <- fac1*fac2 / S
#
# return(dens)
# }
## equivalent
## Greenwalt, CR & Shultz, ME. 1968.
## Principles of Error Theory and Cartographic Applications
## ACIC TR-96, Appendix D-3, eq. 3
# dGreenwalt <- function(r, sigma) {
# ev <- eigen(sigma)$values
# fac1 <- 1/prod(sqrt(ev))
# fac2 <- r*exp(-(r^2/(4*ev[1])) * (1 + (ev[1]/ev[2])))
# bArg <- (r^2/(4*ev[1])) * ((ev[1]/ev[2]) - 1)
# fac3 <- exp(log(besselI(bArg, nu=0, expon.scaled=TRUE)) + bArg)
# dens <- fac1*fac2*fac3
#
# return(dens)
# }
#####---------------------------------------------------------------------------
## generalized Marcum Q-function from non-central chi^2 distribution
## Nuttall, AH. (1975). Some integrals involving the Q-M function.
## IEEE Transactions on Information Theory, 21 (1), 95-96
## Generalized Marcum Q-function expressed through the noncentral chi^2
## distribution: Q_nu(a, b) = P(X > b^2) with X ~ chi^2(df = 2*nu, ncp = a^2).
## With lower.tail=TRUE (the default) the complement 1 - Q_nu(a, b) is
## returned; pass lower.tail=FALSE for Q_nu(a, b) itself.
marcumQ <-
function(a, b, nu, lower.tail=TRUE) {
    pchisq(b^2, df=2*nu, ncp=a^2, lower.tail=lower.tail)
}
#####---------------------------------------------------------------------------
## cdf Hoyt distribution in closed form
## Paris, JF. 2009. Nakagami-q (Hoyt) distribution function with applications.
## Electronics Letters, 45(4). 210-211. Erratum: doi:10.1049/el.2009.0828
## cdf of the Hoyt distribution in closed form (Paris 2009): a difference of
## two first-order Marcum Q-functions. Vectorized; arguments are recycled;
## out-of-range parameters become NA.
pHoyt <-
function(q, qpar, omega, lower.tail=TRUE) {
    ## invalidate out-of-range parameters instead of stopping
    is.na(qpar) <- (qpar <= 0) | (qpar >= 1) | !is.finite(qpar)
    is.na(omega) <- (omega <= 0) | !is.finite(omega)
    argL <- recycle(q, qpar, omega) # recycle args to a common length
    q <- argL[[1]]
    qpar <- argL[[2]]
    omega <- argL[[3]]
    pp <- numeric(length(q)) # initialize probabilities to 0
    keep <- which((q >= 0) | !is.finite(q)) # keep non-negative q, NA, NaN, -Inf, Inf
    ## alpha/beta coefficients of the Paris closed form
    alphaQ <- (sqrt((1 - qpar[keep]^4))/(2*qpar[keep])) * sqrt((1 + qpar[keep])/(1 - qpar[keep]))
    betaQ <- (sqrt((1 - qpar[keep]^4))/(2*qpar[keep])) * sqrt((1 - qpar[keep])/(1 + qpar[keep]))
    y <- q[keep] / sqrt(omega[keep]) # quantile standardized by the scale
    if(lower.tail) {
        pp[keep] <- marcumQ( betaQ*y, alphaQ*y, nu=1, lower.tail=lower.tail) -
            marcumQ(alphaQ*y, betaQ*y, nu=1, lower.tail=lower.tail)
        ## special cases not caught so far
        pp[which(q == -Inf)] <- 0
        pp[which(q == Inf)] <- 1
    } else {
        pp[keep] <- 1 + marcumQ( betaQ*y, alphaQ*y, nu=1, lower.tail=lower.tail) -
            marcumQ(alphaQ*y, betaQ*y, nu=1, lower.tail=lower.tail)
        ## special cases not caught so far (negative finite q were excluded
        ## from keep, so their upper-tail probability is fixed up here)
        pp[which(q < 0)] <- 1
        pp[which(q == Inf)] <- 0
    }
    return(pp)
}
## equivalent
## Hoyt, RS. 1947. Probability functions for the modulus and angle of the
## normal complex variate. Bell System Technical Journal, 26(2). 318-359.
# pCNhoyt <- function(qq, sigma) {
# ev <- eigen(sigma)$values
# b <- abs(diff(ev)) / sum(ev)
# S <- sqrt(sum(ev))
# qq <- qq/S # rescale
#
# intFun <- function(r, b) {
# fac1 <- r*exp(-(r^2/(1-b^2)))
# bArg <- (b*r^2/(1-b^2))
# fac2 <- exp(log(besselI(bArg, nu=0, expon.scaled=TRUE)) + bArg)
# res <- fac1*fac2 # this may be NaN
# ifelse(is.finite(res), res, 0) # if so, return 0
# }
#
# pp <- (1/sqrt(1-b^2)) * sapply(qq, function(x) 2*integrate(intFun, 0, x, b=b)$value)
# return(pp)
# }
## equivalent
## Greenwalt, CR & Shultz, ME. 1968.
## Principles of Error Theory and Cartographic Applications
## ACIC TR-96, Appendix D-3, eq3
# pCNgreenwalt <- function(qq, sigma) {
# intFun <- function(r, ev) {
# fac1 <- r*exp(-(r^2/(4*ev[1])) * (1 + (ev[1]/ev[2])))
# ## modified Bessel function of first kind and order 0
# bArg <- (r^2/(4*ev[1])) * ((ev[1]/ev[2]) - 1)
# fac2 <- exp(log(besselI(bArg, nu=0, expon.scaled=TRUE)) + bArg)
# res <- fac1*fac2 # this may be NaN
# return(ifelse(is.finite(res), res, 0)) # if so, return 0
# }
#
# ev <- eigen(sigma)$values
# pp <- (1/prod(sqrt(ev))) * sapply(qq, function(x) integrate(intFun, 0, x, ev=ev)$value)
# return(pp)
# }
## equivalent
## Hoover, WE. 1984. Algorithms For Confidence Circles, and Ellipses.
## Washington, D.C., National Oceanic and Atmospheric Administration.
## NOAA Technical Report NOS 107 C&GS 3, 1-29. p. 9.
# pCNhoover <- function(qq, sigma) {
# ev <- eigen(sigma)$values
# Hk <- qq / sqrt(ev[1])
# Hc <- sqrt(ev[2] / ev[1])
# Hbeta <- 2*Hc / pi
# Hgamma <- (Hk/(2*Hc))^2
#
# Hw <- function(phi, Hc) {
# (Hc^2 - 1)*cos(phi) - (Hc^2 + 1)
# }
#
# Hf <- function(phi, Hc, Hgamma) {
# (exp(Hgamma*Hw(phi, Hc)) - 1) / Hw(phi, Hc)
# }
#
# Hbeta * integrate(Hf, 0, pi, Hc=Hc, Hgamma=Hgamma)$value
# }
#####---------------------------------------------------------------------------
## Hoyt quantile function through root finding of cdf
## Hoyt quantile function via root finding on pHoyt(). Returns NA for
## probabilities outside [0, 1) and when uniroot() fails in the bracket.
## loUp may supply the search bracket(s); otherwise a Grubbs-Liu chi^2
## approximation is used to build one (getGPfromHP()/qChisqGrubbs() are
## package-internal helpers -- not visible here).
qHoyt <-
function(p, qpar, omega, lower.tail=TRUE, loUp=NULL) {
    ## invalidate out-of-range parameters instead of stopping
    is.na(qpar) <- (qpar <= 0) | (qpar >= 1) | !is.finite(qpar)
    is.na(omega) <- (omega <= 0) | !is.finite(omega)
    argL <- recycle(p, qpar, omega) # recycle args to a common length
    p <- argL[[1]]
    qpar <- argL[[2]]
    omega <- argL[[3]]
    qq <- rep(NA_real_, length(p)) # quantiles default to NA
    keep <- which((p >= 0) & (p < 1)) # only valid probabilities are processed
    if(length(keep) < 1) { return(qq) }
    if(is.null(loUp)) { # no search interval given
        ## use Grubbs chi^2 quantile for setting root finding interval
        ## Grubbs-Liu chi^2 and Hoyt can diverge
        GP <- getGPfromHP(qpar, omega) # Grubbs parameters
        qGrubbs <- qChisqGrubbs(p[keep], m=GP$m, v=GP$v, muX=GP$muX,
                                varX=GP$varX, l=GP$l, delta=GP$delta,
                                lower.tail=lower.tail, type="Liu")
        qGrubbs.6 <- qChisqGrubbs(0.6, m=GP$m, v=GP$v, muX=GP$muX,
                                  varX=GP$varX, l=GP$l, delta=GP$delta,
                                  lower.tail=lower.tail, type="Liu")
        ## heuristic bracket around the approximate quantile
        qLo <- ifelse(p[keep] <= 0.5, 0, 0.25*qGrubbs)
        qUp <- ifelse(p[keep] <= 0.5, qGrubbs.6, 1.75*qGrubbs)
        loUp <- split(cbind(qLo, qUp), seq_along(p))
    } else {
        ## normalize a user-supplied bracket to a list of 2-vectors
        if(is.matrix(loUp)) {
            loUp <- split(loUp, seq_len(nrow(loUp)))
        } else if(is.vector(loUp)) {
            loUp <- list(loUp)
        } else if(!is.list(loUp)) {
            stop("loUp must be a list, a matrix, a vector, or missing entirely")
        }
    }
    ## cdf shifted by p; its root is the requested quantile
    cdf <- function(x, p, qpar, omega, lower.tail) {
        pHoyt(x, qpar=qpar, omega=omega, lower.tail=lower.tail) - p
    }
    ## uniroot() per element, NA on failure (e.g. no sign change in bracket)
    getQ <- function(p, qpar, omega, loUp, lower.tail) {
        tryCatch(uniroot(cdf, interval=loUp, p=p, qpar=qpar, omega=omega,
                         lower.tail=lower.tail)$root,
                 error=function(e) return(NA_real_))
    }
    qq[keep] <- unlist(Map(getQ, p=p[keep], qpar=qpar[keep], omega=omega[keep],
                           loUp=loUp[keep], lower.tail=lower.tail[1]))
    return(qq)
}
#####---------------------------------------------------------------------------
## random numbers from Hoyt distribution
## Random numbers from the Hoyt distribution. Only the first element of
## qpar/omega is used. Methods:
##   "eigen"/"chol": simulate centered bivariate normal vectors with the
##                   matching covariance and return their lengths
##   "cdf":          inverse-cdf sampling by root finding on pHoyt()
rHoyt <-
function(n, qpar, omega, method=c("eigen", "chol", "cdf"), loUp=NULL) {
    ## invalidate out-of-range parameters instead of stopping
    is.na(qpar) <- (qpar <= 0) | (qpar >= 1) | !is.finite(qpar)
    is.na(omega) <- (omega <= 0) | !is.finite(omega)
    method <- match.arg(method)
    ## if n is a vector, its length determines number of random variates
    n <- if(length(n) > 1L) { length(n) } else { n }
    qpar <- qpar[1] # only first shape parameter is used
    omega <- omega[1] # only first scale parameter is used
    rn <- if(method == "eigen") {
        ## scale iid normal coordinates by sqrt(eigenvalue) each
        lambda <- unlist(getEVfromHoyt(qpar, omega)) # eigenvalues
        ## simulated 2D normal vectors with mean 0
        X <- matrix(rnorm(n*length(lambda)), nrow=n) # with identity cov-mat
        xy <- X %*% diag(sqrt(lambda), length(lambda))
        sqrt(rowSums(xy^2)) # distances to center
    } else if(method == "chol") {
        ## transform iid normal vectors with the Cholesky factor of the
        ## diagonal covariance matrix built from the eigenvalues
        lambda <- getEVfromHoyt(qpar, omega)
        sigma <- cbind(c(lambda$ev1, 0), c(0, lambda$ev2))
        CF <- chol(sigma, pivot=TRUE) # Cholesky-factor
        idx <- order(attr(CF, "pivot")) # undo pivoting
        CFord <- CF[, idx]
        ## simulated 2D normal vectors with mean 0
        xy <- matrix(rnorm(n*ncol(sigma)), nrow=n) %*% CFord
        sqrt(rowSums(xy^2)) # distances to center
    } else if(method == "cdf") {
        ## root finding of pHoyt() given uniform random probabilities:
        ## find x such that F(x) - U = 0
        cdf <- function(x, u, qpar, omega) {
            pHoyt(x, qpar=qpar, omega=omega) - u
        }
        ## find quantile via uniroot() with error handling (NA on failure)
        getQ <- function(u, qpar, omega, loUp) {
            tryCatch(uniroot(cdf, interval=loUp, u=u, qpar=qpar, omega=omega)$root,
                     error=function(e) return(NA_real_))
        }
        u <- runif(n) # uniform random numbers
        ## determine search interval(s) for uniroot()
        if(is.null(loUp)) { # no search interval given
            ## use Grubbs chi^2 quantile for setting root finding interval
            ## Grubbs-Liu chi^2 and Hoyt can diverge
            ## (getGPfromHP()/qChisqGrubbs() are package-internal helpers)
            GP <- getGPfromHP(qpar, omega) # Grubbs parameters and quantiles
            qGrubbs <- qChisqGrubbs(u, m=GP$m, v=GP$v, muX=GP$muX,
                                    varX=GP$varX, l=GP$l, delta=GP$delta, type="Liu")
            qGrubbs.6 <- qChisqGrubbs(0.6, m=GP$m, v=GP$v, muX=GP$muX,
                                      varX=GP$varX, l=GP$l, delta=GP$delta, type="Liu")
            qLo <- ifelse(u <= 0.5, 0, 0.25*qGrubbs)
            qUp <- ifelse(u <= 0.5, qGrubbs.6, 1.75*qGrubbs)
            loUp <- split(cbind(qLo, qUp), seq_along(u))
        } else {
            ## normalize a user-supplied bracket to a list of 2-vectors
            if(is.matrix(loUp)) {
                loUp <- split(loUp, seq_len(nrow(loUp)))
            } else if(is.vector(loUp)) {
                loUp <- list(loUp)
            } else if(!is.list(loUp)) {
                stop("loUp must be a list, a matrix, a vector, or missing entirely")
            }
        }
        unlist(Map(getQ, u=u, qpar=qpar, omega=omega, loUp=loUp))
    }
    return(rn)
}
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Server logic: two histograms of the Old Faithful data (column 2 = waiting
# time between eruptions) plus a text echo of a ui.R input.
shinyServer(function(input, output) {
  # histogram whose bin count is driven by the ui.R control input$bins
  output$distPlot <- renderPlot({
    # generate bins based on input$bins from ui.R
    x <- faithful[, 2]
    bins <- seq(min(x), max(x), length.out = input$bins + 1)
    # draw the histogram with the specified number of bins
    hist(x, breaks = bins, col = 'darkgray', border = 'white')
  })
  # second histogram driven by input$test; NOTE(review): length.out is
  # input$test (not input$test + 1 as above), so this yields input$test - 1
  # bins -- confirm that is intended
  output$testPlot <- renderPlot({
    # generate bins from ui.R
    x <- faithful[,2]
    bins <- seq(min(x),max(x),length.out = input$test )
    # draw the histogram
    hist(x,breaks =bins ,col='blue',border ='yellow')
  })
  # echo the user's text input back to the UI
  output$text1 <- renderText({
    paste('Your input:',input$text)
  })
})#shinyServer
| /R_Project_01/server.R | no_license | kirk760099/R_Project_test01 | R | false | false | 972 | r | #
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Server logic: two histograms of the Old Faithful data (column 2 = waiting
# time between eruptions) plus a text echo of a ui.R input.
shinyServer(function(input, output) {
  # histogram whose bin count is driven by the ui.R control input$bins
  output$distPlot <- renderPlot({
    # generate bins based on input$bins from ui.R
    x <- faithful[, 2]
    bins <- seq(min(x), max(x), length.out = input$bins + 1)
    # draw the histogram with the specified number of bins
    hist(x, breaks = bins, col = 'darkgray', border = 'white')
  })
  # second histogram driven by input$test; NOTE(review): length.out is
  # input$test (not input$test + 1 as above), so this yields input$test - 1
  # bins -- confirm that is intended
  output$testPlot <- renderPlot({
    # generate bins from ui.R
    x <- faithful[,2]
    bins <- seq(min(x),max(x),length.out = input$test )
    # draw the histogram
    hist(x,breaks =bins ,col='blue',border ='yellow')
  })
  # echo the user's text input back to the UI
  output$text1 <- renderText({
    paste('Your input:',input$text)
  })
})#shinyServer
|
library(randomUniformForest)
### Name: rUniformForest.combine
### Title: Incremental learning for random Uniform Forests
### Aliases: rUniformForest.combine
### Keywords: incremental learning
### ** Examples
## not run
## Classification : synthetic data
## get many forests and combine them
# n = 200; p = 10
## Simulate 'p' gaussian vectors with random parameters between -10 and 10.
# X <- simulationData(n,p)
## Make a rule to create response vector
# epsilon1 = runif(n,-1,1)
# epsilon2 = runif(n,-1,1)
# rule = 2*(X[,1]*X[,2] + X[,3]*X[,4]) + epsilon1*X[,5] + epsilon2*X[,6]
# Y <- as.factor(ifelse(rule > mean(rule), 1, 0))
## create many subsamples
# manyCuts <- cut(1:n, 5, labels = FALSE)
## compute many different models
# ruf1 <- randomUniformForest(X[which(manyCuts == 1),], Y[which(manyCuts == 1)],
# ntree = 30, mtry = 2, BreimanBounds = FALSE, importance = FALSE, threads = 1)
# ruf2 <- randomUniformForest(X[which(manyCuts == 2),], Y[which(manyCuts == 2)],
# ntree = 40, nodesize = 10, BreimanBounds = FALSE, importance = FALSE, threads = 1)
# ruf3 <- randomUniformForest(X[which(manyCuts == 3),], Y[which(manyCuts == 3)],
# ntree = 50, bagging = TRUE, BreimanBounds = FALSE, importance = FALSE, threads = 1)
## combine them in one ensemble
# ruf.combined <- rUniformForest.combine(ruf1, ruf2, ruf3)
# ruf.combined
## or
# rufObjects <- list(ruf1, ruf2, ruf3)
# ruf.combined <- rUniformForest.combine(rufObjects)
## remove 10 older trees
# ruf.combined <- rm.trees(ruf.combined, method = "oldest", howMany = 10)
# ruf.combined
## compute a new model and update
# ruf4 <- randomUniformForest(X[which(manyCuts == 4),], Y[which(manyCuts == 4)],
# ntree = 40, rebalancedsampling = TRUE, BreimanBounds = FALSE, threads = 1)
# ruf.updateCombined <- rUniformForest.combine(ruf.combined, ruf4)
# ruf.updateCombined
# ruf.pred <- predict(ruf.updateCombined, X[which(manyCuts == 5),])
## confusion matrix
# table(ruf.pred, Y[which(manyCuts == 5)])
## comparison with offline learning (that will always be better,
## except in the case of a shifting distribution)
# ruf.offline <- randomUniformForest(X[which(manyCuts != 5),], Y[which(manyCuts !=5)],
# threads = 1, ntree = 150, BreimanBounds = FALSE)
# ruf.offline.pred <- predict(ruf.offline, X[which(manyCuts == 5),])
## confusion matrix
# table(ruf.offline.pred, Y[which(manyCuts == 5)])
| /data/genthat_extracted_code/randomUniformForest/examples/rUniformForest.combine.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 2,377 | r | library(randomUniformForest)
### Name: rUniformForest.combine
### Title: Incremental learning for random Uniform Forests
### Aliases: rUniformForest.combine
### Keywords: incremental learning
### ** Examples
## not run
## Classification : synthetic data
## get many forests and combine them
# n = 200; p = 10
## Simulate 'p' gaussian vectors with random parameters between -10 and 10.
# X <- simulationData(n,p)
## Make a rule to create response vector
# epsilon1 = runif(n,-1,1)
# epsilon2 = runif(n,-1,1)
# rule = 2*(X[,1]*X[,2] + X[,3]*X[,4]) + epsilon1*X[,5] + epsilon2*X[,6]
# Y <- as.factor(ifelse(rule > mean(rule), 1, 0))
## create many subsamples
# manyCuts <- cut(1:n, 5, labels = FALSE)
## compute many different models
# ruf1 <- randomUniformForest(X[which(manyCuts == 1),], Y[which(manyCuts == 1)],
# ntree = 30, mtry = 2, BreimanBounds = FALSE, importance = FALSE, threads = 1)
# ruf2 <- randomUniformForest(X[which(manyCuts == 2),], Y[which(manyCuts == 2)],
# ntree = 40, nodesize = 10, BreimanBounds = FALSE, importance = FALSE, threads = 1)
# ruf3 <- randomUniformForest(X[which(manyCuts == 3),], Y[which(manyCuts == 3)],
# ntree = 50, bagging = TRUE, BreimanBounds = FALSE, importance = FALSE, threads = 1)
## combine them in one ensemble
# ruf.combined <- rUniformForest.combine(ruf1, ruf2, ruf3)
# ruf.combined
## or
# rufObjects <- list(ruf1, ruf2, ruf3)
# ruf.combined <- rUniformForest.combine(rufObjects)
## remove 10 older trees
# ruf.combined <- rm.trees(ruf.combined, method = "oldest", howMany = 10)
# ruf.combined
## compute a new model and update
# ruf4 <- randomUniformForest(X[which(manyCuts == 4),], Y[which(manyCuts == 4)],
# ntree = 40, rebalancedsampling = TRUE, BreimanBounds = FALSE, threads = 1)
# ruf.updateCombined <- rUniformForest.combine(ruf.combined, ruf4)
# ruf.updateCombined
# ruf.pred <- predict(ruf.updateCombined, X[which(manyCuts == 5),])
## confusion matrix
# table(ruf.pred, Y[which(manyCuts == 5)])
## comparison with offline learning (that will always be better,
## except in the case of a shifting distribution)
# ruf.offline <- randomUniformForest(X[which(manyCuts != 5),], Y[which(manyCuts !=5)],
# threads = 1, ntree = 150, BreimanBounds = FALSE)
# ruf.offline.pred <- predict(ruf.offline, X[which(manyCuts == 5),])
## confusion matrix
# table(ruf.offline.pred, Y[which(manyCuts == 5)])
|
# Data generating helpers for testing a development purposes.
# Behavior data with categories documented, not_documented.
# Derived from VA GoCC data.
#
# Returns a fixed 162-row readr-flavoured tibble ("spec_tbl_df") of monthly
# counts per station, with columns:
#   sta6a          - station identifier (character)
#   report_month   - report month (Date; the literals are days since 1970-01-01)
#   documented     - count in the "documented" category
#   not_documented - count in the "not documented" category
# NOTE(review): column meanings beyond the names are inferred from the
# "VA GoCC" comment above -- confirm against the original data source.
gen_va_gocc_data <- function(){
  # dput()-style hard-coded literal; the `spec` attribute reproduces the
  # readr column specification the original CSV was read with.
  structure(list(
    sta6a = c("4369AA", "4369AA", "4369AA", "4369AA",
  "4369AA", "4369AA", "4429AA", "4429AA", "4429AA", "4429AA", "4429AA",
  "4429AA", "4609AA", "4609AA", "4609AA", "4609AA", "4609AA", "4609AA",
  "5039AA", "5039AA", "5039AA", "5039AA", "5039AA", "5039AA", "5069AA",
  "5069AA", "5069AA", "5069AA", "5069AA", "5069AA", "5129AA", "5129AA",
  "5129AA", "5129AA", "5129AA", "5129AA", "5129AC", "5129AC", "5129AC",
  "5129AC", "5129AC", "5129AC", "5159AA", "5159AA", "5159AA", "5159AA",
  "5159AA", "5159AA", "5299AA", "5299AA", "5299AA", "5299AA", "5299AA",
  "5299AA", "5429AA", "5429AA", "5429AA", "5429AA", "5429AA", "5429AA",
  "5509AA", "5509AA", "5509AA", "5509AA", "5509AA", "5509AA", "5539AA",
  "5539AA", "5539AA", "5539AA", "5539AA", "5539AA", "5549AB", "5549AB",
  "5549AB", "5549AB", "5549AB", "5549AB", "5569AA", "5569AA", "5569AA",
  "5569AA", "5569AA", "5569AA", "5629AA", "5629AA", "5629AA", "5629AA",
  "5629AA", "5629AA", "5689AA", "5689AA", "5689AA", "5689AA", "5689AA",
  "5689AA", "5689AB", "5689AB", "5689AB", "5689AB", "5689AB", "5689AB",
  "5759AA", "5759AA", "5759AA", "5759AA", "5759AA", "5759AA", "5959AA",
  "5959AA", "5959AA", "5959AA", "5959AA", "5959AA", "6079AA", "6079AA",
  "6079AA", "6079AA", "6079AA", "6079AA", "6109AA", "6109AA", "6109AA",
  "6109AA", "6109AA", "6109AA", "6309AB", "6309AB", "6309AB", "6309AB",
  "6309AB", "6309AB", "6359AA", "6359AA", "6359AA", "6359AA", "6359AA",
  "6359AA", "6429AA", "6429AA", "6429AA", "6429AA", "6429AA", "6429AA",
  "6559AA", "6559AA", "6559AA", "6559AA", "6559AA", "6559AA", "6669AA",
  "6669AA", "6669AA", "6669AA", "6669AA", "6669AA", "6939AA", "6939AA",
  "6939AA", "6939AA", "6939AA", "6939AA"),
    # Each station repeats the same six consecutive months
    report_month = structure(c(17563,
  17591, 17622, 17652, 17683, 17713, 17563, 17591, 17622, 17652,
  17683, 17713, 17563, 17591, 17622, 17652, 17683, 17713, 17563,
  17591, 17622, 17652, 17683, 17713, 17563, 17591, 17622, 17652,
  17683, 17713, 17563, 17591, 17622, 17652, 17683, 17713, 17563,
  17591, 17622, 17652, 17683, 17713, 17563, 17591, 17622, 17652,
  17683, 17713, 17563, 17591, 17622, 17652, 17683, 17713, 17563,
  17591, 17622, 17652, 17683, 17713, 17563, 17591, 17622, 17652,
  17683, 17713, 17563, 17591, 17622, 17652, 17683, 17713, 17563,
  17591, 17622, 17652, 17683, 17713, 17563, 17591, 17622, 17652,
  17683, 17713, 17563, 17591, 17622, 17652, 17683, 17713, 17563,
  17591, 17622, 17652, 17683, 17713, 17563, 17591, 17622, 17652,
  17683, 17713, 17563, 17591, 17622, 17652, 17683, 17713, 17563,
  17591, 17622, 17652, 17683, 17713, 17563, 17591, 17622, 17652,
  17683, 17713, 17563, 17591, 17622, 17652, 17683, 17713, 17563,
  17591, 17622, 17652, 17683, 17713, 17563, 17591, 17622, 17652,
  17683, 17713, 17563, 17591, 17622, 17652, 17683, 17713, 17563,
  17591, 17622, 17652, 17683, 17713, 17563, 17591, 17622, 17652,
  17683, 17713, 17563, 17591, 17622, 17652, 17683, 17713), class = "Date"),
    documented = c(0, 0, 0, 1, 0, 2, 5, 2, 2, 3, 1, 1, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 1, 0, 2, 0, 5, 3, 3, 5, 5, 0, 0, 0,
  5, 7, 4, 0, 0, 0, 5, 1, 3, 1, 0, 0, 2, 1, 2, 1, 0, 0, 1,
  1, 2, 0, 1, 3, 8, 7, 8, 0, 3, 2, 3, 1, 0, 0, 0, 0, 0, 3,
  5, 0, 0, 0, 0, 0, 0, 3, 3, 2, 2, 2, 3, 0, 0, 0, 0, 0, 0,
  3, 5, 7, 5, 7, 3, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 7, 8, 10, 5, 10, 8, 10, 9, 8, 0, 0, 0, 1, 1, 2, 0,
  0, 0, 3, 2, 4, 0, 0, 10, 9, 13, 7, 0, 0, 1, 1, 4, 7, 0, 1,
  1, 1, 0, 8, 0, 0, 0, 0, 0, 0, 6, 6, 8, 8, 5, 10),
    not_documented = c(1,
  0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 4, 1, 1, 1, 0, 4, 1, 1,
  1, 3, 2, 2, 4, 0, 1, 0, 0, 1, 8, 8, 7, 9, 2, 1, 4, 5, 3,
  2, 2, 1, 7, 8, 8, 4, 2, 1, 3, 5, 6, 3, 3, 3, 3, 6, 7, 3,
  0, 0, 4, 5, 6, 9, 5, 5, 6, 7, 5, 11, 0, 0, 3, 0, 1, 1, 2,
  0, 0, 0, 0, 0, 0, 0, 4, 1, 4, 6, 4, 1, 0, 0, 0, 0, 0, 1,
  0, 0, 0, 0, 0, 0, 2, 6, 5, 5, 1, 6, 10, 8, 11, 5, 0, 0, 0,
  0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 9, 8, 5, 3, 2, 2, 22, 21,
  9, 10, 3, 6, 14, 13, 2, 6, 4, 0, 5, 9, 5, 4, 4, 1, 3, 1,
  3, 2, 1, 3, 0, 0, 0, 0, 0, 0)),
  class = c("spec_tbl_df", "tbl_df", "tbl", "data.frame"),
  # Negative row.names length is R's compact "162 automatic row names"
  row.names = c(NA, -162L),
  spec = structure(list(
    cols = list(
      sta6a = structure(list(), class = c("collector_character", "collector")),
      report_month = structure(list(format = ""), class = c("collector_date", "collector")),
      documented = structure(list(), class = c("collector_double", "collector")),
      not_documented = structure(list(), class = c("collector_double", "collector"))),
    default = structure(list(), class = c("collector_guess", "collector")),
    skip = 1),
    class = "col_spec"))
}
# Behavior data with perscribing numerators, denominators, and rates.
# Derived from publicly available NHS perscriber data.
#
# Returns a fixed 90-row tibble (9 practices x 10 monthly periods) with:
#   practice           - NHS practice code (character)
#   period             - month (Date; literals are days since 1970-01-01)
#   total_scripts      - total prescriptions in the period
#   total_quantity     - total quantity prescribed
#   high_dose_scripts  - high-dose prescriptions (numerator)
#   high_dose_quantity - high-dose quantity (numerator)
#   hd_script_ratio    - high_dose_scripts / total_scripts
#   hd_quantity_ratio  - high_dose_quantity / total_quantity
# NOTE(review): the `spec` attribute lists an X1 index column that is not
# present in the data list -- presumably dropped after reading; confirm.
gen_mtx_behavior_data <- function(){
  # dput()-style hard-coded literal with the original readr column spec
  # preserved in the `spec` attribute.
  structure(list(
    practice = c("B85008", "B85008", "B85008", "B85008",
  "B85008", "B85008", "B85008", "B85008", "B85008", "B85008", "E82012",
  "E82012", "E82012", "E82012", "E82012", "E82012", "E82012", "E82012",
  "E82012", "E82012", "E83006", "E83006", "E83006", "E83006", "E83006",
  "E83006", "E83006", "E83006", "E83006", "E83006", "E87746", "E87746",
  "E87746", "E87746", "E87746", "E87746", "E87746", "E87746", "E87746",
  "E87746", "H83037", "H83037", "H83037", "H83037", "H83037", "H83037",
  "H83037", "H83037", "H83037", "H83037", "L84071", "L84071", "L84071",
  "L84071", "L84071", "L84071", "L84071", "L84071", "L84071", "L84071",
  "M82006", "M82006", "M82006", "M82006", "M82006", "M82006", "M82006",
  "M82006", "M82006", "M82006", "M85027", "M85027", "M85027", "M85027",
  "M85027", "M85027", "M85027", "M85027", "M85027", "M85027", "P81063",
  "P81063", "P81063", "P81063", "P81063", "P81063", "P81063", "P81063",
  "P81063", "P81063"),
    # Same ten months repeated for each practice
    period = structure(c(17532, 17563, 17591,
  17622, 17652, 17683, 17744, 17775, 17805, 17836, 17532, 17563,
  17591, 17622, 17652, 17683, 17744, 17775, 17805, 17836, 17532,
  17563, 17591, 17622, 17652, 17683, 17744, 17775, 17805, 17836,
  17532, 17563, 17591, 17622, 17652, 17683, 17744, 17775, 17805,
  17836, 17532, 17563, 17591, 17622, 17652, 17683, 17744, 17775,
  17805, 17836, 17532, 17563, 17591, 17622, 17652, 17683, 17744,
  17775, 17805, 17836, 17532, 17563, 17591, 17622, 17652, 17683,
  17744, 17775, 17805, 17836, 17532, 17563, 17591, 17622, 17652,
  17683, 17744, 17775, 17805, 17836, 17532, 17563, 17591, 17622,
  17652, 17683, 17744, 17775, 17805, 17836),
  class = "Date"),
    total_scripts = c(19,
  19, 21, 16, 19, 23, 22, 22, 24, 23, 31, 27, 29, 25, 27, 32, 35,
  26, 28, 35, 8, 2, 5, 4, 7, 1, 5, 1, 6, 3, 4, 4, 8, 4, 4, 4, 4,
  4, 4, 4, 5, 6, 7, 15, 14, 8, 7, 7, 7, 9, 32, 27, 35, 33, 27,
  33, 26, 32, 33, 30, 34, 38, 40, 39, 39, 34, 37, 40, 43, 39, 27,
  23, 21, 17, 20, 17, 21, 16, 22, 16, 28, 29, 31, 22, 29, 29, 29,
  20, 37, 27),
    total_quantity = c(600, 580, 612, 388, 580, 580,
  460, 508, 652, 656, 900, 704, 848, 616, 744, 796, 860, 612, 694,
  868, 284, 76, 140, 196, 204, 24, 224, 32, 208, 56, 32, 32, 52,
  32, 32, 8, 8, 8, 8, 8, 136, 168, 204, 231, 368, 228, 220, 160,
  184, 236, 777, 700, 832, 828, 648, 812, 608, 780, 784, 732, 768,
  910, 926, 948, 892, 804, 900, 888, 1028, 924, 631, 486, 496,
  428, 502, 372, 528, 372, 556, 358, 666, 808, 688, 616, 744, 714,
  690, 600, 826, 678),
    high_dose_scripts = c(0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 0, 0,
  0, 1, 1, 4, 4, 8, 4, 4, 4, 4, 4, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ),
    high_dose_quantity = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 12, 0, 0, 0, 8, 8, 32, 32,
  52, 32, 32, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 4,
  4, 4, 4, 4, 4, 4, 8, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
    hd_script_ratio = c(0,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  0, 0.25, 0.285714285714286, 0, 0, 0, 0.166666666666667, 0.333333333333333,
  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03125,
  0.037037037037037, 0.0285714285714286, 0.0303030303030303, 0.037037037037037,
  0.0303030303030303, 0.0384615384615385, 0.03125, 0.0606060606060606,
  0.0333333333333333, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0476190476190476,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
    hd_quantity_ratio = c(0,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  0, 0.0204081632653061, 0.0588235294117647, 0, 0, 0, 0.0384615384615385,
  0.142857142857143, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0.00514800514800515, 0.00571428571428571, 0.00480769230769231,
  0.00483091787439614, 0.00617283950617284, 0.00492610837438424,
  0.00657894736842105, 0.00512820512820513, 0.0102040816326531,
  0.00546448087431694, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0161290322580645,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)),
  row.names = c(NA, -90L),
  spec = structure(
    list( cols = list(
      X1 = structure(list(), class = c("collector_double", "collector")),
      practice = structure(list(), class = c("collector_character", "collector")),
      period = structure(list(format = ""), class = c("collector_date", "collector")),
      total_scripts = structure(list(), class = c("collector_double", "collector")),
      total_quantity = structure(list(), class = c("collector_double", "collector")),
      high_dose_scripts = structure(list(), class = c("collector_double", "collector")),
      high_dose_quantity = structure(list(), class = c("collector_double", "collector")),
      hd_script_ratio = structure(list(), class = c("collector_double", "collector")),
      hd_quantity_ratio = structure(list(), class = c("collector_double", "collector"))),
    default = structure(list(), class = c("collector_guess", "collector")),
    skip = 1), class = "col_spec"),
    class = c("tbl_df", "tbl", "data.frame"))
}
| /tests/testthat/helper_data.R | no_license | Display-Lab/pictoralist | R | false | false | 11,415 | r | # Data generating helpers for testing a development purposes.
# Behavior data with categories documented, not_documented.
# Derived from VA GoCC data.
gen_va_gocc_data <- function(){
structure(list(
sta6a = c("4369AA", "4369AA", "4369AA", "4369AA",
"4369AA", "4369AA", "4429AA", "4429AA", "4429AA", "4429AA", "4429AA",
"4429AA", "4609AA", "4609AA", "4609AA", "4609AA", "4609AA", "4609AA",
"5039AA", "5039AA", "5039AA", "5039AA", "5039AA", "5039AA", "5069AA",
"5069AA", "5069AA", "5069AA", "5069AA", "5069AA", "5129AA", "5129AA",
"5129AA", "5129AA", "5129AA", "5129AA", "5129AC", "5129AC", "5129AC",
"5129AC", "5129AC", "5129AC", "5159AA", "5159AA", "5159AA", "5159AA",
"5159AA", "5159AA", "5299AA", "5299AA", "5299AA", "5299AA", "5299AA",
"5299AA", "5429AA", "5429AA", "5429AA", "5429AA", "5429AA", "5429AA",
"5509AA", "5509AA", "5509AA", "5509AA", "5509AA", "5509AA", "5539AA",
"5539AA", "5539AA", "5539AA", "5539AA", "5539AA", "5549AB", "5549AB",
"5549AB", "5549AB", "5549AB", "5549AB", "5569AA", "5569AA", "5569AA",
"5569AA", "5569AA", "5569AA", "5629AA", "5629AA", "5629AA", "5629AA",
"5629AA", "5629AA", "5689AA", "5689AA", "5689AA", "5689AA", "5689AA",
"5689AA", "5689AB", "5689AB", "5689AB", "5689AB", "5689AB", "5689AB",
"5759AA", "5759AA", "5759AA", "5759AA", "5759AA", "5759AA", "5959AA",
"5959AA", "5959AA", "5959AA", "5959AA", "5959AA", "6079AA", "6079AA",
"6079AA", "6079AA", "6079AA", "6079AA", "6109AA", "6109AA", "6109AA",
"6109AA", "6109AA", "6109AA", "6309AB", "6309AB", "6309AB", "6309AB",
"6309AB", "6309AB", "6359AA", "6359AA", "6359AA", "6359AA", "6359AA",
"6359AA", "6429AA", "6429AA", "6429AA", "6429AA", "6429AA", "6429AA",
"6559AA", "6559AA", "6559AA", "6559AA", "6559AA", "6559AA", "6669AA",
"6669AA", "6669AA", "6669AA", "6669AA", "6669AA", "6939AA", "6939AA",
"6939AA", "6939AA", "6939AA", "6939AA"),
report_month = structure(c(17563,
17591, 17622, 17652, 17683, 17713, 17563, 17591, 17622, 17652,
17683, 17713, 17563, 17591, 17622, 17652, 17683, 17713, 17563,
17591, 17622, 17652, 17683, 17713, 17563, 17591, 17622, 17652,
17683, 17713, 17563, 17591, 17622, 17652, 17683, 17713, 17563,
17591, 17622, 17652, 17683, 17713, 17563, 17591, 17622, 17652,
17683, 17713, 17563, 17591, 17622, 17652, 17683, 17713, 17563,
17591, 17622, 17652, 17683, 17713, 17563, 17591, 17622, 17652,
17683, 17713, 17563, 17591, 17622, 17652, 17683, 17713, 17563,
17591, 17622, 17652, 17683, 17713, 17563, 17591, 17622, 17652,
17683, 17713, 17563, 17591, 17622, 17652, 17683, 17713, 17563,
17591, 17622, 17652, 17683, 17713, 17563, 17591, 17622, 17652,
17683, 17713, 17563, 17591, 17622, 17652, 17683, 17713, 17563,
17591, 17622, 17652, 17683, 17713, 17563, 17591, 17622, 17652,
17683, 17713, 17563, 17591, 17622, 17652, 17683, 17713, 17563,
17591, 17622, 17652, 17683, 17713, 17563, 17591, 17622, 17652,
17683, 17713, 17563, 17591, 17622, 17652, 17683, 17713, 17563,
17591, 17622, 17652, 17683, 17713, 17563, 17591, 17622, 17652,
17683, 17713, 17563, 17591, 17622, 17652, 17683, 17713), class = "Date"),
documented = c(0, 0, 0, 1, 0, 2, 5, 2, 2, 3, 1, 1, 0, 0,
0, 0, 0, 0, 0, 0, 0, 1, 0, 2, 0, 5, 3, 3, 5, 5, 0, 0, 0,
5, 7, 4, 0, 0, 0, 5, 1, 3, 1, 0, 0, 2, 1, 2, 1, 0, 0, 1,
1, 2, 0, 1, 3, 8, 7, 8, 0, 3, 2, 3, 1, 0, 0, 0, 0, 0, 3,
5, 0, 0, 0, 0, 0, 0, 3, 3, 2, 2, 2, 3, 0, 0, 0, 0, 0, 0,
3, 5, 7, 5, 7, 3, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 7, 8, 10, 5, 10, 8, 10, 9, 8, 0, 0, 0, 1, 1, 2, 0,
0, 0, 3, 2, 4, 0, 0, 10, 9, 13, 7, 0, 0, 1, 1, 4, 7, 0, 1,
1, 1, 0, 8, 0, 0, 0, 0, 0, 0, 6, 6, 8, 8, 5, 10),
not_documented = c(1,
0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 4, 1, 1, 1, 0, 4, 1, 1,
1, 3, 2, 2, 4, 0, 1, 0, 0, 1, 8, 8, 7, 9, 2, 1, 4, 5, 3,
2, 2, 1, 7, 8, 8, 4, 2, 1, 3, 5, 6, 3, 3, 3, 3, 6, 7, 3,
0, 0, 4, 5, 6, 9, 5, 5, 6, 7, 5, 11, 0, 0, 3, 0, 1, 1, 2,
0, 0, 0, 0, 0, 0, 0, 4, 1, 4, 6, 4, 1, 0, 0, 0, 0, 0, 1,
0, 0, 0, 0, 0, 0, 2, 6, 5, 5, 1, 6, 10, 8, 11, 5, 0, 0, 0,
0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 9, 8, 5, 3, 2, 2, 22, 21,
9, 10, 3, 6, 14, 13, 2, 6, 4, 0, 5, 9, 5, 4, 4, 1, 3, 1,
3, 2, 1, 3, 0, 0, 0, 0, 0, 0)),
class = c("spec_tbl_df", "tbl_df", "tbl", "data.frame"),
row.names = c(NA, -162L),
spec = structure(list(
cols = list(
sta6a = structure(list(), class = c("collector_character", "collector")),
report_month = structure(list(format = ""), class = c("collector_date", "collector")),
documented = structure(list(), class = c("collector_double", "collector")),
not_documented = structure(list(), class = c("collector_double", "collector"))),
default = structure(list(), class = c("collector_guess", "collector")),
skip = 1),
class = "col_spec"))
}
# Behavior data with perscribing numerators, denominators, and rates.
# Derived from publicly available NHS perscriber data.
gen_mtx_behavior_data <- function(){
structure(list(
practice = c("B85008", "B85008", "B85008", "B85008",
"B85008", "B85008", "B85008", "B85008", "B85008", "B85008", "E82012",
"E82012", "E82012", "E82012", "E82012", "E82012", "E82012", "E82012",
"E82012", "E82012", "E83006", "E83006", "E83006", "E83006", "E83006",
"E83006", "E83006", "E83006", "E83006", "E83006", "E87746", "E87746",
"E87746", "E87746", "E87746", "E87746", "E87746", "E87746", "E87746",
"E87746", "H83037", "H83037", "H83037", "H83037", "H83037", "H83037",
"H83037", "H83037", "H83037", "H83037", "L84071", "L84071", "L84071",
"L84071", "L84071", "L84071", "L84071", "L84071", "L84071", "L84071",
"M82006", "M82006", "M82006", "M82006", "M82006", "M82006", "M82006",
"M82006", "M82006", "M82006", "M85027", "M85027", "M85027", "M85027",
"M85027", "M85027", "M85027", "M85027", "M85027", "M85027", "P81063",
"P81063", "P81063", "P81063", "P81063", "P81063", "P81063", "P81063",
"P81063", "P81063"),
period = structure(c(17532, 17563, 17591,
17622, 17652, 17683, 17744, 17775, 17805, 17836, 17532, 17563,
17591, 17622, 17652, 17683, 17744, 17775, 17805, 17836, 17532,
17563, 17591, 17622, 17652, 17683, 17744, 17775, 17805, 17836,
17532, 17563, 17591, 17622, 17652, 17683, 17744, 17775, 17805,
17836, 17532, 17563, 17591, 17622, 17652, 17683, 17744, 17775,
17805, 17836, 17532, 17563, 17591, 17622, 17652, 17683, 17744,
17775, 17805, 17836, 17532, 17563, 17591, 17622, 17652, 17683,
17744, 17775, 17805, 17836, 17532, 17563, 17591, 17622, 17652,
17683, 17744, 17775, 17805, 17836, 17532, 17563, 17591, 17622,
17652, 17683, 17744, 17775, 17805, 17836),
class = "Date"),
total_scripts = c(19,
19, 21, 16, 19, 23, 22, 22, 24, 23, 31, 27, 29, 25, 27, 32, 35,
26, 28, 35, 8, 2, 5, 4, 7, 1, 5, 1, 6, 3, 4, 4, 8, 4, 4, 4, 4,
4, 4, 4, 5, 6, 7, 15, 14, 8, 7, 7, 7, 9, 32, 27, 35, 33, 27,
33, 26, 32, 33, 30, 34, 38, 40, 39, 39, 34, 37, 40, 43, 39, 27,
23, 21, 17, 20, 17, 21, 16, 22, 16, 28, 29, 31, 22, 29, 29, 29,
20, 37, 27),
total_quantity = c(600, 580, 612, 388, 580, 580,
460, 508, 652, 656, 900, 704, 848, 616, 744, 796, 860, 612, 694,
868, 284, 76, 140, 196, 204, 24, 224, 32, 208, 56, 32, 32, 52,
32, 32, 8, 8, 8, 8, 8, 136, 168, 204, 231, 368, 228, 220, 160,
184, 236, 777, 700, 832, 828, 648, 812, 608, 780, 784, 732, 768,
910, 926, 948, 892, 804, 900, 888, 1028, 924, 631, 486, 496,
428, 502, 372, 528, 372, 556, 358, 666, 808, 688, 616, 744, 714,
690, 600, 826, 678),
high_dose_scripts = c(0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 0, 0,
0, 1, 1, 4, 4, 8, 4, 4, 4, 4, 4, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ),
high_dose_quantity = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 12, 0, 0, 0, 8, 8, 32, 32,
52, 32, 32, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 4,
4, 4, 4, 4, 4, 4, 8, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
hd_script_ratio = c(0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0.25, 0.285714285714286, 0, 0, 0, 0.166666666666667, 0.333333333333333,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03125,
0.037037037037037, 0.0285714285714286, 0.0303030303030303, 0.037037037037037,
0.0303030303030303, 0.0384615384615385, 0.03125, 0.0606060606060606,
0.0333333333333333, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0476190476190476,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
hd_quantity_ratio = c(0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0.0204081632653061, 0.0588235294117647, 0, 0, 0, 0.0384615384615385,
0.142857142857143, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0.00514800514800515, 0.00571428571428571, 0.00480769230769231,
0.00483091787439614, 0.00617283950617284, 0.00492610837438424,
0.00657894736842105, 0.00512820512820513, 0.0102040816326531,
0.00546448087431694, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0161290322580645,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)),
row.names = c(NA, -90L),
spec = structure(
list( cols = list(
X1 = structure(list(), class = c("collector_double", "collector")),
practice = structure(list(), class = c("collector_character", "collector")),
period = structure(list(format = ""), class = c("collector_date", "collector")),
total_scripts = structure(list(), class = c("collector_double", "collector")),
total_quantity = structure(list(), class = c("collector_double", "collector")),
high_dose_scripts = structure(list(), class = c("collector_double", "collector")),
high_dose_quantity = structure(list(), class = c("collector_double", "collector")),
hd_script_ratio = structure(list(), class = c("collector_double", "collector")),
hd_quantity_ratio = structure(list(), class = c("collector_double", "collector"))),
default = structure(list(), class = c("collector_guess", "collector")),
skip = 1), class = "col_spec"),
class = c("tbl_df", "tbl", "data.frame"))
}
|
test_that("ohwhaley works", {
  msg <- "This is a character string"
  say(msg)
  expect_type(msg, "character")
  expect_gt(length(msg), 0)
  # say() called with no arguments: the return value should be NULL,
  # returned invisibly, with the output emitted via message()
  expect_length(say(), 0) # zero-length result
  expect_null(say())      # specifically NULL
  expect_invisible(say()) # returned invisibly
  expect_message(say())   # output goes through message()
})
| /ohwhaley/tests/testthat/test-ohwhaley.R | permissive | akansha-kumar/ohwhaley | R | false | false | 396 | r | test_that("ohwhaley works", {
what <- c("This is a character string")
say(what)
expect_type(what, "character")
expect_gt(length(what), 0)
# test the output is of correct format
expect_length(say(), 0) #Returns something with a length of 0
expect_null(say()) #Returns null
expect_invisible(say()) #Returns invisibly
expect_message(say()) #Returns a message
})
|
#' plotConfMatrix plots a confusion matrix with some statistics.
#'
#' The function plots a confusion matrix with some statistics. The function is used internally by \code{\link{dataPlot}} but it can be used separately.
#' @param data A table which contains a confusion matrix.
#' @return Nothing to return.
#' @examples
#' data <- table(as.factor(c(1,2,4,2,4,5)),as.factor(c(1,2,5,4,5,2)))
#' plotConfMatrix(data)
plotConfMatrix <- function(data){
  # confusionMatrix() is caret's; it supplies the accuracy / F1 /
  # sensitivity / specificity figures shown in the details panel below.
  res <- confusionMatrix(data)
  # Two stacked panels: the matrix image (double height) and a details strip
  layout(matrix(c(1,1,2)))
  opar <- par(mar=c(6.1, 9.1, 1, 2))
  # Restore the caller's graphical parameters even if a plotting call below
  # fails; re-running par(opar) after the explicit restore further down is a
  # harmless no-op.
  on.exit(par(opar), add = TRUE)
  # Log-compress the counts into [-10, 10] for the colour scale and negate
  # the diagonal so correct predictions map to the opposite end of the
  # red-grey-green palette from the errors.
  x <- x.orig <- unclass(data)
  x <- log(x + 0.5) * 2.33
  x[x < 0] <- NA
  x[x > 10] <- 10
  diag(x) <- -diag(x)
  image(seq_len(ncol(x)), seq_len(ncol(x)),
        -(x[, nrow(x):1]), xlab='', ylab='',
        col=colorRampPalette(c(hsv(h = 0, s = 0.9, v = 0.9, alpha = 1),
                               hsv(h = 0, s = 0, v = 0.9, alpha = 1),
                               hsv(h = 2/6, s = 0.9, v = 0.9, alpha = 1)))(41),
        xaxt='n', yaxt='n', zlim=c(-10, 10))
  axis(1, at=seq_len(ncol(x)), labels=FALSE, cex.axis=1.2, font = 2)
  title(xlab='Actual', line=4.5, cex = 1.2)
  text(seq_len(ncol(x)), par("usr")[3] - 0.8, labels = colnames(x), pos = 1 ,font = 2, xpd = TRUE)
  axis(2, at=ncol(x):1, labels=colnames(x), las=1, cex.axis=1.2,font = 2)
  title(ylab='Predicted', line=7.5, cex = 1.2)
  abline(h = 0:ncol(x) + 0.5, col = 'gray')
  abline(v = 0:ncol(x) + 0.5, col = 'gray')
  # Overlay the raw (uncompressed) counts on the cells
  text(seq_len(ncol(x)), rep(ncol(x):1, each=ncol(x)),
       labels = c(x.orig),cex=1.2, font=2)
  box(lwd=2)
  par(opar) # reset margins before drawing the details panel
  # add in the specifics
  plot(c(100, 0),c(0, 50), type = "n", xlab="", ylab="", main = "DETAILS", xaxt='n', yaxt='n')
  text(20, 35, names(res$overall[1]), cex=1.2, font=2)
  text(20, 15, round(as.numeric(res$overall[1])*100, 3), cex=1.4)
  if(ncol(x) > 2){
    # Multiclass: res$byClass is a matrix, one row per class; columns 7/1/2
    # are F1, Sensitivity and Specificity, macro-averaged here
    text(40, 35, colnames(res$byClass)[7], cex=1.2, font=2)
    # Classes with an undefined F1 count as 0 in the mean
    res$byClass[is.na(res$byClass[,7]),7] <- 0
    text(40, 15, round(mean(as.numeric(res$byClass[,7]))*100, 3), cex=1.4)
    text(60, 35, colnames(res$byClass)[1], cex=1.2, font=2)
    text(60, 15, round(mean(as.numeric(res$byClass[,1]))*100, 3), cex=1.4)
    text(80, 35, colnames(res$byClass)[2], cex=1.2, font=2)
    text(80, 15, round(mean(as.numeric(res$byClass[,2]))*100, 3), cex=1.4)
  }else{
    # Binary case: res$byClass is a named vector, not a matrix
    text(50, 35, names(res$byClass)[1], cex=1.2, font=2)
    text(50, 15, round(mean(as.numeric(res$byClass[1]))*100, 3), cex=1.4)
    text(80, 35, names(res$byClass)[2], cex=1.2, font=2)
    text(80, 15, round(mean(as.numeric(res$byClass[2]))*100, 3), cex=1.4)
  }
}
| /R/plotConfMatrix.R | no_license | danielredondo/KnowSeq | R | false | false | 2,664 | r | #' plotConfMatrix plots a confusion matrix with some statistics.
#'
#' The function plots a confusion matrix with some statistics. The function is used internally by \code{\link{dataPlot}} but it can be used separatelly.
#' @param data A table which contains a confusion matrix.
#' @return Nothing to return.
#' @examples
#' data <- table(as.factor(c(1,2,4,2,4,5)),as.factor(c(1,2,5,4,5,2)))
#' plotConfMatrix(data)
plotConfMatrix <- function(data){
res <- confusionMatrix(data)
layout(matrix(c(1,1,2)))
# The above rescales the confusion matrix such that columns sum to 100.
opar <- par(mar=c(6.1, 9.1, 1, 2))
x <- x.orig <- unclass(data)
x <- log(x + 0.5) * 2.33
x[x < 0] <- NA
x[x > 10] <- 10
diag(x) <- -diag(x)
image(seq_len(ncol(x)), seq_len(ncol(x)),
-(x[, nrow(x):1]), xlab='', ylab='',
col=colorRampPalette(c(hsv(h = 0, s = 0.9, v = 0.9, alpha = 1),
hsv(h = 0, s = 0, v = 0.9, alpha = 1),
hsv(h = 2/6, s = 0.9, v = 0.9, alpha = 1)))(41),
xaxt='n', yaxt='n', zlim=c(-10, 10))
axis(1, at=seq_len(ncol(x)), labels=FALSE, cex.axis=1.2, font = 2)
title(xlab='Actual', line=4.5, cex = 1.2)
text(seq_len(ncol(x)), par("usr")[3] - 0.8, labels = colnames(x), pos = 1 ,font = 2, xpd = TRUE)
axis(2, at=ncol(x):1, labels=colnames(x), las=1, cex.axis=1.2,font = 2)
title(ylab='Predicted', line=7.5, cex = 1.2)
abline(h = 0:ncol(x) + 0.5, col = 'gray')
abline(v = 0:ncol(x) + 0.5, col = 'gray')
text(seq_len(ncol(x)), rep(ncol(x):1, each=ncol(x)),
labels = c(x.orig),cex=1.2, font=2)
box(lwd=2)
par(opar) # reset par
# add in the specifics
plot(c(100, 0),c(0, 50), type = "n", xlab="", ylab="", main = "DETAILS", xaxt='n', yaxt='n')
text(20, 35, names(res$overall[1]), cex=1.2, font=2)
text(20, 15, round(as.numeric(res$overall[1])*100, 3), cex=1.4)
if(ncol(x) > 2){
text(40, 35, colnames(res$byClass)[7], cex=1.2, font=2)
res$byClass[is.na(res$byClass[,7]),7] <- 0
text(40, 15, round(mean(as.numeric(res$byClass[,7]))*100, 3), cex=1.4)
text(60, 35, colnames(res$byClass)[1], cex=1.2, font=2)
text(60, 15, round(mean(as.numeric(res$byClass[,1]))*100, 3), cex=1.4)
text(80, 35, colnames(res$byClass)[2], cex=1.2, font=2)
text(80, 15, round(mean(as.numeric(res$byClass[,2]))*100, 3), cex=1.4)
}else{
text(50, 35, names(res$byClass)[1], cex=1.2, font=2)
text(50, 15, round(mean(as.numeric(res$byClass[1]))*100, 3), cex=1.4)
text(80, 35, names(res$byClass)[2], cex=1.2, font=2)
text(80, 15, round(mean(as.numeric(res$byClass[2]))*100, 3), cex=1.4)
}
}
|
# Scratch script: fit a GauPro Gaussian-process model to the 7-d piston
# test function and exercise the plotting / expected-improvement (EI) API.
f <- TestFunctions::piston
d <- 7
n <- 30
# Space-filling Latin hypercube design (lhs::randomLHS), n points in d dims
x <- lhs::randomLHS(n=n,k=d)
# Noisy responses (sd = 0.1); no set.seed(), so runs are not reproducible
y <- f(x) + rnorm(nrow(x), 0,1e-1)
# y
# system.time({gp <- GauPro_kernel_model$new(X=x, Z=y, kernel = Matern52$new(D=d), verbose = 5)})
# Fit with a Gaussian kernel (Matern 5/2 alternative kept above for comparison)
system.time({gp <- GauPro_kernel_model$new(X=x, Z=y, kernel = Gaussian$new(D=d), verbose = 5)})
# Leave-one-out predictions vs. observed responses
plot(gp$pred_LOO(), y)
gp$plotmarginal()
gp$plotmarginal(gp$X[1,])
plot(gp)
gp$plotmarginalrandom()
# Expected improvement at one random point and over a 100-point design
gp$EI(runif(7))
gp$EI(lhs::randomLHS(n=100, k=ncol(x)))
# NOTE(review): several components of this start point lie outside [0, 1];
# it looks like a pasted optimizer iterate -- confirm intent.
xmx <- c(1.2770051, -0.2920814, 0.9825472, -0.2937785, -1.3244573, 6.8359251, -11.4165417)
# Maximize EI from xmx (optim minimizes, hence the negated EI), tracing
# each evaluation to the console
optim(par=xmx, fn=function(xx){ei <- -gp$EI(xx); cat(xx, ei, "\n"); ei})
# Single- and multi-point (q = 5) EI maximization, for both maximization
# and minimization of the underlying function
gp$maxEI()
gp$maxEI(minimize = T)
f(gp$maxEI())
f(gp$maxEI(minimize = T))
gp$maxqEI(5)
f(gp$maxqEI(5))
gp$maxqEI(5, minimize = T)
f(gp$maxqEI(5, minimize = T))
# Relative difference between a and b: |a - b| scaled by the largest
# absolute value found in either input (so equal inputs give 0).
reldiff <- function(a, b) {
  gap <- abs(a - b)
  scale <- max(abs(c(a, b)))
  gap / scale
}
| /scratch/scratch_kernel_model_piston.R | no_license | CollinErickson/GauPro | R | false | false | 876 | r |
f <- TestFunctions::piston
d <- 7
n <- 30
x <- lhs::randomLHS(n=n,k=d)
y <- f(x) + rnorm(nrow(x), 0,1e-1)
# y
# system.time({gp <- GauPro_kernel_model$new(X=x, Z=y, kernel = Matern52$new(D=d), verbose = 5)})
system.time({gp <- GauPro_kernel_model$new(X=x, Z=y, kernel = Gaussian$new(D=d), verbose = 5)})
plot(gp$pred_LOO(), y)
gp$plotmarginal()
gp$plotmarginal(gp$X[1,])
plot(gp)
gp$plotmarginalrandom()
gp$EI(runif(7))
gp$EI(lhs::randomLHS(n=100, k=ncol(x)))
xmx <- c(1.2770051, -0.2920814, 0.9825472, -0.2937785, -1.3244573, 6.8359251, -11.4165417)
optim(par=xmx, fn=function(xx){ei <- -gp$EI(xx); cat(xx, ei, "\n"); ei})
gp$maxEI()
gp$maxEI(minimize = T)
f(gp$maxEI())
f(gp$maxEI(minimize = T))
gp$maxqEI(5)
f(gp$maxqEI(5))
gp$maxqEI(5, minimize = T)
f(gp$maxqEI(5, minimize = T))
reldiff <- function(a,b) {abs(a-b)/max(abs(c(a,b)))}
|
# Generate 8 bins for relative turning angle (TA)
#
# Discretizes dat$rel.angle (radians in [-pi, pi]) into 8 equal pi/4-wide
# bins and stores the 1-based bin index in a new TA column. Bins are
# half-open [lo, hi); an angle of exactly +pi is folded into the last bin
# (the original version silently left it NA). NA angles stay NA.
assign.rel_angle.bin=function(dat){
  angle.bin.lims=seq(from=-pi, to=pi, by=pi/4)  # 9 edges -> 8 bins
  n.bins=length(angle.bin.lims) - 1L
  dat$TA<- NA
  # Iterate over bins, not edges: the old 1:length() loop ran one extra,
  # no-op iteration whose upper edge was NA.
  for(i in seq_len(n.bins)) {
    in.bin=which(dat$rel.angle >= angle.bin.lims[i] & dat$rel.angle < angle.bin.lims[i+1])
    dat[in.bin,"TA"]=i
  }
  # The exclusive upper edges would drop an angle of exactly +pi
  dat[which(dat$rel.angle == pi),"TA"]=n.bins
  dat
}
# Discretize turning angles into bins 1-8 (adds a TA column to dat)
dat<- assign.rel_angle.bin(dat)
#View histogram
# Bar chart of TA bin frequencies; rows with no bin (NA angle) are dropped
ggplot(dat[!is.na(dat$TA),], aes(factor(TA))) +
  geom_bar() +
  labs(x = "Bin #")
#Generate 6 bins for step length (SL); use 90th percentile as last cutoff
assign.dist.bin=function(dat){
max.dist=max(dat$dist, na.rm = T) #using value from entire dataset, not specific time segment
upper90.thresh=as.numeric(quantile(dat$dist, 0.90, na.rm=T)) #using value from entire dataset
dist.bin.lims=seq(from=0, to=upper90.thresh, length.out = 6)
dist.bin.lims=c(dist.bin.lims, max.dist)
dat$SL<- NA
#names(dat)[7]<- "dist"
for(i in 1:length(dist.bin.lims)) {
tmp=which(dat$dist >= dist.bin.lims[i] & dat$dist < dist.bin.lims[i+1])
dat[tmp,"SL"]=i
}
tmp=which(dat$dist == max.dist)
dat[tmp,"SL"]=6
dat
}
dat<- assign.dist.bin(dat)
#View histogram
ggplot(dat[!is.na(dat$SL),], aes(factor(SL))) +
geom_bar() +
labs(x = "Bin #")
#Assignment of binary response variable for changes in turning angle autocorrelation (TAA)
#Try both with the sign of the direction
#sign
chng.rel_angle.sign=function(dat){
dat$TAA<- NA
for(i in 1:(nrow(dat)-1)) {
dat$TAA[i+1]<- ifelse(sign(dat$rel.angle[i]) == sign(dat$rel.angle[i+1]), 1, 0)
}
dat
}
dat<- chng.rel_angle.sign(dat)
#view time series
ggplot(dat[!is.na(dat$TAA),], aes(x=1:nrow(dat[!is.na(dat$TAA),]),
y=factor(TAA))) +
geom_point(size=2) +
scale_y_discrete("Change in Sign of Relative Turning Angle", breaks=c(0,1),
labels=c("Change Direction","Maintain Direction")) +
xlab("Time")
| /Discretization of Movement Params.R | no_license | ValleLabUF/git_segmentation_behavior | R | false | false | 1,906 | r | # Generate 8 bins for relative turning angle (TA)
assign.rel_angle.bin=function(dat){
angle.bin.lims=seq(from=-pi, to=pi, by=pi/4)
dat$TA<- NA
for(i in 1:length(angle.bin.lims)) {
tmp=which(dat$rel.angle >= angle.bin.lims[i] & dat$rel.angle < angle.bin.lims[i+1])
dat[tmp,"TA"]=i
}
dat
}
dat<- assign.rel_angle.bin(dat)
#View histogram
ggplot(dat[!is.na(dat$TA),], aes(factor(TA))) +
geom_bar() +
labs(x = "Bin #")
#Generate 6 bins for step length (SL); use 90th percentile as last cutoff
assign.dist.bin=function(dat){
max.dist=max(dat$dist, na.rm = T) #using value from entire dataset, not specific time segment
upper90.thresh=as.numeric(quantile(dat$dist, 0.90, na.rm=T)) #using value from entire dataset
dist.bin.lims=seq(from=0, to=upper90.thresh, length.out = 6)
dist.bin.lims=c(dist.bin.lims, max.dist)
dat$SL<- NA
#names(dat)[7]<- "dist"
for(i in 1:length(dist.bin.lims)) {
tmp=which(dat$dist >= dist.bin.lims[i] & dat$dist < dist.bin.lims[i+1])
dat[tmp,"SL"]=i
}
tmp=which(dat$dist == max.dist)
dat[tmp,"SL"]=6
dat
}
dat<- assign.dist.bin(dat)
#View histogram
ggplot(dat[!is.na(dat$SL),], aes(factor(SL))) +
geom_bar() +
labs(x = "Bin #")
#Assignment of binary response variable for changes in turning angle autocorrelation (TAA)
#Try both with the sign of the direction
#sign
chng.rel_angle.sign=function(dat){
dat$TAA<- NA
for(i in 1:(nrow(dat)-1)) {
dat$TAA[i+1]<- ifelse(sign(dat$rel.angle[i]) == sign(dat$rel.angle[i+1]), 1, 0)
}
dat
}
dat<- chng.rel_angle.sign(dat)
#view time series
ggplot(dat[!is.na(dat$TAA),], aes(x=1:nrow(dat[!is.na(dat$TAA),]),
y=factor(TAA))) +
geom_point(size=2) +
scale_y_discrete("Change in Sign of Relative Turning Angle", breaks=c(0,1),
labels=c("Change Direction","Maintain Direction")) +
xlab("Time")
|
#' Retrieve immediate children taxa for a given taxon name or ID.
#'
#' This function is different from [downstream()] in that it only
#' collects immediate taxonomic children, while [downstream()]
#' collects taxonomic names down to a specified taxonomic rank, e.g.,
#' getting all species in a family.
#'
#' @export
#' @param sci_id Vector of taxa names (character) or IDs (character or numeric)
#' to query.
#' @param db character; database to query. One or more of `itis`,
#' `ncbi`, `worms`, or `bold`. Note that each taxonomic data
#' source has their own identifiers, so that if you provide the wrong
#' `db` value for the identifier you could get a result, but it will
#' likely be wrong (not what you were expecting). If using ncbi, we recommend
#' getting an API key; see [taxize-authentication]
#' @param rows (numeric) Any number from 1 to infinity. If the default NA, all
#' rows are considered. Note that this parameter is ignored if you pass in a
#' taxonomic id of any of the acceptable classes: tsn. NCBI has a
#' method for this function but rows doesn't work.
#' @param x Deprecated, see `sci_id`
#' @param ... Further args passed on to [ritis::hierarchy_down()],
#' [ncbi_children()], [worrms::wm_children()], [bold_children()]
#' See those functions for what parameters can be passed on.
#'
#' @section ncbi:
#' note that with `db = "ncbi"`, we set `ambiguous = TRUE`; that is, children
#' taxa with words like "unclassified", "unknown", "uncultured", "sp." are
#' NOT removed
#'
#' @section bold:
#' BEWARE: `db="bold"` scrapes the BOLD website, so may be unstable. That is,
#' one day it may work, and the next it may fail. Open an issue if you
#' encounter an error: https://github.com/ropensci/taxize/issues
#'
#' @return A named list of data.frames with the children names of every
#' supplied taxa. You get an NA if there was no match in the database.
#'
#' @examples \dontrun{
#' # Plug in taxonomic IDs
#' children(161994, db = "itis")
#' children(8028, db = "ncbi")
#' ## works with numeric if as character as well
#' children(161994, db = "itis")
#' children(88899, db = "bold")
#' children(as.boldid(88899))
#'
#' # Plug in taxon names
#' children("Salmo", db = 'itis')
#' children("Salmo", db = 'ncbi')
#' children("Salmo", db = 'worms')
#' children("Salmo", db = 'bold')
#'
#' # Plug in IDs
#' (id <- get_wormsid("Gadus"))
#' children(id)
#'
#' # Many taxa
#' sp <- c("Tragia", "Schistocarpha", "Encalypta")
#' children(sp, db = 'itis')
#'
#' # Two data sources
#' (ids <- get_ids("Apis", db = c('ncbi','itis')))
#' children(ids)
#' ## same result
#' children(get_ids("Apis", db = c('ncbi','itis')))
#'
#' # Use the rows parameter
#' children("Poa", db = 'itis')
#' children("Poa", db = 'itis', rows=1)
#'
#' # use curl options
#' res <- children("Poa", db = 'itis', rows=1, verbose = TRUE)
#' }
children <- function(...){
UseMethod("children")
}
#' @export
#' @rdname children
children.default <- function(sci_id, db = NULL, rows = NA, x = NULL, ...) {
nstop(db)
if (!is.null(x)) {
lifecycle::deprecate_warn(when = "v0.9.97", what = "children(x)", with = "children(sci_id)")
sci_id <- x
}
results <- switch(
db,
itis = {
id <- process_children_ids(sci_id, db, get_tsn, rows = rows, ...)
stats::setNames(children(id, ...), sci_id)
},
ncbi = {
if (all(grepl("^[[:digit:]]*$", sci_id))) {
id <- sci_id
class(id) <- "uid"
stats::setNames(children(id, ...), sci_id)
} else {
out <- ncbi_children(name = sci_id, ...)
structure(out, class = 'children', db = 'ncbi', .Names = sci_id)
}
},
worms = {
id <- process_children_ids(sci_id, db, get_wormsid, rows = rows, ...)
stats::setNames(children(id, ...), sci_id)
},
bold = {
id <- process_children_ids(as.character(sci_id), db, get_boldid,
rows = rows, ...)
stats::setNames(children(id, ...), sci_id)
},
stop("the provided db value was not recognised", call. = FALSE)
)
set_output_types(results, sci_id, db)
}
# Ensure that the output types are consistent when searches return nothing
itis_blank <- data.frame(
parentname = character(0),
parenttsn = character(0),
rankname = character(0),
taxonname = character(0),
tsn = character(0),
stringsAsFactors = FALSE
)
worms_blank <- ncbi_blank <- bold_blank <-
data.frame(
childtaxa_id = character(0),
childtaxa_name = character(0),
childtaxa_rank = character(0),
stringsAsFactors = FALSE
)
set_output_types <- function(x, x_names, db){
blank_fun <- switch(
db,
itis = function(w) if (nrow(w) == 0 || all(is.na(w))) itis_blank else w,
ncbi = function(w) if (nrow(w) == 0 || all(is.na(w))) ncbi_blank else w,
worms = function(w) if (nrow(w) == 0 || all(is.na(w))) worms_blank else w,
bold = function(w) if (nrow(w) == 0 || all(is.na(w))) bold_blank else w
)
typed_results <- lapply(seq_along(x), function(i) blank_fun(x[[i]]))
names(typed_results) <- x_names
attributes(typed_results) <- attributes(x)
typed_results
}
process_children_ids <- function(input, db, fxn, ...){
g <- tryCatch(as.numeric(as.character(input)), warning = function(e) e)
if (inherits(g, "condition")) return(eval(fxn)(input, ...))
if (is.numeric(g) || is.character(input) && all(grepl("[[:digit:]]", input))) {
as_fxn <- switch(db, itis = as.tsn, worms = as.wormsid, bold = as.boldid)
as_fxn(input, check = FALSE)
} else {
eval(fxn)(input, ...)
}
}
#' @export
#' @rdname children
children.tsn <- function(sci_id, db = NULL, ...) {
warn_db(list(db = db), "itis")
fun <- function(y){
# return NA if NA is supplied
if (is.na(y)) {
out <- NA
} else {
out <- ritis::hierarchy_down(y, ...)
}
}
out <- lapply(sci_id, fun)
names(out) <- sci_id
class(out) <- 'children'
attr(out, 'db') <- 'itis'
return(out)
}
df2dt2tbl <- function(x) {
tibble::as_tibble(
data.table::setDF(
data.table::rbindlist(
x, use.names = TRUE, fill = TRUE)
)
)
}
#' @export
#' @rdname children
children.wormsid <- function(sci_id, db = NULL, ...) {
warn_db(list(db = db), "worms")
fun <- function(y){
# return NA if NA is supplied
if (is.na(y)) {
out <- NA
} else {
out <- worms_children_all(y, ...)
stats::setNames(
out[names(out) %in% c('AphiaID', 'scientificname', 'rank')],
c('childtaxa_id', 'childtaxa_name', 'childtaxa_rank')
)
}
}
out <- lapply(sci_id, fun)
names(out) <- sci_id
class(out) <- 'children'
attr(out, 'db') <- 'worms'
return(out)
}
#' @export
#' @rdname children
children.ids <- function(sci_id, db = NULL, ...) {
fun <- function(y, ...){
# return NA if NA is supplied
if (is.na(y)) {
out <- NA
} else {
out <- children(y, ...)
}
return(out)
}
out <- lapply(sci_id, fun)
class(out) <- 'children_ids'
return(out)
}
#' @export
#' @rdname children
children.uid <- function(sci_id, db = NULL, ...) {
warn_db(list(db = db), "uid")
out <- if (is.na(sci_id)) {
stats::setNames(list(ncbi_blank), sci_id)
} else {
ncbi_children(id = sci_id, ambiguous = TRUE, ...)
}
class(out) <- 'children'
attr(out, 'db') <- 'ncbi'
return(out)
}
#' @export
#' @rdname children
children.boldid <- function(sci_id, db = NULL, ...) {
warn_db(list(db = db), "bold")
out <- if (is.na(sci_id)) {
stats::setNames(list(bold_blank), sci_id)
} else {
bold_children(id = sci_id, ...)
}
class(out) <- 'children'
attr(out, 'db') <- 'bold'
return(out)
}
| /R/children.R | permissive | ropensci/taxize | R | false | false | 7,613 | r | #' Retrieve immediate children taxa for a given taxon name or ID.
#'
#' This function is different from [downstream()] in that it only
#' collects immediate taxonomic children, while [downstream()]
#' collects taxonomic names down to a specified taxonomic rank, e.g.,
#' getting all species in a family.
#'
#' @export
#' @param sci_id Vector of taxa names (character) or IDs (character or numeric)
#' to query.
#' @param db character; database to query. One or more of `itis`,
#' `ncbi`, `worms`, or `bold`. Note that each taxonomic data
#' source has their own identifiers, so that if you provide the wrong
#' `db` value for the identifier you could get a result, but it will
#' likely be wrong (not what you were expecting). If using ncbi, we recommend
#' getting an API key; see [taxize-authentication]
#' @param rows (numeric) Any number from 1 to infinity. If the default NA, all
#' rows are considered. Note that this parameter is ignored if you pass in a
#' taxonomic id of any of the acceptable classes: tsn. NCBI has a
#' method for this function but rows doesn't work.
#' @param x Deprecated, see `sci_id`
#' @param ... Further args passed on to [ritis::hierarchy_down()],
#' [ncbi_children()], [worrms::wm_children()], [bold_children()]
#' See those functions for what parameters can be passed on.
#'
#' @section ncbi:
#' note that with `db = "ncbi"`, we set `ambiguous = TRUE`; that is, children
#' taxa with words like "unclassified", "unknown", "uncultured", "sp." are
#' NOT removed
#'
#' @section bold:
#' BEWARE: `db="bold"` scrapes the BOLD website, so may be unstable. That is,
#' one day it may work, and the next it may fail. Open an issue if you
#' encounter an error: https://github.com/ropensci/taxize/issues
#'
#' @return A named list of data.frames with the children names of every
#' supplied taxa. You get an NA if there was no match in the database.
#'
#' @examples \dontrun{
#' # Plug in taxonomic IDs
#' children(161994, db = "itis")
#' children(8028, db = "ncbi")
#' ## works with numeric if as character as well
#' children(161994, db = "itis")
#' children(88899, db = "bold")
#' children(as.boldid(88899))
#'
#' # Plug in taxon names
#' children("Salmo", db = 'itis')
#' children("Salmo", db = 'ncbi')
#' children("Salmo", db = 'worms')
#' children("Salmo", db = 'bold')
#'
#' # Plug in IDs
#' (id <- get_wormsid("Gadus"))
#' children(id)
#'
#' # Many taxa
#' sp <- c("Tragia", "Schistocarpha", "Encalypta")
#' children(sp, db = 'itis')
#'
#' # Two data sources
#' (ids <- get_ids("Apis", db = c('ncbi','itis')))
#' children(ids)
#' ## same result
#' children(get_ids("Apis", db = c('ncbi','itis')))
#'
#' # Use the rows parameter
#' children("Poa", db = 'itis')
#' children("Poa", db = 'itis', rows=1)
#'
#' # use curl options
#' res <- children("Poa", db = 'itis', rows=1, verbose = TRUE)
#' }
children <- function(...){
UseMethod("children")
}
#' @export
#' @rdname children
children.default <- function(sci_id, db = NULL, rows = NA, x = NULL, ...) {
nstop(db)
if (!is.null(x)) {
lifecycle::deprecate_warn(when = "v0.9.97", what = "children(x)", with = "children(sci_id)")
sci_id <- x
}
results <- switch(
db,
itis = {
id <- process_children_ids(sci_id, db, get_tsn, rows = rows, ...)
stats::setNames(children(id, ...), sci_id)
},
ncbi = {
if (all(grepl("^[[:digit:]]*$", sci_id))) {
id <- sci_id
class(id) <- "uid"
stats::setNames(children(id, ...), sci_id)
} else {
out <- ncbi_children(name = sci_id, ...)
structure(out, class = 'children', db = 'ncbi', .Names = sci_id)
}
},
worms = {
id <- process_children_ids(sci_id, db, get_wormsid, rows = rows, ...)
stats::setNames(children(id, ...), sci_id)
},
bold = {
id <- process_children_ids(as.character(sci_id), db, get_boldid,
rows = rows, ...)
stats::setNames(children(id, ...), sci_id)
},
stop("the provided db value was not recognised", call. = FALSE)
)
set_output_types(results, sci_id, db)
}
# Ensure that the output types are consistent when searches return nothing
itis_blank <- data.frame(
parentname = character(0),
parenttsn = character(0),
rankname = character(0),
taxonname = character(0),
tsn = character(0),
stringsAsFactors = FALSE
)
worms_blank <- ncbi_blank <- bold_blank <-
data.frame(
childtaxa_id = character(0),
childtaxa_name = character(0),
childtaxa_rank = character(0),
stringsAsFactors = FALSE
)
set_output_types <- function(x, x_names, db){
blank_fun <- switch(
db,
itis = function(w) if (nrow(w) == 0 || all(is.na(w))) itis_blank else w,
ncbi = function(w) if (nrow(w) == 0 || all(is.na(w))) ncbi_blank else w,
worms = function(w) if (nrow(w) == 0 || all(is.na(w))) worms_blank else w,
bold = function(w) if (nrow(w) == 0 || all(is.na(w))) bold_blank else w
)
typed_results <- lapply(seq_along(x), function(i) blank_fun(x[[i]]))
names(typed_results) <- x_names
attributes(typed_results) <- attributes(x)
typed_results
}
process_children_ids <- function(input, db, fxn, ...){
g <- tryCatch(as.numeric(as.character(input)), warning = function(e) e)
if (inherits(g, "condition")) return(eval(fxn)(input, ...))
if (is.numeric(g) || is.character(input) && all(grepl("[[:digit:]]", input))) {
as_fxn <- switch(db, itis = as.tsn, worms = as.wormsid, bold = as.boldid)
as_fxn(input, check = FALSE)
} else {
eval(fxn)(input, ...)
}
}
#' @export
#' @rdname children
children.tsn <- function(sci_id, db = NULL, ...) {
warn_db(list(db = db), "itis")
fun <- function(y){
# return NA if NA is supplied
if (is.na(y)) {
out <- NA
} else {
out <- ritis::hierarchy_down(y, ...)
}
}
out <- lapply(sci_id, fun)
names(out) <- sci_id
class(out) <- 'children'
attr(out, 'db') <- 'itis'
return(out)
}
df2dt2tbl <- function(x) {
tibble::as_tibble(
data.table::setDF(
data.table::rbindlist(
x, use.names = TRUE, fill = TRUE)
)
)
}
#' @export
#' @rdname children
children.wormsid <- function(sci_id, db = NULL, ...) {
warn_db(list(db = db), "worms")
fun <- function(y){
# return NA if NA is supplied
if (is.na(y)) {
out <- NA
} else {
out <- worms_children_all(y, ...)
stats::setNames(
out[names(out) %in% c('AphiaID', 'scientificname', 'rank')],
c('childtaxa_id', 'childtaxa_name', 'childtaxa_rank')
)
}
}
out <- lapply(sci_id, fun)
names(out) <- sci_id
class(out) <- 'children'
attr(out, 'db') <- 'worms'
return(out)
}
#' @export
#' @rdname children
children.ids <- function(sci_id, db = NULL, ...) {
fun <- function(y, ...){
# return NA if NA is supplied
if (is.na(y)) {
out <- NA
} else {
out <- children(y, ...)
}
return(out)
}
out <- lapply(sci_id, fun)
class(out) <- 'children_ids'
return(out)
}
#' @export
#' @rdname children
children.uid <- function(sci_id, db = NULL, ...) {
warn_db(list(db = db), "uid")
out <- if (is.na(sci_id)) {
stats::setNames(list(ncbi_blank), sci_id)
} else {
ncbi_children(id = sci_id, ambiguous = TRUE, ...)
}
class(out) <- 'children'
attr(out, 'db') <- 'ncbi'
return(out)
}
#' @export
#' @rdname children
children.boldid <- function(sci_id, db = NULL, ...) {
warn_db(list(db = db), "bold")
out <- if (is.na(sci_id)) {
stats::setNames(list(bold_blank), sci_id)
} else {
bold_children(id = sci_id, ...)
}
class(out) <- 'children'
attr(out, 'db') <- 'bold'
return(out)
}
|
library(ggplot2)
library(plyr)
library(dplyr)
library(stringr)
# ## for troubleshooting
# rm(list=ls())
# load("issuesOpenEpics.RData")
# issuesDf <- issuesOpenEpics
plotBarMBacklog <- function(issuesDf){
issuesDf <- subset(issuesDf, issuesDf$closed_at=="none")
issuesDf$epic_name <- sapply(X = strsplit(issuesDf$title, ": "), FUN = function(x){x[2]})
issuesDf$prefixAlpha <- sapply(X = strsplit(issuesDf$prefix, "-"), FUN = function(x){x[1]})
issuesDf$count <- 1
df <- issuesDf
df <- subset(df, df$prefixAlpha=="E")
df <- df[order(df$epic_name, decreasing = F),]
th <- theme(panel.background = element_rect(fill = 'white'),
legend.position="none",
axis.text.x = element_blank(),
axis.title.x = element_blank()
)
plot <- ggplot(data=df)
plot <- plot + geom_bar(stat="identity", color = "white")
plot <- plot + aes(x=repo, y=count, fill = factor(count), label = str_wrap(epic_name, width = 30))
plot <- plot + theme(legend.position='none')
plot <- plot + scale_fill_manual(values = rep("#757575", nrow(df)))
plot <- plot + scale_y_reverse()
plot <- plot + geom_text(size = 3, position = position_stack(vjust = 0.5), color = "white")
plot <- plot + th
plot
}
| /functions/plotBarMBacklog.R | no_license | JimmyKuruvilla/scrumBoard | R | false | false | 1,247 | r | library(ggplot2)
library(plyr)
library(dplyr)
library(stringr)
# ## for troubleshooting
# rm(list=ls())
# load("issuesOpenEpics.RData")
# issuesDf <- issuesOpenEpics
plotBarMBacklog <- function(issuesDf){
issuesDf <- subset(issuesDf, issuesDf$closed_at=="none")
issuesDf$epic_name <- sapply(X = strsplit(issuesDf$title, ": "), FUN = function(x){x[2]})
issuesDf$prefixAlpha <- sapply(X = strsplit(issuesDf$prefix, "-"), FUN = function(x){x[1]})
issuesDf$count <- 1
df <- issuesDf
df <- subset(df, df$prefixAlpha=="E")
df <- df[order(df$epic_name, decreasing = F),]
th <- theme(panel.background = element_rect(fill = 'white'),
legend.position="none",
axis.text.x = element_blank(),
axis.title.x = element_blank()
)
plot <- ggplot(data=df)
plot <- plot + geom_bar(stat="identity", color = "white")
plot <- plot + aes(x=repo, y=count, fill = factor(count), label = str_wrap(epic_name, width = 30))
plot <- plot + theme(legend.position='none')
plot <- plot + scale_fill_manual(values = rep("#757575", nrow(df)))
plot <- plot + scale_y_reverse()
plot <- plot + geom_text(size = 3, position = position_stack(vjust = 0.5), color = "white")
plot <- plot + th
plot
}
|
#!/home/rpeng/bin/Rscript --no-save
start <- proc.time()
source("poisson-gibbs.R")
gibbsState <- .readRDS("gibbsState.rds")
set.seed(500)
## Run collapsed models
pgibbs(gibbsState,
maxit = 80000,
deleteCache = TRUE
)
print(proc.time() - start)
| /runPGibbs.R | no_license | rdpeng/multiDLMpaper | R | false | false | 273 | r | #!/home/rpeng/bin/Rscript --no-save
start <- proc.time()
source("poisson-gibbs.R")
gibbsState <- .readRDS("gibbsState.rds")
set.seed(500)
## Run collapsed models
pgibbs(gibbsState,
maxit = 80000,
deleteCache = TRUE
)
print(proc.time() - start)
|
# The right hand side of the log likelihood of a Batschelet-type distribution
ll_rhs_bat <- function(x, mu, kp, lam, tlam_fun) {
kp * sum(cos(tlam_fun(x - mu, lam)))
}
# # The left hand side of the log likelihood of a inverse Batschelet distribution
# ll_lhs_invbat <- function(n, kp, lam) {
# -n * (logBesselI(kp, 0) + log(K_kplam(kp, lam)))
# }
sample_mu_bat <- function(x, mu_cur, kp, lam, tlam_fun, mu_logprior_fun) {
# Sample a candidate from the distribution of mu when we have a von Mises
# distribution.
C_j <- sum(cos(x))
S_j <- sum(sin(x))
R_j <- sqrt(C_j^2 + S_j^2)
mu_can <- circglmbayes::rvmc(1, mu_cur, R_j * kp)
ll_can <- ll_rhs_bat(x, mu_can, kp, lam, tlam_fun)
ll_cur <- ll_rhs_bat(x, mu_cur, kp, lam, tlam_fun)
# The proposal is von Mises and thus symmetric, so the transition
# probabilities of MH are omitted here.
mu_lograt <- ll_can + mu_logprior_fun(mu_can) - ll_cur - mu_logprior_fun(mu_cur)
if (mu_lograt > log(stats::runif(1))) {
return(mu_can)
} else {
return(mu_cur)
}
}
sample_mu_bat_2 <- function(x, mu_cur, kp, lam, tlam_fun, mu_logprior_fun) {
# Sample a candidate from the distribution of mu when we have a von Mises
# distribution.
C_j <- sum(cos(x))
S_j <- sum(sin(x))
R_j <- sqrt(C_j^2 + S_j^2)
mu_hat <- atan2(S_j, C_j)
mu_can <- circglmbayes::rvmc(1, mu_hat, R_j * kp)
ll_can <- ll_rhs_bat(x, mu_can, kp, lam, tlam_fun)
ll_cur <- ll_rhs_bat(x, mu_cur, kp, lam, tlam_fun)
logp_mu_can_to_cur <- dvm(mu_cur, mu_hat, kp, log = TRUE)
logp_mu_cur_to_can <- dvm(mu_can, mu_hat, kp, log = TRUE)
mu_lograt <- ll_can + mu_logprior_fun(mu_can) + logp_mu_can_to_cur -
ll_cur - mu_logprior_fun(mu_cur) - logp_mu_cur_to_can
if (mu_lograt > log(stats::runif(1))) {
return(mu_can)
} else {
return(mu_cur)
}
}
# Reparametrized gamma proposal to make tuning parameter and mean interpretable.
# This is equal to chi square with 'mean' degrees of freedom if var_tune = 1.
dgammaprop <- function(x, mean = 1, var_tune = 1, log = FALSE) {
gamma_var <- 2 * mean * var_tune
gamma_scale <- gamma_var / mean
gamma_shape <- mean / gamma_scale
stats::dgamma(x, shape = gamma_shape, scale = gamma_scale, log = log)
}
rgammaprop <- function(n = 1, mean = 1, var_tune = 1) {
gamma_var <- 2 * mean * var_tune
gamma_scale <- gamma_var / mean
gamma_shape <- mean / gamma_scale
stats::rgamma(n = n, shape = gamma_shape, scale = gamma_scale)
}
sample_kp_bat <- function(x, mu, kp_cur, lam, llbat, kp_logprior_fun, var_tune = 1) {
# Sample a candidate
if (kp_cur > 0) {
kp_can <- rgammaprop(1, mean = kp_cur, var_tune = var_tune)
logp_kp_can_to_cur <- dgammaprop(kp_cur, kp_can, var_tune, log = TRUE)
logp_kp_cur_to_can <- dgammaprop(kp_can, kp_cur, var_tune, log = TRUE)
} else {
# If kp_cur == 0, usual sampling from gamma breaks down, so we retry by
# drawing proposals from the distribution with kp_cur == .1.
kp_can <- rgammaprop(1, mean = .1, var_tune = var_tune)
logp_kp_can_to_cur <- dgammaprop(.1, kp_can, var_tune, log = TRUE)
logp_kp_cur_to_can <- dgammaprop(kp_can, .1, var_tune, log = TRUE)
}
ll_can <- llbat(x, mu, kp_can, lam, log = TRUE)
ll_cur <- llbat(x, mu, kp_cur, lam, log = TRUE)
kp_lograt <- ll_can + kp_logprior_fun(kp_can) + logp_kp_can_to_cur -
ll_cur - kp_logprior_fun(kp_cur) - logp_kp_cur_to_can
if (kp_lograt > log(stats::runif(1))) {
return(kp_can)
} else {
return(kp_cur)
}
}
sample_lam_bat <- function(x, mu, kp, lam_cur, llbat, lam_logprior_fun, lam_bw = .05) {
# Sample a candidate
lam_can <- stats::runif(1, max(-1, lam_cur - lam_bw), min(1, lam_cur + lam_bw))
ll_can <- llbat(x, mu, kp, lam_can)
ll_cur <- llbat(x, mu, kp, lam_cur)
logp_lam_can_to_cur <- stats::dunif(lam_cur, max(-1, lam_can - lam_bw),
min(1, lam_can + lam_bw), log = TRUE)
logp_lam_cur_to_can <- stats::dunif(lam_can, max(-1, lam_cur - lam_bw),
min(1, lam_cur + lam_bw), log = TRUE)
lam_lograt <- ll_can + lam_logprior_fun(lam_can) + logp_lam_can_to_cur -
ll_cur - lam_logprior_fun(lam_cur) - logp_lam_cur_to_can
if (lam_lograt > log(stats::runif(1))) {
return(lam_can)
} else {
return(lam_cur)
}
}
sample_kp_and_lam_bat <- function(x, mu, kp_cur, lam_cur, llbat, lam_bw = .05,
kp_logprior_fun, lam_logprior_fun, var_tune = 1) {
if (kp_cur > 0) {
kp_can <- rgammaprop(1, mean = kp_cur, var_tune = var_tune)
logp_kp_can_to_cur <- dgammaprop(kp_cur, kp_can, var_tune, log = TRUE)
logp_kp_cur_to_can <- dgammaprop(kp_can, kp_cur, var_tune, log = TRUE)
} else {
# If kp_cur == 0, usual sampling from gamma breaks down, so we retry by
# drawing proposals from the distribution with kp_cur == .1.
kp_can <- rgammaprop(1, mean = .1, var_tune = var_tune)
logp_kp_can_to_cur <- dgammaprop(.1, kp_can, var_tune, log = TRUE)
logp_kp_cur_to_can <- dgammaprop(kp_can, .1, var_tune, log = TRUE)
}
lam_can <- stats::runif(1,
max(-1, lam_cur - lam_bw),
min(1, lam_cur + lam_bw))
logp_kp_can_to_cur <- dgammaprop(kp_cur, kp_can, var_tune, log = TRUE)
logp_kp_cur_to_can <- dgammaprop(kp_can, kp_cur, var_tune, log = TRUE)
logp_lam_can_to_cur <- stats::dunif(lam_cur,
max(-1, lam_can - lam_bw),
min(1, lam_can + lam_bw),
log = TRUE)
logp_lam_cur_to_can <- stats::dunif(lam_can,
max(-1, lam_cur - lam_bw),
min(1, lam_cur + lam_bw),
log = TRUE)
ll_can <- llbat(x, mu, kp_can, lam_can, log = TRUE)
ll_cur <- llbat(x, mu, kp_cur, lam_cur, log = TRUE)
kplam_lograt <- ll_can + kp_logprior_fun(kp_can) + lam_logprior_fun(lam_can) +
logp_kp_can_to_cur + logp_lam_can_to_cur -
ll_cur - kp_logprior_fun(kp_cur) - lam_logprior_fun(lam_cur) -
logp_kp_cur_to_can - logp_lam_cur_to_can
if (kplam_lograt > log(stats::runif(1))) {
return(c(kp_can, lam_can))
} else {
return(c(kp_cur, lam_cur))
}
}
#' A rescaled beta prior for lambda
#'
#' This prior is symmetric between -1 and 1, and captures the prior belief that
#' values of \code{lam} on the boundary of the parameter space are a prior
#' unlikely.
#'
#' @param lam Numeric;
#'
#' @return The log-prior probability.
#' @export
#'
lam_beta_log_prior_2_2 <- function(lam) {
stats::dbeta( (lam + 1) / 2, 2, 2, log = TRUE)
}
logBesselI <- function(x, nu) log(besselI(x, nu, expon.scaled = TRUE)) + x
#' The Jeffreys prior for the von Mises distribution
#'
#' @param kp Numeric;
#'
#' @return An unnormalized value.
#' @export
vm_kp_jeffreys_prior <- function(kp) {
logAkp <- logBesselI(kp, 1) - logBesselI(kp, 0)
return(
exp(0.5 * (log(kp) +
logAkp +
log(0.5 +
exp(logBesselI(kp, 2) - log(2) - logBesselI(kp, 0)) -
exp(logAkp) ^ 2))))
}
#' @rdname vm_kp_jeffreys_prior
#' @export
vm_kp_jeffreys_logprior <- function(kp) {
logAkp <- logBesselI(kp, 1) - logBesselI(kp, 0)
return(
0.5 * (log(kp) +
logAkp +
log(0.5 +
exp(logBesselI(kp, 2) - log(2) - logBesselI(kp, 0)) -
exp(logAkp) ^ 2)))
}
#' MCMC sampling for Batschelet-type distributions.
#'
#' @param x A numeric vector of angles, in radians
#' @param Q Integer; The number of iterations to return after taking burn in and
#' thinning into account.
#' @param burnin Integer; The number of (non-thinned) iterations to discard. No
#' burn in is performed by default.
#' @param thin Integer; Number of iterations to sample for each saved iteration.
#' Defaults to 1, which means no thinning.
#' @param n_comp Integer; Fixed number of components to estimate.
#' @param bat_type Either 'inverse' or 'power', the type of distribution to fit.
#' The two distributions are similar, but the power Batschelet distribution is
#' computationally much less demanding.
#' @param init_pmat A numeric matrix with \code{n_comp} rows and four columns,
#' corresponding to \eqn{\mu, \kappa, \lambda, \alpha}, in that order. Gives
#' starting values for the parameters. If any element is \code{NA}, it will be
#' given a default starting value. For \eqn{mu}, the default starting values
#' are equally spaced on the circle. For \eqn{\kappa}, the default starting
#' value is 5. For \eqn{\lambda}, the default starting value is 0, which
#' corresponds to the von Mises distribution. For \eqn{\alpha}, the default
#' starting value is \code{1/n_comp}.
#' @param fixed_pmat A numeric matrix with \code{n_comp} rows and four columns,
#' corresponding to \eqn{\mu, \kappa, \lambda, \alpha}, in that order. Any
#' element that is not \code{NA} in this matrix will be held constant at the
#' given value and not sampled.
#' @param mu_logprior_fun Function; A function with a single argument, which
#' returns the log of the prior probability of \eqn{\mu}. Defaults to a
#' uniform prior function.
#' @param kp_logprior_fun Function; A function with a single argument, which
#' returns the log of the prior probability of \eqn{\kappa}. Defaults to a
#' uniform prior function. In contrast to the other parameters, for
#' \eqn{\kappa} the constant (uniform) prior is improper.
#' @param lam_logprior_fun Function; A function with a single argument, which
#' returns the log of the prior probability of \eqn{\lambda}. Defaults to a
#' uniform prior function.
#' @param alph_prior_param Integer vector; The mixture weight parameter vector
#' \eqn{\alpha} is given its conjugate Dirichlet prior. The default is
#' \code{rep(1, n_comp)}, which is the noninformative uniform prior over the
#' \code{n_comp} simplex.
#' @param joint_kp_lam Logical; If \code{TRUE}, the parameters \code{kp} and
#' \code{lam} are drawn jointly. This can be beneficial if these are strongly
#' correlated.
#' @param verbose Integer up to 4; Determines the amount of printed debug
#' information.
#' @param lam_bw Numeric; the maximum distance from the current lambda at which
#' uniform proposals are drawn.
#' @param kp_bw Numeric; A tuning parameter for kappa proposals. If \code{kp_bw
#' == 1}, the chi-square distribution is used. Often, this distribution is too
#' wide, so this parameter can be set to \code{0 < kp_bw < 1} to use a gamma
#' proposal which has lower variance than the chi-square.
#' @param compute_variance Logical; Whether to add circular variance to the
#' returned mcmc sample.
#' @param compute_waic Logical; Whether to compute the WAIC. Can be
#' computationally demanding if \code{n * Q} is large.
#'
#' @importFrom stats var
#'
#' @return A numeric matrix of sampled parameter values.
#' @export
#'
#' @examples
#' x <- rinvbatmix(100)
#' mcmcBatscheletMixture(x, Q = 10)
mcmcBatscheletMixture <- function(x, Q = 1000,
                                  burnin = 0, thin = 1,
                                  n_comp = 4,
                                  bat_type = "inverse",
                                  init_pmat = matrix(NA, n_comp, 4),
                                  fixed_pmat = matrix(NA, n_comp, 4),
                                  joint_kp_lam = FALSE,
                                  kp_bw = 1,
                                  lam_bw = .05,
                                  mu_logprior_fun = function(mu) -log(2*pi),
                                  kp_logprior_fun = function(kp) 1,
                                  lam_logprior_fun = function(lam) -log(2),
                                  alph_prior_param = rep(1, n_comp),
                                  compute_variance = TRUE,
                                  compute_waic = FALSE,
                                  verbose = 0) {

  # Select the Batschelet type: density, likelihood and lambda-transformation
  # functions used throughout the sampler.
  if (bat_type == "inverse") {
    dbat_fun <- dinvbat
    llbat    <- likinvbat
    tlam_fun <- t_lam
  } else if (bat_type == "power") {
    dbat_fun <- dpowbat
    llbat    <- likpowbat
    tlam_fun <- tpow_lam
  } else {
    stop("Unknown Batschelet type.")
  }

  # Logical matrices flagging which entries of the parameter matrices are
  # left free (NA) versus user-supplied.
  na_fixedpmat <- is.na(fixed_pmat)
  na_initpmat  <- is.na(init_pmat)

  # Validate user-fixed parameter values.
  if (any(!na_fixedpmat[, 2] & fixed_pmat[, 2] < 0))      stop("Invalid fixed kappa value.")
  if (any(!na_fixedpmat[, 3] & abs(fixed_pmat[, 3]) > 1)) stop("Invalid fixed lambda value.")

  # Default starting values for any parameter not supplied in init_pmat:
  # means equally spaced on the circle, kappa = 5, lambda = 0 (von Mises),
  # and equal mixture weights.
  init_pmat[, 1] <- ifelse(na_initpmat[, 1], seq(0, 2*pi, length.out = n_comp + 1)[-1], init_pmat[, 1])
  init_pmat[, 2] <- ifelse(na_initpmat[, 2], rep(5, n_comp), init_pmat[, 2])
  init_pmat[, 3] <- ifelse(na_initpmat[, 3], rep(0, n_comp), init_pmat[, 3])
  init_pmat[, 4] <- ifelse(na_initpmat[, 4], rep(1/n_comp, n_comp), init_pmat[, 4])

  # Force x to be in range -pi, pi.
  x <- force_neg_pi_pi(x)
  n <- length(x)

  # Current state of the chain.
  mu_cur   <- init_pmat[, 1]
  kp_cur   <- init_pmat[, 2]
  lam_cur  <- init_pmat[, 3]
  alph_cur <- init_pmat[, 4]

  # Acceptance totals for kp and lam per component; converted to rates after
  # the run.
  acc_mat <- matrix(0, nrow = n_comp, ncol = 2)
  colnames(acc_mat) <- c("kp", "lam")
  rownames(acc_mat) <- 1:n_comp

  # Latent component labels.
  z_cur <- integer(n)

  output_matrix <- matrix(NA, nrow = Q, ncol = n_comp*4)
  colnames(output_matrix) <- c(paste0("mu_",   1:n_comp),
                               paste0("kp_",   1:n_comp),
                               paste0("lam_",  1:n_comp),
                               paste0("alph_", 1:n_comp))
  ll_vec <- numeric(Q)

  # Per-observation log-likelihoods, kept only if the WAIC is requested.
  if (compute_waic) {
    ll_each_th_curpars <- matrix(NA, nrow = Q, ncol = n)
  }
  if (compute_variance) {
    variance_matrix <- matrix(NA, nrow = Q, ncol = n_comp*3)
    colnames(variance_matrix) <- c(paste0("mean_res_len_", 1:n_comp),
                                   paste0("circ_var_",     1:n_comp),
                                   paste0("circ_sd_",      1:n_comp))
  }

  # Total number of iterations so that exactly Q samples remain after burn-in
  # and thinning.
  Qbythin <- Q * thin + burnin

  if (verbose) cat("Starting MCMC sampling.\n")
  if (verbose > 1) cat("Iteration:\n")

  for (i in 1:Qbythin) {
    if (verbose > 1) cat(sprintf("%5s, ", i))

    ### Sample group assignments z
    # Unnormalized probability of each component for each data point.
    W <- sapply(1:n_comp, function(k) {
      alph_cur[k] * dbat_fun(x, mu_cur[k], kp_cur[k], lam_cur[k])
    })
    w_rowsum <- rowSums(W)

    # If some datapoints have density numerically equal to zero under every
    # component, assign them equally to each component. Hopefully, this does
    # not happen again in the next iteration.
    W[w_rowsum == 0, ] <- 1/n_comp
    w_rowsum[w_rowsum == 0] <- 1
    W <- W / w_rowsum

    # Randomly sample group assignments from the component probabilities.
    z_cur <- apply(W, 1, function(row_probs) sample(x = 1:n_comp, size = 1,
                                                    prob = row_probs))

    # Sample weights alph from the conjugate Dirichlet full conditional,
    # unless the weights were fixed by the user.
    dir_asum <- sapply(1:n_comp, function(j) sum(z_cur == j))
    alph_cur <- ifelse(na_fixedpmat[, 4],
                       MCMCpack::rdirichlet(1, dir_asum + alph_prior_param),
                       alph_cur)

    # Given the current group assignments, the parameters of each component
    # can be sampled separately.
    for (j in 1:n_comp) {
      if (verbose > 2) cat(j)

      # Dataset assigned to this component.
      x_j <- x[z_cur == j]

      # If nothing is assigned to this component, leave its parameters as-is.
      if (length(x_j) == 0) {
        if (verbose > 1) cat("---")
        next
      }

      if (verbose > 2) cat("m")
      # Sample mu
      if (na_fixedpmat[j, 1]) {
        mu_cur[j] <- sample_mu_bat_2(x_j, mu_cur[j], kp_cur[j], lam_cur[j],
                                     tlam_fun, mu_logprior_fun)
      }

      if (joint_kp_lam) {
        if (verbose > 2) cat("kl")
        # Joint Metropolis-Hastings step for kappa and lambda.
        kplam_curj <- sample_kp_and_lam_bat(x_j,
                                            mu_cur[j], kp_cur[j], lam_cur[j],
                                            llbat, lam_bw = lam_bw,
                                            kp_logprior_fun, lam_logprior_fun,
                                            var_tune = kp_bw)

        # Assign the new values if they are new, and update acceptance counts.
        if (kp_cur[j] != kplam_curj[1]) {
          kp_cur[j] <- kplam_curj[1]
          acc_mat[j, 1] <- acc_mat[j, 1] + 1
        }
        if (lam_cur[j] != kplam_curj[2]) {
          lam_cur[j] <- kplam_curj[2]
          acc_mat[j, 2] <- acc_mat[j, 2] + 1
        }
      } else {
        if (verbose > 2) cat("k")
        # Sample kp
        if (na_fixedpmat[j, 2]) {
          kp_new <- sample_kp_bat(x_j, mu_cur[j], kp_cur[j], lam_cur[j],
                                  llbat, kp_logprior_fun, var_tune = kp_bw)
          if (kp_cur[j] != kp_new) {
            kp_cur[j] <- kp_new
            acc_mat[j, 1] <- acc_mat[j, 1] + 1
          }
        }

        if (verbose > 2) cat("l")
        # Sample lam
        if (na_fixedpmat[j, 3]) {
          lam_new <- sample_lam_bat(x_j, mu_cur[j], kp_cur[j], lam_cur[j],
                                    llbat, lam_logprior_fun, lam_bw = lam_bw)
          if (lam_cur[j] != lam_new) {
            lam_cur[j] <- lam_new
            acc_mat[j, 2] <- acc_mat[j, 2] + 1
          }
        }
      }
    }

    # Possibly extremely detailed debugging information.
    if (verbose > 3) {
      cat("\n",
          sprintf("mu: %8s, ",   round(mu_cur, 3)),  "\n",
          sprintf("kp: %8s, ",   round(kp_cur, 3)),  "\n",
          sprintf("lam: %8s, ",  round(lam_cur, 3)), "\n",
          sprintf("alph: %8s, ", round(alph_cur, 3)), "\n")
    }
    if (i %% 50 == 0 && verbose == 1) cat(i, ", \n", sep = "")
    if (i %% 5 == 0 && verbose > 1) cat("\n")

    # Store the state if this iteration is past burn-in and on the thinning
    # grid. Offsetting by `burnin` guarantees isav runs over 1, ..., Q; the
    # previous condition (i %% thin == 0 && i >= burnin) could yield
    # isav == 0 or fractional row indices whenever burnin > 0.
    if (i > burnin && (i - burnin) %% thin == 0) {
      isav <- (i - burnin) / thin

      output_matrix[isav, ] <- c(mu_cur, kp_cur, lam_cur, alph_cur)

      # A vector with log-densities for each data point.
      each_th_ll <- dbatmix(x = x, dbat_fun = dbat_fun,
                            mu_cur, kp_cur, lam_cur, alph_cur,
                            log = TRUE)
      ll_vec[isav] <- sum(each_th_ll)

      if (compute_waic) {
        ll_each_th_curpars[isav, ] <- each_th_ll
      }

      if (compute_variance) {
        # Compute mean resultant lengths for each component. (The index is
        # deliberately not named `i`, which would shadow the iteration
        # counter.)
        R_bar_cur <- sapply(1:n_comp, function(comp) {
          computeMeanResultantLengthBat(kp_cur[comp], lam_cur[comp], bat_type)
        })

        # Compute circular variances and standard deviations.
        circ_var_cur <- 1 - R_bar_cur
        circ_sd_cur  <- computeCircSD(R_bar_cur)

        # Put the results in an output matrix.
        variance_matrix[isav, ] <- c(R_bar_cur, circ_var_cur, circ_sd_cur)
      }
    }
  }

  # Convert acceptance totals to rates. Acceptances are counted on every one
  # of the Qbythin iterations (including burn-in and thinned ones), so divide
  # by Qbythin rather than by the Q retained samples, which would give rates
  # above 1 when thin > 1 or burnin > 0.
  acc_mat <- acc_mat / Qbythin

  if (verbose) cat("\nFinished.\n")

  # Collect output
  if (compute_variance) output_matrix <- cbind(output_matrix, variance_matrix)

  # The log-posterior function that we have just sampled from.
  log_posterior <- function(pvec, data = x) {

    # If there is one value in pvec missing, assume it is one of the alpha
    # weights because this might happen when bridgesampling for example.
    if ((length(pvec) %% 4) == 3) {
      n_comp <- (length(pvec) + 1)/4
      pvec <- c(pvec, 1 - sum(pvec[(3*n_comp + 1):(4*n_comp - 1)]))
    }

    n_comp <- length(pvec)/4
    mus   <- pvec[1:n_comp]
    kps   <- pvec[(n_comp + 1):(2*n_comp)]
    lams  <- pvec[(2*n_comp + 1):(3*n_comp)]
    alphs <- pvec[(3*n_comp + 1):(4*n_comp)]

    # Renormalize the weights if they do not sum to one. all.equal() is used
    # instead of identical() so that floating point noise does not trigger a
    # spurious renormalization and warning.
    if (!isTRUE(all.equal(sum(alphs), 1))) {
      warning("Log-posterior adapting weight vector alpha to sum to one.")
      alphs <- alphs / sum(alphs)
    }

    ll_part <- sum(dbatmix(x, dbat_fun = dbat_fun,
                           mus, kps, lams, alphs,
                           log = TRUE))
    prior_part <- sum(c(vapply(mus,  mu_logprior_fun,  0),
                        vapply(kps,  kp_logprior_fun,  0),
                        vapply(lams, lam_logprior_fun, 0),
                        log(MCMCpack::ddirichlet(alphs,
                                                 alpha = alph_prior_param))))
    ll_part + prior_part
  }

  # Create a new environment for log_posterior so that the file size of the
  # resulting object is not too large.
  log_post_env <- new.env()
  log_post_env$data             <- x
  log_post_env$n_comp           <- n_comp
  log_post_env$dbat_fun         <- dbat_fun
  log_post_env$mu_logprior_fun  <- mu_logprior_fun
  log_post_env$kp_logprior_fun  <- kp_logprior_fun
  log_post_env$lam_logprior_fun <- lam_logprior_fun
  log_post_env$alph_prior_param <- alph_prior_param
  environment(log_posterior) <- log_post_env

  # `start` matches the first saved iteration (burnin + thin), which keeps
  # the implied number of samples, (end - start) / thin + 1, equal to Q.
  out_list <- list(mcmc_sample = coda::mcmc(output_matrix,
                                            start = burnin + thin,
                                            end   = Qbythin,
                                            thin  = thin),
                   ll_vec = ll_vec,
                   log_posterior = log_posterior,
                   acceptance_rates = acc_mat,
                   prior_list = list(mu_logprior_fun, kp_logprior_fun,
                                     lam_logprior_fun, alph_prior_param))

  if (compute_waic) {
    # Log pointwise predictive density: per observation, average the
    # likelihood over the Q posterior draws on the natural scale, then sum
    # the logs. p_waic1/p_waic2 are the two penalty variants.
    lppd <- sum(log(colSums(exp(ll_each_th_curpars)))) - n * log(Q)
    waic_logofmean <- log(colMeans(exp(ll_each_th_curpars)))
    waic_meanoflog <- colMeans(ll_each_th_curpars)
    p_waic1 <- 2 * sum(waic_logofmean - waic_meanoflog)
    p_waic2 <- sum(apply(ll_each_th_curpars, 2, var))
    waic1 <- -2 * (lppd - p_waic1)
    waic2 <- -2 * (lppd - p_waic2)
    # NOTE(review): the reported `p_waic` entries are doubled (2 * p_waic)
    # while waic1/waic2 use the undoubled penalties -- confirm this labeling
    # is intended.
    out_list$ic <- list(lppd = lppd,
                        waic_1 = c(p_waic1 = 2 * p_waic1, waic1 = waic1),
                        waic_2 = c(p_waic2 = 2 * p_waic2, waic2 = waic2))
  } else {
    out_list$ic <- list()
  }

  return(out_list)
}
| /R/mcmcBatschelet.R | no_license | keesmulder/flexcircmix | R | false | false | 22,388 | r |
# The right hand side of the log likelihood of a Batschelet-type distribution
# Data-dependent (right-hand side) part of the log-likelihood of a
# Batschelet-type distribution: kappa times the summed cosines of the
# lambda-transformed, mean-centered angles.
ll_rhs_bat <- function(x, mu, kp, lam, tlam_fun) {
  transformed <- tlam_fun(x - mu, lam)
  kp * sum(cos(transformed))
}
# # The left hand side of the log likelihood of a inverse Batschelet distribution
# ll_lhs_invbat <- function(n, kp, lam) {
# -n * (logBesselI(kp, 0) + log(K_kplam(kp, lam)))
# }
sample_mu_bat <- function(x, mu_cur, kp, lam, tlam_fun, mu_logprior_fun) {
  # Sufficient statistics of the sample, used to build a von Mises proposal
  # as if the data were von Mises distributed.
  C_j <- sum(cos(x))
  S_j <- sum(sin(x))
  R_j <- sqrt(C_j^2 + S_j^2)

  # Propose from a von Mises centred at the current value. This proposal is
  # symmetric, so the MH transition probabilities cancel from the ratio.
  mu_can <- circglmbayes::rvmc(1, mu_cur, R_j * kp)

  # Unnormalized log-posterior of mu (likelihood part plus prior).
  log_target <- function(mu) {
    ll_rhs_bat(x, mu, kp, lam, tlam_fun) + mu_logprior_fun(mu)
  }

  mu_lograt <- log_target(mu_can) - log_target(mu_cur)
  if (mu_lograt > log(stats::runif(1))) mu_can else mu_cur
}
sample_mu_bat_2 <- function(x, mu_cur, kp, lam, tlam_fun, mu_logprior_fun) {
  # Independence Metropolis-Hastings step for mu: the proposal is a von Mises
  # distribution centred at the sample mean direction mu_hat with
  # concentration R_j * kp, as if the data were von Mises distributed.
  C_j <- sum(cos(x))
  S_j <- sum(sin(x))
  R_j <- sqrt(C_j^2 + S_j^2)
  mu_hat <- atan2(S_j, C_j)
  mu_can <- circglmbayes::rvmc(1, mu_hat, R_j * kp)

  ll_can <- ll_rhs_bat(x, mu_can, kp, lam, tlam_fun)
  ll_cur <- ll_rhs_bat(x, mu_cur, kp, lam, tlam_fun)

  # Proposal transition densities. These must be evaluated under the actual
  # proposal distribution, which has concentration R_j * kp. (Previously the
  # density was evaluated with concentration kp, mismatching the proposal
  # above and biasing the MH correction for this independence sampler.)
  logp_mu_can_to_cur <- dvm(mu_cur, mu_hat, R_j * kp, log = TRUE)
  logp_mu_cur_to_can <- dvm(mu_can, mu_hat, R_j * kp, log = TRUE)

  mu_lograt <- ll_can + mu_logprior_fun(mu_can) + logp_mu_can_to_cur -
    ll_cur - mu_logprior_fun(mu_cur) - logp_mu_cur_to_can

  if (mu_lograt > log(stats::runif(1))) {
    return(mu_can)
  } else {
    return(mu_cur)
  }
}
# Reparametrized gamma proposal to make tuning parameter and mean interpretable.
# This is equal to chi square with 'mean' degrees of freedom if var_tune = 1.
# Density of a gamma proposal reparametrised by its mean and a variance
# tuning factor. With var_tune = 1 this coincides with the chi-square density
# with `mean` degrees of freedom; 0 < var_tune < 1 gives a narrower gamma.
dgammaprop <- function(x, mean = 1, var_tune = 1, log = FALSE) {
  prop_var   <- 2 * mean * var_tune
  prop_scale <- prop_var / mean
  prop_shape <- mean / prop_scale
  stats::dgamma(x, shape = prop_shape, scale = prop_scale, log = log)
}
# Random draws from the mean/var_tune-parametrised gamma proposal; see
# dgammaprop() for the parametrisation.
rgammaprop <- function(n = 1, mean = 1, var_tune = 1) {
  prop_var   <- 2 * mean * var_tune
  prop_scale <- prop_var / mean
  prop_shape <- mean / prop_scale
  stats::rgamma(n = n, shape = prop_shape, scale = prop_scale)
}
sample_kp_bat <- function(x, mu, kp_cur, lam, llbat, kp_logprior_fun, var_tune = 1) {
  # MH step for kappa with a gamma proposal centred at the current value.
  # When kp_cur == 0 the gamma proposal degenerates, so both the proposal and
  # the corresponding transition densities are centred at .1 instead.
  prop_mean <- if (kp_cur > 0) kp_cur else .1
  kp_can <- rgammaprop(1, mean = prop_mean, var_tune = var_tune)
  logp_kp_can_to_cur <- dgammaprop(prop_mean, kp_can, var_tune, log = TRUE)
  logp_kp_cur_to_can <- dgammaprop(kp_can, prop_mean, var_tune, log = TRUE)

  ll_can <- llbat(x, mu, kp_can, lam, log = TRUE)
  ll_cur <- llbat(x, mu, kp_cur, lam, log = TRUE)

  # Asymmetric proposal, so the transition densities enter the MH ratio.
  kp_lograt <- ll_can + kp_logprior_fun(kp_can) + logp_kp_can_to_cur -
    ll_cur - kp_logprior_fun(kp_cur) - logp_kp_cur_to_can

  if (kp_lograt > log(stats::runif(1))) kp_can else kp_cur
}
sample_lam_bat <- function(x, mu, kp, lam_cur, llbat, lam_logprior_fun, lam_bw = .05) {
  # Propose lambda uniformly in a window of half-width lam_bw around the
  # current value, truncated to the admissible interval [-1, 1].
  lam_can <- stats::runif(1, max(-1, lam_cur - lam_bw), min(1, lam_cur + lam_bw))

  # Log-likelihood under candidate and current lambda. `log = TRUE` is passed
  # explicitly, consistent with sample_kp_bat() and sample_kp_and_lam_bat(),
  # which also treat llbat's result as a log-likelihood; previously llbat was
  # called here without it and relied on the callee's default.
  ll_can <- llbat(x, mu, kp, lam_can, log = TRUE)
  ll_cur <- llbat(x, mu, kp, lam_cur, log = TRUE)

  # Truncation at the boundaries makes the proposal asymmetric, so the
  # transition densities must enter the MH ratio.
  logp_lam_can_to_cur <- stats::dunif(lam_cur, max(-1, lam_can - lam_bw),
                                      min(1, lam_can + lam_bw), log = TRUE)
  logp_lam_cur_to_can <- stats::dunif(lam_can, max(-1, lam_cur - lam_bw),
                                      min(1, lam_cur + lam_bw), log = TRUE)

  lam_lograt <- ll_can + lam_logprior_fun(lam_can) + logp_lam_can_to_cur -
    ll_cur - lam_logprior_fun(lam_cur) - logp_lam_cur_to_can

  if (lam_lograt > log(stats::runif(1))) {
    return(lam_can)
  } else {
    return(lam_cur)
  }
}
sample_kp_and_lam_bat <- function(x, mu, kp_cur, lam_cur, llbat, lam_bw = .05,
                                  kp_logprior_fun, lam_logprior_fun, var_tune = 1) {
  # Joint MH step for (kappa, lambda), useful when the two parameters are
  # strongly correlated a posteriori.
  if (kp_cur > 0) {
    kp_can <- rgammaprop(1, mean = kp_cur, var_tune = var_tune)
    logp_kp_can_to_cur <- dgammaprop(kp_cur, kp_can, var_tune, log = TRUE)
    logp_kp_cur_to_can <- dgammaprop(kp_can, kp_cur, var_tune, log = TRUE)
  } else {
    # If kp_cur == 0, usual sampling from gamma breaks down, so we retry by
    # drawing proposals from the distribution with kp_cur == .1.
    kp_can <- rgammaprop(1, mean = .1, var_tune = var_tune)
    logp_kp_can_to_cur <- dgammaprop(.1, kp_can, var_tune, log = TRUE)
    logp_kp_cur_to_can <- dgammaprop(kp_can, .1, var_tune, log = TRUE)
  }

  # Uniform proposal for lambda, truncated to [-1, 1].
  lam_can <- stats::runif(1,
                          max(-1, lam_cur - lam_bw),
                          min(1, lam_cur + lam_bw))

  # NOTE: the kp transition densities are intentionally NOT recomputed here.
  # Recomputing them unconditionally (as an earlier version did) overwrote
  # the kp_cur == 0 special case above and produced NaN in the acceptance
  # ratio (gamma proposal with zero mean).
  logp_lam_can_to_cur <- stats::dunif(lam_cur,
                                      max(-1, lam_can - lam_bw),
                                      min(1, lam_can + lam_bw),
                                      log = TRUE)
  logp_lam_cur_to_can <- stats::dunif(lam_can,
                                      max(-1, lam_cur - lam_bw),
                                      min(1, lam_cur + lam_bw),
                                      log = TRUE)

  ll_can <- llbat(x, mu, kp_can, lam_can, log = TRUE)
  ll_cur <- llbat(x, mu, kp_cur, lam_cur, log = TRUE)

  kplam_lograt <- ll_can + kp_logprior_fun(kp_can) + lam_logprior_fun(lam_can) +
    logp_kp_can_to_cur + logp_lam_can_to_cur -
    ll_cur - kp_logprior_fun(kp_cur) - lam_logprior_fun(lam_cur) -
    logp_kp_cur_to_can - logp_lam_cur_to_can

  if (kplam_lograt > log(stats::runif(1))) {
    return(c(kp_can, lam_can))
  } else {
    return(c(kp_cur, lam_cur))
  }
}
#' A rescaled beta prior for lambda
#'
#' This prior is symmetric between -1 and 1, and captures the prior belief that
#' values of \code{lam} on the boundary of the parameter space are a prior
#' unlikely.
#'
#' @param lam Numeric;
#'
#' @return The log-prior probability.
#' @export
#'
lam_beta_log_prior_2_2 <- function(lam) {
  # Map lam from [-1, 1] onto [0, 1] and evaluate a symmetric Beta(2, 2)
  # log-density there, which downweights the boundaries of the lambda space.
  lam01 <- (lam + 1) / 2
  stats::dbeta(lam01, shape1 = 2, shape2 = 2, log = TRUE)
}
logBesselI <- function(x, nu) log(besselI(x, nu, expon.scaled = TRUE)) + x
#' The Jeffreys prior for the von Mises distribution
#'
#' @param kp Numeric;
#'
#' @return An unnormalized value.
#' @export
vm_kp_jeffreys_prior <- function(kp) {
  # logAkp is the log of A(kp) = I_1(kp) / I_0(kp). The bracketed term below
  # combines the Bessel ratios that make up the prior; everything is kept on
  # the log scale until the final exponentiation.
  logAkp <- logBesselI(kp, 1) - logBesselI(kp, 0)
  bracket <- 0.5 +
    exp(logBesselI(kp, 2) - log(2) - logBesselI(kp, 0)) -
    exp(logAkp)^2
  exp(0.5 * (log(kp) + logAkp + log(bracket)))
}
#' @rdname vm_kp_jeffreys_prior
#' @export
vm_kp_jeffreys_logprior <- function(kp) {
  # Log-scale version of vm_kp_jeffreys_prior(): identical expression without
  # the final exponentiation. logAkp is the log of A(kp) = I_1(kp) / I_0(kp).
  logAkp <- logBesselI(kp, 1) - logBesselI(kp, 0)
  bracket <- 0.5 +
    exp(logBesselI(kp, 2) - log(2) - logBesselI(kp, 0)) -
    exp(logAkp)^2
  0.5 * (log(kp) + logAkp + log(bracket))
}
#' MCMC sampling for Batschelet-type distributions.
#'
#' @param x A numeric vector of angles, in radians
#' @param Q Integer; The number of iterations to return after taking burn in and
#' thinning into account.
#' @param burnin Integer; The number of (non-thinned) iterations to discard. No
#' burn in is performed by default.
#' @param thin Integer; Number of iterations to sample for each saved iteration.
#' Defaults to 1, which means no thinning.
#' @param n_comp Integer; Fixed number of components to estimate.
#' @param bat_type Either 'inverse' or 'power', the type of distribution to fit.
#' The two distributions are similar, but the power Batschelet distribution is
#' computationally much less demanding.
#' @param init_pmat A numeric matrix with \code{n_comp} rows and four columns,
#' corresponding to \eqn{\mu, \kappa, \lambda, \alpha}, in that order. Gives
#' starting values for the parameters. If any element is \code{NA}, it will be
#' given a default starting value. For \eqn{mu}, the default starting values
#' are equally spaced on the circle. For \eqn{\kappa}, the default starting
#' value is 5. For \eqn{\lambda}, the default starting value is 0, which
#' corresponds to the von Mises distribution. For \eqn{\alpha}, the default
#' starting value is \code{1/n_comp}.
#' @param fixed_pmat A numeric matrix with \code{n_comp} rows and four columns,
#' corresponding to \eqn{\mu, \kappa, \lambda, \alpha}, in that order. Any
#' element that is not \code{NA} in this matrix will be held constant at the
#' given value and not sampled.
#' @param mu_logprior_fun Function; A function with a single argument, which
#' returns the log of the prior probability of \eqn{\mu}. Defaults to a
#' uniform prior function.
#' @param kp_logprior_fun Function; A function with a single argument, which
#' returns the log of the prior probability of \eqn{\kappa}. Defaults to a
#' uniform prior function. In contrast to the other parameters, for
#' \eqn{\kappa} the constant (uniform) prior is improper.
#' @param lam_logprior_fun Function; A function with a single argument, which
#' returns the log of the prior probability of \eqn{\lambda}. Defaults to a
#' uniform prior function.
#' @param alph_prior_param Integer vector; The mixture weight parameter vector
#' \eqn{\alpha} is given its conjugate Dirichlet prior. The default is
#' \code{rep(1, n_comp)}, which is the noninformative uniform prior over the
#' \code{n_comp} simplex.
#' @param joint_kp_lam Logical; If \code{TRUE}, the parameters \code{kp} and
#' \code{lam} are drawn jointly. This can be beneficial if these are strongly
#' correlated.
#' @param verbose Integer up to 4; Determines the amount of printed debug
#' information.
#' @param lam_bw Numeric; the maximum distance from the current lambda at which
#' uniform proposals are drawn.
#' @param kp_bw Numeric; A tuning parameter for kappa proposals. If \code{kp_bw
#' == 1}, the chi-square distribution is used. Often, this distribution is too
#' wide, so this parameter can be set to \code{0 < kp_bw < 1} to use a gamma
#' proposal which has lower variance than the chi-square.
#' @param compute_variance Logical; Whether to add circular variance to the
#' returned mcmc sample.
#' @param compute_waic Logical; Whether to compute the WAIC. Can be
#' computationally demanding if \code{n * Q} is large.
#'
#' @importFrom stats var
#'
#' @return A numeric matrix of sampled parameter values.
#' @export
#'
#' @examples
#' x <- rinvbatmix(100)
#' mcmcBatscheletMixture(x, Q = 10)
mcmcBatscheletMixture <- function(x, Q = 1000,
                                  burnin = 0, thin = 1,
                                  n_comp = 4,
                                  bat_type = "inverse",
                                  init_pmat = matrix(NA, n_comp, 4),
                                  fixed_pmat = matrix(NA, n_comp, 4),
                                  joint_kp_lam = FALSE,
                                  kp_bw = 1,
                                  lam_bw = .05,
                                  mu_logprior_fun = function(mu) -log(2*pi),
                                  kp_logprior_fun = function(kp) 1,
                                  lam_logprior_fun = function(lam) -log(2),
                                  alph_prior_param = rep(1, n_comp),
                                  compute_variance = TRUE,
                                  compute_waic = FALSE,
                                  verbose = 0) {

  # Select the Batschelet type: density, likelihood and lambda-transformation
  # functions used throughout the sampler.
  if (bat_type == "inverse") {
    dbat_fun <- dinvbat
    llbat    <- likinvbat
    tlam_fun <- t_lam
  } else if (bat_type == "power") {
    dbat_fun <- dpowbat
    llbat    <- likpowbat
    tlam_fun <- tpow_lam
  } else {
    stop("Unknown Batschelet type.")
  }

  # Logical matrices flagging which entries of the parameter matrices are
  # left free (NA) versus user-supplied.
  na_fixedpmat <- is.na(fixed_pmat)
  na_initpmat  <- is.na(init_pmat)

  # Validate user-fixed parameter values.
  if (any(!na_fixedpmat[, 2] & fixed_pmat[, 2] < 0))      stop("Invalid fixed kappa value.")
  if (any(!na_fixedpmat[, 3] & abs(fixed_pmat[, 3]) > 1)) stop("Invalid fixed lambda value.")

  # Default starting values for any parameter not supplied in init_pmat:
  # means equally spaced on the circle, kappa = 5, lambda = 0 (von Mises),
  # and equal mixture weights.
  init_pmat[, 1] <- ifelse(na_initpmat[, 1], seq(0, 2*pi, length.out = n_comp + 1)[-1], init_pmat[, 1])
  init_pmat[, 2] <- ifelse(na_initpmat[, 2], rep(5, n_comp), init_pmat[, 2])
  init_pmat[, 3] <- ifelse(na_initpmat[, 3], rep(0, n_comp), init_pmat[, 3])
  init_pmat[, 4] <- ifelse(na_initpmat[, 4], rep(1/n_comp, n_comp), init_pmat[, 4])

  # Force x to be in range -pi, pi.
  x <- force_neg_pi_pi(x)
  n <- length(x)

  # Current state of the chain.
  mu_cur   <- init_pmat[, 1]
  kp_cur   <- init_pmat[, 2]
  lam_cur  <- init_pmat[, 3]
  alph_cur <- init_pmat[, 4]

  # Acceptance totals for kp and lam per component; converted to rates after
  # the run.
  acc_mat <- matrix(0, nrow = n_comp, ncol = 2)
  colnames(acc_mat) <- c("kp", "lam")
  rownames(acc_mat) <- 1:n_comp

  # Latent component labels.
  z_cur <- integer(n)

  output_matrix <- matrix(NA, nrow = Q, ncol = n_comp*4)
  colnames(output_matrix) <- c(paste0("mu_",   1:n_comp),
                               paste0("kp_",   1:n_comp),
                               paste0("lam_",  1:n_comp),
                               paste0("alph_", 1:n_comp))
  ll_vec <- numeric(Q)

  # Per-observation log-likelihoods, kept only if the WAIC is requested.
  if (compute_waic) {
    ll_each_th_curpars <- matrix(NA, nrow = Q, ncol = n)
  }
  if (compute_variance) {
    variance_matrix <- matrix(NA, nrow = Q, ncol = n_comp*3)
    colnames(variance_matrix) <- c(paste0("mean_res_len_", 1:n_comp),
                                   paste0("circ_var_",     1:n_comp),
                                   paste0("circ_sd_",      1:n_comp))
  }

  # Total number of iterations so that exactly Q samples remain after burn-in
  # and thinning.
  Qbythin <- Q * thin + burnin

  if (verbose) cat("Starting MCMC sampling.\n")
  if (verbose > 1) cat("Iteration:\n")

  for (i in 1:Qbythin) {
    if (verbose > 1) cat(sprintf("%5s, ", i))

    ### Sample group assignments z
    # Unnormalized probability of each component for each data point.
    W <- sapply(1:n_comp, function(k) {
      alph_cur[k] * dbat_fun(x, mu_cur[k], kp_cur[k], lam_cur[k])
    })
    w_rowsum <- rowSums(W)

    # If some datapoints have density numerically equal to zero under every
    # component, assign them equally to each component. Hopefully, this does
    # not happen again in the next iteration.
    W[w_rowsum == 0, ] <- 1/n_comp
    w_rowsum[w_rowsum == 0] <- 1
    W <- W / w_rowsum

    # Randomly sample group assignments from the component probabilities.
    z_cur <- apply(W, 1, function(row_probs) sample(x = 1:n_comp, size = 1,
                                                    prob = row_probs))

    # Sample weights alph from the conjugate Dirichlet full conditional,
    # unless the weights were fixed by the user.
    dir_asum <- sapply(1:n_comp, function(j) sum(z_cur == j))
    alph_cur <- ifelse(na_fixedpmat[, 4],
                       MCMCpack::rdirichlet(1, dir_asum + alph_prior_param),
                       alph_cur)

    # Given the current group assignments, the parameters of each component
    # can be sampled separately.
    for (j in 1:n_comp) {
      if (verbose > 2) cat(j)

      # Dataset assigned to this component.
      x_j <- x[z_cur == j]

      # If nothing is assigned to this component, leave its parameters as-is.
      if (length(x_j) == 0) {
        if (verbose > 1) cat("---")
        next
      }

      if (verbose > 2) cat("m")
      # Sample mu
      if (na_fixedpmat[j, 1]) {
        mu_cur[j] <- sample_mu_bat_2(x_j, mu_cur[j], kp_cur[j], lam_cur[j],
                                     tlam_fun, mu_logprior_fun)
      }

      if (joint_kp_lam) {
        if (verbose > 2) cat("kl")
        # Joint Metropolis-Hastings step for kappa and lambda.
        kplam_curj <- sample_kp_and_lam_bat(x_j,
                                            mu_cur[j], kp_cur[j], lam_cur[j],
                                            llbat, lam_bw = lam_bw,
                                            kp_logprior_fun, lam_logprior_fun,
                                            var_tune = kp_bw)

        # Assign the new values if they are new, and update acceptance counts.
        if (kp_cur[j] != kplam_curj[1]) {
          kp_cur[j] <- kplam_curj[1]
          acc_mat[j, 1] <- acc_mat[j, 1] + 1
        }
        if (lam_cur[j] != kplam_curj[2]) {
          lam_cur[j] <- kplam_curj[2]
          acc_mat[j, 2] <- acc_mat[j, 2] + 1
        }
      } else {
        if (verbose > 2) cat("k")
        # Sample kp
        if (na_fixedpmat[j, 2]) {
          kp_new <- sample_kp_bat(x_j, mu_cur[j], kp_cur[j], lam_cur[j],
                                  llbat, kp_logprior_fun, var_tune = kp_bw)
          if (kp_cur[j] != kp_new) {
            kp_cur[j] <- kp_new
            acc_mat[j, 1] <- acc_mat[j, 1] + 1
          }
        }

        if (verbose > 2) cat("l")
        # Sample lam
        if (na_fixedpmat[j, 3]) {
          lam_new <- sample_lam_bat(x_j, mu_cur[j], kp_cur[j], lam_cur[j],
                                    llbat, lam_logprior_fun, lam_bw = lam_bw)
          if (lam_cur[j] != lam_new) {
            lam_cur[j] <- lam_new
            acc_mat[j, 2] <- acc_mat[j, 2] + 1
          }
        }
      }
    }

    # Possibly extremely detailed debugging information.
    if (verbose > 3) {
      cat("\n",
          sprintf("mu: %8s, ",   round(mu_cur, 3)),  "\n",
          sprintf("kp: %8s, ",   round(kp_cur, 3)),  "\n",
          sprintf("lam: %8s, ",  round(lam_cur, 3)), "\n",
          sprintf("alph: %8s, ", round(alph_cur, 3)), "\n")
    }
    if (i %% 50 == 0 && verbose == 1) cat(i, ", \n", sep = "")
    if (i %% 5 == 0 && verbose > 1) cat("\n")

    # Store the state if this iteration is past burn-in and on the thinning
    # grid. Offsetting by `burnin` guarantees isav runs over 1, ..., Q; the
    # previous condition (i %% thin == 0 && i >= burnin) could yield
    # isav == 0 or fractional row indices whenever burnin > 0.
    if (i > burnin && (i - burnin) %% thin == 0) {
      isav <- (i - burnin) / thin

      output_matrix[isav, ] <- c(mu_cur, kp_cur, lam_cur, alph_cur)

      # A vector with log-densities for each data point.
      each_th_ll <- dbatmix(x = x, dbat_fun = dbat_fun,
                            mu_cur, kp_cur, lam_cur, alph_cur,
                            log = TRUE)
      ll_vec[isav] <- sum(each_th_ll)

      if (compute_waic) {
        ll_each_th_curpars[isav, ] <- each_th_ll
      }

      if (compute_variance) {
        # Compute mean resultant lengths for each component. (The index is
        # deliberately not named `i`, which would shadow the iteration
        # counter.)
        R_bar_cur <- sapply(1:n_comp, function(comp) {
          computeMeanResultantLengthBat(kp_cur[comp], lam_cur[comp], bat_type)
        })

        # Compute circular variances and standard deviations.
        circ_var_cur <- 1 - R_bar_cur
        circ_sd_cur  <- computeCircSD(R_bar_cur)

        # Put the results in an output matrix.
        variance_matrix[isav, ] <- c(R_bar_cur, circ_var_cur, circ_sd_cur)
      }
    }
  }

  # Convert acceptance totals to rates. Acceptances are counted on every one
  # of the Qbythin iterations (including burn-in and thinned ones), so divide
  # by Qbythin rather than by the Q retained samples, which would give rates
  # above 1 when thin > 1 or burnin > 0.
  acc_mat <- acc_mat / Qbythin

  if (verbose) cat("\nFinished.\n")

  # Collect output
  if (compute_variance) output_matrix <- cbind(output_matrix, variance_matrix)

  # The log-posterior function that we have just sampled from.
  log_posterior <- function(pvec, data = x) {

    # If there is one value in pvec missing, assume it is one of the alpha
    # weights because this might happen when bridgesampling for example.
    if ((length(pvec) %% 4) == 3) {
      n_comp <- (length(pvec) + 1)/4
      pvec <- c(pvec, 1 - sum(pvec[(3*n_comp + 1):(4*n_comp - 1)]))
    }

    n_comp <- length(pvec)/4
    mus   <- pvec[1:n_comp]
    kps   <- pvec[(n_comp + 1):(2*n_comp)]
    lams  <- pvec[(2*n_comp + 1):(3*n_comp)]
    alphs <- pvec[(3*n_comp + 1):(4*n_comp)]

    # Renormalize the weights if they do not sum to one. all.equal() is used
    # instead of identical() so that floating point noise does not trigger a
    # spurious renormalization and warning.
    if (!isTRUE(all.equal(sum(alphs), 1))) {
      warning("Log-posterior adapting weight vector alpha to sum to one.")
      alphs <- alphs / sum(alphs)
    }

    ll_part <- sum(dbatmix(x, dbat_fun = dbat_fun,
                           mus, kps, lams, alphs,
                           log = TRUE))
    prior_part <- sum(c(vapply(mus,  mu_logprior_fun,  0),
                        vapply(kps,  kp_logprior_fun,  0),
                        vapply(lams, lam_logprior_fun, 0),
                        log(MCMCpack::ddirichlet(alphs,
                                                 alpha = alph_prior_param))))
    ll_part + prior_part
  }

  # Create a new environment for log_posterior so that the file size of the
  # resulting object is not too large.
  log_post_env <- new.env()
  log_post_env$data             <- x
  log_post_env$n_comp           <- n_comp
  log_post_env$dbat_fun         <- dbat_fun
  log_post_env$mu_logprior_fun  <- mu_logprior_fun
  log_post_env$kp_logprior_fun  <- kp_logprior_fun
  log_post_env$lam_logprior_fun <- lam_logprior_fun
  log_post_env$alph_prior_param <- alph_prior_param
  environment(log_posterior) <- log_post_env

  # `start` matches the first saved iteration (burnin + thin), which keeps
  # the implied number of samples, (end - start) / thin + 1, equal to Q.
  out_list <- list(mcmc_sample = coda::mcmc(output_matrix,
                                            start = burnin + thin,
                                            end   = Qbythin,
                                            thin  = thin),
                   ll_vec = ll_vec,
                   log_posterior = log_posterior,
                   acceptance_rates = acc_mat,
                   prior_list = list(mu_logprior_fun, kp_logprior_fun,
                                     lam_logprior_fun, alph_prior_param))

  if (compute_waic) {
    # Log pointwise predictive density: per observation, average the
    # likelihood over the Q posterior draws on the natural scale, then sum
    # the logs. p_waic1/p_waic2 are the two penalty variants.
    lppd <- sum(log(colSums(exp(ll_each_th_curpars)))) - n * log(Q)
    waic_logofmean <- log(colMeans(exp(ll_each_th_curpars)))
    waic_meanoflog <- colMeans(ll_each_th_curpars)
    p_waic1 <- 2 * sum(waic_logofmean - waic_meanoflog)
    p_waic2 <- sum(apply(ll_each_th_curpars, 2, var))
    waic1 <- -2 * (lppd - p_waic1)
    waic2 <- -2 * (lppd - p_waic2)
    # NOTE(review): the reported `p_waic` entries are doubled (2 * p_waic)
    # while waic1/waic2 use the undoubled penalties -- confirm this labeling
    # is intended.
    out_list$ic <- list(lppd = lppd,
                        waic_1 = c(p_waic1 = 2 * p_waic1, waic1 = waic1),
                        waic_2 = c(p_waic2 = 2 * p_waic2, waic2 = waic2))
  } else {
    out_list$ic <- list()
  }

  return(out_list)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/segsites.R
\name{segsites}
\alias{segsites}
\title{Segregating sites}
\usage{
segsites(n, theta)
}
\arguments{
\item{n}{sample size (positive integer)}
\item{theta}{mutation parameter (positive)}
}
\value{
A `disc_phase_type` object containing subintensity matrix (P), vector of initial probabilities (alpha) and defect (probability of not entering any transient
state prior to absorption)
}
\description{
Generator of the subintensity matrix for the special case of segregating sites, i.e., a summary of all frequency counts
}
\examples{
segsites(n = 4, theta = 2)
}
| /man/segsites.Rd | no_license | aumath-advancedr2019/ticphasetype | R | false | true | 640 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/segsites.R
\name{segsites}
\alias{segsites}
\title{Segregating sites}
\usage{
segsites(n, theta)
}
\arguments{
\item{n}{sample size (positive integer)}
\item{theta}{mutation parameter (positive)}
}
\value{
A `disc_phase_type` object containing subintensity matrix (P), vector of initial probabilities (alpha) and defect (probability of not entering any transient
state prior to absorption)
}
\description{
Generator of the subintensity matrix for the special case of segregating sites, i.e., a summary of all frequency counts
}
\examples{
segsites(n = 4, theta = 2)
}
|
# Plot clean polymerase reads and clean subreads length/quality distribution.
# Reads a length table and writes a histogram of subread lengths to a PDF in
# the working directory.
library(ggplot2)
library(grid)
# Earlier, commented-out version of this script: plotted polymerase-read
# length and quality distributions for a filtered PacBio run (FW57).
#d<-read.table('/ifshk5/BC_COM_P3/F16FTSEUHT0555/HUMfzyD/work/F16FTSEUHT0555_HUMfzyD_new/Process/FilterData/FW57/Filter_Pacbio/FW57.filtered_PolymeraseReads.length.quality.data',header = T, sep='\t')
#A=ggplot(d)+labs(title='PolymeraseReads Length distribution \n(FW57)\n', x='PolymeraseReads Length(bp)', y='Number of Reads(#)')+
#geom_histogram(aes(x=length),binwidth = 1000,fill="blue",col="black")+
#theme(legend.position="none", plot.title=element_text(face='bold'), axis.text=element_text(color='black',size=12))
#B=ggplot(d)+labs(title='PolymeraseReads Quality distribution \n(FW57)\n', x='PolymeraseReads Quality(#)', y='Number of Reads(#)')+
#geom_histogram(aes(x=quality),binwidth = 0.001,fill="green",col="black")+
#theme(legend.position="none", plot.title=element_text(face='bold'), axis.text=element_text(color='black',size=12))
# Open the PDF graphics device; all subsequent plots are written to it.
pdf("Pacbio.Cleandata.pdf", width = 10, height = 8)
# Input: tab-separated table with a header -- assumed to contain a `length`
# column (TODO confirm against the upstream pipeline output).
d<-read.table('2cell.fasta.len',header = T, sep="\t")
# Histogram of subread lengths, binned per 1 kb.
A<-ggplot(d)+labs(title='Subreads Length distribution \n(Amanita.subjunquillea.H-1)\n', x='Subreads Length(bp)', y='Number of Reads(#)')+
geom_histogram(aes(x=length),binwidth = 1000,fill="blue",col="black")
#theme(legend.position="none", plot.title=element_text(face='bold'), axis.text=element_text(color='black',size=12))
# Printing the ggplot object renders it onto the open PDF device.
A
#grid.newpage()
dev.off()
| /R/histogram/draw.r | no_license | bioCKO/bin | R | false | false | 1,412 | r | #Plot Clean Polymerase Reads and Clean Subreads Length/Quality distritution
# Plot clean polymerase reads and clean subreads length/quality distribution:
# reads a length table and writes a histogram of subread lengths to a PDF.
library(ggplot2)
library(grid)
# Earlier, commented-out version of this script: plotted polymerase-read
# length and quality distributions for a filtered PacBio run (FW57).
#d<-read.table('/ifshk5/BC_COM_P3/F16FTSEUHT0555/HUMfzyD/work/F16FTSEUHT0555_HUMfzyD_new/Process/FilterData/FW57/Filter_Pacbio/FW57.filtered_PolymeraseReads.length.quality.data',header = T, sep='\t')
#A=ggplot(d)+labs(title='PolymeraseReads Length distribution \n(FW57)\n', x='PolymeraseReads Length(bp)', y='Number of Reads(#)')+
#geom_histogram(aes(x=length),binwidth = 1000,fill="blue",col="black")+
#theme(legend.position="none", plot.title=element_text(face='bold'), axis.text=element_text(color='black',size=12))
#B=ggplot(d)+labs(title='PolymeraseReads Quality distribution \n(FW57)\n', x='PolymeraseReads Quality(#)', y='Number of Reads(#)')+
#geom_histogram(aes(x=quality),binwidth = 0.001,fill="green",col="black")+
#theme(legend.position="none", plot.title=element_text(face='bold'), axis.text=element_text(color='black',size=12))
# Open the PDF graphics device; all subsequent plots are written to it.
pdf("Pacbio.Cleandata.pdf", width = 10, height = 8)
# Input: tab-separated table with a header -- assumed to contain a `length`
# column (TODO confirm against the upstream pipeline output).
d<-read.table('2cell.fasta.len',header = T, sep="\t")
# Histogram of subread lengths, binned per 1 kb.
A<-ggplot(d)+labs(title='Subreads Length distribution \n(Amanita.subjunquillea.H-1)\n', x='Subreads Length(bp)', y='Number of Reads(#)')+
geom_histogram(aes(x=length),binwidth = 1000,fill="blue",col="black")
#theme(legend.position="none", plot.title=element_text(face='bold'), axis.text=element_text(color='black',size=12))
# Printing the ggplot object renders it onto the open PDF device.
A
#grid.newpage()
dev.off()
|
# 2015-08-11
# Prepare R with necessary package installation.
# install.packages() accepts a character vector, so one vectorized call
# replaces eleven copy-pasted single-package calls; it is also easier to
# keep the package list in sync.
pkgs <- c(
  "assertthat",
  "Hmisc",
  "foreign",
  "data.table",
  "gmodels",
  # "datascibc",  # not on CRAN; was previously installed with source = TRUE
  "boot",
  "PSAgraphics",
  "optmatch",
  "MatchIt",
  "Matching"
)
install.packages(pkgs)
| /prep/prep.R | no_license | docsteveharris/paper-spotepi | R | false | false | 390 | r | # 2015-08-11
# Prepare R with necessary package installation.
# install.packages() accepts a character vector, so one vectorized call
# replaces eleven copy-pasted single-package calls; it is also easier to
# keep the package list in sync.
pkgs <- c(
  "assertthat",
  "Hmisc",
  "foreign",
  "data.table",
  "gmodels",
  # "datascibc",  # not on CRAN; was previously installed with source = TRUE
  "boot",
  "PSAgraphics",
  "optmatch",
  "MatchIt",
  "Matching"
)
install.packages(pkgs)
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/sdwba.R
\name{rad2deg}
\alias{rad2deg}
\title{Convert between degrees and radians.}
\usage{
rad2deg(x)

deg2rad(x)
}
\arguments{
\item{x}{Angle to convert}
}
\description{
Convert between degrees and radians.
}
\examples{
rad2deg(pi)  # 180
deg2rad(180) # 3.141593
}
| /sdwba.Rcheck/00_pkg_src/sdwba/man/rad2deg.Rd | no_license | ElOceanografo/SDWBA.R | R | false | false | 406 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/sdwba.R
\name{rad2deg}
\alias{rad2deg}
\title{Convert between degrees and radians.}
\usage{
rad2deg(x)

deg2rad(x)
}
\arguments{
\item{x}{Angle to convert}
}
\description{
Convert between degrees and radians.
}
\examples{
rad2deg(pi)  # 180
deg2rad(180) # 3.141593
}
|
## These two funcions store a matrix and its inverse in temprary memory
## This function creates a list of 4 functions
## 1.set the value of a matrix
## 2.get the value of a matrix
## 3.set the value of the inverse
## 4.get the value of the inverse
makeCacheMatrix <- function(x = matrix()) {
  # Hold a matrix together with a memoised copy of its inverse.
  # The cached inverse lives in this closure's environment and is
  # discarded whenever a new matrix is stored via set().
  cached_inv <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inv <<- NULL  # storing a new matrix invalidates the cache
    },
    get = function() x,
    setinverse = function(inverse) cached_inv <<- inverse,
    getinverse = function() cached_inv
  )
}
## This function check if a value is assigned to the inverse.
## If yes, it returns that value.
##If not, it calculates the inverse of the cached matrix from above function
cacheSolve <- function(x, ...) {
  # Return the inverse of the matrix held in `x` (a makeCacheMatrix object),
  # reusing the memoised value when one is available and storing the result
  # back into the cache otherwise.
  cached <- x$getinverse()
  if (is.null(cached)) {
    cached <- solve(x$get())
    x$setinverse(cached)
  } else {
    message("getting cached inverse")
  }
  cached  # the inverse of the stored matrix
}
| /cachematrix.R | no_license | davidzxc574/ProgrammingAssignment2 | R | false | false | 937 | r | ## These two funcions store a matrix and its inverse in temprary memory
## This function creates a list of 4 functions
## 1.set the value of a matrix
## 2.get the value of a matrix
## 3.set the value of the inverse
## 4.get the value of the inverse
makeCacheMatrix <- function(x = matrix()) {
  # Wrap a matrix in a list of accessors that can also memoise its inverse.
  # `inv` is kept in this closure's environment; set() resets it so a stale
  # inverse can never be returned for a replaced matrix.
  inv <- NULL
  set <- function(y) {
    x <<- y
    inv <<- NULL
  }
  get <- function() x
  setinverse <- function(value) inv <<- value
  getinverse <- function() inv
  list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## This function check if a value is assigned to the inverse.
## If yes, it returns that value.
##If not, it calculates the inverse of the cached matrix from above function
cacheSolve <- function(x, ...) {
i<-x$getinverse()
if(!is.null(i)){
message("getting cached inverse")
return(i)
}
MyMatrix<-x$get()
i<-solve(MyMatrix)
x$setinverse(i)
i ## Return a matrix that is the inverse of 'x'
}
|
library(dplyr)
# quickly make some pedigrees
# name the target individuals 1 and 2. Missing data are 0's
pedigrees <- list(
FS = data.frame(
Kid = c(1, 2, 3, 4),
Pa = c(3, 3, 0, 0),
Ma = c(4, 4, 0, 0),
Sex = c(1, 1, 1, 2),
Observed = c(1, 1, 0, 0)),
HS = data.frame(
Kid = c(1, 2, 3, 4, 5),
Pa = c(4, 5, 0, 0, 0),
Ma = c(3, 3, 0, 0, 0),
Sex = c(1, 1, 2, 1, 1),
Observed = c(1, 1, 0, 0, 0))
)
# quickly make some mapped marker data which we will put
# onto 5 different chromosomes named 4,9,14,19,and 21
d <- c(4,9,14,19,21)
names(d) <- d
# put n markers on each, where n = 200/the index. i.e. imagine that they are getting shorter.
# let the length of each chromosome, in morgans be 1000 cM / the index.
set.seed(15)
markers_on_map <- lapply(d, function(x) {
cm <- 1000 / x
n <- round(20000/x)
# simulate positions for them
pos <- sort(round(runif(n, min = 0, max = cm), digits = 3))
names(pos) <- paste("chr", x, "-", 1:length(pos), sep ="")
# simulate allele freqs for them and put those along with the postion in a list
ret <- lapply(pos, function(x) {
rr <- list()
rr$pos <- x
A <- sample(2:8, 1) # number of alleles
rg <- rgamma(n = A, shape = 2, scale = 1)
rr$freqs <- round(rg/sum(rg), digits = 5)
rr
})
ret
})
# let's explore a long format for these guys
long_markers <- lapply(markers_on_map, function(x) {
lapply(x, function(y) {
data.frame(Pos = y$pos, Allele = paste("a", 1:length(y$freqs), sep = ""), Freq = y$freqs, stringsAsFactors = FALSE)
}) %>% bind_rows(.id = "Locus")
}) %>% bind_rows(.id = "Chrom")
long_markers$Chrom <- as.integer(long_markers$Chrom)
# here we provide an index for each allele at each locus and an index for each locus
long_markers2 <- long_markers %>%
group_by(Chrom, Locus) %>%
mutate(AlleIdx = as.integer(1 + n_distinct(Allele) - rank(Freq, ties.method = "first"))) %>%
group_by(Chrom) %>%
mutate(LocIdx = as.integer(factor(Pos, levels = sort(unique(Pos))))) %>%
arrange(Chrom, LocIdx, AlleIdx)
# and now, if we want to efficiently write these to a Morgan file we
# will want to bung the allele freqs together into a string
long_markers2 %>%
group_by(Chrom, LocIdx) %>%
summarise(FreqString = paste(Freq, collapse = " ")) %>%
mutate(setchrom = "set chrom", markers = "markers", allefreqs = "allele freqs") %>%
select(setchrom, Chrom, markers, LocIdx, allefreqs, FreqString) %>%
write.table(., file = "/tmp/testit.txt", row.names = FALSE, col.names = FALSE, quote = FALSE)
# so, when we implement this we will require the user to supply a long format data frame
# that has Chrom (int), LocIdx (int), Pos (dbl), AlleIdx (int), and Freq (dbl)
# then we don't have to worry about doing the ranking of the alleles, etc---the user will
# have to take care of that. The AlleIdx will have to correspond to the indexing of the alleles that
# translates directly to the genotypes. Maybe we should allow for columns of "LocusName"
# and "AlleleName"
# write out a matrix of kappa values for the different relationships we will look at.
kappas <- as.matrix(
read.table(
textConnection(
"Relat kappa0 kappa1 kappa2
MZ 0 0 1
PO 0 0 1
FS 0.25 0.5 0.25
HS 0.5 0.5 0
GP 0.5 0.5 0
AN 0.5 0.5 0
DFC 0.5625 0.375 0.0625
FC 0.75 0.25 0
HC 0.875 0.125 0
U 1 0 0
"),
header = TRUE, row.names = 1
)
)
#### Make some microhaplotype data ####
# we just simulate 200 chromosomes using ms and have either 1, 2, 3, 4, or 5 segregating
# sites. We want 100 of these by the end, so let us do 20 of each number of seg sites
library(stringr)
system("
source ~/.bashrc;
echo \"41234 20641 60651\" > seedms;
rm -f splud;
for R in 1 2 3 4 5; do
ms 200 20 -s $R >> splud
done
")
x <- readLines("splud")
x2 <- x[!str_detect(x, "[a-z/2-9]")]
x3 <- x2[x2!=""]
y <- data.frame(hap01 = x3, stringsAsFactors = FALSE) %>%
tbl_df
y$LocNum <- rep(1:100, each = 200)
# now me make 0 or 1 be ACGT for each locus. We make an array for
# what the 0's and 1's mean. Far out
set.seed(5)
DNA_types <- lapply(1:100, function(x) matrix(unlist(lapply(1:5, function(x) sample(c("A", "C", "G", "T"), 2 ))), byrow=T, ncol = 2))
# now make a column of DNA bases for each haplotype. We need a function for that.
# this is klugie and not vectorized, but I only need to do it once.
# Translate a 0/1 haplotype string into a DNA-base string for locus L.
#
# Each character of `x` selects column 1 ("0") or column 2 ("1") of the
# site-by-allele base matrix DNA_types[[L]] (a global list built above).
# Fixes over the original:
#  * `1:len` is the empty-input footgun (yields c(1, 0) when len == 0);
#    seq_along() handles a zero-length haplotype safely.
#  * base strsplit() replaces stringr::str_split() — identical output for
#    this usage, one fewer dependency in the helper.
hapseq <- function(x, L) {
  allele_col <- as.numeric(strsplit(x, "")[[1]]) + 1  # "0"/"1" -> column 1/2
  idx <- cbind(seq_along(allele_col), allele_col)     # (site, allele) pairs
  paste(DNA_types[[L]][idx], collapse = "")
}
# this is klugie but will work....
y$hap <- sapply(1:nrow(y), function(l) hapseq(y$hap01[l], y$LocNum[l]))
# check these:
y %>%
group_by(LocNum, hap01, hap) %>%
tally
# yep, it looks good. So, now, make markers out of them
# we will have some garbage on the chrom name for all of them
y2 <- y %>%
mutate(Chrom = "ddRAD",
Locus = paste("ddRAD", LocNum, sep = "_"),
Pos = LocNum
) %>%
rename(LocIdx = LocNum,
Allele = hap)
# now compute allele freqs
microhaps <- y2 %>%
group_by(Chrom, Locus, Pos, LocIdx, Allele) %>%
tally %>%
mutate(Freq = n / sum(n)) %>%
select(-n) %>%
mutate(AlleIdx = NA) %>%
reindex_markers()
save(microhaps, file = "data/microhaps.rda")
| /scratch/create-data.R | no_license | eriqande/CKMRsim | R | false | false | 5,292 | r | library(dplyr)
# quickly make some pedigrees
# name the target individuals 1 and 2. Missing data are 0's
pedigrees <- list(
FS = data.frame(
Kid = c(1, 2, 3, 4),
Pa = c(3, 3, 0, 0),
Ma = c(4, 4, 0, 0),
Sex = c(1, 1, 1, 2),
Observed = c(1, 1, 0, 0)),
HS = data.frame(
Kid = c(1, 2, 3, 4, 5),
Pa = c(4, 5, 0, 0, 0),
Ma = c(3, 3, 0, 0, 0),
Sex = c(1, 1, 2, 1, 1),
Observed = c(1, 1, 0, 0, 0))
)
# quickly make some mapped marker data which we will put
# onto 5 different chromosomes named 4,9,14,19,and 21
d <- c(4,9,14,19,21)
names(d) <- d
# put n markers on each, where n = 200/the index. i.e. imagine that they are getting shorter.
# let the length of each chromosome, in morgans be 1000 cM / the index.
set.seed(15)
markers_on_map <- lapply(d, function(x) {
cm <- 1000 / x
n <- round(20000/x)
# simulate positions for them
pos <- sort(round(runif(n, min = 0, max = cm), digits = 3))
names(pos) <- paste("chr", x, "-", 1:length(pos), sep ="")
# simulate allele freqs for them and put those along with the postion in a list
ret <- lapply(pos, function(x) {
rr <- list()
rr$pos <- x
A <- sample(2:8, 1) # number of alleles
rg <- rgamma(n = A, shape = 2, scale = 1)
rr$freqs <- round(rg/sum(rg), digits = 5)
rr
})
ret
})
# let's explore a long format for these guys
long_markers <- lapply(markers_on_map, function(x) {
lapply(x, function(y) {
data.frame(Pos = y$pos, Allele = paste("a", 1:length(y$freqs), sep = ""), Freq = y$freqs, stringsAsFactors = FALSE)
}) %>% bind_rows(.id = "Locus")
}) %>% bind_rows(.id = "Chrom")
long_markers$Chrom <- as.integer(long_markers$Chrom)
# here we provide an index for each allele at each locus and an index for each locus
long_markers2 <- long_markers %>%
group_by(Chrom, Locus) %>%
mutate(AlleIdx = as.integer(1 + n_distinct(Allele) - rank(Freq, ties.method = "first"))) %>%
group_by(Chrom) %>%
mutate(LocIdx = as.integer(factor(Pos, levels = sort(unique(Pos))))) %>%
arrange(Chrom, LocIdx, AlleIdx)
# and now, if we want to efficiently write these to a Morgan file we
# will want to bung the allele freqs together into a string
long_markers2 %>%
group_by(Chrom, LocIdx) %>%
summarise(FreqString = paste(Freq, collapse = " ")) %>%
mutate(setchrom = "set chrom", markers = "markers", allefreqs = "allele freqs") %>%
select(setchrom, Chrom, markers, LocIdx, allefreqs, FreqString) %>%
write.table(., file = "/tmp/testit.txt", row.names = FALSE, col.names = FALSE, quote = FALSE)
# so, when we implement this we will require the user to supply a long format data frame
# that has Chrom (int), LocIdx (int), Pos (dbl), AlleIdx (int), and Freq (dbl)
# then we don't have to worry about doing the ranking of the alleles, etc---the user will
# have to take care of that. The AlleIdx will have to correspond to the indexing of the alleles that
# translates directly to the genotypes. Maybe we should allow for columns of "LocusName"
# and "AlleleName"
# write out a matrix of kappa values for the different relationships we will look at.
kappas <- as.matrix(
read.table(
textConnection(
"Relat kappa0 kappa1 kappa2
MZ 0 0 1
PO 0 0 1
FS 0.25 0.5 0.25
HS 0.5 0.5 0
GP 0.5 0.5 0
AN 0.5 0.5 0
DFC 0.5625 0.375 0.0625
FC 0.75 0.25 0
HC 0.875 0.125 0
U 1 0 0
"),
header = TRUE, row.names = 1
)
)
#### Make some microhaplotype data ####
# we just simulate 200 chromosomes using ms and have either 1, 2, 3, 4, or 5 segregating
# sites. We want 100 of these by the end, so let us do 20 of each number of seg sites
library(stringr)
system("
source ~/.bashrc;
echo \"41234 20641 60651\" > seedms;
rm -f splud;
for R in 1 2 3 4 5; do
ms 200 20 -s $R >> splud
done
")
x <- readLines("splud")
x2 <- x[!str_detect(x, "[a-z/2-9]")]
x3 <- x2[x2!=""]
y <- data.frame(hap01 = x3, stringsAsFactors = FALSE) %>%
tbl_df
y$LocNum <- rep(1:100, each = 200)
# now me make 0 or 1 be ACGT for each locus. We make an array for
# what the 0's and 1's mean. Far out
set.seed(5)
DNA_types <- lapply(1:100, function(x) matrix(unlist(lapply(1:5, function(x) sample(c("A", "C", "G", "T"), 2 ))), byrow=T, ncol = 2))
# now make a column of DNA bases for each haplotype. We need a function for that.
# this is klugie and not vectorized, but I only need to do it once.
# Translate a 0/1 haplotype string into a DNA-base string for locus L.
#
# Each character of `x` selects column 1 ("0") or column 2 ("1") of the
# site-by-allele base matrix DNA_types[[L]] (a global list built above).
# Fixes over the original:
#  * `1:len` is the empty-input footgun (yields c(1, 0) when len == 0);
#    seq_along() handles a zero-length haplotype safely.
#  * base strsplit() replaces stringr::str_split() — identical output for
#    this usage, one fewer dependency in the helper.
hapseq <- function(x, L) {
  allele_col <- as.numeric(strsplit(x, "")[[1]]) + 1  # "0"/"1" -> column 1/2
  idx <- cbind(seq_along(allele_col), allele_col)     # (site, allele) pairs
  paste(DNA_types[[L]][idx], collapse = "")
}
# this is klugie but will work....
y$hap <- sapply(1:nrow(y), function(l) hapseq(y$hap01[l], y$LocNum[l]))
# check these:
y %>%
group_by(LocNum, hap01, hap) %>%
tally
# yep, it looks good. So, now, make markers out of them
# we will have some garbage on the chrom name for all of them
y2 <- y %>%
mutate(Chrom = "ddRAD",
Locus = paste("ddRAD", LocNum, sep = "_"),
Pos = LocNum
) %>%
rename(LocIdx = LocNum,
Allele = hap)
# now compute allele freqs
microhaps <- y2 %>%
group_by(Chrom, Locus, Pos, LocIdx, Allele) %>%
tally %>%
mutate(Freq = n / sum(n)) %>%
select(-n) %>%
mutate(AlleIdx = NA) %>%
reindex_markers()
save(microhaps, file = "data/microhaps.rda")
|
function(input, output) {

  # Build the requested input widget on the fly: the server sends a fresh
  # UI definition to the client whenever input$input_type changes.
  output$ui <- renderUI({
    type <- input$input_type
    if (is.null(type)) {
      return()
    }

    # The same two-option choice set is shared by several widget types.
    two_options <- c("Option 1" = "option1", "Option 2" = "option2")

    switch(type,
      "slider" = sliderInput("dynamic", "Dynamic",
                             min = 1, max = 20, value = 10),
      "text" = textInput("dynamic", "Dynamic", value = "starting value"),
      "numeric" = numericInput("dynamic", "Dynamic", value = 12),
      "checkbox" = checkboxInput("dynamic", "Dynamic", value = TRUE),
      "checkboxGroup" = checkboxGroupInput("dynamic", "Dynamic",
                                           choices = two_options,
                                           selected = "option2"),
      "radioButtons" = radioButtons("dynamic", "Dynamic",
                                    choices = two_options,
                                    selected = "option2"),
      "selectInput" = selectInput("dynamic", "Dynamic",
                                  choices = two_options,
                                  selected = "option2"),
      "selectInput (multi)" = selectInput("dynamic", "Dynamic",
                                          choices = two_options,
                                          selected = c("option1", "option2"),
                                          multiple = TRUE),
      "date" = dateInput("dynamic", "Dynamic"),
      "daterange" = dateRangeInput("dynamic", "Dynamic")
    )
  })

  # Echo the selected widget type back to the UI.
  output$input_type_text <- renderText({
    input$input_type
  })

  # Show the structure of whatever value the dynamic widget produced.
  output$dynamic_value <- renderPrint({
    str(input$dynamic)
  })
}
| /041-dynamic-ui/server.R | permissive | rstudio/shiny-examples | R | false | false | 1,702 | r | function(input, output) {
output$ui <- renderUI({
if (is.null(input$input_type))
return()
# Depending on input$input_type, we'll generate a different
# UI component and send it to the client.
switch(input$input_type,
"slider" = sliderInput("dynamic", "Dynamic",
min = 1, max = 20, value = 10),
"text" = textInput("dynamic", "Dynamic",
value = "starting value"),
"numeric" = numericInput("dynamic", "Dynamic",
value = 12),
"checkbox" = checkboxInput("dynamic", "Dynamic",
value = TRUE),
"checkboxGroup" = checkboxGroupInput("dynamic", "Dynamic",
choices = c("Option 1" = "option1",
"Option 2" = "option2"),
selected = "option2"
),
"radioButtons" = radioButtons("dynamic", "Dynamic",
choices = c("Option 1" = "option1",
"Option 2" = "option2"),
selected = "option2"
),
"selectInput" = selectInput("dynamic", "Dynamic",
choices = c("Option 1" = "option1",
"Option 2" = "option2"),
selected = "option2"
),
"selectInput (multi)" = selectInput("dynamic", "Dynamic",
choices = c("Option 1" = "option1",
"Option 2" = "option2"),
selected = c("option1", "option2"),
multiple = TRUE
),
"date" = dateInput("dynamic", "Dynamic"),
"daterange" = dateRangeInput("dynamic", "Dynamic")
)
})
output$input_type_text <- renderText({
input$input_type
})
output$dynamic_value <- renderPrint({
str(input$dynamic)
})
}
|
#' # Script for doing QA/QC on dung data
library(plyr); library(dplyr);
library(readr)
library(stringi)
library(ggplot2)
library(tidyverse)
#' ## Host Abundance Estimates
#+ dung data ####
dung_data <- read_csv("data/raw_data/dung.csv")
#+ plot visit data ####
plot_visit_data <- read_csv("data/processed_data/plot_visit_data.csv")
names(dung_data)
dung_data <- dung_data %>%
mutate(plot_id = paste(installation, plot_id, sep = " "),
plot_id=tolower(plot_id)) %>%
filter(date>20170601)
# filter(plotid %in% c("n","s","e"))
d <- dung_data %>%
group_by(installation, plot_id) %>%
summarise(n_transect = n_distinct(location))
summary(d)
summary(dung_data)
summary(plot_visit_data)
unique(plot_visit_data$plot_id) %in% unique(dung_data$plot_id)
n_distinct(dung_data$plot_id)
n_distinct(plot_visit_data$plot_id)
anti_join(plot_visit_data, dung_data, "plot_id") %>%
select(installation, plot_id)
### Blanding theater_cogon not included in dung, all other plots account for ###
filter(d, n_transect>4)
n_distinct(dung_data$location)
unique(dung_data$location)
unique(dung_data$species)
# Standardize transect location and species labels to lowercase.
# A named lookup replaces eleven copy-pasted subset assignments; only the
# listed capitalized spellings are rewritten, all other values (including
# already-lowercase ones) pass through untouched — same effect as before.
location_fix <- c(North = "north", South = "south", East = "east", West = "west")
species_fix <- c(Cottontail = "cottontail", Deer = "deer", Cow = "cow",
                 Armadillo = "armadillo", Hog = "hog", Racoon = "racoon",
                 Other = "other")
loc_hit <- dung_data$location %in% names(location_fix)
dung_data$location[loc_hit] <- unname(location_fix[dung_data$location[loc_hit]])
sp_hit <- dung_data$species %in% names(species_fix)
dung_data$species[sp_hit] <- unname(species_fix[dung_data$species[sp_hit]])
### Cleaned up transect location and species ID to be consistent ###
filter(dung_data, plot_id=="avonpark a1") %>%
select(date, plot_id, location, species, dung1m, dung2m)
### n_transect of 8 from plot revisits ###
summary(dung_data)
filter(dung_data, is.na(dung1m)) %>%
select(date, plot_id, location, species, dung1m, dung2m)
filter(dung_data, is.na(dung2m)) %>%
select(date, plot_id, location, species, dung1m, dung2m)
dung_data$dung2m[dung_data$plot_id=="blanding m1" & dung_data$location=="east"] <- 0
filter(dung_data, plot_id=="eglin i1")
filter(plot_visit_data, plot_id=="eglin")
#### Eglin i1 north, south, west missing? east is 0 and 0 ####
dung_data$date <- as.Date(as.character(dung_data$date), format = "%Y%m%d")
dung_data <- dung_data %>%
mutate(visit_year = lubridate::year(date))
summary(dung_data)
dung_data <- dung_data %>%
mutate(plot_id = case_when(
plot_id=="gordon z1" ~ "gordon a1",
plot_id=="gordon y1" ~ "gordon b1",
plot_id=="gordon x1" ~ "gordon c1",
plot_id=="gordon w1" ~ "gordon d1",
plot_id=="gordon v1" ~ "gordon e1",
plot_id=="gordon t1" ~ "gordon f1",
plot_id=="gordon s1" ~ "gordon g1",
plot_id=="gordon r1" ~ "gordon h1",
TRUE ~ plot_id
))
unique(dung_data$plot_id)
write_csv(dung_data, "data/processed_data/dung.csv")
#### Steven checked processing, 3 missing Eglin i1 entries, waiting on response from elena 9/17 ####
###### steven adding qaqc for 2019 dung data #####
dung_2019 <- read_csv("data/raw_data/2019_serdp_data/dung-data-entry.csv")
tail(dung_2019)
dung_2019 <- dung_2019 %>%
mutate(date = as.Date(as.character(date), format = "%Y%m%d")) %>%
mutate(visit_year = lubridate::year(date)) %>%
filter(visit_year == 2019)
unique(dung_2019$installation)
dung_all <- rbind(dung_data, dung_2019)
write_csv(dung_all, "data/processed_data/dung.csv")
| /R_scripts/QAQC_scripts/dung_data_qaqc.R | no_license | whalend/SERDP_Project | R | false | false | 3,709 | r | #' # Script for doing QA/QC on dung data
library(plyr); library(dplyr);
library(readr)
library(stringi)
library(ggplot2)
library(tidyverse)
#' ## Host Abundance Estimates
#+ dung data ####
dung_data <- read_csv("data/raw_data/dung.csv")
#+ plot visit data ####
plot_visit_data <- read_csv("data/processed_data/plot_visit_data.csv")
names(dung_data)
dung_data <- dung_data %>%
mutate(plot_id = paste(installation, plot_id, sep = " "),
plot_id=tolower(plot_id)) %>%
filter(date>20170601)
# filter(plotid %in% c("n","s","e"))
d <- dung_data %>%
group_by(installation, plot_id) %>%
summarise(n_transect = n_distinct(location))
summary(d)
summary(dung_data)
summary(plot_visit_data)
unique(plot_visit_data$plot_id) %in% unique(dung_data$plot_id)
n_distinct(dung_data$plot_id)
n_distinct(plot_visit_data$plot_id)
anti_join(plot_visit_data, dung_data, "plot_id") %>%
select(installation, plot_id)
### Blanding theater_cogon not included in dung, all other plots account for ###
filter(d, n_transect>4)
n_distinct(dung_data$location)
unique(dung_data$location)
unique(dung_data$species)
# Standardize transect location and species labels to lowercase.
# A named lookup replaces eleven copy-pasted subset assignments; only the
# listed capitalized spellings are rewritten, all other values (including
# already-lowercase ones) pass through untouched — same effect as before.
location_fix <- c(North = "north", South = "south", East = "east", West = "west")
species_fix <- c(Cottontail = "cottontail", Deer = "deer", Cow = "cow",
                 Armadillo = "armadillo", Hog = "hog", Racoon = "racoon",
                 Other = "other")
loc_hit <- dung_data$location %in% names(location_fix)
dung_data$location[loc_hit] <- unname(location_fix[dung_data$location[loc_hit]])
sp_hit <- dung_data$species %in% names(species_fix)
dung_data$species[sp_hit] <- unname(species_fix[dung_data$species[sp_hit]])
### Cleaned up transect location and species ID to be consistent ###
filter(dung_data, plot_id=="avonpark a1") %>%
select(date, plot_id, location, species, dung1m, dung2m)
### n_transect of 8 from plot revisits ###
summary(dung_data)
filter(dung_data, is.na(dung1m)) %>%
select(date, plot_id, location, species, dung1m, dung2m)
filter(dung_data, is.na(dung2m)) %>%
select(date, plot_id, location, species, dung1m, dung2m)
dung_data$dung2m[dung_data$plot_id=="blanding m1" & dung_data$location=="east"] <- 0
filter(dung_data, plot_id=="eglin i1")
filter(plot_visit_data, plot_id=="eglin")
#### Eglin i1 north, south, west missing? east is 0 and 0 ####
dung_data$date <- as.Date(as.character(dung_data$date), format = "%Y%m%d")
dung_data <- dung_data %>%
mutate(visit_year = lubridate::year(date))
summary(dung_data)
dung_data <- dung_data %>%
mutate(plot_id = case_when(
plot_id=="gordon z1" ~ "gordon a1",
plot_id=="gordon y1" ~ "gordon b1",
plot_id=="gordon x1" ~ "gordon c1",
plot_id=="gordon w1" ~ "gordon d1",
plot_id=="gordon v1" ~ "gordon e1",
plot_id=="gordon t1" ~ "gordon f1",
plot_id=="gordon s1" ~ "gordon g1",
plot_id=="gordon r1" ~ "gordon h1",
TRUE ~ plot_id
))
unique(dung_data$plot_id)
write_csv(dung_data, "data/processed_data/dung.csv")
#### Steven checked processing, 3 missing Eglin i1 entries, waiting on response from elena 9/17 ####
###### steven adding qaqc for 2019 dung data #####
dung_2019 <- read_csv("data/raw_data/2019_serdp_data/dung-data-entry.csv")
tail(dung_2019)
dung_2019 <- dung_2019 %>%
mutate(date = as.Date(as.character(date), format = "%Y%m%d")) %>%
mutate(visit_year = lubridate::year(date)) %>%
filter(visit_year == 2019)
unique(dung_2019$installation)
dung_all <- rbind(dung_data, dung_2019)
write_csv(dung_all, "data/processed_data/dung.csv")
|
library(shinydashboard)
library(leaflet)
library(RColorBrewer)
library(dplyr)
require(rgdal)
library(lubridate)
library(curl)
library(ggplot2)
library(stringr)
## read data
# Council Population data
population <- read.csv("data/CDpopu&area.csv")
# CD shapefile
cd <- readOGR("cdinfo/l.a. city council district (2012).shp")
#--------------------isolate & clean data-------------------
shinyServer(function(input, output,session){
# --------------sidebar Hide Default-------
addClass(selector = "body", class = "sidebar-collapse")
# ----------------input---------------
output$serviceText <- renderUI({
h5(strong(paste("Requests of",input$serviceType)),align = "center")
})
# ------download data--------------------
requestdata <- reactive({
date1 <- input$dates[1]
date2 <- input$dates[2]
url = NULL
request <- NULL
for (i in seq(0,1500000000000,50000)){
options("scipen"=20)
url <- append(url,paste("https://data.lacity.org/resource/ndkd-k878.csv?$select=createddate,updateddate,status,servicedate,closeddate,requesttype,address,cd,longitude,latitude&$order=createddate%20DESC&$where=createddate%3E%20%27",date1,"%27%20AND%20createddate%3C%20%27",date2,"%27&$limit=50000&$offset=",i,sep = ""))
a<- length(url)
curl_download(url[a], "savedata/request.csv",
quiet = TRUE, mode = "wb",
handle = new_handle())
requestone <- read.csv("savedata/request.csv")
if (nrow(requestone)>1){
request<- rbind(request,
requestone)
}else{
break
}}
request$CreatedDate <- mdy_hms(request$CreatedDate)
request$UpdatedDate <- mdy_hms(request$UpdatedDate)
return(request)
})
solveddata <- reactive({
request <- requestdata()
solved <- filter(request, Status=="Closed" & !is.na(ServiceDate))
# calculate solve time
solved$duration <- as.numeric(solved$UpdatedDate-solved$CreatedDate,units="mins")
solved <- filter(solved, duration >20)
solved
})
#---------------output----------------
#--------------1.Distribution-------------------
#------------------1.1 map--------------------
# Make a list of icons. We'll index into it based on name.
typeicon <- iconList(
Graffiti = makeIcon("icon/giraffiti.png"),
Bulky = makeIcon("icon/bulky.png"),
Dump = makeIcon("icon/dump.png"),
Appliance = makeIcon("icon/appliance.png"),
Ewaste = makeIcon("icon/ewaste.png"),
Rat = makeIcon("icon/rat.png"),
Singlelight = makeIcon("icon/singlelight.png"),
Multilight = makeIcon("icon/multilight.png"),
Homeless = makeIcon("icon/homeless.png"))
# summary data
summarytable <- reactive({
table <- solveddata()%>%
filter(RequestType==input$serviceType)%>%
group_by(CD)%>%
dplyr::summarise(Duration=mean(duration, na.rm=T))
table2 <- requestdata()%>%
filter(RequestType==input$serviceType)%>%
group_by(CD)%>%
dplyr::summarise(Frequency = n())
table <- merge(table, table2, by.x="CD",by.y="CD")
table$Duration <- as.integer(table$Duration)
table$DurationNum <- table$Duration
# mean duration & duration String
meanDuration <- as.integer(mean(table$Duration))
meanDurStr <- as.character(seconds_to_period(meanDuration*60))
meanDurStr <- str_sub(meanDurStr, start= 1, end=str_locate(meanDurStr,"H")[2]+1)
# each duration string
table$Duration <- as.character(seconds_to_period(table$Duration*60))
table$Duration <- str_sub(table$Duration, start= 1,end=str_locate(table$Duration,"H")[2]+1)
# add average row
table <- rbind(table,c("Average",meanDurStr,as.integer(mean(table$Frequency)),meanDuration))
# order of durationNum
table$DurationNum <- as.numeric(table$DurationNum)
table$Duration <- as.factor(table$Duration)
table$Duration <- factor(table$Duration, ordered=T,
levels = unique(table[order(table$DurationNum),"Duration"]))
table$Frequency <- as.numeric(table$Frequency)
# order CD levels
table$CD <- factor(table$CD, levels = c(1:15,"Average"))
colnames(table)[2:3] <- c("Ave. Solving Time","Num. of Requests")
table<-arrange(table,CD)
table
})
# leaflet data
cddata <- reactive({
table <- summarytable()
cdcount <- table[-16,]
cd@data <- merge(cd@data, cdcount, by.x = "name", by.y = "CD", all.x = T, all.y = F)
cd@data <- merge(cd@data, population,by.x = "name",by.y="CD",all.x=T,all.y=F)
# cd@data$aveDurationNum <- cd@data$aveDuration
# cd@data$aveDuration <- as.character(seconds_to_period(60*cd@data$aveDuration))
# cd@data$aveDuration <- str_sub(cd@data$aveDuration, start= 1, end=str_locate(cd@data$aveDuration,"H")[2]+1)
# cd@data$totalAveDurationNum <- cd@data$totalAveDuration
# cd@data$totalAveDuration <- as.character(seconds_to_period(60*cd@data$totalAveDuration))
# cd@data$totalAveDuration <- str_sub(cd@data$totalAveDuration, start= 1, end=str_locate(cd@data$totalAveDuration,"H")[2]+1)
cd@data$name <- factor(cd@data$name, levels = c(1:15))
cd@data <- cd@data[order(cd@data$name), ]
rownames(cd@data) <- c(0:14)
cd
})
# spatical data link location with icon
solved2data <- reactive({
solved <- solveddata()
solved <- filter(solved, !is.na(Longitude) & !is.na(Latitude))
solved <- filter(solved, RequestType==input$serviceType)
# threshold now: data max volumn -- 65536
## Scale the solved1 dataset if necessary
ThresholdVery = dim(solved)[1]
if (ThresholdVery <= 65536) {
solved1 = solved
} else {
solved1 = solved[1:65536, ]
}
solved1$duration = round(solved1$duration)
solved1$durationStr = seconds_to_period(60*solved1$duration)
solved1 = dplyr::select(solved1, RequestType, Longitude, Latitude, CreatedDate, CD, Address, durationStr)
solved1$CreatedDate = as.factor(solved1$CreatedDate)
solved1$CD = as.factor(solved1$CD)
solved1$Address = as.factor(solved1$Address)
solved1$durationStr = as.character(solved1$durationStr)
# Spatial data - solved2
solved2 <- sp::SpatialPointsDataFrame(
cbind(
solved1[,"Longitude"], # lng
solved1[,"Latitude"], # lat
solved1[,"RequestType"], # RequestType
solved1[,"CD"] # CD
),
data.frame(type = factor(
ifelse(solved1$RequestType == "Graffiti Removal", "Graffiti",
ifelse(solved1$RequestType == "Bulky Items", "Bulky",
ifelse(solved1$RequestType == "Illegal Dumping Pickup", "Dump",
ifelse(solved1$RequestType == "Metal/Household Appliances", "Appliance",
ifelse(solved1$RequestType == "Electronic Waste", "Ewaste",
ifelse(solved1$RequestType == "Dead Animal Removal", "Rat",
ifelse(solved1$RequestType == "Single Streetlight Issue", "Singlelight",
ifelse(solved1$RequestType == "Multiple Streetlight Issue", "Multilight", "Homeless")))))))),
c("Graffiti", "Bulky", "Dump", "Appliance", "Ewaste", "Rat", "Singlelight", "Multilight", "Homeless")
))
)
# add more features to the solved2 spatial dataset
solved2@data$requestType = solved1$RequestType
solved2@data$CD = solved1$CD
solved2@data$Address = solved1$Address
solved2@data$durationStr = solved1$durationStr
solved2@data$CreatedDate = solved1$CreatedDate
solved2
})
# leaflet: draw everything
output$map <- renderLeaflet({
solved2 <-solved2data()
cd <- cddata()
# district pop-up
content <- NULL
for (i in c(10:15, 1:9)) {
content <- append(content, paste(sep = "<br/>",
paste("<b><a><font color = 'Grey'>", "CD Number: ", "</font>", as.numeric(cd@polygons[[i]]@ID)+1, "</a ></b>"),
paste("<b><a><font color = 'Grey'>", "Population: ", "</font>", as.numeric(cd@data$Population[i]), "</a ></b>"),
paste("<b><a><font color = 'Grey'>", "Area: ", "</font>", cd@data$Area..Sq.Mi.[i],"<font color = 'Grey'>", "square miles", "</font>","</a ></b>"),
paste("<b><a><font color = 'Grey'>", "Population Density: ", "</font>", as.integer(cd@data$Density[i]), "<font color = 'Grey'>", "per sq. mi", "</font>","</a ></b>"),
paste("<b><a><font color = 'Grey'>", "District Avg. Solving Time: ", "</font>", cd@data$`Ave. Solving Time`[i], "</a ></b>"),
paste("<b><a><font color = 'Grey'>", "Overall Avg. Solving Time: ", "</font>", summarytable()$`Ave. Solving Time`[16],"</a ></b>")
))
}
## build the html popup for solved2
contentSol <- paste(sep = "<br/>",
paste("<b><a><font color = 'Grey'>", "Request Type: ", "</font>", as.character(solved2@data[, 2]), "</a ></b>"),
paste("<b><a><font color = 'Grey'>", "CD Number: ", "</font>", as.character(solved2@data[, 3]), "</a ></b>"),
paste("<b><a><font color = 'Grey'>", "Address: ", "</font>", as.character(solved2@data[, 4]), "</a ></b>"),
paste("<b><a><font color = 'Grey'>", "Created Date: ", "</font>", as.character(solved2@data[, 6]), "</a ></b>"),
paste("<b><a><font color = 'Grey'>", "Processing Time: ", "</font>", as.character(solved2@data[, 5]), "</a ></b>"))
leaflet(solved2) %>%
addProviderTiles(provider = "CartoDB.Positron") %>%
setView(lng = -118.4, lat = 34.09, zoom = 10) %>%
addPolygons(data = cd, opacity = 0.3, fillOpacity = 0.5,
stroke = T, weight = 1, popup = content,
color =~ colorNumeric("OrRd", Density)(Density)[c(10:15,1:9)],group="Solving Time")%>%
addPolygons(data = cd, opacity = 0.3, fillOpacity = 0.5,
stroke = T, weight = 1, popup = content,
color =~ colorNumeric("OrRd", DurationNum)(DurationNum)[c(10:15,1:9)],group="Pop Density")%>%
addCircleMarkers(lng = solved2@coords[, 1], lat = solved2@coords[, 2],
color = "#d95f0e",radius = 3,
stroke = FALSE, fillOpacity = 0.3,
group = "Show All")%>%
addMarkers(lng = solved2@coords[, 1], lat = solved2@coords[, 2], icon = ~typeicon[type],
clusterOptions = markerClusterOptions(),
popup = paste(contentSol),group = "Cluster") %>%
# addLegend("bottomleft", pal=pal, values=colorData, title=colorBy,
# layerId="colorLegend")%>%
addLayersControl(
baseGroups = c("Pop Density","Solving Time"),
overlayGroups = c("Show All","Cluster"),
options = layersControlOptions(collapsed = F))%>%
hideGroup("Cluster")
})
# ------------ 1.2 Performance Table------------
output$requestPerform <- renderDataTable({
table <- summarytable()[,c(1:3)]
table},
options = list(searching = FALSE,paging = FALSE))
#--------------2.compare--------------------
tab2requestdata <- reactive({
districtnum1 <- input$CD1
districtnum2 <- input$CD2
# districtnum1 <- 1
# districtnum2 <- 2
# date1 <- "2016-08-22"
# date2 <- "2016-09-21"
date1 <- input$tab2date[1]
date2 <- input$tab2date[2]
url = NULL
reqdistall <- NULL
for (i in seq(0, 1500000, 50000)){
options("scipen" = 20)
url <- append(url,paste("https://data.lacity.org/resource/ndkd-k878.csv?$select=createddate,status,updateddate,servicedate,closeddate,requesttype,address,cd,longitude,latitude&$order=createddate%20DESC&$where=createddate%3E%20%27",date1,"%27%20AND%20createddate%3C%20%27",date2,"%27%20AND%20(cd=",districtnum1,"%20OR%20cd=",districtnum2,")%20&$limit=50000&$offset=",i,sep = ""))
a<- length(url)
curl_download(url[a], "savedata/requestdis.csv",
quiet = TRUE, mode = "wb",
handle = new_handle())
requestdist <- read.csv("savedata/requestdis.csv")
if (nrow(requestdist)>1){
reqdistall<- rbind(reqdistall,
requestdist)
}else{
break
}
}
reqdistall_new <- merge(reqdistall, population, by.x = "CD", by.y = "CD", all.x = T)
reqdistall_new$CreatedDate <- mdy_hms(reqdistall_new$CreatedDate)
reqdistall_new$UpdatedDate <- mdy_hms(reqdistall_new$UpdatedDate)
reqdistall_new$duration <- as.numeric(reqdistall_new$UpdatedDate - reqdistall_new$CreatedDate, units="mins")
reqdistall_new
})
tab2solveddata <- reactive({
request <- tab2requestdata()
solved <- filter(request, Status=="Closed" & !is.na(ServiceDate))
# calculate solve time
solved$duration <- as.numeric(solved$UpdatedDate-solved$CreatedDate,units="mins")
solved <- filter(solved, duration >20)
solved
})
output$compare1 <- renderPlot({
data1 = tab2requestdata() %>%
group_by(CD,RequestType) %>%
dplyr::summarise(count = n())
data1$CD <- as.factor(data1$CD)
lev = levels(data1$CD)
lev = lev[c(2,1)]
data1$CD = factor(data1$CD, levels = lev)
#### ggplot for count comparison
ggplot(data1, aes(x=reorder(RequestType,count) , y = count,label = count, fill = factor(CD))) +
geom_bar(stat = "identity", position ="dodge") +
geom_text(position = position_dodge(0.9),size=2,hjust = -0.1) +
xlab("") +
ylab("mins") +
ggtitle(paste("Numer of Requests of CD",input$CD1,"and CD",input$CD2)) +
scale_fill_manual(values=c("#bdd7e7", "#6baed6"),
name="Council District") +
guides(fill = guide_legend(reverse = T)) +
theme_classic() +
theme(legend.position = "top",
axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank(),
axis.ticks.y=element_blank()
) +coord_flip()
})
output$compare2 <- renderPlot({
### ggplot for duration comparison
data2 <- tab2solveddata()%>%
group_by(CD,RequestType) %>%
dplyr::summarise(AvgDuration = sum(duration)/n())
data2$AvgDuration <- as.integer(data2$AvgDuration)
data2$CD <- as.factor(data2$CD)
lev = levels(data2$CD)
lev = lev[c(2,1)]
data2$CD = factor(data2$CD, levels = lev)
ggplot(data2, aes(x=reorder(RequestType,AvgDuration), y = AvgDuration/3600, fill = factor(CD),label = AvgDuration
)) +
geom_bar(stat = "identity",position = "dodge") +
geom_text(position = position_dodge(0.9),size=2,hjust = -0.1) +
xlab("") +
ylab("") +
ggtitle(paste("Avg Solving Time (Day) of CD",input$CD1,"and CD",input$CD2)) +
scale_fill_manual(values=c("#bdd7e7", "#6baed6"),
name="Council District") +
guides(fill = guide_legend(reverse = T)) +
theme_classic() +
theme(legend.position = "top",
axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank(),
axis.ticks.y=element_blank()
) +
coord_flip()
})
#--------------3.trend------------------------------
#------------------One District Data---------------
distdata <- reactive({
datea <- input$histDates[1]
dateb <- input$histDates[2]
districtnum <- input$oneDistrict
# ------------- load district Historical Data--------------
url = NULL
reqdistall <- NULL
for (i in seq(0, 1500000000000, 50000)){
options("scipen" = 20)
url <- append(url,paste("https://data.lacity.org/resource/ndkd-k878.csv?$select=createddate,updateddate,servicedate,status,requesttype,address,cd,longitude,latitude&$order=createddate%20DESC&$where=createddate%3E%20%27",datea,"%27%20AND%20createddate%3C%20%27",dateb,"%27%20AND%20cd=",districtnum,"&$limit=50000&$offset=",i,sep = ""))
a<- length(url)
curl_download(url[a], "savedata/requestdis.csv",
quiet = TRUE, mode = "wb",
handle = new_handle())
requestdist <- read.csv("savedata/requestdis.csv")
if (nrow(requestdist)>1){
reqdistall<- rbind(reqdistall,
requestdist)
}else{
break
}
}
reqdistall_new <- merge(reqdistall, population, by.x = "CD", by.y = "CD", all.x = T)
reqdistall_new$CreatedDate <- mdy_hms(reqdistall_new$CreatedDate)
reqdistall_new$UpdatedDate <- mdy_hms(reqdistall_new$UpdatedDate)
reqdistall_new$duration <- as.numeric(reqdistall_new$UpdatedDate - reqdistall_new$CreatedDate, units="mins")
# service type - color
# per week - count, duration
if(wday(input$histDates[1]) != 1){
new1 = 8 - wday(input$histDates[1])
datestart = input$histDates[1] + days(new1)
}else{
datestart = input$histDates[1]
}
if(wday(input$histDates[2]) != 7){
new2 = wday(input$histDates[2])
dateend = input$histDates[2] - days(new2)
}else{
dateend = input$histDates[2]
}
## weeks that in our calculation
weekcal = (dateend - datestart + 1) / 7
weekcal = as.numeric(weekcal)
reqdistall_new$CreateDate1 = paste(year(reqdistall_new$CreatedDate),
month(reqdistall_new$CreatedDate),
day(reqdistall_new$CreatedDate), sep = "-")
reqdistall_new$CreateDate1 = ymd(reqdistall_new$CreateDate1)
reqdistall_new$weeknum = ceiling(as.numeric((reqdistall_new$CreateDate1- datestart + 1)/7))
## filter data start from weeknum-1 to weeknum-weekcal
reqdistall_new = filter(reqdistall_new, weeknum >= 1, weeknum <= weekcal)
reqdistall_new$weeknum = factor(reqdistall_new$weeknum)
reqdistall_new})
## plot count by weeknum, color by RequestType
output$trend1 <- renderPlot({
## Groupped dataset
reqdistall_group = distdata() %>%
group_by(weeknum, RequestType) %>%
dplyr::summarise(count = n(), Avg_duration = mean(duration))
data <- reqdistall_group%>%
dplyr::filter(RequestType%in%input$serviceTypeAll)
if(wday(input$histDates[1]) != 1){
new1 = 8 - wday(input$histDates[1])
datestart = input$histDates[1] + days(new1)
}else{
datestart = input$histDates[1]
}
if(wday(input$histDates[2]) != 7){
new2 = wday(input$histDates[2])
dateend = input$histDates[2] - days(new2)
}else{
dateend = input$histDates[2]
}
datebreak = seq(datestart, dateend + days(1), by = "1 week")
ggplot(data, aes(x = weeknum, y = count, col = RequestType, linetype = RequestType, group = RequestType)) +
geom_point(size = 1) +
geom_line(size = 1) +
scale_x_discrete(labels = datebreak) +
scale_linetype_manual(values = c(rep("solid", 10), rep("dashed", 6))) +
scale_color_manual(values = c(brewer.pal(10, "Set3"), brewer.pal(6, "Set3")))+
xlab("The First Day of Week") +
ylab("Weekly Number of Requests") +
expand_limits(y = 0) +
ggtitle("Weekly Average Request Number vs. Request Type") +
theme_classic() +
theme(legend.position = "top")+
theme(axis.line.x = element_line(color="black", size = 0.5),
axis.line.y = element_line(color="black", size = 0.5))+
theme(axis.text.x = element_text(angle = 30, hjust = 1))+
theme(panel.grid.major.y=element_line(colour = "grey",size = 0.4))
})
## plot duration by weeknum, color by RequestType
output$trend2 <- renderPlot({
reqdistall_group = distdata() %>%
group_by(weeknum, RequestType) %>%
dplyr::summarise(count = n(), Avg_duration = mean(duration))
data <- reqdistall_group%>%
dplyr::filter(RequestType%in%input$serviceTypeAll)
if(wday(input$histDates[1]) != 1){
new1 = 8 - wday(input$histDates[1])
datestart = input$histDates[1] + days(new1)
}else{
datestart = input$histDates[1]
}
if(wday(input$histDates[2]) != 7){
new2 = wday(input$histDates[2])
dateend = input$histDates[2] - days(new2)
}else{
dateend = input$histDates[2]
}
datebreak = seq(datestart, dateend + days(1), by = "1 week")
ggplot(data, aes(x = weeknum, y = Avg_duration/3600, col = RequestType, linetype = RequestType, group = RequestType)) +
geom_point(size = 1) +
geom_line(size = 1) +
scale_x_discrete(labels = datebreak) +
scale_linetype_manual(values = c(rep("solid", 10), rep("dashed", 6))) +
scale_color_manual(values = c(brewer.pal(10, "Set3"), brewer.pal(6, "Set3")))+
xlab("The First Day of Week") +
ylab("Average Solving Time (Day)") +
expand_limits(y = 0) +
ggtitle("Weekly Average Solving Time vs. Request Type") +
theme_classic() +
theme(legend.position = "top")+
theme(axis.line.x = element_line(color="black", size = 0.5),
axis.line.y = element_line(color="black", size = 0.5))+
theme(axis.text.x = element_text(angle = 30, hjust = 1))+
theme(panel.grid.major.y=element_line(colour = "grey",size = 0.4))
})
output$trend3 <- renderPlot({
reqdistall_group = distdata() %>%
group_by(weeknum, RequestType) %>%
dplyr::summarise(count = n(), Avg_duration = mean(duration))
data1 <- reqdistall_group%>%
dplyr::filter(RequestType%in%input$serviceTypeAll)
reqdistall_group_sloved = distdata()%>%
filter(Status=="Closed")%>%
group_by(weeknum, RequestType) %>%
dplyr::summarise(solvedcount = n())
data2 <- reqdistall_group_sloved%>%
dplyr::filter(RequestType%in%input$serviceTypeAll)
data <- merge(data1,data2,by.x=c("weeknum", "RequestType"),by.y=c("weeknum", "RequestType"))
data$solvingRate <- data$solvedcount/data$count*100
if(wday(input$histDates[1]) != 1){
new1 = 8 - wday(input$histDates[1])
datestart = input$histDates[1] + days(new1)
}else{
datestart = input$histDates[1]
}
if(wday(input$histDates[2]) != 7){
new2 = wday(input$histDates[2])
dateend = input$histDates[2] - days(new2)
}else{
dateend = input$histDates[2]
}
datebreak = seq(datestart, dateend + days(1), by = "1 week")
ggplot(data, aes(x = weeknum, y = solvingRate, col = RequestType, linetype = RequestType, group = RequestType)) +
geom_point(size = 1) +
geom_line(size = 1) +
scale_x_discrete(labels = datebreak) +
scale_linetype_manual(values = c(rep("solid", 10), rep("dashed", 6))) +
scale_color_manual(values = c(brewer.pal(10, "Set3"), brewer.pal(6, "Set3")))+
xlab("The First Day of Week") +
ylab("Weekly Solving Rate (%)") +
coord_cartesian(ylim = c(50, 100))+
ggtitle("Weekly Solving Rate") +
theme_classic() +
theme(legend.position = "top")+
theme(axis.line.x = element_line(color="black", size = 0.5),
axis.line.y = element_line(color="black", size = 0.5))+
theme(axis.text.x = element_text(angle = 30, hjust = 1))+
theme(panel.grid.major.y=element_line(colour = "grey",size = 0.4))
})
options(show.error.messages = T)
#--------------4. tutorial------------------------------
output$howtouse <- renderUI({
})
})
| /shiny/server.R | no_license | dso545group3/Final-Project-Group-3 | R | false | false | 23,772 | r | library(shinydashboard)
library(leaflet)
library(RColorBrewer)
library(dplyr)
require(rgdal)
library(lubridate)
library(curl)
library(ggplot2)
library(stringr)
## read data
# Council Population data
population <- read.csv("data/CDpopu&area.csv")
# CD shapefile
cd <- readOGR("cdinfo/l.a. city council district (2012).shp")
#--------------------isolate & clean data-------------------
shinyServer(function(input, output,session){
  # --------------sidebar Hide Default-------
  # Collapse the dashboard sidebar on startup. NOTE(review): addClass is
  # presumably shinyjs::addClass, but shinyjs is not among the visible
  # library() calls -- verify it is loaded (e.g. via ui.R).
  addClass(selector = "body", class = "sidebar-collapse")
  # ----------------input---------------
  # Centered heading that echoes the currently selected request type.
  output$serviceText <- renderUI({
    h5(strong(paste("Requests of",input$serviceType)),align = "center")
  })
# ------download data--------------------
requestdata <- reactive({
date1 <- input$dates[1]
date2 <- input$dates[2]
url = NULL
request <- NULL
for (i in seq(0,1500000000000,50000)){
options("scipen"=20)
url <- append(url,paste("https://data.lacity.org/resource/ndkd-k878.csv?$select=createddate,updateddate,status,servicedate,closeddate,requesttype,address,cd,longitude,latitude&$order=createddate%20DESC&$where=createddate%3E%20%27",date1,"%27%20AND%20createddate%3C%20%27",date2,"%27&$limit=50000&$offset=",i,sep = ""))
a<- length(url)
curl_download(url[a], "savedata/request.csv",
quiet = TRUE, mode = "wb",
handle = new_handle())
requestone <- read.csv("savedata/request.csv")
if (nrow(requestone)>1){
request<- rbind(request,
requestone)
}else{
break
}}
request$CreatedDate <- mdy_hms(request$CreatedDate)
request$UpdatedDate <- mdy_hms(request$UpdatedDate)
return(request)
})
solveddata <- reactive({
request <- requestdata()
solved <- filter(request, Status=="Closed" & !is.na(ServiceDate))
# calculate solve time
solved$duration <- as.numeric(solved$UpdatedDate-solved$CreatedDate,units="mins")
solved <- filter(solved, duration >20)
solved
})
#---------------output----------------
#--------------1.Distribution-------------------
#------------------1.1 map--------------------
  # Make a list of icons. We'll index into it based on name.
  # Map-marker icons keyed by the short request-type codes assigned in
  # solved2data(); the map's addMarkers(icon = ~typeicon[type]) indexes it.
  # Note the "Graffiti" icon file is spelled "giraffiti.png" on disk.
  typeicon <- iconList(
    Graffiti = makeIcon("icon/giraffiti.png"),
    Bulky = makeIcon("icon/bulky.png"),
    Dump = makeIcon("icon/dump.png"),
    Appliance = makeIcon("icon/appliance.png"),
    Ewaste = makeIcon("icon/ewaste.png"),
    Rat = makeIcon("icon/rat.png"),
    Singlelight = makeIcon("icon/singlelight.png"),
    Multilight = makeIcon("icon/multilight.png"),
    Homeless = makeIcon("icon/homeless.png"))
  # summary data
  # Per-district summary for the selected request type: average solving time
  # (both as a "XdXHh"-style string and as a numeric minute count) and the
  # number of requests, plus a synthetic "Average" row used by the map popups
  # (row 16). Column names become "Ave. Solving Time" / "Num. of Requests".
  summarytable <- reactive({
    table <- solveddata()%>%
      filter(RequestType==input$serviceType)%>%
      group_by(CD)%>%
      dplyr::summarise(Duration=mean(duration, na.rm=T))
    table2 <- requestdata()%>%
      filter(RequestType==input$serviceType)%>%
      group_by(CD)%>%
      dplyr::summarise(Frequency = n())
    table <- merge(table, table2, by.x="CD",by.y="CD")
    table$Duration <- as.integer(table$Duration)
    table$DurationNum <- table$Duration
    # mean duration & duration String
    # seconds_to_period() needs seconds, hence the *60 on the minute counts;
    # str_sub keeps everything up to (and including) the hours digit.
    meanDuration <- as.integer(mean(table$Duration))
    meanDurStr <- as.character(seconds_to_period(meanDuration*60))
    meanDurStr <- str_sub(meanDurStr, start= 1, end=str_locate(meanDurStr,"H")[2]+1)
    # each duration string
    table$Duration <- as.character(seconds_to_period(table$Duration*60))
    table$Duration <- str_sub(table$Duration, start= 1,end=str_locate(table$Duration,"H")[2]+1)
    # add average row
    # rbind with a character vector coerces every column to character;
    # the numeric columns are converted back below.
    table <- rbind(table,c("Average",meanDurStr,as.integer(mean(table$Frequency)),meanDuration))
    # order of durationNum
    # Duration becomes an ordered factor sorted by the numeric minute value,
    # so the map's color scale ranks districts correctly.
    table$DurationNum <- as.numeric(table$DurationNum)
    table$Duration <- as.factor(table$Duration)
    table$Duration <- factor(table$Duration, ordered=T,
                             levels = unique(table[order(table$DurationNum),"Duration"]))
    table$Frequency <- as.numeric(table$Frequency)
    # order CD levels
    table$CD <- factor(table$CD, levels = c(1:15,"Average"))
    colnames(table)[2:3] <- c("Ave. Solving Time","Num. of Requests")
    table<-arrange(table,CD)
    table
  })
  # leaflet data
  # Per-district polygon layer: joins the request summary (minus its
  # "Average" row) and the population table onto the CD shapefile's
  # attribute table, then restores row order to match the polygon order
  # (row names 0-14 correspond to districts 1-15).
  cddata <- reactive({
    table <- summarytable()
    cdcount <- table[-16,]
    # merge() may reorder rows; the sort on the name factor below restores
    # the polygon order expected by the map.
    cd@data <- merge(cd@data, cdcount, by.x = "name", by.y = "CD", all.x = T, all.y = F)
    cd@data <- merge(cd@data, population,by.x = "name",by.y="CD",all.x=T,all.y=F)
    # cd@data$aveDurationNum <- cd@data$aveDuration
    # cd@data$aveDuration <- as.character(seconds_to_period(60*cd@data$aveDuration))
    # cd@data$aveDuration <- str_sub(cd@data$aveDuration, start= 1, end=str_locate(cd@data$aveDuration,"H")[2]+1)
    # cd@data$totalAveDurationNum <- cd@data$totalAveDuration
    # cd@data$totalAveDuration <- as.character(seconds_to_period(60*cd@data$totalAveDuration))
    # cd@data$totalAveDuration <- str_sub(cd@data$totalAveDuration, start= 1, end=str_locate(cd@data$totalAveDuration,"H")[2]+1)
    cd@data$name <- factor(cd@data$name, levels = c(1:15))
    cd@data <- cd@data[order(cd@data$name), ]
    rownames(cd@data) <- c(0:14)
    cd
  })
  # spatical data link location with icon
  # Builds a SpatialPointsDataFrame of solved requests (capped at 65,536
  # points) whose `type` column is a short code used to index `typeicon`;
  # the remaining columns feed the per-marker popup HTML in output$map.
  solved2data <- reactive({
    solved <- solveddata()
    solved <- filter(solved, !is.na(Longitude) & !is.na(Latitude))
    solved <- filter(solved, RequestType==input$serviceType)
    # threshold now: data max volumn -- 65536
    ## Scale the solved1 dataset if necessary
    # Cap the marker count; rows are already ordered createddate DESC by the
    # API query, so this keeps the most recent 65,536 records.
    ThresholdVery = dim(solved)[1]
    if (ThresholdVery <= 65536) {
      solved1 = solved
    } else {
      solved1 = solved[1:65536, ]
    }
    # duration is in minutes; seconds_to_period() needs seconds, hence *60.
    solved1$duration = round(solved1$duration)
    solved1$durationStr = seconds_to_period(60*solved1$duration)
    solved1 = dplyr::select(solved1, RequestType, Longitude, Latitude, CreatedDate, CD, Address, durationStr)
    solved1$CreatedDate = as.factor(solved1$CreatedDate)
    solved1$CD = as.factor(solved1$CD)
    solved1$Address = as.factor(solved1$Address)
    solved1$durationStr = as.character(solved1$durationStr)
    # Spatial data - solved2
    # Note: the coordinate matrix carries four columns (lng, lat, type, CD);
    # leaflet only reads columns 1-2 via solved2@coords[, 1:2].
    solved2 <- sp::SpatialPointsDataFrame(
      cbind(
        solved1[,"Longitude"], # lng
        solved1[,"Latitude"], # lat
        solved1[,"RequestType"], # RequestType
        solved1[,"CD"] # CD
      ),
      # Collapse the long request-type names into the short icon keys;
      # anything unmatched falls through to "Homeless".
      data.frame(type = factor(
        ifelse(solved1$RequestType == "Graffiti Removal", "Graffiti",
        ifelse(solved1$RequestType == "Bulky Items", "Bulky",
        ifelse(solved1$RequestType == "Illegal Dumping Pickup", "Dump",
        ifelse(solved1$RequestType == "Metal/Household Appliances", "Appliance",
        ifelse(solved1$RequestType == "Electronic Waste", "Ewaste",
        ifelse(solved1$RequestType == "Dead Animal Removal", "Rat",
        ifelse(solved1$RequestType == "Single Streetlight Issue", "Singlelight",
        ifelse(solved1$RequestType == "Multiple Streetlight Issue", "Multilight", "Homeless")))))))),
        c("Graffiti", "Bulky", "Dump", "Appliance", "Ewaste", "Rat", "Singlelight", "Multilight", "Homeless")
      ))
    )
    # add more features to the solved2 spatial dataset
    # (columns 2-6 of @data, read positionally by output$map's popup builder)
    solved2@data$requestType = solved1$RequestType
    solved2@data$CD = solved1$CD
    solved2@data$Address = solved1$Address
    solved2@data$durationStr = solved1$durationStr
    solved2@data$CreatedDate = solved1$CreatedDate
    solved2
  })
# leaflet: draw everything
output$map <- renderLeaflet({
solved2 <-solved2data()
cd <- cddata()
# district pop-up
content <- NULL
for (i in c(10:15, 1:9)) {
content <- append(content, paste(sep = "<br/>",
paste("<b><a><font color = 'Grey'>", "CD Number: ", "</font>", as.numeric(cd@polygons[[i]]@ID)+1, "</a ></b>"),
paste("<b><a><font color = 'Grey'>", "Population: ", "</font>", as.numeric(cd@data$Population[i]), "</a ></b>"),
paste("<b><a><font color = 'Grey'>", "Area: ", "</font>", cd@data$Area..Sq.Mi.[i],"<font color = 'Grey'>", "square miles", "</font>","</a ></b>"),
paste("<b><a><font color = 'Grey'>", "Population Density: ", "</font>", as.integer(cd@data$Density[i]), "<font color = 'Grey'>", "per sq. mi", "</font>","</a ></b>"),
paste("<b><a><font color = 'Grey'>", "District Avg. Solving Time: ", "</font>", cd@data$`Ave. Solving Time`[i], "</a ></b>"),
paste("<b><a><font color = 'Grey'>", "Overall Avg. Solving Time: ", "</font>", summarytable()$`Ave. Solving Time`[16],"</a ></b>")
))
}
## build the html popup for solved2
contentSol <- paste(sep = "<br/>",
paste("<b><a><font color = 'Grey'>", "Request Type: ", "</font>", as.character(solved2@data[, 2]), "</a ></b>"),
paste("<b><a><font color = 'Grey'>", "CD Number: ", "</font>", as.character(solved2@data[, 3]), "</a ></b>"),
paste("<b><a><font color = 'Grey'>", "Address: ", "</font>", as.character(solved2@data[, 4]), "</a ></b>"),
paste("<b><a><font color = 'Grey'>", "Created Date: ", "</font>", as.character(solved2@data[, 6]), "</a ></b>"),
paste("<b><a><font color = 'Grey'>", "Processing Time: ", "</font>", as.character(solved2@data[, 5]), "</a ></b>"))
leaflet(solved2) %>%
addProviderTiles(provider = "CartoDB.Positron") %>%
setView(lng = -118.4, lat = 34.09, zoom = 10) %>%
addPolygons(data = cd, opacity = 0.3, fillOpacity = 0.5,
stroke = T, weight = 1, popup = content,
color =~ colorNumeric("OrRd", Density)(Density)[c(10:15,1:9)],group="Solving Time")%>%
addPolygons(data = cd, opacity = 0.3, fillOpacity = 0.5,
stroke = T, weight = 1, popup = content,
color =~ colorNumeric("OrRd", DurationNum)(DurationNum)[c(10:15,1:9)],group="Pop Density")%>%
addCircleMarkers(lng = solved2@coords[, 1], lat = solved2@coords[, 2],
color = "#d95f0e",radius = 3,
stroke = FALSE, fillOpacity = 0.3,
group = "Show All")%>%
addMarkers(lng = solved2@coords[, 1], lat = solved2@coords[, 2], icon = ~typeicon[type],
clusterOptions = markerClusterOptions(),
popup = paste(contentSol),group = "Cluster") %>%
# addLegend("bottomleft", pal=pal, values=colorData, title=colorBy,
# layerId="colorLegend")%>%
addLayersControl(
baseGroups = c("Pop Density","Solving Time"),
overlayGroups = c("Show All","Cluster"),
options = layersControlOptions(collapsed = F))%>%
hideGroup("Cluster")
})
# ------------ 1.2 Performance Table------------
output$requestPerform <- renderDataTable({
table <- summarytable()[,c(1:3)]
table},
options = list(searching = FALSE,paging = FALSE))
#--------------2.compare--------------------
tab2requestdata <- reactive({
districtnum1 <- input$CD1
districtnum2 <- input$CD2
# districtnum1 <- 1
# districtnum2 <- 2
# date1 <- "2016-08-22"
# date2 <- "2016-09-21"
date1 <- input$tab2date[1]
date2 <- input$tab2date[2]
url = NULL
reqdistall <- NULL
for (i in seq(0, 1500000, 50000)){
options("scipen" = 20)
url <- append(url,paste("https://data.lacity.org/resource/ndkd-k878.csv?$select=createddate,status,updateddate,servicedate,closeddate,requesttype,address,cd,longitude,latitude&$order=createddate%20DESC&$where=createddate%3E%20%27",date1,"%27%20AND%20createddate%3C%20%27",date2,"%27%20AND%20(cd=",districtnum1,"%20OR%20cd=",districtnum2,")%20&$limit=50000&$offset=",i,sep = ""))
a<- length(url)
curl_download(url[a], "savedata/requestdis.csv",
quiet = TRUE, mode = "wb",
handle = new_handle())
requestdist <- read.csv("savedata/requestdis.csv")
if (nrow(requestdist)>1){
reqdistall<- rbind(reqdistall,
requestdist)
}else{
break
}
}
reqdistall_new <- merge(reqdistall, population, by.x = "CD", by.y = "CD", all.x = T)
reqdistall_new$CreatedDate <- mdy_hms(reqdistall_new$CreatedDate)
reqdistall_new$UpdatedDate <- mdy_hms(reqdistall_new$UpdatedDate)
reqdistall_new$duration <- as.numeric(reqdistall_new$UpdatedDate - reqdistall_new$CreatedDate, units="mins")
reqdistall_new
})
tab2solveddata <- reactive({
request <- tab2requestdata()
solved <- filter(request, Status=="Closed" & !is.na(ServiceDate))
# calculate solve time
solved$duration <- as.numeric(solved$UpdatedDate-solved$CreatedDate,units="mins")
solved <- filter(solved, duration >20)
solved
})
output$compare1 <- renderPlot({
data1 = tab2requestdata() %>%
group_by(CD,RequestType) %>%
dplyr::summarise(count = n())
data1$CD <- as.factor(data1$CD)
lev = levels(data1$CD)
lev = lev[c(2,1)]
data1$CD = factor(data1$CD, levels = lev)
#### ggplot for count comparison
ggplot(data1, aes(x=reorder(RequestType,count) , y = count,label = count, fill = factor(CD))) +
geom_bar(stat = "identity", position ="dodge") +
geom_text(position = position_dodge(0.9),size=2,hjust = -0.1) +
xlab("") +
ylab("mins") +
ggtitle(paste("Numer of Requests of CD",input$CD1,"and CD",input$CD2)) +
scale_fill_manual(values=c("#bdd7e7", "#6baed6"),
name="Council District") +
guides(fill = guide_legend(reverse = T)) +
theme_classic() +
theme(legend.position = "top",
axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank(),
axis.ticks.y=element_blank()
) +coord_flip()
})
output$compare2 <- renderPlot({
### ggplot for duration comparison
data2 <- tab2solveddata()%>%
group_by(CD,RequestType) %>%
dplyr::summarise(AvgDuration = sum(duration)/n())
data2$AvgDuration <- as.integer(data2$AvgDuration)
data2$CD <- as.factor(data2$CD)
lev = levels(data2$CD)
lev = lev[c(2,1)]
data2$CD = factor(data2$CD, levels = lev)
ggplot(data2, aes(x=reorder(RequestType,AvgDuration), y = AvgDuration/3600, fill = factor(CD),label = AvgDuration
)) +
geom_bar(stat = "identity",position = "dodge") +
geom_text(position = position_dodge(0.9),size=2,hjust = -0.1) +
xlab("") +
ylab("") +
ggtitle(paste("Avg Solving Time (Day) of CD",input$CD1,"and CD",input$CD2)) +
scale_fill_manual(values=c("#bdd7e7", "#6baed6"),
name="Council District") +
guides(fill = guide_legend(reverse = T)) +
theme_classic() +
theme(legend.position = "top",
axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank(),
axis.ticks.y=element_blank()
) +
coord_flip()
})
#--------------3.trend------------------------------
#------------------One District Data---------------
distdata <- reactive({
datea <- input$histDates[1]
dateb <- input$histDates[2]
districtnum <- input$oneDistrict
# ------------- load district Historical Data--------------
url = NULL
reqdistall <- NULL
for (i in seq(0, 1500000000000, 50000)){
options("scipen" = 20)
url <- append(url,paste("https://data.lacity.org/resource/ndkd-k878.csv?$select=createddate,updateddate,servicedate,status,requesttype,address,cd,longitude,latitude&$order=createddate%20DESC&$where=createddate%3E%20%27",datea,"%27%20AND%20createddate%3C%20%27",dateb,"%27%20AND%20cd=",districtnum,"&$limit=50000&$offset=",i,sep = ""))
a<- length(url)
curl_download(url[a], "savedata/requestdis.csv",
quiet = TRUE, mode = "wb",
handle = new_handle())
requestdist <- read.csv("savedata/requestdis.csv")
if (nrow(requestdist)>1){
reqdistall<- rbind(reqdistall,
requestdist)
}else{
break
}
}
reqdistall_new <- merge(reqdistall, population, by.x = "CD", by.y = "CD", all.x = T)
reqdistall_new$CreatedDate <- mdy_hms(reqdistall_new$CreatedDate)
reqdistall_new$UpdatedDate <- mdy_hms(reqdistall_new$UpdatedDate)
reqdistall_new$duration <- as.numeric(reqdistall_new$UpdatedDate - reqdistall_new$CreatedDate, units="mins")
# service type - color
# per week - count, duration
if(wday(input$histDates[1]) != 1){
new1 = 8 - wday(input$histDates[1])
datestart = input$histDates[1] + days(new1)
}else{
datestart = input$histDates[1]
}
if(wday(input$histDates[2]) != 7){
new2 = wday(input$histDates[2])
dateend = input$histDates[2] - days(new2)
}else{
dateend = input$histDates[2]
}
## weeks that in our calculation
weekcal = (dateend - datestart + 1) / 7
weekcal = as.numeric(weekcal)
reqdistall_new$CreateDate1 = paste(year(reqdistall_new$CreatedDate),
month(reqdistall_new$CreatedDate),
day(reqdistall_new$CreatedDate), sep = "-")
reqdistall_new$CreateDate1 = ymd(reqdistall_new$CreateDate1)
reqdistall_new$weeknum = ceiling(as.numeric((reqdistall_new$CreateDate1- datestart + 1)/7))
## filter data start from weeknum-1 to weeknum-weekcal
reqdistall_new = filter(reqdistall_new, weeknum >= 1, weeknum <= weekcal)
reqdistall_new$weeknum = factor(reqdistall_new$weeknum)
reqdistall_new})
## plot count by weeknum, color by RequestType
output$trend1 <- renderPlot({
## Groupped dataset
reqdistall_group = distdata() %>%
group_by(weeknum, RequestType) %>%
dplyr::summarise(count = n(), Avg_duration = mean(duration))
data <- reqdistall_group%>%
dplyr::filter(RequestType%in%input$serviceTypeAll)
if(wday(input$histDates[1]) != 1){
new1 = 8 - wday(input$histDates[1])
datestart = input$histDates[1] + days(new1)
}else{
datestart = input$histDates[1]
}
if(wday(input$histDates[2]) != 7){
new2 = wday(input$histDates[2])
dateend = input$histDates[2] - days(new2)
}else{
dateend = input$histDates[2]
}
datebreak = seq(datestart, dateend + days(1), by = "1 week")
ggplot(data, aes(x = weeknum, y = count, col = RequestType, linetype = RequestType, group = RequestType)) +
geom_point(size = 1) +
geom_line(size = 1) +
scale_x_discrete(labels = datebreak) +
scale_linetype_manual(values = c(rep("solid", 10), rep("dashed", 6))) +
scale_color_manual(values = c(brewer.pal(10, "Set3"), brewer.pal(6, "Set3")))+
xlab("The First Day of Week") +
ylab("Weekly Number of Requests") +
expand_limits(y = 0) +
ggtitle("Weekly Average Request Number vs. Request Type") +
theme_classic() +
theme(legend.position = "top")+
theme(axis.line.x = element_line(color="black", size = 0.5),
axis.line.y = element_line(color="black", size = 0.5))+
theme(axis.text.x = element_text(angle = 30, hjust = 1))+
theme(panel.grid.major.y=element_line(colour = "grey",size = 0.4))
})
## plot duration by weeknum, color by RequestType
output$trend2 <- renderPlot({
reqdistall_group = distdata() %>%
group_by(weeknum, RequestType) %>%
dplyr::summarise(count = n(), Avg_duration = mean(duration))
data <- reqdistall_group%>%
dplyr::filter(RequestType%in%input$serviceTypeAll)
if(wday(input$histDates[1]) != 1){
new1 = 8 - wday(input$histDates[1])
datestart = input$histDates[1] + days(new1)
}else{
datestart = input$histDates[1]
}
if(wday(input$histDates[2]) != 7){
new2 = wday(input$histDates[2])
dateend = input$histDates[2] - days(new2)
}else{
dateend = input$histDates[2]
}
datebreak = seq(datestart, dateend + days(1), by = "1 week")
ggplot(data, aes(x = weeknum, y = Avg_duration/3600, col = RequestType, linetype = RequestType, group = RequestType)) +
geom_point(size = 1) +
geom_line(size = 1) +
scale_x_discrete(labels = datebreak) +
scale_linetype_manual(values = c(rep("solid", 10), rep("dashed", 6))) +
scale_color_manual(values = c(brewer.pal(10, "Set3"), brewer.pal(6, "Set3")))+
xlab("The First Day of Week") +
ylab("Average Solving Time (Day)") +
expand_limits(y = 0) +
ggtitle("Weekly Average Solving Time vs. Request Type") +
theme_classic() +
theme(legend.position = "top")+
theme(axis.line.x = element_line(color="black", size = 0.5),
axis.line.y = element_line(color="black", size = 0.5))+
theme(axis.text.x = element_text(angle = 30, hjust = 1))+
theme(panel.grid.major.y=element_line(colour = "grey",size = 0.4))
})
output$trend3 <- renderPlot({
reqdistall_group = distdata() %>%
group_by(weeknum, RequestType) %>%
dplyr::summarise(count = n(), Avg_duration = mean(duration))
data1 <- reqdistall_group%>%
dplyr::filter(RequestType%in%input$serviceTypeAll)
reqdistall_group_sloved = distdata()%>%
filter(Status=="Closed")%>%
group_by(weeknum, RequestType) %>%
dplyr::summarise(solvedcount = n())
data2 <- reqdistall_group_sloved%>%
dplyr::filter(RequestType%in%input$serviceTypeAll)
data <- merge(data1,data2,by.x=c("weeknum", "RequestType"),by.y=c("weeknum", "RequestType"))
data$solvingRate <- data$solvedcount/data$count*100
if(wday(input$histDates[1]) != 1){
new1 = 8 - wday(input$histDates[1])
datestart = input$histDates[1] + days(new1)
}else{
datestart = input$histDates[1]
}
if(wday(input$histDates[2]) != 7){
new2 = wday(input$histDates[2])
dateend = input$histDates[2] - days(new2)
}else{
dateend = input$histDates[2]
}
datebreak = seq(datestart, dateend + days(1), by = "1 week")
ggplot(data, aes(x = weeknum, y = solvingRate, col = RequestType, linetype = RequestType, group = RequestType)) +
geom_point(size = 1) +
geom_line(size = 1) +
scale_x_discrete(labels = datebreak) +
scale_linetype_manual(values = c(rep("solid", 10), rep("dashed", 6))) +
scale_color_manual(values = c(brewer.pal(10, "Set3"), brewer.pal(6, "Set3")))+
xlab("The First Day of Week") +
ylab("Weekly Solving Rate (%)") +
coord_cartesian(ylim = c(50, 100))+
ggtitle("Weekly Solving Rate") +
theme_classic() +
theme(legend.position = "top")+
theme(axis.line.x = element_line(color="black", size = 0.5),
axis.line.y = element_line(color="black", size = 0.5))+
theme(axis.text.x = element_text(angle = 30, hjust = 1))+
theme(panel.grid.major.y=element_line(colour = "grey",size = 0.4))
})
options(show.error.messages = T)
#--------------4. tutorial------------------------------
output$howtouse <- renderUI({
})
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MiscFuns.R
\name{vcov.fixest}
\alias{vcov.fixest}
\title{Extracts the variance/covariance of a \code{femlm} fit}
\usage{
\method{vcov}{fixest}(
object,
se,
cluster,
dof = getFixest_dof(),
attr = FALSE,
forceCovariance = FALSE,
keepBounded = FALSE,
...
)
}
\arguments{
\item{object}{A \code{fixest} object. Obtained using the functions \code{\link[fixest]{femlm}}, \code{\link[fixest]{feols}} or \code{\link[fixest]{feglm}}.}
\item{se}{Character scalar. Which kind of standard error should be computed: \dQuote{standard}, \dQuote{hetero}, \dQuote{cluster}, \dQuote{twoway}, \dQuote{threeway} or \dQuote{fourway}? By default if there are clusters in the estimation: \code{se = "cluster"}, otherwise \code{se = "standard"}. Note that this argument can be implicitly deduced from the argument \code{cluster}.}
\item{cluster}{Tells how to cluster the standard-errors (if clustering is requested). Can be either a list of vectors, a character vector of variable names, a formula or an integer vector. Assume we want to perform 2-way clustering over \code{var1} and \code{var2} contained in the data.frame \code{base} used for the estimation. All the following \code{cluster} arguments are valid and do the same thing: \code{cluster = base[, c("var1", "var2")]}, \code{cluster = c("var1", "var2")}, \code{cluster = ~var1+var2}. If the two variables were used as clusters in the estimation, you could further use \code{cluster = 1:2} or leave it blank with \code{se = "twoway"} (assuming \code{var1} [resp. \code{var2}] was the 1st [resp. 2nd] cluster).}
\item{dof}{An object of class \code{dof.type} obtained with the function \code{\link[fixest]{dof}}. Represents how the degree of freedom correction should be done. You must use the function \code{\link[fixest]{dof}} for this argument. The arguments and defaults of the function \code{\link[fixest]{dof}} are: \code{adj = TRUE}, \code{fixef.K="nested"}, \code{cluster.adj = TRUE}, \code{cluster.df = "conventional"}, \code{t.df = "conventional"}, \code{fixef.force_exact=FALSE}). See the help of the function \code{\link[fixest]{dof}} for details.}
\item{attr}{Logical, defaults to \code{FALSE}. Whether to include the attributes describing how the VCOV was computed.}
\item{forceCovariance}{(Advanced users.) Logical, default is \code{FALSE}. In the peculiar case where the obtained Hessian is not invertible (usually because of collinearity of some variables), use this option to force the covariance matrix, by using a generalized inverse of the Hessian. This can be useful to spot where possible problems come from.}
\item{keepBounded}{(Advanced users -- \code{feNmlm} with non-linear part and bounded coefficients only.) Logical, default is \code{FALSE}. If \code{TRUE}, then the bounded coefficients (if any) are treated as unrestricted coefficients and their S.E. is computed (otherwise it is not).}
\item{...}{Other arguments to be passed to \code{\link[fixest]{summary.fixest}}.
The computation of the VCOV matrix is first done in \code{\link[fixest]{summary.fixest}}.}
}
\value{
It returns a \eqn{N\times N} square matrix where \eqn{N} is the number of variables of the fitted model.
This matrix has an attribute \dQuote{type} specifying how this variance/covariance matrix has been computed (i.e. if it was created using a heteroskedasticity-robust correction, or if it was clustered along a specific factor, etc).
}
\description{
This function extracts the variance-covariance of estimated parameters from a model estimated with \code{\link[fixest]{femlm}}, \code{\link[fixest]{feols}} or \code{\link[fixest]{feglm}}.
}
\details{
For an explanation on how the standard-errors are computed and what is the exact meaning of the arguments, please have a look at the dedicated vignette: \href{https://cran.r-project.org/package=fixest/vignettes/standard_errors.html}{On standard-errors}.
}
\examples{
# Load trade data
data(trade)
# We estimate the effect of distance on trade (with 3 fixed-effects)
est_pois = femlm(Euros ~ log(dist_km) + log(Year) | Origin + Destination +
Product, trade)
# By default, in the presence of FEs
# the VCOV is clustered along the first FE
vcov(est_pois)
# Heteroskedasticity-robust VCOV
vcov(est_pois, se = "hetero")
# "clustered" VCOV (with respect to the Product factor)
vcov(est_pois, se = "cluster", cluster = trade$Product)
# another way to make the same request:
# note that previously arg. se was optional since deduced from arg. cluster
vcov(est_pois, cluster = "Product")
# yet another way:
vcov(est_pois, cluster = ~Product)
# Another estimation without fixed-effects:
est_pois_simple = femlm(Euros ~ log(dist_km) + log(Year), trade)
# We can still get the clustered VCOV,
# but we need to give the argument cluster:
vcov(est_pois_simple, cluster = ~Product)
}
\seealso{
See also the main estimation functions \code{\link[fixest]{femlm}}, \code{\link[fixest]{feols}} or \code{\link[fixest]{feglm}}. \code{\link[fixest]{summary.fixest}}, \code{\link[fixest]{confint.fixest}}, \code{\link[fixest]{resid.fixest}}, \code{\link[fixest]{predict.fixest}}, \code{\link[fixest]{fixef.fixest}}.
}
\author{
Laurent Berge
}
| /man/vcov.fixest.Rd | no_license | sleeubc/fixest | R | false | true | 5,232 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MiscFuns.R
\name{vcov.fixest}
\alias{vcov.fixest}
\title{Extracts the variance/covariance of a \code{femlm} fit}
\usage{
\method{vcov}{fixest}(
object,
se,
cluster,
dof = getFixest_dof(),
attr = FALSE,
forceCovariance = FALSE,
keepBounded = FALSE,
...
)
}
\arguments{
\item{object}{A \code{fixest} object. Obtained using the functions \code{\link[fixest]{femlm}}, \code{\link[fixest]{feols}} or \code{\link[fixest]{feglm}}.}
\item{se}{Character scalar. Which kind of standard error should be computed: \dQuote{standard}, \dQuote{hetero}, \dQuote{cluster}, \dQuote{twoway}, \dQuote{threeway} or \dQuote{fourway}? By default if there are clusters in the estimation: \code{se = "cluster"}, otherwise \code{se = "standard"}. Note that this argument can be implicitly deduced from the argument \code{cluster}.}
\item{cluster}{Tells how to cluster the standard-errors (if clustering is requested). Can be either a list of vectors, a character vector of variable names, a formula or an integer vector. Assume we want to perform 2-way clustering over \code{var1} and \code{var2} contained in the data.frame \code{base} used for the estimation. All the following \code{cluster} arguments are valid and do the same thing: \code{cluster = base[, c("var1", "var2")]}, \code{cluster = c("var1", "var2")}, \code{cluster = ~var1+var2}. If the two variables were used as clusters in the estimation, you could further use \code{cluster = 1:2} or leave it blank with \code{se = "twoway"} (assuming \code{var1} [resp. \code{var2}] was the 1st [resp. 2nd] cluster).}
\item{dof}{An object of class \code{dof.type} obtained with the function \code{\link[fixest]{dof}}. Represents how the degree of freedom correction should be done. You must use the function \code{\link[fixest]{dof}} for this argument. The arguments and defaults of the function \code{\link[fixest]{dof}} are: \code{adj = TRUE}, \code{fixef.K="nested"}, \code{cluster.adj = TRUE}, \code{cluster.df = "conventional"}, \code{t.df = "conventional"}, \code{fixef.force_exact=FALSE}). See the help of the function \code{\link[fixest]{dof}} for details.}
\item{attr}{Logical, defaults to \code{FALSE}. Whether to include the attributes describing how the VCOV was computed.}
\item{forceCovariance}{(Advanced users.) Logical, default is \code{FALSE}. In the peculiar case where the obtained Hessian is not invertible (usually because of collinearity of some variables), use this option to force the covariance matrix, by using a generalized inverse of the Hessian. This can be useful to spot where possible problems come from.}
\item{keepBounded}{(Advanced users -- \code{feNmlm} with non-linear part and bounded coefficients only.) Logical, default is \code{FALSE}. If \code{TRUE}, then the bounded coefficients (if any) are treated as unrestricted coefficients and their S.E. is computed (otherwise it is not).}
\item{...}{Other arguments to be passed to \code{\link[fixest]{summary.fixest}}.
The computation of the VCOV matrix is first done in \code{\link[fixest]{summary.fixest}}.}
}
\value{
It returns a \eqn{N\times N} square matrix where \eqn{N} is the number of variables of the fitted model.
This matrix has an attribute \dQuote{type} specifying how this variance/covariance matrix has been computed (i.e. if it was created using a heteroskedasticity-robust correction, or if it was clustered along a specific factor, etc).
}
\description{
This function extracts the variance-covariance of estimated parameters from a model estimated with \code{\link[fixest]{femlm}}, \code{\link[fixest]{feols}} or \code{\link[fixest]{feglm}}.
}
\details{
For an explanation on how the standard-errors are computed and what is the exact meaning of the arguments, please have a look at the dedicated vignette: \href{https://cran.r-project.org/package=fixest/vignettes/standard_errors.html}{On standard-errors}.
}
\examples{
# Load trade data
data(trade)
# We estimate the effect of distance on trade (with 3 fixed-effects)
est_pois = femlm(Euros ~ log(dist_km) + log(Year) | Origin + Destination +
Product, trade)
# By default, in the presence of FEs
# the VCOV is clustered along the first FE
vcov(est_pois)
# Heteroskedasticity-robust VCOV
vcov(est_pois, se = "hetero")
# "clustered" VCOV (with respect to the Product factor)
vcov(est_pois, se = "cluster", cluster = trade$Product)
# another way to make the same request:
# note that previously arg. se was optional since deduced from arg. cluster
vcov(est_pois, cluster = "Product")
# yet another way:
vcov(est_pois, cluster = ~Product)
# Another estimation without fixed-effects:
est_pois_simple = femlm(Euros ~ log(dist_km) + log(Year), trade)
# We can still get the clustered VCOV,
# but we need to give the argument cluster:
vcov(est_pois_simple, cluster = ~Product)
}
\seealso{
See also the main estimation functions \code{\link[fixest]{femlm}}, \code{\link[fixest]{feols}} or \code{\link[fixest]{feglm}}. \code{\link[fixest]{summary.fixest}}, \code{\link[fixest]{confint.fixest}}, \code{\link[fixest]{resid.fixest}}, \code{\link[fixest]{predict.fixest}}, \code{\link[fixest]{fixef.fixest}}.
}
\author{
Laurent Berge
}
|
\name{print.regtest.rma}
\alias{print.regtest.rma}
\title{Print Method for regtest.rma Objects}
\description{
Print method for objects of class \code{"regtest.rma"}.
}
\usage{
\method{print}{regtest.rma}(x, digits=x$digits, \dots)
}
\arguments{
\item{x}{an object of class \code{"regtest.rma"}.}
\item{digits}{an integer specifying the number of decimal places to which the printed results should be rounded (the default is to take the value from the object).}
\item{\dots}{other arguments.}
}
\details{
The output includes:
\itemize{
\item the model used for the regression test
\item the predictor used for the regression test
\item the value of the test statistic for the test that the predictor is unrelated to the outcomes
\item the degrees of freedom of the test statistic (only if the test statistic follows a t-distribution)
\item the p-value for the test statistic
}
}
\value{
The function does not return an object.
}
\author{Wolfgang Viechtbauer; \email{wvb@www.wvbauer.com}; \url{http://www.wvbauer.com/}}
\seealso{
\code{\link{regtest.rma}}
}
\keyword{print}
| /R/metafor/metafor/man/print.regtest.rma.Rd | no_license | HocineTighi/OpenMeta-analyst- | R | false | false | 1,089 | rd | \name{print.regtest.rma}
\alias{print.regtest.rma}
\title{Print Method for regtest.rma Objects}
\description{
Print method for objects of class \code{"regtest.rma"}.
}
\usage{
\method{print}{regtest.rma}(x, digits=x$digits, \dots)
}
\arguments{
\item{x}{an object of class \code{"regtest.rma"}.}
\item{digits}{an integer specifying the number of decimal places to which the printed results should be rounded (the default is to take the value from the object).}
\item{\dots}{other arguments.}
}
\details{
The output includes:
\itemize{
\item the model used for the regression test
\item the predictor used for the regression test
\item the value of the test statistic for the test that the predictor is unrelated to the outcomes
\item the degrees of freedom of the test statistic (only if the test statistic follows a t-distribution)
\item the p-value for the test statistic
}
}
\value{
The function does not return an object.
}
\author{Wolfgang Viechtbauer; \email{wvb@www.wvbauer.com}; \url{http://www.wvbauer.com/}}
\seealso{
\code{\link{regtest.rma}}
}
\keyword{print}
|
#' Parameters
# Resolve platform-specific input/output directories
# (Windows development box vs. Linux compute server).
if (Sys.info()["sysname"] == "Windows") {
  loc_in  <- "C:/Git/Kaggle_BNP/Data/Data/Derive"
  loc_out <- "C:/Git/Kaggle_BNP/Data/Anly/S04"
} else {
  loc_in  <- "/home/acalatroni/Kaggle_BNP/Data/Derive"
  loc_out <- "/home/acalatroni/Kaggle_BNP/Anly/S04"
}

#' Packages
# NOTE: the original script called pacman::p_load(pacman) (pacman loading
# itself) just so the later p_load() calls could be unqualified; removed the
# redundant self-load and qualified the calls instead.
pacman::p_load(readr, dplyr)
pacman::p_load(h2o, h2oEnsemble)

#' Start h2o
# Local H2O cluster using all available cores; clear any leftover frames.
h2o.init(nthreads = -1)
h2o.removeAll()

#' Import CSV files
train <- read.csv(paste0(loc_in, "/train.csv"))
test  <- read.csv(paste0(loc_in, "/test.csv"))

#' Push the data into the H2O cluster
train_h2o <- as.h2o(train, destination_frame = "train.hex")
test_h2o  <- as.h2o(test, destination_frame = "test.hex")

#' Setup
# Response column, predictor columns (everything except the 1st column [ID]
# and the response), and the model family.
y <- "target"
x <- setdiff(names(train_h2o[, -1]), y)
family <- "binomial"

#' Specify the base learners & the metalearner
# The h2o.* wrapper functions referenced below are defined in _base_learners.R.
source(paste0(loc_out, "/", "_base_learners.R"))
learner <- c("h2o.glm.1", "h2o.glm.2", "h2o.glm.3"
             # Additional candidate base learners, currently disabled:
             # "h2o.rf.11","h2o.rf.12","h2o.rf.13",
             # "h2o.rf.21","h2o.rf.22","h2o.rf.23",
             # "h2o.rf.31","h2o.rf.32","h2o.rf.33",
             # "h2o.gbm.11","h2o.gbm.12",
             # "h2o.gbm.21","h2o.gbm.22",
             # "h2o.deeplearning.1","h2o.deeplearning.2","h2o.deeplearning.3",
             # "h2o.deeplearning.4","h2o.deeplearning.5","h2o.deeplearning.6",
             # "h2o.deeplearning.7"
             )
metalearner <- "h2o.deeplearning.wrapper"

#' Ensemble training (5-fold cross-validation for the level-one data)
# FIX: previously the literal "binomial" was passed here, silently ignoring
# the `family` variable defined above; now the variable is used.
fit <- h2o.ensemble(x = x,
                    y = y,
                    training_frame = train_h2o,
                    family = family,
                    learner = learner,
                    metalearner = metalearner,
                    cvControl = list(V = 5))

#' Predict on the test set and write the submission file
p <- predict.h2o.ensemble(fit, test_h2o)
p1 <- as.vector(p$pred[, "p1"])  # predicted probability of class 1
submission <- data.frame(ID = test$ID, PredictedProb = p1)
write_csv(submission, paste0(loc_out, "/submission.csv"))

#' All done, shutdown H2O
h2o.shutdown(prompt = FALSE)
h2o.shutdown(prompt=FALSE)
| /Anly/S02/h2oe.R | no_license | agstn/Kaggle_BNP | R | false | false | 1,938 | r | #' Parameters
if (Sys.info()['sysname']=="Windows") {
loc_in <- "C:/Git/Kaggle_BNP/Data/Data/Derive"
loc_out <- "C:/Git/Kaggle_BNP/Data/Anly/S04"
} else {
loc_in <- "/home/acalatroni/Kaggle_BNP/Data/Derive"
loc_out <- "/home/acalatroni/Kaggle_BNP/Anly/S04"
}
#' Packages
pacman::p_load(pacman)
p_load(readr,dplyr)
p_load(h2o,h2oEnsemble)
#' Start h2o
h2o.init(nthreads=-1)
h2o.removeAll()
#' Import RDS files
train <- read.csv(paste0(loc_in,"/train.csv"))
test <- read.csv(paste0(loc_in,"/test.csv"))
#' Import Data
train_h2o <- as.h2o(train, destination_frame = "train.hex")
test_h2o <- as.h2o(test, destination_frame = "test.hex")
#' Setup
y <- "target"
x <- setdiff(names(train_h2o[,-1]), y)
family <- "binomial"
#' Specify the base learner & the metalearner
source(paste0(loc_out,"/",'_base_learners.R'))
learner <- c("h2o.glm.1","h2o.glm.2","h2o.glm.3"
#,
#"h2o.rf.11","h2o.rf.12","h2o.rf.13",
#"h2o.rf.21","h2o.rf.22","h2o.rf.23",
#"h2o.rf.31","h2o.rf.32","h2o.rf.33",
#"h2o.gbm.11","h2o.gbm.12",
#"h2o.gbm.21","h2o.gbm.22",
#"h2o.deeplearning.1","h2o.deeplearning.2","h2o.deeplearning.3",
#"h2o.deeplearning.4", "h2o.deeplearning.5","h2o.deeplearning.6",
#"h2o.deeplearning.7"
)
metalearner <- "h2o.deeplearning.wrapper"
#' Ensemble training
fit <- h2o.ensemble(x = x,
y = y,
training_frame = train_h2o,
family = "binomial",
learner = learner,
metalearner = metalearner,
cvControl = list(V=5)
)
#' Predict
p <- predict.h2o.ensemble(fit,test_h2o)
p1 <- as.vector(p$pred[,"p1"])
submission <- data.frame(ID=test$ID,PredictedProb=p1)
write_csv(submission,paste0(loc_out,"/submission.csv"))
#' All done, shutdown H2O
h2o.shutdown(prompt=FALSE)
|
\name{OUwie}
\alias{OUwie}
\title{Generalized Hansen models}
\description{Fits generalized Ornstein-Uhlenbeck-based Hansen models of continuous characters evolving under discrete selective regimes.}
\usage{
OUwie(phy, data, model=c("BM1","BMS","OU1","OUM","OUMV","OUMA","OUMVA",
"TrendyM","TrendyMS"), simmap.tree=FALSE, root.age=NULL,scaleHeight=FALSE,
root.station=TRUE, clade=NULL, mserr="none", starting.vals=NULL, diagn=FALSE,
quiet=FALSE, warn=TRUE, opts = list(algorithm = "NLOPT_LN_SBPLX", maxeval = "1000",
ftol_rel = .Machine$double.eps^0.5))
}
\arguments{
\item{phy}{a phylogenetic tree, in \code{ape} \dQuote{phylo} format and with internal nodes labeled denoting the ancestral selective regimes.}
\item{data}{a data.frame containing species information (see Details).}
\item{model}{models to fit to comparative data (see Details).}
\item{simmap.tree}{a logical indicating whether the input tree is in SIMMAP format. The default is \code{FALSE}.}
\item{root.age}{indicates the age of the tree. This is to be used in cases where the "tips" are not contemporary, such as in cases for fossil trees. Default is \code{NULL} meaning latest tip is modern day.}
\item{scaleHeight}{a logical indicating whether the total tree height should be scaled to 1 (see Details). The default is \code{FALSE}.}
\item{root.station}{a logical indicating whether the starting state, \eqn{\theta_0}{theta_0}, should be estimated (see Details).}
\item{clade}{a list containing a pair of taxa whose MRCA is the clade of interest (see Details).}
\item{mserr}{designates whether a fourth column in the data matrix contains measurement error for each species value ("known"). The measurement error is assumed to be the standard error of the species mean. The default is "none".}
\item{starting.vals}{a vector of initial values for the optimization search. For OU models, two must be supplied, with the first being the initial alpha value and the second being the initial sigma squared. For BM models, just a single value is needed.}
\item{diagn}{a logical indicating whether the full diagnostic analysis should be carried out. The default is \code{FALSE}.}
\item{quiet}{a logical indicating whether progress should be written to the screen. The default is \code{FALSE}.}
\item{warn}{a logical indicating whether a warning should be printed if the number of parameters exceeds ntips/10. The default is \code{TRUE}.}
\item{opts}{a list of options to pass to nloptr for the optimization: useful to adjust for faster, coarser searches}
}
\details{
This function fits various likelihood models for continuous characters evolving under discrete selective regimes. The function returns parameter estimates and their approximate standard errors. The R package \code{nloptr} provides a common interface to NLopt, an open-source library for nonlinear optimization. The likelihood function is maximized using the bounded subplex optimization routine (\code{NLOPT_LN_SBPLX}). As input all \code{OUwie} requires is a tree and a trait data.frame. The tree must be of class \dQuote{phylo} and must contain the ancestral selective regimes as internal node labels. Internal node labels can be applied manually or from some sort of ancestral state reconstruction procedure (BayesTraits, \code{ape}, \code{diversitree}, SIMMAP, etc.), which would then be brought into OUwie. This is essentially what is required by \code{ouch} and Brownie (though Brownie provides built-in ancestral state reconstruction capabilities). The trait data.frame must have column entries in the following order: [,1] species names, [,2] current selective regime, and [,3] the continuous trait of interest. Alternatively, if the user wants to incorporate measurement error (\code{mserr}="known"), then a fourth column, [,4] must be included that provides the standard error estimates for each species mean. However, a global measurement error for all taxa can be estimated from the data (\code{mserr}="est"); is not well tested, so use at your own risk. Also, a user can specify a particular clade as being in a different selective regime, by inputting a pair of species whose mrca is the root of the clade of interest [e.g., \code{clade}=c("taxaA","taxaB")]. OUwie will automatically assign internal node labels and update the data matrix according to this clade designation.
The initial implementation followed \code{ouch} in that the tree is automatically rescaled so that the branch lengths were in proportion to the total height of the tree. However, this makes the results inconsistent with other implementations such as Brownie or \code{geiger}. Therefore, we allow the user to choose whether the tree should be rescaled or not. Note that the when \code{scaleHeight=FALSE} the bounds will have to be adjusted to the appropriate scale.
Possible models are as follows: single-rate Brownian motion (\code{model=BM1}), Brownian motion with different rate parameters for each state on a tree (\code{model=BMS}), Ornstein-Uhlenbeck model with a single optimum for all species (\code{model=OU1}), Ornstein-Uhlenbeck model with different state means and a single \eqn{\alpha}{alpha} and \eqn{\sigma^2}{sigma^2} acting all selective regimes (\code{model=OUM}), and new Ornstein-Uhlenbeck models that assume different state means as well as either multiple \eqn{\sigma^2}{sigma^2} (\code{model=OUMV}), multiple \eqn{\alpha}{alpha} (\code{model=OUMA}), or multiple \eqn{\alpha}{alpha} and \eqn{\sigma^2}{sigma^2} per selective regime (\code{model=OUMVA}).
If \code{root.station} is \code{TRUE} (the default), \eqn{\theta_0}{theta_0} is dropped from the model. Under these conditions it is assumed that the starting value is distributed according to the stationary distribution of the OU process. This would not fit a biological scenario involving moving away from an ancestral state, but it does fit a scenario of evolution at a steady state. Dropping \eqn{\theta_0}{theta_0} from the model can sometimes stabilize estimates of the primary optima, especially in situations where the estimates of \eqn{\theta}{theta} in the full model are non-sensical. In regards to the accuracy of estimating \eqn{\theta_0}{theta_0}, it is important to note that in simulation, as \eqn{\alpha}{alpha} increases estimates of \eqn{\theta_0}{theta_0} converge to zero. Thus, when \eqn{\alpha}{alpha} is large (i.e. \eqn{\alpha}{alpha}>2) it is likely that any inference of an evolutionary trend will be an artifact and positively misleading.
Also note, when specifying the BMS model be mindful of the root.station flag. When root.station=FALSE, the non-censored model of O'Meara et al. 2006 is invoked (i.e., a single regime at the root is estimated), and when root.station==TRUE the group mean model of Thomas et al. 2006 (i.e., the number of means equals the number of regimes). The latter case appears to be a strange special case of OU, in that it behaves similarly to the OUMV model, but without selection. I would say that this is more consistent with the censored test of O'Meara et al. (2006), as opposed to having any real connection to OU. In any case, more work is clearly needed to understand the behavior of the group means model, and therefore, I recommend setting root.station=FALSE in the BMS case.
The Hessian matrix is used as a means to estimate the approximate standard errors of the model parameters and to assess whether they are the maximum likelihood estimates. The variance-covariance matrix of the estimated values of \eqn{\alpha}{alpha} and \eqn{\sigma^2}{sigma^2} are computed as the inverse of the Hessian matrix and the standard errors are the square roots of the diagonals of this matrix. The Hessian is a matrix of second-order derivatives and is approximated in the R package \code{numDeriv}. So, if changes in the value of a parameter results in sharp changes in the slope around the maximum of the log-likelihood function, the second-order derivative will be large, the standard error will be small, and the parameter estimate is considered stable. On the other hand, if the second-order derivative is nearly zero, then the change in the slope around the maximum is also nearly zero, indicating that the parameter value can be moved in any direction without greatly affecting the log-likelihood. In such situations, the standard error of the parameter will be large.
For models that allow \eqn{\alpha}{alpha} and \eqn{\sigma^2}{sigma^2} to vary (i.e., \code{OUMV}, \code{OUMA}, and \code{OUMVA}), the complexity of the model can often times be greater than the information that is contained within the data. As a result one or many parameters are poorly estimated, which can cause the function to return a log-likelihood that is suboptimal. This has great potential for poor model choice and incorrect biological interpretations. An eigendecomposition of the Hessian can provide an indication of whether the search returned the maximum likelihood estimates. If all the eigenvalues of the Hessian are positive, then the Hessian is positive definite, and all parameter estimates are considered reliable. However, if there are both positive and negative eigenvalues, then the objective function is at a saddlepoint and one or several parameters cannot be estimated adequately. One solution is to just fit a simpler model. Another is to actually identify the offending parameters. This can be done through the examination of the eigenvectors. The row order corresponds to the entries in \code{index.matrix}, the columns correspond to the order of values in \code{eigval}, and the larger the value of the row entry the greater the association between the corresponding parameter and the eigenvalue. Thus, the largest values in the columns associated with negative eigenvalues are the parameters that are causing the objective function to be at a saddlepoint.
}
\value{
\code{OUwie} returns an object of class \code{OUwie}. This is a list with elements:
\item{$loglik}{the maximum log-likelihood.}
\item{$AIC}{Akaike information criterion.}
\item{$AICc}{Akaike information criterion corrected for sample-size.}
\item{$BIC}{Bayesian information criterion.}
\item{$model}{The model being fit}
\item{$param.count}{The number of parameters counted in the model.}
\item{$solution}{a matrix containing the maximum likelihood estimates of \eqn{\alpha}{alpha} and \eqn{\sigma^2}{sigma^2}.}
\item{$theta}{a matrix containing the maximum likelihood estimates of \eqn{\theta}{theta} and its standard error.}
\item{$solution.se}{a matrix containing the approximate standard errors of \eqn{\alpha}{alpha} and \eqn{\sigma^2}{sigma^2}. The standard error is calculated as the diagonal of the inverse of the Hessian matrix.}
\item{$tot.state}{A vector of names for the different regimes}
\item{$index.mat}{The indices of the parameters being estimated are returned. The numbers correspond to the row in the \code{eigvect} and can useful for identifying the parameters that are causing the objective function to be at a saddlepoint (see Details)}
\item{$simmap.tree}{A logical indicating whether the input phylogeny is a SIMMAP formatted tree.}
\item{$root.age}{The user-supplied age at the root of the tree.}
\item{$opts}{Internal settings of the likelihood search}
\item{$data}{User-supplied dataset}
\item{$phy}{User-supplied tree}
\item{$root.station}{A logical indicating whether the starting state, \eqn{\theta_0}{theta_0}, was estimated}
\item{$starting.vals}{A vector of user-supplied initial search parameters.}
\item{$lb}{The lower bound set}
\item{$ub}{The upper bound set}
\item{$iterations}{Number of iterations of the likelihood search that were executed}
\item{$mserr.est}{The estimated measurement error if mserr="est". Otherwise, the value is NULL.}
\item{$res}{A vector of residuals from the model fit. The residuals are ordered in the same way as the tips in the tree.}
\item{$eigval}{The eigenvalues from the decomposition of the Hessian of the likelihood function. If any \code{eigval<0} then one or more parameters were not optimized during the likelihood search (see Details)}
\item{$eigvect}{The eigenvectors from the decomposition of the Hessian of the likelihood function is returned (see Details)}
\item{$new.start}{The vector of values to use if you want to restart the run from this point (starting.vals for a new run)}
}
\examples{
\donttest{
data(tworegime)
#Plot the tree and the internal nodes to highlight the selective regimes:
select.reg<-character(length(tree$node.label))
select.reg[tree$node.label == 1] <- "black"
select.reg[tree$node.label == 2] <- "red"
plot(tree)
nodelabels(pch=21, bg=select.reg)
}
\dontrun{
#To see the first 5 lines of the data matrix to see what how to
#structure the data:
trait[1:5,]
#Now fit an OU model that allows different sigma^2:
OUwie(tree,trait,model=c("OUMV"),root.station=TRUE)
#Fit an OU model based on a clade of interest:
OUwie(tree,trait,model=c("OUMV"), root.station=TRUE, clade=c("t50", "t64"))
#For large trees, it may be useful to have ways to restart the search (due to
#finite time per run on a computing cluster, for example). You can do this
#by changing settings of OUwie runs. For example:
run1 <- OUwie(tree,trait,model=c("OUMV"),root.station=TRUE, opts =
list("algorithm"="NLOPT_LN_SBPLX", "maxeval"="500", "ftol_abs"=0.001))
save(run1, file="run1.rda")
#Then, later or in a different session:
load("run1.rda")
run2 <- OUwie(tree,trait,model=c("OUMV"),root.station=TRUE, opts =
list("algorithm"="NLOPT_LN_SBPLX", "maxeval"="500", "ftol_abs"=0.001),
starting.vals=run1$new.start)
#run2 will start off where run1 stopped.
}
}
\references{
Beaulieu J.M., Jhwueng D.C., Boettiger C., and O'Meara B.C. 2012. Modeling stabilizing selection: Expanding the Ornstein-Uhlenbeck model of adaptive evolution. Evolution 66:2369-2383.
O'Meara B.C., Ane C., Sanderson P.C., Wainwright P.C. 2006. Testing for different rates of continuous trait evolution using likelihood. Evolution 60:922-933.
Butler M.A., King A.A. 2004. Phylogenetic comparative analysis: A modeling approach for adaptive evolution. American Naturalist 164:683-695.
Thomas G.H., Freckleton R.P., and Szekely T. 2006. Comparative analysis of the influence of developmental mode on phenotypic diversification rates in shorebirds. Proceedings of the Royal Society, B. 273:1619-1624.
}
\author{Jeremy M. Beaulieu and Brian C. O'Meara}
\keyword{models}
| /man/OUwie.Rd | no_license | claycressler/OUwie | R | false | false | 14,346 | rd | \name{OUwie}
\alias{OUwie}
\title{Generalized Hansen models}
\description{Fits generalized Ornstein-Uhlenbeck-based Hansen models of continuous characters evolving under discrete selective regimes.}
\usage{
OUwie(phy, data, model=c("BM1","BMS","OU1","OUM","OUMV","OUMA","OUMVA",
"TrendyM","TrendyMS"), simmap.tree=FALSE, root.age=NULL,scaleHeight=FALSE,
root.station=TRUE, clade=NULL, mserr="none", starting.vals=NULL, diagn=FALSE,
quiet=FALSE, warn=TRUE, opts = list(algorithm = "NLOPT_LN_SBPLX", maxeval = "1000",
ftol_rel = .Machine$double.eps^0.5))
}
\arguments{
\item{phy}{a phylogenetic tree, in \code{ape} \dQuote{phylo} format and with internal nodes labeled denoting the ancestral selective regimes.}
\item{data}{a data.frame containing species information (see Details).}
\item{model}{models to fit to comparative data (see Details).}
\item{simmap.tree}{a logical indicating whether the input tree is in SIMMAP format. The default is \code{FALSE}.}
\item{root.age}{indicates the age of the tree. This is to be used in cases where the "tips" are not contemporary, such as in cases for fossil trees. Default is \code{NULL} meaning latest tip is modern day.}
\item{scaleHeight}{a logical indicating whether the total tree height should be scaled to 1 (see Details). The default is \code{FALSE}.}
\item{root.station}{a logical indicating whether the starting state, \eqn{\theta_0}{theta_0}, should be estimated (see Details).}
\item{clade}{a list containing a pair of taxa whose MRCA is the clade of interest (see Details).}
\item{mserr}{designates whether a fourth column in the data matrix contains measurement error for each species value ("known"). The measurement error is assumed to be the standard error of the species mean. The default is "none".}
\item{starting.vals}{a vector of initial values for the optimization search. For OU models, two must be supplied, with the first being the initial alpha value and the second being the initial sigma squared. For BM models, just a single value is needed.}
\item{diagn}{a logical indicating whether the full diagnostic analysis should be carried out. The default is \code{FALSE}.}
\item{quiet}{a logical indicating whether progress should be written to the screen. The default is \code{FALSE}.}
\item{warn}{a logical indicating whether a warning should be printed if the number of parameters exceeds ntips/10. The default is \code{TRUE}.}
\item{opts}{a list of options to pass to nloptr for the optimization: useful to adjust for faster, coarser searches}
}
\details{
This function fits various likelihood models for continuous characters evolving under discrete selective regimes. The function returns parameter estimates and their approximate standard errors. The R package \code{nloptr} provides a common interface to NLopt, an open-source library for nonlinear optimization. The likelihood function is maximized using the bounded subplex optimization routine (\code{NLOPT_LN_SBPLX}). As input all \code{OUwie} requires is a tree and a trait data.frame. The tree must be of class \dQuote{phylo} and must contain the ancestral selective regimes as internal node labels. Internal node labels can be applied manually or from some sort of ancestral state reconstruction procedure (BayesTraits, \code{ape}, \code{diversitree}, SIMMAP, etc.), which would then be brought into OUwie. This is essentially what is required by \code{ouch} and Brownie (though Brownie provides built-in ancestral state reconstruction capabilities). The trait data.frame must have column entries in the following order: [,1] species names, [,2] current selective regime, and [,3] the continuous trait of interest. Alternatively, if the user wants to incorporate measurement error (\code{mserr}="known"), then a fourth column, [,4] must be included that provides the standard error estimates for each species mean. However, a global measurement error for all taxa can be estimated from the data (\code{mserr}="est"); is not well tested, so use at your own risk. Also, a user can specify a particular clade as being in a different selective regime, by inputting a pair of species whose mrca is the root of the clade of interest [e.g., \code{clade}=c("taxaA","taxaB")]. OUwie will automatically assign internal node labels and update the data matrix according to this clade designation.
The initial implementation followed \code{ouch} in that the tree is automatically rescaled so that the branch lengths were in proportion to the total height of the tree. However, this makes the results inconsistent with other implementations such as Brownie or \code{geiger}. Therefore, we allow the user to choose whether the tree should be rescaled or not. Note that the when \code{scaleHeight=FALSE} the bounds will have to be adjusted to the appropriate scale.
Possible models are as follows: single-rate Brownian motion (\code{model=BM1}), Brownian motion with different rate parameters for each state on a tree (\code{model=BMS}), Ornstein-Uhlenbeck model with a single optimum for all species (\code{model=OU1}), Ornstein-Uhlenbeck model with different state means and a single \eqn{\alpha}{alpha} and \eqn{\sigma^2}{sigma^2} acting all selective regimes (\code{model=OUM}), and new Ornstein-Uhlenbeck models that assume different state means as well as either multiple \eqn{\sigma^2}{sigma^2} (\code{model=OUMV}), multiple \eqn{\alpha}{alpha} (\code{model=OUMA}), or multiple \eqn{\alpha}{alpha} and \eqn{\sigma^2}{sigma^2} per selective regime (\code{model=OUMVA}).
If \code{root.station} is \code{TRUE} (the default), \eqn{\theta_0}{theta_0} is dropped from the model. Under these conditions it is assumed that the starting value is distributed according to the stationary distribution of the OU process. This would not fit a biological scenario involving moving away from an ancestral state, but it does fit a scenario of evolution at a steady state. Dropping \eqn{\theta_0}{theta_0} from the model can sometimes stabilize estimates of the primary optima, especially in situations where the estimates of \eqn{\theta}{theta} in the full model are non-sensical. In regards to the accuracy of estimating \eqn{\theta_0}{theta_0}, it is important to note that in simulation, as \eqn{\alpha}{alpha} increases estimates of \eqn{\theta_0}{theta_0} converge to zero. Thus, when \eqn{\alpha}{alpha} is large (i.e. \eqn{\alpha}{alpha}>2) it is likely that any inference of an evolutionary trend will be an artifact and positively misleading.
Also note, when specifying the BMS model be mindful of the root.station flag. When root.station=FALSE, the non-censored model of O'Meara et al. 2006 is invoked (i.e., a single regime at the root is estimated), and when root.station==TRUE the group mean model of Thomas et al. 2006 (i.e., the number of means equals the number of regimes). The latter case appears to be a strange special case of OU, in that it behaves similarly to the OUMV model, but without selection. I would say that this is more consistent with the censored test of O'Meara et al. (2006), as opposed to having any real connection to OU. In any case, more work is clearly needed to understand the behavior of the group means model, and therefore, I recommend setting root.station=FALSE in the BMS case.
The Hessian matrix is used as a means to estimate the approximate standard errors of the model parameters and to assess whether they are the maximum likelihood estimates. The variance-covariance matrix of the estimated values of \eqn{\alpha}{alpha} and \eqn{\sigma^2}{sigma^2} are computed as the inverse of the Hessian matrix and the standard errors are the square roots of the diagonals of this matrix. The Hessian is a matrix of second-order derivatives and is approximated in the R package \code{numDeriv}. So, if changes in the value of a parameter results in sharp changes in the slope around the maximum of the log-likelihood function, the second-order derivative will be large, the standard error will be small, and the parameter estimate is considered stable. On the other hand, if the second-order derivative is nearly zero, then the change in the slope around the maximum is also nearly zero, indicating that the parameter value can be moved in any direction without greatly affecting the log-likelihood. In such situations, the standard error of the parameter will be large.
For models that allow \eqn{\alpha}{alpha} and \eqn{\sigma^2}{sigma^2} to vary (i.e., \code{OUMV}, \code{OUMA}, and \code{OUMVA}), the complexity of the model can often times be greater than the information that is contained within the data. As a result one or many parameters are poorly estimated, which can cause the function to return a log-likelihood that is suboptimal. This has great potential for poor model choice and incorrect biological interpretations. An eigendecomposition of the Hessian can provide an indication of whether the search returned the maximum likelihood estimates. If all the eigenvalues of the Hessian are positive, then the Hessian is positive definite, and all parameter estimates are considered reliable. However, if there are both positive and negative eigenvalues, then the objective function is at a saddlepoint and one or several parameters cannot be estimated adequately. One solution is to just fit a simpler model. Another is to actually identify the offending parameters. This can be done through the examination of the eigenvectors. The row order corresponds to the entries in \code{index.matrix}, the columns correspond to the order of values in \code{eigval}, and the larger the value of the row entry the greater the association between the corresponding parameter and the eigenvalue. Thus, the largest values in the columns associated with negative eigenvalues are the parameters that are causing the objective function to be at a saddlepoint.
}
\value{
\code{OUwie} returns an object of class \code{OUwie}. This is a list with elements:
\item{$loglik}{the maximum log-likelihood.}
\item{$AIC}{Akaike information criterion.}
\item{$AICc}{Akaike information criterion corrected for sample-size.}
\item{$BIC}{Bayesian information criterion.}
\item{$model}{The model being fit}
\item{$param.count}{The number of parameters counted in the model.}
\item{$solution}{a matrix containing the maximum likelihood estimates of \eqn{\alpha}{alpha} and \eqn{\sigma^2}{sigma^2}.}
\item{$theta}{a matrix containing the maximum likelihood estimates of \eqn{\theta}{theta} and its standard error.}
\item{$solution.se}{a matrix containing the approximate standard errors of \eqn{\alpha}{alpha} and \eqn{\sigma^2}{sigma^2}. The standard error is calculated as the diagonal of the inverse of the Hessian matrix.}
\item{$tot.state}{A vector of names for the different regimes}
\item{$index.mat}{The indices of the parameters being estimated are returned. The numbers correspond to the row in the \code{eigvect} and can useful for identifying the parameters that are causing the objective function to be at a saddlepoint (see Details)}
\item{$simmap.tree}{A logical indicating whether the input phylogeny is a SIMMAP formatted tree.}
\item{$root.age}{The user-supplied age at the root of the tree.}
\item{$opts}{Internal settings of the likelihood search}
\item{$data}{User-supplied dataset}
\item{$phy}{User-supplied tree}
\item{$root.station}{A logical indicating whether the starting state, \eqn{\theta_0}{theta_0}, was estimated}
\item{$starting.vals}{A vector of user-supplied initial search parameters.}
\item{$lb}{The lower bound set}
\item{$ub}{The upper bound set}
\item{$iterations}{Number of iterations of the likelihood search that were executed}
\item{$mserr.est}{The estimated measurement error if mserr="est". Otherwise, the value is NULL.}
\item{$res}{A vector of residuals from the model fit. The residuals are ordered in the same way as the tips in the tree.}
\item{$eigval}{The eigenvalues from the decomposition of the Hessian of the likelihood function. If any \code{eigval<0} then one or more parameters were not optimized during the likelihood search (see Details)}
\item{$eigvect}{The eigenvectors from the decomposition of the Hessian of the likelihood function is returned (see Details)}
\item{$new.start}{The vector of values to use if you want to restart the run from this point (starting.vals for a new run)}
}
\examples{
\donttest{
data(tworegime)
#Plot the tree and the internal nodes to highlight the selective regimes:
select.reg<-character(length(tree$node.label))
select.reg[tree$node.label == 1] <- "black"
select.reg[tree$node.label == 2] <- "red"
plot(tree)
nodelabels(pch=21, bg=select.reg)
}
\dontrun{
#To see the first 5 lines of the data matrix to see how to
#structure the data:
trait[1:5,]
#Now fit an OU model that allows different sigma^2:
OUwie(tree,trait,model=c("OUMV"),root.station=TRUE)
#Fit an OU model based on a clade of interest:
OUwie(tree,trait,model=c("OUMV"), root.station=TRUE, clade=c("t50", "t64"))
#For large trees, it may be useful to have ways to restart the search (due to
#finite time per run on a computing cluster, for example). You can do this
#by changing settings of OUwie runs. For example:
run1 <- OUwie(tree,trait,model=c("OUMV"),root.station=TRUE, opts =
list("algorithm"="NLOPT_LN_SBPLX", "maxeval"="500", "ftol_abs"=0.001))
save(run1, file="run1.rda")
#Then, later or in a different session:
load("run1.rda")
run2 <- OUwie(tree,trait,model=c("OUMV"),root.station=TRUE, opts =
list("algorithm"="NLOPT_LN_SBPLX", "maxeval"="500", "ftol_abs"=0.001),
starting.vals=run1$new.start)
#run2 will start off where run1 stopped.
}
}
\references{
Beaulieu J.M., Jhwueng D.C., Boettiger C., and O'Meara B.C. 2012. Modeling stabilizing selection: Expanding the Ornstein-Uhlenbeck model of adaptive evolution. Evolution 66:2369-2383.
O'Meara B.C., Ane C., Sanderson P.C., Wainwright P.C. 2006. Testing for different rates of continuous trait evolution using likelihood. Evolution 60:922-933.
Butler M.A., King A.A. 2004. Phylogenetic comparative analysis: A modeling approach for adaptive evolution. American Naturalist 164:683-695.
Thomas G.H., Freckleton R.P., and Szekely T. 2006. Comparative analysis of the influence of developmental mode on phenotypic diversification rates in shorebirds. Proceedings of the Royal Society, B. 273:1619-1624.
}
\author{Jeremy M. Beaulieu and Brian C. O'Meara}
\keyword{models}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/VatanenT_2016.R
\name{VatanenT_2016}
\alias{VatanenT_2016}
\alias{VatanenT_2016.genefamilies_relab.stool}
\alias{VatanenT_2016.marker_abundance.stool}
\alias{VatanenT_2016.marker_presence.stool}
\alias{VatanenT_2016.metaphlan_bugs_list.stool}
\alias{VatanenT_2016.pathabundance_relab.stool}
\alias{VatanenT_2016.pathcoverage.stool}
\title{Data from the VatanenT_2016 study}
\description{
Data from the VatanenT_2016 study
}
\section{Datasets}{
\subsection{VatanenT_2016.genefamilies_relab.stool}{
An ExpressionSet with 785 samples and 1,719,634 features specific to the stool body site
}
\subsection{VatanenT_2016.marker_abundance.stool}{
An ExpressionSet with 785 samples and 135,979 features specific to the stool body site
}
\subsection{VatanenT_2016.marker_presence.stool}{
An ExpressionSet with 785 samples and 131,625 features specific to the stool body site
}
\subsection{VatanenT_2016.metaphlan_bugs_list.stool}{
An ExpressionSet with 785 samples and 1,584 features specific to the stool body site
}
\subsection{VatanenT_2016.pathabundance_relab.stool}{
An ExpressionSet with 785 samples and 19,236 features specific to the stool body site
}
\subsection{VatanenT_2016.pathcoverage.stool}{
An ExpressionSet with 785 samples and 19,236 features specific to the stool body site
}
}
\section{Source}{
\subsection{Title}{
Variation in Microbiome LPS Immunogenicity Contributes to Autoimmunity in Humans.
}
\subsection{Author}{
Vatanen T, Kostic AD, d'Hennezel E, Siljander H, Franzosa EA, Yassour M, Kolde R, Vlamakis H, Arthur TD, Hämäläinen AM, Peet A, Tillmann V, Uibo R, Mokurov S, Dorshakova N, Ilonen J, Virtanen SM, Szabo SJ, Porter JA, Lähdesmäki H, Huttenhower C, Gevers D, Cullen TW, Knip M, Xavier RJ
}
\subsection{Lab}{
NA
}
\subsection{PMID}{
27259157
}
}
\examples{
VatanenT_2016.metaphlan_bugs_list.stool()
}
| /man/VatanenT_2016.Rd | permissive | pythseq/curatedMetagenomicData | R | false | true | 1,957 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/VatanenT_2016.R
\name{VatanenT_2016}
\alias{VatanenT_2016}
\alias{VatanenT_2016.genefamilies_relab.stool}
\alias{VatanenT_2016.marker_abundance.stool}
\alias{VatanenT_2016.marker_presence.stool}
\alias{VatanenT_2016.metaphlan_bugs_list.stool}
\alias{VatanenT_2016.pathabundance_relab.stool}
\alias{VatanenT_2016.pathcoverage.stool}
\title{Data from the VatanenT_2016 study}
\description{
Data from the VatanenT_2016 study
}
\section{Datasets}{
\subsection{VatanenT_2016.genefamilies_relab.stool}{
An ExpressionSet with 785 samples and 1,719,634 features specific to the stool body site
}
\subsection{VatanenT_2016.marker_abundance.stool}{
An ExpressionSet with 785 samples and 135,979 features specific to the stool body site
}
\subsection{VatanenT_2016.marker_presence.stool}{
An ExpressionSet with 785 samples and 131,625 features specific to the stool body site
}
\subsection{VatanenT_2016.metaphlan_bugs_list.stool}{
An ExpressionSet with 785 samples and 1,584 features specific to the stool body site
}
\subsection{VatanenT_2016.pathabundance_relab.stool}{
An ExpressionSet with 785 samples and 19,236 features specific to the stool body site
}
\subsection{VatanenT_2016.pathcoverage.stool}{
An ExpressionSet with 785 samples and 19,236 features specific to the stool body site
}
}
\section{Source}{
\subsection{Title}{
Variation in Microbiome LPS Immunogenicity Contributes to Autoimmunity in Humans.
}
\subsection{Author}{
Vatanen T, Kostic AD, d'Hennezel E, Siljander H, Franzosa EA, Yassour M, Kolde R, Vlamakis H, Arthur TD, Hämäläinen AM, Peet A, Tillmann V, Uibo R, Mokurov S, Dorshakova N, Ilonen J, Virtanen SM, Szabo SJ, Porter JA, Lähdesmäki H, Huttenhower C, Gevers D, Cullen TW, Knip M, Xavier RJ
}
\subsection{Lab}{
NA
}
\subsection{PMID}{
27259157
}
}
\examples{
VatanenT_2016.metaphlan_bugs_list.stool()
}
|
## Deja vu from yesterday!
library(tidyverse)
library(here)

# Snapshot of this machine's installed packages, as a tibble.
ipt <- as_tibble(installed.packages())

# Keep only the columns of interest:
# Package, LibPath, Version, Priority, and Built.
ipt_small <- select(ipt, Package, LibPath, Version, Priority, Built)

# Overwrite data/installed-packages.csv with this machine's snapshot.
# (The committed copy was only an example file from Jenny.)
write_csv(ipt_small, here("data", "installed-packages.csv"))

# When this script works, stage & commit it together with the csv
# file, then push.
| /R/01_write-installed-packages.R | no_license | erinboon/packages-report | R | false | false | 628 | r | ## deja vu from yesterday!
library(tidyverse)
library(here)

# Snapshot of this machine's installed packages, as a tibble.
ipt <- as_tibble(installed.packages())

# Keep only the columns of interest:
# Package, LibPath, Version, Priority, and Built.
ipt_small <- select(ipt, Package, LibPath, Version, Priority, Built)

# Overwrite data/installed-packages.csv with this machine's snapshot.
# (The committed copy was only an example file from Jenny.)
write_csv(ipt_small, here("data", "installed-packages.csv"))

# When this script works, stage & commit it together with the csv
# file, then push.
|
\name{cv.glmregNB}
\alias{cv.glmregNB}
\title{Cross-validation for glmregNB}
\description{Does k-fold cross-validation for glmregNB, produces a plot,
and returns cross-validated log-likelihood values for \code{lambda}}
\usage{
cv.glmregNB(formula, data, weights, offset=NULL, lambda=NULL, nfolds=10,
foldid, plot.it=TRUE, se=TRUE, n.cores=2, trace=FALSE,
parallel=FALSE, ...)
}
\arguments{
\item{formula}{symbolic description of the model}
\item{data}{arguments controlling formula processing
via \code{\link[stats]{model.frame}}.}
\item{weights}{Observation weights; defaults to 1 per observation}
\item{offset}{this can be used to specify an a priori known component to be included in the linear predictor during fitting. This should be NULL or a numeric vector of length equal to the number of cases. Currently only one offset term can be included in the formula.}
\item{lambda}{Optional user-supplied lambda sequence; default is
\code{NULL}, and \code{glmregNB} chooses its own sequence}
\item{nfolds}{number of folds - default is 10. Although \code{nfolds}
can be as large as the sample size (leave-one-out CV), it is not
recommended for large datasets. Smallest value allowable is \code{nfolds=3}}
\item{foldid}{an optional vector of values between 1 and \code{nfold}
identifying what fold each observation is in. If supplied,
\code{nfold} can be missing.}
\item{plot.it}{ a logical value, to plot the estimated log-likelihood values if \code{TRUE}. }
\item{se}{ a logical value, to plot with standard errors. }
\item{n.cores}{The number of CPU cores to use. The cross-validation loop
will attempt to send different CV folds off to different cores.}
\item{trace}{a logical value, print progress of cross-validation or not}
\item{parallel}{a logical value, parallel computing or not}
\item{\dots}{Other arguments that can be passed to \code{glmregNB}.}
}
\details{The function runs \code{glmregNB} \code{nfolds}+1 times; the
first to get the \code{lambda} sequence, and then the remainder to
compute the fit with each of the folds omitted. The error is
accumulated, and the average error and standard deviation over the
folds is computed.
Note that \code{cv.glmregNB} does NOT search for
values for \code{alpha}. A specific value should be supplied, else
\code{alpha=1} is assumed by default. If users would like to
cross-validate \code{alpha} as well, they should call \code{cv.glmregNB}
with a pre-computed vector \code{foldid}, and then use this same fold vector
in separate calls to \code{cv.glmregNB} with different values of
\code{alpha}.
}
\value{an object of class \code{"cv.glmregNB"} is returned, which is a
list with the ingredients of the cross-validation fit.
\item{fit}{a fitted glmregNB object for the full data.}
\item{residmat}{matrix of log-likelihood values with row values for \code{lambda} and column values for \code{k}th cross-validation}
\item{cv}{The mean cross-validated log-likelihood values - a vector of length
\code{length(lambda)}.}
\item{cv.error}{The standard error of cross-validated log-likelihood values - a vector of length
\code{length(lambda)}.}
\item{lambda}{a vector of \code{lambda} values}
\item{foldid}{indicators of data used in each cross-validation, for reproducibility purposes}
\item{lambda.which}{index of \code{lambda} that gives maximum \code{cv} value.}
\item{lambda.optim}{value of \code{lambda} that gives maximum \code{cv} value.}
}
\references{
Zhu Wang, Shuangge Ma, Michael Zappitelli, Chirag Parikh, Ching-Yun Wang and Prasad Devarajan (2014)
\emph{Penalized Count Data Regression with Application to Hospital Stay after Pediatric Cardiac Surgery}, \emph{Statistical Methods in Medical Research}. 2014 Apr 17. [Epub ahead of print]
}
\author{Zhu Wang <wangz1@uthscsa.edu>}
\seealso{\code{\link{glmregNB}} and \code{\link{plot}}, \code{\link{predict}}, and \code{\link{coef}} methods for \code{"cv.glmregNB"} object.}
\examples{
\dontrun{
data("bioChemists", package = "pscl")
fm_nb <- cv.glmregNB(art ~ ., data = bioChemists)
plot(fm_nb)
}
}
\keyword{models}
\keyword{regression}
| /man/cv.glmregNB.Rd | no_license | zhuwang46/mpath | R | false | false | 4,166 | rd | \name{cv.glmregNB}
\alias{cv.glmregNB}
\title{Cross-validation for glmregNB}
\description{Does k-fold cross-validation for glmregNB, produces a plot,
and returns cross-validated log-likelihood values for \code{lambda}}
\usage{
cv.glmregNB(formula, data, weights, offset=NULL, lambda=NULL, nfolds=10,
foldid, plot.it=TRUE, se=TRUE, n.cores=2, trace=FALSE,
parallel=FALSE, ...)
}
\arguments{
\item{formula}{symbolic description of the model}
\item{data}{arguments controlling formula processing
via \code{\link[stats]{model.frame}}.}
\item{weights}{Observation weights; defaults to 1 per observation}
\item{offset}{this can be used to specify an a priori known component to be included in the linear predictor during fitting. This should be NULL or a numeric vector of length equal to the number of cases. Currently only one offset term can be included in the formula.}
\item{lambda}{Optional user-supplied lambda sequence; default is
\code{NULL}, and \code{glmregNB} chooses its own sequence}
\item{nfolds}{number of folds - default is 10. Although \code{nfolds}
can be as large as the sample size (leave-one-out CV), it is not
recommended for large datasets. Smallest value allowable is \code{nfolds=3}}
\item{foldid}{an optional vector of values between 1 and \code{nfold}
identifying what fold each observation is in. If supplied,
\code{nfold} can be missing.}
\item{plot.it}{ a logical value, to plot the estimated log-likelihood values if \code{TRUE}. }
\item{se}{ a logical value, to plot with standard errors. }
\item{n.cores}{The number of CPU cores to use. The cross-validation loop
will attempt to send different CV folds off to different cores.}
\item{trace}{a logical value, print progress of cross-validation or not}
\item{parallel}{a logical value, parallel computing or not}
\item{\dots}{Other arguments that can be passed to \code{glmregNB}.}
}
\details{The function runs \code{glmregNB} \code{nfolds}+1 times; the
first to get the \code{lambda} sequence, and then the remainder to
compute the fit with each of the folds omitted. The error is
accumulated, and the average error and standard deviation over the
folds is computed.
Note that \code{cv.glmregNB} does NOT search for
values for \code{alpha}. A specific value should be supplied, else
\code{alpha=1} is assumed by default. If users would like to
cross-validate \code{alpha} as well, they should call \code{cv.glmregNB}
with a pre-computed vector \code{foldid}, and then use this same fold vector
in separate calls to \code{cv.glmregNB} with different values of
\code{alpha}.
}
\value{an object of class \code{"cv.glmregNB"} is returned, which is a
list with the ingredients of the cross-validation fit.
\item{fit}{a fitted glmregNB object for the full data.}
\item{residmat}{matrix of log-likelihood values with row values for \code{lambda} and column values for \code{k}th cross-validation}
\item{cv}{The mean cross-validated log-likelihood values - a vector of length
\code{length(lambda)}.}
\item{cv.error}{The standard error of cross-validated log-likelihood values - a vector of length
\code{length(lambda)}.}
\item{lambda}{a vector of \code{lambda} values}
\item{foldid}{indicators of data used in each cross-validation, for reproducibility purposes}
\item{lambda.which}{index of \code{lambda} that gives maximum \code{cv} value.}
\item{lambda.optim}{value of \code{lambda} that gives maximum \code{cv} value.}
}
\references{
Zhu Wang, Shuangge Ma, Michael Zappitelli, Chirag Parikh, Ching-Yun Wang and Prasad Devarajan (2014)
\emph{Penalized Count Data Regression with Application to Hospital Stay after Pediatric Cardiac Surgery}, \emph{Statistical Methods in Medical Research}. 2014 Apr 17. [Epub ahead of print]
}
\author{Zhu Wang <wangz1@uthscsa.edu>}
\seealso{\code{\link{glmregNB}} and \code{\link{plot}}, \code{\link{predict}}, and \code{\link{coef}} methods for \code{"cv.glmregNB"} object.}
\examples{
\dontrun{
data("bioChemists", package = "pscl")
fm_nb <- cv.glmregNB(art ~ ., data = bioChemists)
plot(fm_nb)
}
}
\keyword{models}
\keyword{regression}
|
#########################################################################
#########################################################################
######################## Sequential OTL Detection ########################
#########################################################################
#' Sequential Ensemble Technique to detect Outlier
#'
#' In this particular function, all the techniques will be applied in a sequential manner, meaning that the first method
#' will be applied on the initial set of target data, then the second method on the output of the first technique, and so on.
#' The sequence can be given in any combination and permutation. The codes for the methods are as follows:\cr
#' 1. Decomposition Method\cr
#' 2. Rolling Quarter Method\cr
#' 3. Rolling Quarter Method without NAs\cr
#' 4. Overall MAD method.\cr
#' 5. Running Median method \cr\cr
#' To call any one particular method, call this
#' function with only that method's number in the sequence.
#' @export
#' @param data: The complete data frame, data table, or tibble that contains the data
#' @param supply: Target attribute on which decomposition should happen
#' @param key: Key attributes in the table on which split should happen. For example: state_id, job_code_id
#' @param year: Year attribute of the data frame.
#' @param qtr_key: Attribute that contains the quarter value.
#' @param seq: This variable will get the sequence of the methods to be used. DEFAULT to (1,2,3,4,5)
#' @param dec_cutoff: The cutoff values against which the deviation will be checked for decomposition method.
#' If the computed ratio is less than the cutoff, then the point if not an outlier. DEFAULT to 1.5
#' @param dec_mad_const: Scale factor to calculate MAD values in decomposition method to maintain consistency.
#' DEFAULT to 1.4826
#' @param dec_replace: In the final column for decomposition method,
#' the outliers will be replaced by this variable's value. DEFAULT to NA.
#' @param rMAD_cutoff: The cutoff values against which the deviation will be checked for rolling MAD method.
#' If the computed ratio is less than the cutoff, then the point if not an outlier. DEFAULT to 1.5
#' @param rMAD_mad_const: Scale factor to calculate MAD values in rolling MAD method to maintain consistency.
#' DEFAULT to 1.4826
#' @param rMAD_replace: In the final column for rolling MAD method,
#' the outliers will be replaced by this variable's value. DEFAULT to NA.
#' @param rMAD_nona_cutoff: The cutoff values against which the deviation will be checked for rolling MAD method without NAs.
#' If the computed ratio is less than the cutoff, then the point if not an outlier. DEFAULT to 1.5
#' @param rMAD_nona_mad_const: Scale factor to calculate MAD values in rolling MAD method without NAs to maintain consistency.
#' DEFAULT to 1.4826
#' @param rMAD_nona_replace: In the final column for rolling MAD method without NAs,
#' the outliers will be replaced by this variable's value. DEFAULT to NA.
#' @param overallMAD_cutoff: The cutoff values against which the deviation will be checked for overall MAD method.
#' If the computed ratio is less than the cutoff, then the point if not an outlier. DEFAULT to 1.5
#' @param overallMAD_mad_const: Scale factor to calculate MAD values in overall MAD method to maintain consistency.
#' DEFAULT to 1.4826
#' @param overallMAD_replace: In the final column for overall MAD method,
#' the outliers will be replaced by this variable's value. DEFAULT to NA.
#' @param runmed_replace: In the final column for decomposition method,
#' the outliers will be replaced by this variable's value. DEFAULT to NA.
#' @param runmed_quantile: Compute any of 25\% 50\% 75\% 100\% quartile depending on the data requirements
#' @return The dataset with few extra attributes.
#' If the technique number is not present in the parameter seq, then attributes for that method will not be there.\cr
#' Technique related attributes will be:\cr
#' spl_without_otl_dec: The target attribute in which all the outliers will be replaced by the replace variable\cr
#' otl_flag_dec: Outlier flag by decompose method. Outliers will be marked as 1.\cr
#' spl_without_otl_rMAD: The target attribute in which all the outliers will be replaced by the replace variable\cr
#' otl_flag_rMAD: Outlier flag by rolling method. Outliers will be marked as 1.\cr
#' spl_without_otl_rMAD_nona: The target attribute in which all the outliers will be replaced by the replace variable\cr
#' otl_flag_rMAD_nona: Outlier flag by rolling method without NAs. Outliers will be marked as 1.\cr
#' spl_without_otl_overallMAD: The target attribute in which all the outliers will be replaced by the replace variable\cr
#' otl_flag_overallMAD: Outlier flag by overall MAD calculation method. Outliers will be marked as 1.\cr\cr
#' spl_without_otl_runmed: The target attribute in which all the outliers will be replaced by the replace variable\cr
#' otl_flag_runmed: Outlier flag by running median method. Outliers will be marked as 1.\cr
#' seq_spl_without_otl: The final target variable without the outliers\cr
#' seq_flag: The final flag to the outliers.\cr
#' Final variable which will be present in all the files\cr
#' @examples
#' seq_test <- seq_outlier_detection(data = data_comp2, key = c("CODPRO", "PROFM"), supply = "tot_emp",
#'   year = "year", qtr_key = "qtr", seq = c(4, 1))
#' # Here just the overall MAD and decomposition methods will run, in sequence
#1.decompose
#2.rolling qtr
#3.rolling qtr without NAs
#4.overall MAD
#5.running median
seq_outlier_detection=function(data, key, supply, year, qtr_key, seq=c(1,2,3,4,5),
                               dec_cutoff=1.5, dec_mad_const=1.4826, dec_replace=NA,
                               rMAD_cutoff=1.5, rMAD_mad_const=1.4826, rMAD_replace=NA,
                               rMAD_nona_cutoff=1.5, rMAD_nona_mad_const=1.4826, rMAD_nona_replace=NA,
                               overall_MAD_cutoff=1.5, overall_MAD_mad_const=1.4826, overall_MAD_replace=NA,
                               runmed_replace=NA, runmed_quantile = 0.75)
{
  # Validate the requested method sequence up front. The previous check,
  # sum(ifelse(grepl('[0-5]', seq), 0, 1)) > 0, only tested whether each
  # element CONTAINED a digit between 0 and 5, so invalid codes such as
  # 0, 15 or 42 passed validation and were then silently skipped by the
  # dispatch loop below. Membership in 1:5 is the intended contract.
  if(!all(seq %in% 1:5))
  {
    stop("In sequence number 1 to 5 is acceptable.
         1. Decompose method,
         2. Rolling quarters using MAD,
         3. Rolling quarters without NAs using MAD,
         4. Overall MAD and
         5. Running Median")
  }
  data=data.frame(data)
  # Working copy of the target series: each method replaces the outliers it
  # finds here, so later methods in `seq` run on already-cleaned values.
  # (data[[supply]] replaces the former data[,mget("supply")[[1]]], which
  # resolved to the same column via an unnecessary environment lookup.)
  data$seq_supply_without_otl=data[[supply]]
  # Cumulative outlier flag (1 = outlier); values missing on input are
  # flagged from the start.
  data$seq_flag=0
  data$seq_flag[is.na(data$seq_supply_without_otl)]=1
  # Apply each requested technique in order; every branch runs its method on
  # the current cleaned series, then copies that method's output column and
  # flag into the sequential result columns.
  for(s in seq)
  {
    if(s==1)
    {
      # 1. seasonal/trend decomposition based detection
      print("Decomposing method started")
      data=decompose_outlier(data, supply="seq_supply_without_otl", key=key, year=year, qtr_key=qtr_key,
                             cutoff=dec_cutoff, mad_const = dec_mad_const, replace=dec_replace)
      data$seq_supply_without_otl=data$spl_without_otl_dec
      data$seq_flag=data$otl_flag_dec
      print("Outlier detected by decomposing method")
      gc()
    }
    else if(s==2)
    {
      # 2. rolling-quarter MAD detection
      print("Rolling Method started")
      data=rolling_outlier(data, supply="seq_supply_without_otl", key=key, year=year, qtr_key=qtr_key,
                           cutoff=rMAD_cutoff, mad_const = rMAD_mad_const, replace=rMAD_replace, nona=FALSE)
      data$seq_supply_without_otl=data$spl_without_otl_rMAD
      data$seq_flag=data$otl_flag_rMAD
      print("Outlier detected by rolling method")
      gc()
    }
    else if(s==3)
    {
      # 3. rolling-quarter MAD detection, NAs excluded from the windows
      print("Rolling Method without NAs started")
      data=rolling_outlier(data, supply="seq_supply_without_otl", key=key, year=year, qtr_key=qtr_key,
                           cutoff=rMAD_nona_cutoff, mad_const = rMAD_nona_mad_const,
                           replace=rMAD_nona_replace, nona=TRUE)
      data$seq_supply_without_otl=data$spl_without_otl_rMAD_nona
      data$seq_flag=data$otl_flag_rMAD_nona
      print("Outlier detected by rolling method without NAs")
      gc()
    }
    else if(s==4)
    {
      # 4. single MAD computed over the whole series
      print("Overall MAD method started")
      data=overall_outlier(data, supply="seq_supply_without_otl", key=key, year=year, qtr_key=qtr_key,
                           cutoff=overall_MAD_cutoff, mad_const = overall_MAD_mad_const, replace=overall_MAD_replace)
      data$seq_supply_without_otl=data$spl_without_otl_overallMAD
      data$seq_flag=data$otl_flag_overallMAD
      print("Outlier detected by Overall MAD method")
      gc()
    }
    else if(s==5)
    {
      # 5. running median detection
      print("Running Median method started")
      data=running_median_outlier(data, supply="seq_supply_without_otl", key=key, year=year, qtr_key=qtr_key,
                                  quantile = runmed_quantile, replace=runmed_replace)
      data$seq_supply_without_otl=data$spl_without_otl_runmed
      data$seq_flag=data$otl_flag_runmed
      print("Outlier detected by Running Median method")
      gc()
    }
  }
  data
}
| /outlierdetection/R/Sequential_ensemble_technique.R | no_license | 1711surabhi/tn_data_prep | R | false | false | 9,110 | r | #########################################################################
#########################################################################
######################## Sequential OTL Detection ########################
#########################################################################
#' Sequential Ensemble Technique to detect Outlier
#'
#' In this perticular function, all the techniques will be applied in a sequential manner, that means the first method
#' will be applied on the initial set of target data, then the second method on the ouput of the first tecnique and so on.
#' The sequence can be given in any combination and permutation.\ The code for the methods are as follows:\cr
#' 1. Decomposiiton Method\cr
#' 2. Rolling Quarter Mehthod\cr
#' 3. Rolling Quarter Mehthod without NAs\cr
#' 4. Overall MAD method.\cr
#' 5. Running Median method \cr\cr
#' To call any one particular method we can call this
#' function with with only that methods number in sequence
#' @export
#' @param data: Th complete data frame or data table or tibble that contains the data
#' @param supply: Target attribute on which decomposition should happen
#' @param key: Key attributes in the table on which split should happen. For example: state_id, job_code_id
#' @param year: Year attribute of the data frame.
#' @param qtr_key: Attribute that contains the quarter value.
#' @param seq: This variable will get the sequence of the methods to be used. DEFAULT to (1,2,3,4,5)
#' @param dec_cutoff: The cutoff values against which the deviation will be checked for decomposition method.
#' If the computed ratio is less than the cutoff, then the point if not an outlier. DEFAULT to 1.5
#' @param dec_mad_const: Scale factor to calculate MAD values in decomposition method to maintain consistency.
#' DEFAULT to 1.4826
#' @param dec_replace: In the final column for decomposition method,
#' the outliers will be replaced by this variable's value. DEFAULT to NA.
#' @param rMAD_cutoff: The cutoff values against which the deviation will be checked for rolling MAD method.
#' If the computed ratio is less than the cutoff, then the point if not an outlier. DEFAULT to 1.5
#' @param rMAD_mad_const: Scale factor to calculate MAD values in rolling MAD method to maintain consistency.
#' DEFAULT to 1.4826
#' @param rMAD_replace: In the final column for rolling MAD method,
#' the outliers will be replaced by this variable's value. DEFAULT to NA.
#' @param rMAD_nona_cutoff: The cutoff values against which the deviation will be checked for rolling MAD method without NAs.
#' If the computed ratio is less than the cutoff, then the point if not an outlier. DEFAULT to 1.5
#' @param rMAD_nona_mad_const: Scale factor to calculate MAD values in rolling MAD method without NAs to maintain consistency.
#' DEFAULT to 1.4826
#' @param rMAD_nona_replace: In the final column for rolling MAD method without NAs,
#' the outliers will be replaced by this variable's value. DEFAULT to NA.
#' @param overall_MAD_cutoff: The cutoff values against which the deviation will be checked for overall MAD method.
#' If the computed ratio is less than the cutoff, then the point is not an outlier. DEFAULT to 1.5
#' @param overall_MAD_mad_const: Scale factor to calculate MAD values in overall MAD method to maintain consistency.
#' DEFAULT to 1.4826
#' @param overall_MAD_replace: In the final column for overall MAD method,
#' the outliers will be replaced by this variable's value. DEFAULT to NA.
#' @param runmed_replace: In the final column for the running median method,
#' the outliers will be replaced by this variable's value. DEFAULT to NA.
#' @param runmed_quantile: Compute any of 25\% 50\% 75\% 100\% quartile depending on the data requirements
#' @return The dataset with few extra attributes.
#' If the technique number is not present in the parameter seq, then attributes for that method will not be there.\cr
#' Technique related attributes will be:\cr
#' spl_without_otl_dec: The target attribute in which all the outliers will be replaced by the replace variable\cr
#' otl_flag_dec: Outlier flag by decompose method. Outliers will be marked as 1.\cr
#' spl_without_otl_rMAD: The target attribute in which all the outliers will be replaced by the replace variable\cr
#' otl_flag_rMAD: Outlier flag by rolling method. Outliers will be marked as 1.\cr
#' spl_without_otl_rMAD_nona: The target attribute in which all the outliers will be replaced by the replace variable\cr
#' otl_flag_rMAD_nona: Outlier flag by rolling method without NAs. Outliers will be marked as 1.\cr
#' spl_without_otl_overallMAD: The target attribute in which all the outliers will be replaced by the replace variable\cr
#' otl_flag_overallMAD: Outlier flag by overall MAD calculation method. Outliers will be marked as 1.\cr\cr
#' spl_without_otl_runmed: The target attribute in which all the outliers will be replaced by the replace variable\cr
#' otl_flag_runmed: Outlier flag by running median method. Outliers will be marked as 1.\cr
#' seq_supply_without_otl: The final target variable without the outliers\cr
#' seq_flag: The final flag marking the outliers (1 = outlier).\cr
#' Final variable which will be present in all the files\cr
#' @example seq_test=seq_outlier_detection(data=data_comp2,key=c("CODPRO", "PROFM"), supply="tot_emp",
#' year="year", qtr_key = "qtr", seq=c(4,1))\cr
#' Here just the overall MAD and decomposition method will run in sequence
#1.decompose
#2.rolling qtr
#3.rolling qtr without NAs
#4.overall MAD
#5.running median
seq_outlier_detection=function(data, key, supply, year, qtr_key, seq=c(1,2,3,4,5),
                               dec_cutoff=1.5, dec_mad_const=1.4826, dec_replace=NA,
                               rMAD_cutoff=1.5, rMAD_mad_const=1.4826, rMAD_replace=NA,
                               rMAD_nona_cutoff=1.5, rMAD_nona_mad_const=1.4826, rMAD_nona_replace=NA,
                               overall_MAD_cutoff=1.5, overall_MAD_mad_const=1.4826, overall_MAD_replace=NA,
                               runmed_replace=NA, runmed_quantile = 0.75)
{
  # Validate the requested method sequence up front. The previous check,
  # sum(ifelse(grepl('[0-5]', seq), 0, 1)) > 0, only tested whether each
  # element CONTAINED a digit between 0 and 5, so invalid codes such as
  # 0, 15 or 42 passed validation and were then silently skipped by the
  # dispatch loop below. Membership in 1:5 is the intended contract.
  if(!all(seq %in% 1:5))
  {
    stop("In sequence number 1 to 5 is acceptable.
         1. Decompose method,
         2. Rolling quarters using MAD,
         3. Rolling quarters without NAs using MAD,
         4. Overall MAD and
         5. Running Median")
  }
  data=data.frame(data)
  # Working copy of the target series: each method replaces the outliers it
  # finds here, so later methods in `seq` run on already-cleaned values.
  # (data[[supply]] replaces the former data[,mget("supply")[[1]]], which
  # resolved to the same column via an unnecessary environment lookup.)
  data$seq_supply_without_otl=data[[supply]]
  # Cumulative outlier flag (1 = outlier); values missing on input are
  # flagged from the start.
  data$seq_flag=0
  data$seq_flag[is.na(data$seq_supply_without_otl)]=1
  # Apply each requested technique in order; every branch runs its method on
  # the current cleaned series, then copies that method's output column and
  # flag into the sequential result columns.
  for(s in seq)
  {
    if(s==1)
    {
      # 1. seasonal/trend decomposition based detection
      print("Decomposing method started")
      data=decompose_outlier(data, supply="seq_supply_without_otl", key=key, year=year, qtr_key=qtr_key,
                             cutoff=dec_cutoff, mad_const = dec_mad_const, replace=dec_replace)
      data$seq_supply_without_otl=data$spl_without_otl_dec
      data$seq_flag=data$otl_flag_dec
      print("Outlier detected by decomposing method")
      gc()
    }
    else if(s==2)
    {
      # 2. rolling-quarter MAD detection
      print("Rolling Method started")
      data=rolling_outlier(data, supply="seq_supply_without_otl", key=key, year=year, qtr_key=qtr_key,
                           cutoff=rMAD_cutoff, mad_const = rMAD_mad_const, replace=rMAD_replace, nona=FALSE)
      data$seq_supply_without_otl=data$spl_without_otl_rMAD
      data$seq_flag=data$otl_flag_rMAD
      print("Outlier detected by rolling method")
      gc()
    }
    else if(s==3)
    {
      # 3. rolling-quarter MAD detection, NAs excluded from the windows
      print("Rolling Method without NAs started")
      data=rolling_outlier(data, supply="seq_supply_without_otl", key=key, year=year, qtr_key=qtr_key,
                           cutoff=rMAD_nona_cutoff, mad_const = rMAD_nona_mad_const,
                           replace=rMAD_nona_replace, nona=TRUE)
      data$seq_supply_without_otl=data$spl_without_otl_rMAD_nona
      data$seq_flag=data$otl_flag_rMAD_nona
      print("Outlier detected by rolling method without NAs")
      gc()
    }
    else if(s==4)
    {
      # 4. single MAD computed over the whole series
      print("Overall MAD method started")
      data=overall_outlier(data, supply="seq_supply_without_otl", key=key, year=year, qtr_key=qtr_key,
                           cutoff=overall_MAD_cutoff, mad_const = overall_MAD_mad_const, replace=overall_MAD_replace)
      data$seq_supply_without_otl=data$spl_without_otl_overallMAD
      data$seq_flag=data$otl_flag_overallMAD
      print("Outlier detected by Overall MAD method")
      gc()
    }
    else if(s==5)
    {
      # 5. running median detection
      print("Running Median method started")
      data=running_median_outlier(data, supply="seq_supply_without_otl", key=key, year=year, qtr_key=qtr_key,
                                  quantile = runmed_quantile, replace=runmed_replace)
      data$seq_supply_without_otl=data$spl_without_otl_runmed
      data$seq_flag=data$otl_flag_runmed
      print("Outlier detected by Running Median method")
      gc()
    }
  }
  data
}
|
# Make a table of the mean/sd homozygous genotypes by class
dat <- read.csv("/Users/tomkono/Dropbox/GitHub/Deleterious_GP/Results/Burden/Ran_Sel_Homozygous_Derived_Counts.csv", header=TRUE)

# Variant classes summarized (columns of `dat`) and the cycle/selection
# groups (row subsets). C0 is the base cycle and has no Ran/Sel split.
# This replaces ~60 copy-pasted mean()/sd() statements with one loop that
# produces exactly the same columns in the same order.
classes <- c("NC", "SY", "NS", "DE")
groups <- list(
    C0     = dat$Cycle == "C0",
    C1.Ran = dat$Cycle == "C1" & dat$Type == "Ran",
    C1.Sel = dat$Cycle == "C1" & dat$Type == "Sel",
    C2.Ran = dat$Cycle == "C2" & dat$Type == "Ran",
    C2.Sel = dat$Cycle == "C2" & dat$Type == "Sel",
    C3.Ran = dat$Cycle == "C3" & dat$Type == "Ran",
    C3.Sel = dat$Cycle == "C3" & dat$Type == "Sel"
)

# For each group, compute the mean and sd of every variant class, yielding
# the <group>.Mean / <group>.Sd column layout of the original script.
cols <- list()
for (g in names(groups)) {
    keep <- groups[[g]]
    cols[[paste0(g, ".Mean")]] <- sapply(classes, function(cl) mean(dat[[cl]][keep]))
    cols[[paste0(g, ".Sd")]]   <- sapply(classes, function(cl) sd(dat[[cl]][keep]))
}

# Make a table to print it out (rows = variant classes)
toprint <- data.frame(cols)
rownames(toprint) <- c("Noncoding", "Synonymous", "Nonsynonymous", "Deleterious")
write.csv(toprint, file="Ran_Sel_Homozygous_Derived_Counts.csv", quote=FALSE, row.names=TRUE)
| /Analysis_Scripts/Genetic_Analysis/Ran_Sel_Burden.R | no_license | MorrellLAB/Deleterious_GP | R | false | false | 5,101 | r | # Make a table of the mean/sd homozygous genotypes by class
dat <- read.csv("/Users/tomkono/Dropbox/GitHub/Deleterious_GP/Results/Burden/Ran_Sel_Homozygous_Derived_Counts.csv", header=TRUE)

# Variant classes summarized (columns of `dat`) and the cycle/selection
# groups (row subsets). C0 is the base cycle and has no Ran/Sel split.
# This replaces ~60 copy-pasted mean()/sd() statements with one loop that
# produces exactly the same columns in the same order.
classes <- c("NC", "SY", "NS", "DE")
groups <- list(
    C0     = dat$Cycle == "C0",
    C1.Ran = dat$Cycle == "C1" & dat$Type == "Ran",
    C1.Sel = dat$Cycle == "C1" & dat$Type == "Sel",
    C2.Ran = dat$Cycle == "C2" & dat$Type == "Ran",
    C2.Sel = dat$Cycle == "C2" & dat$Type == "Sel",
    C3.Ran = dat$Cycle == "C3" & dat$Type == "Ran",
    C3.Sel = dat$Cycle == "C3" & dat$Type == "Sel"
)

# For each group, compute the mean and sd of every variant class, yielding
# the <group>.Mean / <group>.Sd column layout of the original script.
cols <- list()
for (g in names(groups)) {
    keep <- groups[[g]]
    cols[[paste0(g, ".Mean")]] <- sapply(classes, function(cl) mean(dat[[cl]][keep]))
    cols[[paste0(g, ".Sd")]]   <- sapply(classes, function(cl) sd(dat[[cl]][keep]))
}

# Make a table to print it out (rows = variant classes)
toprint <- data.frame(cols)
rownames(toprint) <- c("Noncoding", "Synonymous", "Nonsynonymous", "Deleterious")
write.csv(toprint, file="Ran_Sel_Homozygous_Derived_Counts.csv", quote=FALSE, row.names=TRUE)
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/spatialAtRiskClassDef.R
\name{xvals.fromXYZ}
\alias{xvals.fromXYZ}
\title{xvals.fromXYZ function}
\usage{
\method{xvals}{fromXYZ}(obj, ...)
}
\arguments{
\item{obj}{a spatialAtRisk object}
\item{...}{additional arguments}
}
\value{
the x values
}
\description{
Method for extracting 'x values' from an object of class fromXYZ
}
\seealso{
\link{xvals}, \link{yvals}, \link{zvals}, \link{xvals.default}, \link{yvals.default}, \link{zvals.default}, \link{yvals.fromXYZ}, \link{zvals.fromXYZ}, \link{xvals.SpatialGridDataFrame}, \link{yvals.SpatialGridDataFrame}, \link{zvals.SpatialGridDataFrame}
}
| /man/xvals.fromXYZ.Rd | no_license | bentaylor1/lgcp | R | false | false | 685 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/spatialAtRiskClassDef.R
\name{xvals.fromXYZ}
\alias{xvals.fromXYZ}
\title{xvals.fromXYZ function}
\usage{
\method{xvals}{fromXYZ}(obj, ...)
}
\arguments{
\item{obj}{a spatialAtRisk object}
\item{...}{additional arguments}
}
\value{
the x values
}
\description{
Method for extracting 'x values' from an object of class fromXYZ
}
\seealso{
\link{xvals}, \link{yvals}, \link{zvals}, \link{xvals.default}, \link{yvals.default}, \link{zvals.default}, \link{yvals.fromXYZ}, \link{zvals.fromXYZ}, \link{xvals.SpatialGridDataFrame}, \link{yvals.SpatialGridDataFrame}, \link{zvals.SpatialGridDataFrame}
}
|
# Load the compiled Rcpp module exposing the CMQWorker class used below
loadModule("cmq_worker", TRUE) # CMQWorker C++ class
#' R worker submitted as cluster job
#'
#' Do not call this manually, the master will do that
#'
#' Connects back to the master, announces itself (WORKER_UP), then loops:
#' receive a message, dispatch on its `id` (DO_CALL/DO_SETUP/DO_CHUNK/
#' WORKER_WAIT/WORKER_STOP), reply with WORKER_READY or WORKER_ERROR.
#' On exit it reports timing, memory and call-count stats via WORKER_DONE.
#'
#' @param master The master address (tcp://ip:port)
#' @param ... Catch-all to not break older template values (ignored)
#' @param verbose Whether to print debug messages
#' @keywords internal
worker = function(master, ..., verbose=TRUE) {
    # Shadow `message` locally: timestamped output when verbose, no-op otherwise
    if (verbose)
        message = function(...) base::message(format(Sys.time(), "%Y-%m-%d %H:%M:%OS9 | "), ...)
    else
        message = function(...) invisible(NULL)
    #TODO: replace this by proper authentication
    auth = Sys.getenv("CMQ_AUTH")
    message("Master: ", master)
    if (length(list(...)) > 0)
        warning("Arguments ignored: ", paste(names(list(...)), collapse=", "))
    # connect to master
    zmq = methods::new(CMQWorker, master) # add I/O threads?
    # announce this worker; pkgver lets the master detect version mismatches
    zmq$send(list(id="WORKER_UP", auth=auth,
                  pkgver=utils::packageVersion("clustermq")))
    message("WORKER_UP to: ", master)
    fmt = "%i in %.2fs [user], %.2fs [system], %.2fs [elapsed]"
    start_time = proc.time()  # basis for the stats sent with WORKER_DONE
    counter = 0               # number of calls evaluated so far
    common_data = NA          # fun/const/rettype/common_seed from DO_SETUP
    token = NA                # pairs DO_CHUNK messages with the common data
    # Main event loop: block on the socket, dispatch on the message id
    while(TRUE) {
        tic = proc.time()
        msg = zmq$receive()
        if (is.null(msg$id)) {
            # more information if #146, #179, #191 happen again
            message("msg: ", paste(names(msg), collapse=", "))
            next
        }
        delta = proc.time() - tic
        message(sprintf("> %s (%.3fs wait)", msg$id, delta[3]))
        switch(msg$id,
            # evaluate one expression in the supplied environment; errors are
            # captured by try() and shipped back inside `result`
            "DO_CALL" = {
                result = try(eval(msg$expr, envir=msg$env))
                message("eval'd: ", msg$expr)
                counter = counter + 1
                zmq$send(list(id="WORKER_READY", auth=auth, token=token,
                              n_calls=counter, ref=msg$ref, result=result))
            },
            # receive the common data (function, constants, seed, exports)
            "DO_SETUP" = {
                # common data may live elsewhere; fetch it from the address
                # in msg$redirect instead of taking it from this message
                if (!is.null(msg$redirect)) {
                    message("WORKER_READY to redirect: ", msg$redirect)
                    req = list(id="WORKER_READY", auth=auth)
                    msg = zmq$get_data_redirect(msg$redirect, req)
                }
                need = c("id", "fun", "const", "export", "pkgs",
                         "rettype", "common_seed", "token")
                if (setequal(names(msg), need)) {
                    # keep only the fields forwarded to work_chunk() later
                    common_data = msg[setdiff(need, c("id", "export", "pkgs", "token"))]
                    # exported objects become globals visible to user code
                    list2env(msg$export, envir=.GlobalEnv)
                    token = msg$token
                    message("token from msg: ", token)
                    for (pkg in msg$pkgs)
                        library(pkg, character.only=TRUE) #TODO: in its own namespace
                    zmq$send(list(id="WORKER_READY", auth=auth,
                                  token=token, n_calls=counter))
                } else {
                    msg = paste("wrong field names for DO_SETUP:",
                                setdiff(names(msg), need))
                    zmq$send(list(id="WORKER_ERROR", auth=auth, msg=msg))
                }
            },
            # process a chunk of calls with the previously received common data
            "DO_CHUNK" = {
                # refuse chunks that were built for different common data
                if (!identical(token, msg$token)) {
                    msg = paste("mismatch chunk & common data", token, msg$token)
                    # NOTE(review): send_more marks a multipart message;
                    # presumably completed by the WORKER_DONE send after the
                    # loop breaks -- confirm against CMQWorker::send
                    zmq$send(list(id="WORKER_ERROR", auth=auth, msg=msg),
                             send_more=TRUE)
                    message("WORKER_ERROR: ", msg)
                    break
                }
                tic = proc.time()
                # work_chunk() is defined elsewhere in the package
                result = tryCatch(
                    do.call(work_chunk, c(list(df=msg$chunk), common_data)),
                    error = function(e) e)
                delta = proc.time() - tic
                if ("error" %in% class(result)) {
                    zmq$send(
                        list(id="WORKER_ERROR", auth=auth, msg=conditionMessage(result)),
                        send_more=TRUE)
                    message("WORKER_ERROR: ", conditionMessage(result))
                    break
                } else {
                    message("completed ", sprintf(fmt, length(result$result),
                            delta[1], delta[2], delta[3]))
                    counter = counter + length(result$result)
                    zmq$send(c(list(id="WORKER_READY", auth=auth, token=token,
                               n_calls=counter), result))
                }
            },
            # master asks us to idle for a bit before reporting ready again
            "WORKER_WAIT" = {
                message(sprintf("waiting %.2fs", msg$wait))
                Sys.sleep(msg$wait)
                zmq$send(list(id="WORKER_READY", auth=auth, token=token))
            },
            # orderly shutdown requested
            "WORKER_STOP" = {
                break
            }
        )
    }
    run_time = proc.time() - start_time
    message("shutting down worker")
    # final stats for the master: run time, peak memory (gc "max used"), calls
    zmq$send(list(
        id = "WORKER_DONE",
        time = run_time,
        mem = sum(gc()[,"max used"]),
        calls = counter,
        auth = auth
    ))
    message("\nTotal: ", sprintf(fmt, counter, run_time[1], run_time[2], run_time[3]))
    msg = zmq$receive() # empty message
    message("msg: ", msg)
}
| /R/worker.r | permissive | statquant/clustermq | R | false | false | 5,117 | r | loadModule("cmq_worker", TRUE) # CMQWorker C++ class
#' R worker submitted as cluster job
#'
#' Do not call this manually, the master will do that
#'
#' Connects back to the master, announces itself (WORKER_UP), then loops:
#' receive a message, dispatch on its `id` (DO_CALL/DO_SETUP/DO_CHUNK/
#' WORKER_WAIT/WORKER_STOP), reply with WORKER_READY or WORKER_ERROR.
#' On exit it reports timing, memory and call-count stats via WORKER_DONE.
#'
#' @param master The master address (tcp://ip:port)
#' @param ... Catch-all to not break older template values (ignored)
#' @param verbose Whether to print debug messages
#' @keywords internal
worker = function(master, ..., verbose=TRUE) {
    # Shadow `message` locally: timestamped output when verbose, no-op otherwise
    if (verbose)
        message = function(...) base::message(format(Sys.time(), "%Y-%m-%d %H:%M:%OS9 | "), ...)
    else
        message = function(...) invisible(NULL)
    #TODO: replace this by proper authentication
    auth = Sys.getenv("CMQ_AUTH")
    message("Master: ", master)
    if (length(list(...)) > 0)
        warning("Arguments ignored: ", paste(names(list(...)), collapse=", "))
    # connect to master
    zmq = methods::new(CMQWorker, master) # add I/O threads?
    # announce this worker; pkgver lets the master detect version mismatches
    zmq$send(list(id="WORKER_UP", auth=auth,
                  pkgver=utils::packageVersion("clustermq")))
    message("WORKER_UP to: ", master)
    fmt = "%i in %.2fs [user], %.2fs [system], %.2fs [elapsed]"
    start_time = proc.time()  # basis for the stats sent with WORKER_DONE
    counter = 0               # number of calls evaluated so far
    common_data = NA          # fun/const/rettype/common_seed from DO_SETUP
    token = NA                # pairs DO_CHUNK messages with the common data
    # Main event loop: block on the socket, dispatch on the message id
    while(TRUE) {
        tic = proc.time()
        msg = zmq$receive()
        if (is.null(msg$id)) {
            # more information if #146, #179, #191 happen again
            message("msg: ", paste(names(msg), collapse=", "))
            next
        }
        delta = proc.time() - tic
        message(sprintf("> %s (%.3fs wait)", msg$id, delta[3]))
        switch(msg$id,
            # evaluate one expression in the supplied environment; errors are
            # captured by try() and shipped back inside `result`
            "DO_CALL" = {
                result = try(eval(msg$expr, envir=msg$env))
                message("eval'd: ", msg$expr)
                counter = counter + 1
                zmq$send(list(id="WORKER_READY", auth=auth, token=token,
                              n_calls=counter, ref=msg$ref, result=result))
            },
            # receive the common data (function, constants, seed, exports)
            "DO_SETUP" = {
                # common data may live elsewhere; fetch it from the address
                # in msg$redirect instead of taking it from this message
                if (!is.null(msg$redirect)) {
                    message("WORKER_READY to redirect: ", msg$redirect)
                    req = list(id="WORKER_READY", auth=auth)
                    msg = zmq$get_data_redirect(msg$redirect, req)
                }
                need = c("id", "fun", "const", "export", "pkgs",
                         "rettype", "common_seed", "token")
                if (setequal(names(msg), need)) {
                    # keep only the fields forwarded to work_chunk() later
                    common_data = msg[setdiff(need, c("id", "export", "pkgs", "token"))]
                    # exported objects become globals visible to user code
                    list2env(msg$export, envir=.GlobalEnv)
                    token = msg$token
                    message("token from msg: ", token)
                    for (pkg in msg$pkgs)
                        library(pkg, character.only=TRUE) #TODO: in its own namespace
                    zmq$send(list(id="WORKER_READY", auth=auth,
                                  token=token, n_calls=counter))
                } else {
                    msg = paste("wrong field names for DO_SETUP:",
                                setdiff(names(msg), need))
                    zmq$send(list(id="WORKER_ERROR", auth=auth, msg=msg))
                }
            },
            # process a chunk of calls with the previously received common data
            "DO_CHUNK" = {
                # refuse chunks that were built for different common data
                if (!identical(token, msg$token)) {
                    msg = paste("mismatch chunk & common data", token, msg$token)
                    # NOTE(review): send_more marks a multipart message;
                    # presumably completed by the WORKER_DONE send after the
                    # loop breaks -- confirm against CMQWorker::send
                    zmq$send(list(id="WORKER_ERROR", auth=auth, msg=msg),
                             send_more=TRUE)
                    message("WORKER_ERROR: ", msg)
                    break
                }
                tic = proc.time()
                # work_chunk() is defined elsewhere in the package
                result = tryCatch(
                    do.call(work_chunk, c(list(df=msg$chunk), common_data)),
                    error = function(e) e)
                delta = proc.time() - tic
                if ("error" %in% class(result)) {
                    zmq$send(
                        list(id="WORKER_ERROR", auth=auth, msg=conditionMessage(result)),
                        send_more=TRUE)
                    message("WORKER_ERROR: ", conditionMessage(result))
                    break
                } else {
                    message("completed ", sprintf(fmt, length(result$result),
                            delta[1], delta[2], delta[3]))
                    counter = counter + length(result$result)
                    zmq$send(c(list(id="WORKER_READY", auth=auth, token=token,
                               n_calls=counter), result))
                }
            },
            # master asks us to idle for a bit before reporting ready again
            "WORKER_WAIT" = {
                message(sprintf("waiting %.2fs", msg$wait))
                Sys.sleep(msg$wait)
                zmq$send(list(id="WORKER_READY", auth=auth, token=token))
            },
            # orderly shutdown requested
            "WORKER_STOP" = {
                break
            }
        )
    }
    run_time = proc.time() - start_time
    message("shutting down worker")
    # final stats for the master: run time, peak memory (gc "max used"), calls
    zmq$send(list(
        id = "WORKER_DONE",
        time = run_time,
        mem = sum(gc()[,"max used"]),
        calls = counter,
        auth = auth
    ))
    message("\nTotal: ", sprintf(fmt, counter, run_time[1], run_time[2], run_time[3]))
    msg = zmq$receive() # empty message
    message("msg: ", msg)
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/guidance2.R
\name{guidance2}
\alias{guidance2}
\title{GUIDetree-based AligNment ConficencE 2}
\usage{
guidance2(sequences, msa.program = "mafft", exec, bootstrap = 100,
n.part = "auto", col.cutoff = "auto", seq.cutoff = "auto",
mask.cutoff = "auto", parallel = TRUE, ncore = "auto",
method = "auto", alt.msas.file, n.coopt = "auto")
}
\arguments{
\item{sequences}{An object of class \code{\link{DNAbin}} or \code{\link{AAbin}}
containing unaligned sequences of DNA or amino acids.}
\item{msa.program}{A character string giving the name of the MSA program,
currently one of c("mafft", "muscle", "clustalw2"); MAFFT is the default}
\item{exec}{A character string giving the path to the executable of the
alignment program.}
\item{bootstrap}{An integer giving the number of perturbed MSAs.}
\item{col.cutoff}{numeric between 0 and 1; specifies a cutoff to remove unreliable columns below the cutoff; either user supplied or "auto" (0.73)}
\item{seq.cutoff}{numeric between 0 and 1; specifies a cutoff to remove unreliable sequences below the cutoff; either user supplied or "auto" (0.5)}
\item{mask.cutoff}{specific residues below a certain cutoff are masked ('N' for DNA, 'X' for AA); either user supplied or "auto" (0.5)}
\item{parallel}{logical, if TRUE, specify the number of cores}
\item{ncore}{number of cores}
\item{method}{further arguments passed to mafft, default is "auto"}
\item{mask}{specific residues below a certain cutoff are masked ('N' for DNA, 'X' for AA)}
}
\value{
alignment_score: is the GUIDANCE alignment score
GUIDANCE_residue_score
GUIDANCE_score: is the GUIDANCE column score
GUIDANCE_sequence_score
guidance_msa: is the base MSA removed from unreliable columns below
cutoff
base_msa
}
\description{
MSA reliability assessment GUIDANCE2 (Sela et al. 2015)
}
\details{
Calculates column confidence (and other scores) by comparing alternative MSAs generated by the GUIDANCE with varying gap opening penalty and the HoT methodology. First 100 alternative MSAs (with BP guide trees) with varying gap opening penalty are produced, then for each n (default = 4) co-optimal alignments are produced using HoT. The basic comparison between the BP MSAs and a reference MSA is whether column residue pairs are identically aligned in all alternative MSAs compared with the base MSA (see \code{compareMSAs}).
}
\references{
Felsenstein J. 1985. Confidence limits on phylogenies: an
approach using the bootstrap. Evolution 39:783–791
Penn et al. (2010). An alignment confidence score capturing
robustness to guide tree uncertainty. Molecular Biology and Evolution
27:1759--1767
Sela et al. (2015). GUIDANCE2: accurate detection of unreliable alignment regions accounting for the uncertainty of multiple parameters. Nucleic acids research 43:W7--W14
G. Landan and D. Graur (2008). Local reliability measures from sets of co-optimal multiple sequence alignments. Pacific Symposium on Biocomputing. 13:15--24
}
\seealso{
\code{\link{compareMSAs}}, \code{\link{guidance}}, \code{\link{HoT}}
}
\author{
Franz-Sebastian Krah
Christoph Heibl
}
| /man/guidance2.Rd | no_license | heibl/rpg | R | false | true | 3,157 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/guidance2.R
\name{guidance2}
\alias{guidance2}
\title{GUIDetree-based AligNment ConfidencE 2}
\usage{
guidance2(sequences, msa.program = "mafft", exec, bootstrap = 100,
n.part = "auto", col.cutoff = "auto", seq.cutoff = "auto",
mask.cutoff = "auto", parallel = TRUE, ncore = "auto",
method = "auto", alt.msas.file, n.coopt = "auto")
}
\arguments{
\item{sequences}{An object of class \code{\link{DNAbin}} or \code{\link{AAbin}}
containing unaligned sequences of DNA or amino acids.}
\item{msa.program}{A character string giving the name of the MSA program,
currently one of c("mafft", "muscle", "clustalw2"); MAFFT is default}
\item{exec}{A character string giving the path to the executable of the
alignment program.}
\item{bootstrap}{An integer giving the number of perturbed MSAs.}
\item{col.cutoff}{numeric between 0 and 1; specifies a cutoff to remove unreliable columns below the cutoff; either user supplied or "auto" (0.73)}
\item{seq.cutoff}{numeric between 0 and 1; specifies a cutoff to remove unreliable sequences below the cutoff; either user supplied or "auto" (0.5)}
\item{mask.cutoff}{specific residues below a certain cutoff are masked ('N' for DNA, 'X' for AA); either user supplied or "auto" (0.5)}
\item{parallel}{logical, if TRUE, specify the number of cores}
\item{ncore}{number of cores}
\item{method}{further arguments passed to mafft, default is "auto"}
\item{mask}{specific residues below a certain cutoff are masked ('N' for DNA, 'X' for AA)}
}
\value{
alignment_score: is the GUIDANCE alignment score
GUIDANCE_residue_score
GUIDANCE_score: is the GUIDANCE column score
GUIDANCE_sequence_score
guidance_msa: is the base MSA removed from unreliable columns below
cutoff
base_msa
}
\description{
MSA reliability assessment GUIDANCE2 (Sela et al. 2015)
}
\details{
Calculates column confidence (and other scores) by comparing alternative MSAs generated by GUIDANCE with varying gap opening penalty and the HoT methodology. First 100 alternative MSAs (with BP guide trees) with varying gap opening penalty are produced, then for each n (default = 4) co-optimal alignments are produced using HoT. The basic comparison between the BP MSAs and a reference MSA is whether column residue pairs are identically aligned in all alternative MSAs compared with the base MSA (see \code{compareMSAs}).
}
\references{
Felsenstein J. 1985. Confidence limits on phylogenies: an
approach using the bootstrap. Evolution 39:783–791
Penn et al. (2010). An alignment confidence score capturing
robustness to guide tree uncertainty. Molecular Biology and Evolution
27:1759--1767
Sela et al. (2015). GUIDANCE2: accurate detection of unreliable alignment regions accounting for the uncertainty of multiple parameters. Nucleic acids research 43:W7--W14
G. Landan and D. Graur (2008). Local reliability measures from sets of co-optimal multiple sequence alignments. Pacific Symposium on Biocomputing. 13:15--24
}
\seealso{
\code{\link{compareMSAs}}, \code{\link{guidance}}, \code{\link{HoT}}
}
\author{
Franz-Sebastian Krah
Christoph Heibl
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kineticaSQL.R
\docType{methods}
\name{dbQuoteLiteral,KineticaConnection-method}
\alias{dbQuoteLiteral,KineticaConnection-method}
\title{dbQuoteLiteral()}
\usage{
\S4method{dbQuoteLiteral}{KineticaConnection}(conn, x, ...)
}
\arguments{
\item{conn}{A subclass of [KineticaConnection-class]}
\item{x}{A vector to quote as string.}
\item{...}{Other arguments passed on to methods.}
}
\description{
Escapes a literal if necessary.
}
| /man/dbQuoteLiteral.Rd | permissive | kineticadb/RKinetica | R | false | true | 509 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kineticaSQL.R
\docType{methods}
\name{dbQuoteLiteral,KineticaConnection-method}
\alias{dbQuoteLiteral,KineticaConnection-method}
\title{dbQuoteLiteral()}
\usage{
\S4method{dbQuoteLiteral}{KineticaConnection}(conn, x, ...)
}
\arguments{
\item{conn}{A subclass of [KineticaConnection-class]}
\item{x}{A vector to quote as string.}
\item{...}{Other arguments passed on to methods.}
}
\description{
Escapes a literal if necessary.
}
|
## ANALYSIS USING HOLMES ALGO
# Semi-supervised activity recognition pipeline: a random forest trained on
# labelled windows, a spectral embedding of the RF proximity matrix, and an
# HMM (mhsmm) smoothing step over all windows in the embedded space.
set.seed(12345)
library(randomForest)
library(data.table)
library(TLBC)
library("upclass")
library(extraTrees)
library(RSpectra)
library(mhsmm)
#Window size in seconds
ws=60
#frequency of data
# NOTE(review): `rate` (Hz) is not referenced later in this script -- confirm
# it is still needed.
rate=50
# Machine-specific data locations, keyed on the host operating system
# (macOS reports "Darwin"; the analysis server runs Linux). On any other
# OS these path variables are left undefined, exactly as before.
switch(Sys.info()[["sysname"]],
       Darwin = {
         cleanDataDirectory <- '/Users/Matthew/Documents/Oxford/Activity/Prototype_data/clean_data'
         cleanBoutDirectory <- '/Users/Matthew/Documents/Oxford/Activity/Prototype_data/all_participants/clean_data'
         dataDirectory <- '/Users/Matthew/Documents/Oxford/Activity/Prototype_data'
       },
       Linux = {
         cleanDataDirectory <- '/data/rockptarmigan/willetts/Prototype_data/clean_data'
         cleanBoutDirectory <- '/data/rockptarmigan/willetts/Prototype_data/all_participants/clean_data'
         dataDirectory <- '/data/rockptarmigan/willetts/Prototype_data'
       })
#Temporary directory
# All derived/cached artefacts live under <dataDirectory>/temp.
# NOTE(review): dataDirectory is only defined for Darwin/Linux hosts above.
tempDirectory<-paste0(dataDirectory,'/temp')
#Directories for RF and HMM models
RFoutput<-paste0(tempDirectory,'/RFoutput')
HMMoutput<-paste0(tempDirectory,'/HMMoutput')
Predictions<-paste0(tempDirectory,'/Predictions')
#Temp data directories
trainingAccelDirectory<-paste0(tempDirectory,'/AccelTraining')
testingAccelDirectory<-paste0(tempDirectory,'/AccelTesting')
trainingBoutDirectory<-paste0(tempDirectory,'/BoutTraining')
testingBoutDirectory<-paste0(tempDirectory,'/BoutTesting')
# Cache directory names embed the window size so runs with different ws
# values do not collide.
trainingFeatureDirectory<-paste0(trainingAccelDirectory,'_Features_',ws)
trainingLabelDirectory<-paste0(trainingBoutDirectory,'_Labels_',ws)
testingFeatureDirectory<-paste0(testingAccelDirectory,'_Features_',ws)
testingLabelDirectory<-paste0(testingBoutDirectory,'_Labels_',ws)
outputLabelDirectory<-paste0(tempDirectory,'/Bout_Labels_',ws)
outputFeatureDirectory<-paste0(tempDirectory,'/Accel_Features_',ws)
#Semi Supervised Learning Output
semiSupervisedLabelDirectory<-paste0(dataDirectory,'/SemiSupLabels')
semiSupervisedFeatureDirectory<-paste0(dataDirectory,'/SemiSupFeatures')
#Cut Down to certainly correct output
certainlyTrueLabelDirectory<-paste0(dataDirectory,'/CertainlyTrueLabels')
certainlyTrueFeatureDirectory<-paste0(dataDirectory,'/CertainlyTrueFeatures')
## Train on first half of data for each individual, test on the second
# Enumerate the per-participant bout (label) files and raw accelerometer
# files. The two listings are assumed to pair up by position -- TODO confirm
# both directories always contain the same participants in the same order.
listOfIndividuals <- list.files(cleanBoutDirectory)
listOfDataFiles <- list.files(cleanDataDirectory)
# Participant identifiers are the bout file names without their extension.
# The original pattern '.csv' treated '.' as a regex wildcard and removed
# every match anywhere in the name (e.g. 'acsvb.csv' -> 'b'); anchoring the
# pattern strips only a literal trailing ".csv".
identifiers <- gsub('\\.csv$', '', listOfIndividuals)
# Per-participant accumulators, filled by the loading loop below.
# NOTE(review): IndexOfInstanceFiles, performance, trainingNoLabel and
# testingNoLabel are not used later in this script -- confirm they are
# still needed.
InstanceData <- list()
FeatureData <- list()
IndexOfInstanceFiles <- list()
performance <- list()
trainingNoLabel <- list()
testingNoLabel <- list()
#Load up Data
# Per-participant loading loop. For each participant i:
#   * extract window-level labels from the bout file (winSize = ws) if not
#     already cached under outputLabelDirectory;
#   * extract accelerometer features if not already cached (now winSize = ws:
#     the original hard-coded 60 here, which silently diverges from the
#     labels whenever ws != 60);
#   * read both back, trim everything before the first / after the last
#     labelled window, and align the two tables to equal length.
for (i in seq_along(listOfIndividuals)) {
  # create instance level information
  boutFileAddress <- paste(cleanBoutDirectory, listOfIndividuals[i], sep = '/')
  if (!file.exists(file.path(outputLabelDirectory, identifiers[i]))) {
    extractLabelsSingleFile(inputFile = boutFileAddress, outputDir = outputLabelDirectory, winSize = ws)
  }
  # create features
  accelFileAddress <- paste(cleanDataDirectory, listOfDataFiles[i], sep = '/')
  if (!file.exists(file.path(outputFeatureDirectory, identifiers[i]))) {
    extractAccFeatsFile(inputFile = accelFileAddress,
                        outputPath = file.path(outputFeatureDirectory, identifiers[i]),
                        winSize = ws)
  }
  # Step 1 - load all cached label and feature files for this participant.
  # (Assigning NULL to a list element drops it; harmless as initialisation.)
  InstanceData[[i]] <- NULL
  InstanceDir <- file.path(outputLabelDirectory, identifiers[i])
  InstanceFiles <- list.files(InstanceDir)
  tempInstanceData <- NULL
  FeatureData[[i]] <- NULL
  tempFeatureData <- NULL
  FeatureDir <- file.path(outputFeatureDirectory, identifiers[i])
  FeatureFiles <- list.files(FeatureDir)
  # seq_along also fixes the original 1:length(...) pattern, which iterates
  # over c(1, 0) when a directory is empty.
  for (k in seq_along(InstanceFiles)) {
    # load instance (label) data and the matching feature data
    temptempInstanceData <- read.csv(file = file.path(InstanceDir, InstanceFiles[k]), stringsAsFactors = FALSE)
    temptempFeatureData <- read.csv(file = file.path(FeatureDir, FeatureFiles[k]), stringsAsFactors = FALSE)
    # discard all data before the first and after the last labelled window
    kx <- which(!temptempInstanceData$behavior == 'nolabel')
    if (length(kx) > 0) {
      maxIndex <- min(kx[length(kx)], nrow(temptempFeatureData))
      temptempInstanceData <- temptempInstanceData[kx[1]:maxIndex, ]
      tempInstanceData <- rbind(tempInstanceData, temptempInstanceData)
      temptempFeatureData <- temptempFeatureData[kx[1]:maxIndex, ]
      tempFeatureData <- rbind(tempFeatureData, temptempFeatureData)
    }
  }
  InstanceData[[i]] <- tempInstanceData
  rm(tempInstanceData)
  rm(temptempInstanceData)
  FeatureData[[i]] <- tempFeatureData
  rm(tempFeatureData)
  rm(temptempFeatureData)
  # cut down Instance data to size of Feature data, or vice versa
  if (nrow(FeatureData[[i]]) < nrow(InstanceData[[i]])) {
    InstanceData[[i]] <- InstanceData[[i]][seq_len(nrow(FeatureData[[i]])), ]
  } else {
    FeatureData[[i]] <- FeatureData[[i]][seq_len(nrow(InstanceData[[i]])), ]
  }
  # tag every window with its participant identifier
  InstanceData[[i]]$name <- identifiers[i]
  FeatureData[[i]]$name <- identifiers[i]
}
# Stack the per-participant tables into one window-level table each.
AllFeatureData<-do.call("rbind", FeatureData)
AllInstanceData<-do.call("rbind", InstanceData)
# Collapse label groups -- presumably gardening/standing -> sitting and
# in-vehicle -> driving. reduceLabels() is not defined in this file (project
# helper) -- TODO confirm its semantics.
ReduceInstanceData<-reduceLabels(data=AllInstanceData,labelsToReduce=list(c('gardening','standing'),c('in-vehicle')),overallLabel =c('sitting','driving'))
#1. Run RF using the labelled data points
# ix indexes the windows that carry a real activity label.
ix<-which(!AllInstanceData$behavior=='nolabel')
# Random-forest hyperparameters. Columns 2:42 are taken to be the feature
# columns (column 1 presumably a timestamp/id -- TODO confirm).
ntree=500
mtry = floor(sqrt(ncol(AllFeatureData[ix,2:42])))
replace=TRUE
# NOTE(review): nsample is never used below -- confirm it can be removed.
nsample=1000
nodesize=1
# NOTE(review): sampsize=1000 assumes at least 1000 labelled windows --
# confirm for small datasets.
sampsize=1000
# Fit the supervised RF on labelled windows only; proximity = TRUE is needed
# for the spectral-embedding step that follows.
rf<-randomForest(x = AllFeatureData[ix,2:42],y=as.factor(ReduceInstanceData$behavior[ix]),
ntree=ntree,
mtry=mtry,
replace=replace,
sampsize=sampsize,
nodesize=nodesize,
importance=TRUE,
proximity = TRUE,
do.trace = TRUE)
#2.Output the RF proximity matrix, D, for all data points labelled and unlabelled
# predict() is called on every window (not just ix), so the proximity matrix
# spans labelled and unlabelled data alike.
rf.predict<-predict(rf, AllFeatureData[,2:42], type="prob",
norm.votes=TRUE, predict.all=FALSE, proximity=TRUE, nodes=FALSE)
D<-rf.predict$proximity
#3.Using ideas from spectral clustering, take an eigen(like) spectral decomposition of D and project all
# (labelled and unlabelled) data points into the leading k-components of the decomposition of D
# with k smallish (say 3 or 4 dimensions)
# U = Diag - D is the unnormalised graph Laplacian of the proximity graph.
Diag <- diag(apply(D, 1, sum))
U<-Diag-D
# NOTE(review): k counts unique values of behavior, which still includes the
# 'nolabel' category at this point -- confirm that is intended.
k <- length(unique(ReduceInstanceData$behavior))
# Smallest-magnitude eigenpairs of the Laplacian give the spectral embedding.
evL <- eigs_sym(U,k+1,which='SM')
Z <- evL$vectors[,1:k]
# NOTE(review): Z has one row per window but the colour vector only covers
# the labelled subset ix; R recycles the shorter vector here -- confirm this
# plot is what was intended.
plot(Z, col=as.factor(ReduceInstanceData$behavior[ix]), pch=20)
#4.run an HMM with gaussian emission probs for the projected points in the k-space
#learn the HMM using the labelled and unlabelled data in the k-space
# Replace the 'nolabel' placeholder with real NA so unlabelled windows are
# treated as missing states.
ReduceInstanceDataNAs<-ReduceInstanceData
ReduceInstanceDataNAs[ReduceInstanceData[,2]=='nolabel',2]<-NA
# NOTE(review): column 2 is assumed to be the `behavior` column (this script
# accesses it both by index and by name) -- confirm the column order.
labelledInstance<-as.factor(as.vector(ReduceInstanceDataNAs[,2]))
# Build the mhsmm input object: s = integer-coded (partially observed) state
# sequence, x = spectral embedding of each window, N = sequence length.
# NOTE(review): as.numeric() codes states in alphabetical factor-level order,
# which need not match the first-appearance order of `states` below.
hmmData<-list()
hmmData$s<-as.numeric(labelledInstance)
hmmData$x<-Z
hmmData$N<-length(hmmData$s)
class(hmmData)<-"hsmm.data"
states<-unique(ReduceInstanceDataNAs$behavior)
states<-states[!is.na(states)]
# ---- Empirical HMM specification and Viterbi smoothing ----------------------
# The original code had several defects here:
#   * `P <- table(states[1:statesLength-1], states[2:statesLength])` tabulated
#     consecutive entries of `states` (the vector of *unique* labels), not of
#     the observed label sequence, and `1:n-1` parses as `(1:n)-1` (operator
#     precedence), not `1:(n-1)`;
#   * the emission loop ran `for (i in 1:J)` with `J` undefined;
#   * `hmmspec(init = init, ...)` used an undefined `init`;
#   * the file ended with `hsmmfit(train, startval, ...)` and a pasted console
#     prompt `R> summary(hmv)` -- both referenced undefined objects and the
#     prompt line was a syntax error, so they have been removed.
# State ordering: levels(labelledInstance) is alphabetical and matches the
# integer coding used for hmmData$s above, so transitions and emissions are
# indexed consistently with the (partially) observed state sequence.
stateLevels <- levels(labelledInstance)
statesLength <- length(stateLevels)
# Empirical transition matrix from consecutive labelled windows; pairs with an
# unlabelled (NA) member are dropped by table(). Rows for states never
# followed by a labelled window come out as NaN after normalisation -- TODO
# confirm every state has at least one observed outgoing transition.
behaviorSeq <- ReduceInstanceDataNAs$behavior
P <- table(factor(behaviorSeq[-length(behaviorSeq)], levels = stateLevels),
           factor(behaviorSeq[-1], levels = stateLevels))
P <- P / rowSums(P)
# Gaussian emission parameters per state: mean vector and covariance of the
# spectral embedding Z over the windows labelled with that state.
mu <- list()
sigma <- list()
for (i in seq_len(statesLength)) {
  stateRows <- which(ReduceInstanceData$behavior == stateLevels[i])
  mu[[i]] <- colMeans(Z[stateRows, ])
  sigma[[i]] <- cov(Z[stateRows, ])
}
B <- list(mu = mu, sigma = sigma)
# Initial state distribution: uniform over the observed states (the original
# `init` was undefined -- TODO confirm a flat prior is the intended choice).
init <- rep(1 / statesLength, statesLength)
model <- hmmspec(init = init, trans = P, parms.emis = B, dens.emis = dmvnorm.hsmm)
#Now train model
# Fit the HMM by EM from the empirical starting values, then decode the most
# likely state for every window (labelled and unlabelled) with Viterbi.
output <- hmmfit(x = hmmData, start.val = model, mstep = mstep.mvnorm,
                 lock.transition = FALSE, tol = 1e-08, maxit = 1000)
smoothed <- predict(object = output$model, newdata = Z, method = 'viterbi')
# Map the integer Viterbi path back onto behaviour label names. (Assumes
# every state index is visited at least once, so the factor has statesLength
# levels -- TODO confirm.)
newLabels <- as.factor(smoothed$s)
labelledInstance <- as.factor(as.vector(ReduceInstanceData[ix, 2]))
labelCode <- levels(labelledInstance)
levels(newLabels) <- labelCode
newLabels <- as.character(newLabels)
| /HolmesModel.R | no_license | MatthewWilletts/Activity | R | false | false | 8,084 | r | ## ANALYSIS USING HOLMES ALGO
# NOTE(review): this is the dump's second, duplicate copy of HolmesModel.R
# (the dataset stores each file in both its "content" and "text" columns).
# Code below is kept byte-identical; see the first copy for detailed notes.
set.seed(12345)
library(randomForest)
library(data.table)
library(TLBC)
library("upclass")
library(extraTrees)
library(RSpectra)
library(mhsmm)
#Window size in seconds
ws=60
#frequency of data
rate=50
# Machine-specific data locations (macOS "Darwin" vs the Linux server).
if(Sys.info()[['sysname']]=="Darwin"){
cleanDataDirectory<-'/Users/Matthew/Documents/Oxford/Activity/Prototype_data/clean_data'
cleanBoutDirectory<-'/Users/Matthew/Documents/Oxford/Activity/Prototype_data/all_participants/clean_data'
dataDirectory<-'/Users/Matthew/Documents/Oxford/Activity/Prototype_data'
#Linux
} else if(Sys.info()[['sysname']]=='Linux'){
cleanDataDirectory<-'/data/rockptarmigan/willetts/Prototype_data/clean_data'
cleanBoutDirectory<-'/data/rockptarmigan/willetts/Prototype_data/all_participants/clean_data'
dataDirectory<-'/data/rockptarmigan/willetts/Prototype_data'
}
#Temporary directory
tempDirectory<-paste0(dataDirectory,'/temp')
#Directories for RF and HMM models
RFoutput<-paste0(tempDirectory,'/RFoutput')
HMMoutput<-paste0(tempDirectory,'/HMMoutput')
Predictions<-paste0(tempDirectory,'/Predictions')
#Temp data directories
trainingAccelDirectory<-paste0(tempDirectory,'/AccelTraining')
testingAccelDirectory<-paste0(tempDirectory,'/AccelTesting')
trainingBoutDirectory<-paste0(tempDirectory,'/BoutTraining')
testingBoutDirectory<-paste0(tempDirectory,'/BoutTesting')
trainingFeatureDirectory<-paste0(trainingAccelDirectory,'_Features_',ws)
trainingLabelDirectory<-paste0(trainingBoutDirectory,'_Labels_',ws)
testingFeatureDirectory<-paste0(testingAccelDirectory,'_Features_',ws)
testingLabelDirectory<-paste0(testingBoutDirectory,'_Labels_',ws)
outputLabelDirectory<-paste0(tempDirectory,'/Bout_Labels_',ws)
outputFeatureDirectory<-paste0(tempDirectory,'/Accel_Features_',ws)
#Semi Supervised Learning Output
semiSupervisedLabelDirectory<-paste0(dataDirectory,'/SemiSupLabels')
semiSupervisedFeatureDirectory<-paste0(dataDirectory,'/SemiSupFeatures')
#Cut Down to certainly correct output
certainlyTrueLabelDirectory<-paste0(dataDirectory,'/CertainlyTrueLabels')
certainlyTrueFeatureDirectory<-paste0(dataDirectory,'/CertainlyTrueFeatures')
## Train on first half of data for each individual, test on the second
listOfIndividuals<-list.files(cleanBoutDirectory)
listOfDataFiles<-list.files(cleanDataDirectory)
identifiers<-gsub(listOfIndividuals,pattern = '.csv',replacement = '')
InstanceData<-list()
FeatureData<-list()
IndexOfInstanceFiles<-list()
performance<-list()
trainingNoLabel<-list()
testingNoLabel<-list()
#Load up Data
# Per-participant loop: derive cached labels/features if missing, then read
# them back, trim to the labelled span and align the two tables.
for (i in 1:length(listOfIndividuals)){
#create instance level information
boutFileAddress<-paste(cleanBoutDirectory,listOfIndividuals[i],sep='/')
if(file.exists(file.path(outputLabelDirectory,identifiers[i]))==FALSE){
extractLabelsSingleFile(inputFile = boutFileAddress,outputDir = outputLabelDirectory,winSize = ws)
}
#create features
accelFileAddress<-paste(cleanDataDirectory,listOfDataFiles[i],sep='/')
if(file.exists(file.path(outputFeatureDirectory,identifiers[i]))==FALSE){
extractAccFeatsFile(inputFile = accelFileAddress,outputPath = file.path(outputFeatureDirectory,identifiers[i]),winSize = 60)
}
#Step 1 - Load up all data
#Load in instance level labels
InstanceData[[i]]<-NULL
InstanceDir<-file.path(outputLabelDirectory,identifiers[i])
InstanceFiles<-list.files(InstanceDir)
tempInstanceData<-NULL
#Load in features
FeatureData[[i]]<-NULL
tempFeatureData<-NULL
FeatureDir<-file.path(outputFeatureDirectory,identifiers[i])
FeatureFiles<-list.files(FeatureDir)
for(k in 1:length(InstanceFiles)){
#load in instance data
temptempInstanceData<-read.csv(file=file.path(InstanceDir,InstanceFiles[k]),stringsAsFactors = F)
#load in feature data
temptempFeatureData<-read.csv(file=file.path(FeatureDir,FeatureFiles[k]),stringsAsFactors = F)
#discard all data before first labelled data point and after last labelled point
kx<-which(!temptempInstanceData$behavior=='nolabel')
if(length(kx)>0){
maxIndex<-min(kx[length(kx)],nrow(temptempFeatureData))
temptempInstanceData<-temptempInstanceData[kx[1]:maxIndex,]
tempInstanceData<-rbind(tempInstanceData,temptempInstanceData)
temptempFeatureData<-temptempFeatureData[kx[1]:maxIndex,]
tempFeatureData<-rbind(tempFeatureData,temptempFeatureData)
}
}
InstanceData[[i]]<-tempInstanceData
rm(tempInstanceData)
rm(temptempInstanceData)
FeatureData[[i]]<-tempFeatureData
rm(tempFeatureData)
rm(temptempFeatureData)
#cut down Instance data to size of Feature data, or vice versa
if(nrow(FeatureData[[i]])<nrow(InstanceData[[i]])){
InstanceData[[i]]<-InstanceData[[i]][seq(1,nrow(FeatureData[[i]])),]
} else {
FeatureData[[i]]<-FeatureData[[i]][seq(1,nrow(InstanceData[[i]])),]
}
#add participant labels to data
InstanceData[[i]]$name<-identifiers[i]
FeatureData[[i]]$name<-identifiers[i]
# print(nrow(InstanceData[[i]]))
# print(nrow(FeatureData[[i]]))
}
AllFeatureData<-do.call("rbind", FeatureData)
AllInstanceData<-do.call("rbind", InstanceData)
ReduceInstanceData<-reduceLabels(data=AllInstanceData,labelsToReduce=list(c('gardening','standing'),c('in-vehicle')),overallLabel =c('sitting','driving'))
#1. Run RF using the labelled data points
ix<-which(!AllInstanceData$behavior=='nolabel')
ntree=500
mtry = floor(sqrt(ncol(AllFeatureData[ix,2:42])))
replace=TRUE
nsample=1000
nodesize=1
sampsize=1000
rf<-randomForest(x = AllFeatureData[ix,2:42],y=as.factor(ReduceInstanceData$behavior[ix]),
ntree=ntree,
mtry=mtry,
replace=replace,
sampsize=sampsize,
nodesize=nodesize,
importance=TRUE,
proximity = TRUE,
do.trace = TRUE)
#2.Output the RF proximity matrix, D, for all data points labelled and unlabelled
rf.predict<-predict(rf, AllFeatureData[,2:42], type="prob",
norm.votes=TRUE, predict.all=FALSE, proximity=TRUE, nodes=FALSE)
D<-rf.predict$proximity
#3.Using ideas from spectral clustering, take an eigen(like) spectral decomposition of D and project all
# (labelled and unlabelled) data points into the leading k-components of the decomposition of D
# with k smallish (say 3 or 4 dimensions)
Diag <- diag(apply(D, 1, sum))
U<-Diag-D
k <- length(unique(ReduceInstanceData$behavior))
evL <- eigs_sym(U,k+1,which='SM')
Z <- evL$vectors[,1:k]
plot(Z, col=as.factor(ReduceInstanceData$behavior[ix]), pch=20)
#4.run an HMM with gaussian emission probs for the projected points in the k-space
#learn the HMM using the labelled and unlabelled data in the k-space
ReduceInstanceDataNAs<-ReduceInstanceData
ReduceInstanceDataNAs[ReduceInstanceData[,2]=='nolabel',2]<-NA
labelledInstance<-as.factor(as.vector(ReduceInstanceDataNAs[,2]))
hmmData<-list()
hmmData$s<-as.numeric(labelledInstance)
hmmData$x<-Z
hmmData$N<-length(hmmData$s)
class(hmmData)<-"hsmm.data"
states<-unique(ReduceInstanceDataNAs$behavior)
states<-states[!is.na(states)]
# ---- Empirical HMM specification and Viterbi smoothing (duplicate copy) -----
# Same fixes as in the first copy of this script:
#   * `1:statesLength-1` parses as `(1:statesLength)-1` (precedence trap) and
#     the transition table was built from the vector of *unique* labels, not
#     the observed label sequence;
#   * `J` and `init` were undefined;
#   * the trailing `hsmmfit(train, startval, ...)` call and the pasted console
#     prompt `R> summary(hmv)` (a syntax error) have been removed.
stateLevels <- levels(labelledInstance)
statesLength <- length(stateLevels)
# Empirical transition matrix from consecutive labelled windows; pairs with an
# unlabelled (NA) member are dropped by table(). Rows for states never
# followed by a labelled window come out as NaN -- TODO confirm every state
# has at least one observed outgoing transition.
behaviorSeq <- ReduceInstanceDataNAs$behavior
P <- table(factor(behaviorSeq[-length(behaviorSeq)], levels = stateLevels),
           factor(behaviorSeq[-1], levels = stateLevels))
P <- P / rowSums(P)
# Gaussian emission parameters per state from the spectral embedding Z.
mu <- list()
sigma <- list()
for (i in seq_len(statesLength)) {
  stateRows <- which(ReduceInstanceData$behavior == stateLevels[i])
  mu[[i]] <- colMeans(Z[stateRows, ])
  sigma[[i]] <- cov(Z[stateRows, ])
}
B <- list(mu = mu, sigma = sigma)
# Uniform initial state distribution (`init` was undefined -- TODO confirm).
init <- rep(1 / statesLength, statesLength)
model <- hmmspec(init = init, trans = P, parms.emis = B, dens.emis = dmvnorm.hsmm)
#Now train model
output <- hmmfit(x = hmmData, start.val = model, mstep = mstep.mvnorm,
                 lock.transition = FALSE, tol = 1e-08, maxit = 1000)
smoothed <- predict(object = output$model, newdata = Z, method = 'viterbi')
# Map the integer Viterbi path back onto behaviour label names.
newLabels <- as.factor(smoothed$s)
labelledInstance <- as.factor(as.vector(ReduceInstanceData[ix, 2]))
labelCode <- levels(labelledInstance)
levels(newLabels) <- labelCode
newLabels <- as.character(newLabels)
|
# This is the user-interface definition of a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
# Classic three-panel Shiny layout: header, sidebar of inputs, main output.
# (pageWithSidebar is deprecated in current Shiny versions but supported.)
shinyUI(pageWithSidebar(
headerPanel(""),
# Sidebar holds five placeholders whose controls are presumably rendered
# server-side via renderUI (cascading indicator/state/city/school filters)
# -- confirm against server.R.
sidebarPanel(
uiOutput("choose_indicador"),
uiOutput("choose_estado"),
uiOutput("choose_cidade"),
uiOutput("choose_escola"),
uiOutput("choose_columns")
),
# Main panel: a single table driven by output$data_table in server.R.
mainPanel(
tableOutput("data_table")
)
)) | /ui.R | no_license | flaviobarros/ENEM2014 | R | false | false | 490 | r |
# This is the user-interface definition of a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
shinyUI(pageWithSidebar(
headerPanel(""),
sidebarPanel(
uiOutput("choose_indicador"),
uiOutput("choose_estado"),
uiOutput("choose_cidade"),
uiOutput("choose_escola"),
uiOutput("choose_columns")
),
mainPanel(
tableOutput("data_table")
)
)) |
# AFL/valgrind-generated fuzz harness for multivariance::fastdist: replays a
# recorded 5x7 input matrix (including extreme doubles near the limits of
# double range) and inspects the structure of the result.
testlist <- list(x = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 4.17902599097323e+184, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(multivariance::fastdist,testlist)
str(result) | /multivariance/inst/testfiles/fastdist/AFL_fastdist/fastdist_valgrind_files/1613098350-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 303 | r | testlist <- list(x = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 4.17902599097323e+184, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
# Duplicate of the generated fuzz-harness call (dump stores each file twice).
result <- do.call(multivariance::fastdist,testlist)
str(result) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.