content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
#************************************************************************************
#Lab exercise FU-Berlin
#************************************************************************************
#This exercise contains:
#1. Visualization data: pairs, hist, simple plots
#2. Linear regression:
# 2.1. Simple LR: analysis of the relationship O3-meteovar
# 2.2. Multiple linear regression: fit the best model with more than one predictor
#3. Logistic regression model to analyse high ozone levels (part2, optional)
#************************************************************************************
# Author: NOF
# up. 04.2020
#************************************************************************************
#Load library
library(MASS) # general packages
library(stats)
# library(Hmisc)
library(ggplot2) # plots
library(dplyr)
#Read the file (adding the right path)
mydata <- read.csv("data/data_year_o3.csv")
#Look the data
str(mydata)
head(mydata)
# Convert date to Date for plotting
mydata$date <- as.Date(mydata$date)
# Create months
mydata$mon <- format(mydata$date, "%m")
#########################
#Data visualization
#Simple plots
########################
plot(mydata$o3,type="l",col="orange",xlab="days",ylab="MDA8 O3 (ppb)",main="MDA8 O3")
hist(mydata$o3,main="Distribution of Ozone",col='skyblue',xlab="MDA8 O3")
# Other options of plots
# ggplot2
ggplot2::ggplot(mydata, aes(x=date, y=o3, group=1)) +
geom_line(color="red") +
scale_x_date(date_breaks = "1 years", date_labels = "%Y-%m")
#Scatter plots:
plot(mydata$tmax,mydata$o3,pch = 19, col = "blue", main = "Ozone vs Tx", xlab = "Tx (k)", ylab = "O3 (ppb)")
#Add fit lines
abline(lm(o3~tmax,mydata),col="red") # regression line (y~x)
# visualize all histograms/graphs at once (alternative)
# hist(dat) #library(Hmisc)
#
pairs(mydata[,c(3:9,2)],panel=panel.smooth,col='black',cex=0.5,cex.labels=1)
#another way to make the plots
#lower panel:# panel.smooth function is built in.
source("plot_panel.R") #add correct path
pairs(mydata[,c(3:9,2)], cex.labels =0.9,
lower.panel=panel.smooth, upper.panel=panel.cor,diag.panel=panel.hist)
# We can create a season column
# Derive the meteorological season from the two-digit month string produced
# by format(date, "%m"); any month outside "01".."12" maps to NA.
season_map <- c(
  "12" = "DJF", "01" = "DJF", "02" = "DJF",
  "03" = "MAM", "04" = "MAM", "05" = "MAM",
  "06" = "JJA", "07" = "JJA", "08" = "JJA",
  "09" = "SON", "10" = "SON", "11" = "SON"
)
mydata$season <- unname(season_map[mydata$mon])
# Another way to plot correlations
require(ggcorrplot)
corr <- round(cor(mydata[,c(3:9,2)]), 3)
ggcorrplot(corr)
##################################
# Split data into seasons
# 1. Analyse seasonal cycle
# 2. Daily cycle
#################################
# We plot each season
# create a subset for each season and visualise
data_jja <- subset(mydata, season=="JJA")
data_mam <- subset(mydata, season=="MAM")
plot(data_jja$o3,type="l",col="orange",xlab="days",ylab="MDA8 O3 (ppb)",main="MDA8 O3")
# It can be done for each season
# We can use ggplot and facet seasons
ggplot2::ggplot(mydata, aes(x=date, y=o3))+
geom_point() + facet_grid(~season)
# Plot the monthly cycle
#
boxplot(mydata$o3~mydata$mon)
# Similarly with ggplot2
ggplot2::ggplot(mydata, aes(x=mon, y=o3))+ geom_boxplot(fill="blue")
# The rest of the variables can be also assessed like this
ggplot2::ggplot(mydata, aes(x=mon, y=tmax))+ geom_boxplot(fill="blue")
############################################################################
#*******************
#Linear regression:
#*******************
# Before starting, we will split the data into seasons and we restrict the analysis to
# summer (JJA). Repeat the steps for MAM for comparison
# Let's use data_jja
#Fitting different models
#null model
# Remove the last column to avoid problems
data_jja$season <- NULL
data_mam$season <- NULL
#
m0 <- lm(o3~.,data=data_jja,na.action=na.omit)
#linear regression
m1 <- lm(o3~tmax,data=data_jja,na.action=na.omit) #or you can start by adding a different parameter (Tx,RH...)
#check model output
summary(m1)
#Adding more variables:
m2 <- update(m1,~.+rh)
m3 <- update(m2,~.+blh)
m4 <- update(m3,~.+ssrd)
#Comparing models:
#1.Anova test:
#Comparing null model with the model fitted with new variables
# m1 is statistically significant (Tx contribute to o3 variance)
anova(m0,m1)
#comparison of more models
anova(m1,m2,m3)
anova(m3,m4)
#Other way for comparison models:
#2.AIC criterion:
# When comparing two models, the smaller the AIC, the better the fit.
# This is the basis of automated model simplification using step
AIC(m1,m2)
AIC(m2,m3)
AIC(m1,m2,m3,m4)
#******************************************************
# Multiple linear regression analysis:
#******************************************************
#Linear regression analysis with more variables
pred.names <- names(data_jja[-(which(names(data_jja)%in%c("o3","date","mon")))])
# These are the covariates
print(pred.names)
#One way to write the form of the equation:
form <- (paste("o3~",paste(pred.names,collapse='+'),sep=""))
form <- as.formula(form)
#Add the form to the equation
fit <- lm(form, data=data_jja,na.action=na.omit)
# fit <- lm(o3 ~ LO3+Tx+Gh+RH+SR+ST+TC+U10, data=dat,na.action=na.omit)
summary(fit)
# Other useful functions
coef <- coefficients(fit) # model coefficients
fitted(fit) # predicted values
confint(fit, level=0.95) # CIs for model parameters
residuals(fit) # residuals
anova(fit) # anova table
#Fit more models
fit1 <- update(fit,~.-ssrd)
fit2 <- update(fit1,~.-tcc)
fit3 <- update(fit2,~.-ws)
#*************
#Plotting fit:
#*************
layout(matrix(c(1,2,3,4),2,2)) # optional 4 graphs/page
hist(data_jja$o3,main="Distribution of Ozone",xlab="O3")
sresid <- studres(fit)
hist(sresid, freq=FALSE,
main="Distribution of Studentized Residuals")
xfit<-seq(min(sresid),max(sresid),length=40)
yfit<-dnorm(xfit)
lines(xfit, yfit,col="red")
plot(residuals(fit),xlab="residuals",ylab="");
title("Simple Residual Plot")
acf(residuals(fit), main = "");
title("Residual Autocorrelation Plot ");
#or plot summary(model)
#Visualizing all plots at once
par(mfrow=c(2,2))
plot(fit)
#reset par conditions
par(mfrow=c(1,1))
##################################################
# Model selection
# Find a best subset of predictors to fit the model:
# StepAIC
##################################################
#direction-> "both", "backward","forward"
model <- stepAIC(fit,direction="both")
summary(model)
#Repeat the plots for the final model
#Plot the model
plot(model)
# for spring repeat the same steps:
fit_mam <- lm(form, data=data_mam,na.action=na.omit)
model_mam <- stepAIC(fit_mam,direction="both")
summary(model_mam)
#######################
# Variable importance
#######################
require(relaimpo)
# calculate relative importance
relImportance <- calc.relimp(model, type = "lmg", rela = F)
relImportance_mam <- calc.relimp(model_mam, type = "lmg", rela = F)
# Sort
cat('Relative Importances: \n')
# Collect the sorted relative importances into small data frames for plotting
df.rimpo <- data.frame("jja"=sort(round(relImportance$lmg, 3), decreasing=TRUE))
df.rimpo$variable <- rownames(df.rimpo)
# for mam
df.rimpo.mam <- data.frame("mam"=sort(round(relImportance_mam$lmg, 3), decreasing=TRUE))
df.rimpo.mam$variable <- rownames(df.rimpo.mam)
# plots
# Bug fix: the importance column is named "jja" (there is no "rimpo" column),
# so the y aesthetic must reference it by that name
ggplot2::ggplot(df.rimpo, aes(x=variable, y=jja))+ geom_bar(stat = "identity")
#******************************
# Logistic regression: glm
#******************************
# conditions, high O3 levels (how many days with O3>50 or 60 ppb)
ths <- 50 # change to 60 and see how the number of exceedances changes
# Day index for plotting; seq_along() is safe even for zero-row data,
# whereas 1:length(x) would yield c(1, 0) when x is empty
data_jja$date.f <- seq_along(data_jja$date)
data_jja$o3_50 <- ifelse(data_jja$o3>=ths, "orange", "forestgreen")
plot(o3~date.f, data=data_jja, type="h", col=o3_50)
abline(h=ths, lty=2, col="red")
#Analyse ozone exceedances >50ppb
#Convert the outcome(o3) into binary data:
data_jja$o3 <- ifelse(data_jja$o3>ths,1,0)
#Fit logistic model
fitglm <- glm(o3~tmax+rh+ssrd+blh+Direction+ws,data=data_jja,family=binomial())
#summary of model
summary(fitglm)
#Apply model selection
modelglm <- stepAIC(fitglm,direction="both")
summary(modelglm)
plot(modelglm)
# Compute pseudo R-square
modelChi <- modelglm$null.deviance - modelglm$deviance
pseudo.R2 <- modelChi / modelglm$null.deviance
pseudo.R2
newdata <- data_jja
newdata$pred.o50 <- fitglm$fitted.values
pred <- predict(modelglm,type="response") # gives us the probability
# For variable importance
require(caret)
varImp(modelglm, scale=T)
| /exercise_ozone_Lab.R | no_license | noeliaof/Lab_extremes | R | false | false | 8,553 | r | #************************************************************************************
#Lab exercise FU-Berlin
#************************************************************************************
#This exercise contains:
#1. Visualization data: pairs, hist, simple plots
#2. Linear regression:
# 2.1. Simple LR: analysis of the relationship O3-meteovar
# 2.2. Multiple linear regression: fit the best model with more than one predictor
#3. Logistic regression model to analyse high ozone levels (part2, optional)
#************************************************************************************
# Author: NOF
# up. 04.2020
#************************************************************************************
#Load library
library(MASS) # general packages
library(stats)
# library(Hmisc)
library(ggplot2) # plots
library(dplyr)
#Read the file (adding the right path)
mydata <- read.csv("data/data_year_o3.csv")
#Look the data
str(mydata)
head(mydata)
# Convert date to Date for plotting
mydata$date <- as.Date(mydata$date)
# Create months
mydata$mon <- format(mydata$date, "%m")
#########################
#Data visualization
#Simple plots
########################
plot(mydata$o3,type="l",col="orange",xlab="days",ylab="MDA8 O3 (ppb)",main="MDA8 O3")
hist(mydata$o3,main="Distribution of Ozone",col='skyblue',xlab="MDA8 O3")
# Other options of plots
# ggplot2
ggplot2::ggplot(mydata, aes(x=date, y=o3, group=1)) +
geom_line(color="red") +
scale_x_date(date_breaks = "1 years", date_labels = "%Y-%m")
#Scatter plots:
plot(mydata$tmax,mydata$o3,pch = 19, col = "blue", main = "Ozone vs Tx", xlab = "Tx (k)", ylab = "O3 (ppb)")
#Add fit lines
abline(lm(o3~tmax,mydata),col="red") # regression line (y~x)
# visualize all histograms/graphs at once (alternative)
# hist(dat) #library(Hmisc)
#
pairs(mydata[,c(3:9,2)],panel=panel.smooth,col='black',cex=0.5,cex.labels=1)
#another way to make the plots
#lower panel:# panel.smooth function is built in.
source("plot_panel.R") #add correct path
pairs(mydata[,c(3:9,2)], cex.labels =0.9,
lower.panel=panel.smooth, upper.panel=panel.cor,diag.panel=panel.hist)
# We can create a season column
mydata <- mydata%>%mutate(season=ifelse(mon>="01" & mon<="02" | mon=="12", "DJF",
ifelse(mon>="03" & mon<="05", "MAM",
ifelse(mon>="06" & mon<="08", "JJA",
ifelse(mon>="09" & mon<="11", "SON",NA)))))
# Another way to plot correlations
require(ggcorrplot)
corr <- round(cor(mydata[,c(3:9,2)]), 3)
ggcorrplot(corr)
##################################
# Split data into seasons
# 1. Analyse seasonal cycle
# 2. Daily cycle
#################################
# We plot each season
# create a subset for each season and visualise
data_jja <- subset(mydata, season=="JJA")
data_mam <- subset(mydata, season=="MAM")
plot(data_jja$o3,type="l",col="orange",xlab="days",ylab="MDA8 O3 (ppb)",main="MDA8 O3")
# It can be done for each season
# We can use ggplot and facet seasons
ggplot2::ggplot(mydata, aes(x=date, y=o3))+
geom_point() + facet_grid(~season)
# Plot the monthly cycle
#
boxplot(mydata$o3~mydata$mon)
# Similarly with ggplot2
ggplot2::ggplot(mydata, aes(x=mon, y=o3))+ geom_boxplot(fill="blue")
# The rest of the variables can be also assessed like this
ggplot2::ggplot(mydata, aes(x=mon, y=tmax))+ geom_boxplot(fill="blue")
############################################################################
#*******************
#Linear regression:
#*******************
# Before starting, we will split the data into seasons and we restrict the analysis to
# summer (JJA). Repeat the steps for MAM for comparison
# Let's use data_jja
#Fitting different models
#null model
# Remove the last column to avoid problems
data_jja$season <- NULL
data_mam$season <- NULL
#
m0 <- lm(o3~.,data=data_jja,na.action=na.omit)
#linear regression
m1 <- lm(o3~tmax,data=data_jja,na.action=na.omit) #or you can start by adding a different parameter (Tx,RH...)
#check model output
summary(m1)
#Adding more variables:
m2 <- update(m1,~.+rh)
m3 <- update(m2,~.+blh)
m4 <- update(m3,~.+ssrd)
#Comparing models:
#1.Anova test:
#Comparing null model with the model fitted with new variables
# m1 is statistically significant (Tx contribute to o3 variance)
anova(m0,m1)
#comparison of more models
anova(m1,m2,m3)
anova(m3,m4)
#Other way for comparison models:
#2.AIC criterion:
# When comparing two models, the smaller the AIC, the better the fit.
# This is the basis of automated model simplification using step
AIC(m1,m2)
AIC(m2,m3)
AIC(m1,m2,m3,m4)
#******************************************************
# Multiple linear regression analysis:
#******************************************************
#Linear regression analysis with more variables
pred.names <- names(data_jja[-(which(names(data_jja)%in%c("o3","date","mon")))])
# These are the covariates
print(pred.names)
#One way to write the form of the equation:
form <- (paste("o3~",paste(pred.names,collapse='+'),sep=""))
form <- as.formula(form)
#Add the form to the equation
fit <- lm(form, data=data_jja,na.action=na.omit)
# fit <- lm(o3 ~ LO3+Tx+Gh+RH+SR+ST+TC+U10, data=dat,na.action=na.omit)
summary(fit)
# Other useful functions
coef <- coefficients(fit) # model coefficients
fitted(fit) # predicted values
confint(fit, level=0.95) # CIs for model parameters
residuals(fit) # residuals
anova(fit) # anova table
#Fit more models
fit1 <- update(fit,~.-ssrd)
fit2 <- update(fit1,~.-tcc)
fit3 <- update(fit2,~.-ws)
#*************
#Plotting fit:
#*************
layout(matrix(c(1,2,3,4),2,2)) # optional 4 graphs/page
hist(data_jja$o3,main="Distribution of Ozone",xlab="O3")
sresid <- studres(fit)
hist(sresid, freq=FALSE,
main="Distribution of Studentized Residuals")
xfit<-seq(min(sresid),max(sresid),length=40)
yfit<-dnorm(xfit)
lines(xfit, yfit,col="red")
plot(residuals(fit),xlab="residuals",ylab="");
title("Simple Residual Plot")
acf(residuals(fit), main = "");
title("Residual Autocorrelation Plot ");
#or plot summary(model)
#Visualizing all plots at once
par(mfrow=c(2,2))
plot(fit)
#reset par conditions
par(mfrow=c(1,1))
##################################################
# Model selection
# Find a best subset of predictors to fit the model:
# StepAIC
##################################################
#direction-> "both", "backward","forward"
model <- stepAIC(fit,direction="both")
summary(model)
#Repeat the plots for the final model
#Plot the model
plot(model)
# for spring repeat the same steps:
fit_mam <- lm(form, data=data_mam,na.action=na.omit)
model_mam <- stepAIC(fit_mam,direction="both")
summary(model_mam)
#######################
# Variable importance
#######################
require(relaimpo)
# calculate relative importance
relImportance <- calc.relimp(model, type = "lmg", rela = F)
relImportance_mam <- calc.relimp(model_mam, type = "lmg", rela = F)
# Sort
cat('Relative Importances: \n')
# Collect the sorted relative importances into small data frames for plotting
df.rimpo <- data.frame("jja"=sort(round(relImportance$lmg, 3), decreasing=TRUE))
df.rimpo$variable <- rownames(df.rimpo)
# for mam
df.rimpo.mam <- data.frame("mam"=sort(round(relImportance_mam$lmg, 3), decreasing=TRUE))
df.rimpo.mam$variable <- rownames(df.rimpo.mam)
# plots
# Bug fix: the importance column is named "jja" (there is no "rimpo" column),
# so the y aesthetic must reference it by that name
ggplot2::ggplot(df.rimpo, aes(x=variable, y=jja))+ geom_bar(stat = "identity")
#******************************
# Logistic regression: glm
#******************************
# conditions, high O3 levels (how many days with O3>50 or 60 ppb)
ths <- 50 # change to 60 and see how the number of exceedances changes
# Day index for plotting; seq_along() is safe even for zero-row data,
# whereas 1:length(x) would yield c(1, 0) when x is empty
data_jja$date.f <- seq_along(data_jja$date)
data_jja$o3_50 <- ifelse(data_jja$o3>=ths, "orange", "forestgreen")
plot(o3~date.f, data=data_jja, type="h", col=o3_50)
abline(h=ths, lty=2, col="red")
#Analyse ozone exceedances >50ppb
#Convert the outcome(o3) into binary data:
data_jja$o3 <- ifelse(data_jja$o3>ths,1,0)
#Fit logistic model
fitglm <- glm(o3~tmax+rh+ssrd+blh+Direction+ws,data=data_jja,family=binomial())
#summary of model
summary(fitglm)
#Apply model selection
modelglm <- stepAIC(fitglm,direction="both")
summary(modelglm)
plot(modelglm)
# Compute pseudo R-square
modelChi <- modelglm$null.deviance - modelglm$deviance
pseudo.R2 <- modelChi / modelglm$null.deviance
pseudo.R2
newdata <- data_jja
newdata$pred.o50 <- fitglm$fitted.values
pred <- predict(modelglm,type="response") # gives us the probability
# For variable importance
require(caret)
varImp(modelglm, scale=T)
|
\name{Henikoff.w}
\alias{Henikoff.w}
\title{Weights proteins sequences according to Henikoff method.}
\description{
Assigns Henikoff weights to sequences on a MSA.
}
\usage{
Henikoff.w(msa,fmarg,normalize=FALSE)
}
\arguments{
\item{msa}{ An integer matrix representing a multiple sequence alignment of proteins in numeric code, as obtained from \code{msa2ali}.}
\item{fmarg}{Marginal frequencies, such as returned by aa.freq.marg}
\item{normalize}{Logical, should the weights be normalized such that \eqn{\sum_i w_i = 1}{sum_i w_i = 1}? }
}
\value{
A list with the following components:
\item{w}{ vector of weights.}
\item{mef}{ Effective number of sequences. If normalized=TRUE, calculated as Henikoff's proposal
\eqn{\exp(-\sum_i w_i \log(w_i))}{exp(sum(-w * log(w)))}. If normalized=FALSE, the sum of \code{w}.}
}
\references{
Henikoff, Steven, and Jorja G. Henikoff. "Amino acid substitution matrices from protein blocks." Proceedings of the National Academy of Sciences 89.22 (1992): 10915-10919.
}
\examples{
msa.letters<-rbind(c("G","Y","V","G","S"),c("G","F","D","G","F"),c("G","Y","D","G","F"),c("G","Y","Q","G","G"))
msa<-msa2num(msa.letters)
w<-Henikoff.w(msa,fmarg=aa.freq.marg(msa))
w$w
}
\keyword{weights}\keyword{msa}
| /DCAforR/man/Henikoff.w.Rd | permissive | roespada/DCAforRpackage | R | false | false | 1,218 | rd | \name{Henikoff.w}
\alias{Henikoff.w}
\title{Weights proteins sequences according to Henikoff method.}
\description{
Assigns Henikoff weights to sequences on a MSA.
}
\usage{
Henikoff.w(msa,fmarg,normalize=FALSE)
}
\arguments{
\item{msa}{ An integer matrix representing a multiple sequence alignment of proteins in numeric code, as obtained from \code{msa2ali}.}
\item{fmarg}{Marginal frequencies, such as returned by aa.freq.marg}
\item{normalize}{Logical, should the weights be normalized such that \eqn{\sum_i w_i = 1}{sum_i w_i = 1}? }
}
\value{
A list with the following components:
\item{w}{ vector of weights.}
\item{mef}{ Effective number of sequences. If normalized=TRUE, calculated as Henikoff's proposal
\eqn{\exp(-\sum_i w_i \log(w_i))}{exp(sum(-w * log(w)))}. If normalized=FALSE, the sum of \code{w}.}
}
\references{
Henikoff, Steven, and Jorja G. Henikoff. "Amino acid substitution matrices from protein blocks." Proceedings of the National Academy of Sciences 89.22 (1992): 10915-10919.
}
\examples{
msa.letters<-rbind(c("G","Y","V","G","S"),c("G","F","D","G","F"),c("G","Y","D","G","F"),c("G","Y","Q","G","G"))
msa<-msa2num(msa.letters)
w<-Henikoff.w(msa,fmarg=aa.freq.marg(msa))
w$w
}
\keyword{weights}\keyword{msa}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/windows.R
\name{tile_rle}
\alias{tile_rle}
\alias{stretch_rle}
\alias{roll_rle}
\title{Map a function over an Rle in windows}
\usage{
tile_rle(.x, .size = 1L, .fun, ...)
stretch_rle(.x, .size = 1L, .step = 1L, .fun, ...)
roll_rle(.x, .size = 1L, .step = 1L, .fun, ...)
}
\arguments{
\item{.x}{An atomic vector or Rle object.}
\item{.size}{The (integer) size of the window.}
\item{.fun}{A function}
\item{...}{Additional arguments passed on to the mapped function}
\item{.step}{The (integer) amount to shift the start of the window by.}
}
\description{
Map a function over an Rle in windows
}
\details{
The map functions apply a function over non-overlapping windows \code{\link[=tile_rle]{tile_rle()}},
overlapping windows \code{\link[=roll_rle]{roll_rle()}}, and windows with a fixed start but
increasing width \code{\link[=stretch_rle]{stretch_rle()}}
}
\examples{
x <- S4Vectors::Rle(1:10, 1:10)
tile_rle(x, .size = 2, mean)
roll_rle(x, .size = 2, .step = 1, mean)
stretch_rle(x, .size = 1, .step = 2, mean)
}
| /man/windows.Rd | no_license | sridhar0605/superintronic | R | false | true | 1,067 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/windows.R
\name{tile_rle}
\alias{tile_rle}
\alias{stretch_rle}
\alias{roll_rle}
\title{Map a function over an Rle in windows}
\usage{
tile_rle(.x, .size = 1L, .fun, ...)
stretch_rle(.x, .size = 1L, .step = 1L, .fun, ...)
roll_rle(.x, .size = 1L, .step = 1L, .fun, ...)
}
\arguments{
\item{.x}{An atomic vector or Rle object.}
\item{.size}{The (integer) size of the window.}
\item{.fun}{A function}
\item{...}{Additional arguments passed on to the mapped function}
\item{.step}{The (integer) amount to shift the start of the window by.}
}
\description{
Map a function over an Rle in windows
}
\details{
The map functions apply a function over non-overlapping windows \code{\link[=tile_rle]{tile_rle()}},
overlapping windows \code{\link[=roll_rle]{roll_rle()}}, and windows with a fixed start but
increasing width \code{\link[=stretch_rle]{stretch_rle()}}
}
\examples{
x <- S4Vectors::Rle(1:10, 1:10)
tile_rle(x, .size = 2, mean)
roll_rle(x, .size = 2, .step = 1, mean)
stretch_rle(x, .size = 1, .step = 2, mean)
}
|
# ============================================================================
# Load required packages
# ============================================================================
pckgList <- c(
"shiny",
"shinyjs",
"shinyBS",
"shinydashboard",
"shinycssloaders",
"DT",
"heatmaply",
"RSQLite",
"reshape",
"igraph",
"visNetwork",
"htmlwidgets",
"openxlsx",
"rjson",
"rintrojs",
"httr"
)
# Install any packages from pckgList that are missing, then attach them all.
pckgMissing <- setdiff(pckgList, rownames(installed.packages()))
if (length(pckgMissing) > 0) install.packages(pckgMissing)
invisible(lapply(pckgList, library, character.only = TRUE))
# ============================================================================
# Load dataset stars
# ============================================================================
starsHsa <- read.delim("./www/decorationTables/gseHsa.txt")
starsMmu <- read.delim("./www/decorationTables/gseMmu.txt")
starsII <- read.delim("./www/decorationTables/ipf_vs_ctrl_lung_coding.txt")
starsIII <- read.delim("./www/decorationTables/bleoD14_vs_ctrl_lung_coding.txt")
starsIV <- read.delim("./www/decorationTables/gseNonCoding.txt")
stars <- rbind(starsHsa, starsMmu, starsIV)
# ============================================================================
# Connect to DB
# ============================================================================
db_path <- "."
fibromine_db <- dbConnect(RSQLite::SQLite(),
file.path(db_path, "FibromineDB.sqlite"))
# ============================================================================
# Source custom functions
# ============================================================================
curDir <- getwd()
source(file.path(curDir, "utils.R"))
# ============================================================================
# techChoices
# ============================================================================
techChoices <- c("RNA expression profiling",
"Non-coding RNA expression profiling")
# ============================================================================
# gcnHsaChoices
# ============================================================================
gcnHsaChoices <- as.character(dbGetQuery(conn = fibromine_db,
statement = '
SELECT
Name
FROM
GCNdrivers
WHERE
Comparison = "IPF_vs_Ctrl"
;'
)$Name)
# ============================================================================
# gcnMmuChoices
# ============================================================================
gcnMmuChoices <- as.character(dbGetQuery(conn = fibromine_db,
statement = '
SELECT
Name
FROM
GCNdrivers
WHERE
Comparison = "BleomycinD14_vs_Ctrl"
;'
)$Name)
# ============================================================================
# scGeneChoices
# ============================================================================
scGeneChoices <- unique(as.character(dbGetQuery(conn = fibromine_db,
statement = '
SELECT
Gene
FROM
scDEA
;'
)$Gene))
# ============================================================================
# DEmiRNAChoices
# ============================================================================
DEmiRNAChoices <- as.character(dbGetQuery(conn = fibromine_db,
statement = '
SELECT
prodID
FROM
nonCodingDEShort
;'
)$prodID)
# ============================================================================
# Datasets lists to use in download data tab
# ============================================================================
transDatasets <- dbGetQuery(
fibromine_db,
'SELECT DISTINCT
DatasetID
FROM
DatasetsDescription
;'
)
transDatasets <- as.character(transDatasets[,1])
transDatasets <- transDatasets[grep("^GSE", transDatasets)]
# Proteomics datasets: every DatasetID that does NOT look like a GEO accession
protDatasets <- dbGetQuery(
  fibromine_db,
  'SELECT DISTINCT
  DatasetID
  FROM
  DatasetsDescription
  ;'
)
protDatasets <- as.character(protDatasets[,1])
# Bug fix: x[-grep(...)] returns an EMPTY vector when grep() finds no match
# (since x[-integer(0)] == x[integer(0)]); negating grepl() is always safe
protDatasets <- protDatasets[!grepl("^GSE", protDatasets)]
| /global.R | permissive | bbyun28/Fibromine | R | false | false | 3,983 | r | # ============================================================================
# Load required packages
# ============================================================================
pckgList <- c(
"shiny",
"shinyjs",
"shinyBS",
"shinydashboard",
"shinycssloaders",
"DT",
"heatmaply",
"RSQLite",
"reshape",
"igraph",
"visNetwork",
"htmlwidgets",
"openxlsx",
"rjson",
"rintrojs",
"httr"
)
pckgMissing <- pckgList[!(pckgList %in% installed.packages()[,"Package"])]
if(length(pckgMissing)) install.packages(pckgMissing)
for (i in pckgList) {
library(i, character.only= TRUE)
}
# ============================================================================
# Load dataset stars
# ============================================================================
starsHsa <- read.delim("./www/decorationTables/gseHsa.txt")
starsMmu <- read.delim("./www/decorationTables/gseMmu.txt")
starsII <- read.delim("./www/decorationTables/ipf_vs_ctrl_lung_coding.txt")
starsIII <- read.delim("./www/decorationTables/bleoD14_vs_ctrl_lung_coding.txt")
starsIV <- read.delim("./www/decorationTables/gseNonCoding.txt")
stars <- rbind(starsHsa, starsMmu, starsIV)
# ============================================================================
# Connect to DB
# ============================================================================
db_path <- "."
fibromine_db <- dbConnect(RSQLite::SQLite(),
file.path(db_path, "FibromineDB.sqlite"))
# ============================================================================
# Source custom functions
# ============================================================================
curDir <- getwd()
source(file.path(curDir, "utils.R"))
# ============================================================================
# techChoices
# ============================================================================
techChoices <- c("RNA expression profiling",
"Non-coding RNA expression profiling")
# ============================================================================
# gcnHsaChoices
# ============================================================================
gcnHsaChoices <- as.character(dbGetQuery(conn = fibromine_db,
statement = '
SELECT
Name
FROM
GCNdrivers
WHERE
Comparison = "IPF_vs_Ctrl"
;'
)$Name)
# ============================================================================
# gcnMmuChoices
# ============================================================================
gcnMmuChoices <- as.character(dbGetQuery(conn = fibromine_db,
statement = '
SELECT
Name
FROM
GCNdrivers
WHERE
Comparison = "BleomycinD14_vs_Ctrl"
;'
)$Name)
# ============================================================================
# scGeneChoices
# ============================================================================
scGeneChoices <- unique(as.character(dbGetQuery(conn = fibromine_db,
statement = '
SELECT
Gene
FROM
scDEA
;'
)$Gene))
# ============================================================================
# DEmiRNAChoices
# ============================================================================
DEmiRNAChoices <- as.character(dbGetQuery(conn = fibromine_db,
statement = '
SELECT
prodID
FROM
nonCodingDEShort
;'
)$prodID)
# ============================================================================
# Datasets lists to use in download data tab
# ============================================================================
transDatasets <- dbGetQuery(
fibromine_db,
'SELECT DISTINCT
DatasetID
FROM
DatasetsDescription
;'
)
transDatasets <- as.character(transDatasets[,1])
transDatasets <- transDatasets[grep("^GSE", transDatasets)]
# Proteomics datasets: every DatasetID that does NOT look like a GEO accession
protDatasets <- dbGetQuery(
  fibromine_db,
  'SELECT DISTINCT
  DatasetID
  FROM
  DatasetsDescription
  ;'
)
protDatasets <- as.character(protDatasets[,1])
# Bug fix: x[-grep(...)] returns an EMPTY vector when grep() finds no match
# (since x[-integer(0)] == x[integer(0)]); negating grepl() is always safe
protDatasets <- protDatasets[!grepl("^GSE", protDatasets)]
|
#' Turn a WeMo smart switch ON
#'
#' Invokes the `wemo_control.sh` helper script shipped with the installed
#' slimeLapse package, discarding the script's output.
#'
#' @param wemo Identifier of the WeMo switch, forwarded to the script.
#' @return The exit status returned by [system()].
#' @export
lightON <- function(wemo) {
  # file.path() builds the script path portably instead of paste0()
  script <- file.path(find.package("slimeLapse"), "wemo_control.sh")
  # shQuote() guards against paths or device names containing spaces
  # or shell metacharacters
  system(paste(shQuote(script), shQuote(wemo), "ON"),
         ignore.stdout = TRUE, ignore.stderr = TRUE)
}
#' Turn a WeMo smart switch OFF
#'
#' Invokes the `wemo_control.sh` helper script shipped with the installed
#' slimeLapse package, discarding the script's output.
#'
#' @param wemo Identifier of the WeMo switch, forwarded to the script.
#' @return The exit status returned by [system()].
#' @export
lightOFF <- function(wemo) {
  # file.path() builds the script path portably instead of paste0()
  script <- file.path(find.package("slimeLapse"), "wemo_control.sh")
  # shQuote() guards against paths or device names containing spaces
  # or shell metacharacters
  system(paste(shQuote(script), shQuote(wemo), "OFF"),
         ignore.stdout = TRUE, ignore.stderr = TRUE)
}
| /R/wemo.R | no_license | swarm-lab/slimeLapse | R | false | false | 393 | r | #' @export
lightON <- function(wemo) {
script <- paste0(find.package("slimeLapse"), "/wemo_control.sh")
system(paste0(script, " ", wemo, " ON"), ignore.stdout = TRUE, ignore.stderr = TRUE)
}
#' @export
lightOFF <- function(wemo) {
script <- paste0(find.package("slimeLapse"), "/wemo_control.sh")
system(paste0(script, " ", wemo, " OFF"), ignore.stdout = TRUE, ignore.stderr = TRUE)
}
|
############################################################
# Create occasion-specific encounter history for Brook Trout
############################################################
library(tidyverse)
#bring in fish data, and isolate BKT
fishdat <- read_csv("Data/Thesis/Tidy/tidyfish1.csv", col_names = T)
brook <- fishdat %>%
select(1:4)
#change names to match environmental covariate data set
brook[46,3] <- 201
brook[47,3] <- 202
#remove sites deemed unfit for analysis (fishless or area not randomly selected)
brook <- brook %>%
unite(newID, c(HUC8, site), sep = "_", remove = F)%>%
filter(!newID %in% c("UPI_29", "UPI_165", "YEL_33", "YEL_98"))%>%
select(-newID)
#create new columns with occ1, occ2, occ3
brook <- brook %>%
mutate(occ1=NA, occ2=NA, occ3=NA)
#isolate sites where BKT were present and absent
present <- brook %>%
filter(BKT == 1)
absent <- brook %>%
filter(BKT == 0)
#update values of occasion-specific detections (occ1, occ2, occ3) for the
#19 sites where Brook Trout were captured; one matrix row per site, entered
#as a single assignment instead of 19 separate ones
det_hist <- matrix(
  c(0, 1, 0,
    1, 1, 1,
    1, 1, 0,
    1, 0, 0,
    0, 0, 1,
    1, 1, 1,
    1, 1, 0,
    1, 1, 1,
    1, 1, 1,
    1, 1, 1,
    0, 1, 0,
    1, 1, 1,
    1, 1, 1,
    1, 1, 1,
    0, 1, 1,
    1, 1, 1,
    1, 1, 1,
    1, 0, 0,
    0, 0, 1),
  ncol = 3, byrow = TRUE
)
present[, 5:7] <- det_hist
#update values in "absent" df
absent[,5:7] <- 0
#join dataset back together
Enc <- full_join(absent, present, by=NULL)
#create column for enc history in string format ("001" for example)
Brook_enc <- Enc %>%
unite(EncHist, c(5:7), sep = "", remove = F)
Brook_enc <- Brook_enc[,c(1:3,5,4,6:8)]
######################################
## Write csv for BKT encounter history
######################################
write.csv(Brook_enc, "Data/Thesis/Tidy/BKT_EncHist.csv", row.names = F)
write.csv(Enc, "Data/Thesis/Tidy/BKT_DetectionHist.csv", row.names = F)
| /analysis/Brook Trout Project/Occupancy/RMark/Brook Trout/BKT_Enc_Hist.R | no_license | EEOB590A-Fall-2019/BKelly_Fishes | R | false | false | 2,065 | r | ############################################################
# Create occasion-specific encounter history for Brook Trout
############################################################
library(tidyverse)

# Bring in fish data; keep the site identifiers plus the BKT presence column.
fishdat <- read_csv("Data/Thesis/Tidy/tidyfish1.csv", col_names = T)
brook <- fishdat %>%
  select(1:4)

# Recode two site numbers so they match the environmental covariate data set.
brook[46, 3] <- 201
brook[47, 3] <- 202

# Remove sites deemed unfit for analysis (fishless or area not randomly
# selected).
brook <- brook %>%
  unite(newID, c(HUC8, site), sep = "_", remove = F) %>%
  filter(!newID %in% c("UPI_29", "UPI_165", "YEL_33", "YEL_98")) %>%
  select(-newID)

# Add empty occasion-specific detection columns (columns 5:7).
brook <- brook %>%
  mutate(occ1 = NA, occ2 = NA, occ3 = NA)

# Split sites where BKT were present vs. absent.
present <- brook %>%
  filter(BKT == 1)
absent <- brook %>%
  filter(BKT == 0)

# Occasion-specific detections for the present sites, one row per site in
# the same order as `present` (replaces 19 individual row assignments).
det <- matrix(c(0, 1, 0,
                1, 1, 1,
                1, 1, 0,
                1, 0, 0,
                0, 0, 1,
                1, 1, 1,
                1, 1, 0,
                1, 1, 1,
                1, 1, 1,
                1, 1, 1,
                0, 1, 0,
                1, 1, 1,
                1, 1, 1,
                1, 1, 1,
                0, 1, 1,
                1, 1, 1,
                1, 1, 1,
                1, 0, 0,
                0, 0, 1),
              ncol = 3, byrow = TRUE)
present[, 5:7] <- as.data.frame(det)

# Sites where the species was never caught get all-zero histories.
absent[, 5:7] <- 0

# Join the data set back together.
Enc <- full_join(absent, present, by = NULL)

# Collapse the three occasions into a single string history (e.g. "001").
Brook_enc <- Enc %>%
  unite(EncHist, c(5:7), sep = "", remove = F)
Brook_enc <- Brook_enc[, c(1:3, 5, 4, 6:8)]

######################################
## Write csv for BKT encounter history
######################################
write.csv(Brook_enc, "Data/Thesis/Tidy/BKT_EncHist.csv", row.names = F)
write.csv(Enc, "Data/Thesis/Tidy/BKT_DetectionHist.csv", row.names = F)
|
# HW1: list7
#
# 1. Create a list `l1` consist of 52 components from 'a' to 'z' and 'A' to 'Z'. (hint: check variables letters and LETTERS)
# 2. Convert the list `l1` into a 13 x 4 matrix `l2` filling by column.
# 3. Convert the list `l1` into a vector `x1`.
# 4. Convert the list `l1` into an atomic vector `x2`. (hint: `x1` should be different with `x2`)
## Do not modify this line! ## Write your code for 1. after this line! ##
l1 <- as.list(c(letters, LETTERS))
## Do not modify this line! ## Write your code for 2. after this line! ##
l2 <- matrix(l1, nrow = 13, ncol = 4)
## Do not modify this line! ## Write your code for 3. after this line! ##
x1 <- as.vector(l1)
## Do not modify this line! ## Write your code for 4. after this line! ##
x2 <- unlist(l1) | /HW1/List 2.R | no_license | lsjhome/GR5206 | R | false | false | 794 | r | # HW1: list7
#
# 1. Create a list `l1` consist of 52 components from 'a' to 'z' and 'A' to 'Z'. (hint: check variables letters and LETTERS)
# 2. Convert the list `l1` into a 13 x 4 matrix `l2` filling by column.
# 3. Convert the list `l1` into a vector `x1`.
# 4. Convert the list `l1` into an atomic vector `x2`. (hint: `x1` should be different with `x2`)
## Do not modify this line! ## Write your code for 1. after this line! ##
l1 <- as.list(c(letters, LETTERS))
## Do not modify this line! ## Write your code for 2. after this line! ##
l2 <- matrix(l1, nrow = 13, ncol = 4)
## Do not modify this line! ## Write your code for 3. after this line! ##
x1 <- as.vector(l1)
## Do not modify this line! ## Write your code for 4. after this line! ##
x2 <- unlist(l1)
require(devtools)
load_all("~/git/rmaize")
require(ape)
require(ggtree)
require(ggforce)
require(Rtsne)
require(skimr)
dirp = '~/projects/rnaseq'
dird = file.path(dirp, 'data')
dirc = '/scratch.global/zhoux379/rnaseq'
#t_cfg = read_gspread_master(lib='rnaseq')
#f_yml = file.path(dird, '10.cfg.yaml')
#Sys.setenv("R_CONFIG_FILE" = f_yml)
# Load one RNA-Seq study and return a harmonized sample table + expression table.
#
# yid: study identifier understood by rnaseq_cpm() (project helper from
#   rmaize; returns the sample table `th` and expression table `tm`).
# Returns list(th, tm) where SampleIDs are prefixed with the study id so
# they remain unique when several studies are combined.
read_rnaseq <- function(yid) {
#{{{
res = rnaseq_cpm(yid)
th = res$th; tm = res$tm
# Fill missing metadata with defaults and force plain character columns.
th = th %>% replace_na(list(Tissue='',Genotype='B73',Treatment='')) %>%
mutate(Tissue=as.character(Tissue)) %>%
mutate(Genotype=as.character(Genotype)) %>%
mutate(Treatment=as.character(Treatment))
# Studies in this set (plus 'rn19c') get Treatment folded into Tissue below.
yids_dev = c('rn10a','rn11a','rn13b','rn14b','rn14c','rn14e',"rn16b","rn16c","rn18g")
# Hand-curated, study-specific sample filters.
if(yid == 'rn12a') {
th = th %>% filter(Treatment == 'WT')
} else if(yid == 'rn17c') {
th = th %>% filter(Treatment == 'con')
} else if(yid %in% c(yids_dev,'rn19c')) {
if(yid == 'rn13b') th = th %>% filter(!str_detect(Treatment, "^ET"))
if(yid == 'rn18g') th = th %>% filter(Genotype == 'B73')
# Combine Tissue and Treatment into one label; record the study id instead.
th = th %>% mutate(Tissue=str_c(Tissue,Treatment, sep="_")) %>%
mutate(Treatment=yid)
}
# Prefix sample ids with the study id and keep a fixed set of columns.
th = th %>% mutate(study = yid) %>%
mutate(SampleID = str_c(study, SampleID, sep='_')) %>%
replace_na(list(Treatment='')) %>%
select(SampleID, Tissue, Genotype, Treatment, Replicate, study)
# Apply the same id prefix to the expression table and drop any samples
# that were filtered out of the sample table above.
tm = tm %>% mutate(SampleID = str_c(yid, SampleID, sep='_')) %>%
filter(SampleID %in% th$SampleID)
list(th=th, tm=tm)
#}}}
}
# Parse a MultiQC Trimmomatic summary table.
#
# fi: path to the MultiQC trimmomatic TSV.
# paired: TRUE for paired-end, FALSE for single-end, or 'both' when the
#   study mixes single- and paired-end runs.
# Returns a tibble with one row per sample: total input reads plus counts
# of surviving / forward-only / reverse-only / dropped reads.  Stops if the
# per-category counts do not add up to the input total.
read_multiqc_trimmomatic <- function(fi, paired = TRUE) {
    #{{{
    ti = read_tsv(fi)
    if (isFALSE(paired)) {
        # Single-end: surviving + dropped must account for every input read.
        nd = sum(ti$input_reads - ti$surviving - ti$dropped)
        stopifnot(nd == 0)
        to = ti %>% mutate(SampleID = Sample, total = input_reads,
                           surviving_f = 0, surviving_r = 0)
    } else if (isTRUE(paired) || identical(paired, 'both')) {
        # Paired-end sample names carry a suffix (e.g. "_1"); strip it.
        ti2 = ti %>%
            separate(Sample, c("SampleID", 'suf'), sep = "_", fill = 'right', extra = 'merge') %>%
            select(-suf) %>%
            mutate(surviving_f = forward_only_surviving,
                   surviving_r = reverse_only_surviving)
        if (identical(paired, 'both'))
            # Mixed runs: single-end rows have no input_read_pairs; fall
            # back to input_reads so the bookkeeping below still balances.
            ti2 = ti2 %>%
                replace_na(list('input_reads' = 0, 'input_read_pairs' = 0,
                                'surviving_f' = 0, 'surviving_r' = 0)) %>%
                mutate(input_read_pairs =
                       ifelse(input_read_pairs == 0, input_reads, input_read_pairs))
        nd = sum(ti2$input_read_pairs - ti2$surviving -
                 ti2$surviving_f - ti2$surviving_r - ti2$dropped)
        stopifnot(nd == 0)
        to = ti2 %>% mutate(total = input_read_pairs)
    } else {
        stop(sprintf("unsupported option: %s", paired))
    }
    to %>%
        select(SampleID, total, surviving, surviving_f, surviving_r, dropped)
    #}}}
}
# Parse a MultiQC STAR alignment summary table.
#
# fi: path to the MultiQC STAR TSV.
# paired: TRUE if sample names carry a read-pair suffix that must be stripped.
# Returns a tibble per sample with uniquely_mapped / multimapped / unmapped
# read counts; rows sharing a SampleID (e.g. lanes) are summed.  Stops if
# the categories differ from the reported totals by 1000 reads or more.
read_multiqc_star <- function(fi, paired = TRUE) {
    #{{{
    ti = read_tsv(fi)
    if (isFALSE(paired)) {
        ti2 = ti %>% mutate(SampleID = Sample) %>% select(-Sample)
    } else {
        # Strip the "_<suffix>" part of paired-end sample names.
        ti2 = ti %>% separate(Sample, c("SampleID", 'suf'), sep = "_",
                              fill = 'right', extra = 'merge') %>%
            select(-suf)
    }
    ti2 = ti2 %>%
        transmute(SampleID = SampleID, total = total_reads,
                  uniquely_mapped = uniquely_mapped,
                  multimapped = multimapped + multimapped_toomany,
                  unmapped = unmapped_mismatches + unmapped_tooshort + unmapped_other,
                  nd = total - uniquely_mapped - multimapped - unmapped)
    # Allow a small residual (< 1000 reads overall) before failing.
    stopifnot(sum(ti2$nd) < 1000)
    # Sum rows that share a SampleID after suffix stripping.
    ti2 %>% group_by(SampleID) %>%
        summarise(uniquely_mapped = sum(uniquely_mapped),
                  multimapped = sum(multimapped),
                  unmapped = sum(unmapped))
    #}}}
}
# Parse a MultiQC HISAT2 alignment summary table.
#
# fi: path to the MultiQC hisat2 TSV.
# paired: TRUE for paired-end runs, FALSE for single-end.
# Returns a tibble per sample with uniquely_mapped / multimapped / unmapped
# counts; rows sharing a SampleID are summed.  Stops if the categories
# differ from the reported totals by 1000 reads or more.
read_multiqc_hisat2 <- function(fi, paired = TRUE) {
    #{{{
    ti = read_tsv(fi)
    if (isFALSE(paired)) {
        ti2 = ti %>%
            transmute(SampleID = Sample,
                      total = unpaired_total,
                      uniquely_mapped = unpaired_aligned_one,
                      multimapped = unpaired_aligned_multi,
                      unmapped = unpaired_aligned_none)
    } else {
        # Unpaired mates of paired-end runs are deliberately excluded from
        # the accounting (the original kept an abandoned alternative that
        # included them as commented-out code).
        ti2 = ti %>%
            transmute(SampleID = Sample,
                      total = paired_total,
                      uniquely_mapped = paired_aligned_one + paired_aligned_discord_one,
                      multimapped = paired_aligned_multi,
                      unmapped = paired_aligned_none)
    }
    ti2 = ti2 %>% mutate(nd = total - uniquely_mapped - multimapped - unmapped)
    # Diagnostic: print the residual before asserting it is small.
    cat(sum(ti2$nd), "\n")
    stopifnot(sum(ti2$nd) < 1000)
    ti2 %>% group_by(SampleID) %>%
        summarise(uniquely_mapped = sum(uniquely_mapped),
                  multimapped = sum(multimapped),
                  unmapped = sum(unmapped))
    #}}}
}
# Parse a MultiQC featureCounts summary table.
#
# fi: path to the MultiQC featureCounts TSV.
# Returns a tibble per sample with the read-assignment categories; stops
# unless the categories sum exactly to the reported Total.
read_multiqc_featurecounts <- function(fi) {
    #{{{
    ti = read_tsv(fi)
    ti2 = ti %>% mutate(SampleID = Sample) %>%
        select(SampleID, Total, Assigned, Unassigned_Unmapped,
               Unassigned_MultiMapping,
               Unassigned_NoFeatures, Unassigned_Ambiguity) %>%
        mutate(nd = Total - Assigned - Unassigned_Unmapped - Unassigned_MultiMapping - Unassigned_NoFeatures - Unassigned_Ambiguity)
    # as.numeric() guards against integer overflow when summing large counts.
    stopifnot(sum(as.numeric(ti2$nd)) == 0)
    ti2 %>% select(SampleID, Assigned, Unassigned_MultiMapping,
                   Unassigned_NoFeatures, Unassigned_Ambiguity,
                   Unassigned_Unmapped)
    #}}}
}
# PCA scatter plot (PC1 vs PC2) written to file.
#
# tp: data frame with PC1/PC2 plus the metadata columns referenced in `opt`.
# fo: output path handed to ggsave().
# opt: comma-separated aesthetic spec, e.g. 'col=tis,sha=rep'.  Keys:
#   col (color), lab (text label via geom_text_repel), sha (shape).
#   Values: tis=Tissue, gen=Genotype, tre=Treatment, rep=Replicate, sid=sid.
# labsize: label text size; wd/ht: output size in inches.
#
# NOTE(review): `xlab`, `ylab` and `shapes` are looked up in the calling
# environment (as in the original branch-per-option version) -- confirm
# they are defined before calling.
# FIX: the original 'col=tis,sha=tre' branch configured a `fill` legend (a
# no-op, since nothing maps fill); that copy-paste slip is corrected so the
# color legend is single-column for every option.
plot_pca0 <- function(tp, fo, opt = 'col=tis', labsize = 2.5, wd = 8, ht = 8) {
    #{{{
    # Decode the compact option string into ggplot aesthetics instead of
    # enumerating every key/value combination in its own branch.
    aes_keys = c(col = 'colour', lab = 'label', sha = 'shape')
    aes_vals = c(tis = 'Tissue', gen = 'Genotype', tre = 'Treatment',
                 rep = 'Replicate', sid = 'sid')
    parts = strsplit(strsplit(opt, ',', fixed = TRUE)[[1]], '=', fixed = TRUE)
    keys = vapply(parts, function(p) p[1], character(1))
    vals = vapply(parts, function(p) p[2], character(1))
    if (any(is.na(vals)) || !all(keys %in% names(aes_keys)) ||
        !all(vals %in% names(aes_vals)))
        stop(sprintf("unknown opt: %s", opt))
    # Build the aesthetic mapping programmatically; do.call(aes, ...) turns
    # the quoted column symbols into the mapping ggplot expects.
    mapping = list(x = quote(PC1), y = quote(PC2))
    for (i in seq_along(keys))
        mapping[[aes_keys[[keys[i]]]]] = as.name(aes_vals[[vals[i]]])
    # Scales without a matching aesthetic (e.g. the shape scale when no
    # shape is mapped) are inert, so they are always attached.
    p1 = ggplot(tp, do.call(aes, mapping)) +
        geom_point(size = 1.5) +
        scale_x_continuous(name = xlab) + scale_y_continuous(name = ylab) +
        scale_color_d3() +
        scale_shape_manual(values = shapes) +
        guides(direction = 'vertical', color = guide_legend(ncol = 1)) +
        guides(shape = guide_legend(ncol = 1, byrow = T)) +
        otheme(legend.pos = 'top.left', xgrid = T, ygrid = T, xtitle = T,
               ytitle = T, xtext = T, ytext = T)
    # Text labels are drawn on top of the points when requested.
    if ('lab' %in% keys)
        p1 = p1 + geom_text_repel(size = labsize)
    ggsave(p1, filename = fo, width = wd, height = ht)
    #}}}
}
| /src/functions.R | permissive | Wangqq0515/rnaseq | R | false | false | 14,016 | r | require(devtools)
load_all("~/git/rmaize")
require(ape)
require(ggtree)
require(ggforce)
require(Rtsne)
require(skimr)
dirp = '~/projects/rnaseq'
dird = file.path(dirp, 'data')
dirc = '/scratch.global/zhoux379/rnaseq'
#t_cfg = read_gspread_master(lib='rnaseq')
#f_yml = file.path(dird, '10.cfg.yaml')
#Sys.setenv("R_CONFIG_FILE" = f_yml)
# Load one RNA-Seq study and return a harmonized sample table + expression table.
#
# yid: study identifier understood by rnaseq_cpm() (project helper from
#   rmaize; returns the sample table `th` and expression table `tm`).
# Returns list(th, tm) where SampleIDs are prefixed with the study id so
# they remain unique when several studies are combined.
read_rnaseq <- function(yid) {
#{{{
res = rnaseq_cpm(yid)
th = res$th; tm = res$tm
# Fill missing metadata with defaults and force plain character columns.
th = th %>% replace_na(list(Tissue='',Genotype='B73',Treatment='')) %>%
mutate(Tissue=as.character(Tissue)) %>%
mutate(Genotype=as.character(Genotype)) %>%
mutate(Treatment=as.character(Treatment))
# Studies in this set (plus 'rn19c') get Treatment folded into Tissue below.
yids_dev = c('rn10a','rn11a','rn13b','rn14b','rn14c','rn14e',"rn16b","rn16c","rn18g")
# Hand-curated, study-specific sample filters.
if(yid == 'rn12a') {
th = th %>% filter(Treatment == 'WT')
} else if(yid == 'rn17c') {
th = th %>% filter(Treatment == 'con')
} else if(yid %in% c(yids_dev,'rn19c')) {
if(yid == 'rn13b') th = th %>% filter(!str_detect(Treatment, "^ET"))
if(yid == 'rn18g') th = th %>% filter(Genotype == 'B73')
# Combine Tissue and Treatment into one label; record the study id instead.
th = th %>% mutate(Tissue=str_c(Tissue,Treatment, sep="_")) %>%
mutate(Treatment=yid)
}
# Prefix sample ids with the study id and keep a fixed set of columns.
th = th %>% mutate(study = yid) %>%
mutate(SampleID = str_c(study, SampleID, sep='_')) %>%
replace_na(list(Treatment='')) %>%
select(SampleID, Tissue, Genotype, Treatment, Replicate, study)
# Apply the same id prefix to the expression table and drop any samples
# that were filtered out of the sample table above.
tm = tm %>% mutate(SampleID = str_c(yid, SampleID, sep='_')) %>%
filter(SampleID %in% th$SampleID)
list(th=th, tm=tm)
#}}}
}
# Parse a MultiQC Trimmomatic summary table.
#
# fi: path to the MultiQC trimmomatic TSV.
# paired: TRUE for paired-end, FALSE for single-end, or 'both' when the
#   study mixes single- and paired-end runs.
# Returns a tibble with one row per sample: total input reads plus counts
# of surviving / forward-only / reverse-only / dropped reads.  Stops if the
# per-category counts do not add up to the input total.
read_multiqc_trimmomatic <- function(fi, paired = TRUE) {
    #{{{
    ti = read_tsv(fi)
    if (isFALSE(paired)) {
        # Single-end: surviving + dropped must account for every input read.
        nd = sum(ti$input_reads - ti$surviving - ti$dropped)
        stopifnot(nd == 0)
        to = ti %>% mutate(SampleID = Sample, total = input_reads,
                           surviving_f = 0, surviving_r = 0)
    } else if (isTRUE(paired) || identical(paired, 'both')) {
        # Paired-end sample names carry a suffix (e.g. "_1"); strip it.
        ti2 = ti %>%
            separate(Sample, c("SampleID", 'suf'), sep = "_", fill = 'right', extra = 'merge') %>%
            select(-suf) %>%
            mutate(surviving_f = forward_only_surviving,
                   surviving_r = reverse_only_surviving)
        if (identical(paired, 'both'))
            # Mixed runs: single-end rows have no input_read_pairs; fall
            # back to input_reads so the bookkeeping below still balances.
            ti2 = ti2 %>%
                replace_na(list('input_reads' = 0, 'input_read_pairs' = 0,
                                'surviving_f' = 0, 'surviving_r' = 0)) %>%
                mutate(input_read_pairs =
                       ifelse(input_read_pairs == 0, input_reads, input_read_pairs))
        nd = sum(ti2$input_read_pairs - ti2$surviving -
                 ti2$surviving_f - ti2$surviving_r - ti2$dropped)
        stopifnot(nd == 0)
        to = ti2 %>% mutate(total = input_read_pairs)
    } else {
        stop(sprintf("unsupported option: %s", paired))
    }
    to %>%
        select(SampleID, total, surviving, surviving_f, surviving_r, dropped)
    #}}}
}
# Parse a MultiQC STAR alignment summary table.
#
# fi: path to the MultiQC STAR TSV.
# paired: TRUE if sample names carry a read-pair suffix that must be stripped.
# Returns a tibble per sample with uniquely_mapped / multimapped / unmapped
# read counts; rows sharing a SampleID (e.g. lanes) are summed.  Stops if
# the categories differ from the reported totals by 1000 reads or more.
read_multiqc_star <- function(fi, paired = TRUE) {
    #{{{
    ti = read_tsv(fi)
    if (isFALSE(paired)) {
        ti2 = ti %>% mutate(SampleID = Sample) %>% select(-Sample)
    } else {
        # Strip the "_<suffix>" part of paired-end sample names.
        ti2 = ti %>% separate(Sample, c("SampleID", 'suf'), sep = "_",
                              fill = 'right', extra = 'merge') %>%
            select(-suf)
    }
    ti2 = ti2 %>%
        transmute(SampleID = SampleID, total = total_reads,
                  uniquely_mapped = uniquely_mapped,
                  multimapped = multimapped + multimapped_toomany,
                  unmapped = unmapped_mismatches + unmapped_tooshort + unmapped_other,
                  nd = total - uniquely_mapped - multimapped - unmapped)
    # Allow a small residual (< 1000 reads overall) before failing.
    stopifnot(sum(ti2$nd) < 1000)
    # Sum rows that share a SampleID after suffix stripping.
    ti2 %>% group_by(SampleID) %>%
        summarise(uniquely_mapped = sum(uniquely_mapped),
                  multimapped = sum(multimapped),
                  unmapped = sum(unmapped))
    #}}}
}
# Parse a MultiQC HISAT2 alignment summary table.
#
# fi: path to the MultiQC hisat2 TSV.
# paired: TRUE for paired-end runs, FALSE for single-end.
# Returns a tibble per sample with uniquely_mapped / multimapped / unmapped
# counts; rows sharing a SampleID are summed.  Stops if the categories
# differ from the reported totals by 1000 reads or more.
read_multiqc_hisat2 <- function(fi, paired = TRUE) {
    #{{{
    ti = read_tsv(fi)
    if (isFALSE(paired)) {
        ti2 = ti %>%
            transmute(SampleID = Sample,
                      total = unpaired_total,
                      uniquely_mapped = unpaired_aligned_one,
                      multimapped = unpaired_aligned_multi,
                      unmapped = unpaired_aligned_none)
    } else {
        # Unpaired mates of paired-end runs are deliberately excluded from
        # the accounting (the original kept an abandoned alternative that
        # included them as commented-out code).
        ti2 = ti %>%
            transmute(SampleID = Sample,
                      total = paired_total,
                      uniquely_mapped = paired_aligned_one + paired_aligned_discord_one,
                      multimapped = paired_aligned_multi,
                      unmapped = paired_aligned_none)
    }
    ti2 = ti2 %>% mutate(nd = total - uniquely_mapped - multimapped - unmapped)
    # Diagnostic: print the residual before asserting it is small.
    cat(sum(ti2$nd), "\n")
    stopifnot(sum(ti2$nd) < 1000)
    ti2 %>% group_by(SampleID) %>%
        summarise(uniquely_mapped = sum(uniquely_mapped),
                  multimapped = sum(multimapped),
                  unmapped = sum(unmapped))
    #}}}
}
# Parse a MultiQC featureCounts summary table.
#
# fi: path to the MultiQC featureCounts TSV.
# Returns a tibble per sample with the read-assignment categories; stops
# unless the categories sum exactly to the reported Total.
read_multiqc_featurecounts <- function(fi) {
    #{{{
    ti = read_tsv(fi)
    ti2 = ti %>% mutate(SampleID = Sample) %>%
        select(SampleID, Total, Assigned, Unassigned_Unmapped,
               Unassigned_MultiMapping,
               Unassigned_NoFeatures, Unassigned_Ambiguity) %>%
        mutate(nd = Total - Assigned - Unassigned_Unmapped - Unassigned_MultiMapping - Unassigned_NoFeatures - Unassigned_Ambiguity)
    # as.numeric() guards against integer overflow when summing large counts.
    stopifnot(sum(as.numeric(ti2$nd)) == 0)
    ti2 %>% select(SampleID, Assigned, Unassigned_MultiMapping,
                   Unassigned_NoFeatures, Unassigned_Ambiguity,
                   Unassigned_Unmapped)
    #}}}
}
# PCA scatter plot (PC1 vs PC2) written to file.
#
# tp: data frame with PC1/PC2 plus the metadata columns referenced in `opt`.
# fo: output path handed to ggsave().
# opt: comma-separated aesthetic spec, e.g. 'col=tis,sha=rep'.  Keys:
#   col (color), lab (text label via geom_text_repel), sha (shape).
#   Values: tis=Tissue, gen=Genotype, tre=Treatment, rep=Replicate, sid=sid.
# labsize: label text size; wd/ht: output size in inches.
#
# NOTE(review): `xlab`, `ylab` and `shapes` are looked up in the calling
# environment (as in the original branch-per-option version) -- confirm
# they are defined before calling.
# FIX: the original 'col=tis,sha=tre' branch configured a `fill` legend (a
# no-op, since nothing maps fill); that copy-paste slip is corrected so the
# color legend is single-column for every option.
plot_pca0 <- function(tp, fo, opt = 'col=tis', labsize = 2.5, wd = 8, ht = 8) {
    #{{{
    # Decode the compact option string into ggplot aesthetics instead of
    # enumerating every key/value combination in its own branch.
    aes_keys = c(col = 'colour', lab = 'label', sha = 'shape')
    aes_vals = c(tis = 'Tissue', gen = 'Genotype', tre = 'Treatment',
                 rep = 'Replicate', sid = 'sid')
    parts = strsplit(strsplit(opt, ',', fixed = TRUE)[[1]], '=', fixed = TRUE)
    keys = vapply(parts, function(p) p[1], character(1))
    vals = vapply(parts, function(p) p[2], character(1))
    if (any(is.na(vals)) || !all(keys %in% names(aes_keys)) ||
        !all(vals %in% names(aes_vals)))
        stop(sprintf("unknown opt: %s", opt))
    # Build the aesthetic mapping programmatically; do.call(aes, ...) turns
    # the quoted column symbols into the mapping ggplot expects.
    mapping = list(x = quote(PC1), y = quote(PC2))
    for (i in seq_along(keys))
        mapping[[aes_keys[[keys[i]]]]] = as.name(aes_vals[[vals[i]]])
    # Scales without a matching aesthetic (e.g. the shape scale when no
    # shape is mapped) are inert, so they are always attached.
    p1 = ggplot(tp, do.call(aes, mapping)) +
        geom_point(size = 1.5) +
        scale_x_continuous(name = xlab) + scale_y_continuous(name = ylab) +
        scale_color_d3() +
        scale_shape_manual(values = shapes) +
        guides(direction = 'vertical', color = guide_legend(ncol = 1)) +
        guides(shape = guide_legend(ncol = 1, byrow = T)) +
        otheme(legend.pos = 'top.left', xgrid = T, ygrid = T, xtitle = T,
               ytitle = T, xtext = T, ytext = T)
    # Text labels are drawn on top of the points when requested.
    if ('lab' %in% keys)
        p1 = p1 + geom_text_repel(size = labsize)
    ggsave(p1, filename = fo, width = wd, height = ht)
    #}}}
}
|
#### Coursera - Exploratory Data Analysis - Week 1 ####
# generate plot3.png
# download data
library(data.table)
temp <- tempfile()
download.file("https://archive.ics.uci.edu/ml/machine-learning-databases/00235/household_power_consumption.zip", temp)
power <- read.delim(unz(temp, "household_power_consumption.txt"),
                    sep = ";", na.strings = "?", header = TRUE, stringsAsFactors = FALSE)
unlink(temp)
rm(temp)
# check the data - fail fast if dimensions or column names are unexpected
# (the original computed these comparisons but discarded the results)
stopifnot(dim(power) == c(2075259, 9))
stopifnot(names(power) == c("Date", "Time", "Global_active_power", "Global_reactive_power",
                            "Voltage", "Global_intensity",
                            "Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# keep data from 2007/02/01-2007/02/02
power$Date <- as.Date(power$Date, "%d/%m/%Y")
power <- power[power$Date %in% as.Date(c("1/2/2007", "2/2/2007"), "%d/%m/%Y"), ]
# create datetime column; paste() supplies the space that the
# "%Y-%m-%d %H:%M:%S" format expects between date and time (the original
# paste0() omitted it and only parsed via strptime's lenient whitespace
# handling)
power$datetime <- strptime(paste(power$Date, power$Time), "%Y-%m-%d %H:%M:%S")
# create plot: empty frame first, then one line per sub-meter
png("plot3.png", height = 480, width = 480)
with(power,
     plot(datetime, Sub_metering_1, type = "n",
          xlab = "", ylab = "Energy sub metering"))
with(power, points(datetime, Sub_metering_1, type = "l"))
with(power, points(datetime, Sub_metering_2, type = "l", col = "red"))
with(power, points(datetime, Sub_metering_3, type = "l", col = "blue"))
legend("topright",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = 1,
       col = c("black", "red", "blue"))
dev.off()
# clean up workspace
rm(power)
| /plot3.R | no_license | hallieswan/ExData_Plotting1 | R | false | false | 1,560 | r | #### Coursera - Exploratory Data Analysis - Week 1 ####
# generate plot3.png
# download data
library(data.table)
temp <- tempfile()
download.file("https://archive.ics.uci.edu/ml/machine-learning-databases/00235/household_power_consumption.zip", temp)
power <- read.delim(unz(temp, "household_power_consumption.txt"),
                    sep = ";", na.strings = "?", header = TRUE, stringsAsFactors = FALSE)
unlink(temp)
rm(temp)
# check the data - fail fast if dimensions or column names are unexpected
# (the original computed these comparisons but discarded the results)
stopifnot(dim(power) == c(2075259, 9))
stopifnot(names(power) == c("Date", "Time", "Global_active_power", "Global_reactive_power",
                            "Voltage", "Global_intensity",
                            "Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# keep data from 2007/02/01-2007/02/02
power$Date <- as.Date(power$Date, "%d/%m/%Y")
power <- power[power$Date %in% as.Date(c("1/2/2007", "2/2/2007"), "%d/%m/%Y"), ]
# create datetime column; paste() supplies the space that the
# "%Y-%m-%d %H:%M:%S" format expects between date and time (the original
# paste0() omitted it and only parsed via strptime's lenient whitespace
# handling)
power$datetime <- strptime(paste(power$Date, power$Time), "%Y-%m-%d %H:%M:%S")
# create plot: empty frame first, then one line per sub-meter
png("plot3.png", height = 480, width = 480)
with(power,
     plot(datetime, Sub_metering_1, type = "n",
          xlab = "", ylab = "Energy sub metering"))
with(power, points(datetime, Sub_metering_1, type = "l"))
with(power, points(datetime, Sub_metering_2, type = "l", col = "red"))
with(power, points(datetime, Sub_metering_3, type = "l", col = "blue"))
legend("topright",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = 1,
       col = c("black", "red", "blue"))
dev.off()
# clean up workspace
rm(power)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nsink_prep_data.R
\name{nsink_prep_q}
\alias{nsink_prep_q}
\title{Prepare flow data for N-Sink}
\usage{
nsink_prep_q(data_dir)
}
\arguments{
\item{data_dir}{Base directory that contains N-Sink data folders. Data may
be downloaded with the \code{\link{nsink_get_data}} function.}
}
\value{
returns a tibble of the flow data
}
\description{
Standardizes flow data from the EROM tables.
}
\keyword{internal}
| /man/nsink_prep_q.Rd | permissive | qkellogg/nsink-1 | R | false | true | 484 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nsink_prep_data.R
\name{nsink_prep_q}
\alias{nsink_prep_q}
\title{Prepare flow data for N-Sink}
\usage{
nsink_prep_q(data_dir)
}
\arguments{
\item{data_dir}{Base directory that contains N-Sink data folders. Data may
be downloaded with the \code{\link{nsink_get_data}} function.}
}
\value{
returns a tibble of the flow data
}
\description{
Standardizes flow data from the EROM tables.
}
\keyword{internal}
|
args = commandArgs(trailingOnly = TRUE)
resultdir = args[1]
TEname = args[2]
ZNFname = args[3]
tissue = tail(args,n=-2)
if("COADREAD" %in% tissue) {
tissue = c("COAD", "READ")
} else if ("ESCASTAD" %in% tissue) {
tissue = c("ESCA", "STAD")
} else if ("KICHKIRCKIRP" %in% tissue) {
tissue = c("KICH", "KIRC", "KIRP")
} else if ("LUADLUSC" %in% tissue) {
tissue = c("LUAD", "LUSC")
}
print(resultdir)
print(TEname)
print(ZNFname)
print(tissue)
setwd("../../")
ZNFfilename = paste0(resultdir, "/VSTcnts/", ZNFname, ".txt")
TEfilename = paste0(resultdir, ".instance/VSTcnts/", TEname, ".txt")
print(ZNFfilename)
print(TEfilename)
ZNF <- read.table(ZNFfilename, header=TRUE)
TE <- read.table(TEfilename, header = TRUE)
patient <- read.table("data/patient.info", sep="\t", header=TRUE)
dat <- cbind(patient$tissue, ZNF, TE)
b_tissue = patient$tissue %in% tissue
subdat <- dat[b_tissue,]
print(subdat)
pdf(paste0("result.rsem.TET.instance/ZNF/plots/", paste(TEname, ZNFname, tissue, "pdf", sep=".")))
plot(subdat[,3], subdat[,5])
dev.off()
| /src/ZNF/checkcor.r | no_license | HanLabUNLV/TEcoex | R | false | false | 1,052 | r |
args = commandArgs(trailingOnly = TRUE)
resultdir = args[1]
TEname = args[2]
ZNFname = args[3]
tissue = tail(args,n=-2)
if("COADREAD" %in% tissue) {
tissue = c("COAD", "READ")
} else if ("ESCASTAD" %in% tissue) {
tissue = c("ESCA", "STAD")
} else if ("KICHKIRCKIRP" %in% tissue) {
tissue = c("KICH", "KIRC", "KIRP")
} else if ("LUADLUSC" %in% tissue) {
tissue = c("LUAD", "LUSC")
}
print(resultdir)
print(TEname)
print(ZNFname)
print(tissue)
setwd("../../")
ZNFfilename = paste0(resultdir, "/VSTcnts/", ZNFname, ".txt")
TEfilename = paste0(resultdir, ".instance/VSTcnts/", TEname, ".txt")
print(ZNFfilename)
print(TEfilename)
ZNF <- read.table(ZNFfilename, header=TRUE)
TE <- read.table(TEfilename, header = TRUE)
patient <- read.table("data/patient.info", sep="\t", header=TRUE)
dat <- cbind(patient$tissue, ZNF, TE)
b_tissue = patient$tissue %in% tissue
subdat <- dat[b_tissue,]
print(subdat)
pdf(paste0("result.rsem.TET.instance/ZNF/plots/", paste(TEname, ZNFname, tissue, "pdf", sep=".")))
plot(subdat[,3], subdat[,5])
dev.off()
|
#' Write discharge data in HBV format
#'
#' Writes a zoo time series to a tab-separated text file with HBV-style
#' timestamps ("YYYYMMDD/HHMM") as row names.
#'
#' @param DataZoo A zoo object (vector or matrix) holding the discharge series.
#' @param fileName Path of the file to write.
#' @keywords data
#' @export
#' @examples
#' \dontrun{
#' write_hbv_disc(DataZoo, fileName = "data/discharge_data.txt")
#' }
write_hbv_disc <- function(DataZoo, fileName) {
  # Attach zoo for time()/index handling; keep the original behaviour of
  # installing it on the fly when missing.
  if (!requireNamespace("zoo", quietly = TRUE)) install.packages("zoo")
  library(zoo)
  # Coerce a plain vector series to a one-column matrix so write.table
  # emits a single value column.
  if (is.null(dim(DataZoo))) {
    dim(DataZoo) <- c(length(DataZoo), 1)
  }
  # HBV expects timestamps formatted as YYYYMMDD/HHMM in the first column.
  rownames(DataZoo) <- format(time(DataZoo), "%Y%m%d/%H%M")
  write.table(DataZoo, file = fileName, col.names = FALSE, row.names = TRUE,
              quote = FALSE, sep = "\t")
}
| /R/write_hbv_disc.R | permissive | NVE/hongR | R | false | false | 611 | r | #' writeHBV_disc.R
#'
#' Writes a zoo time series to a tab-separated text file whose row names
#' are timestamps formatted as "YYYYmmdd/HHMM" (the layout HBV expects).
#'
#' @param DataZoo A zoo object (vector or matrix) of discharge values.
#' @param fileName Path of the file to write.
#' @keywords data
#' @export
#' @examples
#' \dontrun{
#' write_hbv_disc(DataZoo, fileName = "data/discharge_data.txt")
#' }
write_hbv_disc <- function(DataZoo, fileName) {
  # time() dispatch on zoo objects needs the zoo namespace loaded; fail
  # loudly instead of installing packages from inside a function.
  if (!requireNamespace("zoo", quietly = TRUE)) {
    stop("Package 'zoo' is required for write_hbv_disc().", call. = FALSE)
  }
  # Give plain vectors a one-column dim so rownames()/write.table() work.
  if (is.null(dim(DataZoo))) {
    dim(DataZoo) <- c(length(DataZoo), 1)
  }
  rownames(DataZoo) <- format(time(DataZoo), "%Y%m%d/%H%M")
  write.table(DataZoo, file = fileName, col.names = FALSE, row.names = TRUE,
              quote = FALSE, sep = "\t")
}
|
#===============================#
# #
# Fun with the Mandelbrot Set #
# #
#===============================#
# Apply one Mandelbrot iteration: map z to z^2 + C (vectorized over z).
mbTrans <- function(z, C) {
  z^2 + C
}
# Iterate the map a few times from z0 = 0 with a sample constant C
# (this C escapes quickly, so the iterates blow up).
C <- 6 + 5i
z0 <- 0
z1 <- mbTrans(z0, C)
z2 <- mbTrans(z1, C)
z3 <- mbTrans(z2, C)
z4 <- mbTrans(z3, C)
z5 <- mbTrans(z4, C)
#plot(c(z0, z1, z2, z3, z4, z5))
# Create a di x dr grid of complex coordinates with integer steps:
# row i carries imaginary part iMin + (i - 1), column r carries real part
# rMin + (r - 1), so the grid covers [rMin, rMax) x [iMin, iMax).
gridInit <- function(rMin, rMax, iMin, iMax) {
  dr <- rMax - rMin
  di <- iMax - iMin
  # outer() builds the whole grid in one vectorized step instead of the
  # original cell-by-cell double loop (which also misbehaved via 1:0 when
  # an extent was zero).
  outer(complex(imaginary = iMin + seq_len(di) - 1),
        rMin + seq_len(dr) - 1,
        `+`)
}
# 200 x 200 grid spanning [-100, 100) in both axes (name "g5x5" is historical).
g5x5 <- gridInit(-100, 100, -100, 100)
# Modulus (distance from the origin) of each complex entry, i.e. a real
# matrix suitable for image(). Base Mod() replaces the hand-rolled
# sqrt(Re^2 + Im^2) with identical results.
imageable <- function(M) {
  Mod(M)
}
# Visualize |z| over the raw grid, then after one and two iterations of the
# map with small constants C.
image(imageable(g5x5), col = heat.colors(100))
g1 <- mbTrans(g5x5, C = (0.2 - 0.01i))
image(imageable(g1), col = heat.colors(100))
g2 <- mbTrans(g1, C = (1 - 1i))
image(imageable(g2), col = heat.colors(100))
| /misc/Mandelbrot.R | no_license | damiansp/R | R | false | false | 1,119 | r | #===============================#
# #
# Fun with the Mandelbrot Set #
# #
#===============================#
# (Duplicate copy of Mandelbrot.R above.)
# Translate a complex number z -> z^2 + C
mbTrans <- function(z, C) {
  return (z^2 + C)
}
# Iterate the map a few times from z0 = 0 with a sample constant C.
C <- 6 + 5i
z0 <- 0
z1 <- mbTrans(z0, C)
z2 <- mbTrans(z1, C)
z3 <- mbTrans(z2, C)
z4 <- mbTrans(z3, C)
z5 <- mbTrans(z4, C)
#plot(c(z0, z1, z2, z3, z4, z5))
# Create a grid of coordinates over specified ranges
# (integer steps; covers [rMin, rMax) x [iMin, iMax)).
gridInit <- function(rMin, rMax, iMin, iMax) {
  dr <- rMax - rMin
  di <- iMax - iMin
  plane <- matrix(nrow=di, ncol=dr)
  for (i in 1:di) {
    for (r in 1:dr) {
      plane[i, r] <- complex(real = rMin + (r - 1),
                             imaginary = (iMin + (i - 1)))
    }
  }
  plane
}
g5x5 <- gridInit(-100, 100, -100, 100)
# Modulus of each complex entry, for use with image().
imageable <- function(M) {
  Mr <- Re(M)
  Mi <- Im(M)
  Md <- sqrt(Mr^2 + Mi^2)
  Md
}
image(imageable(g5x5), col = heat.colors(100))
g1 <- mbTrans(g5x5, C = (0.2 - 0.01i))
image(imageable(g1), col = heat.colors(100))
g2 <- mbTrans(g1, C = (1 - 1i))
image(imageable(g2), col = heat.colors(100))
|
## TODO -- test that there is some variance across a set of bootstrap
## samples
## TODO -- for ratios, test also that there is some variance in
## numerator and denominator
## TODO -- test paired ego / alter datasets
## TODO -- test that calling bootstrap.estimates works when
## total.popn.size is an argument and not an attribute of
## the data frame (had to use parent.frame(2)) to fix
## a bug about this
## TODO -- test cases where estimates should never be negative
## TODO -- look at
## http://stackoverflow.com/questions/8898469/is-it-possible-to-use-r-package-data-in-testthat-tests-or-run-examples
## to try and figure out the real way to include package data in
## unit tests...
# goc example data ships with the networkreporting package.
data(goc, package="networkreporting")
set.seed(12345)
#########################################
## RDS - markov chain bootstrap (matt's algorithm)
context("goc / rds - build markov model")
mm <- estimate.mixing(survey.data, parent.data, c("use.crack"))
## idea: if we take a bunch of draws starting from each
## state, we should end up in neighboring states in proportion
## to the transition probabilities
this.state <- mm$states[["0"]]
tmp <- laply(1:10000, function(x) { this.state$trans.fn() })
tmptab <- as.numeric(table(tmp)/sum(table(tmp)))
expect_that(tmptab, equals(as.vector(this.state$trans.probs),
                           tolerance=.01, scale=1),
            label="trans.fn from state '0'")
this.state <- mm$states[["1"]]
tmp <- laply(1:10000, function(x) { this.state$trans.fn() })
tmptab <- as.numeric(table(tmp)/sum(table(tmp)))
expect_that(tmptab, equals(as.vector(this.state$trans.probs),
                           tolerance=.01, scale=1),
            label="trans.fn from state '1'")
## similar test to above, but using choose.next.state.fn
## (the vectorized interface) instead of per-state trans.fn draws
this.state <- mm$states[["0"]]
parents <- rep("0", 10000)
tmp <- mm$choose.next.state.fn(parents)
tmptab <- as.numeric(table(tmp)/sum(table(tmp)))
expect_that(tmptab, equals(as.vector(this.state$trans.probs),
                           tolerance=.01, scale=1),
            label="choose.next.state.fn from state '0'")
this.state <- mm$states[["1"]]
parents <- rep("1", 10000)
tmp <- mm$choose.next.state.fn(parents)
tmptab <- as.numeric(table(tmp)/sum(table(tmp)))
expect_that(tmptab, equals(as.vector(this.state$trans.probs),
                           tolerance=.01, scale=1),
            label="choose.next.state.fn from state '1'")
## mixture of both states: draw next states for a half-and-half parent
## pool and compare against the average of the two transition-probability
## rows.
parents <- c(rep("1", 5000), rep("0", 5000))
## BUG FIX: the draw for the mixed parent pool was never taken (tmp still
## held the previous state-"1" draws) and the expectation compared against
## the wrong probabilities (this.state$trans.probs instead of the averaged
## tp computed below).
tmp <- mm$choose.next.state.fn(parents)
tmptab <- as.numeric(table(tmp)/sum(table(tmp)))
tp <- mean(c(mm$states[["0"]]$trans.probs[1], mm$states[["1"]]$trans.probs[1]))
tp <- c(tp, mean(c(mm$states[["0"]]$trans.probs[2], mm$states[["1"]]$trans.probs[2])))
expect_that(tmptab, equals(tp,
                           tolerance=.01, scale=1),
            label="choose.next.state.fn from mixture of states")
#########################################
## RDS - degree distribution estimation (used by the bootstrap)
context("goc / rds - degree estimation")
these.traits <- c("use.crack", "female")
dd <- estimate.degree.distns(survey.data,
                             d.hat.vals="netsize.5.bss",
                             traits=these.traits)
tt <- traits.to.string(survey.data, these.traits)
survey.data$tt[tt$used.idx] <- tt$traits
## mean reported degree per trait combination, for comparison below
dmeans <- ddply(survey.data,
                .(tt),
                summarise,
                mean.degree=mean(netsize.5.bss))
for(cur.trait in c("0.0", "0.1", "1.0", "1.1")) {
  res <- dd$draw.degrees.fn(rep(cur.trait, 10000))
  expect_that(mean(res[,'degree']),
              equals(dmeans[paste(dmeans$tt)==cur.trait, "mean.degree"],
                     tol=.1),
              label=paste0("draw from degree distn for trait ", cur.trait))
}
#########################################
## RDS - static chain bootstrap (Weir et al algorithm)
context("variance estimators - rds static chain bootstrap - sanity checks")
## TODO
| /inst/tests/test_goc.r | no_license | msalganik/networkreporting | R | false | false | 3,981 | r | ## TODO -- test that there is some variance across a set of bootstrap
## samples
## TODO -- for ratios, test also that there is some variance in
## numerator and denominator
## TODO -- test paired ego / alter datasets
## TODO -- test that calling bootstrap.estimates works when
## total.popn.size is an argument and not an attribute of
## the data frame (had to use parent.frame(2)) to fix
## a bug about this
## TODO -- test cases where estimates should never be negative
## TODO -- look at
## http://stackoverflow.com/questions/8898469/is-it-possible-to-use-r-package-data-in-testthat-tests-or-run-examples
## to try and figure out the real way to include package data in
## unit tests...
# (Duplicate copy of the markov-model tests above.)
data(goc, package="networkreporting")
set.seed(12345)
#########################################
## RDS - markov chain bootstrap (matt's algorithm)
context("goc / rds - build markov model")
mm <- estimate.mixing(survey.data, parent.data, c("use.crack"))
## idea: if we take a bunch of draws starting from each
## state, we should end up in neighboring states in proportion
## to the transition probabilities
this.state <- mm$states[["0"]]
tmp <- laply(1:10000, function(x) { this.state$trans.fn() })
tmptab <- as.numeric(table(tmp)/sum(table(tmp)))
expect_that(tmptab, equals(as.vector(this.state$trans.probs),
                           tolerance=.01, scale=1),
            label="trans.fn from state '0'")
this.state <- mm$states[["1"]]
tmp <- laply(1:10000, function(x) { this.state$trans.fn() })
tmptab <- as.numeric(table(tmp)/sum(table(tmp)))
expect_that(tmptab, equals(as.vector(this.state$trans.probs),
                           tolerance=.01, scale=1),
            label="trans.fn from state '1'")
## similar test to above, but using choose.next.state.fn
this.state <- mm$states[["0"]]
parents <- rep("0", 10000)
tmp <- mm$choose.next.state.fn(parents)
tmptab <- as.numeric(table(tmp)/sum(table(tmp)))
expect_that(tmptab, equals(as.vector(this.state$trans.probs),
                           tolerance=.01, scale=1),
            label="choose.next.state.fn from state '0'")
this.state <- mm$states[["1"]]
parents <- rep("1", 10000)
tmp <- mm$choose.next.state.fn(parents)
tmptab <- as.numeric(table(tmp)/sum(table(tmp)))
expect_that(tmptab, equals(as.vector(this.state$trans.probs),
                           tolerance=.01, scale=1),
            label="choose.next.state.fn from state '1'")
## mixture of both states: draw next states for a half-and-half parent
## pool and compare against the average of the two transition-probability
## rows.
parents <- c(rep("1", 5000), rep("0", 5000))
## BUG FIX: the draw for the mixed parent pool was never taken (tmp still
## held the previous state-"1" draws) and the expectation compared against
## the wrong probabilities (this.state$trans.probs instead of the averaged
## tp computed below).
tmp <- mm$choose.next.state.fn(parents)
tmptab <- as.numeric(table(tmp)/sum(table(tmp)))
tp <- mean(c(mm$states[["0"]]$trans.probs[1], mm$states[["1"]]$trans.probs[1]))
tp <- c(tp, mean(c(mm$states[["0"]]$trans.probs[2], mm$states[["1"]]$trans.probs[2])))
expect_that(tmptab, equals(tp,
                           tolerance=.01, scale=1),
            label="choose.next.state.fn from mixture of states")
#########################################
## RDS - degree distribution estimation (used by the bootstrap)
context("goc / rds - degree estimation")
these.traits <- c("use.crack", "female")
dd <- estimate.degree.distns(survey.data,
                             d.hat.vals="netsize.5.bss",
                             traits=these.traits)
tt <- traits.to.string(survey.data, these.traits)
survey.data$tt[tt$used.idx] <- tt$traits
## mean reported degree per trait combination, for comparison below
dmeans <- ddply(survey.data,
                .(tt),
                summarise,
                mean.degree=mean(netsize.5.bss))
for(cur.trait in c("0.0", "0.1", "1.0", "1.1")) {
  res <- dd$draw.degrees.fn(rep(cur.trait, 10000))
  expect_that(mean(res[,'degree']),
              equals(dmeans[paste(dmeans$tt)==cur.trait, "mean.degree"],
                     tol=.1),
              label=paste0("draw from degree distn for trait ", cur.trait))
}
#########################################
## RDS - static chain bootstrap (Weir et al algorithm)
context("variance estimators - rds static chain bootstrap - sanity checks")
## TODO
|
#############################################################################
##
## Copyright 2016 Novartis Institutes for BioMedical Research Inc.
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
#############################################################################
context("H5S")
test_that("Setting different types of dataspaces", {
  ds_simple <- H5S$new(type="simple")
  ds_scalar <- H5S$new(type="scalar")
  ds_null <- H5S$new(type="null")
  ## check that the types are as expected
  expect_true(ds_simple$get_select_type() == "H5S_SEL_ALL")
  expect_equal(ds_simple$get_simple_extent_type(), h5const$H5S_SIMPLE)
  expect_equal(ds_scalar$get_simple_extent_type(), h5const$H5S_SCALAR)
  expect_equal(ds_null$get_simple_extent_type(), h5const$H5S_NULL)
})
test_that("Setting extensions of dataspace", {
  ## create one dataspace with the extent set at construction time, then
  ## one where the extent is set after creation; both should be equal
  ds_extent_new <- H5S$new(dims=as.double(c(4,6,3,4)), maxdims=c(Inf, 6, Inf, 8))
  ds_extent_later <- H5S$new(type="simple")
  ds_extent_later$set_extent_simple(dims=c(4,6,3,4), maxdims=c(Inf, 6, Inf, 8))
  expect_true(ds_extent_new$extent_equal(ds_extent_later))
})
test_that("Retrieving dimensions", {
  ds_simple <- H5S$new(type="simple", dims=c(10,20), maxdims=c(Inf, 30))
  ds_scalar <- H5S$new(type="scalar")
  ds_null <- H5S$new(type="null")
  expect_equal(ds_null$dims, NULL)
  expect_equal(ds_null$maxdims, NULL)
  expect_equal(ds_scalar$dims, integer(0))
  expect_equal(ds_scalar$maxdims, integer(0))
  expect_equal(ds_simple$dims, c(10,20))
  expect_equal(ds_simple$maxdims, c(Inf, 30))
})
test_that("Setting and retrieving hyperslabs", {
  ## create a simple dataspace and select one strided hyperslab (SET op);
  ## the selection consists of 2 blocks along the first dimension
  ds_simple <- H5S$new(type="simple", dims=c(10,10,40), maxdims=c(Inf, Inf, Inf))
  ds_simple$select_hyperslab(start=c(2,3,2), count=c(2,1,1), block=c(2,3,2), stride=c(3,5,3), op=h5const$H5S_SELECT_SET)
  blocks <- ds_simple$get_select_hyper_nblocks()
  blocklist <- ds_simple$get_select_hyper_blocklist()
  ## now check that the blocklist is as expected
  expect_equal(blocks, 2)
  expect_equal(blocklist, matrix(c(2,3,2, 3, 5, 3, 5, 3, 2, 6, 5, 3), ncol=3, byrow=TRUE))
})
test_that("Testing encode/decode as well as copy", {
  ## create a new dataspace, encode it, then decode it again, then compare
  ## the old one to the new one (and likewise for copy())
  ds_simple <- H5S$new(type="simple", dims=c(10,10,40), maxdims=c(Inf, Inf, Inf))
  ds_simple$select_hyperslab(start=c(2,3,2), count=c(2,1,1), block=c(2,3,2), stride=c(3,5,3), op=h5const$H5S_SELECT_SET)
  buf <- ds_simple$encode()
  ds_decode <- H5S$new(decode_buf=buf)
  ds_copy <- ds_simple$copy()
  ## now check that in various aspects, the new and the old dataspace are identical
  expect_equal(ds_simple$get_select_type(), ds_decode$get_select_type())
  expect_equal(ds_simple$get_select_hyper_blocklist(), ds_decode$get_select_hyper_blocklist())
  expect_equal(ds_simple$get_select_type(), ds_copy$get_select_type())
  expect_equal(ds_simple$get_select_hyper_blocklist(), ds_copy$get_select_hyper_blocklist())
  ## ds_simple$offset_simple(c(5,1,1))
  ## ds_simple$select_valid()
  ## ds_simple$offset_simple(c(6,1,1))
  ## ds_simple$select_valid()
})
test_that("Setting of individual points", {
  ds_simple <- H5S$new(type="simple", dims=c(12,10,40), maxdims=c(Inf, Inf, Inf))
  coords <- matrix(c(1:5, 2:6, 3:7), ncol=3)
  ds_simple$select_elements(coord=coords, byrow=TRUE)
  coords_selected <- ds_simple$get_select_elem_pointlist()
  expect_equal(coords, coords_selected)
  ## now extend the selection with an APPEND operation
  ds_simple$select_elements(coord=c(2,3,4), op=h5const$H5S_SELECT_APPEND)
  coords_selected_2 <- ds_simple$get_select_elem_pointlist()
  expect_equal(rbind(coords, c(2,3,4)), coords_selected_2)
  ## ds_simple$offset_simple(c(9,1,1))
  ## ds_simple$get_select_bounds()
  ## ds_simple$select_valid()
  ## ds_simple$offset_simple(c(10,1,1))
  ## ds_simple$get_select_bounds()
  ## ds_simple$select_valid()
  ## ds_simple$offset_simple(c(11,1,1))
  ## ds_simple$get_select_bounds()
  ## ds_simple$select_valid()
  ## ds_simple <- H5S$new(type="simple", dims=c(5,10,20), maxdims=c(Inf, Inf, Inf))
  ## ds_simple$select_elements(coord=c(1,1,1), byrow=TRUE)
  ## ds_simple$select_valid()
  ## ds_simple$offset_simple(NULL)
  ## ds_simple$get_select_bounds()
  ## ds_simple$select_valid()
  ## ds_simple$offset_simple(c(4,0,0))
  ## ds_simple$get_select_bounds()
  ## ds_simple$select_valid()
  ## ds_simple$offset_simple(c(5,0,0))
  ## ds_simple$get_select_bounds()
  ## ds_simple$select_valid()
  ## ds_simple$offset_simple(c(6,0,0))
  ## ds_simple$get_select_bounds()
  ## ds_simple$select_valid()
})
| /tests/testthat/test-h5s.R | permissive | Novartis/hdf5r | R | false | false | 5,403 | r | #############################################################################
##
## Copyright 2016 Novartis Institutes for BioMedical Research Inc.
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
#############################################################################
# (Duplicate copy of the H5S tests above.)
context("H5S")
test_that("Setting different types of dataspaces", {
  ds_simple <- H5S$new(type="simple")
  ds_scalar <- H5S$new(type="scalar")
  ds_null <- H5S$new(type="null")
  ## check that the types are as expected
  expect_true(ds_simple$get_select_type() == "H5S_SEL_ALL")
  expect_equal(ds_simple$get_simple_extent_type(), h5const$H5S_SIMPLE)
  expect_equal(ds_scalar$get_simple_extent_type(), h5const$H5S_SCALAR)
  expect_equal(ds_null$get_simple_extent_type(), h5const$H5S_NULL)
})
test_that("Setting extensions of dataspace", {
  ## create one dataspace with the extent set at construction time, then
  ## one where the extent is set after creation; both should be equal
  ds_extent_new <- H5S$new(dims=as.double(c(4,6,3,4)), maxdims=c(Inf, 6, Inf, 8))
  ds_extent_later <- H5S$new(type="simple")
  ds_extent_later$set_extent_simple(dims=c(4,6,3,4), maxdims=c(Inf, 6, Inf, 8))
  expect_true(ds_extent_new$extent_equal(ds_extent_later))
})
test_that("Retrieving dimensions", {
  ds_simple <- H5S$new(type="simple", dims=c(10,20), maxdims=c(Inf, 30))
  ds_scalar <- H5S$new(type="scalar")
  ds_null <- H5S$new(type="null")
  expect_equal(ds_null$dims, NULL)
  expect_equal(ds_null$maxdims, NULL)
  expect_equal(ds_scalar$dims, integer(0))
  expect_equal(ds_scalar$maxdims, integer(0))
  expect_equal(ds_simple$dims, c(10,20))
  expect_equal(ds_simple$maxdims, c(Inf, 30))
})
test_that("Setting and retrieving hyperslabs", {
  ## create a simple dataspace and select one strided hyperslab (SET op)
  ds_simple <- H5S$new(type="simple", dims=c(10,10,40), maxdims=c(Inf, Inf, Inf))
  ds_simple$select_hyperslab(start=c(2,3,2), count=c(2,1,1), block=c(2,3,2), stride=c(3,5,3), op=h5const$H5S_SELECT_SET)
  blocks <- ds_simple$get_select_hyper_nblocks()
  blocklist <- ds_simple$get_select_hyper_blocklist()
  ## now check that the blocklist is as expected
  expect_equal(blocks, 2)
  expect_equal(blocklist, matrix(c(2,3,2, 3, 5, 3, 5, 3, 2, 6, 5, 3), ncol=3, byrow=TRUE))
})
test_that("Testing encode/decode as well as copy", {
  ## create a new dataspace, encode it, then decode it again, then compare
  ## the old one to the new one (and likewise for copy())
  ds_simple <- H5S$new(type="simple", dims=c(10,10,40), maxdims=c(Inf, Inf, Inf))
  ds_simple$select_hyperslab(start=c(2,3,2), count=c(2,1,1), block=c(2,3,2), stride=c(3,5,3), op=h5const$H5S_SELECT_SET)
  buf <- ds_simple$encode()
  ds_decode <- H5S$new(decode_buf=buf)
  ds_copy <- ds_simple$copy()
  ## now check that in various aspects, the new and the old dataspace are identical
  expect_equal(ds_simple$get_select_type(), ds_decode$get_select_type())
  expect_equal(ds_simple$get_select_hyper_blocklist(), ds_decode$get_select_hyper_blocklist())
  expect_equal(ds_simple$get_select_type(), ds_copy$get_select_type())
  expect_equal(ds_simple$get_select_hyper_blocklist(), ds_copy$get_select_hyper_blocklist())
  ## ds_simple$offset_simple(c(5,1,1))
  ## ds_simple$select_valid()
  ## ds_simple$offset_simple(c(6,1,1))
  ## ds_simple$select_valid()
})
test_that("Setting of individual points", {
  ds_simple <- H5S$new(type="simple", dims=c(12,10,40), maxdims=c(Inf, Inf, Inf))
  coords <- matrix(c(1:5, 2:6, 3:7), ncol=3)
  ds_simple$select_elements(coord=coords, byrow=TRUE)
  coords_selected <- ds_simple$get_select_elem_pointlist()
  expect_equal(coords, coords_selected)
  ## now extend the selection with an APPEND operation
  ds_simple$select_elements(coord=c(2,3,4), op=h5const$H5S_SELECT_APPEND)
  coords_selected_2 <- ds_simple$get_select_elem_pointlist()
  expect_equal(rbind(coords, c(2,3,4)), coords_selected_2)
  ## ds_simple$offset_simple(c(9,1,1))
  ## ds_simple$get_select_bounds()
  ## ds_simple$select_valid()
  ## ds_simple$offset_simple(c(10,1,1))
  ## ds_simple$get_select_bounds()
  ## ds_simple$select_valid()
  ## ds_simple$offset_simple(c(11,1,1))
  ## ds_simple$get_select_bounds()
  ## ds_simple$select_valid()
  ## ds_simple <- H5S$new(type="simple", dims=c(5,10,20), maxdims=c(Inf, Inf, Inf))
  ## ds_simple$select_elements(coord=c(1,1,1), byrow=TRUE)
  ## ds_simple$select_valid()
  ## ds_simple$offset_simple(NULL)
  ## ds_simple$get_select_bounds()
  ## ds_simple$select_valid()
  ## ds_simple$offset_simple(c(4,0,0))
  ## ds_simple$get_select_bounds()
  ## ds_simple$select_valid()
  ## ds_simple$offset_simple(c(5,0,0))
  ## ds_simple$get_select_bounds()
  ## ds_simple$select_valid()
  ## ds_simple$offset_simple(c(6,0,0))
  ## ds_simple$get_select_bounds()
  ## ds_simple$select_valid()
})
|
# Code to semi-automatically remove eye-blinks using independent components analysis
#
# Dan Acheson (Maineiac)
# 2/10/2015
library(fastICA)
library(data.table)
library(prospectr)
library(stringr)
remove_eyeblinks_ica <- function(eeg_data, channel_names, threshold = 20) {
  # Semi-automatic removal of eyeblink artifacts from EEG data.
  #
  # Runs fastICA on the channel columns, flags components whose smoothed
  # gap derivative exceeds 0.2 more than `threshold` times (blink-like
  # spikes), asks the user to confirm each candidate against the EOG
  # trace, zeroes the confirmed components, and reconstructs the data.
  #
  # INPUT:
  #   eeg_data      : data.frame organized as time x channel; must contain
  #                   an EOG column for the visual comparison.
  #   channel_names : character vector of channel column names.
  #   threshold     : minimum count of large-derivative events for a
  #                   component to be flagged as a blink candidate.
  #
  # OUTPUT:
  #   eeg_data with the confirmed blink ICs removed.
  library(fastICA)
  library(prospectr)
  data_chan_only <- eeg_data[, which(names(eeg_data) %in% channel_names)]
  print("Performing ICA")
  data_ica <- fastICA(data_chan_only, n.comp = ncol(data_chan_only), method = "C", verbose = F)
  # Count large smoothed-derivative events per component (vapply instead of
  # sapply so the result type is stable; seq_len instead of seq(1:n)).
  blink_detect <- vapply(seq_len(ncol(data_ica$S)),
                         function(k) sum(gapDer(data_ica$S[, k], s = 11, w = 3) > 0.2),
                         numeric(1))
  blink_components <- which(blink_detect > threshold)
  blink_to_remove <- c()
  # Loop through each flagged component and ask the user whether to remove it.
  for (blink in blink_components) {
    par(mfrow = c(2, 1))
    print_length = round(dim(data_ica$S)[1] / 10)
    # Plot the component trace above the EOG trace for visual comparison.
    plot(data_ica$S[1:print_length, blink], type = 'l', ylab = paste0("ICA component ", blink))
    plot(eeg_data$EOG[1:print_length], type = 'l', ylab = "EOG")
    remove <- readline("Remove this component? Y/N > ")
    if ("y" %in% tolower(remove)) {
      blink_to_remove <- c(blink_to_remove, blink)
    }
  }
  # Zero the confirmed components and reconstruct the channel data.
  if (length(blink_to_remove) > 0) {
    print(paste0("Removing ", length(blink_to_remove), " components"))
    ica_S <- data_ica$S
    # BUG FIX: this previously used `==` (a comparison) instead of `<-`,
    # so the blink components were never actually zeroed and the
    # "cleaned" output was identical to the input.
    ica_S[, blink_to_remove] <- 0
    dat_reconstruct <- ica_S %*% data_ica$A
    # Put the reconstructed data back into the data frame.
    eeg_data[, which(names(eeg_data) %in% channel_names)] <- dat_reconstruct
  }
  return(eeg_data)
}
#Loop through train or test files to remove eyeblinks
# NOTE: saves the file as .rbin, not .csv
# NOTE(review): hard-coded absolute path; only works on the author's machine.
setwd("/home/zak/kaggle/bci/data")
ica_files <- list.files("./test/")
# NOTE(review): ".csv" is a regex here, so the dot matches any character --
# confirm that no file names like "Xcsv" can slip through.
ica_files <- ica_files[str_detect(ica_files,".csv")]
file_count <- 1
total_files <- length(ica_files)
for(curr_file in ica_files) {
  dat <- as.data.frame(fread(paste0("./test/",curr_file)))
  # Columns 2:57 are assumed to be the 56 EEG channels -- TODO confirm.
  channel_names <- names(dat)[2:57]
  file_prefix <- str_replace(curr_file,".csv","")
  dat_ica <- remove_eyeblinks_ica(dat, channel_names = channel_names, threshold =10)
  save(dat_ica, file = paste0("./test/", file_prefix, ".ica_remove.rbin"))
  print(paste0("Done with file: ", file_count, "/", total_files))
  file_count <- file_count + 1
}
| /bci/samples/R/ica_remove_eyeblinks2.r | no_license | thekannman/kaggle | R | false | false | 3,015 | r | # Code to semi-automatically remove eye-blinks using independent components analysis
#
# Dan Acheson (Maineiac)
# 2/10/2015
library(fastICA)
library(data.table)
library(prospectr)
library(stringr)
remove_eyeblinks_ica <- function(eeg_data, channel_names, threshold = 20) {
  # Semi-automatic removal of eyeblink artifacts from EEG data.
  #
  # Runs fastICA on the channel columns, flags components whose smoothed
  # gap derivative exceeds 0.2 more than `threshold` times (blink-like
  # spikes), asks the user to confirm each candidate against the EOG
  # trace, zeroes the confirmed components, and reconstructs the data.
  #
  # INPUT:
  #   eeg_data      : data.frame organized as time x channel; must contain
  #                   an EOG column for the visual comparison.
  #   channel_names : character vector of channel column names.
  #   threshold     : minimum count of large-derivative events for a
  #                   component to be flagged as a blink candidate.
  #
  # OUTPUT:
  #   eeg_data with the confirmed blink ICs removed.
  library(fastICA)
  library(prospectr)
  data_chan_only <- eeg_data[, which(names(eeg_data) %in% channel_names)]
  print("Performing ICA")
  data_ica <- fastICA(data_chan_only, n.comp = ncol(data_chan_only), method = "C", verbose = F)
  # Count large smoothed-derivative events per component.
  blink_detect <- vapply(seq_len(ncol(data_ica$S)),
                         function(k) sum(gapDer(data_ica$S[, k], s = 11, w = 3) > 0.2),
                         numeric(1))
  blink_components <- which(blink_detect > threshold)
  blink_to_remove <- c()
  # Loop through each flagged component and ask the user whether to remove it.
  for (blink in blink_components) {
    par(mfrow = c(2, 1))
    print_length = round(dim(data_ica$S)[1] / 10)
    # Plot the component trace above the EOG trace for visual comparison.
    plot(data_ica$S[1:print_length, blink], type = 'l', ylab = paste0("ICA component ", blink))
    plot(eeg_data$EOG[1:print_length], type = 'l', ylab = "EOG")
    remove <- readline("Remove this component? Y/N > ")
    if ("y" %in% tolower(remove)) {
      blink_to_remove <- c(blink_to_remove, blink)
    }
  }
  # Zero the confirmed components and reconstruct the channel data.
  if (length(blink_to_remove) > 0) {
    print(paste0("Removing ", length(blink_to_remove), " components"))
    ica_S <- data_ica$S
    # BUG FIX: this previously used `==` (a comparison) instead of `<-`,
    # so the blink components were never actually zeroed.
    ica_S[, blink_to_remove] <- 0
    dat_reconstruct <- ica_S %*% data_ica$A
    eeg_data[, which(names(eeg_data) %in% channel_names)] <- dat_reconstruct
  }
  return(eeg_data)
}
#Loop through train or test files to remove eyeblinks
# NOTE: saves the file as .rbin, not .csv
# NOTE(review): hard-coded absolute path; only works on the author's machine.
setwd("/home/zak/kaggle/bci/data")
ica_files <- list.files("./test/")
ica_files <- ica_files[str_detect(ica_files,".csv")]
file_count <- 1
total_files <- length(ica_files)
for(curr_file in ica_files) {
  dat <- as.data.frame(fread(paste0("./test/",curr_file)))
  # Columns 2:57 are assumed to be the 56 EEG channels -- TODO confirm.
  channel_names <- names(dat)[2:57]
  file_prefix <- str_replace(curr_file,".csv","")
  dat_ica <- remove_eyeblinks_ica(dat, channel_names = channel_names, threshold =10)
  save(dat_ica, file = paste0("./test/", file_prefix, ".ica_remove.rbin"))
  print(paste0("Done with file: ", file_count, "/", total_files))
  file_count <- file_count + 1
}
|
library(dplyr)
library(tidyr)
#1
# Reshape the tidyr demo tables to long form, join on country + year, and
# compute the case rate per 10,000 people.
table_4a <- table4a%>%gather(`1999`,`2000`,key="year",value="cases") %>% arrange(desc(year))
table_4b <- table4b%>%gather(`1999`,`2000`,key="year",value="population") %>% arrange(desc(year))
# NOTE(review): `sum` shadows base::sum for the rest of the script.
sum <- table_4a%>%inner_join(table_4b,by=c("country","year"))
sum <- sum %>% mutate(rate=cases/population*10000)
#1a
table2
newdata <- table2 %>% spread(type,count)
newdata %>% select(country,year,cases) %>% arrange(desc(year))
#1b
newdata %>% select(country,year,population) %>% arrange(desc(year))
#1c
newdata %>% mutate(rate = cases/population*10000)
#1d
newdf <- newdata %>% mutate(rate = cases/population*10000)
#2
#It should be: table4a %>% gather(`1999`,`2000`,key="year",value="cases"), because the key in the table4a include the single quotes.
#3a
library(nycflights13)
# Count flights for each (month, day, air_time) combination.
flights %>% group_by(month,day, air_time) %>% summarise(count=n())
distribution <- flights %>% group_by(month,day, air_time) %>% summarise(count=n())
# Per-month mean and sd of air_time. A single group_by/summarize replaces
# the twelve copy-pasted filter(month == k) pipelines, and the stray
# dangling summarize() call from the original (which errored when the
# script was sourced) has been removed.
monthly_airtime <- flights %>%
  group_by(month) %>%
  summarize(mean = mean(air_time, na.rm = TRUE),
            sd = sd(air_time, na.rm = TRUE))
vector <- monthly_airtime
vector
#3b
# Convert dep_time / sched_dep_time (HHMM-encoded integers) to minutes
# since midnight and check them against dep_delay.
flightdata <- flights %>% mutate(deptime.hour=dep_time%/%100,deptime.min=dep_time %% 100,sch.deptime.hour=sched_dep_time%/%100,sch.deptime.min=sched_dep_time %% 100) %>%
  mutate(deptime.totalmin=deptime.hour*60+deptime.min) %>%
  mutate(sch.deptime.totalmin=sch.deptime.hour*60+sch.deptime.min) %>%
  mutate(diff = deptime.totalmin - sch.deptime.totalmin - dep_delay) %>%
  filter(diff != 0)
#Extract hour and minute from dep_time and sched_dep_time and convert both to total minutes.
#Then compute the difference between deptime.totalmin and sch.deptime.totalmin and keep rows where it does not equal dep_delay.
#Inspection shows the remaining departures actually happened on the next day.
#Re-doing the conversion for that subset with a 24-hour shift (below) leaves no discrepancies, which indicates the dep_delay column is computed correctly.
#Therefore, dep_time, sched_dep_time, and dep_delay are consistent.
anotherday <- flightdata %>% mutate(another.diff = deptime.totalmin+24*60 - sch.deptime.totalmin - dep_delay) %>%
  filter(another.diff != 0)
#3c
# Note: `minute` ranges 0-59, so the "50-60" window is effectively 50-59.
delayornot <- flights %>% filter((minute >=20 & minute <=30) | (minute >=50 & minute <=60)) %>%
  mutate(delay=dep_delay>0)
#Create a binary variable called delay. In the delay column, "True" means the flight was delayed and "False" means the flight is not delayed.
twenty <- delayornot %>% filter((minute >=20 & minute <=30) & (delay==TRUE)) %>%
  summarise(count=n())
fifty <- delayornot %>% filter((minute >=50 & minute <=60) & (delay==TRUE)) %>%
  summarise(count=n())
#Therefore, we can find that flights leaving in 50-60min are less delayed compared to those leaving in 20-30min.
#4
# Scrape the QBS homepage and pull out text by HTML element type.
library(rvest)
scraping_web <- read_html("https://geiselmed.dartmouth.edu/qbs/")
head(scraping_web)
h1_text <- scraping_web %>% html_nodes("h1") %>% html_text()
h2_text <- scraping_web%>% html_nodes("h2") %>% html_text()
length(h2_text)
h3_text <- scraping_web %>% html_nodes("h3") %>% html_text()
h4_text <- scraping_web %>% html_nodes("h4") %>% html_text()
p_nodes <- scraping_web %>% html_nodes("p")
p_nodes[1:6]
p_text <- scraping_web %>% html_nodes("p") %>% html_text()
length(p_text)
ul_text <- scraping_web %>% html_nodes("ul") %>%html_text()
length(ul_text)
ul_text[1]
substr(ul_text[2],start=1,stop=19)
li_text <- scraping_web %>% html_nodes("li") %>%html_text()
length(li_text)
li_text[1:8]
# all text irrespective of headings, paragraphs, lists, ordered lists etc.
all_text <- scraping_web %>%
  html_nodes("div") %>%
  html_text()
all_text
p_text
# BUG FIX: the page object created above is `scraping_web`; `scraping_wiki`
# was never defined in this script, so this chunk errored when run.
# NOTE(review): "#mw-content-text" is a MediaWiki selector and likely
# matches nothing on this page -- confirm the intended selector.
body_text <- scraping_web %>%
  html_nodes("#mw-content-text") %>%
  html_text()
substr(body_text, start = 1, stop = 57)
| /project3/project3.R | no_license | mengdanzhu/QBS181_Data_Wrangling_Project_and_Tasks | R | false | false | 5,357 | r | library(dplyr)
library(tidyr)
#1
# Reshape table4a/table4b from wide (one column per year) to long form,
# newest year first.
table_4a <- table4a %>% gather(`1999`, `2000`, key = "year", value = "cases") %>% arrange(desc(year))
table_4b <- table4b %>% gather(`1999`, `2000`, key = "year", value = "population") %>% arrange(desc(year))
# Join cases and population per country/year, then compute cases per 10,000
# people. Renamed from 'sum' in the original, which masked base::sum().
tb_rate <- table_4a %>% inner_join(table_4b, by = c("country", "year"))
tb_rate <- tb_rate %>% mutate(rate = cases / population * 10000)
#1a
table2
# Spread the type/count pair into separate 'cases' and 'population' columns.
newdata <- table2 %>% spread(type, count)
newdata %>% select(country, year, cases) %>% arrange(desc(year))
#1b
newdata %>% select(country, year, population) %>% arrange(desc(year))
#1c
newdata %>% mutate(rate = cases / population * 10000)
#1d
newdf <- newdata %>% mutate(rate = cases / population * 10000)
#2
#It should be: table4a %>% gather(`1999`,`2000`,key="year",value="cases"), because the year columns
#in table4a are non-syntactic names (they start with a digit) and must be wrapped in backticks.
#3a
library(nycflights13)
# Frequency of each observed air_time value per day.
flights %>% group_by(month, day, air_time) %>% summarise(count = n())
distribution <- flights %>% group_by(month, day, air_time) %>% summarise(count = n())
# Monthly mean and sd of air_time. The original repeated the same
# filter/summarize pipeline twelve times (distribution1..distribution12, once
# per month) and also contained a stray bare summarize() call that errors when
# run; a single grouped summary produces the same statistics in one pass.
monthly_airtime <- flights %>%
  group_by(month) %>%
  summarize(mean = mean(air_time, na.rm = TRUE),
            sd = sd(air_time, na.rm = TRUE))
monthly_airtime
#3b
# Decode dep_time and sched_dep_time (HHMM-encoded integers) into minutes
# after midnight, then keep only rows where actual - scheduled does not match
# dep_delay.
flightdata <- flights %>%
  mutate(
    deptime.hour = dep_time %/% 100,
    deptime.min = dep_time %% 100,
    sch.deptime.hour = sched_dep_time %/% 100,
    sch.deptime.min = sched_dep_time %% 100,
    deptime.totalmin = deptime.hour * 60 + deptime.min,
    sch.deptime.totalmin = sch.deptime.hour * 60 + sch.deptime.min,
    diff = deptime.totalmin - sch.deptime.totalmin - dep_delay
  ) %>%
  filter(diff != 0)
# The rows flagged above are flights whose actual departure fell on the day
# after the scheduled one. After adding 24 hours to the actual departure time,
# no discrepancies remain, so dep_time, sched_dep_time and dep_delay are
# mutually consistent.
anotherday <- flightdata %>%
  mutate(another.diff = deptime.totalmin + 24 * 60 - sch.deptime.totalmin - dep_delay) %>%
  filter(another.diff != 0)
#3c
# Keep flights scheduled at minutes 20-30 or 50-60 of the hour and flag each
# one as delayed (delay column: TRUE = departed late, FALSE = on time/early).
delayornot <- flights %>%
  filter(between(minute, 20, 30) | between(minute, 50, 60)) %>%
  mutate(delay = dep_delay > 0)
twenty <- delayornot %>%
  filter(between(minute, 20, 30), delay) %>%
  summarise(count = n())
fifty <- delayornot %>%
  filter(between(minute, 50, 60), delay) %>%
  summarise(count = n())
# Fewer delayed departures in the 50-60 minute window than in the 20-30 window.
#4
library(rvest)
# Download and parse the QBS program homepage.
scraping_web <- read_html("https://geiselmed.dartmouth.edu/qbs/")
head(scraping_web)
# Extract the text of each heading level.
h1_text <- scraping_web %>% html_nodes("h1") %>% html_text()
h2_text <- scraping_web %>% html_nodes("h2") %>% html_text()
length(h2_text)
h3_text <- scraping_web %>% html_nodes("h3") %>% html_text()
h4_text <- scraping_web %>% html_nodes("h4") %>% html_text()
# Paragraph nodes, then their plain text.
p_nodes <- scraping_web %>% html_nodes("p")
p_nodes[1:6]
p_text <- scraping_web %>% html_nodes("p") %>% html_text()
length(p_text)
# Unordered lists and individual list items.
ul_text <- scraping_web %>% html_nodes("ul") %>% html_text()
length(ul_text)
ul_text[1]
substr(ul_text[2], start = 1, stop = 19)
li_text <- scraping_web %>% html_nodes("li") %>% html_text()
length(li_text)
li_text[1:8]
# all text irrespective of headings, paragraphs, lists, ordered lists etc.
all_text <- scraping_web %>%
  html_nodes("div") %>%
  html_text()
all_text
p_text
# Fixed: the original piped from 'scraping_wiki', an object never defined in
# this script; the parsed page is stored in 'scraping_web'.
# NOTE(review): "#mw-content-text" is a Wikipedia-specific container id and is
# unlikely to match anything on this page -- confirm the intended selector.
body_text <- scraping_web %>%
  html_nodes("#mw-content-text") %>%
  html_text()
substr(body_text, start = 1, stop = 57)
|
#!/usr/bin/env Rscript
library(ggplot2)
suppressPackageStartupMessages(library(niftir))
# Project root and input/mask locations.
base <- "/home/data/Projects/CWAS"
indir <- file.path(base, "nki/sca_voxelwise_scan1/30_sca")  # directory with the SCA result CSV
# NOTE(review): the leading "/" yields a double slash ("...CWAS//nki/...");
# harmless on POSIX but inconsistent with 'indir' above.
maskfile <- file.path(base, "/nki/rois/mask_gray_4mm.nii.gz")
mask <- read.mask(maskfile)  # voxel mask read via niftir
nvoxs <- sum(mask)  # number of in-mask voxels
## Summarizes data.
## Gives count, mean, standard deviation, standard error of the mean, and confidence interval (default 95%).
## data: a data frame.
## measurevar: the name of a column that contains the variable to be summariezed
## groupvars: a vector containing names of columns that contain grouping variables
## na.rm: a boolean that indicates whether to ignore NA's
## conf.interval: the percent range of the confidence interval (default is 95%)
# Summarize 'measurevar' within the groups defined by 'groupvars'.
# Returns a data frame with the grouping columns plus:
#   N            - number of (non-missing, if na.rm) observations
#   <measurevar> - the group mean (column named after the measured variable)
#   sd, se, ci   - standard deviation, standard error, and the half-width of
#                  the confidence interval at level 'conf.interval'
summarySE <- function(data=NULL, measurevar, groupvars=NULL, na.rm=FALSE,
                      conf.interval=.95, .drop=TRUE) {
  require(plyr)
  # Number of usable observations; with na.rm=TRUE, NAs are not counted.
  n_valid <- function(x, na.rm=FALSE) {
    if (na.rm) sum(!is.na(x)) else length(x)
  }
  # Per-group N, mean and sd of the measurement column.
  out <- ddply(data, groupvars, .drop=.drop,
               function(chunk, col, na.rm) {
                 vals <- chunk[, col]
                 c(N = n_valid(vals, na.rm=na.rm),
                   mean = mean(vals, na.rm=na.rm),
                   sd = sd(vals, na.rm=na.rm))
               },
               measurevar,
               na.rm)
  # Give the mean column the name of the measured variable.
  out <- rename(out, c("mean"=measurevar))
  # Standard error of the mean.
  out$se <- out$sd / sqrt(out$N)
  # Two-sided t multiplier for the confidence interval, e.g. the 97.5%
  # quantile for a 95% interval, with df = N - 1.
  ciMult <- qt(conf.interval/2 + .5, out$N - 1)
  out$ci <- out$se * ciMult
  out
}
# Shared ggplot2 theme for the summary bar plot: grey base, Helvetica text,
# no axis titles/ticks or legend, large axis and strip labels, and
# horizontal-only white grid lines.
barplot_theme <- theme_grey() +
  theme(
    text = element_text(family="Helvetica"),
    axis.ticks = element_blank(),
    axis.title = element_blank(),
    axis.text = element_text(size=18),
    legend.position = "none",
    strip.text = element_text(size=18),
    panel.grid.major.y = element_line(color="white", size=1),
    panel.grid.minor.y = element_line(color="white", size=0.5),
    panel.grid.major.x = element_blank(),
    panel.grid.minor.x = element_blank()
  )
###
# SETUP
###
# Load the per-ROI GLM + MDMR results.
df <- read.csv(file.path(indir, "rois_glm+mdmr.csv"))
# Mean/sd/se/ci of 'sca' per scan and ROI label.
mdf <- summarySE(df, "sca", c("scan", "label"))
# Relabel: fix the display order of ROI types and give scans readable names.
roi_types <- c("maxima", "significant", "not-significant", "minima")
scans <- c("short", "medium")
mdf$label <- factor(mdf$label, levels=roi_types)
mdf$scan <- factor(mdf$scan, levels=scans, labels=c("Scan 1", "Scan 2"))
###
# PLOT
###
# Bar plot of mean 'sca' per ROI type with +/- 1 SE error bars, one panel per
# scan. Fixed: the original spelled the argument 'postition', which ggplot2
# silently ignores ("Ignoring unknown parameters").
ggplot(data=mdf, aes(x=label, y=sca, group=label, fill=label)) +
  geom_bar(position="dodge", stat="identity") +
  geom_errorbar(aes(ymin=sca-se, ymax=sca+se),
                width=.2, # Width of the error bars
                position=position_dodge(.9)) +
  facet_grid(. ~ scan) +
  geom_hline(yintercept=0) +
  xlab("ROI Types") +
  ylab("Percent of Significant Connectivity-IQ Associations") +
  barplot_theme +
  theme(axis.text.x = element_blank())
ggsave(file.path(base, "figures/fig_06/B_summary_bar_plot_scan1.png"), width=6, height=4)
| /nki/08_sca_voxelwise/40_plot.R | no_license | vishalmeeni/cwas-paper | R | false | false | 3,649 | r | #!/usr/bin/env Rscript
library(ggplot2)
suppressPackageStartupMessages(library(niftir))
# Project root and input/mask locations.
base <- "/home/data/Projects/CWAS"
indir <- file.path(base, "nki/sca_voxelwise_scan1/30_sca")  # directory with the SCA result CSV
# NOTE(review): the leading "/" yields a double slash ("...CWAS//nki/...");
# harmless on POSIX but inconsistent with 'indir' above.
maskfile <- file.path(base, "/nki/rois/mask_gray_4mm.nii.gz")
mask <- read.mask(maskfile)  # voxel mask read via niftir
nvoxs <- sum(mask)  # number of in-mask voxels
## Summarizes data.
## Gives count, mean, standard deviation, standard error of the mean, and confidence interval (default 95%).
## data: a data frame.
## measurevar: the name of a column that contains the variable to be summariezed
## groupvars: a vector containing names of columns that contain grouping variables
## na.rm: a boolean that indicates whether to ignore NA's
## conf.interval: the percent range of the confidence interval (default is 95%)
# Summarize 'measurevar' within the groups defined by 'groupvars': returns the
# grouping columns plus N, <measurevar> (the group mean), sd, se, and the
# confidence-interval half-width ci at level 'conf.interval'.
summarySE <- function(data=NULL, measurevar, groupvars=NULL, na.rm=FALSE,
conf.interval=.95, .drop=TRUE) {
require(plyr)
# Length variant that can handle NA's: if na.rm is TRUE, NAs are not counted.
length2 <- function (x, na.rm=FALSE) {
if (na.rm) sum(!is.na(x))
else length(x)
}
# Per-group N, mean and sd of the measurement column (extra args
# 'measurevar' and 'na.rm' are forwarded to .fun by ddply).
datac <- ddply(data, groupvars, .drop=.drop,
.fun= function(xx, col, na.rm) {
c( N = length2(xx[,col], na.rm=na.rm),
mean = mean (xx[,col], na.rm=na.rm),
sd = sd (xx[,col], na.rm=na.rm)
)
},
measurevar,
na.rm
)
# Rename the "mean" column after the measured variable.
datac <- rename(datac, c("mean"=measurevar))
datac$se <- datac$sd / sqrt(datac$N) # Calculate standard error of the mean
# Confidence interval multiplier for standard error
# Calculate t-statistic for confidence interval:
# e.g., if conf.interval is .95, use .975 (above/below), and use df=N-1
ciMult <- qt(conf.interval/2 + .5, datac$N-1)
datac$ci <- datac$se * ciMult
return(datac)
}
# Shared ggplot2 theme for the summary bar plot: Helvetica text, no axis
# titles/ticks or legend, large labels, horizontal-only white grid lines.
barplot_theme <- theme_grey() +
theme(text=element_text(family="Helvetica")) +
theme(axis.ticks = element_blank(), axis.title = element_blank()) +
theme(axis.text = element_text(size=18)) +
theme(legend.position="none") +
theme(strip.text = element_text(size=18)) +
theme(panel.grid.major.y = element_line(color="white", size=1),
panel.grid.minor.y = element_line(color="white", size=0.5),
panel.grid.major.x = element_blank(),
panel.grid.minor.x = element_blank())
###
# SETUP
###
# Load the per-ROI GLM + MDMR results.
df <- read.csv(file.path(indir, "rois_glm+mdmr.csv"))
# Mean/sd/se/ci of 'sca' per scan and ROI label.
mdf <- summarySE(df, "sca", c("scan", "label"))
# Relabel: fix the display order of ROI types and give scans readable names.
roi_types <- c("maxima", "significant", "not-significant", "minima")
scans <- c("short", "medium")
mdf$label <- factor(mdf$label, levels=roi_types)
mdf$scan <- factor(mdf$scan, levels=scans, labels=c("Scan 1", "Scan 2"))
###
# PLOT
###
# Bar plot of mean 'sca' per ROI type with +/- 1 SE error bars, one panel per
# scan. Fixed: the original spelled the argument 'postition', which ggplot2
# silently ignores ("Ignoring unknown parameters").
ggplot(data=mdf, aes(x=label, y=sca, group=label, fill=label)) +
  geom_bar(position="dodge", stat="identity") +
  geom_errorbar(aes(ymin=sca-se, ymax=sca+se),
                width=.2, # Width of the error bars
                position=position_dodge(.9)) +
  facet_grid(. ~ scan) +
  geom_hline(yintercept=0) +
  xlab("ROI Types") +
  ylab("Percent of Significant Connectivity-IQ Associations") +
  barplot_theme +
  theme(axis.text.x = element_blank())
ggsave(file.path(base, "figures/fig_06/B_summary_bar_plot_scan1.png"), width=6, height=4)
|
# Scale/mode definitions: each entry lists the pitch classes of the mode as
# semitone offsets from the tonic (0 = root ... 11 = major seventh).
musical.modes <- list('Major'= c(0, 2, 4, 5, 7, 9, 11),
'Minor'= c(0, 2, 3, 5, 7, 8, 10),
'Dorian'= c(0, 2, 3, 5, 7, 9, 10),
'Mixolydian'= c(0, 2, 4, 5, 7, 9, 10),
'Lydian'= c(0, 2, 4, 6, 7, 9, 11),
'Phrygian'= c(0, 1, 3, 5, 7, 8, 10),
'Locrian'= c(0, 1, 3, 5, 6, 8, 10),
'Diminished'= c(0, 1, 3, 4, 6, 7, 9, 10),
'Whole-half'= c(0, 2, 3, 5, 6, 8, 9, 11),
'Whole Tone'= c(0, 2, 4, 6, 8, 10),
'Minor Blues'= c(0, 3, 5, 6, 7, 10),
'Minor Pentatonic'= c(0, 3, 5, 7, 10),
'Major Pentatonic'= c(0, 2, 4, 7, 9),
'Harmonic Minor'= c(0, 2, 3, 5, 7, 8, 11),
'Melodic Minor'= c(0, 2, 3, 5, 7, 9, 11),
'Super Locrian'= c(0, 1, 3, 4, 6, 8, 10),
'Bhairav'= c(0, 1, 4, 5, 7, 8, 11),
'Hungarian Minor'= c(0, 2, 3, 6, 7, 8, 11),
'Minor Gypsy'= c(0, 1, 4, 5, 7, 8, 10),
'Hirojoshi'= c(0, 2, 3, 7, 8),
'In-Sen'= c(0, 1, 5, 7, 10),
'Iwato'= c(0, 1, 5, 6, 10),
'Kumoi'= c(0, 2, 3, 7, 9),
'Pelog'= c(0, 1, 3, 4, 7, 8),
'Spanish'= c(0, 1, 3, 4, 5, 6, 8, 10)
)
# Map note names to pitch-class numbers (0 = C ... 11 = B).
note.numbers <- c('C'=0,'C#'=1,'D'=2,'D#'=3,'E'=4,'F'=5,'F#'=6,'G'=7,'G#'=8,'A'=9,'A#'=10,'B'=11)
# Chord quality (maj/min/dim) built on each scale degree of the given mode.
minor.scale.chords <- c('min','dim','maj','min','min','maj','maj','min')
major.scale.chords <- c('maj','min','min','maj','maj','min','dim','maj')
lydian.scale.chords <- c('maj','maj','min','dim','maj','min','min','maj')
mixolydian.scale.chords <- c('maj','min','dim','maj','min','min','maj','maj')
dorian.scale.chords <- c('min','min','maj','maj','min','dim','maj','min')
phrygian.scale.chords <- c('min','maj','maj','min','dim','maj','min','min')
locrian.scale.chords <- c('dim','maj','min','min','maj','maj','min','dim')
# Convert a (scalar) note number to its note name by reducing it to a pitch
# class. Uses the modulo operator directly instead of the original hand-rolled
# 'x - 12 * (x %/% 12)', which computes exactly the same value.
noteNumberToName <- function(note.number) {
  names(note.numbers[note.numbers == note.number %% 12])
}
# matrix note layout on LinnStrument
# 8 rows x 25 columns of note numbers: adjacent columns rise by one semitone,
# adjacent rows by 5 semitones (rows built top-to-bottom via 7:0).
note.matrix <- t(sapply(7:0,function(x) c(6:30) + (5*x)))
# Same layout with note names instead of numbers.
note.matrix.names <- apply(note.matrix,MARGIN=c(1,2),noteNumberToName)
# A smaller window (rows 3-7, columns 2-10) of the full grid.
note.matrix.small <- note.matrix[3:7,2:10]
note.matrix.small.names <- apply(note.matrix.small,MARGIN=c(1,2),noteNumberToName) | /defs_resources.R | no_license | cole-brokamp/linnstrument_scales | R | false | false | 2,509 | r | musical.modes <- list('Major'= c(0, 2, 4, 5, 7, 9, 11),
'Minor'= c(0, 2, 3, 5, 7, 8, 10),
'Dorian'= c(0, 2, 3, 5, 7, 9, 10),
'Mixolydian'= c(0, 2, 4, 5, 7, 9, 10),
'Lydian'= c(0, 2, 4, 6, 7, 9, 11),
'Phrygian'= c(0, 1, 3, 5, 7, 8, 10),
'Locrian'= c(0, 1, 3, 5, 6, 8, 10),
'Diminished'= c(0, 1, 3, 4, 6, 7, 9, 10),
'Whole-half'= c(0, 2, 3, 5, 6, 8, 9, 11),
'Whole Tone'= c(0, 2, 4, 6, 8, 10),
'Minor Blues'= c(0, 3, 5, 6, 7, 10),
'Minor Pentatonic'= c(0, 3, 5, 7, 10),
'Major Pentatonic'= c(0, 2, 4, 7, 9),
'Harmonic Minor'= c(0, 2, 3, 5, 7, 8, 11),
'Melodic Minor'= c(0, 2, 3, 5, 7, 9, 11),
'Super Locrian'= c(0, 1, 3, 4, 6, 8, 10),
'Bhairav'= c(0, 1, 4, 5, 7, 8, 11),
'Hungarian Minor'= c(0, 2, 3, 6, 7, 8, 11),
'Minor Gypsy'= c(0, 1, 4, 5, 7, 8, 10),
'Hirojoshi'= c(0, 2, 3, 7, 8),
'In-Sen'= c(0, 1, 5, 7, 10),
'Iwato'= c(0, 1, 5, 6, 10),
'Kumoi'= c(0, 2, 3, 7, 9),
'Pelog'= c(0, 1, 3, 4, 7, 8),
'Spanish'= c(0, 1, 3, 4, 5, 6, 8, 10)
)
# Map note names to pitch-class numbers (0 = C ... 11 = B).
note.numbers <- c('C'=0,'C#'=1,'D'=2,'D#'=3,'E'=4,'F'=5,'F#'=6,'G'=7,'G#'=8,'A'=9,'A#'=10,'B'=11)
# Chord quality (maj/min/dim) built on each scale degree of the given mode.
minor.scale.chords <- c('min','dim','maj','min','min','maj','maj','min')
major.scale.chords <- c('maj','min','min','maj','maj','min','dim','maj')
lydian.scale.chords <- c('maj','maj','min','dim','maj','min','min','maj')
mixolydian.scale.chords <- c('maj','min','dim','maj','min','min','maj','maj')
dorian.scale.chords <- c('min','min','maj','maj','min','dim','maj','min')
phrygian.scale.chords <- c('min','maj','maj','min','dim','maj','min','min')
locrian.scale.chords <- c('dim','maj','min','min','maj','maj','min','dim')
# Convert a (scalar) note number to its note name: the expression
# 'x - 12*(x %/% 12)' reduces it to a pitch class (equivalent to x %% 12),
# which is then looked up in note.numbers.
noteNumberToName <- function(note.number) {
names(note.numbers[note.numbers == note.number - (12*(note.number %/% 12))])
}
# matrix note layout on LinnStrument
# 8 rows x 25 columns of note numbers: adjacent columns rise by one semitone,
# adjacent rows by 5 semitones (rows built top-to-bottom via 7:0).
note.matrix <- t(sapply(7:0,function(x) c(6:30) + (5*x)))
# Same layout with note names instead of numbers.
note.matrix.names <- apply(note.matrix,MARGIN=c(1,2),noteNumberToName)
# A smaller window (rows 3-7, columns 2-10) of the full grid.
note.matrix.small <- note.matrix[3:7,2:10]
note.matrix.small.names <- apply(note.matrix.small,MARGIN=c(1,2),noteNumberToName) |
library(stepR)
### Name: transit
### Title: TRANSIT algorithm for detecting jumps
### Aliases: transit
### Keywords: nonparametric
### ** Examples
# NOTE(review): example code extracted from the stepR help page for transit();
# statement order matters below because set.seed() fixes the RNG state for the
# simulation that follows -- do not reorder.
# estimating step-functions with Gaussian white noise added
# simulate a Gaussian hidden Markov model of length 1000 with 2 states
# with identical transition rates 0.01, and signal-to-noise ratio 2
sim <- contMC(1e3, 0:1, matrix(c(0, 0.01, 0.01, 0), 2), param=1/2)
plot(sim$data, cex = 0.1)
# overlay the simulated signal component (sim$cont) in red
lines(sim$cont, col="red")
# maximum-likelihood estimation under multiresolution constraints
fit.MRC <- smuceR(sim$data$y, sim$data$x)
lines(fit.MRC, col="blue")
# choose number of jumps using BIC
path <- steppath(sim$data$y, sim$data$x, max.blocks=1e2)
fit.BIC <- path[[stepsel.BIC(path)]]
lines(fit.BIC, col="green3", lty = 2)
# estimate after filtering
# simulate filtered ion channel recording with two states
set.seed(9)
# sampling rate 10 kHz
sampling <- 1e4
# tenfold oversampling
over <- 10
# 1 kHz 4-pole Bessel-filter, adjusted for oversampling
cutoff <- 1e3
df.over <- dfilter("bessel", list(pole=4, cutoff=cutoff / sampling / over))
# two states, leaving state 1 at 10 Hz, state 2 at 20 Hz
rates <- rbind(c(0, 10), c(20, 0))
# simulate 0.5 s, level 0 corresponds to state 1, level 1 to state 2
# noise level is 0.3 after filtering
Sim <- contMC(0.5 * sampling, 0:1, rates, sampling=sampling, family="gaussKern",
param = list(df=df.over, over=over, sd=0.3))
plot(Sim$data, pch = ".")
lines(Sim$discr, col = "red")
# fit under multiresolution constraints using filter corresponding to sample rate
df <- dfilter("bessel", list(pole=4, cutoff=cutoff / sampling))
Fit.MRC <- jsmurf(Sim$data$y, Sim$data$x, param=df, r=1e2)
lines(Fit.MRC, col = "blue")
# fit using TRANSIT
Fit.trans <- transit(Sim$data$y, Sim$data$x)
# dashed green TRANSIT fit, for comparison with the blue jsmurf fit above
lines(Fit.trans, col = "green3", lty=2)
| /data/genthat_extracted_code/stepR/examples/transit.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,813 | r | library(stepR)
### Name: transit
### Title: TRANSIT algorithm for detecting jumps
### Aliases: transit
### Keywords: nonparametric
### ** Examples
# NOTE(review): example code extracted from the stepR help page for transit();
# statement order matters below because set.seed() fixes the RNG state for the
# simulation that follows -- do not reorder.
# estimating step-functions with Gaussian white noise added
# simulate a Gaussian hidden Markov model of length 1000 with 2 states
# with identical transition rates 0.01, and signal-to-noise ratio 2
sim <- contMC(1e3, 0:1, matrix(c(0, 0.01, 0.01, 0), 2), param=1/2)
plot(sim$data, cex = 0.1)
# overlay the simulated signal component (sim$cont) in red
lines(sim$cont, col="red")
# maximum-likelihood estimation under multiresolution constraints
fit.MRC <- smuceR(sim$data$y, sim$data$x)
lines(fit.MRC, col="blue")
# choose number of jumps using BIC
path <- steppath(sim$data$y, sim$data$x, max.blocks=1e2)
fit.BIC <- path[[stepsel.BIC(path)]]
lines(fit.BIC, col="green3", lty = 2)
# estimate after filtering
# simulate filtered ion channel recording with two states
set.seed(9)
# sampling rate 10 kHz
sampling <- 1e4
# tenfold oversampling
over <- 10
# 1 kHz 4-pole Bessel-filter, adjusted for oversampling
cutoff <- 1e3
df.over <- dfilter("bessel", list(pole=4, cutoff=cutoff / sampling / over))
# two states, leaving state 1 at 10 Hz, state 2 at 20 Hz
rates <- rbind(c(0, 10), c(20, 0))
# simulate 0.5 s, level 0 corresponds to state 1, level 1 to state 2
# noise level is 0.3 after filtering
Sim <- contMC(0.5 * sampling, 0:1, rates, sampling=sampling, family="gaussKern",
param = list(df=df.over, over=over, sd=0.3))
plot(Sim$data, pch = ".")
lines(Sim$discr, col = "red")
# fit under multiresolution constraints using filter corresponding to sample rate
df <- dfilter("bessel", list(pole=4, cutoff=cutoff / sampling))
Fit.MRC <- jsmurf(Sim$data$y, Sim$data$x, param=df, r=1e2)
lines(Fit.MRC, col = "blue")
# fit using TRANSIT
Fit.trans <- transit(Sim$data$y, Sim$data$x)
# dashed green TRANSIT fit, for comparison with the blue jsmurf fit above
lines(Fit.trans, col = "green3", lty=2)
|
library(reshape2)
run_analysis <- function() {
# read features.txt
features_labels = read.table(file="features.txt", col.names=c("feature_id","feature"))
# clean features labels
features_labels$feature = gsub("[,-]",".",gsub("[()]","",features_labels$feature))
# read activity_labels.txt
activities_labels = read.table(file="activity_labels.txt", col.names=c("activity_id","activity"))
# read subject_train.txt and subject_test.txt to be combined
subjects_train = read.table(file="train/subject_train.txt", col.names=c("subject"))
subjects_test = read.table(file="test/subject_test.txt", col.names=c("subject"))
subjects = rbind(subjects_train,subjects_test)
rm(subjects_train)
rm(subjects_test)
# read X_train.txt and X_test.txt to be combined
# features names are taken from features
features_train = read.table(file="train/X_train.txt", col.names=features_labels$feature)
features_test = read.table(file="test/X_test.txt", col.names=features_labels$feature)
features = rbind(features_train,features_test)
rm(features_train)
rm(features_test)
# read y_train.txt and y_test.txt to be combined
activities_train = read.table(file="train/y_train.txt", col.names=c("activity_id"))
activities_test = read.table(file="test/y_test.txt", col.names=c("activity_id"))
activities = rbind(activities_train,activities_test)
rm(activities_train)
rm(activities_test)
# select only -mean() and -std() features
features_labels=features_labels[grep("\\.(mean|std)\\b",features_labels$feature),]
features = features[,features_labels$feature]
# merge activity names and activity labels deleting activity_id
activities$activity_order = 1:nrow(activities)
activities = merge(activities,activities_labels)
activities = activities[order(activities$activity_order),]
activities$activity_id = NULL
activities$activity_order = NULL
# combine subjects, activities and features
data_raw = cbind(subjects,activities,features)
# finally melt variables using subject and activity, and aggrefate using mean
data_melted = melt(data_raw,id=c("subject","activity"))
data_tidy = dcast(data_melted,subject+activity~variable,mean)
} | /run_analysis.R | no_license | AdemRamadani/Getting_and_Cleaning_Data_Project | R | false | false | 2,201 | r | library(reshape2)
# Build a tidy summary of the HAR dataset: merge the train/test splits, keep
# only the mean/std features, attach activity names, and return the average of
# each feature per subject and activity.
# Expects the dataset files (features.txt, activity_labels.txt, train/, test/)
# in the working directory. Uses '<-' assignment throughout and returns the
# result visibly (the original ended in an assignment, which returns it
# invisibly).
run_analysis <- function() {
  # Feature names, one per column of the X_train/X_test files.
  features_labels <- read.table(file="features.txt", col.names=c("feature_id","feature"))
  # Make the names syntactic: drop "()" and turn "," / "-" into ".".
  features_labels$feature <- gsub("[,-]",".",gsub("[()]","",features_labels$feature))
  # Activity id -> activity name lookup table.
  activities_labels <- read.table(file="activity_labels.txt", col.names=c("activity_id","activity"))
  # Subject ids for the combined train+test rows.
  subjects_train <- read.table(file="train/subject_train.txt", col.names=c("subject"))
  subjects_test <- read.table(file="test/subject_test.txt", col.names=c("subject"))
  subjects <- rbind(subjects_train, subjects_test)
  rm(subjects_train)
  rm(subjects_test)
  # Measurements; column names come from features_labels.
  features_train <- read.table(file="train/X_train.txt", col.names=features_labels$feature)
  features_test <- read.table(file="test/X_test.txt", col.names=features_labels$feature)
  features <- rbind(features_train, features_test)
  rm(features_train)
  rm(features_test)
  # Activity ids for the combined rows.
  activities_train <- read.table(file="train/y_train.txt", col.names=c("activity_id"))
  activities_test <- read.table(file="test/y_test.txt", col.names=c("activity_id"))
  activities <- rbind(activities_train, activities_test)
  rm(activities_train)
  rm(activities_test)
  # Keep only the ".mean" / ".std" feature columns (word boundary excludes
  # names such as meanFreq).
  features_labels <- features_labels[grep("\\.(mean|std)\\b",features_labels$feature),]
  features <- features[, features_labels$feature]
  # Replace activity ids with activity names; merge() reorders rows, so record
  # the original order first and restore it afterwards.
  activities$activity_order <- seq_len(nrow(activities))
  activities <- merge(activities, activities_labels)
  activities <- activities[order(activities$activity_order),]
  activities$activity_id <- NULL
  activities$activity_order <- NULL
  # One wide table: subject, activity, and all selected features.
  data_raw <- cbind(subjects, activities, features)
  # Melt to long form, then take the mean of every feature per
  # subject/activity pair.
  data_melted <- melt(data_raw, id=c("subject","activity"))
  dcast(data_melted, subject + activity ~ variable, mean)
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/anova.R
\name{arrayAnova}
\alias{arrayAnova}
\title{Perform ANOVA (potentially with TFCE correction) on arrays}
\usage{
arrayAnova(
.arraydat,
factordef,
bwdat = NULL,
verbose = TRUE,
perm = NULL,
tfce = NULL,
parallel = NULL,
seed = NULL
)
}
\arguments{
\item{.arraydat}{a numeric array with named dimnames containing the EEG (or
other) data. Missing values are not allowed.}
\item{factordef}{a named list of factor definitions, containing the following
elements:
\itemize{
\item{between: }{character vector of between-subject factors (default: NULL)}
\item{within: }{character vector of within-subject factors (default: NULL)}
\item{w_id: }{name of the dimension which identifies the subjects
(default: "id")}
\item{observed: }{character vector of observed (i.e., not manipulated)
variables (default: NULL). Only used for effect-size calculations.}
}}
\item{bwdat}{a data.frame which contains the identification codes
(factordef$w_id) and all subject-level variables (usually factors) listed in
'factordef$between'. Missing values are not allowed.}
\item{verbose}{logical value indicating if p-values and effect sizes should
be computed for the traditional ANOVA results}
\item{perm}{either 1) NULL (the default) or FALSE (the same as NULL),
both of which mean no permutation, or 2) TRUE, which means
permutation with default parameters, or 3) an object as returned by
\code{\link{permParams}} with custom parameters (see Examples and also
\code{\link{permParams}}).\cr
Custom parameters can be also provided by \code{perm = .(key = value)} to
save typing (this works by calling \code{\link{permParams}} with the
given parameters).}
\item{tfce}{either 1) NULL (the default) or FALSE (the same as NULL), both of
which mean no TFCE correction, or 2) TRUE, which means TFCE correction with
default parameters, or 3) an object as returned by \code{\link{tfceParams}}
with custom TFCE parameters (see Examples and also \code{\link{tfceParams}}).\cr
Custom parameters can be also provided by \code{tfce = .(key = value)} to
save typing (this works by calling \code{\link{tfceParams}} with the given
parameters).}
\item{parallel}{either 1) NULL (the default) or FALSE (the same as NULL),
both of which mean single-core computation, or 2) TRUE, which means
parallelization with default parameters, or 3) an object as returned by
\code{\link{parallelParams}} with custom parameters (see Examples and also
\code{\link{parallelParams}}).\cr
Custom parameters can be also provided by \code{parallel = .(key = value)} to
save typing (this works by calling \code{\link{parallelParams}} with the
given parameters).}
\item{seed}{an integer value which specifies a seed (default: NULL), or a
list of arguments passed to \code{\link{set.seed}}}
}
\value{
A list object with the following numeric arrays:
\itemize{
\item{stat: }{a numeric array of F-values, with attribute 'Df' (an integer
matrix) containing the term- and residual degrees of freedom for each model
term. Optionally (if 'verbose' is TRUE), the F-value array has two extra
attributes: 'p_value' and 'effect_size', consisting of the traditional
p-values and the generalized eta squared effect size measures
(see References), respectively.}
\item{stat_corr: }{a numeric array of TFCE-corrected F-values (if requested)}
\item{p_corr: }{a numeric array of permutation-based p-values (if requested)}
}
}
\description{
\code{arrayAnova} performs point-to-point ANOVAs on arrays. Permutation-based
p-values and Threshold-free Cluster Enhancement (TFCE) correction can be
requested.
}
\details{
The function assumes that the input array contains at least three
named dimensions: "chan" (corresponding to the channels [electrodes]), "time"
(corresponding to time points), and a subject identifier as given by
\code{factordef$w_id}. All dimensions which are not listed as
within-subject factors are treated in a similar way as chan and time, that is
separate ANOVA-s are computed for each level of those dimensions.
}
\note{
The function computes type I p-values - this is correct if the design
is fully balanced and orthogonal (if the number of between-subject
factors is one, it may have slightly unequal group sizes).
}
\examples{
# example dataset
data(erps)
dat_id <- attr(erps, "id") # to get group memberships
chan_pos <- attr(erps, "chan") # needed for TFCE correction
# make the dataset unbalanced to illustrate that if there is only one
# between-subject factor, Type 1 and Type 2 results are identical
erps <- subsetArray(erps, list(id = 2:20))
dat_id <- dat_id[2:20, ]
# average the data in each 12 ms time-bin to decrease the computational
# burden (not needed in serious analyses)
tempdat <- avgBin(erps, "time", 6)
# analyze the effect of the reading group (between-subject factor) and the
# two experimental conditions (stimclass, pairtye; within-subject factors)
# for each channel and time sample (without requesting TFCE correction); this
# means we run 1518 repeated measures ANOVAs
system.time(
result_eegr <- arrayAnova(tempdat,
list(between = "group",
within = c("stimclass", "pairtype"),
w_id = "id",
observed = "group"),
bwdat = dat_id)
)
# if package 'ez' is installed, you can compare the results; we take only a
# subset of the data (choosing "02" channel at time point "207") because
# ezANOVA is much slower
if (requireNamespace("ez")) {
sub <- list(chan = "O2", time = "207")
tempdat_ez <- transformArray(y ~ ., tempdat, subset = sub)
tempdat_ez$group <- factor(dat_id$group[match(tempdat_ez$id, dat_id$id)])
result_ez <- ez::ezANOVA(tempdat_ez, y, id, .(stimclass, pairtype),
between = group, observed = group, type = 2)
# compare results
ez_F <- result_ez$ANOVA$F # F-values
ez_p <- result_ez$ANOVA$p # p-values
ez_es <- result_ez$ANOVA$ges # effect sizes
eegr_F <- as.vector(
subsetArray(extract(result_eegr, "stat"), sub))
eegr_p <- as.vector(
subsetArray(extract(result_eegr, "p"), sub))
eegr_es <- as.vector(
subsetArray(extract(result_eegr, "es"), sub))
stopifnot(
all.equal(ez_F, eegr_F),
all.equal(ez_p, eegr_p),
all.equal(ez_es, eegr_es)
)
}
# the between-subject variable could be numeric, too. Let's create an 'age'
# variable and analyze the effects.
dat_id$age <- rnorm(nrow(dat_id), 10, 1)
result_eegr_c <- arrayAnova(tempdat,
list(between = "age",
within = c("stimclass", "pairtype"),
w_id = "id",
observed = "age"),
bwdat = dat_id)
# if package 'car' is installed, you can compare the results; we take only a
# subset of the data (choosing "01" channel at time point "195")
if (requireNamespace("car")) {
#
# subsetting indices
subs <- list(chan = "O1", time = "195")
#
# extract F values and p values from the eegR results
eegr_F <- subsetArray(extract(result_eegr_c, "stat"), subs)
eegr_p <- subsetArray(extract(result_eegr_c, "p"), subs)
#
# run the same analysis with the 'car' package (first we have to reshape
# the data)
tempdat_car <- subsetArray(tempdat, subs)
tempdat_car <- mergeDims(tempdat_car,
list("id", c("stimclass", "pairtype")))
tempdat_car <- data.frame(tempdat_car)
tempdat_car$id <- dat_id$id
tempdat_car$age <- dat_id$age
idata <- expand.grid(dimnames(tempdat)[c("stimclass", "pairtype")])
maov <- lm(cbind(A.ident, B.ident, C.ident,
A.subst, B.subst, C.subst,
A.transp, B.transp, C.transp) ~ age,
data = tempdat_car)
maov <- car::Anova(maov, idata = idata, idesign= ~stimclass*pairtype,
type = 2)
maov <- summary(maov, multivariate = FALSE)$univ
car_F <- maov[names(eegr_F), "F"]
car_p <- maov[names(eegr_p), "Pr(>F)"]
#
# compare results
stopifnot(
all.equal(as.vector(car_F), as.vector(eegr_F)),
all.equal(as.vector(car_p), as.vector(eegr_p))
)
}
# in order to use TFCE correction, the channel neighbourhood matrix is needed
# (see ?tfceParams and ?chanNb)
ChN <- chanNb(chan_pos, alpha = 0.7)
# now analyze the data by collapsing the pairtypes, and apply TFCE correction
# (note: this will take a couple of seconds); use more randomization
# runs (n should be several thousand instead of 499L) in serious analyses
tempdat <- avgDims(tempdat, "pairtype")
result_tfce <- arrayAnova(tempdat,
list(between = "group",
within = "stimclass",
w_id = "id",
observed = "group"),
bwdat = dat_id,
perm = .(n = 499L),
tfce = .(ChN = ChN),
parallel = .(ncores = 2))
# plot the corrected and uncorrected results
modelplot(result_tfce)
modelplot(result_tfce, type = "unc")
# compare traditional and TFCE p-values
p_all <- extract(result_tfce, c("p", "p_corr"))
p_all <- bindArrays(trad = p_all$p, tfce = p_all$p_corr,
along_name = "method")
# plot p-values after -log transformation to increase discriminability;
# note how the sporadic effects disappear
p_plot <- imageValues(-log(p_all)) # returns a ggplot object
p_plot
}
\references{
The TFCE correction follows:\cr
Mensen, A. and Khatami, R. (2013)
Advanced EEG analysis using threshold-free cluster-enhancement and
non-parametric statistics. Neuroimage, 67, 111-118.
doi:10.1016/j.neuroimage.2012.10.027 \cr
The Generalized Eta Squared effect size statistic is described in:\cr
Olejnik, S., Algina, J. (2003) Generalized eta and omega squared statistics:
Measures of effect size for some common research designs. Psychological
Methods 8: pp. 434-447. doi:10.1037/1082-989X.8.4.434 \cr
Bakeman, R. (2005) Recommended effect size statistics for repeated measures
designs. Behavior Research Methods, 37 (3), 379-384.
}
\seealso{
See also the related methods to explore the results, e.g.
\code{\link{extract.arrayAnova}}, \code{\link{summary.arrayAnova}}, and the
plotting functions \code{\link{modelplot}}, or the lower-level
\code{\link{imageValues}} and \code{\link{imagePvalues}}.
}
| /man/arrayAnova.Rd | no_license | tdeenes/eegR | R | false | true | 10,579 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/anova.R
\name{arrayAnova}
\alias{arrayAnova}
\title{Perform ANOVA (potentially with TFCE correction) on arrays}
\usage{
arrayAnova(
.arraydat,
factordef,
bwdat = NULL,
verbose = TRUE,
perm = NULL,
tfce = NULL,
parallel = NULL,
seed = NULL
)
}
\arguments{
\item{.arraydat}{a numeric array with named dimnames containing the EEG (or
other) data. Missing values are not allowed.}
\item{factordef}{a named list of factor definitions, containing the following
elements:
\itemize{
\item{between: }{character vector of between-subject factors (default: NULL)}
\item{within: }{character vector of within-subject factors (default: NULL)}
\item{w_id: }{name of the dimension which identifies the subjects
(default: "id")}
\item{observed: }{character vector of observed (i.e., not manipulated)
variables (default: NULL). Only used for effect-size calculations.}
}}
\item{bwdat}{a data.frame which contains the identification codes
(factordef$w_id) and all subject-level variables (usually factors) listed in
'factordef$between'. Missing values are not allowed.}
\item{verbose}{logical value indicating if p-values and effect sizes should
be computed for the traditional ANOVA results}
\item{perm}{either 1) NULL (the default) or FALSE (the same as NULL),
both of which mean no permutation, or 2) TRUE, which means
permutation with default parameters, or 3) an object as returned by
\code{\link{permParams}} with custom parameters (see Examples and also
\code{\link{permParams}}).\cr
Custom parameters can be also provided by \code{perm = .(key = value)} to
save typing (this works by calling \code{\link{permParams}} with the
given parameters).}
\item{tfce}{either 1) NULL (the default) or FALSE (the same as NULL), both of
which mean no TFCE correction, or 2) TRUE, which means TFCE correction with
default parameters, or 3) an object as returned by \code{\link{tfceParams}}
with custom TFCE parameters (see Examples and also \code{\link{tfceParams}}).\cr
Custom parameters can be also provided by \code{tfce = .(key = value)} to
save typing (this works by calling \code{\link{tfceParams}} with the given
parameters).}
\item{parallel}{either 1) NULL (the default) or FALSE (the same as NULL),
both of which mean single-core computation, or 2) TRUE, which means
parallelization with default parameters, or 3) an object as returned by
\code{\link{parallelParams}} with custom parameters (see Examples and also
\code{\link{parallelParams}}).\cr
Custom parameters can be also provided by \code{parallel = .(key = value)} to
save typing (this works by calling \code{\link{parallelParams}} with the
given parameters).}
\item{seed}{an integer value which specifies a seed (default: NULL), or a
list of arguments passed to \code{\link{set.seed}}}
}
\value{
A list object with the following numeric arrays:
\itemize{
\item{stat: }{a numeric array of F-values, with attribute 'Df' (an integer
matrix) containing the term- and residual degrees of freedom for each model
term. Optionally (if 'verbose' is TRUE), the F-value array has two extra
attributes: 'p_value' and 'effect_size', consisting of the traditional
p-values and the generalized eta squared effect size measures
(see References), respectively.}
\item{stat_corr: }{a numeric array of TFCE-corrected F-values (if requested)}
\item{p_corr: }{a numeric array of permutation-based p-values (if requested)}
}
}
\description{
\code{arrayAnova} performs point-to-point ANOVAs on arrays. Permutation-based
p-values and Threshold-free Cluster Enhancement (TFCE) correction can be
requested.
}
\details{
The function assumes that the input array contains at least three
named dimensions: "chan" (corresponding to the channels [electrodes]), "time"
(corresponding to time points), and a subject identifier as given by
\code{factordef$w_id}. All dimensions which are not listed as
within-subject factors are treated in a similar way as chan and time, that is
separate ANOVA-s are computed for each level of those dimensions.
}
\note{
The function computes type I p-values - this is correct if the design
is fully balanced and orthogonal (if the number of between-subject
factors is one, it may have slightly unequal group sizes).
}
\examples{
# example dataset
data(erps)
dat_id <- attr(erps, "id") # to get group memberships
chan_pos <- attr(erps, "chan") # needed for TFCE correction
# make the dataset unbalanced to illustrate that if there is only one
# between-subject factor, Type 1 and Type 2 results are identical
erps <- subsetArray(erps, list(id = 2:20))
dat_id <- dat_id[2:20, ]
# average the data in each 12 ms time-bin to decrease the computational
# burden (not needed in serious analyses)
tempdat <- avgBin(erps, "time", 6)
# analyze the effect of the reading group (between-subject factor) and the
# two experimental conditions (stimclass, pairtype; within-subject factors)
# for each channel and time sample (without requesting TFCE correction); this
# means we run 1518 repeated measures ANOVAs
system.time(
result_eegr <- arrayAnova(tempdat,
list(between = "group",
within = c("stimclass", "pairtype"),
w_id = "id",
observed = "group"),
bwdat = dat_id)
)
# if package 'ez' is installed, you can compare the results; we take only a
# subset of the data (choosing "02" channel at time point "207") because
# ezANOVA is much slower
if (requireNamespace("ez")) {
sub <- list(chan = "O2", time = "207")
tempdat_ez <- transformArray(y ~ ., tempdat, subset = sub)
tempdat_ez$group <- factor(dat_id$group[match(tempdat_ez$id, dat_id$id)])
result_ez <- ez::ezANOVA(tempdat_ez, y, id, .(stimclass, pairtype),
between = group, observed = group, type = 2)
# compare results
ez_F <- result_ez$ANOVA$F # F-values
ez_p <- result_ez$ANOVA$p # p-values
ez_es <- result_ez$ANOVA$ges # effect sizes
eegr_F <- as.vector(
subsetArray(extract(result_eegr, "stat"), sub))
eegr_p <- as.vector(
subsetArray(extract(result_eegr, "p"), sub))
eegr_es <- as.vector(
subsetArray(extract(result_eegr, "es"), sub))
stopifnot(
all.equal(ez_F, eegr_F),
all.equal(ez_p, eegr_p),
all.equal(ez_es, eegr_es)
)
}
# the between-subject variable could be numeric, too. Let's create an 'age'
# variable and analyze the effects.
dat_id$age <- rnorm(nrow(dat_id), 10, 1)
result_eegr_c <- arrayAnova(tempdat,
list(between = "age",
within = c("stimclass", "pairtype"),
w_id = "id",
observed = "age"),
bwdat = dat_id)
# if package 'car' is installed, you can compare the results; we take only a
# subset of the data (choosing "01" channel at time point "195")
if (requireNamespace("car")) {
#
# subsetting indices
subs <- list(chan = "O1", time = "195")
#
# extract F values and p values from the eegR results
eegr_F <- subsetArray(extract(result_eegr_c, "stat"), subs)
eegr_p <- subsetArray(extract(result_eegr_c, "p"), subs)
#
# run the same analysis with the 'car' package (first we have to reshape
# the data)
tempdat_car <- subsetArray(tempdat, subs)
tempdat_car <- mergeDims(tempdat_car,
list("id", c("stimclass", "pairtype")))
tempdat_car <- data.frame(tempdat_car)
tempdat_car$id <- dat_id$id
tempdat_car$age <- dat_id$age
idata <- expand.grid(dimnames(tempdat)[c("stimclass", "pairtype")])
maov <- lm(cbind(A.ident, B.ident, C.ident,
A.subst, B.subst, C.subst,
A.transp, B.transp, C.transp) ~ age,
data = tempdat_car)
maov <- car::Anova(maov, idata = idata, idesign= ~stimclass*pairtype,
type = 2)
maov <- summary(maov, multivariate = FALSE)$univ
car_F <- maov[names(eegr_F), "F"]
car_p <- maov[names(eegr_p), "Pr(>F)"]
#
# compare results
stopifnot(
all.equal(as.vector(car_F), as.vector(eegr_F)),
all.equal(as.vector(car_p), as.vector(eegr_p))
)
}
# in order to use TFCE correction, the channel neighbourhood matrix is needed
# (see ?tfceParams and ?chanNb)
ChN <- chanNb(chan_pos, alpha = 0.7)
# now analyze the data by collapsing the pairtypes, and apply TFCE correction
# (note: this will take a couple of seconds); use more randomization
# runs (n should be several thousand instead of 499L) in serious analyses
tempdat <- avgDims(tempdat, "pairtype")
result_tfce <- arrayAnova(tempdat,
list(between = "group",
within = "stimclass",
w_id = "id",
observed = "group"),
bwdat = dat_id,
perm = .(n = 499L),
tfce = .(ChN = ChN),
parallel = .(ncores = 2))
# plot the corrected and uncorrected results
modelplot(result_tfce)
modelplot(result_tfce, type = "unc")
# compare traditional and TFCE p-values
p_all <- extract(result_tfce, c("p", "p_corr"))
p_all <- bindArrays(trad = p_all$p, tfce = p_all$p_corr,
along_name = "method")
# plot p-values after -log transformation to increase discriminability;
# note how the sporadic effects disappear
p_plot <- imageValues(-log(p_all)) # returns a ggplot object
p_plot
}
\references{
The TFCE correction follows:\cr
Mensen, A. and Khatami, R. (2013)
Advanced EEG analysis using threshold-free cluster-enhancement and
non-parametric statistics. Neuroimage, 67, 111-118.
doi:10.1016/j.neuroimage.2012.10.027 \cr
The Generalized Eta Squared effect size statistic is described in:\cr
Olejnik, S., Algina, J. (2003) Generalized eta and omega squared statistics:
Measures of effect size for some common research designs. Psychological
Methods 8: pp. 434-447. doi:10.1037/1082-989X.8.4.434 \cr
Bakeman, R. (2005) Recommended effect size statistics for repeated measures
designs. Behavior Research Methods, 37 (3), 379-384.
}
\seealso{
See also the related methods to explore the results, e.g.
\code{\link{extract.arrayAnova}}, \code{\link{summary.arrayAnova}}, and the
plotting functions \code{\link{modelplot}}, or the lower-level
\code{\link{imageValues}} and \code{\link{imagePvalues}}.
}
|
# Update a previously computed TPI database with new genes and/or new priors.
# Probability tables are recomputed only for genes that are new with a
# non-zero prior, or whose prior switched from zero to non-zero.
#
# Args:
#   TPI:     existing TPI database (as returned by CalculateTPI()/UpdateTPI())
#   dataset: expression dataset, forwarded unchanged to CalculateTPI()
#   l_genes: character vector of gene identifiers
#   l_prior: numeric prior values, parallel to l_genes
# Returns: an updated TPI database with the same structure as the input TPI.
UpdateTPI <- function(TPI, dataset, l_genes, l_prior)
{
  uptodate <- TRUE
  names(l_prior) <- l_genes
  # Union of the stored gene list and the newly supplied one.
  l_genes_tot <- unique(c(TPI$input$l_genes, l_genes))
  prob_TPI <- TPI$prob_TPI
  prob_TPI_domain <- TPI$prob_TPI_domain
  # Index vector mapping each gene to its probability table (0 = none yet).
  prob_TPI_ind <- rep(0, length(l_genes_tot))
  names(prob_TPI_ind) <- l_genes_tot
  prob_TPI_ind[match(TPI$input$l_genes, l_genes_tot)] <- TPI$prob_TPI_ind
  # Genes absent from the stored database (substract() is the package-local
  # set-difference helper).
  new_genes <- substract(l_genes, TPI$input$l_genes)
  new_prior <- l_prior[match(new_genes, l_genes)]
  # Merge the stored priors with the priors of the new genes.
  l_prior_tot <- rep(0, length(l_genes_tot))
  l_prior_tot[match(TPI$input$l_genes, l_genes_tot)] <- TPI$input$l_prior
  l_prior_tot[match(new_genes, l_genes_tot)] <- new_prior
  names(l_prior_tot) <- l_genes_tot
  # BUG FIX: 'gene_changed_prior' used to be created only inside the
  # 'if (length(common_genes) > 0)' branch, so the later
  # 'length(gene_changed_prior)' test raised "object not found" whenever the
  # stored and supplied gene lists shared no genes. Initialize it up front.
  gene_changed_prior <- character(0)
  common_genes <- intersect(l_genes, TPI$input$l_genes)
  if (length(common_genes) > 0)
  {
    # Shared genes whose prior switched from zero to non-zero.
    gene_changed_prior <- common_genes[which(sign(abs(l_prior[common_genes])) > sign(abs(TPI$input$l_prior[common_genes])))]
  }
  if (sum(abs(new_prior)) > 0)
  {
    uptodate <- FALSE
    # Compute probability tables for the new genes with non-zero priors,
    # reusing the simulation settings stored with the database.
    TPI_new <- CalculateTPI(dataset, new_genes, new_prior, TPI$input$times, TPI$input$time_step, TPI$input$N, TPI$input$ks_int, TPI$input$kd_int, TPI$input$delta_int, TPI$input$noise, TPI$input$delay)
    for (k in seq_along(new_genes))
    {
      if (new_prior[k] != 0)
      {
        prob_TPI_ind[match(new_genes[k], l_genes_tot)] <- length(prob_TPI) + 1
        prob_TPI_domain[[length(prob_TPI) + 1]] <- TPI_new$prob_TPI_domain[[TPI_new$prob_TPI_ind[new_genes[k]]]]
        prob_TPI[[length(prob_TPI) + 1]] <- TPI_new$prob_TPI[[TPI_new$prob_TPI_ind[new_genes[k]]]]
      }
    }
  }
  if (length(gene_changed_prior) > 0)
  {
    uptodate <- FALSE
    # Recompute tables for shared genes whose prior became non-zero.
    TPI_new <- CalculateTPI(dataset, gene_changed_prior, rep(1, length(gene_changed_prior)), TPI$input$times, TPI$input$time_step, TPI$input$N, TPI$input$ks_int, TPI$input$kd_int, TPI$input$delta_int, TPI$input$noise, TPI$input$delay)
    for (k in seq_along(gene_changed_prior))
    {
      prob_TPI_ind[match(gene_changed_prior[k], l_genes_tot)] <- length(prob_TPI) + 1
      prob_TPI_domain[[length(prob_TPI) + 1]] <- TPI_new$prob_TPI_domain[[k]]
      prob_TPI[[length(prob_TPI) + 1]] <- TPI_new$prob_TPI[[k]]
    }
  }
  if (uptodate)
  {
    message("The TPI database is up-to-date.")
  } else
  {
    message("The TPI database has been updated.")
  }
  input <- list(l_genes = l_genes_tot, l_prior = l_prior_tot, times = TPI$input$times, time_step = TPI$input$time_step, N = TPI$input$N, ks_int = TPI$input$ks_int, kd_int = TPI$input$kd_int, delta_int = TPI$input$delta_int, noise = TPI$input$noise, delay = TPI$input$delay)
  output <- list(prob_TPI_ind = prob_TPI_ind, prob_TPI = prob_TPI, prob_TPI_domain = prob_TPI_domain, input = input)
  return(output)
}
| /TDCor/R/UpdateTPI.R | no_license | ingted/R-Examples | R | false | false | 2,435 | r | UpdateTPI <-
# Update a previously computed TPI database with new genes and/or new priors.
# Probability tables are recomputed only for genes that are new with a
# non-zero prior, or whose prior switched from zero to non-zero.
# (Assigned to 'UpdateTPI' by the preceding line.)
function(TPI, dataset, l_genes, l_prior)
{
  uptodate <- TRUE
  names(l_prior) <- l_genes
  # Union of the stored gene list and the newly supplied one.
  l_genes_tot <- unique(c(TPI$input$l_genes, l_genes))
  prob_TPI <- TPI$prob_TPI
  prob_TPI_domain <- TPI$prob_TPI_domain
  # Index vector mapping each gene to its probability table (0 = none yet).
  prob_TPI_ind <- rep(0, length(l_genes_tot))
  names(prob_TPI_ind) <- l_genes_tot
  prob_TPI_ind[match(TPI$input$l_genes, l_genes_tot)] <- TPI$prob_TPI_ind
  # Genes absent from the stored database (substract() is the package-local
  # set-difference helper).
  new_genes <- substract(l_genes, TPI$input$l_genes)
  new_prior <- l_prior[match(new_genes, l_genes)]
  # Merge the stored priors with the priors of the new genes.
  l_prior_tot <- rep(0, length(l_genes_tot))
  l_prior_tot[match(TPI$input$l_genes, l_genes_tot)] <- TPI$input$l_prior
  l_prior_tot[match(new_genes, l_genes_tot)] <- new_prior
  names(l_prior_tot) <- l_genes_tot
  # BUG FIX: 'gene_changed_prior' used to be created only inside the
  # 'if (length(common_genes) > 0)' branch, so the later
  # 'length(gene_changed_prior)' test raised "object not found" whenever the
  # stored and supplied gene lists shared no genes. Initialize it up front.
  gene_changed_prior <- character(0)
  common_genes <- intersect(l_genes, TPI$input$l_genes)
  if (length(common_genes) > 0)
  {
    # Shared genes whose prior switched from zero to non-zero.
    gene_changed_prior <- common_genes[which(sign(abs(l_prior[common_genes])) > sign(abs(TPI$input$l_prior[common_genes])))]
  }
  if (sum(abs(new_prior)) > 0)
  {
    uptodate <- FALSE
    # Compute probability tables for the new genes with non-zero priors,
    # reusing the simulation settings stored with the database.
    TPI_new <- CalculateTPI(dataset, new_genes, new_prior, TPI$input$times, TPI$input$time_step, TPI$input$N, TPI$input$ks_int, TPI$input$kd_int, TPI$input$delta_int, TPI$input$noise, TPI$input$delay)
    for (k in seq_along(new_genes))
    {
      if (new_prior[k] != 0)
      {
        prob_TPI_ind[match(new_genes[k], l_genes_tot)] <- length(prob_TPI) + 1
        prob_TPI_domain[[length(prob_TPI) + 1]] <- TPI_new$prob_TPI_domain[[TPI_new$prob_TPI_ind[new_genes[k]]]]
        prob_TPI[[length(prob_TPI) + 1]] <- TPI_new$prob_TPI[[TPI_new$prob_TPI_ind[new_genes[k]]]]
      }
    }
  }
  if (length(gene_changed_prior) > 0)
  {
    uptodate <- FALSE
    # Recompute tables for shared genes whose prior became non-zero.
    TPI_new <- CalculateTPI(dataset, gene_changed_prior, rep(1, length(gene_changed_prior)), TPI$input$times, TPI$input$time_step, TPI$input$N, TPI$input$ks_int, TPI$input$kd_int, TPI$input$delta_int, TPI$input$noise, TPI$input$delay)
    for (k in seq_along(gene_changed_prior))
    {
      prob_TPI_ind[match(gene_changed_prior[k], l_genes_tot)] <- length(prob_TPI) + 1
      prob_TPI_domain[[length(prob_TPI) + 1]] <- TPI_new$prob_TPI_domain[[k]]
      prob_TPI[[length(prob_TPI) + 1]] <- TPI_new$prob_TPI[[k]]
    }
  }
  if (uptodate)
  {
    message("The TPI database is up-to-date.")
  } else
  {
    message("The TPI database has been updated.")
  }
  input <- list(l_genes = l_genes_tot, l_prior = l_prior_tot, times = TPI$input$times, time_step = TPI$input$time_step, N = TPI$input$N, ks_int = TPI$input$ks_int, kd_int = TPI$input$kd_int, delta_int = TPI$input$delta_int, noise = TPI$input$noise, delay = TPI$input$delay)
  output <- list(prob_TPI_ind = prob_TPI_ind, prob_TPI = prob_TPI, prob_TPI_domain = prob_TPI_domain, input = input)
  return(output)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get-data.R
\name{get_data}
\alias{get_data}
\title{Get data from Statistics Netherlands (CBS)}
\usage{
get_data(id, ..., recode = TRUE, use_column_title = recode,
dir = tempdir(), base_url = getOption("cbsodataR.base_url",
BASE_URL))
}
\arguments{
\item{id}{Identifier of table, can be found in \code{\link{get_table_list}}}
\item{...}{optional filter statements}
\item{recode}{Should the categories of the table be recoded with their title
(TRUE) or with their key (FALSE)?}
\item{use_column_title}{Should column names be coded with title (TRUE)
or key (FALSE)}
\item{dir}{Directory where the table should be downloaded. Defaults to temporary
directory}
\item{base_url}{optionally specify a different server. Useful for
third party data services implementing the same protocol.}
}
\value{
\code{data.frame} with the requested data. Note that a csv copy of the data is stored in \code{dir}.
}
\description{
Retrieves data from a table of Statistics Netherlands. A list of tables
can be retrieved with \code{\link{get_table_list}}.
Optionally the data can be filtered on category values.
The filter is specified with \code{<column_name> = <values>} in which \code{<values>} is a character vector.
Rows with values that are not part of the character vector are not returned. Note that the values
have to be raw (un-recoded) values.
}
\note{
All data are downloaded using \code{\link{download_table}}
}
\examples{
\dontrun{
# get data for main (000000) Consumer Price Index (7196ENG) for March 2000,
get_data(id="7196ENG", Periods="2000MM03", CPI="000000")
}
}
| /man/get_data.Rd | no_license | jacobkap/cbsodataR | R | false | true | 1,650 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get-data.R
\name{get_data}
\alias{get_data}
\title{Get data from Statistics Netherlands (CBS)}
\usage{
get_data(id, ..., recode = TRUE, use_column_title = recode,
dir = tempdir(), base_url = getOption("cbsodataR.base_url",
BASE_URL))
}
\arguments{
\item{id}{Identifier of table, can be found in \code{\link{get_table_list}}}
\item{...}{optional filter statements}
\item{recode}{Should the categories of the table be recoded with their title
(TRUE) or with their key (FALSE)?}
\item{use_column_title}{Should column names be coded with title (TRUE)
or key (FALSE)}
\item{dir}{Directory where the table should be downloaded. Defaults to temporary
directory}
\item{base_url}{optionally specify a different server. Useful for
third party data services implementing the same protocol.}
}
\value{
\code{data.frame} with the requested data. Note that a csv copy of the data is stored in \code{dir}.
}
\description{
Retrieves data from a table of Statistics Netherlands. A list of tables
can be retrieved with \code{\link{get_table_list}}.
Optionally the data can be filtered on category values.
The filter is specified with \code{<column_name> = <values>} in which \code{<values>} is a character vector.
Rows with values that are not part of the character vector are not returned. Note that the values
have to be raw (un-recoded) values.
}
\note{
All data are downloaded using \code{\link{download_table}}
}
\examples{
\dontrun{
# get data for main (000000) Consumer Price Index (7196ENG) for March 2000,
get_data(id="7196ENG", Periods="2000MM03", CPI="000000")
}
}
|
# plots.plot_ScatterMatrix
# plots.ggplot_theme
# plots.ggplot_theme_casual
# plots.install_xkcd
# plots.ggplot_labels
# plots.hclust
############################################################################################
############################################################################################
# Scatterplot matrix of the selected columns of 'df', coloured by the 'tag'
# column; the upper triangle is left blank and only one legend is kept.
#
# Args:
#   df:            data frame holding the data (must contain a 'tag' column)
#   columnsToPlot: column indices/names forwarded to GGally::ggpairs()
# Returns: the modified ggpairs plot matrix.
plots.plot_ScatterMatrix <- function(df, columnsToPlot)
{
library(GGally, quietly=TRUE)  # was quietly=T; avoid the reassignable T/F shortcuts
p <- ggpairs(data=df, columns=columnsToPlot, upper="blank", lower=list(continuous="points"), legends=FALSE, mapping=ggplot2::aes(colour=tag))
for (i in seq_along(columnsToPlot))
{
# Address only the diagonal elements
# Get plot out of matrix
inner <- getPlot(p, i, i)
# Add any ggplot2 settings you want (blank grid here)
inner <- inner + theme(panel.grid=element_blank()) + theme(axis.text.x=element_blank())
# Put it back into the matrix
p <- putPlot(p, inner, i, i)
for (j in seq_along(columnsToPlot))
{
if (i == 1 && j == 1)  # scalar test: use && rather than elementwise &
{
# Move legend right
# NOTE(review): legend.position is normally a pair of relative [0,1]
# coordinates; length(columnsToPlot)-0.25 exceeds that range -- confirm
# the intended placement.
inner <- getPlot(p, i, j)
inner <- inner + theme(legend.position=c(length(columnsToPlot)-0.25,0.50))
p <- putPlot(p, inner, i, j)
}
else{
# Delete legend
inner <- getPlot(p, i, j)
inner <- inner + theme(legend.position="none")
p <- putPlot(p, inner, i, j)
}
}
}
return(p)
}
############################################################################################
plots.ggplot_theme <- function(g)
{
# Publication-style theme: classic look, bold 16 pt axis text and titles,
# no panel border, and explicit black axis lines.
themed <- g + theme_classic()
themed <- themed +
theme(axis.text=element_text(size=16, face="bold"), axis.title.y=element_text(vjust=0.9), axis.title.x=element_text(vjust=-0.6)) +
theme(axis.title=element_text(size=16, face="bold")) +
theme(panel.border=element_blank()) +
theme(axis.line.x=element_line(color="black", size=0.7), axis.line.y=element_line(color="black", size=0.7))
themed
}
############################################################################################
# Build a casual, hand-drawn ("xkcd") ggplot2 theme object.
# NOTE(review): the parameter 'g' is never used inside this function -- the
# caller receives only the theme object and must add it to a plot with '+'.
plots.ggplot_theme_casual <- function(g)
{
# Install the 'xkcd' package on first use (side effect: network download).
if(!"xkcd" %in% rownames(installed.packages()))
{
install.packages("xkcd")
}
library("xkcd")
# Black axis lines and 10 pt black axis text; the xkcd font family is used
# for the plot title and all other text elements.
return (theme(axis.line.x=element_line(size=0.5, colour="black"), axis.line.y=element_line(size=0.5, colour="black"), axis.text.x=element_text(colour="black", size=10), axis.text.y=element_text(colour="black", size=10), panel.background=element_blank(), plot.title=element_text(family="xkcd"), text=element_text(family="xkcd")))
}
############################################################################################
# One-time setup helper: installs the 'xkcd' and 'sysfonts' packages and
# registers the xkcd TrueType font downloaded from simonsoftware.se.
# NOTE(review): the system() calls below copy the font into ~/Library/Fonts,
# which is Mac OS X specific (adapted from the xkcd package vignette) --
# confirm the target directory before running on another platform.
plots.install_xkcd <- function()
{
install.packages("xkcd")
if(!"sysfonts" %in% rownames(installed.packages()))
{
install.packages("sysfonts")
}
library(sysfonts)
# Download the font file into the working directory (binary mode).
download.file("http://simonsoftware.se/other/xkcd.ttf", dest="xkcd.ttf", mode="wb")
# The following steps are adapted from the xkcd package vignette
# for a Mac OS X machine.
system("cp xkcd.ttf ~/Library/Fonts")
system("rm xkcd.ttf")
# Fail loudly if the font did not get registered.
stopifnot("xkcd.ttf" %in% font.files())
}
############################################################################################
plots.ggplot_labels <- function(g, xlabel, ylabel, title)
{
# Attach axis labels and a plot title to an existing ggplot object.
labelled <- g + labs(x=paste0(xlabel), y=ylabel) + ggtitle(title)
labelled
}
############################################################################################
# Plot a hierarchical clustering as a horizontal ggplot dendrogram with
# bold leaf labels.
#
# Args:
#   hc: an object returned by hclust()
# Returns: the ggplot object (last expression).
# NOTE(review): dendro_data(), segment() and label() come from the 'ggdendro'
# package, which is not loaded here -- confirm the caller attaches it.
plots.hclust <- function(hc)
{
library(ggplot2)
# hc is the object from hclust()
dhc <- as.dendrogram(hc)
ddata <- dendro_data(dhc, type="rectangle")
p <- ggplot(segment(ddata))
p <- p + geom_segment(aes(x=x, y=y, xend=xend, yend=yend), size=0.7)
p <- p + coord_flip()
p <- p + scale_y_reverse(expand=c(1.3,0), breaks=seq(0,1,by=0.1))
p <- p + scale_size("n")
# BUG FIX: the original geom_text() call contained a stray empty argument
# ("label=label), , size=6") which makes R signal "argument is empty"
# when the call is evaluated.
p <- p + geom_text(data=label(ddata), aes(x=x, y=y, label=label), size=6, hjust=0, fontface="bold")
p <- p + theme_classic()
p <- p + theme(axis.ticks.y=element_blank(), axis.text.y=element_blank(), axis.title=element_text(size=16,face="bold",vjust=-0.65))
p <- p + theme(axis.text.x=element_text(angle=90, hjust=0, size=16, vjust=0.4, face="bold"))
p <- p + theme(axis.title=element_text(vjust=0.9,face="bold",size=16))
p
}
############################################################################################
| /includes/libs/mylib_plots.R | no_license | jkh1/serrano-remining | R | false | false | 4,538 | r |
# plots.plot_ScatterMatrix
# plots.ggplot_theme
# plots.ggplot_theme_casual
# plots.install_xkcd
# plots.ggplot_labels
# plots.hclust
############################################################################################
############################################################################################
# Scatterplot matrix of the selected columns of 'df', coloured by the 'tag'
# column; the upper triangle is left blank and only one legend is kept.
#
# Args:
#   df:            data frame holding the data (must contain a 'tag' column)
#   columnsToPlot: column indices/names forwarded to GGally::ggpairs()
# Returns: the modified ggpairs plot matrix.
plots.plot_ScatterMatrix <- function(df, columnsToPlot)
{
library(GGally, quietly=TRUE)  # was quietly=T; avoid the reassignable T/F shortcuts
p <- ggpairs(data=df, columns=columnsToPlot, upper="blank", lower=list(continuous="points"), legends=FALSE, mapping=ggplot2::aes(colour=tag))
for (i in seq_along(columnsToPlot))
{
# Address only the diagonal elements
# Get plot out of matrix
inner <- getPlot(p, i, i)
# Add any ggplot2 settings you want (blank grid here)
inner <- inner + theme(panel.grid=element_blank()) + theme(axis.text.x=element_blank())
# Put it back into the matrix
p <- putPlot(p, inner, i, i)
for (j in seq_along(columnsToPlot))
{
if (i == 1 && j == 1)  # scalar test: use && rather than elementwise &
{
# Move legend right
# NOTE(review): legend.position is normally a pair of relative [0,1]
# coordinates; length(columnsToPlot)-0.25 exceeds that range -- confirm
# the intended placement.
inner <- getPlot(p, i, j)
inner <- inner + theme(legend.position=c(length(columnsToPlot)-0.25,0.50))
p <- putPlot(p, inner, i, j)
}
else{
# Delete legend
inner <- getPlot(p, i, j)
inner <- inner + theme(legend.position="none")
p <- putPlot(p, inner, i, j)
}
}
}
return(p)
}
############################################################################################
plots.ggplot_theme <- function(g)
{
# Publication-style theme: classic look, bold 16 pt axis text and titles,
# no panel border, and explicit black axis lines.
themed <- g + theme_classic()
themed <- themed +
theme(axis.text=element_text(size=16, face="bold"), axis.title.y=element_text(vjust=0.9), axis.title.x=element_text(vjust=-0.6)) +
theme(axis.title=element_text(size=16, face="bold")) +
theme(panel.border=element_blank()) +
theme(axis.line.x=element_line(color="black", size=0.7), axis.line.y=element_line(color="black", size=0.7))
themed
}
############################################################################################
# Build a casual, hand-drawn ("xkcd") ggplot2 theme object.
# NOTE(review): the parameter 'g' is never used inside this function -- the
# caller receives only the theme object and must add it to a plot with '+'.
plots.ggplot_theme_casual <- function(g)
{
# Install the 'xkcd' package on first use (side effect: network download).
if(!"xkcd" %in% rownames(installed.packages()))
{
install.packages("xkcd")
}
library("xkcd")
# Black axis lines and 10 pt black axis text; the xkcd font family is used
# for the plot title and all other text elements.
return (theme(axis.line.x=element_line(size=0.5, colour="black"), axis.line.y=element_line(size=0.5, colour="black"), axis.text.x=element_text(colour="black", size=10), axis.text.y=element_text(colour="black", size=10), panel.background=element_blank(), plot.title=element_text(family="xkcd"), text=element_text(family="xkcd")))
}
############################################################################################
# One-time setup helper: installs the 'xkcd' and 'sysfonts' packages and
# registers the xkcd TrueType font downloaded from simonsoftware.se.
# NOTE(review): the system() calls below copy the font into ~/Library/Fonts,
# which is Mac OS X specific (adapted from the xkcd package vignette) --
# confirm the target directory before running on another platform.
plots.install_xkcd <- function()
{
install.packages("xkcd")
if(!"sysfonts" %in% rownames(installed.packages()))
{
install.packages("sysfonts")
}
library(sysfonts)
# Download the font file into the working directory (binary mode).
download.file("http://simonsoftware.se/other/xkcd.ttf", dest="xkcd.ttf", mode="wb")
# The following steps are adapted from the xkcd package vignette
# for a Mac OS X machine.
system("cp xkcd.ttf ~/Library/Fonts")
system("rm xkcd.ttf")
# Fail loudly if the font did not get registered.
stopifnot("xkcd.ttf" %in% font.files())
}
############################################################################################
plots.ggplot_labels <- function(g, xlabel, ylabel, title)
{
# Attach axis labels and a plot title to an existing ggplot object.
labelled <- g + labs(x=paste0(xlabel), y=ylabel) + ggtitle(title)
labelled
}
############################################################################################
# Plot a hierarchical clustering as a horizontal ggplot dendrogram with
# bold leaf labels.
#
# Args:
#   hc: an object returned by hclust()
# Returns: the ggplot object (last expression).
# NOTE(review): dendro_data(), segment() and label() come from the 'ggdendro'
# package, which is not loaded here -- confirm the caller attaches it.
plots.hclust <- function(hc)
{
library(ggplot2)
# hc is the object from hclust()
dhc <- as.dendrogram(hc)
ddata <- dendro_data(dhc, type="rectangle")
p <- ggplot(segment(ddata))
p <- p + geom_segment(aes(x=x, y=y, xend=xend, yend=yend), size=0.7)
p <- p + coord_flip()
p <- p + scale_y_reverse(expand=c(1.3,0), breaks=seq(0,1,by=0.1))
p <- p + scale_size("n")
# BUG FIX: the original geom_text() call contained a stray empty argument
# ("label=label), , size=6") which makes R signal "argument is empty"
# when the call is evaluated.
p <- p + geom_text(data=label(ddata), aes(x=x, y=y, label=label), size=6, hjust=0, fontface="bold")
p <- p + theme_classic()
p <- p + theme(axis.ticks.y=element_blank(), axis.text.y=element_blank(), axis.title=element_text(size=16,face="bold",vjust=-0.65))
p <- p + theme(axis.text.x=element_text(angle=90, hjust=0, size=16, vjust=0.4, face="bold"))
p <- p + theme(axis.title=element_text(vjust=0.9,face="bold",size=16))
p
}
############################################################################################
|
# Compare three class-interval classification schemes (quantile,
# equal-interval, Jenks natural breaks) on the 'hour12' column of an NYC
# taxi extract and plot each classification with viridis colour fills.
library(tidyverse)
library(classInt)
library(viridis)
#https://cran.r-project.org/web/packages/classInt/classInt.pdf
# NOTE(review): hard-coded local Windows path -- adjust before reuse.
nyctaxi <- read.csv("d:/nyc-taxi-classifier.csv", stringsAsFactors = FALSE)
# Twelve class breaks per scheme. NOTE(review): the name 'quantile' shadows
# stats::quantile for the remainder of this script.
quantile <- classIntervals(nyctaxi$hour12, n=12, style="quantile")
equalInterval <- classIntervals(nyctaxi$hour12, n=12, style="equal")
natural <- classIntervals(nyctaxi$hour12, n=12, style="jenks")
# One tagged copy of the data per scheme; 'breaks' assigns each row to its
# class interval. NOTE(review): the prepended -1 presumably keeps the
# minimum value inside the lowest interval -- confirm.
quantileBreaks <- nyctaxi
quantileBreaks <- mutate(quantileBreaks,group="quantile")
quantileBreaks$breaks <- factor(
cut(as.numeric(quantileBreaks$hour12), c(-1,quantile$brks))
)
equalIntervalBreaks <- nyctaxi
equalIntervalBreaks <- mutate(equalIntervalBreaks,group="equalInterval")
equalIntervalBreaks$breaks <- factor(
cut(as.numeric(equalIntervalBreaks$hour12), c(-1,equalInterval$brks))
)
naturalBreaks <- nyctaxi
naturalBreaks <- mutate(naturalBreaks,group="natural")
naturalBreaks$breaks <- factor(
cut(as.numeric(naturalBreaks$hour12), natural$brks)
)
# Minimal plot theme: no legend, thin black axis lines, small axis text.
myTheme <- function() {
theme_void() +
theme(
legend.position="none",
axis.line = element_line(size = 0.1, colour = "black"),
axis.text = element_text(colour = "black",size = 8),
plot.margin = margin(1, 1, 1, 1, 'cm')
)
}
# One bar chart per scheme, bars coloured by class membership.
ggplot() +
geom_bar(data=quantileBreaks,aes(fill=breaks,x=OBJECTID,y=hour12),size = 0,stat="identity") +
scale_fill_viridis(discrete = TRUE, direction = -1) +
#scale_x_continuous(limits = c(700,1050)) +
myTheme()
ggplot() +
geom_bar(data=equalIntervalBreaks,aes(fill=breaks,x=OBJECTID,y=hour12),size = 0,stat="identity") +
scale_fill_viridis(discrete = TRUE, direction = -1) +
#scale_x_continuous(limits = c(700,1050)) +
myTheme()
ggplot() +
geom_bar(data=naturalBreaks,aes(fill=breaks,x=OBJECTID,y=hour12),size = 0,stat="identity") +
scale_fill_viridis(discrete = TRUE, direction = -1) +
#scale_x_continuous(limits = c(700,1050)) +
myTheme()
# Stack the three classifications and facet by scheme for side-by-side
# comparison.
taxisTotal <- rbind(quantileBreaks, equalIntervalBreaks, naturalBreaks)
ggplot() +
#facet_grid(~group) +
facet_grid(group ~ .) +
geom_bar(data=taxisTotal,aes(fill=breaks,x=OBJECTID,y=hour12),stat="identity") +
scale_fill_viridis(discrete = TRUE, direction = -1) +
myTheme()
| /data-classification.R | no_license | Sophiehsw/MUSA-620-Week-4 | R | false | false | 2,286 | r |
# Compare three class-interval classification schemes (quantile,
# equal-interval, Jenks natural breaks) on the 'hour12' column of an NYC
# taxi extract and plot each classification with viridis colour fills.
library(tidyverse)
library(classInt)
library(viridis)
#https://cran.r-project.org/web/packages/classInt/classInt.pdf
# NOTE(review): hard-coded local Windows path -- adjust before reuse.
nyctaxi <- read.csv("d:/nyc-taxi-classifier.csv", stringsAsFactors = FALSE)
# Twelve class breaks per scheme. NOTE(review): the name 'quantile' shadows
# stats::quantile for the remainder of this script.
quantile <- classIntervals(nyctaxi$hour12, n=12, style="quantile")
equalInterval <- classIntervals(nyctaxi$hour12, n=12, style="equal")
natural <- classIntervals(nyctaxi$hour12, n=12, style="jenks")
# One tagged copy of the data per scheme; 'breaks' assigns each row to its
# class interval. NOTE(review): the prepended -1 presumably keeps the
# minimum value inside the lowest interval -- confirm.
quantileBreaks <- nyctaxi
quantileBreaks <- mutate(quantileBreaks,group="quantile")
quantileBreaks$breaks <- factor(
cut(as.numeric(quantileBreaks$hour12), c(-1,quantile$brks))
)
equalIntervalBreaks <- nyctaxi
equalIntervalBreaks <- mutate(equalIntervalBreaks,group="equalInterval")
equalIntervalBreaks$breaks <- factor(
cut(as.numeric(equalIntervalBreaks$hour12), c(-1,equalInterval$brks))
)
naturalBreaks <- nyctaxi
naturalBreaks <- mutate(naturalBreaks,group="natural")
naturalBreaks$breaks <- factor(
cut(as.numeric(naturalBreaks$hour12), natural$brks)
)
# Minimal plot theme: no legend, thin black axis lines, small axis text.
myTheme <- function() {
theme_void() +
theme(
legend.position="none",
axis.line = element_line(size = 0.1, colour = "black"),
axis.text = element_text(colour = "black",size = 8),
plot.margin = margin(1, 1, 1, 1, 'cm')
)
}
# One bar chart per scheme, bars coloured by class membership.
ggplot() +
geom_bar(data=quantileBreaks,aes(fill=breaks,x=OBJECTID,y=hour12),size = 0,stat="identity") +
scale_fill_viridis(discrete = TRUE, direction = -1) +
#scale_x_continuous(limits = c(700,1050)) +
myTheme()
ggplot() +
geom_bar(data=equalIntervalBreaks,aes(fill=breaks,x=OBJECTID,y=hour12),size = 0,stat="identity") +
scale_fill_viridis(discrete = TRUE, direction = -1) +
#scale_x_continuous(limits = c(700,1050)) +
myTheme()
ggplot() +
geom_bar(data=naturalBreaks,aes(fill=breaks,x=OBJECTID,y=hour12),size = 0,stat="identity") +
scale_fill_viridis(discrete = TRUE, direction = -1) +
#scale_x_continuous(limits = c(700,1050)) +
myTheme()
# Stack the three classifications and facet by scheme for side-by-side
# comparison.
taxisTotal <- rbind(quantileBreaks, equalIntervalBreaks, naturalBreaks)
ggplot() +
#facet_grid(~group) +
facet_grid(group ~ .) +
geom_bar(data=taxisTotal,aes(fill=breaks,x=OBJECTID,y=hour12),stat="identity") +
scale_fill_viridis(discrete = TRUE, direction = -1) +
myTheme()
|
#' Leaf area fitting
#'
#' Fits randomly measured leaf area values linearly to fresh weight values. Useful if the leaf area changes during a measurement
#' series but is only randomly measured.
#'
#' @param data data frame, with columns of equal length, containing at least columns with the fresh.weight (g)
#' and the leaf.area (cm^2) values, ordered by sample by descending fresh weight. A column containing the sample IDs is optionally required
#' if several samples were measured. At least 3 leaf area values are required.
#' @param sample string, optional name of the column in data containing the sample ID, default: "sample"
#' @param fresh.weight optional name of the column in data containing the numeric fresh weight values (g);
#' default: "fresh.weight"
#' @param leaf.area optional name of the column in data containing the numeric single-sided leaf area values (cm^2);
#' default: "leaf.area"
#' @details fits given leaf area values linearly to the respective fresh weight values and calculates leaf area values
#' for the fresh weight values based on the fit
#' @return the original data frame extended by a numeric column containing the fitted leaf area values (leaf.area.fitted)
#' @examples # get example data
#' df <- data.frame(
#' sample = c(as.integer(rep(1, times = 6))),
#' fresh.weight = c(1.23, 1.19, 1.15, 1.12, 1.09, 1.0),
#' leaf.area = c(10.5, NA, NA, 9.8, NA, 8.4))
#' # fit leaf area
#' df_new <- FitLeafArea(df)
#'
#' @import ggplot2
#' @importFrom graphics legend
#' @importFrom stats approx coef confint lm na.omit nls
#'
#' @export
FitLeafArea <- function(data,
                        sample = "sample",
                        fresh.weight = "fresh.weight",
                        leaf.area = "leaf.area") {
  # Check validity and ordering of the input data (package-internal helpers).
  data_in <-
    ValidityCheck(
      data,
      sample = sample,
      fresh.weight = fresh.weight,
      leaf.area = leaf.area
    )
  OrderCheck(data, sample = sample, fresh.weight = fresh.weight)
  leaf.area.fitted <- c()
  # Fitted values are appended sample by sample in the order of
  # unique(data_in[[sample]]). This assumes rows are grouped by sample
  # (which OrderCheck is expected to enforce) -- otherwise the final
  # data.frame(data, leaf.area.fitted) call would misalign rows.
  for (i in seq_along(unique(data_in[[sample]]))) {
    # Subset the rows of the current sample; keep the original subset so
    # fitted values can be produced for every row (including NA leaf areas).
    sub.sample <- unique(data_in[[sample]])[i]
    data_in_subset_original <-
      data_in[data_in[[sample]] == sub.sample, ]
    data_in_subset <-
      data_in_subset_original[!is.na(data_in_subset_original[[fresh.weight]]), ] # remove rows where fresh weight is NA
    data_in_subset <-
      data_in_subset[!is.na(data_in_subset[[leaf.area]]), ] # remove rows where leaf area is NA
    data_in_subset <-
      data.frame(leaf.area = data_in_subset[[leaf.area]], fresh.weight = data_in_subset[[fresh.weight]])
    try({
      # Helper flag: set to TRUE only if everything in the try wrapper worked,
      # so a failure can be reported with the correct sample ID below.
      all.fine <- FALSE
      # Only fit if the leaf area values are not all identical.
      if (length(unique(na.omit(data_in_subset$leaf.area))) >= 2) {
        # Linear fitting. lm() solves the same least-squares problem the
        # previous nls(leaf.area ~ a * fresh.weight + b, ...) call iterated
        # towards, but it is exact, needs no start values and cannot fail
        # to converge.
        lin <- lm(leaf.area ~ fresh.weight, data = data_in_subset)
        b <- coef(lin)[[1]] # intercept
        a <- coef(lin)[[2]] # slope
        # A singular fit (e.g. all fresh weights identical) yields an NA
        # coefficient; treat it as a failed fit so the warning below fires.
        if (anyNA(c(a, b))) {
          stop("singular linear fit")
        }
        # Predict leaf area for every fresh weight of this sample and append.
        leaf.area.fitted <-
          c(leaf.area.fitted, a * data_in_subset_original[[fresh.weight]] + b)
      } else {
        # All leaf area values are equal: repeat that value for every row.
        leaf.area.fitted <- c(leaf.area.fitted, c(rep(
          na.omit(data_in_subset_original[[leaf.area]])[1],
          times = length(data_in_subset_original[[fresh.weight]])
        )))
      }
      all.fine <- TRUE
    }, silent = TRUE)
    # Give a warning and pad with NAs if fitting failed for this sample.
    if (all.fine == FALSE) {
      warning(paste0("sample ", sub.sample),
              " Fitting of leaf area was unsuccessful")
      leaf.area.fitted <-
        c(leaf.area.fitted, c(rep(
          NA, times = length(data_in_subset_original[[fresh.weight]])
        )))
    }
  }
  return(data.frame(data, leaf.area.fitted))
}
| /R/FitLeafArea.R | no_license | cran/pvldcurve | R | false | false | 4,372 | r | #' Leaf area fitting
#'
#' Fits randomly measured leaf area values linearly to fresh weight values. Useful if the leaf area changes during a measurement
#' series but is only randomly measured.
#'
#' @param data data frame, with columns of equal length, containing at least columns with the the fresh.weight (g)
#' and the leaf.area (cm^2) values, ordered by sample by descending fresh weight. A column containing the sample IDs is optionally required
#' if several samples were measured.At least 3 leaf area values are required.
#' @param sample string, optional name of the column in data containing the sample ID, default: "sample"
#' @param fresh.weight optional name of the column in data containing the numeric fresh weight values (g);
#' default: "fresh.weight"
#' @param leaf.area optional name of the column in data containing the numeric single-sided leaf area values (cm^2);
#' default: "leaf.area"
#' @details fits given leaf area values linearly to the respective fresh weight values and calculates leaf area values
#' for the fresh weight values based on the fit
#' @return the original data frame extended by a numeric column containing the fitted leaf area values (leaf.area.fitted)
#' @examples # get example data
#' df <- data.frame(
#' sample = c(as.integer(rep(1, times = 6))),
#' fresh.weight = c(1.23, 1.19, 1.15, 1.12, 1.09, 1.0),
#' leaf.area = c(10.5, NA, NA, 9.8, NA, 8.4))
#' # fit leaf area
#' df_new <- FitLeafArea(df)
#'
#' @import ggplot2
#' @importFrom graphics legend
#' @importFrom stats approx coef confint lm na.omit nls
#'
#' @export
FitLeafArea <- function(data,
                        sample = "sample",
                        fresh.weight = "fresh.weight",
                        leaf.area = "leaf.area") {
  # Check validity and ordering of the input data (package-internal helpers).
  data_in <-
    ValidityCheck(
      data,
      sample = sample,
      fresh.weight = fresh.weight,
      leaf.area = leaf.area
    )
  OrderCheck(data, sample = sample, fresh.weight = fresh.weight)
  leaf.area.fitted <- c()
  # Fitted values are appended sample by sample in the order of
  # unique(data_in[[sample]]). This assumes rows are grouped by sample
  # (which OrderCheck is expected to enforce) -- otherwise the final
  # data.frame(data, leaf.area.fitted) call would misalign rows.
  for (i in seq_along(unique(data_in[[sample]]))) {
    # Subset the rows of the current sample; keep the original subset so
    # fitted values can be produced for every row (including NA leaf areas).
    sub.sample <- unique(data_in[[sample]])[i]
    data_in_subset_original <-
      data_in[data_in[[sample]] == sub.sample, ]
    data_in_subset <-
      data_in_subset_original[!is.na(data_in_subset_original[[fresh.weight]]), ] # remove rows where fresh weight is NA
    data_in_subset <-
      data_in_subset[!is.na(data_in_subset[[leaf.area]]), ] # remove rows where leaf area is NA
    data_in_subset <-
      data.frame(leaf.area = data_in_subset[[leaf.area]], fresh.weight = data_in_subset[[fresh.weight]])
    try({
      # Helper flag: set to TRUE only if everything in the try wrapper worked,
      # so a failure can be reported with the correct sample ID below.
      all.fine <- FALSE
      # Only fit if the leaf area values are not all identical.
      if (length(unique(na.omit(data_in_subset$leaf.area))) >= 2) {
        # Linear fitting. lm() solves the same least-squares problem the
        # previous nls(leaf.area ~ a * fresh.weight + b, ...) call iterated
        # towards, but it is exact, needs no start values and cannot fail
        # to converge.
        lin <- lm(leaf.area ~ fresh.weight, data = data_in_subset)
        b <- coef(lin)[[1]] # intercept
        a <- coef(lin)[[2]] # slope
        # A singular fit (e.g. all fresh weights identical) yields an NA
        # coefficient; treat it as a failed fit so the warning below fires.
        if (anyNA(c(a, b))) {
          stop("singular linear fit")
        }
        # Predict leaf area for every fresh weight of this sample and append.
        leaf.area.fitted <-
          c(leaf.area.fitted, a * data_in_subset_original[[fresh.weight]] + b)
      } else {
        # All leaf area values are equal: repeat that value for every row.
        leaf.area.fitted <- c(leaf.area.fitted, c(rep(
          na.omit(data_in_subset_original[[leaf.area]])[1],
          times = length(data_in_subset_original[[fresh.weight]])
        )))
      }
      all.fine <- TRUE
    }, silent = TRUE)
    # Give a warning and pad with NAs if fitting failed for this sample.
    if (all.fine == FALSE) {
      warning(paste0("sample ", sub.sample),
              " Fitting of leaf area was unsuccessful")
      leaf.area.fitted <-
        c(leaf.area.fitted, c(rep(
          NA, times = length(data_in_subset_original[[fresh.weight]])
        )))
    }
  }
  return(data.frame(data, leaf.area.fitted))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/importReads.R
\name{readBedFileAsGRanges}
\alias{readBedFileAsGRanges}
\title{Import BED file into GRanges}
\usage{
readBedFileAsGRanges(
bedfile,
assembly,
chromosomes = NULL,
remove.duplicate.reads = FALSE,
min.mapq = 10,
max.fragment.width = 1000,
blacklist = NULL
)
}
\arguments{
\item{bedfile}{A file with aligned reads in BED-6 format. The columns have to be c('chromosome','start','end','description','mapq','strand').}
\item{assembly}{Please see \code{\link[GenomeInfoDb]{getChromInfoFromUCSC}} for available assemblies. Only necessary when importing BED files. BAM files are handled automatically. Alternatively a data.frame with columns 'chromosome' and 'length'.}
\item{chromosomes}{If only a subset of the chromosomes should be imported, specify them here.}
\item{remove.duplicate.reads}{A logical indicating whether or not duplicate reads should be removed.}
\item{min.mapq}{Minimum mapping quality when importing from BAM files. Set \code{min.mapq=0} to keep all reads.}
\item{max.fragment.width}{Maximum allowed fragment length. This is to filter out erroneously wrong fragments.}
\item{blacklist}{A \code{\link{GRanges-class}} or a bed(.gz) file with blacklisted regions. Reads falling into those regions will be discarded.}
}
\value{
A \code{\link{GRanges-class}} object containing the reads.
}
\description{
Import aligned reads from a BED file into a \code{\link{GRanges-class}} object.
}
\examples{
## Get an example BED file with single-cell-sequencing reads
bedfile <- system.file("extdata", "liver-H3K4me3-BN-male-bio2-tech1.bed.gz",
package="chromstaRData")
## Read the file into a GRanges object
data(rn4_chrominfo)
reads <- readBedFileAsGRanges(bedfile, assembly=rn4_chrominfo, chromosomes='chr12',
min.mapq=10, remove.duplicate.reads=TRUE)
print(reads)
}
| /man/readBedFileAsGRanges.Rd | no_license | ataudt/chromstaR | R | false | true | 1,924 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/importReads.R
\name{readBedFileAsGRanges}
\alias{readBedFileAsGRanges}
\title{Import BED file into GRanges}
\usage{
readBedFileAsGRanges(
bedfile,
assembly,
chromosomes = NULL,
remove.duplicate.reads = FALSE,
min.mapq = 10,
max.fragment.width = 1000,
blacklist = NULL
)
}
\arguments{
\item{bedfile}{A file with aligned reads in BED-6 format. The columns have to be c('chromosome','start','end','description','mapq','strand').}
\item{assembly}{Please see \code{\link[GenomeInfoDb]{getChromInfoFromUCSC}} for available assemblies. Only necessary when importing BED files. BAM files are handled automatically. Alternatively a data.frame with columns 'chromosome' and 'length'.}
\item{chromosomes}{If only a subset of the chromosomes should be imported, specify them here.}
\item{remove.duplicate.reads}{A logical indicating whether or not duplicate reads should be removed.}
\item{min.mapq}{Minimum mapping quality when importing from BAM files. Set \code{min.mapq=0} to keep all reads.}
\item{max.fragment.width}{Maximum allowed fragment length. This is to filter out erroneously wrong fragments.}
\item{blacklist}{A \code{\link{GRanges-class}} or a bed(.gz) file with blacklisted regions. Reads falling into those regions will be discarded.}
}
\value{
A \code{\link{GRanges-class}} object containing the reads.
}
\description{
Import aligned reads from a BED file into a \code{\link{GRanges-class}} object.
}
\examples{
## Get an example BED file with single-cell-sequencing reads
bedfile <- system.file("extdata", "liver-H3K4me3-BN-male-bio2-tech1.bed.gz",
package="chromstaRData")
## Read the file into a GRanges object
data(rn4_chrominfo)
reads <- readBedFileAsGRanges(bedfile, assembly=rn4_chrominfo, chromosomes='chr12',
min.mapq=10, remove.duplicate.reads=TRUE)
print(reads)
}
|
# Remove s4 classes created by this package.
# This is only necessary if the package was loaded with devtools. If the
# package was NOT loaded by devtools, it's not necessary to remove the
# classes this way, and attempting to do so will result in errors.
remove_s4_classes <- function(package) {
  ns <- ns_env(package)
  if (is.null(ns)) {
    return()
  }
  # Remove derived classes after their parents (topological order),
  # otherwise R errors while modifying superclasses of removed classes.
  class_names <- sort_s4classes(methods::getClasses(ns, FALSE), package)
  lapply(class_names, remove_s4_class, package)
}
# Sort S4 classes for hierarchical removal
# Derived classes must be removed **after** their parents.
# This reduces to a topological sorting on the S4 dependency class
# https://en.wikipedia.org/wiki/Topological_sorting
sort_s4classes <- function(classes, package) {
  # Kahn-style topological sort on the "extends" relation restricted to this
  # package: classes with no remaining parent are emitted first, so parents
  # are always removed before the classes derived from them.
  nsenv <- ns_env(package)
  sorted_classes <- vector(mode = 'character', length = 0)
  ## Return the parent class, if any within domestic classes.
  ## NOTE(review): only ext[2] (the first direct superclass) is considered;
  ## additional parents from multiple inheritance appear to be ignored --
  ## confirm this is intended.
  extends_first <- function(x, classes) {
    ext <- methods::extends(methods::getClass(x, where = nsenv))
    parent <- ext[2]
    classes %in% parent
  }
  ## Matrix of classes in columns, extending classes in rows:
  ## extended_classes[i, j] is TRUE when classes[i] is the parent of classes[j]
  extended_classes <- vapply(
    classes,
    extends_first,
    rep(TRUE, length(classes)),
    classes
  )
  ## vapply simplifies to a plain vector when there is a single class
  if (!is.matrix(extended_classes))
    extended_classes <- as.matrix(extended_classes)
  ## Dynamic set of orphan classes (safe to remove): columns with no parent
  start_idx <- which(apply(extended_classes, 2, sum) == 0)
  while (length(start_idx) > 0) {
    ## add node to sorted list (and remove from pending list)
    i <- start_idx[1]
    start_idx <- utils::tail(start_idx, -1)
    sorted_classes <- c(sorted_classes, classes[i])
    ## check its derived classes if any: dropping the edge i -> j may leave
    ## j without parents, making it eligible for removal next
    for (j in which(extended_classes[i, ])) {
      extended_classes[i, j] <- FALSE
      if (sum(extended_classes[, j]) == 0) {
        start_idx <- c(start_idx, j)
      }
    }
  }
  if (any(extended_classes)) {
    ## Graph has a cycle. This should not happen; rather than stopping,
    ## fall back to appending the unsorted remainder.
    idx <- !classes %in% sorted_classes
    sorted_classes <- c(sorted_classes, classes[idx])
  }
  sorted_classes
}
# Remove an s4 class from a package loaded by devtools
#
# For classes loaded with devtools, this is necessary so that R doesn't try to
# modify superclasses that don't have references to this class. For example,
# suppose you have package pkgA with class A, and pkgB with class B, which
# contains A. If pkgB is loaded with load_all(), then class B will have a
# reference to class A, and unloading pkgB the normal way, with
# unloadNamespace("pkgB"), will result in some errors. They happen because R
# will look at B, see that it is a superclass of A, then it will try to modify
# A by removing subclass references to B.
#
# This function sidesteps the problem by modifying B. It finds all the classes
# in B@contains which also have references back to B, then modifes B to keep
# references to those classes, but remove references to all other classes.
# Finally, it removes B. Calling removeClass("B") tells the classes referred to
# in B@contains to remove their references back to B.
#
# It is entirely possible that this code is necessary only because of bugs in
# R's S4 implementation.
#
# @param classname The name of the class.
# @param package The package object which contains the class.
remove_s4_class <- function(classname, package) {
  nsenv <- ns_env(package)
  # Make a copy of the class definition (not the registered one)
  class <- methods::getClassDef(classname, package = package, inherits = FALSE)
  # If the class is not defined in this package do not try to remove it
  # (getClassDef may resolve to a definition owned by another package)
  if (!identical(class@package, package)) {
    return()
  }
  # Find all the references to classes that (this one contains/extends AND
  # have backreferences to this class) so that R doesn't try to modify them.
  keep_idx <- contains_backrefs(classname, package, class@contains)
  class@contains <- class@contains[keep_idx]
  # Assign the modified class back into the package so the removeClass()
  # below only touches superclasses that actually know about this class
  methods::assignClassDef(classname, class, where = nsenv, force = TRUE)
  # Remove the class, ignoring failures due to potentially locked environments.
  tryCatch(methods::removeClass(classname, where = nsenv), error = function(e) NULL)
}
# Given a list of SClassExtension objects, this returns a logical vector of the
# same length. Each element is TRUE if the corresponding object has a reference
# to this class, FALSE otherwise.
# Given a list of SClassExtension objects, return a logical vector of the
# same length: TRUE where the contained class holds a subclass reference
# back to `classname` from `pkgname`.
contains_backrefs <- function(classname, pkgname, contains) {
  # If class_a in pkg_a has class_b in pkg_b as a subclass, return TRUE,
  # otherwise FALSE.
  has_subclass_ref <- function(class_a, pkg_a, class_b, pkg_b) {
    x <- methods::getClassDef(class_a, package = pkg_a)
    if (is.null(x)) return(FALSE)
    subclass_ref <- x@subclasses[[class_b]]
    if (!is.null(subclass_ref) && subclass_ref@package == pkg_b) {
      return(TRUE)
    }
    FALSE
  }
  # Type-stable empty result. (Previously a bare return() yielded NULL;
  # subsetting with NULL and logical(0) is equivalent for the caller, but
  # logical(0) matches the documented return type.)
  if (length(contains) == 0) {
    return(logical(0))
  }
  # Named character vector: names are the contained classes, values the
  # packages defining them. vapply (not sapply) guarantees character output.
  contain_pkgs <- vapply(contains, methods::slot, character(1), "package")
  mapply(has_subclass_ref, names(contain_pkgs), contain_pkgs, classname, pkgname)
}
| /R/remove-s4-class.R | no_license | cran/pkgload | R | false | false | 5,165 | r | # Remove s4 classes created by this package.
# This is only necessary if the package was loaded with devtools. If the
# package was NOT loaded by devtools, it's not necessary to remove the
# classes this way, and attempting to do so will result in errors.
remove_s4_classes <- function(package) {
  ns <- ns_env(package)
  if (is.null(ns)) {
    return()
  }
  # Remove derived classes after their parents (topological order),
  # otherwise R errors while modifying superclasses of removed classes.
  class_names <- sort_s4classes(methods::getClasses(ns, FALSE), package)
  lapply(class_names, remove_s4_class, package)
}
# Sort S4 classes for hierarchical removal
# Derived classes must be removed **after** their parents.
# This reduces to a topological sorting on the S4 dependency class
# https://en.wikipedia.org/wiki/Topological_sorting
sort_s4classes <- function(classes, package) {
  # Kahn-style topological sort on the "extends" relation restricted to this
  # package: classes with no remaining parent are emitted first, so parents
  # are always removed before the classes derived from them.
  nsenv <- ns_env(package)
  sorted_classes <- vector(mode = 'character', length = 0)
  ## Return the parent class, if any within domestic classes.
  ## NOTE(review): only ext[2] (the first direct superclass) is considered;
  ## additional parents from multiple inheritance appear to be ignored --
  ## confirm this is intended.
  extends_first <- function(x, classes) {
    ext <- methods::extends(methods::getClass(x, where = nsenv))
    parent <- ext[2]
    classes %in% parent
  }
  ## Matrix of classes in columns, extending classes in rows:
  ## extended_classes[i, j] is TRUE when classes[i] is the parent of classes[j]
  extended_classes <- vapply(
    classes,
    extends_first,
    rep(TRUE, length(classes)),
    classes
  )
  ## vapply simplifies to a plain vector when there is a single class
  if (!is.matrix(extended_classes))
    extended_classes <- as.matrix(extended_classes)
  ## Dynamic set of orphan classes (safe to remove): columns with no parent
  start_idx <- which(apply(extended_classes, 2, sum) == 0)
  while (length(start_idx) > 0) {
    ## add node to sorted list (and remove from pending list)
    i <- start_idx[1]
    start_idx <- utils::tail(start_idx, -1)
    sorted_classes <- c(sorted_classes, classes[i])
    ## check its derived classes if any: dropping the edge i -> j may leave
    ## j without parents, making it eligible for removal next
    for (j in which(extended_classes[i, ])) {
      extended_classes[i, j] <- FALSE
      if (sum(extended_classes[, j]) == 0) {
        start_idx <- c(start_idx, j)
      }
    }
  }
  if (any(extended_classes)) {
    ## Graph has a cycle. This should not happen; rather than stopping,
    ## fall back to appending the unsorted remainder.
    idx <- !classes %in% sorted_classes
    sorted_classes <- c(sorted_classes, classes[idx])
  }
  sorted_classes
}
# Remove an s4 class from a package loaded by devtools
#
# For classes loaded with devtools, this is necessary so that R doesn't try to
# modify superclasses that don't have references to this class. For example,
# suppose you have package pkgA with class A, and pkgB with class B, which
# contains A. If pkgB is loaded with load_all(), then class B will have a
# reference to class A, and unloading pkgB the normal way, with
# unloadNamespace("pkgB"), will result in some errors. They happen because R
# will look at B, see that it is a superclass of A, then it will try to modify
# A by removing subclass references to B.
#
# This function sidesteps the problem by modifying B. It finds all the classes
# in B@contains which also have references back to B, then modifes B to keep
# references to those classes, but remove references to all other classes.
# Finally, it removes B. Calling removeClass("B") tells the classes referred to
# in B@contains to remove their references back to B.
#
# It is entirely possible that this code is necessary only because of bugs in
# R's S4 implementation.
#
# @param classname The name of the class.
# @param package The package object which contains the class.
remove_s4_class <- function(classname, package) {
  nsenv <- ns_env(package)
  # Make a copy of the class definition (not the registered one)
  class <- methods::getClassDef(classname, package = package, inherits = FALSE)
  # If the class is not defined in this package do not try to remove it
  # (getClassDef may resolve to a definition owned by another package)
  if (!identical(class@package, package)) {
    return()
  }
  # Find all the references to classes that (this one contains/extends AND
  # have backreferences to this class) so that R doesn't try to modify them.
  keep_idx <- contains_backrefs(classname, package, class@contains)
  class@contains <- class@contains[keep_idx]
  # Assign the modified class back into the package so the removeClass()
  # below only touches superclasses that actually know about this class
  methods::assignClassDef(classname, class, where = nsenv, force = TRUE)
  # Remove the class, ignoring failures due to potentially locked environments.
  tryCatch(methods::removeClass(classname, where = nsenv), error = function(e) NULL)
}
# Given a list of SClassExtension objects, this returns a logical vector of the
# same length. Each element is TRUE if the corresponding object has a reference
# to this class, FALSE otherwise.
# Given a list of SClassExtension objects, return a logical vector of the
# same length: TRUE where the contained class holds a subclass reference
# back to `classname` from `pkgname`.
contains_backrefs <- function(classname, pkgname, contains) {
  # If class_a in pkg_a has class_b in pkg_b as a subclass, return TRUE,
  # otherwise FALSE.
  has_subclass_ref <- function(class_a, pkg_a, class_b, pkg_b) {
    x <- methods::getClassDef(class_a, package = pkg_a)
    if (is.null(x)) return(FALSE)
    subclass_ref <- x@subclasses[[class_b]]
    if (!is.null(subclass_ref) && subclass_ref@package == pkg_b) {
      return(TRUE)
    }
    FALSE
  }
  # Type-stable empty result. (Previously a bare return() yielded NULL;
  # subsetting with NULL and logical(0) is equivalent for the caller, but
  # logical(0) matches the documented return type.)
  if (length(contains) == 0) {
    return(logical(0))
  }
  # Named character vector: names are the contained classes, values the
  # packages defining them. vapply (not sapply) guarantees character output.
  contain_pkgs <- vapply(contains, methods::slot, character(1), "package")
  mapply(has_subclass_ref, names(contain_pkgs), contain_pkgs, classname, pkgname)
}
|
context("updates")
test_that("updates for all widget in the gallery", {
app <- shinyapp$new("apps/081-widgets-gallery")
expect_update(app, checkbox = FALSE, output = "checkboxOut")
expect_equal(app$get_value("checkboxOut"), "[1] FALSE")
expect_update(app, checkbox = TRUE, output = "checkboxOut")
expect_equal(app$get_value("checkboxOut"), "[1] TRUE")
expect_update(app, checkGroup = c("1", "3"), output = "checkGroupOut")
expect_equal(app$get_value("checkGroupOut"), c('[1] "1" "3"'))
expect_update(app, checkGroup = c("2"), output = "checkGroupOut")
expect_equal(app$get_value("checkGroupOut"), c('[1] "2"'))
expect_update(app, date = as.Date("2015-01-21"), output = "dateOut")
expect_equal(app$get_value("dateOut"), "[1] \"2015-01-21\"")
## We only change the start, because that already triggers an
## update. The end date would trigger another one, but possibly
## later than us checking the value here. Then we change the end date
## in another test
v <- c(as.Date("2012-06-30"), Sys.Date())
expect_update(app, dates = v, output = "datesOut")
expect_equal(app$get_value("datesOut"), capture.output(print(v)))
v <- as.Date(c("2012-06-30", "2015-01-21"))
expect_update(app, dates = v, output = "datesOut")
expect_equal(app$get_value("datesOut"), capture.output(print(v)))
## We cannot check the value of the output easily, because
## set_value() is not atomic for the input widget, and the output
## watcher finishes before its final value is set
expect_update(app, num = 42, output = "numOut")
expect_true(
app$wait_for("$('#numOut.shiny-bound-output').text() == '[1] 42'")
)
expect_equal(app$get_value("numOut"), "[1] 42")
expect_update(app, radio = "2", output = "radioOut")
expect_equal(app$get_value("radioOut"), '[1] "2"')
expect_update(app, select = "2", output = "selectOut")
expect_equal(app$get_value("selectOut"), '[1] "2"')
expect_update(app, slider1 = 42, output = "slider1Out")
expect_equal(app$get_value("slider1Out"), '[1] 42')
expect_update(app, slider2 = c(0, 100), output = "slider2Out")
expect_equal(app$get_value("slider2Out"), '[1] 0 100')
expect_update(app, text = "foobar", output = "textOut")
expect_true(
app$wait_for("$('#textOut.shiny-bound-output').text() == '[1] \"foobar\"'")
)
expect_equal(app$get_value("textOut"), "[1] \"foobar\"")
})
test_that("simple updates", {
app <- shinyapp$new("apps/050-kmeans-example")
expect_update(app, xcol = "Sepal.Width", output = "plot1")
expect_update(app, ycol = "Petal.Width", output = "plot1")
expect_update(app, clusters = 4, output = "plot1")
})
| /tests/testthat/test-updates.R | permissive | wch/shinytest | R | false | false | 2,637 | r |
context("updates")
test_that("updates for all widget in the gallery", {
app <- shinyapp$new("apps/081-widgets-gallery")
expect_update(app, checkbox = FALSE, output = "checkboxOut")
expect_equal(app$get_value("checkboxOut"), "[1] FALSE")
expect_update(app, checkbox = TRUE, output = "checkboxOut")
expect_equal(app$get_value("checkboxOut"), "[1] TRUE")
expect_update(app, checkGroup = c("1", "3"), output = "checkGroupOut")
expect_equal(app$get_value("checkGroupOut"), c('[1] "1" "3"'))
expect_update(app, checkGroup = c("2"), output = "checkGroupOut")
expect_equal(app$get_value("checkGroupOut"), c('[1] "2"'))
expect_update(app, date = as.Date("2015-01-21"), output = "dateOut")
expect_equal(app$get_value("dateOut"), "[1] \"2015-01-21\"")
## We only change the start, because that already triggers an
## update. The end date would trigger another one, but possibly
## later than us checking the value here. Then we change the end date
## in another test
v <- c(as.Date("2012-06-30"), Sys.Date())
expect_update(app, dates = v, output = "datesOut")
expect_equal(app$get_value("datesOut"), capture.output(print(v)))
v <- as.Date(c("2012-06-30", "2015-01-21"))
expect_update(app, dates = v, output = "datesOut")
expect_equal(app$get_value("datesOut"), capture.output(print(v)))
## We cannot check the value of the output easily, because
## set_value() is not atomic for the input widget, and the output
## watcher finishes before its final value is set
expect_update(app, num = 42, output = "numOut")
expect_true(
app$wait_for("$('#numOut.shiny-bound-output').text() == '[1] 42'")
)
expect_equal(app$get_value("numOut"), "[1] 42")
expect_update(app, radio = "2", output = "radioOut")
expect_equal(app$get_value("radioOut"), '[1] "2"')
expect_update(app, select = "2", output = "selectOut")
expect_equal(app$get_value("selectOut"), '[1] "2"')
expect_update(app, slider1 = 42, output = "slider1Out")
expect_equal(app$get_value("slider1Out"), '[1] 42')
expect_update(app, slider2 = c(0, 100), output = "slider2Out")
expect_equal(app$get_value("slider2Out"), '[1] 0 100')
expect_update(app, text = "foobar", output = "textOut")
expect_true(
app$wait_for("$('#textOut.shiny-bound-output').text() == '[1] \"foobar\"'")
)
expect_equal(app$get_value("textOut"), "[1] \"foobar\"")
})
test_that("simple updates", {
app <- shinyapp$new("apps/050-kmeans-example")
expect_update(app, xcol = "Sepal.Width", output = "plot1")
expect_update(app, ycol = "Petal.Width", output = "plot1")
expect_update(app, clusters = 4, output = "plot1")
})
|
# Package test entry point: discovers and runs all testthat tests for
# mididrumR (conventionally located under tests/testthat/).
library(testthat)
library(mididrumR)
test_check("mididrumR")
| /tests/testthat.R | permissive | florianm/mididrumR | R | false | false | 62 | r | library(testthat)
library(mididrumR)
test_check("mididrumR")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mi.R
\name{plot_imi_check}
\alias{plot_imi_check}
\title{Visualize IMI scores for the top words in topics}
\usage{
plot_imi_check(m, k, groups = NULL, n_reps = 20, ...)
}
\arguments{
\item{m}{\code{mallet_model} object \emph{with sampling state loaded} via
\code{\link{load_sampling_state}}}
\item{k}{topic number (only one topic at a time)}
\item{groups}{optional grouping factor with one element for each document}
\item{n_reps}{number of simulations}
\item{...}{passed on to \code{\link{top_words}}: use to specify number of top
words and/or weighting function}
}
\value{
\code{ggplot2} plot object
}
\description{
As a diagnostic visualization, this function displays the IMI scores of the
top-weighted words in a topic, together with simulated values, on a scale set
by the distribution of simulated values. Extreme deviations of the actual IMI
scores indicate departures from the multinomial assumption of the model.
}
\seealso{
\code{\link{imi_topic}}, \code{\link{imi_check}}
\code{\link{plot_imi_check}}
}
| /man/plot_imi_check.Rd | permissive | agoldst/dfrtopics | R | false | true | 1,100 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mi.R
\name{plot_imi_check}
\alias{plot_imi_check}
\title{Visualize IMI scores for the top words in topics}
\usage{
plot_imi_check(m, k, groups = NULL, n_reps = 20, ...)
}
\arguments{
\item{m}{\code{mallet_model} object \emph{with sampling state loaded} via
\code{\link{load_sampling_state}}}
\item{k}{topic number (only one topic at a time)}
\item{groups}{optional grouping factor with one element for each document}
\item{n_reps}{number of simulations}
\item{...}{passed on to \code{\link{top_words}}: use to specify number of top
words and/or weighting function}
}
\value{
\code{ggplot2} plot object
}
\description{
As a diagnostic visualization, this function displays the IMI scores of the
top-weighted words in a topic, together with simulated values, on a scale set
by the distribution of simulated values. Extreme deviations of the actual IMI
scores indicate departures from the multinomial assumption of the model.
}
\seealso{
\code{\link{imi_topic}}, \code{\link{imi_check}}
\code{\link{plot_imi_check}}
}
|
#' Make nice ANOVA table for printing.
#'
#' This generic function produces a nice ANOVA table for printing for objects of the supported classes. \code{nice_anova} takes an object from \code{\link[car]{Anova}}, possibly created by the convenience functions \code{\link{aov_ez}} or \code{\link{aov_car}}. When within-subject factors are present, either sphericity corrected or uncorrected degrees of freedom can be reported.
#'
#'
#' @param object,x An object of class \code{"afex_aov"} (see \code{\link{aov_car}}) or of class \code{"mixed"} (see \code{\link{mixed}}) as returned from the \pkg{afex} functions. Alternatively, an object of class \code{"Anova.mlm"} or \code{"anova"} as returned from \code{\link[car]{Anova}}.
#' @param es Effect Size to be reported. The default is given by \code{afex_options("es_aov")}, which is initially set to \code{"ges"} (i.e., reporting generalized eta-squared, see details). Also supported is partial eta-squared (\code{"pes"}) or \code{"none"}.
#' @param observed character vector referring to the observed (i.e., non manipulated) variables/effects in the design. Important for calculation of generalized eta-squared (ignored if \code{es} is not \code{"ges"}), see details.
#' @param correction Character. Which sphericity correction of the degrees of freedom should be reported for the within-subject factors. The default is given by \code{afex_options("correction_aov")}, which is initially set to \code{"GG"} corresponding to the Greenhouse-Geisser correction. Possible values are \code{"GG"}, \code{"HF"} (i.e., Hyunh-Feldt correction), and \code{"none"} (i.e., no correction).
#' @param p.adjust.method \code{character} indicating if p-values for individual effects should be adjusted for multiple comparisons (see \link[stats]{p.adjust} and details). The default \code{NULL} corresponds to no adjustment.
#' @param sig.symbols Character. What should be the symbols designating significance? When entering an vector with \code{length(sig.symbol) < 4} only those elements of the default (\code{c(" +", " *", " **", " ***")}) will be replaced. \code{sig.symbols = ""} will display the stars but not the \code{+}, \code{sig.symbols = rep("", 4)} will display no symbols.
#' @param MSE logical. Should the column containing the Mean Squared Error (MSE) be displayed? Default is \code{TRUE}.
#' @param intercept logical. Should intercept (if present) be included in the ANOVA table? Default is \code{FALSE} which hides the intercept.
#' @param ... currently ignored.
#'
#' @return A \code{data.frame} of class \code{nice_table} with the ANOVA table consisting of characters. The columns that are always present are: \code{Effect}, \code{df} (degrees of freedom), \code{F}, and \code{p}.
#'
#' \code{ges} contains the generalized eta-squared effect size measure (Bakeman, 2005), \code{pes} contains partial eta-squared (if requested).
#'
#' @details The returned \code{data.frame} is print-ready when adding to a document with proper methods. Either directly via \pkg{knitr} or similar approaches such as via packages \pkg{ascii} or \pkg{xtable} (nowadays \pkg{knitr} is probably the best approach, see \href{http://yihui.name/knitr/}{here}). \pkg{ascii} provides conversion to \href{http://www.methods.co.nz/asciidoc/}{AsciiDoc} and \href{http://orgmode.org/}{org-mode} (see \code{\link[ascii]{ascii}} and \code{\link[ascii]{print-ascii}}). \pkg{xtable} converts a \code{data.frame} into LaTeX code with many possible options (e.g., allowing for \code{"longtable"} or \code{"sidewaystable"}), see \code{\link[xtable]{xtable}} and \code{\link[xtable]{print.xtable}}. See Examples.
#'
#' Conversion functions to other formats (such as HTML, ODF, or Word) can be found at the \href{http://cran.r-project.org/web/views/ReproducibleResearch.html}{Reproducible Research Task View}.
#'
#' The default reports generalized eta squared (Olejnik & Algina, 2003), the "recommended effect size for repeated measured designs" (Bakeman, 2005). Note that it is important that all measured variables (as opposed to experimentally manipulated variables), such as e.g., age, gender, weight, ..., must be declared via \code{observed} to obtain the correct effect size estimate. Partial eta squared (\code{"pes"}) does not require this.
#'
#' Exploratory ANOVA, for which no detailed hypotheses have been specified a priori, harbor a multiple comparison problem (Cramer et al., 2015). To avoid an inflation of familywise Type I error rate, results need to be corrected for multiple comparisons using \code{p.adjust.method}.
#' \code{p.adjust.method} defaults to the method specified in the call to \code{\link{aov_car}} in \code{anova_table}. If no method was specified and \code{p.adjust.method = NULL} p-values are not adjusted.
#'
#' @seealso \code{\link{aov_ez}} and \code{\link{aov_car}} are the convenience functions to create the object appropriate for \code{nice_anova}.
#'
#' @author The code for calculating generalized eta-squared was written by Mike Lawrence.\cr Everything else was written by Henrik Singmann.
#'
#' @references Bakeman, R. (2005). Recommended effect size statistics for repeated measures designs. \emph{Behavior Research Methods}, 37(3), 379-384. doi:10.3758/BF03192707
#'
#' Cramer, A. O. J., van Ravenzwaaij, D., Matzke, D., Steingroever, H., Wetzels, R., Grasman, R. P. P. P., ... Wagenmakers, E.-J. (2015). Hidden multiplicity in exploratory multiway ANOVA: Prevalence and remedies. \emph{Psychonomic Bulletin & Review}, 1–8. doi:\href{http://doi.org/10.3758/s13423-015-0913-5}{10.3758/s13423-015-0913-5}
#'
#' Olejnik, S., & Algina, J. (2003). Generalized Eta and Omega Squared Statistics: Measures of Effect Size for Some Common Research Designs. \emph{Psychological Methods}, 8(4), 434-447. doi:10.1037/1082-989X.8.4.434
#'
#' @name nice
#' @importFrom stats anova
#' @encoding UTF-8
#'
#' @examples
#'
#' ## example from Olejnik & Algina (2003)
#' # "Repeated Measures Design" (pp. 439):
#' data(md_12.1)
#' # create object of class afex_aov:
#' rmd <- aov_ez("id", "rt", md_12.1, within = c("angle", "noise"))
#' # use different es:
#' nice(rmd, es = "pes") # noise: .82
#' nice(rmd, es = "ges") # noise: .39
#'
#' # example using obk.long (see ?obk.long), a long version of the OBrienKaiser dataset from car.
#' data(obk.long)
#' # create object of class afex_aov:
#' tmp.aov <- aov_car(value ~ treatment * gender + Error(id/phase*hour), data = obk.long)
#'
#' nice(tmp.aov, observed = "gender")
#'
#' nice(tmp.aov, observed = "gender", sig.symbols = rep("", 4))
#'
#' \dontrun{
#' # use package ascii or xtable for formatting of tables ready for printing.
#'
#' full <- nice(tmp.aov, observed = "gender")
#'
#' require(ascii)
#' print(ascii(full, include.rownames = FALSE, caption = "ANOVA 1"), type = "org")
#'
#' require(xtable)
#' print.xtable(xtable(full, caption = "ANOVA 2"), include.rownames = FALSE)
#' }
#'
#' @export nice
# S3 generic: dispatches to the appropriate nice.* method (afex_aov, anova,
# mixed, ...) based on class(object).
nice <- function(object, ...) {
  UseMethod("nice", object)
}
#' @rdname nice
#' @method nice afex_aov
#' @export
nice.afex_aov <- function(object, es = NULL, observed = attr(object$anova_table, "observed"), correction = attr(object$anova_table, "correction"), MSE = NULL, intercept = NULL, p.adjust.method = attr(object$anova_table, "p.adjust.method"), sig.symbols = c(" +", " *", " **", " ***"), ...) {
  # Recover the options used when the stored anova_table was created, so that
  # nice() reproduces the same table layout as anova() on the same object.
  table_cols <- colnames(object$anova_table)
  if (is.null(es)) {
    # default: whichever effect-size columns are already present in the table
    es <- intersect(c("pes", "ges"), table_cols)
  }
  if (is.null(MSE)) {
    # MSE defaults to TRUE via the default set in anova.afex_aov
    MSE <- "MSE" %in% table_cols
  }
  if (is.null(intercept)) {
    # intercept defaults to FALSE via the default set in anova.afex_aov
    intercept <- "(Intercept)" %in% rownames(object$anova_table)
  }
  # Recompute the ANOVA table with the resolved options, then format it.
  full_table <- as.data.frame(anova(object, es = es, observed = observed,
                                    correction = correction, MSE = MSE,
                                    intercept = intercept,
                                    p.adjust.method = p.adjust.method))
  nice.anova(full_table, MSE = MSE, intercept = intercept, sig.symbols = sig.symbols)
}
#' @rdname nice
#' @method nice anova
#' @export
nice.anova <- function(object, MSE = TRUE, intercept = FALSE, sig.symbols = c(" +", " *", " **", " ***"), ...) {
  # Format an ANOVA table (as returned by car::Anova/anova) into a
  # print-ready data.frame of characters with class "nice_table".
  #
  # Args:
  #   object: data.frame/anova with columns "num Df", "den Df", "F",
  #     "Pr(>F)" and optionally "MSE", "ges", "pes".
  #   MSE: display the MSE column (if present)?
  #   intercept: keep the "(Intercept)" row?
  #   sig.symbols: significance symbols for p < .1/.05/.01/.001; supplied
  #     entries replace the defaults positionally.
  # Returns: data.frame of class c("nice_table", "data.frame").
  is.wholenumber <- function(x, tol = .Machine$double.eps^0.5) abs(x - round(x)) < tol
  anova_table <- object
  # Round dfs that are whole within tolerance before printing, otherwise show
  # two decimals. Fix: previously near-whole dfs (e.g. 1.9999999999) were
  # pasted unrounded, inconsistent with nice.mixed which applies round().
  format_df <- function(x) ifelse(is.wholenumber(x), round(x), formatC(x, digits = 2, format = "f"))
  anova_table[, "df"] <- paste(format_df(anova_table[, "num Df"]),
                               format_df(anova_table[, "den Df"]), sep = ", ")
  # Positionally merge user-supplied symbols into the defaults, so a shorter
  # vector only overrides the leading entries.
  symbols.use <- c(" +", " *", " **", " ***")
  symbols.use[seq_along(sig.symbols)] <- sig.symbols
  df.out <- data.frame(Effect = row.names(anova_table), df = anova_table[, "df"],
                       stringsAsFactors = FALSE)
  if (!is.null(anova_table$MSE))
    df.out <- cbind(df.out, data.frame(MSE = formatC(anova_table[, "MSE"], digits = 2, format = "f"),
                                       stringsAsFactors = FALSE))
  # Reuse the module-level make.stat() with stat = "F" instead of a local
  # make.fs() that duplicated its logic line for line.
  df.out <- cbind(df.out, data.frame(F = make.stat(anova_table, stat = "F", symbols.use),
                                     stringsAsFactors = FALSE))
  if (!is.null(anova_table$ges)) df.out$ges <- round_ps(anova_table$ges)
  if (!is.null(anova_table$pes)) df.out$pes <- round_ps(anova_table$pes)
  df.out$p.value <- round_ps(anova_table[, "Pr(>F)"])
  # Drop the intercept row unless explicitly requested.
  if (!intercept && df.out[1, 1] == "(Intercept)")
    df.out <- df.out[-1, , drop = FALSE]
  rownames(df.out) <- NULL
  # Carry over metadata consumed by print.nice_table().
  attr(df.out, "heading") <- attr(object, "heading")
  attr(df.out, "p.adjust.method") <- attr(object, "p.adjust.method")
  attr(df.out, "correction") <- attr(object, "correction")
  attr(df.out, "observed") <- attr(object, "observed")
  class(df.out) <- c("nice_table", class(df.out))
  df.out
}
# Format the test statistic `stat` (e.g. "F", "Chisq") to two decimals and
# append a significance symbol based on the matching "Pr(>stat)" column:
# symbols[4] for p < .001, symbols[3] for < .01, symbols[2] for < .05,
# symbols[1] for < .1, and no symbol otherwise. NA p-values yield NA.
make.stat <- function(anova, stat, symbols) {
  p_vals <- anova[[paste0("Pr(>", stat, ")")]]
  stat_chr <- formatC(anova[[stat]], digits = 2, format = "f")
  ifelse(p_vals < 0.001, str_c(stat_chr, symbols[4]),
         ifelse(p_vals < 0.01, str_c(stat_chr, symbols[3]),
                ifelse(p_vals < 0.05, str_c(stat_chr, symbols[2]),
                       ifelse(p_vals < 0.1, str_c(stat_chr, symbols[1]), stat_chr))))
}
# TRUE where x is a whole number up to floating-point tolerance `tol`
# (default: square root of machine epsilon). Vectorized over x.
is.wholenumber <- function(x, tol = .Machine$double.eps^0.5) {
  abs(x - round(x)) < tol
}
#' @rdname nice
#' @method nice mixed
#' @export
nice.mixed <- function(object, sig.symbols = c(" +", " *", " **", " ***"), ...) {
  # Format the ANOVA table of a mixed() object as a print-ready character
  # data.frame of class "nice_table". The layout depends on the method used
  # to obtain p-values, stored in attr(object, "method"):
  #   "KR"  -> F tests with (possibly fractional) num/den dfs and F.scaling
  #   "PB"  -> parametric-bootstrap chi-square tests
  #   "LRT" -> likelihood-ratio chi-square tests
  anova_table <- object$anova_table
  # Positionally merge user-supplied symbols into the defaults, so a shorter
  # vector only overrides the leading entries.
  symbols.use <- c(" +", " *", " **", " ***")
  symbols.use[seq_along(sig.symbols)] <- sig.symbols
  if (is.null(attr(object, "method"))) {
    # Objects from old afex versions lack the "method" attribute; return the
    # stored table unformatted rather than failing.
    df.out <- object[[1]]
    warning("mixed object was created with old version of afex, table not nicely formatted.")
  } else if (attr(object, "method") == "KR") {
    # Kenward-Roger: whole dfs print as integers, fractional dfs with two decimals.
    anova_table[,"df"] <- paste(ifelse(is.wholenumber(anova_table[,"num Df"]), round(anova_table[,"num Df"]), formatC(anova_table[,"num Df"], digits = 2, format = "f")), ifelse(is.wholenumber(anova_table[,"den Df"]), round(anova_table[,"den Df"]), formatC(anova_table[,"den Df"], digits = 2, format = "f")), sep = ", ")
    df.out <- data.frame(Effect = row.names(anova_table), df = anova_table[,"df"], "F.scaling" = formatC(anova_table[,"F.scaling"], digits = 2, format = "f"), stringsAsFactors = FALSE, check.names = FALSE)
    df.out <- cbind(df.out, data.frame(F = make.stat(anova_table, stat = "F", symbols.use), stringsAsFactors = FALSE))
    df.out$p.value <- round_ps(anova_table[,"Pr(>F)"])
  } else if (attr(object, "method") == "PB") {
    # Parametric bootstrap: copy the bootstrap p-values into the Chisq p
    # column so make.stat() can pick them up under the standard name.
    anova_table[,"Pr(>Chisq)"] <- anova_table[,"Pr(>PB)"]
    df.out <- data.frame(Effect = row.names(anova_table), df = anova_table[,"Chi Df"], Chisq = make.stat(anova_table, stat = "Chisq", symbols.use), p.value = round_ps(anova_table[,"Pr(>Chisq)"]), stringsAsFactors = FALSE, check.names = FALSE)
  } else if (attr(object, "method") == "LRT") {
    # Likelihood-ratio tests: same layout as "PB" but p-values used as-is.
    df.out <- data.frame(Effect = row.names(anova_table), df = anova_table[,"Chi Df"], Chisq = make.stat(anova_table, stat = "Chisq", symbols.use), p.value = round_ps(anova_table[,"Pr(>Chisq)"]), stringsAsFactors = FALSE, check.names = FALSE)
  } else stop("method of mixed object not supported.")
  rownames(df.out) <- NULL
  class(df.out) <- c("nice_table", class(df.out))
  df.out
}
#' @rdname nice
#' @method print nice_table
#' @export
print.nice_table <- function(x, ...) {
  # Print method for nice_table objects: optional heading first, then the
  # plain data.frame body, then a note naming the sphericity correction
  # (unless it is "none"). Returns x invisibly so the object pipes cleanly.
  heading <- attr(x, "heading")
  if (!is.null(heading)) {
    cat(heading, sep = "\n")
  }
  print.data.frame(x)
  correction_method <- attr(x, "correction")
  if (!is.null(correction_method) && correction_method != "none") {
    cat("\nSphericity correction method:", correction_method, "\n")
  }
  invisible(x)
}
| /afex/R/nice.R | no_license | ingted/R-Examples | R | false | false | 13,326 | r | #' Make nice ANOVA table for printing.
#'
#' This generic function produces a nice ANOVA table for printing for objects of supported classes. \code{nice_anova} takes an object from \code{\link[car]{Anova}} possible created by the convenience functions \code{\link{aov_ez}} or \code{\link{aov_car}}. When within-subject factors are present, either sphericity corrected or uncorrected degrees of freedom can be reported.
#'
#'
#' @param object,x An object of class \code{"afex_aov"} (see \code{\link{aov_car}}) or of class \code{"mixed"} (see \code{\link{mixed}}) as returned from the \pkg{afex} functions. Alternatively, an object of class \code{"Anova.mlm"} or \code{"anova"} as returned from \code{\link[car]{Anova}}.
#' @param es Effect Size to be reported. The default is given by \code{afex_options("es_aov")}, which is initially set to \code{"ges"} (i.e., reporting generalized eta-squared, see details). Also supported is partial eta-squared (\code{"pes"}) or \code{"none"}.
#' @param observed character vector referring to the observed (i.e., non manipulated) variables/effects in the design. Important for calculation of generalized eta-squared (ignored if \code{es} is not \code{"ges"}), see details.
#' @param correction Character. Which sphericity correction of the degrees of freedom should be reported for the within-subject factors. The default is given by \code{afex_options("correction_aov")}, which is initially set to \code{"GG"} corresponding to the Greenhouse-Geisser correction. Possible values are \code{"GG"}, \code{"HF"} (i.e., Huynh-Feldt correction), and \code{"none"} (i.e., no correction).
#' @param p.adjust.method \code{character} indicating if p-values for individual effects should be adjusted for multiple comparisons (see \link[stats]{p.adjust} and details). The default \code{NULL} corresponds to no adjustment.
#' @param sig.symbols Character. What should be the symbols designating significance? When entering a vector with \code{length(sig.symbols) < 4} only those elements of the default (\code{c(" +", " *", " **", " ***")}) will be replaced. \code{sig.symbols = ""} will display the stars but not the \code{+}, \code{sig.symbols = rep("", 4)} will display no symbols.
#' @param MSE logical. Should the column containing the Mean Squared Error (MSE) be displayed? Default is \code{TRUE}.
#' @param intercept logical. Should intercept (if present) be included in the ANOVA table? Default is \code{FALSE} which hides the intercept.
#' @param ... currently ignored.
#'
#' @return A \code{data.frame} of class \code{nice_table} with the ANOVA table consisting of characters. The columns that are always present are: \code{Effect}, \code{df} (degrees of freedom), \code{F}, and \code{p}.
#'
#' \code{ges} contains the generalized eta-squared effect size measure (Bakeman, 2005), \code{pes} contains partial eta-squared (if requested).
#'
#' @details The returned \code{data.frame} is print-ready when adding to a document with proper methods. Either directly via \pkg{knitr} or similar approaches such as via packages \pkg{ascii} or \pkg{xtable} (nowadays \pkg{knitr} is probably the best approach, see \href{http://yihui.name/knitr/}{here}). \pkg{ascii} provides conversion to \href{http://www.methods.co.nz/asciidoc/}{AsciiDoc} and \href{http://orgmode.org/}{org-mode} (see \code{\link[ascii]{ascii}} and \code{\link[ascii]{print-ascii}}). \pkg{xtable} converts a \code{data.frame} into LaTeX code with many possible options (e.g., allowing for \code{"longtable"} or \code{"sidewaystable"}), see \code{\link[xtable]{xtable}} and \code{\link[xtable]{print.xtable}}. See Examples.
#'
#' Conversion functions to other formats (such as HTML, ODF, or Word) can be found at the \href{http://cran.r-project.org/web/views/ReproducibleResearch.html}{Reproducible Research Task View}.
#'
#' The default reports generalized eta squared (Olejnik & Algina, 2003), the "recommended effect size for repeated measured designs" (Bakeman, 2005). Note that it is important that all measured variables (as opposed to experimentally manipulated variables), such as e.g., age, gender, weight, ..., must be declared via \code{observed} to obtain the correct effect size estimate. Partial eta squared (\code{"pes"}) does not require this.
#'
#' Exploratory ANOVA, for which no detailed hypotheses have been specified a priori, harbor a multiple comparison problem (Cramer et al., 2015). To avoid an inflation of familywise Type I error rate, results need to be corrected for multiple comparisons using \code{p.adjust.method}.
#' \code{p.adjust.method} defaults to the method specified in the call to \code{\link{aov_car}} in \code{anova_table}. If no method was specified and \code{p.adjust.method = NULL} p-values are not adjusted.
#'
#' @seealso \code{\link{aov_ez}} and \code{\link{aov_car}} are the convenience functions to create the object appropriate for \code{nice_anova}.
#'
#' @author The code for calculating generalized eta-squared was written by Mike Lawrence.\cr Everything else was written by Henrik Singmann.
#'
#' @references Bakeman, R. (2005). Recommended effect size statistics for repeated measures designs. \emph{Behavior Research Methods}, 37(3), 379-384. doi:10.3758/BF03192707
#'
#' Cramer, A. O. J., van Ravenzwaaij, D., Matzke, D., Steingroever, H., Wetzels, R., Grasman, R. P. P. P., ... Wagenmakers, E.-J. (2015). Hidden multiplicity in exploratory multiway ANOVA: Prevalence and remedies. \emph{Psychonomic Bulletin & Review}, 1–8. doi:\href{http://doi.org/10.3758/s13423-015-0913-5}{10.3758/s13423-015-0913-5}
#'
#' Olejnik, S., & Algina, J. (2003). Generalized Eta and Omega Squared Statistics: Measures of Effect Size for Some Common Research Designs. \emph{Psychological Methods}, 8(4), 434-447. doi:10.1037/1082-989X.8.4.434
#'
#' @name nice
#' @importFrom stats anova
#' @encoding UTF-8
#'
#' @examples
#'
#' ## example from Olejnik & Algina (2003)
#' # "Repeated Measures Design" (pp. 439):
#' data(md_12.1)
#' # create object of class afex_aov:
#' rmd <- aov_ez("id", "rt", md_12.1, within = c("angle", "noise"))
#' # use different es:
#' nice(rmd, es = "pes") # noise: .82
#' nice(rmd, es = "ges") # noise: .39
#'
#' # example using obk.long (see ?obk.long), a long version of the OBrienKaiser dataset from car.
#' data(obk.long)
#' # create object of class afex_aov:
#' tmp.aov <- aov_car(value ~ treatment * gender + Error(id/phase*hour), data = obk.long)
#'
#' nice(tmp.aov, observed = "gender")
#'
#' nice(tmp.aov, observed = "gender", sig.symbols = rep("", 4))
#'
#' \dontrun{
#' # use package ascii or xtable for formatting of tables ready for printing.
#'
#' full <- nice(tmp.aov, observed = "gender")
#'
#' require(ascii)
#' print(ascii(full, include.rownames = FALSE, caption = "ANOVA 1"), type = "org")
#'
#' require(xtable)
#' print.xtable(xtable(full, caption = "ANOVA 2"), include.rownames = FALSE)
#' }
#'
#' @export nice
# S3 generic: dispatches to the appropriate nice.* method (afex_aov, anova,
# mixed, ...) based on class(object).
nice <- function(object, ...) {
  UseMethod("nice", object)
}
#' @rdname nice
#' @method nice afex_aov
#' @export
nice.afex_aov <- function(object, es = NULL, observed = attr(object$anova_table, "observed"), correction = attr(object$anova_table, "correction"), MSE = NULL, intercept = NULL, p.adjust.method = attr(object$anova_table, "p.adjust.method"), sig.symbols = c(" +", " *", " **", " ***"), ...) {
  # Recover the options used when the stored anova_table was created, so that
  # nice() reproduces the same table layout as anova() on the same object.
  table_cols <- colnames(object$anova_table)
  if (is.null(es)) {
    # default: whichever effect-size columns are already present in the table
    es <- intersect(c("pes", "ges"), table_cols)
  }
  if (is.null(MSE)) {
    # MSE defaults to TRUE via the default set in anova.afex_aov
    MSE <- "MSE" %in% table_cols
  }
  if (is.null(intercept)) {
    # intercept defaults to FALSE via the default set in anova.afex_aov
    intercept <- "(Intercept)" %in% rownames(object$anova_table)
  }
  # Recompute the ANOVA table with the resolved options, then format it.
  full_table <- as.data.frame(anova(object, es = es, observed = observed,
                                    correction = correction, MSE = MSE,
                                    intercept = intercept,
                                    p.adjust.method = p.adjust.method))
  nice.anova(full_table, MSE = MSE, intercept = intercept, sig.symbols = sig.symbols)
}
#' @rdname nice
#' @method nice anova
#' @export
nice.anova <- function(object, MSE = TRUE, intercept = FALSE, sig.symbols = c(" +", " *", " **", " ***"), ...) {
  # Format an ANOVA table (as returned by car::Anova/anova) into a
  # print-ready data.frame of characters with class "nice_table".
  #
  # Args:
  #   object: data.frame/anova with columns "num Df", "den Df", "F",
  #     "Pr(>F)" and optionally "MSE", "ges", "pes".
  #   MSE: display the MSE column (if present)?
  #   intercept: keep the "(Intercept)" row?
  #   sig.symbols: significance symbols for p < .1/.05/.01/.001; supplied
  #     entries replace the defaults positionally.
  # Returns: data.frame of class c("nice_table", "data.frame").
  is.wholenumber <- function(x, tol = .Machine$double.eps^0.5) abs(x - round(x)) < tol
  anova_table <- object
  # Round dfs that are whole within tolerance before printing, otherwise show
  # two decimals. Fix: previously near-whole dfs (e.g. 1.9999999999) were
  # pasted unrounded, inconsistent with nice.mixed which applies round().
  format_df <- function(x) ifelse(is.wholenumber(x), round(x), formatC(x, digits = 2, format = "f"))
  anova_table[, "df"] <- paste(format_df(anova_table[, "num Df"]),
                               format_df(anova_table[, "den Df"]), sep = ", ")
  # Positionally merge user-supplied symbols into the defaults, so a shorter
  # vector only overrides the leading entries.
  symbols.use <- c(" +", " *", " **", " ***")
  symbols.use[seq_along(sig.symbols)] <- sig.symbols
  df.out <- data.frame(Effect = row.names(anova_table), df = anova_table[, "df"],
                       stringsAsFactors = FALSE)
  if (!is.null(anova_table$MSE))
    df.out <- cbind(df.out, data.frame(MSE = formatC(anova_table[, "MSE"], digits = 2, format = "f"),
                                       stringsAsFactors = FALSE))
  # Reuse the module-level make.stat() with stat = "F" instead of a local
  # make.fs() that duplicated its logic line for line.
  df.out <- cbind(df.out, data.frame(F = make.stat(anova_table, stat = "F", symbols.use),
                                     stringsAsFactors = FALSE))
  if (!is.null(anova_table$ges)) df.out$ges <- round_ps(anova_table$ges)
  if (!is.null(anova_table$pes)) df.out$pes <- round_ps(anova_table$pes)
  df.out$p.value <- round_ps(anova_table[, "Pr(>F)"])
  # Drop the intercept row unless explicitly requested.
  if (!intercept && df.out[1, 1] == "(Intercept)")
    df.out <- df.out[-1, , drop = FALSE]
  rownames(df.out) <- NULL
  # Carry over metadata consumed by print.nice_table().
  attr(df.out, "heading") <- attr(object, "heading")
  attr(df.out, "p.adjust.method") <- attr(object, "p.adjust.method")
  attr(df.out, "correction") <- attr(object, "correction")
  attr(df.out, "observed") <- attr(object, "observed")
  class(df.out) <- c("nice_table", class(df.out))
  df.out
}
# Format the test statistic `stat` (e.g. "F", "Chisq") to two decimals and
# append a significance symbol based on the matching "Pr(>stat)" column:
# symbols[4] for p < .001, symbols[3] for < .01, symbols[2] for < .05,
# symbols[1] for < .1, and no symbol otherwise. NA p-values yield NA.
make.stat <- function(anova, stat, symbols) {
  p_vals <- anova[[paste0("Pr(>", stat, ")")]]
  stat_chr <- formatC(anova[[stat]], digits = 2, format = "f")
  ifelse(p_vals < 0.001, str_c(stat_chr, symbols[4]),
         ifelse(p_vals < 0.01, str_c(stat_chr, symbols[3]),
                ifelse(p_vals < 0.05, str_c(stat_chr, symbols[2]),
                       ifelse(p_vals < 0.1, str_c(stat_chr, symbols[1]), stat_chr))))
}
# TRUE where x is a whole number up to floating-point tolerance `tol`
# (default: square root of machine epsilon). Vectorized over x.
is.wholenumber <- function(x, tol = .Machine$double.eps^0.5) {
  abs(x - round(x)) < tol
}
#' @rdname nice
#' @method nice mixed
#' @export
nice.mixed <- function(object, sig.symbols = c(" +", " *", " **", " ***"), ...) {
  # Format the ANOVA table of a mixed() object as a print-ready character
  # data.frame of class "nice_table". The layout depends on the method used
  # to obtain p-values, stored in attr(object, "method"):
  #   "KR"  -> F tests with (possibly fractional) num/den dfs and F.scaling
  #   "PB"  -> parametric-bootstrap chi-square tests
  #   "LRT" -> likelihood-ratio chi-square tests
  anova_table <- object$anova_table
  # Positionally merge user-supplied symbols into the defaults, so a shorter
  # vector only overrides the leading entries.
  symbols.use <- c(" +", " *", " **", " ***")
  symbols.use[seq_along(sig.symbols)] <- sig.symbols
  if (is.null(attr(object, "method"))) {
    # Objects from old afex versions lack the "method" attribute; return the
    # stored table unformatted rather than failing.
    df.out <- object[[1]]
    warning("mixed object was created with old version of afex, table not nicely formatted.")
  } else if (attr(object, "method") == "KR") {
    # Kenward-Roger: whole dfs print as integers, fractional dfs with two decimals.
    anova_table[,"df"] <- paste(ifelse(is.wholenumber(anova_table[,"num Df"]), round(anova_table[,"num Df"]), formatC(anova_table[,"num Df"], digits = 2, format = "f")), ifelse(is.wholenumber(anova_table[,"den Df"]), round(anova_table[,"den Df"]), formatC(anova_table[,"den Df"], digits = 2, format = "f")), sep = ", ")
    df.out <- data.frame(Effect = row.names(anova_table), df = anova_table[,"df"], "F.scaling" = formatC(anova_table[,"F.scaling"], digits = 2, format = "f"), stringsAsFactors = FALSE, check.names = FALSE)
    df.out <- cbind(df.out, data.frame(F = make.stat(anova_table, stat = "F", symbols.use), stringsAsFactors = FALSE))
    df.out$p.value <- round_ps(anova_table[,"Pr(>F)"])
  } else if (attr(object, "method") == "PB") {
    # Parametric bootstrap: copy the bootstrap p-values into the Chisq p
    # column so make.stat() can pick them up under the standard name.
    anova_table[,"Pr(>Chisq)"] <- anova_table[,"Pr(>PB)"]
    df.out <- data.frame(Effect = row.names(anova_table), df = anova_table[,"Chi Df"], Chisq = make.stat(anova_table, stat = "Chisq", symbols.use), p.value = round_ps(anova_table[,"Pr(>Chisq)"]), stringsAsFactors = FALSE, check.names = FALSE)
  } else if (attr(object, "method") == "LRT") {
    # Likelihood-ratio tests: same layout as "PB" but p-values used as-is.
    df.out <- data.frame(Effect = row.names(anova_table), df = anova_table[,"Chi Df"], Chisq = make.stat(anova_table, stat = "Chisq", symbols.use), p.value = round_ps(anova_table[,"Pr(>Chisq)"]), stringsAsFactors = FALSE, check.names = FALSE)
  } else stop("method of mixed object not supported.")
  rownames(df.out) <- NULL
  class(df.out) <- c("nice_table", class(df.out))
  df.out
}
#' @rdname nice
#' @method print nice_table
#' @export
print.nice_table <- function(x, ...) {
  # Print method for nice_table objects: optional heading first, then the
  # plain data.frame body, then a note naming the sphericity correction
  # (unless it is "none"). Returns x invisibly so the object pipes cleanly.
  heading <- attr(x, "heading")
  if (!is.null(heading)) {
    cat(heading, sep = "\n")
  }
  print.data.frame(x)
  correction_method <- attr(x, "correction")
  if (!is.null(correction_method) && correction_method != "none") {
    cat("\nSphericity correction method:", correction_method, "\n")
  }
  invisible(x)
}
|
#' Get site attainments
#'
#' Get site attainment categories for chlorophyll or light attenuation
#'
#' @param avedatsite result returned from \code{\link{anlz_avedatsite}}
#' @param thr chr string indicating with water quality value and appropriate threshold to to plot, one of "chl" for chlorophyll and "la" for light availability
#' @param trgs optional \code{data.frame} for annual bay segment water quality targets, defaults to \code{\link{targets}}
#' @param yrrng optional numeric value for year to return, defaults to all
#' @param thrs logical indicating if attainment category is relative to targets (default) or thresholds
#'
#' @return a \code{data.frame} for each year and site showing the attainment category
#'
#' @details This function is a simplication of the attainment categories returned by \code{\link{anlz_attain}}. Sites are only compared to the targets/thresholds that apply separately for chlorophyll or light attenuation.
#'
#' @concept analyze
#'
#' @export
#'
#' @examples
#' avedatsite <- anlz_avedatsite(epcdata)
#' anlz_attainsite(avedatsite)
anlz_attainsite <- function(avedatsite, thr = c('chla', 'la'), trgs = NULL, yrrng = NULL, thrs = FALSE){
  # Compare site-level annual averages against bay-segment targets (default)
  # or thresholds for chlorophyll ("chla") or light attenuation ("la").
  # Returns the annual site averages joined with the relevant target values
  # and a "met" column ("yes"/"no") indicating attainment.
  # default targets from data file
  if(is.null(trgs))
    trgs <- targets
  # wq value to evaluate
  thr <- match.arg(thr)
  # default year range: all years present in the annual averages
  if(is.null(yrrng))
    yrrng <- avedatsite$ann %>%
      dplyr::pull(yr) %>%
      unique %>%
      sort
  # reshape targets to one row per bay segment and variable, keeping only the
  # requested water-quality variable
  trgs <- trgs %>%
    tidyr::gather('var', 'val', -bay_segment, -name) %>%
    tidyr::separate(var, c('var', 'trgtyp'), sep = '_') %>%
    tidyr::spread(trgtyp, val) %>%  # fix: was unqualified spread(), failing when tidyr not attached
    dplyr::select(bay_segment, var, target, smallex, thresh) %>%
    dplyr::filter(grepl(paste0('^', thr), var))
  # get annual averages, join with targets
  annavesite <- avedatsite$ann %>%
    dplyr::mutate(var = gsub('mean\\_', '', var)) %>%
    dplyr::filter(grepl(paste0('^', thr), var)) %>%
    dplyr::filter(yr %in% yrrng) %>%
    dplyr::left_join(trgs, by = c('bay_segment', 'var'))
  # sanity check; collapse yrrng so the error message is a single string even
  # when multiple years were requested (previously stop() received a vector)
  if(nrow(annavesite) == 0)
    stop(paste(paste(yrrng, collapse = ', '), "not in epcdata"))
  # flag whether each value is below the target (default) or threshold
  if(!thrs)
    out <- annavesite %>%
      dplyr::mutate(
        met = ifelse(val < target, 'yes', 'no')
      )
  if(thrs)
    out <- annavesite %>%
      dplyr::mutate(
        met = ifelse(val < thresh, 'yes', 'no')
      )
  return(out)
}
| /R/anlz_attainsite.R | permissive | tbep-tech/tbeptools | R | false | false | 2,338 | r | #' Get site attainments
#'
#' Get site attainment categories for chlorophyll or light attenuation
#'
#' @param avedatsite result returned from \code{\link{anlz_avedatsite}}
#' @param thr chr string indicating with water quality value and appropriate threshold to to plot, one of "chl" for chlorophyll and "la" for light availability
#' @param trgs optional \code{data.frame} for annual bay segment water quality targets, defaults to \code{\link{targets}}
#' @param yrrng optional numeric value for year to return, defaults to all
#' @param thrs logical indicating if attainment category is relative to targets (default) or thresholds
#'
#' @return a \code{data.frame} for each year and site showing the attainment category
#'
#' @details This function is a simplication of the attainment categories returned by \code{\link{anlz_attain}}. Sites are only compared to the targets/thresholds that apply separately for chlorophyll or light attenuation.
#'
#' @concept analyze
#'
#' @export
#'
#' @examples
#' avedatsite <- anlz_avedatsite(epcdata)
#' anlz_attainsite(avedatsite)
anlz_attainsite <- function(avedatsite, thr = c('chla', 'la'), trgs = NULL, yrrng = NULL, thrs = FALSE){
  # Compare site-level annual averages against bay-segment targets (default)
  # or thresholds for chlorophyll ("chla") or light attenuation ("la").
  # Returns the annual site averages joined with the relevant target values
  # and a "met" column ("yes"/"no") indicating attainment.
  # default targets from data file
  if(is.null(trgs))
    trgs <- targets
  # wq value to evaluate
  thr <- match.arg(thr)
  # default year range: all years present in the annual averages
  if(is.null(yrrng))
    yrrng <- avedatsite$ann %>%
      dplyr::pull(yr) %>%
      unique %>%
      sort
  # reshape targets to one row per bay segment and variable, keeping only the
  # requested water-quality variable
  trgs <- trgs %>%
    tidyr::gather('var', 'val', -bay_segment, -name) %>%
    tidyr::separate(var, c('var', 'trgtyp'), sep = '_') %>%
    tidyr::spread(trgtyp, val) %>%  # fix: was unqualified spread(), failing when tidyr not attached
    dplyr::select(bay_segment, var, target, smallex, thresh) %>%
    dplyr::filter(grepl(paste0('^', thr), var))
  # get annual averages, join with targets
  annavesite <- avedatsite$ann %>%
    dplyr::mutate(var = gsub('mean\\_', '', var)) %>%
    dplyr::filter(grepl(paste0('^', thr), var)) %>%
    dplyr::filter(yr %in% yrrng) %>%
    dplyr::left_join(trgs, by = c('bay_segment', 'var'))
  # sanity check; collapse yrrng so the error message is a single string even
  # when multiple years were requested (previously stop() received a vector)
  if(nrow(annavesite) == 0)
    stop(paste(paste(yrrng, collapse = ', '), "not in epcdata"))
  # flag whether each value is below the target (default) or threshold
  if(!thrs)
    out <- annavesite %>%
      dplyr::mutate(
        met = ifelse(val < target, 'yes', 'no')
      )
  if(thrs)
    out <- annavesite %>%
      dplyr::mutate(
        met = ifelse(val < thresh, 'yes', 'no')
      )
  return(out)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/g.R
\name{local_g_sims}
\alias{local_g_sims}
\title{Local G Simulations}
\usage{
local_g_sims(x, weights, permutations, type = "g")
}
\arguments{
\item{x}{A vector of numerical values}
\item{weights}{Weights structure from spdep, must be style "B"}
\item{permutations}{Number of permutations, the default is 999}
\item{type}{designates the type of statistic g or gstar, g is the default}
}
\value{
local.sims Reference distributions of local G statistics for each location in matrix form
}
\description{
Function to compute reference distributions of local G and G* statistics
}
| /man/local_g_sims.Rd | no_license | morrisonge/spatmap | R | false | true | 664 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/g.R
\name{local_g_sims}
\alias{local_g_sims}
\title{Local G Simulations}
\usage{
local_g_sims(x, weights, permutations, type = "g")
}
\arguments{
\item{x}{A vector of numerical values}
\item{weights}{Weights structure from spdep, must be style "B"}
\item{permutations}{Number of permutations, the default is 999}
\item{type}{designates the type of statistic g or gstar, g is the default}
}
\value{
local.sims Reference distributions of local G statistics for each location in matrix form
}
\description{
Function to compute reference distributions of local G and G* statistics
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/speech_objects.R
\name{Status}
\alias{Status}
\title{Google Cloud Speech API Objects
Google Cloud Speech API.}
\usage{
Status(code = NULL, details = NULL, message = NULL)
}
\arguments{
\item{code}{The status code, which should be an enum value of google}
\item{details}{A list of messages that carry the error details}
\item{message}{A developer-facing error message, which should be in English}
}
\value{
Status object
}
\description{
Auto-generated code by googleAuthR::gar_create_api_objects
at 2016-09-03 23:47:37
filename: /Users/mark/dev/R/autoGoogleAPI/googlespeechv1beta1.auto/R/speech_objects.R
api_json: api_json
}
\details{
Objects for use by the functions created by googleAuthR::gar_create_api_skeleton
Status Object
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
The `Status` type defines a logical error model that is suitable for differentprogramming environments, including REST APIs and RPC APIs. It is used by[gRPC](https://github.com/grpc). The error model is designed to be:- Simple to use and understand for most users- Flexible enough to meet unexpected needs# OverviewThe `Status` message contains three pieces of data: error code, error message,and error details. The error code should be an enum value ofgoogle.rpc.Code, but it may accept additional error codes if needed. Theerror message should be a developer-facing English message that helpsdevelopers *understand* and *resolve* the error. If a localized user-facingerror message is needed, put the localized message in the error details orlocalize it in the client. The optional error details may contain arbitraryinformation about the error. There is a predefined set of error detail typesin the package `google.rpc` which can be used for common error conditions.# Language mappingThe `Status` message is the logical representation of the error model, but itis not necessarily the actual wire format. When the `Status` message isexposed in different client libraries and different wire protocols, it can bemapped differently. For example, it will likely be mapped to some exceptionsin Java, but more likely mapped to some error codes in C.# Other usesThe error model and the `Status` message can be used in a variety ofenvironments, either with or without APIs, to provide aconsistent developer experience across different environments.Example uses of this error model include:- Partial errors. If a service needs to return partial errors to the client, it may embed the `Status` in the normal response to indicate the partial errors.- Workflow errors. A typical workflow has multiple steps. Each step may have a `Status` message for error reporting purpose.- Batch operations. 
If a client uses batch request and batch response, the `Status` message should be used directly inside batch response, one for each error sub-response.- Asynchronous operations. If an API call embeds asynchronous operation results in its response, the status of those operations should be represented directly using the `Status` message.- Logging. If some API errors are stored in logs, the message `Status` could be used directly after any stripping needed for security/privacy reasons.
}
| /googlespeechv1beta1.auto/man/Status.Rd | permissive | Phippsy/autoGoogleAPI | R | false | true | 3,281 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/speech_objects.R
\name{Status}
\alias{Status}
\title{Google Cloud Speech API Objects
Google Cloud Speech API.}
\usage{
Status(code = NULL, details = NULL, message = NULL)
}
\arguments{
\item{code}{The status code, which should be an enum value of google.rpc.Code}
\item{details}{A list of messages that carry the error details}
\item{message}{A developer-facing error message, which should be in English}
}
\value{
Status object
}
\description{
Auto-generated code by googleAuthR::gar_create_api_objects
at 2016-09-03 23:47:37
filename: /Users/mark/dev/R/autoGoogleAPI/googlespeechv1beta1.auto/R/speech_objects.R
api_json: api_json
}
\details{
Objects for use by the functions created by googleAuthR::gar_create_api_skeleton
Status Object
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
The `Status` type defines a logical error model that is suitable for differentprogramming environments, including REST APIs and RPC APIs. It is used by[gRPC](https://github.com/grpc). The error model is designed to be:- Simple to use and understand for most users- Flexible enough to meet unexpected needs# OverviewThe `Status` message contains three pieces of data: error code, error message,and error details. The error code should be an enum value ofgoogle.rpc.Code, but it may accept additional error codes if needed. Theerror message should be a developer-facing English message that helpsdevelopers *understand* and *resolve* the error. If a localized user-facingerror message is needed, put the localized message in the error details orlocalize it in the client. The optional error details may contain arbitraryinformation about the error. There is a predefined set of error detail typesin the package `google.rpc` which can be used for common error conditions.# Language mappingThe `Status` message is the logical representation of the error model, but itis not necessarily the actual wire format. When the `Status` message isexposed in different client libraries and different wire protocols, it can bemapped differently. For example, it will likely be mapped to some exceptionsin Java, but more likely mapped to some error codes in C.# Other usesThe error model and the `Status` message can be used in a variety ofenvironments, either with or without APIs, to provide aconsistent developer experience across different environments.Example uses of this error model include:- Partial errors. If a service needs to return partial errors to the client, it may embed the `Status` in the normal response to indicate the partial errors.- Workflow errors. A typical workflow has multiple steps. Each step may have a `Status` message for error reporting purpose.- Batch operations. 
If a client uses batch request and batch response, the `Status` message should be used directly inside batch response, one for each error sub-response.- Asynchronous operations. If an API call embeds asynchronous operation results in its response, the status of those operations should be represented directly using the `Status` message.- Logging. If some API errors are stored in logs, the message `Status` could be used directly after any stripping needed for security/privacy reasons.
}
|
## Put comments here that give an overall description of what your
## functions do
# Author: Sven Mader
# Date: September 2014
# Description:
# Both functions (makeCacheMatrix and cacheSolve) are part of the 2nd programming
# assessment for the R programming course on Coursera.
# The aim of the function set is to provide an extended matrix with get and set
# functions to store larger matrices and their inverse, plus to calculate the
# inverse with use of the cached content.
# makeCacheMatrix: wrap a matrix in a closure that can also cache its inverse.
# Parameter: x = matrix() -- the matrix to store.
# Return value: a list of accessor functions (set/get/setinverse/getinverse)
# for the stored matrix and its cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL means "not computed yet".
  cached_inverse <- NULL
  # Replace the stored matrix; keep the cache when nothing actually changed.
  set <- function(new_mat = matrix()) {
    if (!identical(x, new_mat)) {
      cached_inverse <<- NULL  # matrix differs, so the cached inverse is stale
    }
    x <<- new_mat
  }
  get <- function() {
    x
  }
  # Store / retrieve the cached inverse.
  setinverse <- function(solve) {
    cached_inverse <<- solve
  }
  getinverse <- function() {
    cached_inverse
  }
  # Expose the four accessors by name.
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
# cacheSolve: return the inverse of the special "matrix" created by
# makeCacheMatrix, reusing the cached value when one is available.
# Parameter: x -- a list of accessor functions produced by makeCacheMatrix.
#            ... -- further arguments forwarded to solve().
# Return value: the inverse of the stored matrix, either calculated from
# scratch if it does not yet exist or returned from the cache.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inv <- x$getinverse()
  # A non-NULL cache means the inverse is already known; reuse it.
  if (!is.null(inv)) {
    message("... getting cached data ...")
    return(inv)
  }
  # Cache miss: fetch the matrix data, invert it, and store the result.
  data <- x$get()
  inv <- solve(data, ...)
  x$setinverse(inv)
  # Return the inverted matrix.
  inv
}
| /cachematrix.R | no_license | masvli/ProgrammingAssignment2 | R | false | false | 2,152 | r | ## Put comments here that give an overall description of what your
## functions do
# Author: Sven Mader
# Date: September 2014
# Description:
# Both functions (makeCacheMatrix and cacheSolve) are part of the 2nd programming
# assessment for the R programming course on Coursera.
# The aim of the function set is to provide an extended matrix with get and set
# functions to store larger matrices and their inverse, plus to calculate the
# inverse with use of the cached content.
# makeCacheMatrix: wrap a matrix in a closure that can also cache its inverse.
# Parameter: x = matrix() -- the matrix to store.
# Return value: a list of accessor functions (set/get/setinverse/getinverse)
# for the stored matrix and its cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL means "not computed yet".
  cached_inverse <- NULL
  # Replace the stored matrix; keep the cache when nothing actually changed.
  set <- function(new_mat = matrix()) {
    if (!identical(x, new_mat)) {
      cached_inverse <<- NULL  # matrix differs, so the cached inverse is stale
    }
    x <<- new_mat
  }
  get <- function() {
    x
  }
  # Store / retrieve the cached inverse.
  setinverse <- function(solve) {
    cached_inverse <<- solve
  }
  getinverse <- function() {
    cached_inverse
  }
  # Expose the four accessors by name.
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
# cacheSolve: return the inverse of the special "matrix" created by
# makeCacheMatrix, reusing the cached value when one is available.
# Parameter: x -- a list of accessor functions produced by makeCacheMatrix.
#            ... -- further arguments forwarded to solve().
# Return value: the inverse of the stored matrix, either calculated from
# scratch if it does not yet exist or returned from the cache.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inv <- x$getinverse()
  # A non-NULL cache means the inverse is already known; reuse it.
  if (!is.null(inv)) {
    message("... getting cached data ...")
    return(inv)
  }
  # Cache miss: fetch the matrix data, invert it, and store the result.
  data <- x$get()
  inv <- solve(data, ...)
  x$setinverse(inv)
  # Return the inverted matrix.
  inv
}
|
library(data.table)
library(FeatureHashing)
library(Matrix)
library(xgboost)
# Read the training data as a plain data.frame (not a data.table).
df <- fread("../input/train.csv", data.table = FALSE)
# Set names of character and numeric features.
X_char <- names(df)[sapply(df, is.character)]
X_num <- setdiff(names(df), c(X_char, "ID", "target"))
# Hash the character features and retain only the more frequent columns.
d1 <- hashed.model.matrix(~., df[, X_char], signed.hash = FALSE)
d1 <- d1[, colSums(d1) > 20]
# Set numeric NAs to -1.
d2 <- df[, X_num]
d2[is.na(d2)] <- -1
# Bind all features; cbind() dispatches to the sparse Matrix method so the
# result stays sparse (Matrix::cBind() is defunct in current Matrix versions).
d3 <- cbind(d1, as.matrix(d2))
# Check size of the resulting matrix.
dim(d3)
print(object.size(d3), units = "Mb")
# Minimal xgboost example (untuned, small number of rounds).
# Spell out print_every_n instead of relying on partial matching of `print`.
dmodel <- xgb.DMatrix(d3, label = df$target)
param <- list(objective = "binary:logistic", eta = 0.05,
              max_depth = 5, subsample = 0.5)
cv <- xgb.cv(nfold = 2, metrics = "logloss", print_every_n = 10,
             nrounds = 100, params = param, data = dmodel)
| /feature hashing trick.R | no_license | JohnM-TX/0-Rsnippets | R | false | false | 1,020 | r | library(data.table)
library(FeatureHashing)
library(Matrix)
library(xgboost)
# Read the training data as a plain data.frame (not a data.table).
df <- fread("../input/train.csv", data.table = FALSE)
# Set names of character and numeric features.
X_char <- names(df)[sapply(df, is.character)]
X_num <- setdiff(names(df), c(X_char, "ID", "target"))
# Hash the character features and retain only the more frequent columns.
d1 <- hashed.model.matrix(~., df[, X_char], signed.hash = FALSE)
d1 <- d1[, colSums(d1) > 20]
# Set numeric NAs to -1.
d2 <- df[, X_num]
d2[is.na(d2)] <- -1
# Bind all features; cbind() dispatches to the sparse Matrix method so the
# result stays sparse (Matrix::cBind() is defunct in current Matrix versions).
d3 <- cbind(d1, as.matrix(d2))
# Check size of the resulting matrix.
dim(d3)
print(object.size(d3), units = "Mb")
# Minimal xgboost example (untuned, small number of rounds).
# Spell out print_every_n instead of relying on partial matching of `print`.
dmodel <- xgb.DMatrix(d3, label = df$target)
param <- list(objective = "binary:logistic", eta = 0.05,
              max_depth = 5, subsample = 0.5)
cv <- xgb.cv(nfold = 2, metrics = "logloss", print_every_n = 10,
             nrounds = 100, params = param, data = dmodel)
|
\name{gtransg}
\alias{dgtransg} \alias{pgtransg} \alias{qgtransg} \alias{rgtransg} \alias{mpsgtransg} \alias{qqgtransg}
\title{Generalized transmuted G distribution}
\description{Computes the pdf, cdf, quantile, and random numbers, draws the q-q plot, and estimates the parameters of the generalized transmuted \code{G} distribution. The general form for the probability density function (pdf) of the generalized transmuted \code{G} distribution due to Merovci et al. (2017) is given by
\deqn{f(x,{\Theta}) = a\,g(x-\mu,\theta ){\left( {G(x-\mu,\theta )} \right)^{a - 1}}\left[ {1 + b - 2bG(x-\mu,\theta )} \right]{\left[ {1 + b\left( {1 - G(x-\mu,\theta )} \right)} \right]^{a - 1}},}
where \eqn{\theta} is the baseline family parameter vector. Also, a>0, b<1, and \eqn{\mu} are the extra parameters induced to the baseline cumulative distribution function (cdf) \code{G} whose pdf is \code{g}. The general form for the cumulative distribution function (cdf) of the generalized transmuted \code{G} distribution distribution is given by
\deqn{F(x,{\Theta}) = {\left( {G(x-\mu,\theta )} \right)^a}{\left[ {1 + b\left( {1 - G(x-\mu,\theta )} \right)} \right]^a}.}
Here, the baseline \code{G} refers to the cdf of famous families such as: Birnbaum-Saunders, Burr type XII, Exponential, Chen, Chisquare, F, Frechet, Gamma, Gompertz, Linear failure rate (lfr), Log-normal, Log-logistic, Lomax, Rayleigh, and Weibull. The parameter vector is \eqn{\Theta=(a,b,\theta,\mu)} where \eqn{\theta} is the baseline \code{G} family's parameter space. If \eqn{\theta} consists of the shape and scale parameters, the last component of \eqn{\theta} is the scale parameter (here, a and b are the first and second shape parameters). Always, the location parameter \eqn{\mu} is placed in the last component of \eqn{\Theta}.}
\usage{
dgtransg(mydata, g, param, location = TRUE, log=FALSE)
pgtransg(mydata, g, param, location = TRUE, log.p = FALSE, lower.tail = TRUE)
qgtransg(p, g, param, location = TRUE, log.p = FALSE, lower.tail = TRUE)
rgtransg(n, g, param, location = TRUE)
qqgtransg(mydata, g, location = TRUE, method)
mpsgtransg(mydata, g, location = TRUE, method, sig.level)
}
\arguments{
\item{g}{The name of family's pdf including: "\code{birnbaum-saunders}", "\code{burrxii}", "\code{chisq}", "\code{chen}", "\code{exp}", "\code{f}", "\code{frechet}", "\code{gamma}", "\code{gompetrz}", "\code{lfr}", "\code{log-normal}", "\code{log-logistic}", "\code{lomax}", "\code{rayleigh}", and "\code{weibull}".}
\item{p}{a vector of value(s) between 0 and 1 at which the quantile needs to be computed.}
\item{n}{number of realizations to be generated.}
\item{mydata}{Vector of observations.}
\item{param}{parameter vector \eqn{\Theta=(a,b,\theta,\mu)}}
\item{location}{If \code{FALSE}, then the location parameter will be omitted.}
\item{log}{If \code{TRUE}, then log(pdf) is returned.}
\item{log.p}{If \code{TRUE}, then log(cdf) is returned and quantile is computed for \code{exp(-p)}.}
\item{lower.tail}{If \code{FALSE}, then \code{1-cdf} is returned and quantile is computed for \code{1-p}.}
\item{method}{The used method for maximizing the sum of log-spacing function. It will be "\code{BFGS}", "\code{CG}", "\code{L-BFGS-B}", "\code{Nelder-Mead}", or "\code{SANN}".}
\item{sig.level}{Significance level for the Chi-square goodness-of-fit test.}
}
\details{
It can be shown that the Moran's statistic follows a normal distribution. Also, a chi-square approximation exists for small samples whose mean and variance approximately are m(\code{log}(m)+0.57722)-0.5-1/(12m) and m(\eqn{\pi^2}/6-1)-0.5-1/(6m), respectively, with \code{m=n+1}, see Cheng and Stephens (1989). So, a hypothesis test can be constructed based on a sample of \code{n} independent realizations at the given significance level, indicated above as \code{sig.level}.}
\value{
\enumerate{
\item A vector of the same length as \code{mydata}, giving the pdf values computed at \code{mydata}.
\item A vector of the same length as \code{mydata}, giving the cdf values computed at \code{mydata}.
\item A vector of the same length as \code{p}, giving the quantile values computed at \code{p}.
\item A vector of the same length as \code{n}, giving the random numbers realizations.
\item A sequence of goodness-of-fit statistics such as: Akaike Information Criterion (\code{AIC}), Consistent Akaike Information Criterion (\code{CAIC}), Bayesian Information Criterion (\code{BIC}), Hannan-Quinn information criterion (\code{HQIC}), Cramer-von Misses statistic (\code{CM}), Anderson Darling statistic (\code{AD}), log-likelihood statistic (\code{log}), and Moran's statistic (\code{M}). The Kolmogorov-Smirnov (\code{KS}) test statistic and corresponding \code{p-value}. The Chi-square test statistic, critical upper tail Chi-square distribution, related \code{p-value}, and the convergence status.
}
}
\references{
Cheng, R. C. H. and Stephens, M. A. (1989). A goodness-of-fit test using Moran's statistic with estimated parameters, \emph{Biometrika}, 76 (2), 385-392.
Merovci, F., Alizadeh, M., Yousof, H. M., and Hamedani, G. G. (2017). The exponentiated transmuted-G family of distributions: Theory and applications, \emph{Communications in Statistics-Theory and Methods}, 46(21), 10800-10822.}
\author{Mahdi Teimouri}
\examples{
mydata<-rweibull(100,shape=2,scale=2)+3
dgtransg(mydata, "weibull", c(1,0.5,2,2,3))
pgtransg(mydata, "weibull", c(1,0.5,2,2,3))
qgtransg(runif(100), "weibull", c(1,0.5,2,2,3))
rgtransg(100, "weibull", c(1,0.5,2,2,3))
qqgtransg(mydata, "weibull", TRUE, "Nelder-Mead")
mpsgtransg(mydata, "weibull", TRUE, "Nelder-Mead", 0.05)
}
| /man/gtransg.Rd | no_license | cran/MPS | R | false | false | 5,695 | rd | \name{gtransg}
\alias{dgtransg} \alias{pgtransg} \alias{qgtransg} \alias{rgtransg} \alias{mpsgtransg} \alias{qqgtransg}
\title{Generalized transmuted G distribution}
\description{Computes the pdf, cdf, quantile, and random numbers, draws the q-q plot, and estimates the parameters of the generalized transmuted \code{G} distribution. The general form for the probability density function (pdf) of the generalized transmuted \code{G} distribution due to Merovci et al. (2017) is given by
\deqn{f(x,{\Theta}) = a\,g(x-\mu,\theta ){\left( {G(x-\mu,\theta )} \right)^{a - 1}}\left[ {1 + b - 2bG(x-\mu,\theta )} \right]{\left[ {1 + b\left( {1 - G(x-\mu,\theta )} \right)} \right]^{a - 1}},}
where \eqn{\theta} is the baseline family parameter vector. Also, a>0, b<1, and \eqn{\mu} are the extra parameters induced to the baseline cumulative distribution function (cdf) \code{G} whose pdf is \code{g}. The general form for the cumulative distribution function (cdf) of the generalized transmuted \code{G} distribution distribution is given by
\deqn{F(x,{\Theta}) = {\left( {G(x-\mu,\theta )} \right)^a}{\left[ {1 + b\left( {1 - G(x-\mu,\theta )} \right)} \right]^a}.}
Here, the baseline \code{G} refers to the cdf of famous families such as: Birnbaum-Saunders, Burr type XII, Exponential, Chen, Chisquare, F, Frechet, Gamma, Gompertz, Linear failure rate (lfr), Log-normal, Log-logistic, Lomax, Rayleigh, and Weibull. The parameter vector is \eqn{\Theta=(a,b,\theta,\mu)} where \eqn{\theta} is the baseline \code{G} family's parameter space. If \eqn{\theta} consists of the shape and scale parameters, the last component of \eqn{\theta} is the scale parameter (here, a and b are the first and second shape parameters). Always, the location parameter \eqn{\mu} is placed in the last component of \eqn{\Theta}.}
\usage{
dgtransg(mydata, g, param, location = TRUE, log=FALSE)
pgtransg(mydata, g, param, location = TRUE, log.p = FALSE, lower.tail = TRUE)
qgtransg(p, g, param, location = TRUE, log.p = FALSE, lower.tail = TRUE)
rgtransg(n, g, param, location = TRUE)
qqgtransg(mydata, g, location = TRUE, method)
mpsgtransg(mydata, g, location = TRUE, method, sig.level)
}
\arguments{
\item{g}{The name of family's pdf including: "\code{birnbaum-saunders}", "\code{burrxii}", "\code{chisq}", "\code{chen}", "\code{exp}", "\code{f}", "\code{frechet}", "\code{gamma}", "\code{gompetrz}", "\code{lfr}", "\code{log-normal}", "\code{log-logistic}", "\code{lomax}", "\code{rayleigh}", and "\code{weibull}".}
\item{p}{a vector of value(s) between 0 and 1 at which the quantile needs to be computed.}
\item{n}{number of realizations to be generated.}
\item{mydata}{Vector of observations.}
\item{param}{parameter vector \eqn{\Theta=(a,b,\theta,\mu)}}
\item{location}{If \code{FALSE}, then the location parameter will be omitted.}
\item{log}{If \code{TRUE}, then log(pdf) is returned.}
\item{log.p}{If \code{TRUE}, then log(cdf) is returned and quantile is computed for \code{exp(-p)}.}
\item{lower.tail}{If \code{FALSE}, then \code{1-cdf} is returned and quantile is computed for \code{1-p}.}
\item{method}{The used method for maximizing the sum of log-spacing function. It will be "\code{BFGS}", "\code{CG}", "\code{L-BFGS-B}", "\code{Nelder-Mead}", or "\code{SANN}".}
\item{sig.level}{Significance level for the Chi-square goodness-of-fit test.}
}
\details{
It can be shown that the Moran's statistic follows a normal distribution. Also, a chi-square approximation exists for small samples whose mean and variance approximately are m(\code{log}(m)+0.57722)-0.5-1/(12m) and m(\eqn{\pi^2}/6-1)-0.5-1/(6m), respectively, with \code{m=n+1}, see Cheng and Stephens (1989). So, a hypothesis test can be constructed based on a sample of \code{n} independent realizations at the given significance level, indicated above as \code{sig.level}.}
\value{
\enumerate{
\item A vector of the same length as \code{mydata}, giving the pdf values computed at \code{mydata}.
\item A vector of the same length as \code{mydata}, giving the cdf values computed at \code{mydata}.
\item A vector of the same length as \code{p}, giving the quantile values computed at \code{p}.
\item A vector of the same length as \code{n}, giving the random numbers realizations.
\item A sequence of goodness-of-fit statistics such as: Akaike Information Criterion (\code{AIC}), Consistent Akaike Information Criterion (\code{CAIC}), Bayesian Information Criterion (\code{BIC}), Hannan-Quinn information criterion (\code{HQIC}), Cramer-von Misses statistic (\code{CM}), Anderson Darling statistic (\code{AD}), log-likelihood statistic (\code{log}), and Moran's statistic (\code{M}). The Kolmogorov-Smirnov (\code{KS}) test statistic and corresponding \code{p-value}. The Chi-square test statistic, critical upper tail Chi-square distribution, related \code{p-value}, and the convergence status.
}
}
\references{
Cheng, R. C. H. and Stephens, M. A. (1989). A goodness-of-fit test using Moran's statistic with estimated parameters, \emph{Biometrika}, 76 (2), 385-392.
Merovci, F., Alizadeh, M., Yousof, H. M., and Hamedani, G. G. (2017). The exponentiated transmuted-G family of distributions: Theory and applications, \emph{Communications in Statistics-Theory and Methods}, 46(21), 10800-10822.}
\author{Mahdi Teimouri}
\examples{
mydata<-rweibull(100,shape=2,scale=2)+3
dgtransg(mydata, "weibull", c(1,0.5,2,2,3))
pgtransg(mydata, "weibull", c(1,0.5,2,2,3))
qgtransg(runif(100), "weibull", c(1,0.5,2,2,3))
rgtransg(100, "weibull", c(1,0.5,2,2,3))
qqgtransg(mydata, "weibull", TRUE, "Nelder-Mead")
mpsgtransg(mydata, "weibull", TRUE, "Nelder-Mead", 0.05)
}
|
# Code created to separate databases according to game, expertise and condition.
# Setting the work directory and importing the files.
wDirectory <- "C:/Users/ru25tas/Dropbox/Analysis"
csvFile <- "160222FullETDatabase.csv"
setwd(wDirectory)
fullDatabase <- read.table(csvFile, header = TRUE, sep = ";", dec = ",",
                           na.strings = "NA")
fullDatabase <- as.data.frame(fullDatabase)
# Helper: drop the named columns from a data frame. Absent names are simply
# ignored; the old pattern df[, -which(colnames(df) == nm)] silently dropped
# EVERY column when nm was missing, because -integer(0) selects nothing.
dropColumns <- function(df, cols) {
  df[, setdiff(colnames(df), cols), drop = FALSE]
}
# Subsetting the databases according to game, group of expertise, and condition.
LoLExperts <- subset(fullDatabase,
                     Game == "lol" & Expertise == "expert" & Condition == "expert")
LoLExperts2 <- subset(fullDatabase,
                      Game == "lol" & Expertise == "expert" & Condition == "novice")
LoLNovices <- subset(fullDatabase, Game == "lol" & Expertise == "novice")
dotaExperts <- subset(fullDatabase, Game == "dota")
# Cleaning the data of columns that are all NA for each group.
LoLExperts <- dropColumns(LoLExperts, c("Elo", "GroupByElo"))
LoLExperts2 <- dropColumns(LoLExperts2, c("Elo", "GroupByElo"))
LoLNovices <- dropColumns(LoLNovices,
                          c("Elo", "GroupByElo", "Tier", "Division", "GroupByTier"))
dotaExperts <- dropColumns(dotaExperts, c("Tier", "Division", "GroupByTier"))
# Writing tables for every group.
write.table(LoLExperts, file = "160222lolExpExp.csv", sep = ";", dec = ",")
write.table(LoLExperts2, file = "160222lolExpNov.csv", sep = ";", dec = ",")
write.table(LoLNovices, file = "160222lolNov.csv", sep = ";", dec = ",")
write.table(dotaExperts, file = "160222dotaExp.csv", sep = ";", dec = ",")
| /SubsettingDatabases.R | no_license | cmauricio/MOBA-ET-R | R | false | false | 1,989 | r | #Code created to separate databases according to game, expertise and condition
# Setting the work directory and importing the files.
wDirectory <- "C:/Users/ru25tas/Dropbox/Analysis"
csvFile <- "160222FullETDatabase.csv"
setwd(wDirectory)
fullDatabase <- read.table(csvFile, header = TRUE, sep = ";", dec = ",",
                           na.strings = "NA")
fullDatabase <- as.data.frame(fullDatabase)
# Helper: drop the named columns from a data frame. Absent names are simply
# ignored; the old pattern df[, -which(colnames(df) == nm)] silently dropped
# EVERY column when nm was missing, because -integer(0) selects nothing.
dropColumns <- function(df, cols) {
  df[, setdiff(colnames(df), cols), drop = FALSE]
}
# Subsetting the databases according to game, group of expertise, and condition.
LoLExperts <- subset(fullDatabase,
                     Game == "lol" & Expertise == "expert" & Condition == "expert")
LoLExperts2 <- subset(fullDatabase,
                      Game == "lol" & Expertise == "expert" & Condition == "novice")
LoLNovices <- subset(fullDatabase, Game == "lol" & Expertise == "novice")
dotaExperts <- subset(fullDatabase, Game == "dota")
# Cleaning the data of columns that are all NA for each group.
LoLExperts <- dropColumns(LoLExperts, c("Elo", "GroupByElo"))
LoLExperts2 <- dropColumns(LoLExperts2, c("Elo", "GroupByElo"))
LoLNovices <- dropColumns(LoLNovices,
                          c("Elo", "GroupByElo", "Tier", "Division", "GroupByTier"))
dotaExperts <- dropColumns(dotaExperts, c("Tier", "Division", "GroupByTier"))
# Writing tables for every group.
write.table(LoLExperts, file = "160222lolExpExp.csv", sep = ";", dec = ",")
write.table(LoLExperts2, file = "160222lolExpNov.csv", sep = ";", dec = ",")
write.table(LoLNovices, file = "160222lolNov.csv", sep = ";", dec = ",")
write.table(dotaExperts, file = "160222dotaExp.csv", sep = ";", dec = ",")
|
library(phylosim)
### Name: summary.GeneralSubstitution
### Title: Summarize the properties of an object
### Aliases: summary.GeneralSubstitution GeneralSubstitution.summary
###   summary,GeneralSubstitution-method
### ** Examples
# Build a general substitution process over a binary alphabet with the two
# transition rates, then print its summary.
bin_subst <- GeneralSubstitution(
  alphabet = BinaryAlphabet(),
  rate.list = list("0->1" = 1, "1->0" = 2)
)
summary(bin_subst)
| /data/genthat_extracted_code/phylosim/examples/summary.GeneralSubstitution.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 393 | r | library(phylosim)
### Name: summary.GeneralSubstitution
### Title: Summarize the properties of an object
### Aliases: summary.GeneralSubstitution GeneralSubstitution.summary
###   summary,GeneralSubstitution-method
### ** Examples
# Build a general substitution process over a binary alphabet with the two
# transition rates, then print its summary.
bin_model <- GeneralSubstitution(
  alphabet = BinaryAlphabet(),
  rate.list = list("0->1" = 1, "1->0" = 2)
)
summary(bin_model)
|
# These functions are implemented for compatibility with the
# rstantools package (and rstanarm).
#' Generic Method for Obtaining Posterior Predictive Distribution from Stan Objects
#'
#' This generic matches the interface of the functions used with
#' \code{\link[bayesplot]{ppc_bars}} to calculate the posterior predictive
#' distribution of the data given the model. Methods are dispatched on the
#' \code{object} argument only.
#'
#' @param object A fitted \code{idealstan} object.
#' @param ... All other parameters passed on to the underlying method.
#' @return \code{posterior_predict} methods should return a \eqn{D} by \eqn{N}
#'   matrix, where \eqn{D} is the number of draws from the posterior predictive
#'   distribution and \eqn{N} is the number of data points being predicted per
#'   draw.
#' @export
setGeneric('id_post_pred',signature='object',
           function(object,...) standardGeneric('id_post_pred'))
#' Posterior Prediction for \code{idealstan} objects
#'
#' This function will draw from the posterior distribution, whether in terms of the outcome (prediction)
#' or to produce the log-likelihood values.
#'
#' This function can also produce either distribution of the
#' outcomes (i.e., predictions) or the log-likelihood values of the posterior (set option
#' \code{type} to \code{'log_lik'}.
#' For more information, see the package vignette How to Evaluate Models.
#'
#' You can then use functions such as
#' \code{\link{id_plot_ppc}} to see how well the model does returning the correct number of categories
#' in the score/vote matrix.
#' Also see \code{help("posterior_predict", package = "rstanarm")}
#'
#' @param object A fitted \code{idealstan} object
#' @param draws The number of draws to use from the total number of posterior draws (default is 100).
#' @param sample_scores In addition to reducing the number of posterior draws used to
#' calculate the posterior predictive distribution, which will reduce computational overhead.
#' Only available for calculating predictive distributions, not log-likelihood values.
#' @param type Whether to produce posterior predictive values (\code{'predict'}, the default),
#' or log-likelihood values (\code{'log_lik'}). See the How to Evaluate Models vignette for more info.
#' @param output If the model has an unbounded outcome (Poisson, continuous, etc.), then
#' specify whether to show the \code{'observed'} data (the default) or the binary
#' output \code{'missing'} showing whether an observation was predicted as missing or not
#' @param ... Any other arguments passed on to posterior_predict (currently none available)
#'
#' @export
# S4 method implementing id_post_pred() for idealstan fits: produces either
# posterior replications of the outcome (type = 'predict') or pointwise
# log-likelihood values (type = 'log_lik') over the score matrix.
#
# Fixes relative to the previous version:
#  * miss_val was only defined when the outcome was a factor, but was passed
#    to rep_func() unconditionally; it now defaults to NA.
#  * scalar 'if' condition now uses '&&' instead of elementwise '&'.
#  * 1:n / 1:length(x) replaced by seq_len()/seq_along() (safe for length 0).
setMethod('id_post_pred', signature(object = 'idealstan'),
          function(object, draws = 100,
                   output = 'observed',
                   type = 'predict',
                   sample_scores = NULL, ...) {
  #all_params <- rstan::extract(object@stan_samples)
  n_votes <- nrow(object@score_data@score_matrix)
  # Total posterior draws available: post-warmup iterations times the number
  # of chains for MCMC runs; variational fits have no warmup, so use the raw
  # draw count instead.
  if (object@stan_samples@stan_args[[1]]$method != 'variational') {
    n_iters <- (object@stan_samples@stan_args[[1]]$iter -
                  object@stan_samples@stan_args[[1]]$warmup) *
      length(object@stan_samples@stan_args)
  } else {
    # there is no warmup for VB
    n_iters <- dim(object@stan_samples)[1]
  }
  # Optional subsampling of scores (rows); disallowed for log-likelihood,
  # which must cover the full score matrix (e.g. for loo).
  if (!is.null(sample_scores) && type != 'log_lik') {
    this_sample <- sample(seq_len(n_votes), sample_scores)
  } else {
    this_sample <- seq_len(n_votes)
  }
  # Log-likelihood always uses every posterior draw; prediction subsamples.
  if (type != 'log_lik') {
    these_draws <- sample(seq_len(n_iters), draws)
  } else {
    these_draws <- seq_len(n_iters)
    draws <- n_iters
  }
  print(paste0('Processing posterior replications for ', n_votes, ' scores using ', draws,
               ' posterior samples out of a total of ', n_iters, ' samples.'))
  y <- object@score_data@score_matrix$outcome[this_sample]
  # check to see if we need to recode missing values from the data if the
  # model_type doesn't handle missing data
  if (object@model_type %in% c(1, 3, 5, 7, 9, 11, 13) &&
      !is.null(object@score_data@miss_val)) {
    y <- .na_if(y, object@score_data@miss_val)
  }
  # person index: group-level IDs when the model was fit over groups
  if (object@use_groups) {
    person_points <- as.numeric(object@score_data@score_matrix$group_id)[this_sample]
  } else {
    person_points <- as.numeric(object@score_data@score_matrix$person_id)[this_sample]
  }
  bill_points <- as.numeric(object@score_data@score_matrix$item_id)[this_sample]
  time_points <- as.numeric(factor(object@score_data@score_matrix$time_id))[this_sample]
  # drop observations with a missing outcome or any missing index
  remove_nas <- !is.na(y) & !is.na(person_points) & !is.na(bill_points) & !is.na(time_points)
  y <- y[remove_nas]
  # Fix: default miss_val so it is always bound; for factor outcomes recover
  # the level index of the missing-value code.
  miss_val <- NA
  if (is.factor(y)) {
    miss_val <- which(levels(y) == object@score_data@miss_val)
    y <- as.numeric(y)
  }
  max_val <- max(y)
  bill_points <- bill_points[remove_nas]
  time_points <- time_points[remove_nas]
  person_points <- person_points[remove_nas]
  model_type <- object@model_type
  # models 13/14 use the latent-space likelihood; even-numbered models are
  # inflated (they explicitly model missingness)
  latent_space <- model_type %in% c(13, 14)
  inflate <- model_type %in% c(2, 4, 6, 8, 10, 12, 14)
  # pull the posterior arrays once, then loop over posterior iterations
  L_tp1 <- .extract_nonp(object@stan_samples, 'L_tp1')[[1]]
  A_int_free <- .extract_nonp(object@stan_samples, 'A_int_free')[[1]]
  B_int_free <- .extract_nonp(object@stan_samples, 'B_int_free')[[1]]
  sigma_abs_free <- .extract_nonp(object@stan_samples, 'sigma_abs_free')[[1]]
  sigma_reg_free <- .extract_nonp(object@stan_samples, 'sigma_reg_free')[[1]]
  # probability of absence (missingness): one column per retained draw
  pr_absence_iter <- sapply(these_draws, function(d) {
    if (latent_space) {
      # use latent-space formulation for likelihood
      pr_absence <- sapply(seq_along(person_points), function(n) {
        -sqrt((L_tp1[d, time_points[n], person_points[n]] - A_int_free[d, bill_points[n]])^2)
      }) %>% plogis()
    } else {
      # use IRT formulation for likelihood
      pr_absence <- sapply(seq_along(person_points), function(n) {
        L_tp1[d, time_points[n], person_points[n]] * sigma_abs_free[d, bill_points[n]] - A_int_free[d, bill_points[n]]
      }) %>% plogis()
    }
    return(pr_absence)
  })
  # probability of the outcome itself: one column per retained draw
  pr_vote_iter <- sapply(these_draws, function(d) {
    if (latent_space) {
      if (inflate) {
        pr_vote <- sapply(seq_along(person_points), function(n) {
          -sqrt((L_tp1[d, time_points[n], person_points[n]] - B_int_free[d, bill_points[n]])^2)
        }) %>% plogis()
      } else {
        # latent space non-inflated formulation is different
        pr_vote <- sapply(seq_along(person_points), function(n) {
          sigma_reg_free[d, bill_points[n]] + sigma_abs_free[d, bill_points[n]] -
            sqrt((L_tp1[d, time_points[n], person_points[n]] - B_int_free[d, bill_points[n]])^2)
        }) %>% plogis()
      }
    } else {
      pr_vote <- sapply(seq_along(person_points), function(n) {
        L_tp1[d, time_points[n], person_points[n]] * sigma_reg_free[d, bill_points[n]] - B_int_free[d, bill_points[n]]
      }) %>% plogis()
    }
    return(pr_vote)
  })
  # pick the outcome-distribution-specific replication function
  rep_func <- switch(as.character(model_type),
                     `1` = .binary,
                     `2` = .binary,
                     `3` = .ordinal_ratingscale,
                     `4` = .ordinal_ratingscale,
                     `5` = .ordinal_grm,
                     `6` = .ordinal_grm,
                     `7` = .poisson,
                     `8` = .poisson,
                     `9` = .normal,
                     `10` = .normal,
                     `11` = .lognormal,
                     `12` = .lognormal,
                     `13` = .binary,
                     `14` = .binary)
  # pass along cutpoints as well (only the ordinal models have them)
  if (model_type %in% c(3, 4)) {
    cutpoints <- .extract_nonp(object@stan_samples, 'steps_votes')[[1]]
    cutpoints <- cutpoints[these_draws, ]
  } else if (model_type %in% c(5, 6)) {
    cutpoints <- .extract_nonp(object@stan_samples, 'steps_votes_grm')[[1]]
    cutpoints <- cutpoints[these_draws, , ]
  } else {
    cutpoints <- 1
  }
  out_predict <- rep_func(pr_absence = pr_absence_iter,
                          pr_vote = pr_vote_iter,
                          N = length(person_points),
                          ordinal_outcomes = length(unique(object@score_data@score_matrix$outcome)),
                          inflate = inflate,
                          latent_space = latent_space,
                          time_points = time_points,
                          item_points = bill_points,
                          max_val = max_val,
                          outcome = y,
                          miss_val = miss_val,
                          person_points = person_points,
                          sigma_sd = .extract_nonp(object@stan_samples, 'extra_sd')[[1]][these_draws],
                          cutpoints = cutpoints,
                          type = type,
                          output = output)
  # set attributes to pass along sample info (used by id_plot_ppc/derive_chain)
  attr(out_predict, 'chain_order') <- attr(L_tp1, 'chain_order')[these_draws]
  attr(out_predict, 'this_sample') <- this_sample
  # class tags let downstream code dispatch on prediction vs. log-likelihood
  if (type == 'predict') {
    class(out_predict) <- c('matrix', 'ppd')
  } else if (type == 'log_lik') {
    class(out_predict) <- c('matrix', 'log_lik')
  }
  return(out_predict)
})
#' Plot Posterior Predictive Distribution for \code{idealstan} Objects
#'
#' This function is the generic method for generating posterior distributions
#' from a fitted \code{idealstan} model. Functions are documented in the
#' actual method.
#'
#' This function is a wrapper around \code{\link[bayesplot]{ppc_bars}},
#' \code{\link[bayesplot]{ppc_dens_overlay}} and
#' \code{\link[bayesplot]{ppc_violin_grouped}} that plots the posterior predictive distribution
#' derived from \code{\link{id_post_pred}} against the original data. You can also subset the
#' posterior predictions over
#' legislators/persons or
#' bills/items by specifying the ID of each in the original data as a character vector.
#' Only persons or items can be specified,
#' not both.
#'
#' If you specify a value for \code{group} that is either a person ID or a group ID
#' (depending on whether a person or group-level model was fit), then you can see the
#' posterior distributions for those specific persons. Similarly, if an item ID is passed
#' to \code{item}, you can see how well the model predictions compare to the true values
#' for that specific item.
#'
#' @param object A fitted \code{idealstan} object
#' @param ... Other arguments passed on to \code{\link[bayesplot]{ppc_bars}}
#' @export
# S4 generic for posterior-predictive-check plots; dispatch is on 'object'
# only so plot-specific arguments (ppc_pred, group, item, ...) pass via '...'.
setGeneric('id_plot_ppc',signature='object',
function(object,...) standardGeneric('id_plot_ppc'))
#' Plot Posterior Predictive Distribution for \code{idealstan} Objects
#'
#' This function is the actual method for generating posterior distributions
#' from a fitted \code{idealstan} model.
#'
#' This function is a wrapper around \code{\link[bayesplot]{ppc_bars}},
#' \code{\link[bayesplot]{ppc_dens_overlay}} and
#' \code{\link[bayesplot]{ppc_violin_grouped}} that plots the posterior predictive distribution
#' derived from \code{\link{id_post_pred}} against the original data. You can also subset the
#' posterior predictions over
#' legislators/persons or
#' bills/items by specifying the ID of each in the original data as a character vector.
#' Only persons or items can be specified,
#' not both.
#'
#' If you specify a value for \code{group} that is either a person ID or a group ID
#' (depending on whether a person or group-level model was fit), then you can see the
#' posterior distributions for those specific persons. Similarly, if an item ID is passed
#' to \code{item}, you can see how well the model predictions compare to the true values
#' for that specific item.
#'
#' @param object A fitted idealstan object
#' @param ppc_pred The output of the \code{\link{id_post_pred}} function on a fitted idealstan object
#' @param group A character vector of the person or group IDs
#' over which to subset the predictive distribution
#' @param item A character vector of item IDs to subset the posterior distribution
#' @param ... Other arguments passed on to \code{\link[bayesplot]{ppc_bars}}
#' @export
# S4 method: compare the posterior predictive draws from id_post_pred()
# against the observed outcomes, optionally subset by person/group or item.
setMethod('id_plot_ppc',signature(object='idealstan'),function(object,
ppc_pred=NULL,
group=NULL,
item=NULL,...) {
# row indices of the score matrix the predictions were generated for
this_sample <- attr(ppc_pred,'this_sample')
# create grouping variable
if(!is.null(group)) {
if(object@use_groups) {
group_var <- factor(object@score_data@score_matrix$group_id, levels=group)
} else {
group_var <- factor(object@score_data@score_matrix$person_id, levels=group)
}
grouped <- T
} else if(!is.null(item)) {
group_var <- factor(object@score_data@score_matrix$item_id, levels=item)
grouped <- T
} else {
grouped <- F
}
y <- object@score_data@score_matrix$outcome[this_sample]
# check to see if we need to recode missing values from the data if the model_type doesn't handle missing data
if(object@model_type %in% c(1,3,5,7,9,11,13) & !is.null(object@score_data@miss_val)) {
y <- .na_if(y,object@score_data@miss_val)
}
# person index: group-level IDs when the model was fit over groups
if(object@use_groups) {
person_points <- as.numeric(object@score_data@score_matrix$group_id)[this_sample]
} else {
person_points <- as.numeric(object@score_data@score_matrix$person_id)[this_sample]
}
bill_points <- as.numeric(object@score_data@score_matrix$item_id)[this_sample]
time_points <- as.numeric(object@score_data@score_matrix$time_id)[this_sample]
# drop observations with a missing outcome or any missing index
remove_nas <- !is.na(y) & !is.na(person_points) & !is.na(bill_points) & !is.na(time_points)
y <- y[remove_nas]
bill_points <- bill_points[remove_nas]
time_points <- time_points[remove_nas]
person_points <- person_points[remove_nas]
if(!is.null(group)) {
group_var <- group_var[remove_nas]
# create a second one for the grouping variable
# NOTE(review): this mask is built from the short user-supplied 'group'
# vector rather than 'group_var', and it is never defined when only
# 'item' is given even though the grouped branches below index with it
# — confirm and fix upstream
remove_nas_group <- !is.na(group)
}
if(!is.null(item) && !is.null(group))
stop('Please only specify an index to item or person, not both.')
# 'all' output: bar plots over every category, including the missing code
if(attr(ppc_pred,'output')=='all') {
y <- as.numeric(y)
if(grouped) {
bayesplot::ppc_bars_grouped(y=y[remove_nas_group],yrep=ppc_pred[,remove_nas_group],
group=group_var[remove_nas_group],...)
} else {
bayesplot::ppc_bars(y=y,yrep=ppc_pred,...)
}
} else if(attr(ppc_pred,'output')=='observed') {
# only show observed data for yrep
y <- .na_if(y,object@score_data@miss_val)
to_remove <- !is.na(y)
y <- y[to_remove]
if(!is.null(group)) {
group_var <- group_var[to_remove]
remove_nas_group <- !is.na(group_var)
}
y <- as.numeric(y)
# continuous outcomes get density/violin plots, discrete ones bar plots
if(attr(ppc_pred,'output_type')=='continuous') {
ppc_pred <- ppc_pred[,to_remove]
#unbounded observed outcomes (i.e., continuous)
if(grouped) {
bayesplot::ppc_violin_grouped(y=y[remove_nas_group],yrep=ppc_pred[,remove_nas_group],
group=group_var[remove_nas_group],
...)
} else {
bayesplot::ppc_dens_overlay(y=y,yrep=ppc_pred,...)
}
} else if(attr(ppc_pred,'output_type')=='discrete') {
ppc_pred <- ppc_pred[,to_remove]
if(grouped) {
bayesplot::ppc_bars_grouped(y=y[remove_nas_group],yrep=ppc_pred[,remove_nas_group],
group=group_var[remove_nas_group],...)
} else {
bayesplot::ppc_bars(y=y,yrep=ppc_pred,...)
}
}
} else if(attr(ppc_pred,'output')=='missing') {
# recode the outcome to a 0/1 missingness indicator and compare with yrep
y <- .na_if(y,object@score_data@miss_val)
y <- as.numeric(is.na(y))
if(grouped) {
bayesplot::ppc_bars_grouped(y=y[remove_nas_group],yrep=ppc_pred[,remove_nas_group],
group=group_var[remove_nas_group],...)
} else {
bayesplot::ppc_bars(y=y,yrep=ppc_pred,...)
}
}
})
#' Helper Function for `loo` calculation
#'
#' This function accepts a log-likelihood matrix produced by `id_post_pred` and
#' extracts the IDs of the MCMC chains. It is necessary to use this function
#' as the second argument to the `loo` function along with an exponentiated
#' log-likelihood matrix. See the package vignette How to Evaluate Models
#' for more details.
#'
#' @param ll_matrix A log-likelihood matrix as produced by the \code{\link{id_post_pred}}
#' function
#' @export
derive_chain <- function(ll_matrix=NULL) {
attr(ll_matrix,'chain_order')
} | /R/rstan_generics.R | no_license | Bhanditz/idealstan | R | false | false | 16,959 | r | # These functions are implemented for compatibility with the
# rstantools package (and rstanarm)
#' Generic Method for Obtaining Posterior Predictive Distribution from Stan Objects
#'
#' This function is a generic that is used to match the functions used with \code{\link[bayesplot]{ppc_bars}} to calculate
#' the posterior predictive distribution of the data given the model.
#'
#' @param object A fitted \code{idealstan} object
#' @param ... All other parameters passed on to the underlying function.
#' @export
#' @return \code{posterior_predict} methods should return a \eqn{D} by \eqn{N}
#' matrix, where \eqn{D} is the number of draws from the posterior predictive
#' distribution and \eqn{N} is the number of data points being predicted per
#' draw.
#' @export
# S4 generic for posterior prediction; dispatch is on 'object' only so that
# method-specific arguments (draws, type, output, ...) flow through '...'.
setGeneric('id_post_pred',signature='object',
function(object,...) standardGeneric('id_post_pred'))
#' Posterior Prediction for \code{idealstan} objects
#'
#' This function will draw from the posterior distribution, whether in terms of the outcome (prediction)
#' or to produce the log-likelihood values.
#'
#' This function can also produce either distribution of the
#' outcomes (i.e., predictions) or the log-likelihood values of the posterior (set option
#' \code{type} to \code{'log_lik'}.
#' For more information, see the package vignette How to Evaluate Models.
#'
#' You can then use functions such as
#' \code{\link{id_plot_ppc}} to see how well the model does returning the correct number of categories
#' in the score/vote matrix.
#' Also see \code{help("posterior_predict", package = "rstanarm")}
#'
#' @param object A fitted \code{idealstan} object
#' @param draws The number of draws to use from the total number of posterior draws (default is 100).
#' @param sample_scores In addition to reducing the number of posterior draws used to
#' calculate the posterior predictive distribution, which will reduce computational overhead.
#' Only available for calculating predictive distributions, not log-likelihood values.
#' @param type Whether to produce posterior predictive values (\code{'predict'}, the default),
#' or log-likelihood values (\code{'log_lik'}). See the How to Evaluate Models vignette for more info.
#' @param output If the model has an unbounded outcome (Poisson, continuous, etc.), then
#' specify whether to show the \code{'observed'} data (the default) or the binary
#' output \code{'missing'} showing whether an observation was predicted as missing or not
#' @param ... Any other arguments passed on to posterior_predict (currently none available)
#'
#' @export
# S4 method implementing id_post_pred() for idealstan fits: produces either
# posterior replications of the outcome (type = 'predict') or pointwise
# log-likelihood values (type = 'log_lik') over the score matrix.
setMethod('id_post_pred',signature(object='idealstan'),function(object,draws=100,
output='observed',
type='predict',
sample_scores=NULL,...) {
#all_params <- rstan::extract(object@stan_samples)
n_votes <- nrow(object@score_data@score_matrix)
# total posterior draws available: post-warmup iterations times chains for
# MCMC runs; variational fits have no warmup so use the raw draw count
if(object@stan_samples@stan_args[[1]]$method != 'variational') {
n_iters <- (object@stan_samples@stan_args[[1]]$iter-object@stan_samples@stan_args[[1]]$warmup)*length(object@stan_samples@stan_args)
} else {
# there is no warmup for VB
n_iters <- dim(object@stan_samples)[1]
}
# optional subsampling of scores (rows); disallowed for log-likelihood,
# which must cover the full score matrix (e.g. for loo)
if(!is.null(sample_scores) && type!='log_lik') {
this_sample <- sample(1:n_votes,sample_scores)
} else {
this_sample <- 1:n_votes
}
# log-likelihood always uses every posterior draw; prediction subsamples
if(type!='log_lik') {
these_draws <- sample(1:n_iters,draws)
} else {
these_draws <- 1:n_iters
draws <- n_iters
}
print(paste0('Processing posterior replications for ',n_votes,' scores using ',draws,
' posterior samples out of a total of ',n_iters, ' samples.'))
y <- object@score_data@score_matrix$outcome[this_sample]
# check to see if we need to recode missing values from the data if the model_type doesn't handle missing data
# NOTE(review): this scalar condition uses elementwise '&'; correct for
# length-1 operands but '&&' would be the idiomatic choice
if(object@model_type %in% c(1,3,5,7,9,11,13) & !is.null(object@score_data@miss_val)) {
y <- .na_if(y,object@score_data@miss_val)
}
# person index: group-level IDs when the model was fit over groups
if(object@use_groups) {
person_points <- as.numeric(object@score_data@score_matrix$group_id)[this_sample]
} else {
person_points <- as.numeric(object@score_data@score_matrix$person_id)[this_sample]
}
bill_points <- as.numeric(object@score_data@score_matrix$item_id)[this_sample]
time_points <- as.numeric(factor(object@score_data@score_matrix$time_id))[this_sample]
# drop observations with a missing outcome or any missing index
remove_nas <- !is.na(y) & !is.na(person_points) & !is.na(bill_points) & !is.na(time_points)
y <- y[remove_nas]
# NOTE(review): miss_val is only assigned when y is a factor; for other
# outcome types the rep_func(miss_val=...) argument below is unbound and
# relies on lazy evaluation never forcing it — confirm
if(is.factor(y)) {
miss_val <- which(levels(y)==object@score_data@miss_val)
y <- as.numeric(y)
}
max_val <- max(y)
bill_points <- bill_points[remove_nas]
time_points <- time_points[remove_nas]
person_points <- person_points[remove_nas]
model_type <- object@model_type
# models 13/14 use the latent-space likelihood; even-numbered models are
# inflated (they explicitly model missingness)
latent_space <- model_type %in% c(13,14)
inflate <- model_type %in% c(2,4,6,8,10,12,14)
# we can do the initial processing here
# loop over posterior iterations
L_tp1 <- .extract_nonp(object@stan_samples,'L_tp1')[[1]]
A_int_free <- .extract_nonp(object@stan_samples,'A_int_free')[[1]]
B_int_free <- .extract_nonp(object@stan_samples,'B_int_free')[[1]]
sigma_abs_free <- .extract_nonp(object@stan_samples,'sigma_abs_free')[[1]]
sigma_reg_free <- .extract_nonp(object@stan_samples,'sigma_reg_free')[[1]]
# probability of absence (missingness): one column per retained draw
pr_absence_iter <- sapply(these_draws, function(d) {
if(latent_space) {
# use latent-space formulation for likelihood
pr_absence <- sapply(1:length(person_points),function(n) {
-sqrt((L_tp1[d,time_points[n],person_points[n]] - A_int_free[d,bill_points[n]])^2)
}) %>% plogis()
} else {
# use IRT formulation for likelihood
pr_absence <- sapply(1:length(person_points),function(n) {
L_tp1[d,time_points[n],person_points[n]]*sigma_abs_free[d,bill_points[n]] - A_int_free[d,bill_points[n]]
}) %>% plogis()
}
return(pr_absence)
})
# probability of the outcome itself: one column per retained draw
pr_vote_iter <- sapply(these_draws, function(d) {
if(latent_space) {
if(inflate) {
pr_vote <- sapply(1:length(person_points),function(n) {
-sqrt((L_tp1[d,time_points[n],person_points[n]] - B_int_free[d,bill_points[n]])^2)
}) %>% plogis()
} else {
# latent space non-inflated formulation is different
pr_vote <- sapply(1:length(person_points),function(n) {
sigma_reg_free[d,bill_points[n]] + sigma_abs_free[d,bill_points[n]] -
sqrt((L_tp1[d,time_points[n],person_points[n]] - B_int_free[d,bill_points[n]])^2)
}) %>% plogis()
}
} else {
pr_vote <- sapply(1:length(person_points),function(n) {
L_tp1[d,time_points[n],person_points[n]]*sigma_reg_free[d,bill_points[n]] - B_int_free[d,bill_points[n]]
}) %>% plogis()
}
return(pr_vote)
})
# pick the outcome-distribution-specific replication function
rep_func <- switch(as.character(model_type),
`1`=.binary,
`2`=.binary,
`3`=.ordinal_ratingscale,
`4`=.ordinal_ratingscale,
`5`=.ordinal_grm,
`6`=.ordinal_grm,
`7`=.poisson,
`8`=.poisson,
`9`=.normal,
`10`=.normal,
`11`=.lognormal,
`12`=.lognormal,
`13`=.binary,
`14`=.binary)
# pass along cutpoints as well
if(model_type %in% c(3,4)) {
cutpoints <- .extract_nonp(object@stan_samples,'steps_votes')[[1]]
cutpoints <- cutpoints[these_draws,]
} else if(model_type %in% c(5,6)) {
cutpoints <- .extract_nonp(object@stan_samples,'steps_votes_grm')[[1]]
cutpoints <- cutpoints[these_draws,,]
} else {
cutpoints <- 1
}
out_predict <- rep_func(pr_absence=pr_absence_iter,
pr_vote=pr_vote_iter,
N=length(person_points),
ordinal_outcomes=length(unique(object@score_data@score_matrix$outcome)),
inflate=inflate,
latent_space=latent_space,
time_points=time_points,
item_points=bill_points,
max_val=max_val,
outcome=y,
miss_val=miss_val,
person_points=person_points,
sigma_sd=.extract_nonp(object@stan_samples,'extra_sd')[[1]][these_draws],
cutpoints=cutpoints,
type=type,
output=output)
# set attributes to pass along sample info
attr(out_predict,'chain_order') <- attr(L_tp1,'chain_order')[these_draws]
attr(out_predict,'this_sample') <- this_sample
# class tags let downstream code dispatch on prediction vs. log-likelihood
if(type=='predict') {
class(out_predict) <- c('matrix','ppd')
} else if(type=='log_lik') {
class(out_predict) <- c('matrix','log_lik')
}
return(out_predict)
})
#' Plot Posterior Predictive Distribution for \code{idealstan} Objects
#'
#' This function is the generic method for generating posterior distributions
#' from a fitted \code{idealstan} model. Functions are documented in the
#' actual method.
#'
#' This function is a wrapper around \code{\link[bayesplot]{ppc_bars}},
#' \code{\link[bayesplot]{ppc_dens_overlay}} and
#' \code{\link[bayesplot]{ppc_violin_grouped}} that plots the posterior predictive distribution
#' derived from \code{\link{id_post_pred}} against the original data. You can also subset the
#' posterior predictions over
#' legislators/persons or
#' bills/items by specifying the ID of each in the original data as a character vector.
#' Only persons or items can be specified,
#' not both.
#'
#' If you specify a value for \code{group} that is either a person ID or a group ID
#' (depending on whether a person or group-level model was fit), then you can see the
#' posterior distributions for those specific persons. Similarly, if an item ID is passed
#' to \code{item}, you can see how well the model predictions compare to the true values
#' for that specific item.
#'
#' @param object A fitted \code{idealstan} object
#' @param ... Other arguments passed on to \code{\link[bayesplot]{ppc_bars}}
#' @export
# S4 generic for posterior-predictive-check plots; dispatch is on 'object'
# only so plot-specific arguments (ppc_pred, group, item, ...) pass via '...'.
setGeneric('id_plot_ppc',signature='object',
function(object,...) standardGeneric('id_plot_ppc'))
#' Plot Posterior Predictive Distribution for \code{idealstan} Objects
#'
#' This function is the actual method for generating posterior distributions
#' from a fitted \code{idealstan} model.
#'
#' This function is a wrapper around \code{\link[bayesplot]{ppc_bars}},
#' \code{\link[bayesplot]{ppc_dens_overlay}} and
#' \code{\link[bayesplot]{ppc_violin_grouped}} that plots the posterior predictive distribution
#' derived from \code{\link{id_post_pred}} against the original data. You can also subset the
#' posterior predictions over
#' legislators/persons or
#' bills/items by specifying the ID of each in the original data as a character vector.
#' Only persons or items can be specified,
#' not both.
#'
#' If you specify a value for \code{group} that is either a person ID or a group ID
#' (depending on whether a person or group-level model was fit), then you can see the
#' posterior distributions for those specific persons. Similarly, if an item ID is passed
#' to \code{item}, you can see how well the model predictions compare to the true values
#' for that specific item.
#'
#' @param object A fitted idealstan object
#' @param ppc_pred The output of the \code{\link{id_post_pred}} function on a fitted idealstan object
#' @param group A character vector of the person or group IDs
#' over which to subset the predictive distribution
#' @param item A character vector of item IDs to subset the posterior distribution
#' @param ... Other arguments passed on to \code{\link[bayesplot]{ppc_bars}}
#' @export
# S4 method: compare the posterior predictive draws from id_post_pred()
# against the observed outcomes, optionally subset by person/group or item.
#
# Fixes relative to the previous version:
#  * the NA mask for the grouping factor was computed from the short
#    user-supplied 'group' vector instead of 'group_var', and was never
#    defined at all when only 'item' was given, although the grouped plot
#    branches index with it; it is now derived from 'group_var' whenever a
#    grouping is in effect.
#  * 'group_var' is now aligned with 'y' by subsetting with 'this_sample'
#    before the NA filter (matters when id_post_pred() used sample_scores).
#  * the item/group conflict check now runs before any data munging.
#  * T/F literals replaced by TRUE/FALSE; scalar 'if' uses '&&'.
setMethod('id_plot_ppc', signature(object = 'idealstan'),
          function(object,
                   ppc_pred = NULL,
                   group = NULL,
                   item = NULL, ...) {
  # fail fast on conflicting subsets
  if (!is.null(item) && !is.null(group))
    stop('Please only specify an index to item or person, not both.')
  # row indices of the score matrix the predictions were generated for
  this_sample <- attr(ppc_pred, 'this_sample')
  # create grouping variable: either persons/groups or items, never both
  if (!is.null(group)) {
    if (object@use_groups) {
      group_var <- factor(object@score_data@score_matrix$group_id, levels = group)
    } else {
      group_var <- factor(object@score_data@score_matrix$person_id, levels = group)
    }
    grouped <- TRUE
  } else if (!is.null(item)) {
    group_var <- factor(object@score_data@score_matrix$item_id, levels = item)
    grouped <- TRUE
  } else {
    grouped <- FALSE
  }
  y <- object@score_data@score_matrix$outcome[this_sample]
  # check to see if we need to recode missing values from the data if the
  # model_type doesn't handle missing data
  if (object@model_type %in% c(1, 3, 5, 7, 9, 11, 13) &&
      !is.null(object@score_data@miss_val)) {
    y <- .na_if(y, object@score_data@miss_val)
  }
  # person index: group-level IDs when the model was fit over groups
  if (object@use_groups) {
    person_points <- as.numeric(object@score_data@score_matrix$group_id)[this_sample]
  } else {
    person_points <- as.numeric(object@score_data@score_matrix$person_id)[this_sample]
  }
  bill_points <- as.numeric(object@score_data@score_matrix$item_id)[this_sample]
  time_points <- as.numeric(object@score_data@score_matrix$time_id)[this_sample]
  # drop observations with a missing outcome or any missing index
  remove_nas <- !is.na(y) & !is.na(person_points) & !is.na(bill_points) & !is.na(time_points)
  y <- y[remove_nas]
  bill_points <- bill_points[remove_nas]
  time_points <- time_points[remove_nas]
  person_points <- person_points[remove_nas]
  if (grouped) {
    # keep the grouping factor aligned with y, then mask the rows whose ID
    # is outside the requested levels (those became NA in factor())
    group_var <- group_var[this_sample][remove_nas]
    remove_nas_group <- !is.na(group_var)
  }
  if (attr(ppc_pred, 'output') == 'all') {
    # 'all' output: bar plots over every category, including the missing code
    y <- as.numeric(y)
    if (grouped) {
      bayesplot::ppc_bars_grouped(y = y[remove_nas_group], yrep = ppc_pred[, remove_nas_group],
                                  group = group_var[remove_nas_group], ...)
    } else {
      bayesplot::ppc_bars(y = y, yrep = ppc_pred, ...)
    }
  } else if (attr(ppc_pred, 'output') == 'observed') {
    # only show observed data for yrep
    y <- .na_if(y, object@score_data@miss_val)
    to_remove <- !is.na(y)
    y <- y[to_remove]
    if (grouped) {
      group_var <- group_var[to_remove]
      remove_nas_group <- !is.na(group_var)
    }
    y <- as.numeric(y)
    # continuous outcomes get density/violin plots, discrete ones bar plots
    if (attr(ppc_pred, 'output_type') == 'continuous') {
      ppc_pred <- ppc_pred[, to_remove]
      # unbounded observed outcomes (i.e., continuous)
      if (grouped) {
        bayesplot::ppc_violin_grouped(y = y[remove_nas_group], yrep = ppc_pred[, remove_nas_group],
                                      group = group_var[remove_nas_group],
                                      ...)
      } else {
        bayesplot::ppc_dens_overlay(y = y, yrep = ppc_pred, ...)
      }
    } else if (attr(ppc_pred, 'output_type') == 'discrete') {
      ppc_pred <- ppc_pred[, to_remove]
      if (grouped) {
        bayesplot::ppc_bars_grouped(y = y[remove_nas_group], yrep = ppc_pred[, remove_nas_group],
                                    group = group_var[remove_nas_group], ...)
      } else {
        bayesplot::ppc_bars(y = y, yrep = ppc_pred, ...)
      }
    }
  } else if (attr(ppc_pred, 'output') == 'missing') {
    # recode the outcome to a 0/1 missingness indicator and compare with yrep
    y <- .na_if(y, object@score_data@miss_val)
    y <- as.numeric(is.na(y))
    if (grouped) {
      bayesplot::ppc_bars_grouped(y = y[remove_nas_group], yrep = ppc_pred[, remove_nas_group],
                                  group = group_var[remove_nas_group], ...)
    } else {
      bayesplot::ppc_bars(y = y, yrep = ppc_pred, ...)
    }
  }
})
#' Helper Function for `loo` calculation
#'
#' This function accepts a log-likelihood matrix produced by `id_post_pred` and
#' extracts the IDs of the MCMC chains. It is necessary to use this function
#' as the second argument to the `loo` function along with an exponentiated
#' log-likelihood matrix. See the package vignette How to Evaluate Models
#' for more details.
#'
#' @param ll_matrix A log-likelihood matrix as produced by the \code{\link{id_post_pred}}
#' function
#' @export
# Returns the 'chain_order' attribute that id_post_pred() attaches to the
# log-likelihood matrix, mapping each retained posterior draw back to the
# MCMC chain it came from (used as the chain-ID argument to loo()).
derive_chain <- function(ll_matrix=NULL) {
attr(ll_matrix,'chain_order')
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/complement.R
\name{complement_element}
\alias{complement_element}
\alias{complement_element,TidySet,characterORfactor-method}
\title{Complement of elements}
\usage{
complement_element(object, elements, ...)
\S4method{complement_element}{TidySet,characterORfactor}(
object,
elements,
name = NULL,
keep = TRUE,
keep_relations = keep,
keep_elements = keep,
keep_sets = keep
)
}
\arguments{
\item{object}{A TidySet object.}
\item{elements}{The set to look for the complement.}
\item{...}{Other arguments.}
\item{name}{Name of the new set. By default it adds a "C".}
\item{keep}{Logical value to keep all the other sets.}
\item{keep_relations}{A logical value if you want to keep old relations}
\item{keep_elements}{A logical value if you want to keep old elements}
\item{keep_sets}{A logical value if you want to keep old sets}
}
\value{
A \code{TidySet} object.
}
\description{
Return the objects without the elements listed
}
\section{Methods (by class)}{
\itemize{
\item \code{object = TidySet,elements = characterORfactor}: Complement of the elements.
}}
\examples{
relations <- data.frame(sets = c("a", "a", "b", "b", "c", "c"),
elements = letters[seq_len(6)],
fuzzy = runif(6))
a <- tidySet(relations)
complement_element(a, "a", "C_a")
complement_element(a, "a", "C_a", keep = FALSE)
}
\seealso{
Other complements:
\code{\link{complement_set}()},
\code{\link{complement}()},
\code{\link{subtract}()}
Other methods that create new sets:
\code{\link{complement_set}()},
\code{\link{intersection}()},
\code{\link{subtract}()},
\code{\link{union}()}
Other methods:
\code{\link{TidySet-class}},
\code{\link{activate}()},
\code{\link{add_column}()},
\code{\link{add_relation}()},
\code{\link{arrange.TidySet}()},
\code{\link{cartesian}()},
\code{\link{complement_set}()},
\code{\link{complement}()},
\code{\link{element_size}()},
\code{\link{elements}()},
\code{\link{filter.TidySet}()},
\code{\link{group_by.TidySet}()},
\code{\link{group}()},
\code{\link{incidence}()},
\code{\link{intersection}()},
\code{\link{is.fuzzy}()},
\code{\link{is_nested}()},
\code{\link{move_to}()},
\code{\link{mutate.TidySet}()},
\code{\link{nElements}()},
\code{\link{nRelations}()},
\code{\link{nSets}()},
\code{\link{name_elements<-}()},
\code{\link{name_sets<-}()},
\code{\link{name_sets}()},
\code{\link{power_set}()},
\code{\link{pull.TidySet}()},
\code{\link{relations}()},
\code{\link{remove_column}()},
\code{\link{remove_element}()},
\code{\link{remove_relation}()},
\code{\link{remove_set}()},
\code{\link{rename_elements}()},
\code{\link{rename_set}()},
\code{\link{select.TidySet}()},
\code{\link{set_size}()},
\code{\link{sets}()},
\code{\link{subtract}()},
\code{\link{union}()}
}
\concept{complements}
\concept{methods}
\concept{methods that create new sets}
| /man/complement_element.Rd | permissive | annakrystalli/BaseSet | R | false | true | 2,921 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/complement.R
\name{complement_element}
\alias{complement_element}
\alias{complement_element,TidySet,characterORfactor-method}
\title{Complement of elements}
\usage{
complement_element(object, elements, ...)
\S4method{complement_element}{TidySet,characterORfactor}(
object,
elements,
name = NULL,
keep = TRUE,
keep_relations = keep,
keep_elements = keep,
keep_sets = keep
)
}
\arguments{
\item{object}{A TidySet object.}
\item{elements}{The set to look for the complement.}
\item{...}{Other arguments.}
\item{name}{Name of the new set. By default it adds a "C".}
\item{keep}{Logical value to keep all the other sets.}
\item{keep_relations}{A logical value if you want to keep old relations}
\item{keep_elements}{A logical value if you want to keep old elements}
\item{keep_sets}{A logical value if you want to keep old sets}
}
\value{
A \code{TidySet} object.
}
\description{
Return the objects without the elements listed
}
\section{Methods (by class)}{
\itemize{
\item \code{object = TidySet,elements = characterORfactor}: Complement of the elements.
}}
\examples{
relations <- data.frame(sets = c("a", "a", "b", "b", "c", "c"),
elements = letters[seq_len(6)],
fuzzy = runif(6))
a <- tidySet(relations)
complement_element(a, "a", "C_a")
complement_element(a, "a", "C_a", keep = FALSE)
}
\seealso{
Other complements:
\code{\link{complement_set}()},
\code{\link{complement}()},
\code{\link{subtract}()}
Other methods that create new sets:
\code{\link{complement_set}()},
\code{\link{intersection}()},
\code{\link{subtract}()},
\code{\link{union}()}
Other methods:
\code{\link{TidySet-class}},
\code{\link{activate}()},
\code{\link{add_column}()},
\code{\link{add_relation}()},
\code{\link{arrange.TidySet}()},
\code{\link{cartesian}()},
\code{\link{complement_set}()},
\code{\link{complement}()},
\code{\link{element_size}()},
\code{\link{elements}()},
\code{\link{filter.TidySet}()},
\code{\link{group_by.TidySet}()},
\code{\link{group}()},
\code{\link{incidence}()},
\code{\link{intersection}()},
\code{\link{is.fuzzy}()},
\code{\link{is_nested}()},
\code{\link{move_to}()},
\code{\link{mutate.TidySet}()},
\code{\link{nElements}()},
\code{\link{nRelations}()},
\code{\link{nSets}()},
\code{\link{name_elements<-}()},
\code{\link{name_sets<-}()},
\code{\link{name_sets}()},
\code{\link{power_set}()},
\code{\link{pull.TidySet}()},
\code{\link{relations}()},
\code{\link{remove_column}()},
\code{\link{remove_element}()},
\code{\link{remove_relation}()},
\code{\link{remove_set}()},
\code{\link{rename_elements}()},
\code{\link{rename_set}()},
\code{\link{select.TidySet}()},
\code{\link{set_size}()},
\code{\link{sets}()},
\code{\link{subtract}()},
\code{\link{union}()}
}
\concept{complements}
\concept{methods}
\concept{methods that create new sets}
|
# Minimal demo script: write the classic greeting to stdout (cat appends no
# trailing newline; the default separator joins the two words with a space).
cat("Hello", "World!")
| /R/HelloWorld_001.R | no_license | sat0317/HelloWorld | R | false | false | 20 | r | cat("Hello World!")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/validate.R
\name{custom_validate}
\alias{custom_validate}
\title{Customize validation components}
\usage{
custom_validate(
required_object = FALSE,
unique_object = FALSE,
unique_name = FALSE,
extensible = FALSE,
required_field = FALSE,
auto_field = FALSE,
type = FALSE,
choice = FALSE,
range = FALSE,
reference = FALSE
)
}
\arguments{
\item{required_object}{Check if required objects are missing in current
model. Default: \code{FALSE}.}
\item{unique_object}{Check if there are multiple objects in one unique-object
class. Default: \code{FALSE}.}
\item{unique_name}{Check if all objects in every class have unique names.
Default: \code{FALSE}.}
\item{extensible}{Check if all fields in an extensible group have values.
Default: \code{FALSE}.}
\item{required_field}{Check if all required fields have values. Default:
\code{FALSE}.}
\item{auto_field}{Check if all fields with value \code{"Autosize"} and
\code{"Autocalculate"} are valid or not. Default: \code{FALSE}.}
\item{type}{Check if all fields have values with valid types, i.e.
character, numeric and integer fields should be filled with corresponding
type of values. Default: \code{FALSE}.}
\item{choice}{Check if all choice fields have valid choice values. Default:
\code{FALSE}.}
\item{range}{Check if all numeric fields have values within defined ranges.
Default: \code{FALSE}.}
\item{reference}{Check if all fields whose values refer to other fields are
valid. Default: \code{FALSE}.}
}
\value{
A named list with 10 elements.
}
\description{
\code{custom_validate()} makes it easy to customize what validation components
should be included during IDF object modifications using \verb{$dup()}, \verb{$add()},
\verb{$set()} and other methods in \link{Idf} class.
}
\details{
There are 10 different validation check components in total. Three predefined
validation levels are included, i.e. \code{"none"}, \code{"draft"} and \code{"final"}. To get
what validation components those levels contain, see \code{\link[=level_checks]{level_checks()}}.
}
\examples{
custom_validate(unique_object = TRUE)
# only check unique name during validation
eplusr_option(validate_level = custom_validate(unique_name = TRUE))
}
| /man/custom_validate.Rd | permissive | hongyuanjia/eplusr | R | false | true | 2,275 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/validate.R
\name{custom_validate}
\alias{custom_validate}
\title{Customize validation components}
\usage{
custom_validate(
required_object = FALSE,
unique_object = FALSE,
unique_name = FALSE,
extensible = FALSE,
required_field = FALSE,
auto_field = FALSE,
type = FALSE,
choice = FALSE,
range = FALSE,
reference = FALSE
)
}
\arguments{
\item{required_object}{Check if required objects are missing in current
model. Default: \code{FALSE}.}
\item{unique_object}{Check if there are multiple objects in one unique-object
class. Default: \code{FALSE}.}
\item{unique_name}{Check if all objects in every class have unique names.
Default: \code{FALSE}.}
\item{extensible}{Check if all fields in an extensible group have values.
Default: \code{FALSE}.}
\item{required_field}{Check if all required fields have values. Default:
\code{FALSE}.}
\item{auto_field}{Check if all fields with value \code{"Autosize"} and
\code{"Autocalculate"} are valid or not. Default: \code{FALSE}.}
\item{type}{Check if all fields have values with valid types, i.e.
character, numeric and integer fields should be filled with corresponding
type of values. Default: \code{FALSE}.}
\item{choice}{Check if all choice fields have valid choice values. Default:
\code{FALSE}.}
\item{range}{Check if all numeric fields have values within defined ranges.
Default: \code{FALSE}.}
\item{reference}{Check if all fields whose values refer to other fields are
valid. Default: \code{FALSE}.}
}
\value{
A named list with 10 elements.
}
\description{
\code{custom_validate()} makes it easy to customize what validation components
should be included during IDF object modifications using \verb{$dup()}, \verb{$add()},
\verb{$set()} and other methods in \link{Idf} class.
}
\details{
There are 10 different validation check components in total. Three predefined
validation levels are included, i.e. \code{"none"}, \code{"draft"} and \code{"final"}. To get
what validation components those levels contain, see \code{\link[=level_checks]{level_checks()}}.
}
\examples{
custom_validate(unique_object = TRUE)
# only check unique name during validation
eplusr_option(validate_level = custom_validate(unique_name = TRUE))
}
|
library(rdfp)
### Name: dfp_getCustomTargetingValuesByStatement
### Title: getCustomTargetingValuesByStatement
### Aliases: dfp_getCustomTargetingValuesByStatement
### ** Examples
## Not run:
##D dat <- list(filterStatement=list('query'="WHERE status='ACTIVE'"))
##D res <- dfp_getCustomTargetingValuesByStatement(dat)
## End(Not run)
| /data/genthat_extracted_code/rdfp/examples/dfp_getCustomTargetingValuesByStatement.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 346 | r | library(rdfp)
### Name: dfp_getCustomTargetingValuesByStatement
### Title: getCustomTargetingValuesByStatement
### Aliases: dfp_getCustomTargetingValuesByStatement
### ** Examples
## Not run:
##D dat <- list(filterStatement=list('query'="WHERE status='ACTIVE'"))
##D res <- dfp_getCustomTargetingValuesByStatement(dat)
## End(Not run)
|
################################################################################
### general / non-parametric
# Numerical integration
# Note: y can be a vector, all other inputs are scalars
# Numerical integration of the CRPS for a predictive CDF 'pxxx' with
# support contained in [lower, upper].
# Note: y can be a vector, all other inputs are scalars.
crps.int <- function(y, pxxx, lower, upper, rel_tol = 1e-6){
  # -1: y below the support, 0: inside, 1: above
  ind <- (y > upper) - (y < lower)
  out <- numeric(length(y))
  F1 <- function(x) pxxx(x)^2
  F2 <- function(x) (1-pxxx(x))^2
  # The three categories must be handled with independent 'if' statements:
  # the previous 'else if' chain computed only the first non-empty category
  # and silently left all entries of 'out' in the other categories at 0
  # whenever y contained a mix of below/inside/above-support values.
  if (any(ind == -1)) {
    out[ind == -1] <- sapply(which(ind == -1), function(i) {
      s1 <- lower - y[i]
      s2 <- integrate(F2, lower, upper, rel.tol = rel_tol)$value
      s1 + s2
    })
  }
  if (any(ind == 0)) {
    out[ind == 0] <- sapply(which(ind == 0), function(i) {
      s1 <- integrate(F1, lower, y[i], rel.tol = rel_tol)$value
      s2 <- integrate(F2, y[i], upper, rel.tol = rel_tol)$value
      s1 + s2
    })
  }
  if (any(ind == 1)) {
    out[ind == 1] <- sapply(which(ind == 1), function(i) {
      s1 <- integrate(F1, lower, upper, rel.tol = rel_tol)$value
      s2 <- y[i] - upper
      s1 + s2
    })
  }
  return(out)
}
# (weighted) empirical distribution
# CRPS of a (possibly weighted) empirical distribution function.
# 'dat' holds the sample, 'y' the observation(s); 'w' are optional
# per-sample weights (uniform weights 1/n when NULL).
crps.edf <- function(dat, y, w = NULL){
  n <- length(dat)
  if (is.null(w)){
    # Uniform weights: closed form based on the order statistics.
    xs <- sort(dat, decreasing = FALSE)
    ranks <- seq_len(n)
    out <- sapply(y, function(obs) {
      2 / n^2 * sum((n * (obs < xs) - ranks + 0.5) * (xs - obs))
    })
  } else {
    # General weights: accumulate the weight CDF alongside the sorted sample.
    ord <- order(dat)
    xs <- dat[ord]
    ws <- w[ord]
    cdf <- c(0, cumsum(ws))
    half.sq <- 0.5 * (cdf[-1]^2 - cdf[-(n + 1)]^2)
    out <- sapply(y, function(obs) {
      2 * sum((ws * (obs < xs) - half.sq) * (xs - obs))
    })
  }
  return(out)
}
# kernel density estimation
# CRPS under a Gaussian kernel density estimate of 'dat', evaluated at 'y'.
# The KDE is an equally weighted normal mixture with means 'dat' and a
# common bandwidth 'bw' (bw.nrd() rule of thumb when not supplied).
crps.kdens = function(dat, y, bw = NULL){
  n <- length(dat)
  bandwidth <- if (is.null(bw)) bw.nrd(dat) else bw
  s <- matrix(bandwidth, nrow = 1, ncol = n)
  m <- matrix(dat, nrow = 1, ncol = n)
  w <- matrix(1/n, nrow = 1, ncol = n)
  return(crps.mixnorm(y = y, m = m, s = s, w = w))
}
################################################################################
### discrete / infinite support
# poisson
# Closed-form CRPS for the Poisson distribution; uses the modified
# Bessel functions of the first kind (orders 0 and 1).
crps.pois <- function(y, lambda) {
  term1 <- (y - lambda) * (2 * ppois(y, lambda) - 1)
  bessel.part <- exp(-2 * lambda) * (besselI(2 * lambda, 0) + besselI(2 * lambda, 1))
  term2 <- 2 * dpois(floor(y), lambda) - bessel.part
  return(term1 + lambda * term2)
}
# negative binomial
# Closed-form CRPS for the negative binomial distribution with dispersion
# parameter 'size' and success probability 'prob'. The analytical
# expression involves the Gaussian hypergeometric function 2F1, supplied
# by the optional 'hypergeo' package.
crps.nbinom <- function(y, size, prob) {
  # Fail early with an actionable message if the optional dependency
  # providing 2F1 is not installed.
  if (!requireNamespace("hypergeo", quietly = TRUE)) {
    stop(paste(
      "Calculations require an implementation of the gaussian hypergeometric function.",
      "Please install the following package: hypergeo (>= 1.0)",
      sep = "\n"))
  }
  c1 <- y * (2 * pnbinom(y, size, prob) - 1)
  # c2 equals the distribution's variance divided by 'size': (1 - p) / p^2
  c2 <- (1 - prob) / prob ^ 2
  # hypergeo() returns a complex number; Re() extracts the real value
  c3 <- prob * (2 * pnbinom(y - 1, size + 1, prob) - 1) + Re(hypergeo::hypergeo(size + 1, 0.5, 2,-4 * c2))
  return(c1 - size * c2 * c3)
}
################################################################################
### bounded interval
# uniform
# CRPS for the uniform distribution on [min, max] with optional point
# masses 'lmass' at the lower and 'umass' at the upper bound.
crps.unif <- function(y, min, max, lmass = 0, umass = 0) {
  # probability assigned to the continuous part of the distribution
  cont <- 1 - (lmass + umass)
  if (any(cont < 0)) {
    stop("Sum of 'lmass' and 'umass' exceeds 1.")
  }
  p <- punif(y, min, max)
  term1 <- 2 * (cont * p + lmass) - 1
  # outside the support the CDF term saturates at -1 / +1
  term1[y < min] <- -1
  term1[!(y < max)] <- 1
  term2 <- cont^2 / 3 - cont * p^2
  term3 <- umass * (2 * (y >= max) - 1 + lmass)
  return((y - min) * term1 + (max - min) * (term2 - term3))
}
# beta
# Closed-form CRPS for the beta distribution.
crps.beta <- function(y, shape1, shape2) {
  p <- pbeta(y, shape1, shape2)
  mu <- shape1 / (shape1 + shape2)       # distribution mean
  tail.term <- 1 - 2 * pbeta(y, shape1 + 1, shape2)
  spread <- 2 / shape1 * beta(2 * shape1, 2 * shape2) / beta(shape1, shape2)^2
  # beta() can over-/underflow for large shapes; substitute Stirling's
  # approximation wherever the exact expression is not finite.
  bad <- !is.finite(spread)
  if (any(bad)) {
    spread[bad] <- sqrt(shape2 / (pi * shape1 * (shape1 + shape2)))[bad]
  }
  return(y * (2 * p - 1) + mu * (tail.term - spread))
}
################################################################################
### real line
# laplace
# Closed-form CRPS for the Laplace (double exponential) distribution.
crps.lapl <- function(y, location, scale) {
  z <- (y - location) / scale
  # CDF of the standard Laplace, written via the exponential CDF of |z|
  cdf <- 0.5 + 0.5 * sign(z) * pexp(abs(z))
  tail.p <- pmin(cdf, 1 - cdf)
  out <- z * (2 * cdf - 1) - 2 * tail.p * (log(2 * tail.p) - 1) - 0.75
  return(scale * out)
}
# logistic
# Closed-form CRPS for the logistic distribution.
crps.logis <- function(y, location, scale) {
  z <- (y - location) / scale
  cdf <- plogis(z)
  # negative binary entropy of the CDF enters the closed form
  ent <- cdf * log(cdf) + (1 - cdf) * log(1 - cdf)
  out <- z * (2 * cdf - 1) - 1 - 2 * ent
  return(scale * out)
}
# normal
# Closed-form CRPS for the normal distribution, including generalized
# truncated/censored variants on the interval [lower, upper].
#
# lmass/umass are the point masses at the bounds. They may be numeric, or
# the string "cens" to request the censored-normal mass pnorm(lb)
# (resp. pnorm(ub, lower.tail = FALSE)) at the corresponding bound.
crps.norm <- function(y, location, scale,
                      lower = -Inf, upper = Inf,
                      lmass = 0, umass = 0) {
  ### standard formula
  ind1 <- any(is.finite(lower))
  ind2 <- any(is.finite(upper))
  if (!ind1 & !ind2) {
    # plain (untruncated, uncensored) normal: classical closed form
    z <- y
    # standardize only when a non-default location/scale was supplied
    if (!identical(location, 0) | !identical(scale, 1)) {
      z <- (y - location) / scale
    }
    out <- z * (2 * pnorm(z) - 1) + 2 * dnorm(z) - 1 / sqrt(pi)
    return(scale * out)
  }
  ### dealing with truncation/censoring
  zb <- y
  if (ind1) {
    zb <- pmax(lower, zb)              # clip observation at the lower bound
    lb <- (lower - location) / scale   # standardized lower bound
    if (is.character(lmass)) {
      # "cens" entries get the censored mass pnorm(lb); other entries stay 0.
      # The two branches differ only in which vector drives the recycling.
      n1 <- length(lb)
      n2 <- length(lmass)
      if (n1 < n2) {
        Plb <- numeric(n2)
        Plb[lmass == "cens"] <- pnorm(lb)
      } else {
        Plb <- numeric(n1)
        ind <- lmass == "cens"
        Plb[ind] <- pnorm(lb[ind])
      }
    } else {
      Plb <- lmass
    }
  }
  if (ind2) {
    zb <- pmin(upper, zb)              # clip observation at the upper bound
    ub <- (upper - location) / scale   # standardized upper bound
    if (is.character(umass)) {
      # mirror of the lower-bound handling, using the upper tail
      n1 <- length(ub)
      n2 <- length(umass)
      if (n1 < n2) {
        Pub <- numeric(n2)
        Pub[umass == "cens"] <- pnorm(ub, lower.tail = FALSE)
      } else {
        Pub <- numeric(n1)
        ind <- umass == "cens"
        Pub[ind] <- pnorm(ub[ind], lower.tail = FALSE)
      }
    } else {
      Pub <- umass
    }
  }
  res <- abs(y - zb)                   # contribution from y lying outside [lower, upper]
  zb <- (zb - location) / scale
  if (ind1 & ind2) {
    # both bounds finite
    if (any(Plb + Pub > 1)){
      stop("Sum of 'lmass' and 'umass' exceeds 1.")
    }
    # 'a' rescales the continuous normal density between the two bounds
    a <- (1 - Plb - Pub) / (pnorm(ub) - pnorm(lb))
    out_l <- -lb * Plb^2 - 2 * a * dnorm(lb) * Plb + a^2 / sqrt(pi) * pnorm(lb * sqrt(2))
    out_u <- ub * Pub^2 - 2 * a * dnorm(ub) * Pub + a^2 / sqrt(pi) * pnorm(ub * sqrt(2), lower.tail = FALSE)
    out_y <- zb * (2 * (a * (pnorm(zb) - pnorm(lb)) + Plb) - 1) + 2 * a * dnorm(zb) - a^2 / sqrt(pi)
  } else if (ind1 & !ind2) {
    # only the lower bound is finite
    a <- (1 - Plb) / (1 - pnorm(lb))
    out_l <- -lb * Plb^2 - 2 * a * dnorm(lb) * Plb + a^2 / sqrt(pi) * pnorm(lb * sqrt(2))
    out_u <- 0
    out_y <- zb * (2 * (1 - a * pnorm(zb, lower.tail = FALSE)) - 1) + 2 * a * dnorm(zb) - a^2 / sqrt(pi)
  } else if (!ind1 & ind2) {
    # only the upper bound is finite
    a <- (1 - Pub) / pnorm(ub)
    out_l <- 0
    out_u <- ub * Pub^2 - 2 * a * dnorm(ub) * Pub + a^2 / sqrt(pi) * pnorm(ub * sqrt(2), lower.tail = FALSE)
    out_y <- zb * (2 * a * pnorm(zb) - 1) + 2 * a * dnorm(zb) - a^2 / sqrt(pi)
  }
  return(res + scale * (out_y + out_l + out_u))
}
# t
# Closed-form CRPS for the (location-scale) Student-t distribution.
# Requires df > 1; df = Inf entries are delegated to the normal formula.
crps.t <- function(y, df, location, scale) {
  if (any(!df > 1))
    stop("Parameter 'df' contains values not greater than 1. The CRPS is not defined.")
  z <- y
  # standardize only when a non-default location/scale was supplied
  if (!identical(location, 0) | !identical(scale, 1)) {
    z <- (y - location) / scale
  }
  ind <- df == Inf
  if (any(ind)) {
    # recycle z so it can be split by the df == Inf mask
    if (length(z) < length(df)) {
      z <- rep(z, len = length(df))
    }
    out <- numeric(length(z))
    # t with df = Inf is the standard normal
    out[ind] <- crps.norm(z[ind], 0, 1)
    # remaining entries: recurse with the finite df values only
    out[!ind] <- crps.t(z[!ind], df[!ind], 0, 1)
  } else {
    c1 <- z * (2 * pt(z, df) - 1)
    c2 <- dt(z, df) * (1 + z^2 / df)
    c3 <- beta(0.5, df - 0.5) / sqrt(df) / beta(0.5, 0.5 * df)^2
    out <- c1 + 2 * df / (df - 1) * (c2 - c3)
  }
  return(scale * out)
}
# mixture of normals (numerical integration)
# CRPS for a mixture of normals via numerical integration of the mixture
# CDF; m, s, w are vectors of component means, sds and weights.
crps.mixnorm.int <- function(y, m, s, w, rel_tol){
  mixture.cdf <- function(q){
    sapply(q, function(point) sum(w * pnorm((point - m) / s)))
  }
  crps.int(y, mixture.cdf, -Inf, Inf, rel_tol)
}
# mixture of normals
# CRPS for mixtures of normal distributions.
#
# y: vector of observations; row i of the matrices m (means), s (standard
# deviations) and w (weights) parameterizes the mixture for y[i].
# exact: if TRUE, use the analytical formula (compiled routine crpsmixnC);
# otherwise integrate numerically with relative tolerance rel_tol.
crps.mixnorm = function(y, m, s, w, exact = TRUE, rel_tol = 1e-6){
  # isTRUE() instead of '== TRUE' so that NA/non-logical values of 'exact'
  # deterministically select the numerical branch; vapply instead of sapply
  # so a length-0 'y' yields numeric(0) rather than an empty list.
  if (isTRUE(exact)) {
    out <- vapply(seq_along(y),
                  function(i) crpsmixnC(w[i, ], m[i, ], s[i, ], y[i]),
                  numeric(1))
  } else {
    out <- vapply(seq_along(y),
                  function(i) crps.mixnorm.int(y[i], m[i, ], s[i, ], w[i, ], rel_tol),
                  numeric(1))
  }
  return(out)
}
# CRPS for the two-piece exponential distribution: decomposes into two
# censored exponential CRPS terms, one per tail around 'location'.
# (Removed the unused local 'b2' computed by the previous version.)
crps.2pexp <- function(y, location, scale1, scale2) {
  y1 <- pmin(y, location)   # observation clipped to the lower piece
  y2 <- pmax(y, location)   # observation clipped to the upper piece
  s <- scale1 + scale2
  a1 <- scale1 / s          # probability mass of the lower piece
  a2 <- scale2 / s          # probability mass of the upper piece
  # lower tail is an exponential mirrored around 'location'
  crps.exp(-y1, -location, scale1, a2) +
    crps.exp(y2, location, scale2, a1)
}
# CRPS for the two-piece normal distribution: decomposes into two
# censored normal CRPS terms, one per half around 'location'.
# (Removed the unused local 'b2' computed by the previous version.)
crps.2pnorm <- function(y, location, scale1, scale2) {
  y1 <- pmin(y, location)   # observation clipped to the lower piece
  y2 <- pmax(y, location)   # observation clipped to the upper piece
  s <- scale1 + scale2
  a1 <- scale1 / s          # probability mass of the lower piece
  a2 <- scale2 / s          # probability mass of the upper piece
  crps.norm(y1, location, scale1, upper = location, umass = a2) +
    crps.norm(y2, location, scale2, lower = location, lmass = a1)
}
################################################################################
### non-negative
# gamma
# Closed-form CRPS for the gamma distribution (shape/scale parameterization).
crps.gamma <- function(y, shape, scale) {
  term1 <- y * (2 * pgamma(y, shape, scale = scale) - 1)
  term2 <- shape * (2 * pgamma(y, shape + 1, scale = scale) - 1) + 1 / beta(0.5, shape)
  return(term1 - scale * term2)
}
# log-laplace
# Closed-form CRPS for the log-Laplace distribution with log-location
# 'locationlog' and log-scale 'scalelog'.
# The CRPS is finite only for scalelog in (0, 1); both bounds are now
# validated (previously only 'scalelog < 1' was checked, although the
# error message already claimed the interval (0, 1)).
crps.llapl <- function(y, locationlog, scalelog) {
  if (any(scalelog <= 0 | scalelog >= 1)) stop("Parameter 'scalelog' contains values not in (0, 1). The CRPS is not defined.")
  y1 <- log(pmax(y, 0))   # clamp to the support [0, Inf) before taking logs
  z <- (y1 - locationlog) / scalelog
  # Laplace CDF of the log-observation
  p <- 0.5 + 0.5 * sign(z) * pexp(abs(z))
  c1 <- y*(2*p - 1)
  # tail-integral term; branches correspond to y below/above the median
  c2 <- ifelse (z < 0,
                (1 - (2*p)^(1 + scalelog)) / (1 + scalelog),
                - (1 - (2*(1-p))^(1 - scalelog)) / (1 - scalelog)
  )
  c3 <- scalelog / (4 - scalelog^2) + c2
  return(c1 + exp(locationlog)*c3)
}
# log-logistic
# Closed-form CRPS for the log-logistic distribution.
# The CRPS is finite only for scalelog in (0, 1); both bounds are now
# validated (previously only 'scalelog < 1' was checked, although the
# error message already claimed the interval (0, 1)).
crps.llogis <- function(y, locationlog, scalelog) {
  if (any(scalelog <= 0 | scalelog >= 1)) stop("Parameter 'scalelog' contains values not in (0, 1). The CRPS is not defined.")
  y1 <- log(pmax(y, 0))   # clamp to the support [0, Inf) before taking logs
  p <- plogis(y1, locationlog, scalelog)   # log-logistic CDF at y
  c1 <- y*(2*p - 1)
  c2 <- 2*exp(locationlog)*beta(1 + scalelog, 1 - scalelog)
  c3 <- (1 - scalelog)/2 - pbeta(p, 1 + scalelog, 1 - scalelog)
  return(c1 + c2*c3)
}
# log-normal
# Closed-form CRPS for the log-normal distribution.
crps.lnorm <- function(y, meanlog, sdlog) {
  term1 <- y * (2 * plnorm(y, meanlog, sdlog) - 1)
  two.mean <- 2 * exp(meanlog + 0.5 * sdlog^2)   # twice the log-normal mean
  term2 <- plnorm(y, meanlog + sdlog^2, sdlog) + pnorm(sdlog / sqrt(2)) - 1
  return(term1 - two.mean * term2)
}
################################################################################
### variable support
# exponential
# CRPS for the exponential distribution, optionally with a point mass
# 'mass' at the lower end of the support (used by censored variants).
crps.exp <- function(y, location, scale, mass = 0) {
  # standardize only when a non-default location/scale was supplied
  z <- if (identical(location, 0) && identical(scale, 1)) {
    y
  } else {
    (y - location) / scale
  }
  p.cont <- 1 - mass   # probability mass of the continuous part
  out <- abs(z) - 2 * p.cont * pexp(z) + 0.5 * p.cont^2
  return(scale * out)
}
# generalized pareto distribution
# Closed-form CRPS for the generalized Pareto distribution with optional
# point mass 'mass' at the lower end of the support (censored variants).
# Requires shape < 1 (enforced below).
crps.gpd <- function(y, location, scale, shape, mass = 0) {
  if (any(!shape < 1)) stop("Parameter 'shape' contains values not smaller than 1. The CRPS is not defined.")
  z <- y
  # standardize only when a non-default location/scale was supplied
  if (!identical(location, 0) | !identical(scale, 1)) {
    z <- (y - location)/scale
  }
  # the general closed form is numerically unstable as shape -> 0; treat
  # tiny shapes as exactly 0, which reduces the GPD to the exponential case
  ind <- abs(shape) < 1e-12
  if (any(ind)) {
    if (any(ind & shape != 0))
      warning("Parameter 'shape' contains values close to zero. In those cases the CRPS is calculated assuming a value of 0.")
    if (all(ind)) {
      out <- crps.exp(z, 0, 1, mass)
    } else {
      # recycle inputs so they can be split by the near-zero-shape mask
      if (length(z) < length(shape)) {
        z <- rep(z, len = length(shape))
      }
      if (length(mass) < length(shape)) {
        mass <- rep(mass, len = length(shape))
      }
      out <- numeric(length(z))
      out[ind] <- crps.exp(z[ind], 0, 1, mass[ind])
      # remaining entries: recurse with the non-degenerate shapes only
      out[!ind] <- crps.gpd(z[!ind], 0, 1, shape[!ind], mass[!ind])
    }
  } else {
    x <- 1 + shape * z
    x[x < 0] <- 0                            # clamp to the distribution's support
    p <- 1 - x ^ (-1 / shape) * (1 - mass)   # CDF including the point mass
    p[p < 0] <- 0
    c1 <- (z + 1 / shape) * (2 * p - 1)
    c2 <- 2 * (1 - mass)^shape / shape / (shape - 1) * (1 / (shape - 2) + (1 - p) ^ (1 - shape))
    out <- c1 - c2
  }
  return(scale * out)
}
# generalized extreme value distribution
# Closed-form CRPS for the generalized extreme value distribution.
# Requires shape < 1 (enforced below); near-zero shapes are treated as the
# shape = 0 (Gumbel) case.
crps.gev <- function(y, location, scale, shape) {
  if (any(!shape < 1)) stop("Parameter 'shape' contains values not smaller than 1. The CRPS is not defined.")
  z <- y
  # standardize only when a non-default location/scale was supplied
  if (!identical(location, 0) | !identical(scale, 1)) {
    z <- (y - location)/scale
  }
  # the general closed form is numerically unstable as shape -> 0
  ind <- abs(shape) < 1e-12
  if (any(ind)) {
    if (any(ind & shape != 0))
      warning("Parameter 'shape' contains values close to zero. In those cases the CRPS is calculated assuming a value of 0.")
    # recycle z so it can be split by the near-zero-shape mask
    if (length(z) < length(shape)) {
      z <- rep(z, len = length(shape))
    }
    out <- numeric(length(z))
    # the Gumbel case needs the exponential integral Ei; prefer the gsl binding
    if (requireNamespace("gsl", quietly = TRUE)) {
      out[ind] <- -z[ind] - 2 * gsl::expint_Ei(-exp(-z[ind])) - digamma(1) - log(2)
    } else {
      warning(paste("The exponential integral is approximated using the 'integrate' function.",
                    "Consider installing the 'gsl' package to leverage a more accurate implementation.",
                    sep = "\n"))
      # fallback: Ei(u) = integral of exp(x)/x from -Inf to u
      expint_Ei <- sapply(-exp(-z[ind]), function(upper) {
        integrate(function(x) exp(x)/x, -Inf, upper)$value
      })
      out[ind] <- -z[ind] - 2 * expint_Ei - digamma(1) - log(2)
    }
    # remaining entries: recurse with the non-degenerate shapes only
    out[!ind] <- crps.gev(z[!ind], 0, 1, shape[!ind])
  } else {
    x <- 1 + shape * z
    x[x < 0] <- 0               # clamp to the distribution's support
    p <- exp(-x^(-1/shape))     # GEV CDF
    out <- (-z - 1/shape)*(1 - 2*p) - 1/shape*gamma(1-shape)*(2^shape - 2*pgamma(-log(p), 1-shape))
  }
  return(scale * out)
}
| /R/crpsFunctions.R | no_license | ml-lab/scoringRules | R | false | false | 13,014 | r | ################################################################################
### general / non-parametric
# Numerical integration
# Note: y can be a vector, all other inputs are scalars
crps.int <- function(y, pxxx, lower, upper, rel_tol = 1e-6){
ind <- (y > upper) - (y < lower)
out <- numeric(length(y))
F1 <- function(x) pxxx(x)^2
F2 <- function(x) (1-pxxx(x))^2
if (any(ind == -1)) {
out[ind == -1] <- sapply(which(ind == -1), function(i) {
s1 <- lower - y[i]
s2 <- integrate(F2, lower, upper, rel.tol = rel_tol)$value
s1 + s2
})
} else if (any(ind == 0)) {
out[ind == 0] <- sapply(which(ind == 0), function(i) {
s1 <- integrate(F1, lower, y[i], rel.tol = rel_tol)$value
s2 <- integrate(F2, y[i], upper, rel.tol = rel_tol)$value
s1 + s2
})
} else if (any(ind == 1)) {
out[ind == 1] <- sapply(which(ind == 1), function(i) {
s1 <- integrate(F1, lower, upper, rel.tol = rel_tol)$value
s2 <- y[i] - upper
s1 + s2
})
}
return(out)
}
# (weighted) empirical distribution
crps.edf <- function(dat, y, w = NULL){
n <- length(dat)
# Set uniform weights unless specified otherwise
if (is.null(w)){
x <- sort(dat, decreasing = FALSE)
out <- sapply(y, function(s) 2 / n^2 * sum((n * (s < x) - 1:n + 0.5) * (x - s)))
} else {
ord <- order(dat)
x <- dat[ord]
w <- w[ord]
p <- c(0, cumsum(w))
out <- sapply(y, function(s) 2 * sum((w * (s < x) - 0.5 * (p[2:(n+1)]^2 - p[1:n]^2)) * (x - s)))
}
return(out)
}
# kernel density estimation
crps.kdens = function(dat, y, bw = NULL){
n <- length(dat)
if (is.null(bw)) {
s <- matrix(bw.nrd(dat), nrow = 1, ncol = n)
}
else {
s <- matrix(bw, nrow = 1, ncol = n)
}
m <- matrix(dat, nrow = 1, ncol = n)
w <- matrix(1/n, nrow = 1, ncol = n)
return(crps.mixnorm(y = y, m = m, s = s, w = w))
}
################################################################################
### discrete / infinite support
# poisson
crps.pois <- function(y, lambda) {
c1 <- (y - lambda) * (2*ppois(y, lambda) - 1)
c2 <- 2*dpois(floor(y), lambda) - exp(-2*lambda) * (besselI(2*lambda, 0) + besselI(2*lambda, 1))
return(c1 + lambda*c2)
}
# negative binomial
crps.nbinom <- function(y, size, prob) {
if (!requireNamespace("hypergeo", quietly = TRUE)) {
stop(paste(
"Calculations require an implementation of the gaussian hypergeometric function.",
"Please install the following package: hypergeo (>= 1.0)",
sep = "\n"))
}
c1 <- y * (2 * pnbinom(y, size, prob) - 1)
c2 <- (1 - prob) / prob ^ 2
c3 <- prob * (2 * pnbinom(y - 1, size + 1, prob) - 1) + Re(hypergeo::hypergeo(size + 1, 0.5, 2,-4 * c2))
return(c1 - size * c2 * c3)
}
################################################################################
### bounded interval
# uniform
crps.unif <- function(y, min, max, lmass = 0, umass = 0) {
c <- 1 - (lmass + umass)
if (any(c < 0)) {
stop("Sum of 'lmass' and 'umass' exceeds 1.")
}
p <- punif(y, min, max)
c1 <- 2 * (c * p + lmass) - 1
c1[y < min] <- -1
c1[!y < max] <- 1
c2 <- c^2 / 3 - c * p^2
c3 <- umass * (2 * (y >= max) - 1 + lmass)
return((y - min) * c1 + (max - min) * (c2 - c3))
}
# beta
crps.beta <- function(y, shape1, shape2) {
c1 <- y * (2*pbeta(y, shape1, shape2) - 1)
c2 <- shape1/(shape1+shape2)
c3 <- 1 - 2*pbeta(y, shape1 + 1, shape2)
c4 <- 2/shape1 * beta(2*shape1, 2*shape2) / beta(shape1, shape2)^2
ind <- !is.finite(c4)
if (any(ind)) {
c4[ind] <- sqrt(shape2 / (pi * shape1 * (shape1 + shape2)))[ind] # stirling's approximation
}
return(c1 + c2*(c3 - c4))
}
################################################################################
### real line
# laplace
crps.lapl <- function(y, location, scale) {
z <- (y - location)/scale
p <- 0.5 + 0.5 * sign(z) * pexp(abs(z))
minp <- pmin(p, 1-p)
c1 <- z*(2*p - 1) - 2*minp*(log(2*minp) - 1) - 0.75
return(scale*c1)
}
# logistic
crps.logis <- function(y, location, scale) {
z <- (y - location)/scale
p <- plogis(z)
c1 <- z*(2*p - 1) - 1 - 2*(p*log(p) + (1-p)*log(1-p))
return(scale*c1)
}
# normal
crps.norm <- function(y, location, scale,
lower = -Inf, upper = Inf,
lmass = 0, umass = 0) {
### standard formula
ind1 <- any(is.finite(lower))
ind2 <- any(is.finite(upper))
if (!ind1 & !ind2) {
z <- y
if (!identical(location, 0) | !identical(scale, 1)) {
z <- (y - location) / scale
}
out <- z * (2 * pnorm(z) - 1) + 2 * dnorm(z) - 1 / sqrt(pi)
return(scale * out)
}
### dealing with truncation/censoring
zb <- y
if (ind1) {
zb <- pmax(lower, zb)
lb <- (lower - location) / scale
if (is.character(lmass)) {
n1 <- length(lb)
n2 <- length(lmass)
if (n1 < n2) {
Plb <- numeric(n2)
Plb[lmass == "cens"] <- pnorm(lb)
} else {
Plb <- numeric(n1)
ind <- lmass == "cens"
Plb[ind] <- pnorm(lb[ind])
}
} else {
Plb <- lmass
}
}
if (ind2) {
zb <- pmin(upper, zb)
ub <- (upper - location) / scale
if (is.character(umass)) {
n1 <- length(ub)
n2 <- length(umass)
if (n1 < n2) {
Pub <- numeric(n2)
Pub[umass == "cens"] <- pnorm(ub, lower.tail = FALSE)
} else {
Pub <- numeric(n1)
ind <- umass == "cens"
Pub[ind] <- pnorm(ub[ind], lower.tail = FALSE)
}
} else {
Pub <- umass
}
}
res <- abs(y - zb)
zb <- (zb - location) / scale
if (ind1 & ind2) {
if (any(Plb + Pub > 1)){
stop("Sum of 'lmass' and 'umass' exceeds 1.")
}
a <- (1 - Plb - Pub) / (pnorm(ub) - pnorm(lb))
out_l <- -lb * Plb^2 - 2 * a * dnorm(lb) * Plb + a^2 / sqrt(pi) * pnorm(lb * sqrt(2))
out_u <- ub * Pub^2 - 2 * a * dnorm(ub) * Pub + a^2 / sqrt(pi) * pnorm(ub * sqrt(2), lower.tail = FALSE)
out_y <- zb * (2 * (a * (pnorm(zb) - pnorm(lb)) + Plb) - 1) + 2 * a * dnorm(zb) - a^2 / sqrt(pi)
} else if (ind1 & !ind2) {
a <- (1 - Plb) / (1 - pnorm(lb))
out_l <- -lb * Plb^2 - 2 * a * dnorm(lb) * Plb + a^2 / sqrt(pi) * pnorm(lb * sqrt(2))
out_u <- 0
out_y <- zb * (2 * (1 - a * pnorm(zb, lower.tail = FALSE)) - 1) + 2 * a * dnorm(zb) - a^2 / sqrt(pi)
} else if (!ind1 & ind2) {
a <- (1 - Pub) / pnorm(ub)
out_l <- 0
out_u <- ub * Pub^2 - 2 * a * dnorm(ub) * Pub + a^2 / sqrt(pi) * pnorm(ub * sqrt(2), lower.tail = FALSE)
out_y <- zb * (2 * a * pnorm(zb) - 1) + 2 * a * dnorm(zb) - a^2 / sqrt(pi)
}
return(res + scale * (out_y + out_l + out_u))
}
# t
crps.t <- function(y, df, location, scale) {
if (any(!df > 1))
stop("Parameter 'df' contains values not greater than 1. The CRPS is not defined.")
z <- y
if (!identical(location, 0) | !identical(scale, 1)) {
z <- (y - location) / scale
}
ind <- df == Inf
if (any(ind)) {
if (length(z) < length(df)) {
z <- rep(z, len = length(df))
}
out <- numeric(length(z))
out[ind] <- crps.norm(z[ind], 0, 1)
out[!ind] <- crps.t(z[!ind], df[!ind], 0, 1)
} else {
c1 <- z * (2 * pt(z, df) - 1)
c2 <- dt(z, df) * (1 + z^2 / df)
c3 <- beta(0.5, df - 0.5) / sqrt(df) / beta(0.5, 0.5 * df)^2
out <- c1 + 2 * df / (df - 1) * (c2 - c3)
}
return(scale * out)
}
# mixture of normals (numerical integration)
crps.mixnorm.int <- function(y, m, s, w, rel_tol){
Fmix <- function(z){
sapply(z, function(r) sum(w*pnorm((r-m)/s)))
}
crps.int(y, Fmix, -Inf, Inf, rel_tol)
}
# mixture of normals
crps.mixnorm = function(y, m, s, w, exact = TRUE, rel_tol = 1e-6){
if (exact == TRUE){
out <- sapply(seq_along(y), function(i) crpsmixnC(w[i, ], m[i, ], s[i, ], y[i]))
} else {
out <- sapply(seq_along(y), function(i) crps.mixnorm.int(y[i], m[i, ], s[i, ], w[i, ], rel_tol))
}
return(out)
}
crps.2pexp <- function(y, location, scale1, scale2) {
y1 <- pmin(y, location)
y2 <- pmax(y, location)
s <- scale1 + scale2
a1 <- scale1 / s
a2 <- scale2 / s
b2 <- a1 - a2
crps.exp(-y1, -location, scale1, a2) +
crps.exp(y2, location, scale2, a1)
}
crps.2pnorm <- function(y, location, scale1, scale2) {
y1 <- pmin(y, location)
y2 <- pmax(y, location)
s <- scale1 + scale2
a1 <- scale1 / s
a2 <- scale2 / s
b2 <- a1 - a2
crps.norm(y1, location, scale1, upper = location, umass = a2) +
crps.norm(y2, location, scale2, lower = location, lmass = a1)
}
################################################################################
### non-negative
# gamma
crps.gamma <- function(y, shape, scale) {
c1 <- y*(2*pgamma(y, shape, scale=scale) - 1)
c2 <- shape*(2*pgamma(y, shape+1, scale=scale) - 1) + 1/beta(.5, shape)
return(c1 - scale*c2)
}
# log-laplace
crps.llapl <- function(y, locationlog, scalelog) {
if (any(!scalelog < 1)) stop("Parameter 'scalelog' contains values not in (0, 1). The CRPS is not defined.")
y1 <- log(pmax(y, 0))
z <- (y1 - locationlog) / scalelog
p <- 0.5 + 0.5 * sign(z) * pexp(abs(z))
c1 <- y*(2*p - 1)
c2 <- ifelse (z < 0,
(1 - (2*p)^(1 + scalelog)) / (1 + scalelog),
- (1 - (2*(1-p))^(1 - scalelog)) / (1 - scalelog)
)
c3 <- scalelog / (4 - scalelog^2) + c2
return(c1 + exp(locationlog)*c3)
}
# log-logistic
crps.llogis <- function(y, locationlog, scalelog) {
if (any(!scalelog < 1)) stop("Parameter 'scalelog' contains values not in (0, 1). The CRPS is not defined.")
y1 <- log(pmax(y, 0))
p <- plogis(y1, locationlog, scalelog)
c1 <- y*(2*p - 1)
c2 <- 2*exp(locationlog)*beta(1 + scalelog, 1 - scalelog)
c3 <- (1 - scalelog)/2 - pbeta(p, 1 + scalelog, 1 - scalelog)
return(c1 + c2*c3)
}
# log-normal
crps.lnorm <- function(y, meanlog, sdlog) {
c1 <- y*(2*plnorm(y, meanlog, sdlog) - 1)
c2 <- 2*exp(meanlog + 0.5*sdlog^2)
c3 <- plnorm(y, meanlog + sdlog^2, sdlog) + pnorm(sdlog/sqrt(2)) - 1
return(c1 - c2*c3)
}
################################################################################
### variable support
# exponential
crps.exp <- function(y, location, scale, mass = 0) {
z <- y
if (!identical(location, 0) | !identical(scale, 1)) {
z <- (y - location)/scale
}
c1 <- abs(z) - 2 * (1 - mass) * pexp(z) + 0.5 * (1 - mass)^2
return(scale * c1)
}
# generalized pareto distribution
crps.gpd <- function(y, location, scale, shape, mass = 0) {
if (any(!shape < 1)) stop("Parameter 'shape' contains values not smaller than 1. The CRPS is not defined.")
z <- y
if (!identical(location, 0) | !identical(scale, 1)) {
z <- (y - location)/scale
}
ind <- abs(shape) < 1e-12
if (any(ind)) {
if (any(ind & shape != 0))
warning("Parameter 'shape' contains values close to zero. In those cases the CRPS is calculated assuming a value of 0.")
if (all(ind)) {
out <- crps.exp(z, 0, 1, mass)
} else {
if (length(z) < length(shape)) {
z <- rep(z, len = length(shape))
}
if (length(mass) < length(shape)) {
mass <- rep(mass, len = length(shape))
}
out <- numeric(length(z))
out[ind] <- crps.exp(z[ind], 0, 1, mass[ind])
out[!ind] <- crps.gpd(z[!ind], 0, 1, shape[!ind], mass[!ind])
}
} else {
x <- 1 + shape * z
x[x < 0] <- 0
p <- 1 - x ^ (-1 / shape) * (1 - mass)
p[p < 0] <- 0
c1 <- (z + 1 / shape) * (2 * p - 1)
c2 <- 2 * (1 - mass)^shape / shape / (shape - 1) * (1 / (shape - 2) + (1 - p) ^ (1 - shape))
out <- c1 - c2
}
return(scale * out)
}
# generalized extreme value distribution
crps.gev <- function(y, location, scale, shape) {
if (any(!shape < 1)) stop("Parameter 'shape' contains values not smaller than 1. The CRPS is not defined.")
z <- y
if (!identical(location, 0) | !identical(scale, 1)) {
z <- (y - location)/scale
}
ind <- abs(shape) < 1e-12
if (any(ind)) {
if (any(ind & shape != 0))
warning("Parameter 'shape' contains values close to zero. In those cases the CRPS is calculated assuming a value of 0.")
if (length(z) < length(shape)) {
z <- rep(z, len = length(shape))
}
out <- numeric(length(z))
if (requireNamespace("gsl", quietly = TRUE)) {
out[ind] <- -z[ind] - 2 * gsl::expint_Ei(-exp(-z[ind])) - digamma(1) - log(2)
} else {
warning(paste("The exponential integral is approximated using the 'integrate' function.",
"Consider installing the 'gsl' package to leverage a more accurate implementation.",
sep = "\n"))
expint_Ei <- sapply(-exp(-z[ind]), function(upper) {
integrate(function(x) exp(x)/x, -Inf, upper)$value
})
out[ind] <- -z[ind] - 2 * expint_Ei - digamma(1) - log(2)
}
out[!ind] <- crps.gev(z[!ind], 0, 1, shape[!ind])
} else {
x <- 1 + shape * z
x[x < 0] <- 0
p <- exp(-x^(-1/shape))
out <- (-z - 1/shape)*(1 - 2*p) - 1/shape*gamma(1-shape)*(2^shape - 2*pgamma(-log(p), 1-shape))
}
return(scale * out)
}
|
snqProfitEst <- function( priceNames, quantNames, fixNames = NULL,
instNames = NULL, data, form = 0, base = 1, scalingFactors = NULL,
weights = snqProfitWeights( priceNames, quantNames, data, "DW92", base = base ),
method = ifelse( is.null( instNames ), "SUR", "3SLS" ), ... ) {
checkNames( c( priceNames, quantNames, fixNames, instNames ), names( data ) )
if( length( quantNames ) != length( priceNames ) ) {
stop( "arguments 'quantNames' and 'priceNames' must have the same length" )
}
if( length( priceNames ) < 2 ) {
stop( "you must specify at least 2 netputs" )
}
if( length( priceNames ) != length( weights ) ) {
stop( "arguments 'priceNames' and 'weights' must have the same length" )
}
if( min( weights ) < 0 ) {
warning( "At least one weight of the prices for normalization",
" (argument 'weights') is negative. Thus, in this case positive",
" semidefiniteness of the 'beta' matrix does not ensure",
" a convex profit function." )
}
if( !is.null( scalingFactors ) ) {
if( length( scalingFactors ) != length( priceNames ) ) {
stop( "arguments 'priceNames' and 'scalingFactors' must have the",
" same length" )
}
if( base != 1 ) {
warning( "argument 'base' is ignored because argument",
" 'scalingFactors' is provided" )
}
}
nNetput <- length( quantNames ) # number of netputs
nFix <- length( fixNames ) # number of fixed inputs
nIV <- length( instNames ) # number of fixed inputs
nObs <- nrow( data ) # number of observations
if( form == 0 ) {
nCoef <- nNetput + nNetput * ( nNetput - 1 ) / 2 + nNetput * nFix +
( nFix + 1 ) * nFix/2 #number of coefficients
} else if( form == 1 ) {
nCoef <- nNetput + nNetput * ( nNetput - 1 ) / 2 + nNetput * nFix +
nNetput * ( nFix + 1 ) * nFix/2 #number of coefficients
} else {
stop( "argument 'form' must be either 0 or 1" )
}
result <- list()
## scaling factors
if( is.null( scalingFactors ) ) {
scalingFactors <- rep( 1, nNetput )
if( !is.null( base ) ) {
for( i in 1:nNetput ) {
scalingFactors[ i ] <- 1 / mean( data[[ priceNames[ i ] ]][ base ] )
}
}
}
## mean Values
result$pMeans <- array( NA, nNetput )
result$qMeans <- array( NA, nNetput )
for( i in 1:nNetput ) {
result$pMeans[ i ] <- mean( data[[ priceNames[ i ] ]] ) * scalingFactors[ i ]
result$qMeans[ i ] <- mean( data[[ quantNames[ i ] ]] ) / scalingFactors[ i ]
}
names( result$pMeans ) <- priceNames
names( result$qMeans ) <- quantNames
if( nFix > 0 ) {
result$fMeans <- array( NA, nFix )
for( i in 1:nFix ) {
result$fMeans[ i ] <- mean( data[[ fixNames[ i ] ]] )
}
names( result$fMeans ) <- fixNames
}
## instrumental variables
if( nIV == 0 ) {
inst <- NULL
} else {
inst <- as.formula( paste( "~", paste( paste( "iv", c( 1:nIV ), sep="" ),
collapse = " + " ) ) )
}
## prepare and estimate the model
modelData <- .snqProfitModelData( data = data, weights = weights,
priceNames = priceNames, quantNames = quantNames, fixNames = fixNames, instNames = instNames,
form = form, netputScale = scalingFactors, fixedScale = result$fMeans )
system <- snqProfitSystem( nNetput, nFix ) # equation system
restrict <- snqProfitRestrict( nNetput, nFix, form ) # restrictions
result$est <- systemfit( formula = system, method = method, data = modelData,
restrict.regMat = restrict, inst = inst, ... )
result$coef <- snqProfitCoef( coef = coef( result$est, modified.regMat = TRUE ),
nNetput = nNetput, nFix = nFix, form = form,
coefCov = vcov( result$est, modified.regMat = TRUE ),
df = nNetput * nObs - nCoef,
quantNames = quantNames, priceNames = priceNames, fixNames = fixNames )
# estimated coefficients
result$coef <- .snqProfitRescaleCoef( result$coef, nNetput, result$fMeans,
form )
result$fitted <- data.frame( profit0 = rep( 0, nObs ) )
result$residuals <- data.frame( profit0 = rep( 0, nObs ) )
for( i in 1:nNetput ) {
result$fitted[[ quantNames[ i ] ]] <- fitted( result$est$eq[[ i ]] )
result$fitted[[ "profit0" ]] <- result$fitted[[ "profit0" ]] +
result$fitted[[ quantNames[ i ] ]] * data[[ priceNames[ i ] ]] *
scalingFactors[ i ]
result$residuals[[ quantNames[ i ] ]] <- data[[ quantNames[ i ] ]] /
scalingFactors[ i ] - result$fitted[[ quantNames[ i ] ]]
}
result$fitted[[ "profit" ]] <- result$fitted[[ "profit0" ]]
result$fitted[[ "profit0" ]] <- NULL
result$residuals[[ "profit" ]] <- modelData[[ "profit" ]] -
result$fitted[[ "profit" ]]
result$residuals[[ "profit0" ]] <- NULL
result$r2 <- array( NA, c( nNetput + 1 ) )
for( i in 1:nNetput ) {
# result$r2[ i ] <- summary( result$est$eq[[ i ]] )$r.squared
result$r2[ i ] <- rSquared( data[[ quantNames[ i ] ]] / scalingFactors[ i ],
result$residuals[[ quantNames[ i ] ]] )
}
result$r2[ nNetput + 1 ] <- rSquared( modelData[[ "profit" ]],
result$residuals[[ "profit" ]] )
names( result$r2 ) <- c( quantNames, "profit" )
result$hessian <- snqProfitHessian( result$coef$beta, result$pMeans, weights )
# Hessian matrix
result$ela <- snqProfitEla( result$coef$beta, result$pMeans,
result$qMeans, weights, coefVcov = result$coef$allCoefCov,
df = df.residual( result$est ) ) # estimated elasticities
if( nFix > 0 && form == 0 ) {
result$fixEla <- snqProfitFixEla( result$coef$delta, result$coef$gamma,
result$qMeans, result$fMeans, weights )
}
result$data <- data
result$weights <- weights
names( result$weights ) <- priceNames
result$normPrice <- modelData$normPrice
if( nNetput > 2 ){
result$convexity <- semidefiniteness( result$hessian[
1:( nNetput - 1 ), 1:( nNetput - 1 ) ] )$positive
} else if( nNetput == 2 ){
result$convexity <- result$hessian[ 1, 1 ] >= 0
}
result$priceNames <- priceNames
result$quantNames <- quantNames
result$fixNames <- fixNames
result$instNames <- instNames
result$form <- form
result$base <- base
result$method <- method
result$scalingFactors <- scalingFactors
names( result$scalingFactors ) <- priceNames
class( result ) <- "snqProfitEst"
return( result )
}
| /tags/0.5-1/R/snqProfitEst.R | no_license | scfmolina/micecon | R | false | false | 6,518 | r | snqProfitEst <- function( priceNames, quantNames, fixNames = NULL,
instNames = NULL, data, form = 0, base = 1, scalingFactors = NULL,
weights = snqProfitWeights( priceNames, quantNames, data, "DW92", base = base ),
method = ifelse( is.null( instNames ), "SUR", "3SLS" ), ... ) {
checkNames( c( priceNames, quantNames, fixNames, instNames ), names( data ) )
if( length( quantNames ) != length( priceNames ) ) {
stop( "arguments 'quantNames' and 'priceNames' must have the same length" )
}
if( length( priceNames ) < 2 ) {
stop( "you must specify at least 2 netputs" )
}
if( length( priceNames ) != length( weights ) ) {
stop( "arguments 'priceNames' and 'weights' must have the same length" )
}
if( min( weights ) < 0 ) {
warning( "At least one weight of the prices for normalization",
" (argument 'weights') is negative. Thus, in this case positive",
" semidefiniteness of the 'beta' matrix does not ensure",
" a convex profit function." )
}
if( !is.null( scalingFactors ) ) {
if( length( scalingFactors ) != length( priceNames ) ) {
stop( "arguments 'priceNames' and 'scalingFactors' must have the",
" same length" )
}
if( base != 1 ) {
warning( "argument 'base' is ignored because argument",
" 'scalingFactors' is provided" )
}
}
nNetput <- length( quantNames ) # number of netputs
nFix <- length( fixNames ) # number of fixed inputs
nIV <- length( instNames ) # number of fixed inputs
nObs <- nrow( data ) # number of observations
if( form == 0 ) {
nCoef <- nNetput + nNetput * ( nNetput - 1 ) / 2 + nNetput * nFix +
( nFix + 1 ) * nFix/2 #number of coefficients
} else if( form == 1 ) {
nCoef <- nNetput + nNetput * ( nNetput - 1 ) / 2 + nNetput * nFix +
nNetput * ( nFix + 1 ) * nFix/2 #number of coefficients
} else {
stop( "argument 'form' must be either 0 or 1" )
}
result <- list()
## scaling factors
if( is.null( scalingFactors ) ) {
scalingFactors <- rep( 1, nNetput )
if( !is.null( base ) ) {
for( i in 1:nNetput ) {
scalingFactors[ i ] <- 1 / mean( data[[ priceNames[ i ] ]][ base ] )
}
}
}
## mean Values
result$pMeans <- array( NA, nNetput )
result$qMeans <- array( NA, nNetput )
for( i in 1:nNetput ) {
result$pMeans[ i ] <- mean( data[[ priceNames[ i ] ]] ) * scalingFactors[ i ]
result$qMeans[ i ] <- mean( data[[ quantNames[ i ] ]] ) / scalingFactors[ i ]
}
names( result$pMeans ) <- priceNames
names( result$qMeans ) <- quantNames
if( nFix > 0 ) {
result$fMeans <- array( NA, nFix )
for( i in 1:nFix ) {
result$fMeans[ i ] <- mean( data[[ fixNames[ i ] ]] )
}
names( result$fMeans ) <- fixNames
}
## instrumental variables
if( nIV == 0 ) {
inst <- NULL
} else {
inst <- as.formula( paste( "~", paste( paste( "iv", c( 1:nIV ), sep="" ),
collapse = " + " ) ) )
}
## prepare and estimate the model
modelData <- .snqProfitModelData( data = data, weights = weights,
priceNames = priceNames, quantNames = quantNames, fixNames = fixNames, instNames = instNames,
form = form, netputScale = scalingFactors, fixedScale = result$fMeans )
system <- snqProfitSystem( nNetput, nFix ) # equation system
restrict <- snqProfitRestrict( nNetput, nFix, form ) # restrictions
result$est <- systemfit( formula = system, method = method, data = modelData,
restrict.regMat = restrict, inst = inst, ... )
result$coef <- snqProfitCoef( coef = coef( result$est, modified.regMat = TRUE ),
nNetput = nNetput, nFix = nFix, form = form,
coefCov = vcov( result$est, modified.regMat = TRUE ),
df = nNetput * nObs - nCoef,
quantNames = quantNames, priceNames = priceNames, fixNames = fixNames )
# estimated coefficients
result$coef <- .snqProfitRescaleCoef( result$coef, nNetput, result$fMeans,
form )
result$fitted <- data.frame( profit0 = rep( 0, nObs ) )
result$residuals <- data.frame( profit0 = rep( 0, nObs ) )
for( i in 1:nNetput ) {
result$fitted[[ quantNames[ i ] ]] <- fitted( result$est$eq[[ i ]] )
result$fitted[[ "profit0" ]] <- result$fitted[[ "profit0" ]] +
result$fitted[[ quantNames[ i ] ]] * data[[ priceNames[ i ] ]] *
scalingFactors[ i ]
result$residuals[[ quantNames[ i ] ]] <- data[[ quantNames[ i ] ]] /
scalingFactors[ i ] - result$fitted[[ quantNames[ i ] ]]
}
result$fitted[[ "profit" ]] <- result$fitted[[ "profit0" ]]
result$fitted[[ "profit0" ]] <- NULL
result$residuals[[ "profit" ]] <- modelData[[ "profit" ]] -
result$fitted[[ "profit" ]]
result$residuals[[ "profit0" ]] <- NULL
result$r2 <- array( NA, c( nNetput + 1 ) )
for( i in 1:nNetput ) {
# result$r2[ i ] <- summary( result$est$eq[[ i ]] )$r.squared
result$r2[ i ] <- rSquared( data[[ quantNames[ i ] ]] / scalingFactors[ i ],
result$residuals[[ quantNames[ i ] ]] )
}
result$r2[ nNetput + 1 ] <- rSquared( modelData[[ "profit" ]],
result$residuals[[ "profit" ]] )
names( result$r2 ) <- c( quantNames, "profit" )
result$hessian <- snqProfitHessian( result$coef$beta, result$pMeans, weights )
# Hessian matrix
result$ela <- snqProfitEla( result$coef$beta, result$pMeans,
result$qMeans, weights, coefVcov = result$coef$allCoefCov,
df = df.residual( result$est ) ) # estimated elasticities
if( nFix > 0 && form == 0 ) {
result$fixEla <- snqProfitFixEla( result$coef$delta, result$coef$gamma,
result$qMeans, result$fMeans, weights )
}
result$data <- data
result$weights <- weights
names( result$weights ) <- priceNames
result$normPrice <- modelData$normPrice
if( nNetput > 2 ){
result$convexity <- semidefiniteness( result$hessian[
1:( nNetput - 1 ), 1:( nNetput - 1 ) ] )$positive
} else if( nNetput == 2 ){
result$convexity <- result$hessian[ 1, 1 ] >= 0
}
result$priceNames <- priceNames
result$quantNames <- quantNames
result$fixNames <- fixNames
result$instNames <- instNames
result$form <- form
result$base <- base
result$method <- method
result$scalingFactors <- scalingFactors
names( result$scalingFactors ) <- priceNames
class( result ) <- "snqProfitEst"
return( result )
}
|
write.filex.env.sec <- function(sec){
sec.out <- list()
sec.out$E <- sprintf('%2i ',sec$E)
if('POSIXt'%in%class(sec$ODATE)){
sec.out$ODATE <- format(sec$ODATE,'%y%j')
}else if(is.numeric(sec$ODATE)|is.integer(sec$ODATE)){
sec.out$ODATE <- sprintf('%5i',sec$ODATE)
}else if(is.character(sec$ODATE)){
sec.out$ODATE <- substr(gsub('^ *','',sec$ODATE),1,5)
}
sec.out$ODATE <- paste0(sec.out$ODATE,' ')
out.list <- c('DAY','RAD','MAX','MIN','RAIN','CO2','DEW','WIND')
sec.list <- c('DAY','RAD','TX','TM','PRC','CO2','DPT','WND')
for(i in 1:length(out.list)){
sec.out[[sprintf('E%3s',out.list[i])]] <-
paste0(
substr(gsub('^ *','',sec[[sprintf('%sFAC',sec.list[i])]]),1,1),
sprintf('%4s',format(sec[[sprintf('%sADJ',sec.list[i])]],scientific=FALSE)),
' '
)
}
sec.out$ENVNAME <- sec$ENVNAME
sec.out <- do.call(paste0,sec.out)
return(sec.out)
}
| /R/write_filex_env_sec.R | no_license | sethgis/dssatR | R | false | false | 999 | r | write.filex.env.sec <- function(sec){
sec.out <- list()
sec.out$E <- sprintf('%2i ',sec$E)
if('POSIXt'%in%class(sec$ODATE)){
sec.out$ODATE <- format(sec$ODATE,'%y%j')
}else if(is.numeric(sec$ODATE)|is.integer(sec$ODATE)){
sec.out$ODATE <- sprintf('%5i',sec$ODATE)
}else if(is.character(sec$ODATE)){
sec.out$ODATE <- substr(gsub('^ *','',sec$ODATE),1,5)
}
sec.out$ODATE <- paste0(sec.out$ODATE,' ')
out.list <- c('DAY','RAD','MAX','MIN','RAIN','CO2','DEW','WIND')
sec.list <- c('DAY','RAD','TX','TM','PRC','CO2','DPT','WND')
for(i in 1:length(out.list)){
sec.out[[sprintf('E%3s',out.list[i])]] <-
paste0(
substr(gsub('^ *','',sec[[sprintf('%sFAC',sec.list[i])]]),1,1),
sprintf('%4s',format(sec[[sprintf('%sADJ',sec.list[i])]],scientific=FALSE)),
' '
)
}
sec.out$ENVNAME <- sec$ENVNAME
sec.out <- do.call(paste0,sec.out)
return(sec.out)
}
|
testlist <- list(b = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), p1 = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161), p2 = -1.72134747229037e+83)
result <- do.call(metacoder:::intersect_line_rectangle,testlist)
str(result) | /metacoder/inst/testfiles/intersect_line_rectangle/AFL_intersect_line_rectangle/intersect_line_rectangle_valgrind_files/1615769384-test.R | permissive | akhikolla/updatedatatype-list3 | R | false | false | 727 | r | testlist <- list(b = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), p1 = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161), p2 = -1.72134747229037e+83)
result <- do.call(metacoder:::intersect_line_rectangle,testlist)
str(result) |
# file.create("./subsetData.csv")
# STEP1
# preprocessing source data to reduce memory usage
src <- file("./household_power_consumption.txt", open="r")
res <- file("./subsetData.csv", open="w")
# STEP3
# preprocess data to reduce memory usage
preprocess(src, res)
clearData <- read.csv("./subsetData.csv", header=TRUE)
## 'Global Active Power' timeseries
# adding datetime variable
datetime <- strptime(paste(clearData$Date, clearData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
clearDataDateTimed <- cbind(clearData, datetime)
# exporting plot to PNG (480x480 dpi)
# launching png-device
png(file="plot2.png", width = 480, height = 480)
with(clearDataDateTimed, plot(datetime, Global_active_power, type = "l", xlab="", ylab="Global Active Power (kilowatts)"))
dev.off()
# STEP2
#___________________________________________________________________________________________
## preprocessing function, source before launching base program
preprocess <- function(src, res) {
#first iteration
buffer <- read.table(file=src, header=TRUE, sep=";", nrows=100000)
subsetData <- subset(buffer, buffer$Date == "1/2/2007" | buffer$Date == "2/2/2007")
write.table(subsetData, file=res, sep=",")
repeat {
buffer <- read.table(file=src, header=FALSE, sep=";", nrows=100000)
subsetData <- subset(buffer, buffer$Date == "1/2/2007" | buffer$Date == "2/2/2007")
write.table(subsetData, file=res, sep=",", col.names=FALSE)
print(c("Number of rows buffered: ", nrow(buffer)))
png
if(nrow(buffer) < 100000) {
close(src)
close(res)
break;
}
}
} | /plot2.R | no_license | Hamsterkiller/ExData_Plotting1 | R | false | false | 1,613 | r | # file.create("./subsetData.csv")
# STEP1
# preprocessing source data to reduce memory usage
src <- file("./household_power_consumption.txt", open="r")
res <- file("./subsetData.csv", open="w")
# STEP3
# preprocess data to reduce memory usage
preprocess(src, res)
clearData <- read.csv("./subsetData.csv", header=TRUE)
## 'Global Active Power' timeseries
# adding datetime variable
datetime <- strptime(paste(clearData$Date, clearData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
clearDataDateTimed <- cbind(clearData, datetime)
# exporting plot to PNG (480x480 dpi)
# launching png-device
png(file="plot2.png", width = 480, height = 480)
with(clearDataDateTimed, plot(datetime, Global_active_power, type = "l", xlab="", ylab="Global Active Power (kilowatts)"))
dev.off()
# STEP2
#___________________________________________________________________________________________
## preprocessing function, source before launching base program
preprocess <- function(src, res) {
#first iteration
buffer <- read.table(file=src, header=TRUE, sep=";", nrows=100000)
subsetData <- subset(buffer, buffer$Date == "1/2/2007" | buffer$Date == "2/2/2007")
write.table(subsetData, file=res, sep=",")
repeat {
buffer <- read.table(file=src, header=FALSE, sep=";", nrows=100000)
subsetData <- subset(buffer, buffer$Date == "1/2/2007" | buffer$Date == "2/2/2007")
write.table(subsetData, file=res, sep=",", col.names=FALSE)
print(c("Number of rows buffered: ", nrow(buffer)))
png
if(nrow(buffer) < 100000) {
close(src)
close(res)
break;
}
}
} |
## -------------------------------------------------------------------------------------------------------------------------------------- ##
# Vegetation Response to Anti-Tall Fescue Herbicide Treatments
## -------------------------------------------------------------------------------------------------------------------------------------- ##
# Code written by Nicholas J Lyon
# Objective 3
## Structural Response to Treatment
## In 2014 (so pre-treatment differences)
# START ####
# Required libraries
library(RRPP) # Analysis
# Set working directory (Also, "Session" menu to "Set Working Directory" works)
setwd("~/Documents/School/Iowa State/Collaborations/'Daubenmire Herbicide Bit/Daubenmire.HerbicideComponent.WD")
# Clear environment of other stuff
rm(list = ls())
## --------------------------------------------------------------------------------------------- ##
# Housekeeping ####
## --------------------------------------------------------------------------------------------- ##
# Pull in the dataset
sns <- read.csv("./Data/sns-data_2014.csv")
# Re-level the factors too though
unique(sns$Herbicide.Treatment)
sns$Herbicide.Treatment <- factor(as.character(sns$Herbicide.Treatment), levels = c("Con", "Spr", "SnS"))
unique(sns$Herbicide.Treatment)
# Further separate into cattle-grazed restorations (CGRs) and un-grazed restorations (UGRs)
cgr <- subset(sns, sns$Treatment == "GB")
ugr <- subset(sns, sns$Treatment == "None")
# Helpful custom functions
## Modification of RRPP's summary function to do multiple comparison adjustment as a matter of course
simp.rrpp <- function (object, test.type = c("dist", "VC", "var"), angle.type = c("rad", "deg"),
stat.table = T, confidence = 0.95, show.vectors = F, crit.dig = 3, ...) {
test.type <- match.arg(test.type)
angle.type <- match.arg(angle.type)
x <- object
if (test.type != "var") { # if you don't specify that the test.type is "var" (which means variances), see what the object should take
if (is.null(x$LS.means))
type = "slopes" # this would be appropriate for linear regression analyses
if (is.null(x$slopes))
type = "means" # this would be appropriate for ANOVAs. For my data, that turned it into type = 'means'
}
else type <- "var" # ignore for my data
RRPP:::print.pairwise(x) # Print onscreen the output from the fitted object
cat("\n") # add a line in the output
vars <- object$vars # needed. not sure why but setting something up with iterations I think
if (type == "var") { # ignore for my data
var.diff <- lapply(1:NCOL(vars), function(j) {
v <- as.matrix(vars[, j])
as.matrix(dist(v))
})
L <- d.summary.from.list(var.diff)
cat("\nObserved variances by group\n\n")
print(vars[, 1])
if (stat.table) {
tab <- makePWDTable(L)
cat("\nPairwise distances between variances, plus statistics\n")
print(tab)
}
else {
cat("\nPairwise distances between variances\n")
print(L$D)
cat("\nPairwise", paste(L$confidence * 100, "%",
sep = ""), "upper confidence limits between variances\n")
print(L$CL)
cat("\nPairwise effect sizes (Z) between variances\n")
print(L$Z)
cat("\nPairwise P-values between variances\n")
print(L$P)
}
}
if (type == "means") { # this is appropriate for my data
cat("LS means:\n")
if (show.vectors)
print(x$LS.means[[1]])
else cat("Vectors hidden (use show.vectors = TRUE to view)\n") # print out message for LS means output
if (test.type == "dist") { # if type = dist (like my data)
L <- RRPP:::d.summary.from.list(x$means.dist) # THIS IS WHERE THE P VALUE LIST IS MADE - L$P
if (stat.table) { # if you ask for it in a table, this is how it's made
tab <- RRPP:::makePWDTable(L) # making the table
cat("\nPairwise distances between means, plus statistics\n")
print(tab)
}
else { # ignore
cat("\nPairwise distances between means\n")
print(L$D)
cat("\nPairwise", paste(L$confidence * 100, "%",
sep = ""), "upper confidence limits between means\n")
print(L$CL)
cat("\nPairwise effect sizes (Z) between means\n")
print(L$Z)
cat("\nPairwise P-values between means\n")
print(L$P)
}
}
if (test.type == "VC") {
L <- r.summary.from.list(x$means.vec.cor)
if (stat.table) {
tab <- makePWCorTable(L)
cat("\nPairwise statistics based on mean vector correlations\n")
if (angle.type == "deg") {
tab$angle <- tab$angle * 180/pi
tab[, 3] <- tab[, 3] * 180/pi
}
print(tab)
}
else {
cat("\nPairwise vector correlations between mean vectors\n")
print(L$r)
cat("\nPairwise angles between mean vectors\n")
if (angle.type == "deg")
print(L$angle * 180/pi)
else print(L$angle)
cat("\nPairwise", paste(L$confidence * 100, "%",
sep = ""), "upper confidence limits for angles between mean vectors\n")
if (angle.type == "deg")
print(L$aCL * 180/pi)
else print(L$aCL)
cat("\nPairwise effect sizes (Z) for angles between mean vectors\n")
print(L$Z)
cat("\nPairwise P-values for angles between mean vectors\n")
print(L$P)
}
}
}
if (type == "slopes") {
cat("Slopes (vectors of variate change per one unit of covariate change, by group):\n")
if (show.vectors)
print(x$slopes[[1]])
else cat("Vectors hidden (use show.vectors = TRUE to view)\n")
if (test.type == "dist") {
cat("\nSlope vector lengths\n")
print(x$slopes.length[[1]])
L <- d.summary.from.list(x$slopes.dist)
if (stat.table) {
tab <- makePWDTable(L)
cat("\nPairwise absolute difference (d) between vector lengths, plus statistics\n")
print(tab)
}
else {
cat("\nPairwise absolute differences (d) between slope lengths\n")
print(L$D)
cat("\nPairwise", paste(L$confidence * 100, "%",
sep = ""), "upper confidence limits between slope lengths\n")
print(L$CL)
cat("\nPairwise effect sizes (Z) between slope lengths\n")
print(L$Z)
cat("\nPairwise P-values between slope lengths\n")
print(L$P)
}
}
if (test.type == "VC") {
L <- r.summary.from.list(x$slopes.vec.cor)
cat("\nPairwise statistics based on slopes vector correlations (r) and angles, acos(r)")
cat("\nThe null hypothesis is that r = 1 (parallel vectors).")
cat("\nThis null hypothesis is better treated as the angle between vectors = 0\n")
if (stat.table) {
tab <- makePWCorTable(L)
if (angle.type == "deg") {
tab$angle <- tab$angle * 180/pi
tab[, 3] <- tab[, 3] * 180/pi
}
print(tab)
}
else {
cat("\nPairwise vector correlations between slope vectors\n")
print(L$r)
cat("\nPairwise angles between slope vectors\n")
if (angle.type == "deg")
print(L$angle * 180/pi)
else print(L$angle)
cat("\nPairwise", paste(L$confidence * 100, "%",
sep = ""), "upper confidence limits for angles between mean vectors\n")
if (angle.type == "deg")
print(L$aCL * 180/pi)
else print(L$aCL)
cat("\nPairwise effect sizes (Z) for angles between slope vectors\n")
print(L$Z)
cat("\nPairwise P-values for angles between slope vectors\n")
print(L$P)
}
}
}
# Make new dataframe
df <- tab
# The following steps are necessary for performing Sequential Bonferroni multiple comparison adjustment
## Order the rows from lowest to highest p value
results <- df[order(df$"Pr > d"), ]
## Assign a rank based on that order
rank <- c(1:length(results$P))
# Now modify the critical point based on that rank (hence "sequential" Bonferroni)
results$Alpha <- round( with(results, ( (0.05 / (length(results$"Pr > d") + 1 - rank)) ) ), digits = crit.dig)
# Helpful to know how much larger the p value is than its critical point
results$"P/Alpha" <- round( (results$"Pr > d" / results$Alpha), digits = crit.dig)
# Now get the ranges of "significance" to be reduced to qualitative bits
results$Sig <- ifelse(test = results$"P/Alpha" > 2, yes = " ",
no = ifelse(test = results$"P/Alpha" > 1, yes = ".",
no = ifelse(test = results$"P/Alpha" > 0.2, yes = "*",
no = ifelse(test = results$"P/Alpha" > 0.02, yes = "**",
no = ifelse(test = results$"P/Alpha" > 0.002, yes = "***", no = "****")))))
## Viewer discretion is advized when using this bonus column
# Just in case you don't want to look in the guts of this function to see what * vs. ** means:
message("Sig codes: P / Crit > 2 = ''
1 < P/C ≤ 2 = '.'
0.2 < P/C ≤ 1 = '*'
0.02 < P/C ≤ 0.2 = '**'
0.002 < P/C ≤ 0.02 = '***'
P/C ≤ 0.002 = '****'")
# And spit out the result
return(results)
}
# General analytical procedure
## 1) Fit model with interaction term and assess *ONLY* the interaction term
## 2) If insignificant, run a new model without it (if significant, stop there, you're done)
## 3) If either explanatory variable is significant, fit a separate model of just that one
## 4) Run pairwise comparisons on that single-variable model
## --------------------------------------------------------------------------------------------- ##
# Bare Cover ####
## --------------------------------------------------------------------------------------------- ##
# Analysis
anova(lm.rrpp(Bare ~ Herbicide.Treatment, data = cgr, iter = 9999), effect.type = "F") # NS
anova(lm.rrpp(Bare ~ Herbicide.Treatment, data = ugr, iter = 9999), effect.type = "F") # NS
# Pairwise
bar.14.cgr.fit <- lm.rrpp(Bare ~ Herbicide.Treatment, data = cgr, iter = 9999)
simp.rrpp(pairwise(bar.14.cgr.fit, fit.null = NULL, groups = cgr$Herbicide.Treatment))
bar.14.ugr.fit <- lm.rrpp(Bare ~ Herbicide.Treatment, data = ugr, iter = 9999)
simp.rrpp(pairwise(bar.14.ugr.fit, fit.null = NULL, groups = ugr$Herbicide.Treatment))
## --------------------------------------------------------------------------------------------- ##
# Litter Cover ####
## --------------------------------------------------------------------------------------------- ##
# Analysis
anova(lm.rrpp(Litter ~ Herbicide.Treatment, data = cgr, iter = 9999), effect.type = "F") # NS
anova(lm.rrpp(Litter ~ Herbicide.Treatment, data = ugr, iter = 9999), effect.type = "F") # NS
# Pairwise
ltr.14.cgr.fit <- lm.rrpp(Litter ~ Herbicide.Treatment, data = cgr, iter = 9999)
simp.rrpp(pairwise(ltr.14.cgr.fit, fit.null = NULL, groups = cgr$Herbicide.Treatment))
ltr.14.ugr.fit <- lm.rrpp(Litter ~ Herbicide.Treatment, data = ugr, iter = 9999)
simp.rrpp(pairwise(ltr.14.ugr.fit, fit.null = NULL, groups = ugr$Herbicide.Treatment))
## --------------------------------------------------------------------------------------------- ##
# Robel (dm) ####
## --------------------------------------------------------------------------------------------- ##
# Analysis
anova(lm.rrpp(Robel ~ Herbicide.Treatment, data = cgr, iter = 9999), effect.type = "F") # NS
anova(lm.rrpp(Robel ~ Herbicide.Treatment, data = ugr, iter = 9999), effect.type = "F") # NS
# Pairwise
rbl.14.cgr.fit <- lm.rrpp(Robel ~ Herbicide.Treatment, data = cgr, iter = 9999)
simp.rrpp(pairwise(rbl.14.cgr.fit, fit.null = NULL, groups = cgr$Herbicide.Treatment))
rbl.14.ugr.fit <- lm.rrpp(Robel ~ Herbicide.Treatment, data = ugr, iter = 9999)
simp.rrpp(pairwise(rbl.14.ugr.fit, fit.null = NULL, groups = ugr$Herbicide.Treatment))
## --------------------------------------------------------------------------------------------- ##
# Litter Depth (cm) ####
## --------------------------------------------------------------------------------------------- ##
# Analysis
anova(lm.rrpp(LitDep ~ Herbicide.Treatment, data = cgr, iter = 9999), effect.type = "F") # NS
anova(lm.rrpp(LitDep ~ Herbicide.Treatment, data = ugr, iter = 9999), effect.type = "F") # NS
# Pairwise
ldp.14.cgr.fit <- lm.rrpp(LitDep ~ Herbicide.Treatment, data = cgr, iter = 9999)
simp.rrpp(pairwise(ldp.14.cgr.fit, fit.null = NULL, groups = cgr$Herbicide.Treatment))
ldp.14.ugr.fit <- lm.rrpp(LitDep ~ Herbicide.Treatment, data = ugr, iter = 9999)
simp.rrpp(pairwise(ldp.14.ugr.fit, fit.null = NULL, groups = ugr$Herbicide.Treatment))
# END ####
| /Code/old code/obj-3_2014.R | permissive | njlyon0/collab_coon-daubenmire-herbicide | R | false | false | 13,008 | r | ## -------------------------------------------------------------------------------------------------------------------------------------- ##
# Vegetation Response to Anti-Tall Fescue Herbicide Treatments
## -------------------------------------------------------------------------------------------------------------------------------------- ##
# Code written by Nicholas J Lyon
# Objective 3
## Structural Response to Treatment
## In 2014 (so pre-treatment differences)
# START ####
# Required libraries
library(RRPP) # Analysis
# Set working directory (Also, "Session" menu to "Set Working Directory" works)
setwd("~/Documents/School/Iowa State/Collaborations/'Daubenmire Herbicide Bit/Daubenmire.HerbicideComponent.WD")
# Clear environment of other stuff
rm(list = ls())
## --------------------------------------------------------------------------------------------- ##
# Housekeeping ####
## --------------------------------------------------------------------------------------------- ##
# Pull in the dataset
sns <- read.csv("./Data/sns-data_2014.csv")
# Re-level the factors too though
unique(sns$Herbicide.Treatment)
sns$Herbicide.Treatment <- factor(as.character(sns$Herbicide.Treatment), levels = c("Con", "Spr", "SnS"))
unique(sns$Herbicide.Treatment)
# Further separate into cattle-grazed restorations (CGRs) and un-grazed restorations (UGRs)
cgr <- subset(sns, sns$Treatment == "GB")
ugr <- subset(sns, sns$Treatment == "None")
# Helpful custom functions
## Modification of RRPP's summary function to do multiple comparison adjustment as a matter of course
simp.rrpp <- function (object, test.type = c("dist", "VC", "var"), angle.type = c("rad", "deg"),
                       stat.table = TRUE, confidence = 0.95, show.vectors = FALSE, crit.dig = 3, ...) {
  # Summarize an RRPP::pairwise() object and append a sequential Bonferroni
  # (Holm-style) multiple-comparison adjustment to the pairwise summary table.
  #
  # Args:
  #   object:       a `pairwise` object from RRPP::pairwise()
  #   test.type:    "dist" (distances between LS means/slopes), "VC" (vector
  #                 correlations / angles), or "var" (variances)
  #   angle.type:   report angles in radians or degrees (test.type = "VC" only)
  #   stat.table:   must remain TRUE; the adjustment operates on the summary table
  #   confidence:   confidence level used by the underlying RRPP summary
  #   show.vectors: if TRUE, print the LS mean / slope vectors themselves
  #   crit.dig:     rounding digits for the adjusted critical values
  #   ...:          unused; retained for summary()-style call compatibility
  #
  # Returns: the pairwise summary table, ordered by p value, with added columns
  #   Alpha (rank-adjusted critical value), P/Alpha, and a qualitative Sig code.
  #
  # NOTE: this relies on unexported RRPP internals (RRPP:::...), so it may break
  # if the package's internal API changes.
  test.type <- match.arg(test.type)
  angle.type <- match.arg(angle.type)
  x <- object
  if (test.type != "var") {
    # ANOVA-style fits carry LS means; regression-style fits carry slopes
    if (is.null(x$LS.means))
      type <- "slopes"
    if (is.null(x$slopes))
      type <- "means"
  }
  else type <- "var"
  RRPP:::print.pairwise(x) # echo the fitted object's header onscreen
  cat("\n")
  vars <- object$vars
  if (type == "var") {
    # Pairwise absolute differences between group variances
    var.diff <- lapply(seq_len(NCOL(vars)), function(j) {
      v <- as.matrix(vars[, j])
      as.matrix(dist(v))
    })
    # Bug fix: these helpers are unexported, so they need the RRPP::: prefix
    # (the original unprefixed calls only worked inside RRPP's namespace)
    L <- RRPP:::d.summary.from.list(var.diff)
    cat("\nObserved variances by group\n\n")
    print(vars[, 1])
    if (stat.table) {
      tab <- RRPP:::makePWDTable(L)
      cat("\nPairwise distances between variances, plus statistics\n")
      print(tab)
    }
    else {
      cat("\nPairwise distances between variances\n")
      print(L$D)
      cat("\nPairwise", paste(L$confidence * 100, "%",
                              sep = ""), "upper confidence limits between variances\n")
      print(L$CL)
      cat("\nPairwise effect sizes (Z) between variances\n")
      print(L$Z)
      cat("\nPairwise P-values between variances\n")
      print(L$P)
    }
  }
  if (type == "means") {
    cat("LS means:\n")
    if (show.vectors)
      print(x$LS.means[[1]])
    else cat("Vectors hidden (use show.vectors = TRUE to view)\n")
    if (test.type == "dist") {
      L <- RRPP:::d.summary.from.list(x$means.dist)
      if (stat.table) {
        tab <- RRPP:::makePWDTable(L)
        cat("\nPairwise distances between means, plus statistics\n")
        print(tab)
      }
      else {
        cat("\nPairwise distances between means\n")
        print(L$D)
        cat("\nPairwise", paste(L$confidence * 100, "%",
                                sep = ""), "upper confidence limits between means\n")
        print(L$CL)
        cat("\nPairwise effect sizes (Z) between means\n")
        print(L$Z)
        cat("\nPairwise P-values between means\n")
        print(L$P)
      }
    }
    if (test.type == "VC") {
      L <- RRPP:::r.summary.from.list(x$means.vec.cor)
      if (stat.table) {
        tab <- RRPP:::makePWCorTable(L)
        cat("\nPairwise statistics based on mean vector correlations\n")
        if (angle.type == "deg") {
          tab$angle <- tab$angle * 180/pi
          tab[, 3] <- tab[, 3] * 180/pi
        }
        print(tab)
      }
      else {
        cat("\nPairwise vector correlations between mean vectors\n")
        print(L$r)
        cat("\nPairwise angles between mean vectors\n")
        if (angle.type == "deg")
          print(L$angle * 180/pi)
        else print(L$angle)
        cat("\nPairwise", paste(L$confidence * 100, "%",
                                sep = ""), "upper confidence limits for angles between mean vectors\n")
        if (angle.type == "deg")
          print(L$aCL * 180/pi)
        else print(L$aCL)
        cat("\nPairwise effect sizes (Z) for angles between mean vectors\n")
        print(L$Z)
        cat("\nPairwise P-values for angles between mean vectors\n")
        print(L$P)
      }
    }
  }
  if (type == "slopes") {
    cat("Slopes (vectors of variate change per one unit of covariate change, by group):\n")
    if (show.vectors)
      print(x$slopes[[1]])
    else cat("Vectors hidden (use show.vectors = TRUE to view)\n")
    if (test.type == "dist") {
      cat("\nSlope vector lengths\n")
      print(x$slopes.length[[1]])
      L <- RRPP:::d.summary.from.list(x$slopes.dist)
      if (stat.table) {
        tab <- RRPP:::makePWDTable(L)
        cat("\nPairwise absolute difference (d) between vector lengths, plus statistics\n")
        print(tab)
      }
      else {
        cat("\nPairwise absolute differences (d) between slope lengths\n")
        print(L$D)
        cat("\nPairwise", paste(L$confidence * 100, "%",
                                sep = ""), "upper confidence limits between slope lengths\n")
        print(L$CL)
        cat("\nPairwise effect sizes (Z) between slope lengths\n")
        print(L$Z)
        cat("\nPairwise P-values between slope lengths\n")
        print(L$P)
      }
    }
    if (test.type == "VC") {
      L <- RRPP:::r.summary.from.list(x$slopes.vec.cor)
      cat("\nPairwise statistics based on slopes vector correlations (r) and angles, acos(r)")
      cat("\nThe null hypothesis is that r = 1 (parallel vectors).")
      cat("\nThis null hypothesis is better treated as the angle between vectors = 0\n")
      if (stat.table) {
        tab <- RRPP:::makePWCorTable(L)
        if (angle.type == "deg") {
          tab$angle <- tab$angle * 180/pi
          tab[, 3] <- tab[, 3] * 180/pi
        }
        print(tab)
      }
      else {
        cat("\nPairwise vector correlations between slope vectors\n")
        print(L$r)
        cat("\nPairwise angles between slope vectors\n")
        if (angle.type == "deg")
          print(L$angle * 180/pi)
        else print(L$angle)
        cat("\nPairwise", paste(L$confidence * 100, "%",
                                sep = ""), "upper confidence limits for angles between mean vectors\n")
        if (angle.type == "deg")
          print(L$aCL * 180/pi)
        else print(L$aCL)
        cat("\nPairwise effect sizes (Z) for angles between slope vectors\n")
        print(L$Z)
        cat("\nPairwise P-values for angles between slope vectors\n")
        print(L$P)
      }
    }
  }
  # The sequential Bonferroni adjustment below operates on the summary table;
  # fail with a clear message instead of "object 'tab' not found"
  if (!exists("tab", inherits = FALSE))
    stop("simp.rrpp() requires stat.table = TRUE for the multiple comparison adjustment",
         call. = FALSE)
  df <- tab
  # Identify the p-value column: "Pr > d" for distance tables, "Pr > angle" for
  # correlation tables. The old hard-coded df$"Pr > d" silently produced an
  # empty result for test.type = "VC".
  p.col <- grep("^Pr > ", names(df), value = TRUE)[1]
  if (is.na(p.col))
    stop("No p-value column found in the pairwise summary table", call. = FALSE)
  ## Order the rows from lowest to highest p value
  results <- df[order(df[[p.col]]), ]
  ## Rank in that order (1 = smallest p). seq_len(nrow()) replaces the fragile
  ## `1:length(results$P)`, which only worked via $ partial matching of "Pr > d".
  rnk <- seq_len(nrow(results))
  # Now modify the critical point based on that rank (hence "sequential" Bonferroni)
  results$Alpha <- round(0.05 / (nrow(results) + 1 - rnk), digits = crit.dig)
  # Helpful to know how much larger the p value is than its critical point
  results$"P/Alpha" <- round(results[[p.col]] / results$Alpha, digits = crit.dig)
  # Reduce the P/Alpha ratio to qualitative "significance" codes
  results$Sig <- ifelse(test = results$"P/Alpha" > 2, yes = " ",
                        no = ifelse(test = results$"P/Alpha" > 1, yes = ".",
                                    no = ifelse(test = results$"P/Alpha" > 0.2, yes = "*",
                                                no = ifelse(test = results$"P/Alpha" > 0.02, yes = "**",
                                                            no = ifelse(test = results$"P/Alpha" > 0.002, yes = "***", no = "****")))))
  ## Viewer discretion is advized when using this bonus column
  # Just in case you don't want to look in the guts of this function to see what * vs. ** means:
  message("Sig codes: P / Crit > 2 = ''
          1 < P/C ≤ 2 = '.'
          0.2 < P/C ≤ 1 = '*'
          0.02 < P/C ≤ 0.2 = '**'
          0.002 < P/C ≤ 0.02 = '***'
          P/C ≤ 0.002 = '****'")
  # And spit out the result
  return(results)
}
# General analytical procedure
## 1) Fit model with interaction term and assess *ONLY* the interaction term
## 2) If insignificant, run a new model without it (if significant, stop there, you're done)
## 3) If either explanatory variable is significant, fit a separate model of just that one
## 4) Run pairwise comparisons on that single-variable model
# Each structural response (Bare, Litter, Robel, LitDep) is analyzed separately
# for the grazed (cgr) and un-grazed (ugr) subsets: a permutation ANOVA
# (lm.rrpp, 9999 iterations) followed by pairwise treatment contrasts with the
# sequential Bonferroni adjustment from simp.rrpp() above.
## --------------------------------------------------------------------------------------------- ##
# Bare Cover ####
## --------------------------------------------------------------------------------------------- ##
# Analysis
anova(lm.rrpp(Bare ~ Herbicide.Treatment, data = cgr, iter = 9999), effect.type = "F") # NS
anova(lm.rrpp(Bare ~ Herbicide.Treatment, data = ugr, iter = 9999), effect.type = "F") # NS
# Pairwise
bar.14.cgr.fit <- lm.rrpp(Bare ~ Herbicide.Treatment, data = cgr, iter = 9999)
simp.rrpp(pairwise(bar.14.cgr.fit, fit.null = NULL, groups = cgr$Herbicide.Treatment))
bar.14.ugr.fit <- lm.rrpp(Bare ~ Herbicide.Treatment, data = ugr, iter = 9999)
simp.rrpp(pairwise(bar.14.ugr.fit, fit.null = NULL, groups = ugr$Herbicide.Treatment))
## --------------------------------------------------------------------------------------------- ##
# Litter Cover ####
## --------------------------------------------------------------------------------------------- ##
# Analysis
anova(lm.rrpp(Litter ~ Herbicide.Treatment, data = cgr, iter = 9999), effect.type = "F") # NS
anova(lm.rrpp(Litter ~ Herbicide.Treatment, data = ugr, iter = 9999), effect.type = "F") # NS
# Pairwise
ltr.14.cgr.fit <- lm.rrpp(Litter ~ Herbicide.Treatment, data = cgr, iter = 9999)
simp.rrpp(pairwise(ltr.14.cgr.fit, fit.null = NULL, groups = cgr$Herbicide.Treatment))
ltr.14.ugr.fit <- lm.rrpp(Litter ~ Herbicide.Treatment, data = ugr, iter = 9999)
simp.rrpp(pairwise(ltr.14.ugr.fit, fit.null = NULL, groups = ugr$Herbicide.Treatment))
## --------------------------------------------------------------------------------------------- ##
# Robel (dm) ####
## --------------------------------------------------------------------------------------------- ##
# Analysis
anova(lm.rrpp(Robel ~ Herbicide.Treatment, data = cgr, iter = 9999), effect.type = "F") # NS
anova(lm.rrpp(Robel ~ Herbicide.Treatment, data = ugr, iter = 9999), effect.type = "F") # NS
# Pairwise
rbl.14.cgr.fit <- lm.rrpp(Robel ~ Herbicide.Treatment, data = cgr, iter = 9999)
simp.rrpp(pairwise(rbl.14.cgr.fit, fit.null = NULL, groups = cgr$Herbicide.Treatment))
rbl.14.ugr.fit <- lm.rrpp(Robel ~ Herbicide.Treatment, data = ugr, iter = 9999)
simp.rrpp(pairwise(rbl.14.ugr.fit, fit.null = NULL, groups = ugr$Herbicide.Treatment))
## --------------------------------------------------------------------------------------------- ##
# Litter Depth (cm) ####
## --------------------------------------------------------------------------------------------- ##
# Analysis
anova(lm.rrpp(LitDep ~ Herbicide.Treatment, data = cgr, iter = 9999), effect.type = "F") # NS
anova(lm.rrpp(LitDep ~ Herbicide.Treatment, data = ugr, iter = 9999), effect.type = "F") # NS
# Pairwise
ldp.14.cgr.fit <- lm.rrpp(LitDep ~ Herbicide.Treatment, data = cgr, iter = 9999)
simp.rrpp(pairwise(ldp.14.cgr.fit, fit.null = NULL, groups = cgr$Herbicide.Treatment))
ldp.14.ugr.fit <- lm.rrpp(LitDep ~ Herbicide.Treatment, data = ugr, iter = 9999)
simp.rrpp(pairwise(ldp.14.ugr.fit, fit.null = NULL, groups = ugr$Herbicide.Treatment))
# END ####
|
# Already set in other file:
# process:= what kind of spatial process (ebf, gsk)
# margin := how to construct marginal basis functions
# cv := which cross-validation testing set to use
# L := the number of basis functions to use
#### load in the data ####
load(file = "precip_preprocess.RData")
# basis functions are precomputed, so if we change cv settings, we'll
# need to rerun all of cv-setup.
basis.file <- paste("./basis_functions/ebf-", L, ".RData", sep = "")
gsk.file <- paste("./basis_functions/gsk-", L, ".RData", sep = "")
results.file <- paste("./cv-results/", process, "-", time, "-", L,
                      "-", cv, ".RData", sep = "")
table.file <- paste("./cv-tables/", process, "-", time, "-", L,
                    "-", cv, ".txt", sep = "")
#### spatial setup ####
# Inter-site distance matrix. NOTE(review): rdist() is presumably from the
# fields package, attached by the preprocessing step -- confirm upstream.
d <- rdist(s)
diag(d) <- 0
n <- nrow(s)
# standardize the locations
# Both coordinates are scaled by the SAME factor (the smaller coordinate range)
# so the spatial domain's aspect ratio is preserved
s.scale <- s
s.scale.factor <- min(diff(range(s[, 1])), diff(range(s[, 2])))
s.min <- apply(s, 2, min)
s.scale[, 1] <- (s[, 1] - s.min[1]) / s.scale.factor
s.scale[, 2] <- (s[, 2] - s.min[2]) / s.scale.factor
# get candidate knot grid for Gaussian kernel functions
cents.grid <- s
################################################################################
#### Load in cross-validation setup ############################################
################################################################################
load(file = "./cv-extcoef.RData")
load(file = basis.file)
load(file = gsk.file)
################################################################################
#### Get weight functions for spatial process ##################################
################################################################################
# alpha.hats, B.ebf, B.gsk, ec.smooth are provided by the .RData files above
alpha <- alpha.hats[cv]
if (process == "ebf") {
  B.sp <- B.ebf[[cv]]
  # NOTE(review): this overwrites the full ec.smooth list with its cv-th
  # element; fine on a single pass, but destructive if re-run in one session
  ec.smooth <- ec.smooth[[cv]]
} else {
  # get the knot locations
  B.sp <- B.gsk[[cv]]
}
################################################################################
#### Covariate basis functions #################################################
################################################################################
# Choose the covariate basis: reuse the process basis when margin == process,
# otherwise take the other precomputed set for this CV fold
if (margin == "ebf") {
  if (process == "ebf") { # we can just copy the empirical basis functions
    cat("B.cov = B.sp \n")
    B.cov <- B.sp
  } else { # we need to construct the empirical basis functions
    cat("Estimating basis functions for covariates \n")
    B.cov <- B.ebf[[cv]]
  }
} else if (margin == "gsk") {
  if (process == "ebf") {
    B.cov <- B.gsk[[cv]]
  } else{
    cat("B.cov = B.sp \n")
    B.cov <- B.sp
  }
}
################################################################################
#### Run the MCMC ##############################################################
#### Use the basis functions with the MCMC
#### The response is the total acreage burned in a year
#### Y[i, t] = acres burned in county i and year t
#### X[i, t, p] = pth covariate for site i in year t
#### Using (1, time, B.cov, B.cov * time) where time = (t - nt / 2) / nt
################################################################################
# NOTE(review): the banner above looks copied from a wildfire analysis; in this
# precipitation script Y[i, t] is presumably precipitation at site i, year t --
# confirm against the preprocessing file.
ns <- nrow(Y)
nt <- ncol(Y) / 2 # Y holds current and future periods side by side
Y.all <- Y
## Y contains both current and future data, so subset on the relevant years
if (time == "current") {
  this.cv <- cv.idx[[cv]][, 1:nt]
  Y <- Y[, 1:nt]
  Y.tst <- Y[this.cv] # save the testing data to validate
  # Y[this.cv] <- NA # remove the testing data
} else {
  this.cv <- cv.idx[[cv]][, (nt + 1):(2 * nt)]
  Y <- Y[, (nt + 1):(2 * nt)]
  Y.tst <- Y[this.cv]
  # Y[this.cv] <- NA
}
################################################################################
#### Spatially smooth threshold ################################################
################################################################################
# Site-level exceedance thresholds, smoothed by pooling each site with its 5
# nearest neighbors before taking empirical quantiles.
thresh90 <- thresh95 <- thresh99 <- rep(0, ns)
neighbors <- 5
# NOTE(review): d was already computed identically in the spatial setup above;
# recomputed here so this section stands alone.
d <- rdist(s)
diag(d) <- 0
# take the 5 closest neighbors when finding the threshold
# (seq_len(ns) replaces 1:ns, which misbehaves if ns were ever 0)
for (i in seq_len(ns)) {
  these <- order(d[i, ])[2:(neighbors + 1)] # the closest is always site i
  thresh90[i] <- quantile(Y[these, ], probs = 0.90, na.rm = TRUE)
  thresh95[i] <- quantile(Y[these, ], probs = 0.95, na.rm = TRUE)
  thresh99[i] <- quantile(Y[these, ], probs = 0.99, na.rm = TRUE)
}
# Recycle the site-wise thresholds across all years (sites are rows of Y)
thresh90 <- matrix(thresh90, nrow(Y), ncol(Y))
thresh95 <- matrix(thresh95, nrow(Y), ncol(Y))
thresh99 <- matrix(thresh99, nrow(Y), ncol(Y))
# Thresholds at the held-out (test) cells only
thresh90.tst <- thresh90[this.cv]
thresh95.tst <- thresh95[this.cv]
thresh99.tst <- thresh99[this.cv]
################################################################################
#### run the MCMC ##############################################################
################################################################################
iters <- 25000
burn <- 15000
update <- 500
# iters <- 100; burn <- 90; update <- 50 # for testing
A.init <- matrix(exp(2), L, nt) # consistent with estimates of alpha
# A.init <- matrix(1, L, nt)
theta.init <- (B.sp^(1 / alpha) %*% A.init)^alpha
xi.init <- 0.1
# find the beta estimates using ml for GEV
# going to use ML but independent to get a single mu and sigma for each site
# based on xi = 0, but with A.init and alpha
# xi.init <- rep(0, ns)
beta.int.init <- matrix(0, ns, 2)
# Per-site ML starting values for the intercept coefficients
# (seq_len(ns) replaces 1:ns, which misbehaves if ns were ever 0)
for (i in seq_len(ns)) {
  fit <- optim(par = c(0, 0), fn = loglike.init,
               y = Y[i, ], thresh = rep(-Inf, nt), xi = xi.init,
               theta = theta.init[i, ], alpha = alpha,
               control = list(maxit = 5000))$par
  beta.int.init[i, ] <- fit[1:2]
  if (i %% 50 == 0) {
    print(paste("site ", i, " complete", sep = ""))
  }
}
cat("Start mcmc fit \n")
set.seed(6262) # mcmc
beta.time.init <- matrix(0, ns, 2)
# Fit the spatial extremes model; test.set marks held-out cells, for which the
# sampler generates predictions (fit$y.pred)
fit <- ReShMCMC(y = Y, test.set = this.cv, s = s.scale, thresh = -Inf, B = B.sp,
                alpha = alpha, beta.int = beta.int.init, canbeta.int.sd = 0.5,
                beta.time = beta.time.init, canbeta.time.sd = 0.5,
                xi = xi.init, bw.init = 0.2, A = A.init,
                iters = iters, burn = burn, update = update,
                iterplot = FALSE)
cat("Finished fit and predict \n")
#### Summarize performance ####
# GG and CRPS come out directly from mcmc. Also need quantile and Brier scores
# and MAD
probs.for.qs <- c(0.95, 0.96, 0.97, 0.98, 0.99, 0.995)
qs.results <- QuantScore(preds = fit$y.pred, probs = probs.for.qs,
                         validate = Y.tst)
bs.results95 <- BrierScore(preds = fit$y.pred, validate = Y.tst,
                           thresh = thresh95.tst)
bs.results99 <- BrierScore(preds = fit$y.pred, validate = Y.tst,
                           thresh = thresh99.tst)
y.pred <- apply(fit$y.pred, 2, median) # posterior median per test cell
MAD <- mean(abs(y.pred - Y.tst))
# NOTE(review): including Sys.info()["nodename"] coerces the whole results
# vector to character; write.table() records it as text either way
results <- c(qs.results, bs.results95, bs.results99, fit$GG, fit$CRPS, MAD,
             fit$timing, Sys.info()["nodename"])
names(results) <- c(probs.for.qs, "bs-95", "bs-99", "GG", "CRPS", "MAD",
                    "timing", "system")
write.table(results, file = table.file)
# Optionally push the results table to the HPC host (do.upload set upstream)
if (do.upload) {
  upload.pre <- paste("samorris@hpc.stat.ncsu.edu:~/repos-git/extreme-decomp/",
                      "markdown/precipitation/cv-tables/", sep = "")
  upload.cmd <- paste("scp ", table.file, " ", upload.pre, sep = "")
  system(upload.cmd)
}
save(B.sp, knots, thresh90, thresh95, thresh99, Y.tst,
     alpha, fit, cv.idx, results, file = results.file)
| /markdown/precip-rev-4/fitmodel.R | permissive | sammorris81/extreme-decomp | R | false | false | 7,315 | r | # Already set in other file:
# process:= what kind of spatial process (ebf, gsk)
# margin := how to construct marginal basis functions
# cv := which cross-validation testing set to use
# L := the number of basis functions to use
#### load in the data ####
load(file = "precip_preprocess.RData")
# basis functions are precomputed, so if we change cv settings, we'll
# need to rerun all of cv-setup.
basis.file <- paste("./basis_functions/ebf-", L, ".RData", sep = "")
gsk.file <- paste("./basis_functions/gsk-", L, ".RData", sep = "")
results.file <- paste("./cv-results/", process, "-", time, "-", L,
"-", cv, ".RData", sep = "")
table.file <- paste("./cv-tables/", process, "-", time, "-", L,
"-", cv, ".txt", sep = "")
#### spatial setup ####
d <- rdist(s)
diag(d) <- 0
n <- nrow(s)
# standardize the locations
s.scale <- s
s.scale.factor <- min(diff(range(s[, 1])), diff(range(s[, 2])))
s.min <- apply(s, 2, min)
s.scale[, 1] <- (s[, 1] - s.min[1]) / s.scale.factor
s.scale[, 2] <- (s[, 2] - s.min[2]) / s.scale.factor
# get candidate knot grid for Gaussian kernel functions
cents.grid <- s
################################################################################
#### Load in cross-validation setup ############################################
################################################################################
load(file = "./cv-extcoef.RData")
load(file = basis.file)
load(file = gsk.file)
################################################################################
#### Get weight functions for spatial process ##################################
################################################################################
alpha <- alpha.hats[cv]
if (process == "ebf") {
B.sp <- B.ebf[[cv]]
ec.smooth <- ec.smooth[[cv]]
} else {
# get the knot locations
B.sp <- B.gsk[[cv]]
}
################################################################################
#### Covariate basis functions #################################################
################################################################################
if (margin == "ebf") {
if (process == "ebf") { # we can just copy the empirical basis functions
cat("B.cov = B.sp \n")
B.cov <- B.sp
} else { # we need to construct the empirical basis functions
cat("Estimating basis functions for covariates \n")
B.cov <- B.ebf[[cv]]
}
} else if (margin == "gsk") {
if (process == "ebf") {
B.cov <- B.gsk[[cv]]
} else{
cat("B.cov = B.sp \n")
B.cov <- B.sp
}
}
################################################################################
#### Run the MCMC ##############################################################
#### Use the basis functions with the MCMC
#### The response is the total acreage burned in a year
#### Y[i, t] = acres burned in county i and year t
#### X[i, t, p] = pth covariate for site i in year t
#### Using (1, time, B.cov, B.cov * time) where time = (t - nt / 2) / nt
################################################################################
ns <- nrow(Y)
nt <- ncol(Y) / 2
Y.all <- Y
## Y contains both current and future data, so subset on the relevant years
if (time == "current") {
this.cv <- cv.idx[[cv]][, 1:nt]
Y <- Y[, 1:nt]
Y.tst <- Y[this.cv] # save the testing data to validate
# Y[this.cv] <- NA # remove the testing data
} else {
this.cv <- cv.idx[[cv]][, (nt + 1):(2 * nt)]
Y <- Y[, (nt + 1):(2 * nt)]
Y.tst <- Y[this.cv]
# Y[this.cv] <- NA
}
################################################################################
#### Spatially smooth threshold ################################################
################################################################################
thresh90 <- thresh95 <- thresh99 <- rep(0, ns)
neighbors <- 5
d <- rdist(s)
diag(d) <- 0
# take the 5 closest neighbors when finding the threshold
for (i in 1:ns) {
these <- order(d[i, ])[2:(neighbors + 1)] # the closest is always site i
thresh90[i] <- quantile(Y[these, ], probs = 0.90, na.rm = TRUE)
thresh95[i] <- quantile(Y[these, ], probs = 0.95, na.rm = TRUE)
thresh99[i] <- quantile(Y[these, ], probs = 0.99, na.rm = TRUE)
}
thresh90 <- matrix(thresh90, nrow(Y), ncol(Y))
thresh95 <- matrix(thresh95, nrow(Y), ncol(Y))
thresh99 <- matrix(thresh99, nrow(Y), ncol(Y))
thresh90.tst <- thresh90[this.cv]
thresh95.tst <- thresh95[this.cv]
thresh99.tst <- thresh99[this.cv]
################################################################################
#### run the MCMC ##############################################################
################################################################################
iters <- 25000
burn <- 15000
update <- 500
# iters <- 100; burn <- 90; update <- 50 # for testing
A.init <- matrix(exp(2), L, nt) # consistent with estimates of alpha
# A.init <- matrix(1, L, nt)
theta.init <- (B.sp^(1 / alpha) %*% A.init)^alpha
xi.init <- 0.1
# find the beta estimates using ml for GEV
# going to use ML but independent to get a single mu and sigma for each site
# based on xi = 0, but with A.init and alpha
# xi.init <- rep(0, ns)
beta.int.init <- matrix(0, ns, 2)
for (i in 1:ns) {
fit <- optim(par = c(0, 0), fn = loglike.init,
y = Y[i, ], thresh = rep(-Inf, nt), xi = xi.init,
theta = theta.init[i, ], alpha = alpha,
control = list(maxit = 5000))$par
beta.int.init[i, ] <- fit[1:2]
if (i %% 50 == 0) {
print(paste("site ", i, " complete", sep = ""))
}
}
cat("Start mcmc fit \n")
set.seed(6262) # mcmc
beta.time.init <- matrix(0, ns, 2)
fit <- ReShMCMC(y = Y, test.set = this.cv, s = s.scale, thresh = -Inf, B = B.sp,
alpha = alpha, beta.int = beta.int.init, canbeta.int.sd = 0.5,
beta.time = beta.time.init, canbeta.time.sd = 0.5,
xi = xi.init, bw.init = 0.2, A = A.init,
iters = iters, burn = burn, update = update,
iterplot = FALSE)
cat("Finished fit and predict \n")
#### Summarize performance ####
# GG and CRPS come out directly from mcmc. Also need quantile and Brier scores
# and MAD
probs.for.qs <- c(0.95, 0.96, 0.97, 0.98, 0.99, 0.995)
qs.results <- QuantScore(preds = fit$y.pred, probs = probs.for.qs,
validate = Y.tst)
bs.results95 <- BrierScore(preds = fit$y.pred, validate = Y.tst,
thresh = thresh95.tst)
bs.results99 <- BrierScore(preds = fit$y.pred, validate = Y.tst,
thresh = thresh99.tst)
y.pred <- apply(fit$y.pred, 2, median)
MAD <- mean(abs(y.pred - Y.tst))
results <- c(qs.results, bs.results95, bs.results99, fit$GG, fit$CRPS, MAD,
fit$timing, Sys.info()["nodename"])
names(results) <- c(probs.for.qs, "bs-95", "bs-99", "GG", "CRPS", "MAD",
"timing", "system")
write.table(results, file = table.file)
if (do.upload) {
upload.pre <- paste("samorris@hpc.stat.ncsu.edu:~/repos-git/extreme-decomp/",
"markdown/precipitation/cv-tables/", sep = "")
upload.cmd <- paste("scp ", table.file, " ", upload.pre, sep = "")
system(upload.cmd)
}
save(B.sp, knots, thresh90, thresh95, thresh99, Y.tst,
alpha, fit, cv.idx, results, file = results.file)
|
#' @export
getGridVar = function(variable="DEPTH",source = c("atSea","FSRS"),grids){
if(missing(grids)){
logs = lobster.db("process.logs")
grids = sort(unique(logs$GRID_NUM))
}
if("atSea" %in% source){
lobster.db('atSea')
atSea$X = atSea$LONGITUDE
atSea$Y = atSea$LATITUDE
x=tapply(atSea[,variable],atSea$GRIDNO,mean,na.rm=T)
xd = data.frame(GRID=names(x),x)
xd = subset(xd,GRID%in%grids)
gridvar = xd
}
if("FSRS" %in% source){
lobster.db('fsrs')
fsrs$X = fsrs$LONG_DD
fsrs$Y = fsrs$LAT_DD
y=tapply(fsrs[,variable],fsrs$LFA_GRID,mean,na.rm=T)
yd = data.frame(GRID=names(y),y)
yd = subset(yd,GRID%in%grids)
gridvar = yd
}
if("atSea" %in% source && "FSRS" %in% source){
gridvar = merge(xd,yd,all=T)
gridvar=data.frame(GRID=gridvar$GRID,rowMeans(gridvar[,2:3],na.rm=T))
}
names(gridvar)[2] = variable
gridvar$GRID = as.numeric(gridvar$GRID)
gridvar = merge(gridvar,data.frame(GRID=grids),all=T)
if(any(is.na(gridvar))){
library(bio.spacetime)
library(bio.bathymetry)
LFAgrid<-read.csv(file.path( project.datadirectory("bio.lobster"), "data","maps","GridPolys.csv"))
grids.dat<-calcCentroid(LFAgrid)
grids.dat = data.frame(GRID=grids.dat$SID,X=grids.dat$X,Y=grids.dat$Y)
if(variable=="DEPTH"){
p = spatial_parameters( type = "canada.east" )
grids.dat = lonlat2planar(grids.dat, input_names=c("X", "Y"),proj.type = p$internal.projection)
Complete = bathymetry.db(p=p, DS="complete")
# identify locations of data relative to baseline for envionmental data
locsmap = match(
lbm::array_map( "xy->1", grids.dat[,c("plon","plat")], gridparams=p$gridparams ),
lbm::array_map( "xy->1", Complete[,c("plon","plat")], gridparams=p$gridparams ) )
grids.dat$DEPTH = Complete$z[locsmap]
}
#browser()
missinggrids = gridvar[is.na(gridvar[,variable]),]$GRID
gridvar=rbind(gridvar[!is.na(gridvar[,variable]),],subset(grids.dat,GRID%in%missinggrids,c("GRID",variable)))
}
return(gridvar)
} | /R/getGridVar.r | no_license | BradHubley/bio.lobster | R | false | false | 2,002 | r | #' @export
getGridVar = function(variable="DEPTH",source = c("atSea","FSRS"),grids){
  # Mean value of `variable` within each lobster grid cell, computed from
  # at-sea sampling records and/or FSRS trap records (averaged across the two
  # when both sources are requested). Cells with no observations are
  # back-filled from grid centroids (interpolated bathymetry when
  # variable = "DEPTH").
  #
  # Args:
  #   variable: column name present in the atSea / fsrs tables (default "DEPTH")
  #   source:   one or both of "atSea" and "FSRS"
  #   grids:    grid cells to report; defaults to all grids in the processed logs
  #
  # Returns: a data.frame with columns GRID and <variable>.

  # Robustness fix: the original fell through to an opaque
  # "object 'gridvar' not found" error when `source` matched neither option
  if (!any(c("atSea", "FSRS") %in% source)) {
    stop("'source' must include at least one of 'atSea' or 'FSRS'")
  }

  if (missing(grids)) {
    logs <- lobster.db("process.logs")
    grids <- sort(unique(logs$GRID_NUM))
  }

  if ("atSea" %in% source) {
    lobster.db('atSea') # loads the `atSea` table
    atSea$X <- atSea$LONGITUDE
    atSea$Y <- atSea$LATITUDE
    x <- tapply(atSea[, variable], atSea$GRIDNO, mean, na.rm = TRUE)
    xd <- data.frame(GRID = names(x), x)
    xd <- subset(xd, GRID %in% grids)
    gridvar <- xd
  }
  if ("FSRS" %in% source) {
    lobster.db('fsrs') # loads the `fsrs` table
    fsrs$X <- fsrs$LONG_DD
    fsrs$Y <- fsrs$LAT_DD
    y <- tapply(fsrs[, variable], fsrs$LFA_GRID, mean, na.rm = TRUE)
    yd <- data.frame(GRID = names(y), y)
    yd <- subset(yd, GRID %in% grids)
    gridvar <- yd
  }
  if ("atSea" %in% source && "FSRS" %in% source) {
    # Average the two sources where both have data; keep either alone otherwise
    gridvar <- merge(xd, yd, all = TRUE)
    gridvar <- data.frame(GRID = gridvar$GRID, rowMeans(gridvar[, 2:3], na.rm = TRUE))
  }
  names(gridvar)[2] <- variable
  gridvar$GRID <- as.numeric(gridvar$GRID)
  # Ensure every requested grid appears, even with no observations (value NA)
  gridvar <- merge(gridvar, data.frame(GRID = grids), all = TRUE)

  if (any(is.na(gridvar))) {
    # Back-fill unobserved grids from grid centroids
    library(bio.spacetime)
    library(bio.bathymetry)
    LFAgrid <- read.csv(file.path(project.datadirectory("bio.lobster"), "data", "maps", "GridPolys.csv"))
    grids.dat <- calcCentroid(LFAgrid)
    grids.dat <- data.frame(GRID = grids.dat$SID, X = grids.dat$X, Y = grids.dat$Y)
    if (variable == "DEPTH") {
      # Look up depth at each centroid from the interpolated bathymetry grid
      p <- spatial_parameters(type = "canada.east")
      grids.dat <- lonlat2planar(grids.dat, input_names = c("X", "Y"), proj.type = p$internal.projection)
      Complete <- bathymetry.db(p = p, DS = "complete")
      # identify locations of data relative to baseline for environmental data
      locsmap <- match(
        lbm::array_map("xy->1", grids.dat[, c("plon", "plat")], gridparams = p$gridparams),
        lbm::array_map("xy->1", Complete[, c("plon", "plat")], gridparams = p$gridparams))
      grids.dat$DEPTH <- Complete$z[locsmap]
    }
    missinggrids <- gridvar[is.na(gridvar[, variable]), ]$GRID
    gridvar <- rbind(gridvar[!is.na(gridvar[, variable]), ],
                     subset(grids.dat, GRID %in% missinggrids, c("GRID", variable)))
  }
  return(gridvar)
}
library(RCIndex)
tu = createTU(system.file("exampleCode", "register.c", package = "RCIndex"),
includes = sprintf("%s%sinclude", R.home(), .Platform$file.sep))
incs = getInclusions(tu, clone = FALSE)
| /tests/inclusions.R | no_license | omegahat/RClangSimple | R | false | false | 221 | r | library(RCIndex)
tu = createTU(system.file("exampleCode", "register.c", package = "RCIndex"),
includes = sprintf("%s%sinclude", R.home(), .Platform$file.sep))
incs = getInclusions(tu, clone = FALSE)
|
/R/makeWindow.R | no_license | itsaquestion/TimeWindowMaker2 | R | false | false | 2,310 | r | ||
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasim.R
\docType{data}
\name{datasim}
\alias{datasim}
\title{Random Data for demonstration.}
\format{A List with components X and Y, both of which are matrices.}
\description{
Y = XB + E, n = 300, p = 200, K = 5.
}
\keyword{datasets}
| /man/datasim.Rd | no_license | gordonliu810822/VIMCO | R | false | true | 315 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasim.R
\docType{data}
\name{datasim}
\alias{datasim}
\title{Random Data for demonstration.}
\format{A List with components X and Y, both of which are matrices.}
\description{
Y = XB + E, n = 300, p = 200, K = 5.
}
\keyword{datasets}
|
#' Remove the n latest installed R packages
#'
#' @param n the last number of installed packages to remove. Default to 1 for the last installed package
#' @param lib a character vector giving the library directories. Defaults to the first element in .libPaths()
#' @return called for the side effect of removing the n latest installed packages
#' @export
#'
#' @examples
#'
#' \dontrun{
#'
#' # Removing the last 10 installed packages
#'
#' rm_latest_packages(n = 10)
#'
#' }
rm_latest_packages <- function(n = 1, lib = .libPaths()){
  # Interactively remove the n most recently installed packages (judged by
  # package-directory modification time) from the library paths in `lib`.
  #
  # Args:
  #   n:   how many packages to remove; a single number >= 1.
  #   lib: character vector of library directories (default: all of .libPaths()).
  #
  # Called for its side effect of removing packages.
  if (!is.numeric(n)) {
    stop(paste0("'n' must be numeric not ", typeof(n)))
  }
  if (length(n) != 1) {
    stop(paste0("'n' must be of length 1 not ", length(n)))
  }
  if (n < 1) {
    stop(" 'n' must be greater or equal than 1")
  }
  # Ask for confirmation. Bug fix: utils::menu() returns 0 when the user aborts
  # (0 or ESC), in which case switch() returned NULL and the comparison below
  # errored; treat anything but the explicit "Yes" (choice 4) as a refusal.
  choice <- utils::menu(
    choices = c("NO", "No Way!", "No !!!", "Yes", "Let me think a little bit"),
    title = glue::glue("Removing the last {n} installed packages ?"))
  decision <- if (identical(choice, 4L)) "YES" else "NO"
  if (decision == "YES"){
    pack_paths <- fs::dir_ls(lib)
    mod_time <- file.mtime(pack_paths)
    info <- data.frame(pack_paths, mod_time)
    # the n most recently modified (~ most recently installed) package dirs
    pack_latest <- utils::head(info[order(info$mod_time, decreasing = TRUE), ], n)
    # the package name is the last component of each path
    pack_names <- vapply(fs::path_split(pack_latest$pack_paths),
                         utils::tail, character(1), 1L)
    # Bug fix: pass `lib` through so packages are removed from the requested
    # library paths, not silently from the default .libPaths()[1] only
    utils::remove.packages(pack_names, lib = lib)
    message(glue::glue("{pack_names} removed ~~~o_o~~~ "))
  } else {
    message("Ok, think again ...")
  }
}
| /R/rm_latest_packages.R | permissive | cran/batata | R | false | false | 1,664 | r | #' Remove the n latest installed R packages
#'
#' @param n the last number of installed packages to remove. Default to 1 for the last installed package
#' @param lib a character vector giving the library directories. Defaults to the first element in .libPaths()
#' @return called for the side effect of removing the n latest installed packages
#' @export
#'
#' @examples
#'
#' \dontrun{
#'
#' # Removing the last 10 installed packages
#'
#' rm_latest_packages(n = 10)
#'
#' }
rm_latest_packages <- function(n = 1, lib = .libPaths()){
if (!is.numeric(n)) {
stop(paste0("'n' must be numeric not ", typeof(n)))
}
if (!length(n) == 1) {
stop(paste0("'n' must be of length 1 not ", length(n)))
}
if (n < 1) {
stop(" 'n' must be greater or equal than 1")
}
decision <- switch(utils::menu(
choices = c("NO", "No Way!", "No !!!", "Yes", "Let me think a little bit"),
title= glue::glue("Removing the last {n} installed packages ?")),
"NO", "NO", "NO", "YES", "NO")
if (decision == "YES"){
pack_paths <- fs::dir_ls(lib)
mod_time <- file.mtime(pack_paths)
data <- data.frame(pack_paths, mod_time)
pack_latest <- utils::head(data[rev(order(data$mod_time)), ], n)
# getting the names of the packages (which is the last part of the path)
pack_names <- sapply(fs::path_split(pack_latest$pack_paths), utils::tail, 1)
# removing the packages
utils::remove.packages(pack_names)
message(glue::glue("{pack_names} removed ~~~o_o~~~ "))
} else {
message("Ok, think again ...")
}
}
|
# Network fixture: arm-based aggregate smoking data with "No intervention" as
# the reference treatment.
smk_net <- set_agd_arm(smoking,
                       study = studyn,
                       trt = trtc,
                       r = r,
                       n = n,
                       trt_ref = "No intervention")
# Only test gradients, no sampling
# (test_grad = TRUE makes nma() check gradient evaluation and return without
# running MCMC, keeping this fixture fast for the argument-validation tests)
smk_fit_RE <- nma(smk_net,
                  trt_effects = "random",
                  prior_intercept = normal(scale = 100),
                  prior_trt = normal(scale = 100),
                  prior_het = normal(scale = 5),
                  test_grad = TRUE)
# ---- Argument validation for posterior_ranks() --------------------------------
test_that("probs argument", {
  m <- "numeric vector of probabilities"
  expect_error(posterior_ranks(smk_fit_RE, probs = "a"), m)
  expect_error(posterior_ranks(smk_fit_RE, probs = -1), m)
  expect_error(posterior_ranks(smk_fit_RE, probs = 1.5), m)
  expect_error(posterior_ranks(smk_fit_RE, probs = Inf), m)
  expect_error(posterior_ranks(smk_fit_RE, probs = list()), m)
  expect_error(posterior_ranks(smk_fit_RE, probs = NA), m)
  expect_error(posterior_ranks(smk_fit_RE, probs = NULL), m)
})
test_that("summary argument", {
  m <- "should be TRUE or FALSE"
  expect_error(posterior_ranks(smk_fit_RE, summary = "a"), m)
  expect_error(posterior_ranks(smk_fit_RE, summary = 1), m)
  expect_error(posterior_ranks(smk_fit_RE, summary = list()), m)
  expect_error(posterior_ranks(smk_fit_RE, summary = NA), m)
  expect_error(posterior_ranks(smk_fit_RE, summary = NULL), m)
})
# NOTE(review): this description duplicates the posterior_rank_probs() test of
# the same name below; distinct descriptions would disambiguate failure output
test_that("newdata argument", {
  m <- "not a data frame"
  expect_error(posterior_ranks(smk_fit_RE, newdata = "a"), m)
  expect_error(posterior_ranks(smk_fit_RE, newdata = 1), m)
  expect_error(posterior_ranks(smk_fit_RE, newdata = list()), m)
  expect_error(posterior_ranks(smk_fit_RE, newdata = NA), m)
})
# NOTE(review): description also duplicated below for posterior_rank_probs()
test_that("lower_better argument", {
  m <- "should be TRUE or FALSE"
  expect_error(posterior_ranks(smk_fit_RE, lower_better = "a"), m)
  expect_error(posterior_ranks(smk_fit_RE, lower_better = 1), m)
  expect_error(posterior_ranks(smk_fit_RE, lower_better = list()), m)
  expect_error(posterior_ranks(smk_fit_RE, lower_better = NA), m)
  expect_error(posterior_ranks(smk_fit_RE, lower_better = NULL), m)
})
# ---- Argument validation for posterior_rank_probs() ---------------------------
test_that("newdata argument", {
  m <- "not a data frame"
  expect_error(posterior_rank_probs(smk_fit_RE, newdata = "a"), m)
  expect_error(posterior_rank_probs(smk_fit_RE, newdata = 1), m)
  expect_error(posterior_rank_probs(smk_fit_RE, newdata = list()), m)
  expect_error(posterior_rank_probs(smk_fit_RE, newdata = NA), m)
})
test_that("lower_better argument", {
  m <- "should be TRUE or FALSE"
  expect_error(posterior_rank_probs(smk_fit_RE, lower_better = "a"), m)
  expect_error(posterior_rank_probs(smk_fit_RE, lower_better = 1), m)
  expect_error(posterior_rank_probs(smk_fit_RE, lower_better = list()), m)
  expect_error(posterior_rank_probs(smk_fit_RE, lower_better = NA), m)
  expect_error(posterior_rank_probs(smk_fit_RE, lower_better = NULL), m)
})
test_that("cumulative argument", {
m <- "should be TRUE or FALSE"
expect_error(posterior_rank_probs(smk_fit_RE, cumulative = "a"), m)
expect_error(posterior_rank_probs(smk_fit_RE, cumulative = 1), m)
expect_error(posterior_rank_probs(smk_fit_RE, cumulative = list()), m)
expect_error(posterior_rank_probs(smk_fit_RE, cumulative = NA), m)
expect_error(posterior_rank_probs(smk_fit_RE, cumulative = NULL), m)
})
| /tests/testthat/test-posterior_ranks.R | no_license | softloud/multinma | R | false | false | 3,303 | r |
# Build the smoking network from aggregate arm-level data, with
# "No intervention" as the reference treatment.
smk_net <- set_agd_arm(smoking,
                       study = studyn,
                       trt = trtc,
                       r = r,
                       n = n,
                       trt_ref = "No intervention")
# Only test gradients, no sampling
smk_fit_RE <- nma(smk_net,
                  trt_effects = "random",
                  prior_intercept = normal(scale = 100),
                  prior_trt = normal(scale = 100),
                  prior_het = normal(scale = 5),
                  test_grad = TRUE)
# Each block below feeds one invalid value per call and checks that the
# argument validation raises an error matching message `m`.
test_that("probs argument", {
  m <- "numeric vector of probabilities"
  expect_error(posterior_ranks(smk_fit_RE, probs = "a"), m)
  expect_error(posterior_ranks(smk_fit_RE, probs = -1), m)
  expect_error(posterior_ranks(smk_fit_RE, probs = 1.5), m)
  expect_error(posterior_ranks(smk_fit_RE, probs = Inf), m)
  expect_error(posterior_ranks(smk_fit_RE, probs = list()), m)
  expect_error(posterior_ranks(smk_fit_RE, probs = NA), m)
  expect_error(posterior_ranks(smk_fit_RE, probs = NULL), m)
})
test_that("summary argument", {
  m <- "should be TRUE or FALSE"
  expect_error(posterior_ranks(smk_fit_RE, summary = "a"), m)
  expect_error(posterior_ranks(smk_fit_RE, summary = 1), m)
  expect_error(posterior_ranks(smk_fit_RE, summary = list()), m)
  expect_error(posterior_ranks(smk_fit_RE, summary = NA), m)
  expect_error(posterior_ranks(smk_fit_RE, summary = NULL), m)
})
# NOTE(review): this description duplicates the posterior_rank_probs
# "newdata argument" test below — consider disambiguating the names.
test_that("newdata argument", {
  m <- "not a data frame"
  expect_error(posterior_ranks(smk_fit_RE, newdata = "a"), m)
  expect_error(posterior_ranks(smk_fit_RE, newdata = 1), m)
  expect_error(posterior_ranks(smk_fit_RE, newdata = list()), m)
  expect_error(posterior_ranks(smk_fit_RE, newdata = NA), m)
})
test_that("lower_better argument", {
  m <- "should be TRUE or FALSE"
  expect_error(posterior_ranks(smk_fit_RE, lower_better = "a"), m)
  expect_error(posterior_ranks(smk_fit_RE, lower_better = 1), m)
  expect_error(posterior_ranks(smk_fit_RE, lower_better = list()), m)
  expect_error(posterior_ranks(smk_fit_RE, lower_better = NA), m)
  expect_error(posterior_ranks(smk_fit_RE, lower_better = NULL), m)
})
# Same argument checks, now for posterior_rank_probs()
test_that("newdata argument", {
  m <- "not a data frame"
  expect_error(posterior_rank_probs(smk_fit_RE, newdata = "a"), m)
  expect_error(posterior_rank_probs(smk_fit_RE, newdata = 1), m)
  expect_error(posterior_rank_probs(smk_fit_RE, newdata = list()), m)
  expect_error(posterior_rank_probs(smk_fit_RE, newdata = NA), m)
})
test_that("lower_better argument", {
  m <- "should be TRUE or FALSE"
  expect_error(posterior_rank_probs(smk_fit_RE, lower_better = "a"), m)
  expect_error(posterior_rank_probs(smk_fit_RE, lower_better = 1), m)
  expect_error(posterior_rank_probs(smk_fit_RE, lower_better = list()), m)
  expect_error(posterior_rank_probs(smk_fit_RE, lower_better = NA), m)
  expect_error(posterior_rank_probs(smk_fit_RE, lower_better = NULL), m)
})
test_that("cumulative argument", {
  m <- "should be TRUE or FALSE"
  expect_error(posterior_rank_probs(smk_fit_RE, cumulative = "a"), m)
  expect_error(posterior_rank_probs(smk_fit_RE, cumulative = 1), m)
  expect_error(posterior_rank_probs(smk_fit_RE, cumulative = list()), m)
  expect_error(posterior_rank_probs(smk_fit_RE, cumulative = NA), m)
  expect_error(posterior_rank_probs(smk_fit_RE, cumulative = NULL), m)
})
|
# Compute Mallows' Cp for the best predictor subset of each size.
#
# y:             numeric response vector
# x:             matrix of candidate predictors (columns are variables)
# ndigit:        number of digits Cp is rounded to in the output
# return.result: if TRUE the table is returned; otherwise it is printed
mallows <-
function (y, x, ndigit=3, return.result=FALSE)
{
    if (!is.vector(y)) return("First Argument has to be Numeric Vector")
    m <- ncol(x)
    # Best single subset per size, ranked by Mallows' Cp (leaps package)
    mall <- leaps(x, y, method="Cp", nbest=1)
    out <- matrix(0, m, 2+m)
    rownames(out) <- rep("", nrow(out))
    colnames(out) <- c("Number of Variables", "Cp", colnames(x))
    for (i in seq_len(m)) {
        # "X" marks predictors included in the best subset of size i;
        # Cp is rounded to 'ndigit' digits (the parameter was previously
        # ignored in favour of a hard-coded 2)
        out[i, ] <- c(i, round(mall$Cp[i], ndigit), ifelse(mall$which[i, ], "X", ""))
    }
    if (return.result) return(out)
    print(out, quote=FALSE)
}
| /R/mallows.R | no_license | WolfgangRolke/Resma3 | R | false | false | 613 | r | mallows <-
function (y, x, ndigit=3, return.result=FALSE)
{
# y: numeric response vector; x: matrix of candidate predictors
if (!is.vector(y)) return("First Argument has to be Numeric Vector")
m <- ncol(x)
# NOTE(review): xname, varNames and dta are computed but never used below
xname<-deparse(substitute(x))
varNames<-c(deparse(substitute(y)),colnames(x))
dta <- data.frame(x = x, y = y)
# Best single predictor subset per size, ranked by Mallows' Cp (leaps package)
mall<-leaps(x,y, method="Cp",nbest=1)
out <- matrix(0,m,2+m)
rownames(out) <- rep("",nrow(out))
colnames(out) <- c("Number of Variables","Cp",colnames(x))
for(i in 1:m) {
# "X" marks predictors in the best subset of size i
# NOTE(review): Cp is rounded to 2 digits here; the 'ndigit' argument is ignored
out[i, ] <- c(i, round(mall$Cp[i],2), ifelse(mall$which[i,],"X",""))
}
if(return.result) return(out)
print(out,quote=FALSE)
}
|
# Load data from text file; read every column as character first so the
# "?" missing-value markers do not break numeric parsing
# (spell out 'colClasses' — 'colClass' only worked via partial matching)
data <- read.table("household_power_consumption.txt",
                   header = TRUE,
                   sep = ";",
                   colClasses = "character")
# Concatenate date and time fields into one timestamp string
date_time <- paste(data$Date, data$Time)
# Add date_time to data frame
data <- cbind(date_time, data)
# Change date field from character to date type (stored as d/m/Y)
data$Date <- as.Date(data$Date, format = "%d/%m/%Y")
# Change Global active power field to numeric; "?" markers become NA
data$Global_active_power <- suppressWarnings(as.numeric(
  data$Global_active_power))
# Subset data to the two days of interest
data <- data[data$Date == "2007-02-01" | data$Date == "2007-02-02", ]
# Parse the combined timestamp; "%H:%M:%S" is locale-independent,
# unlike the locale-dependent "%X"
data$date_time <- as.POSIXct(strptime(data$date_time, "%d/%m/%Y %H:%M:%S"))
# Convert the three sub-metering columns to numeric by name
# (previously addressed by fragile positional indices 8-10)
data$Sub_metering_1 <- as.numeric(data$Sub_metering_1)
data$Sub_metering_2 <- as.numeric(data$Sub_metering_2)
data$Sub_metering_3 <- as.numeric(data$Sub_metering_3)
# Save plot to the current directory
png("plot3.png", width = 480, height = 480)
# Plot 1: sub-metering 1 over time
with(data, plot(date_time,
                Sub_metering_1,
                ylab = "Energy sub metering",
                xlab = "",
                type = "l"))
# Plot 2: overlay sub-metering 2
lines(data$date_time, data$Sub_metering_2, type = "l", col = "red")
# Plot 3: overlay sub-metering 3
lines(data$date_time, data$Sub_metering_3, type = "l", col = "blue")
# Legend
legend("topright",
       c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = c(1, 1, 1),
       col = c("black", "red", "blue"))
dev.off() | /plot3.R | no_license | wilfredwards/ExData_Plotting1 | R | false | false | 1,491 | r | #Load data from text file, each column as a character
# Read the power-consumption file with every column as character so the
# "?" missing-value markers do not break numeric parsing.
# NOTE(review): 'colClass' relies on partial matching of 'colClasses'.
data <- read.table("household_power_consumption.txt",
header=TRUE,
sep=";",
colClass="character")
#Concatenate date and time fields
date_time<-paste(data$Date,data$Time)
#Add date_time to data frame
data<-cbind(date_time,data)
#Change date field from character to date type (stored as d/m/Y)
data$Date <- as.Date(data$Date,format="%d/%m/%Y")
#Change Global active power field to numeric; "?" markers become NA
data$Global_active_power <- suppressWarnings(as.numeric(
data$Global_active_power))
#Subset data to the two days of interest
data<-data[data$Date=="2007-02-01" | data$Date=="2007-02-02",]
# Parse the combined timestamp.
# NOTE(review): "%X" is locale-dependent; "%H:%M:%S" would be portable.
data$date_time<-as.POSIXct(strptime(data$date_time, "%d/%m/%Y %X"))
#change column 8 to numeric (Sub_metering_1 after the cbind above)
data[,8]<-as.numeric(data[,8])
#change column 9 to numeric (Sub_metering_2)
data[,9]<-as.numeric(data[,9])
#change column 10 to numeric (Sub_metering_3)
data[,10]<-as.numeric(data[,10])
#Save plot to the current directory
png("plot3.png",width=480,height=480)
#Plot 1: sub-metering 1 over time
with(data,plot(date_time,
Sub_metering_1,
ylab="Energy sub metering",
xlab="",
type="l"))
#Plot 2: overlay sub-metering 2
lines(data$date_time,data$Sub_metering_2,type="l",col="red")
#Plot 3: overlay sub-metering 3
lines(data$date_time,data$Sub_metering_3,type="l",col="blue")
#Legend
legend("topright",
c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
lty=c(1,1,1),
col=c("black","red","blue"))
dev.off()
# Montiglio 2017 Rec 170
# Phenotypic extractions
# Load libraries====
library(here)
library(tidyverse)
# Set wd====
dir<-here()
# Load data====
data<-read.csv("data/data170_mating_rate.csv")
# Prep data====
# Per-male mating proportion: frequency of mating==1 rows; males that never
# mated (only mating==0 rows, rel.freq==1) get p_mate = 0 and are kept via
# the final subset.
proportion= data %>%
group_by(id_exp, mating) %>%
summarise(n=n()) %>%
mutate(rel.freq=n/sum(n)) %>%
mutate(p_mate= if_else(mating==0 & rel.freq==1, 0, rel.freq)) %>%
subset(mating==1| p_mate==0)
# Count of distinct males (one row per id_exp)
ind=data %>%
group_by(id_exp) %>%
summarize(Male=n_distinct(id_exp)) #n=295
# One activity / aggression score per male (maximum over repeated trials)
Activity=data %>%
group_by(id_exp) %>%
summarize(Act=max(coef.activity))
Agg=data %>%
group_by(id_exp) %>%
summarize(Agg=max(coef.agg))
# Merge behaviour scores with mating proportion by male id
data2<-merge(Activity, Agg, by="id_exp")
data3<-merge(data2, proportion, by="id_exp")
# correlations====
hist(data3$p_mate)
cor.test(data3$Act, data3$p_mate) #r=0.1663807
cor.test(data3$Agg, data3$p_mate) #r=0.1976249 | /RawDataExtractions/Montiglio2017.R | no_license | elene-haave-audet/Extractions-Pers_and_Fit-Meta | R | false | false | 878 | r | # Montiglio 2017 Rec 170
# Phenotypic extractions (duplicate copy of the Montiglio 2017 extraction)
# Load libraries====
library(here)
library(tidyverse)
# Set wd====
dir<-here()
# Load data====
data<-read.csv("data/data170_mating_rate.csv")
# Prep data====
# Per-male mating proportion; never-mated males get p_mate = 0
proportion= data %>%
group_by(id_exp, mating) %>%
summarise(n=n()) %>%
mutate(rel.freq=n/sum(n)) %>%
mutate(p_mate= if_else(mating==0 & rel.freq==1, 0, rel.freq)) %>%
subset(mating==1| p_mate==0)
# Count of distinct males
ind=data %>%
group_by(id_exp) %>%
summarize(Male=n_distinct(id_exp)) #n=295
# One activity / aggression score per male (max over repeated trials)
Activity=data %>%
group_by(id_exp) %>%
summarize(Act=max(coef.activity))
Agg=data %>%
group_by(id_exp) %>%
summarize(Agg=max(coef.agg))
# Merge behaviour scores with mating proportion by male id
data2<-merge(Activity, Agg, by="id_exp")
data3<-merge(data2, proportion, by="id_exp")
# correlations====
hist(data3$p_mate)
cor.test(data3$Act, data3$p_mate) #r=0.1663807
cor.test(data3$Agg, data3$p_mate) #r=0.1976249
# Fit a stanova model via brms on warpbreaks and check the shape of the
# extracted posterior draws and the summaries against a classical lm() fit.
test_that("brms models work", {
# capture.output() silences the Stan compilation/fitting chatter
tmp <- capture.output(
fit_warp_brms <- stanova_brm(breaks ~ wool * tension, data = warpbreaks,
chains = 2, iter = 1000)
)
expect_is(fit_warp_brms, "stanova")
# Draws as arrays: 3 dimensions (iterations x parameters x chains);
# 2 chains x 1000 iter with default warmup leaves 500 draws per chain
ar <- stanova_samples(fit_warp_brms, return = "array")
expect_is(ar, "list")
expect_length(ar, 4)
expect_is(ar$`(Intercept)`, "array")
expect_length(dim(ar$`(Intercept)`), 3)
expect_equal(dim(ar$`wool:tension`), c(500, 6, 2))
# Draws as long data frames: 500 draws x 2 chains x 6 cells = 6000 rows
df <- stanova_samples(fit_warp_brms, return = "data.frame")
expect_is(df, "list")
expect_length(df, 4)
expect_is(df$`(Intercept)`, "data.frame")
expect_length(dim(df$`(Intercept)`), 2)
expect_equal(dim(df$`wool:tension`), c(6000, 6))
expect_equal(df$`wool:tension`$Chain, rep(1:2, each = 3000))
expect_equal(df$`wool:tension`$Iteration,
rep(seq_len(nrow(df$`(Intercept)`)/2), 6*2))
# Summaries: compare against lm() with sum-to-zero contrasts
sum1 <- summary(fit_warp_brms, diff_intercept = TRUE)
expect_equivalent(sum1$`(Intercept)`$Mean, 28.1, tolerance = 0.4)
sum2 <- summary(fit_warp_brms, diff_intercept = FALSE)
modlm <- lm(breaks ~ wool * tension, data = warpbreaks,
contrasts = list(wool = "contr.sum", tension = "contr.sum"))
expect_equivalent(sum2$`(Intercept)`$Mean,
coef(modlm,)[1],
tolerance = 1, scale = 1)
# Cell means should match emmeans estimates from the lm fit
expect_equivalent(sum2$`wool:tension`$Mean,
summary(emmeans::emmeans(modlm, c("wool", "tension")))$emmean,
tolerance = 1, scale = 1)
expect_equivalent(sum2$`wool:tension`$Mean[1], 45.5,
tolerance = 2, scale = 1)
})
# Binomial GLM: budworm dose-response data, checked against glm()/emmeans
test_that("Binomial GLM brms works", {
## binomial model
### from: ?predict.glm
## example from Venables and Ripley (2002, pp. 190-2.)
dfbin <- data.frame(
ldose = rep(0:5, 2),
numdead = c(1, 4, 9, 13, 18, 20, 0, 2, 6, 10, 12, 16),
sex = factor(rep(c("M", "F"), c(6, 6)))
)
dfbin$n <- 20
capture.output(
budworm.lg_brm <- stanova_brm(numdead | trials(n) ~ sex*ldose,
data = dfbin,
family = binomial,
chains = 2, iter = 1000)
)
sum1 <- summary(budworm.lg_brm, diff_intercept = FALSE)
modglm <- glm(cbind(numdead, numalive = 20-numdead) ~ sex*ldose,
data = dfbin, family = binomial,
contrasts = list(sex = "contr.sum"))
#summary(modglm)
expect_equivalent(sum1$`(Intercept)`$Mean,
coef(modglm)[1],
tolerance = 0.5, scale = 1)
expect_equivalent(sum1$sex$Mean,
summary(emmeans::emmeans(modglm, c("sex")))$emmean,
tolerance = 0.3, scale = 1)
expect_equivalent(sum1$ldose$Mean,
summary(emmeans::emtrends(modglm, var = "ldose",
specs = "1"))$ldose.trend,
tolerance = 0.3, scale = 1)
# With diff_intercept = TRUE the sex means are differences from intercept
sum2 <- summary(budworm.lg_brm, diff_intercept = TRUE)
expect_equivalent(sum2$`(Intercept)`$Mean,
coef(modglm)[1],
tolerance = 0.5, scale = 1)
expect_equivalent(sum2$sex$Mean,
summary(emmeans::emmeans(modglm, c("sex")))$emmean - coef(modglm)[1],
tolerance = 0.3, scale = 1)
expect_equivalent(sum2$ldose$Mean,
summary(emmeans::emtrends(modglm, var = "ldose",
specs = "1"))$ldose.trend,
tolerance = 0.3, scale = 1)
})
| /tests/testthat/test-stanova_brm.R | no_license | bayesstuff/stanova | R | false | false | 3,536 | r | test_that("brms models work", {
# (Continuation of the duplicated "brms models work" test; its test_that()
# opening line sits on the metadata row above.)
# capture.output() silences the Stan compilation/fitting chatter
tmp <- capture.output(
fit_warp_brms <- stanova_brm(breaks ~ wool * tension, data = warpbreaks,
chains = 2, iter = 1000)
)
expect_is(fit_warp_brms, "stanova")
# Draws as arrays: iterations x parameters x chains
ar <- stanova_samples(fit_warp_brms, return = "array")
expect_is(ar, "list")
expect_length(ar, 4)
expect_is(ar$`(Intercept)`, "array")
expect_length(dim(ar$`(Intercept)`), 3)
expect_equal(dim(ar$`wool:tension`), c(500, 6, 2))
# Draws as long data frames: 500 draws x 2 chains x 6 cells = 6000 rows
df <- stanova_samples(fit_warp_brms, return = "data.frame")
expect_is(df, "list")
expect_length(df, 4)
expect_is(df$`(Intercept)`, "data.frame")
expect_length(dim(df$`(Intercept)`), 2)
expect_equal(dim(df$`wool:tension`), c(6000, 6))
expect_equal(df$`wool:tension`$Chain, rep(1:2, each = 3000))
expect_equal(df$`wool:tension`$Iteration,
rep(seq_len(nrow(df$`(Intercept)`)/2), 6*2))
# Summaries: compare against lm() with sum-to-zero contrasts
sum1 <- summary(fit_warp_brms, diff_intercept = TRUE)
expect_equivalent(sum1$`(Intercept)`$Mean, 28.1, tolerance = 0.4)
sum2 <- summary(fit_warp_brms, diff_intercept = FALSE)
modlm <- lm(breaks ~ wool * tension, data = warpbreaks,
contrasts = list(wool = "contr.sum", tension = "contr.sum"))
expect_equivalent(sum2$`(Intercept)`$Mean,
coef(modlm,)[1],
tolerance = 1, scale = 1)
expect_equivalent(sum2$`wool:tension`$Mean,
summary(emmeans::emmeans(modlm, c("wool", "tension")))$emmean,
tolerance = 1, scale = 1)
expect_equivalent(sum2$`wool:tension`$Mean[1], 45.5,
tolerance = 2, scale = 1)
})
# Binomial GLM: budworm dose-response data, checked against glm()/emmeans
test_that("Binomial GLM brms works", {
## binomial model
### from: ?predict.glm
## example from Venables and Ripley (2002, pp. 190-2.)
dfbin <- data.frame(
ldose = rep(0:5, 2),
numdead = c(1, 4, 9, 13, 18, 20, 0, 2, 6, 10, 12, 16),
sex = factor(rep(c("M", "F"), c(6, 6)))
)
dfbin$n <- 20
capture.output(
budworm.lg_brm <- stanova_brm(numdead | trials(n) ~ sex*ldose,
data = dfbin,
family = binomial,
chains = 2, iter = 1000)
)
sum1 <- summary(budworm.lg_brm, diff_intercept = FALSE)
modglm <- glm(cbind(numdead, numalive = 20-numdead) ~ sex*ldose,
data = dfbin, family = binomial,
contrasts = list(sex = "contr.sum"))
#summary(modglm)
expect_equivalent(sum1$`(Intercept)`$Mean,
coef(modglm)[1],
tolerance = 0.5, scale = 1)
expect_equivalent(sum1$sex$Mean,
summary(emmeans::emmeans(modglm, c("sex")))$emmean,
tolerance = 0.3, scale = 1)
expect_equivalent(sum1$ldose$Mean,
summary(emmeans::emtrends(modglm, var = "ldose",
specs = "1"))$ldose.trend,
tolerance = 0.3, scale = 1)
# With diff_intercept = TRUE the sex means are differences from intercept
sum2 <- summary(budworm.lg_brm, diff_intercept = TRUE)
expect_equivalent(sum2$`(Intercept)`$Mean,
coef(modglm)[1],
tolerance = 0.5, scale = 1)
expect_equivalent(sum2$sex$Mean,
summary(emmeans::emmeans(modglm, c("sex")))$emmean - coef(modglm)[1],
tolerance = 0.3, scale = 1)
expect_equivalent(sum2$ldose$Mean,
summary(emmeans::emtrends(modglm, var = "ldose",
specs = "1"))$ldose.trend,
tolerance = 0.3, scale = 1)
})
|
# Pull the name/number pairs to plot.
# 'con' is an open DBI connection supplied by the surrounding environment.
sql <- "SELECT name, number FROM thing"
dat <- DBI::dbGetQuery(con, sql)
# Render a bar chart of number-by-name into mygraph.png
png("mygraph.png")
par(mar = c(15, 4, .5, .5))  # wide bottom margin for the rotated labels
barplot(setNames(dat$number, dat$name), las = 2)  # las = 2: perpendicular labels
dev.off()
| /src/app/orderly_demo/src/connection/script.R | permissive | vimc/orderly-web | R | false | false | 179 | r | sql <- "SELECT name, number FROM thing"
# Duplicate copy: run the query (sql defined on the line above) over the
# DBI connection 'con' supplied by the surrounding environment.
dat <- DBI::dbGetQuery(con, sql)
# Render a bar chart of number-by-name into mygraph.png
png("mygraph.png")
par(mar = c(15, 4, .5, .5))  # wide bottom margin for the rotated labels
barplot(setNames(dat$number, dat$name), las = 2)  # las = 2: perpendicular labels
dev.off()
|
#' Cloud SQL Administration API Objects
#' Creates and configures Cloud SQL instances, which provide fully-managed MySQL databases.
#'
#' Auto-generated code by googleAuthR::gar_create_api_objects
#' at 2016-09-03 23:48:08
#' filename: /Users/mark/dev/R/autoGoogleAPI/googlesqladminv1beta4.auto/R/sqladmin_objects.R
#' api_json: api_json
#'
#' Objects for use by the functions created by googleAuthR::gar_create_api_skeleton
#' AclEntry Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' An entry for an Access Control list.
#'
#' @param expirationTime The time when this access control entry expires in RFC 3339 format, for example 2012-11-15T16:19:00
#' @param name An optional label to identify this entry
#' @param value The whitelisted value for the access control list
#'
#' @return AclEntry object
#'
#' @family AclEntry functions
#' @export
AclEntry <- function(expirationTime = NULL, name = NULL, value = NULL) {
    # 'kind' is the literal API type string; the generated backtick form
    # `sql#aclEntry` was an undefined variable and errored at run time.
    structure(list(expirationTime = expirationTime, kind = "sql#aclEntry", name = name,
        value = value), class = "gar_AclEntry")
}
#' BackupConfiguration Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Database instance backup configuration.
#'
#' @param binaryLogEnabled Whether binary log is enabled
#' @param enabled Whether this configuration is enabled
#' @param startTime Start time for the daily backup configuration in UTC timezone in the 24 hour format - HH:MM
#'
#' @return BackupConfiguration object
#'
#' @family BackupConfiguration functions
#' @export
BackupConfiguration <- function(binaryLogEnabled = NULL, enabled = NULL, startTime = NULL) {
    # 'kind' must be a quoted string; the backticked `sql#backupConfiguration`
    # was an undefined variable and errored at run time.
    structure(list(binaryLogEnabled = binaryLogEnabled, enabled = enabled, kind = "sql#backupConfiguration",
        startTime = startTime), class = "gar_BackupConfiguration")
}
#' BackupRun Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' A database instance backup run resource.
#'
#' @param description The description of this run, only applicable to on-demand backups
#' @param endTime The time the backup operation completed in UTC timezone in RFC 3339 format, for example 2012-11-15T16:19:00
#' @param enqueuedTime The time the run was enqueued in UTC timezone in RFC 3339 format, for example 2012-11-15T16:19:00
#' @param error Information about why the backup operation failed
#' @param id A unique identifier for this backup run
#' @param instance Name of the database instance
#' @param selfLink The URI of this resource
#' @param startTime The time the backup operation actually started in UTC timezone in RFC 3339 format, for example 2012-11-15T16:19:00
#' @param status The status of this run
#' @param type The type of this run; can be either 'AUTOMATED' or 'ON_DEMAND'
#' @param windowStartTime The start time of the backup window during which this the backup was attempted in RFC 3339 format, for example 2012-11-15T16:19:00
#'
#' @return BackupRun object
#'
#' @family BackupRun functions
#' @export
BackupRun <- function(description = NULL, endTime = NULL, enqueuedTime = NULL, error = NULL,
    id = NULL, instance = NULL, selfLink = NULL, startTime = NULL, status = NULL,
    type = NULL, windowStartTime = NULL) {
    # 'kind' must be a quoted string; the backticked `sql#backupRun` was an
    # undefined variable and errored at run time.
    structure(list(description = description, endTime = endTime, enqueuedTime = enqueuedTime,
        error = error, id = id, instance = instance, kind = "sql#backupRun", selfLink = selfLink,
        startTime = startTime, status = status, type = type, windowStartTime = windowStartTime),
        class = "gar_BackupRun")
}
#' BackupRunsListResponse Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Backup run list results.
#'
#' @param items A list of backup runs in reverse chronological order of the enqueued time
#' @param nextPageToken The continuation token, used to page through large result sets
#'
#' @return BackupRunsListResponse object
#'
#' @family BackupRunsListResponse functions
#' @export
BackupRunsListResponse <- function(items = NULL, nextPageToken = NULL) {
    # 'kind' must be a quoted string; the backticked `sql#backupRunsList` was
    # an undefined variable and errored at run time.
    structure(list(items = items, kind = "sql#backupRunsList", nextPageToken = nextPageToken),
        class = "gar_BackupRunsListResponse")
}
#' BinLogCoordinates Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Binary log coordinates.
#'
#' @param binLogFileName Name of the binary log file for a Cloud SQL instance
#' @param binLogPosition Position (offset) within the binary log file
#'
#' @return BinLogCoordinates object
#'
#' @family BinLogCoordinates functions
#' @export
BinLogCoordinates <- function(binLogFileName = NULL, binLogPosition = NULL) {
    # 'kind' must be a quoted string; the backticked `sql#binLogCoordinates`
    # was an undefined variable and errored at run time.
    structure(list(binLogFileName = binLogFileName, binLogPosition = binLogPosition,
        kind = "sql#binLogCoordinates"), class = "gar_BinLogCoordinates")
}
#' CloneContext Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Database instance clone context.
#'
#' @param binLogCoordinates Binary log coordinates, if specified, indentify the the position up to which the source instance should be cloned
#' @param destinationInstanceName Name of the Cloud SQL instance to be created as a clone
#'
#' @return CloneContext object
#'
#' @family CloneContext functions
#' @export
CloneContext <- function(binLogCoordinates = NULL, destinationInstanceName = NULL) {
    # 'kind' must be a quoted string; the backticked `sql#cloneContext` was an
    # undefined variable and errored at run time.
    structure(list(binLogCoordinates = binLogCoordinates, destinationInstanceName = destinationInstanceName,
        kind = "sql#cloneContext"), class = "gar_CloneContext")
}
#' Database Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' A database resource inside a Cloud SQL instance.
#'
#' @param charset The MySQL charset value
#' @param collation The MySQL collation value
#' @param etag HTTP 1
#' @param instance The name of the Cloud SQL instance
#' @param name The name of the database in the Cloud SQL instance
#' @param project The project ID of the project containing the Cloud SQL database
#' @param selfLink The URI of this resource
#'
#' @return Database object
#'
#' @family Database functions
#' @export
Database <- function(charset = NULL, collation = NULL, etag = NULL, instance = NULL,
    name = NULL, project = NULL, selfLink = NULL) {
    # 'kind' must be a quoted string; the backticked `sql#database` was an
    # undefined variable and errored at run time.
    structure(list(charset = charset, collation = collation, etag = etag, instance = instance,
        kind = "sql#database", name = name, project = project, selfLink = selfLink),
        class = "gar_Database")
}
#' DatabaseFlags Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' MySQL flags for Cloud SQL instances.
#'
#' @param name The name of the flag
#' @param value The value of the flag
#'
#' @return DatabaseFlags object
#'
#' @family DatabaseFlags functions
#' @export
DatabaseFlags <- function(name = NULL, value = NULL) {
    # Plain name/value pair tagged with the gar_ class for dispatch
    flag <- list(name = name, value = value)
    structure(flag, class = "gar_DatabaseFlags")
}
#' DatabaseInstance Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' A Cloud SQL instance resource.
#'
#' @param DatabaseInstance.failoverReplica The \link{DatabaseInstance.failoverReplica} object or list of objects
#' @param backendType FIRST_GEN: Basic Cloud SQL instance that runs in a Google-managed container
#' @param currentDiskSize The current disk usage of the instance in bytes
#' @param databaseVersion The database engine type and version
#' @param etag HTTP 1
#' @param failoverReplica The name and status of the failover replica
#' @param instanceType The instance type
#' @param ipAddresses The assigned IP addresses for the instance
#' @param ipv6Address The IPv6 address assigned to the instance
#' @param masterInstanceName The name of the instance which will act as master in the replication setup
#' @param maxDiskSize The maximum disk size of the instance in bytes
#' @param name Name of the Cloud SQL instance
#' @param onPremisesConfiguration Configuration specific to on-premises instances
#' @param project The project ID of the project containing the Cloud SQL instance
#' @param region The geographical region
#' @param replicaConfiguration Configuration specific to read-replicas replicating from on-premises masters
#' @param replicaNames The replicas of the instance
#' @param selfLink The URI of this resource
#' @param serverCaCert SSL configuration
#' @param serviceAccountEmailAddress The service account email address assigned to the instance
#' @param settings The user settings
#' @param state The current serving state of the Cloud SQL instance
#' @param suspensionReason If the instance state is SUSPENDED, the reason for the suspension
#'
#' @return DatabaseInstance object
#'
#' @family DatabaseInstance functions
#' @export
DatabaseInstance <- function(DatabaseInstance.failoverReplica = NULL, backendType = NULL,
    currentDiskSize = NULL, databaseVersion = NULL, etag = NULL, failoverReplica = NULL,
    instanceType = NULL, ipAddresses = NULL, ipv6Address = NULL, masterInstanceName = NULL,
    maxDiskSize = NULL, name = NULL, onPremisesConfiguration = NULL, project = NULL,
    region = NULL, replicaConfiguration = NULL, replicaNames = NULL, selfLink = NULL,
    serverCaCert = NULL, serviceAccountEmailAddress = NULL, settings = NULL, state = NULL,
    suspensionReason = NULL) {
    # 'kind' must be a quoted string; the backticked `sql#instance` was an
    # undefined variable and errored at run time.
    structure(list(DatabaseInstance.failoverReplica = DatabaseInstance.failoverReplica,
        backendType = backendType, currentDiskSize = currentDiskSize, databaseVersion = databaseVersion,
        etag = etag, failoverReplica = failoverReplica, instanceType = instanceType,
        ipAddresses = ipAddresses, ipv6Address = ipv6Address, kind = "sql#instance",
        masterInstanceName = masterInstanceName, maxDiskSize = maxDiskSize, name = name,
        onPremisesConfiguration = onPremisesConfiguration, project = project, region = region,
        replicaConfiguration = replicaConfiguration, replicaNames = replicaNames,
        selfLink = selfLink, serverCaCert = serverCaCert, serviceAccountEmailAddress = serviceAccountEmailAddress,
        settings = settings, state = state, suspensionReason = suspensionReason),
        class = "gar_DatabaseInstance")
}
#' DatabaseInstance.failoverReplica Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' The name and status of the failover replica. This property is applicable only to Second Generation instances.
#'
#' @param available The availability status of the failover replica
#' @param name The name of the failover replica
#'
#' @return DatabaseInstance.failoverReplica object
#'
#' @family DatabaseInstance functions
#' @export
DatabaseInstance.failoverReplica <- function(available = NULL, name = NULL) {
    # Availability flag plus replica name, tagged with the gar_ class
    replica <- list(available = available, name = name)
    structure(replica, class = "gar_DatabaseInstance.failoverReplica")
}
#' DatabasesListResponse Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Database list response.
#'
#' @param items List of database resources in the instance
#'
#' @return DatabasesListResponse object
#'
#' @family DatabasesListResponse functions
#' @export
DatabasesListResponse <- function(items = NULL) {
    # 'kind' must be a quoted string; the backticked `sql#databasesList` was
    # an undefined variable and errored at run time.
    structure(list(items = items, kind = "sql#databasesList"), class = "gar_DatabasesListResponse")
}
#' ExportContext Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Database instance export context.
#'
#' @param ExportContext.csvExportOptions The \link{ExportContext.csvExportOptions} object or list of objects
#' @param ExportContext.sqlExportOptions The \link{ExportContext.sqlExportOptions} object or list of objects
#' @param csvExportOptions Options for exporting data as CSV
#' @param databases Databases (for example, guestbook) from which the export is made
#' @param fileType The file type for the specified uri
#' @param sqlExportOptions Options for exporting data as SQL statements
#' @param uri The path to the file in Google Cloud Storage where the export will be stored
#'
#' @return ExportContext object
#'
#' @family ExportContext functions
#' @export
ExportContext <- function(ExportContext.csvExportOptions = NULL, ExportContext.sqlExportOptions = NULL,
    csvExportOptions = NULL, databases = NULL, fileType = NULL, sqlExportOptions = NULL,
    uri = NULL) {
    # 'kind' must be a quoted string; the backticked `sql#exportContext` was
    # an undefined variable and errored at run time.
    structure(list(ExportContext.csvExportOptions = ExportContext.csvExportOptions,
        ExportContext.sqlExportOptions = ExportContext.sqlExportOptions, csvExportOptions = csvExportOptions,
        databases = databases, fileType = fileType, kind = "sql#exportContext", sqlExportOptions = sqlExportOptions,
        uri = uri), class = "gar_ExportContext")
}
#' ExportContext.csvExportOptions Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Options for exporting data as CSV.
#'
#' @param selectQuery The select query used to extract the data
#'
#' @return ExportContext.csvExportOptions object
#'
#' @family ExportContext functions
#' @export
ExportContext.csvExportOptions <- function(selectQuery = NULL) {
    # Single-field options object carrying the extraction query
    opts <- list(selectQuery = selectQuery)
    structure(opts, class = "gar_ExportContext.csvExportOptions")
}
#' ExportContext.sqlExportOptions Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Options for exporting data as SQL statements.
#'
#' @param schemaOnly Export only schemas
#' @param tables Tables to export, or that were exported, from the specified database
#'
#' @return ExportContext.sqlExportOptions object
#'
#' @family ExportContext functions
#' @export
ExportContext.sqlExportOptions <- function(schemaOnly = NULL, tables = NULL) {
    # Options object: schema-only toggle plus the table selection
    opts <- list(schemaOnly = schemaOnly, tables = tables)
    structure(opts, class = "gar_ExportContext.sqlExportOptions")
}
#' FailoverContext Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Database instance failover context.
#'
#' @param settingsVersion The current settings version of this instance
#'
#' @return FailoverContext object
#'
#' @family FailoverContext functions
#' @export
FailoverContext <- function(settingsVersion = NULL) {
    # 'kind' must be a quoted string; the backticked `sql#failoverContext` was
    # an undefined variable and errored at run time.
    structure(list(kind = "sql#failoverContext", settingsVersion = settingsVersion),
        class = "gar_FailoverContext")
}
#' Flag Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' A Google Cloud SQL service flag resource.
#'
#' @param allowedStringValues For STRING flags, a list of strings that the value can be set to
#' @param appliesTo The database version this flag applies to
#' @param maxValue For INTEGER flags, the maximum allowed value
#' @param minValue For INTEGER flags, the minimum allowed value
#' @param name This is the name of the flag
#' @param requiresRestart Indicates whether changing this flag will trigger a database restart
#' @param type The type of the flag
#'
#' @return Flag object
#'
#' @family Flag functions
#' @export
Flag <- function(allowedStringValues = NULL, appliesTo = NULL, maxValue = NULL, minValue = NULL,
    name = NULL, requiresRestart = NULL, type = NULL) {
    # 'kind' must be the literal type string (was an unresolved backtick symbol).
    structure(list(allowedStringValues = allowedStringValues, appliesTo = appliesTo,
        kind = "sql#flag", maxValue = maxValue, minValue = minValue, name = name,
        requiresRestart = requiresRestart, type = type), class = "gar_Flag")
}
#' FlagsListResponse Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Flags list response.
#'
#' @param items List of flags
#'
#' @return FlagsListResponse object
#'
#' @family FlagsListResponse functions
#' @export
FlagsListResponse <- function(items = NULL) {
    # 'kind' must be the literal type string (was an unresolved backtick symbol).
    structure(list(items = items, kind = "sql#flagsList"), class = "gar_FlagsListResponse")
}
#' ImportContext Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Database instance import context.
#'
#' @param ImportContext.csvImportOptions The \link{ImportContext.csvImportOptions} object or list of objects
#' @param csvImportOptions Options for importing data as CSV
#' @param database The database (for example, guestbook) to which the import is made
#' @param fileType The file type for the specified uri
#' @param uri A path to the file in Google Cloud Storage from which the import is made
#'
#' @return ImportContext object
#'
#' @family ImportContext functions
#' @export
ImportContext <- function(ImportContext.csvImportOptions = NULL, csvImportOptions = NULL,
    database = NULL, fileType = NULL, uri = NULL) {
    # 'kind' must be the literal type string (was an unresolved backtick symbol).
    structure(list(ImportContext.csvImportOptions = ImportContext.csvImportOptions,
        csvImportOptions = csvImportOptions, database = database, fileType = fileType,
        kind = "sql#importContext", uri = uri), class = "gar_ImportContext")
}
#' ImportContext.csvImportOptions Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Options for importing data as CSV.
#'
#' @param columns The columns to which CSV data is imported
#' @param table The table to which CSV data is imported
#'
#' @return ImportContext.csvImportOptions object
#'
#' @family ImportContext functions
#' @export
ImportContext.csvImportOptions <- function(columns = NULL, table = NULL) {
    opts <- list(columns = columns, table = table)
    class(opts) <- "gar_ImportContext.csvImportOptions"
    opts
}
#' InstancesCloneRequest Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Database instance clone request.
#'
#' @param cloneContext Contains details about the clone operation
#'
#' @return InstancesCloneRequest object
#'
#' @family InstancesCloneRequest functions
#' @export
InstancesCloneRequest <- function(cloneContext = NULL) {
    req <- list(cloneContext = cloneContext)
    class(req) <- "gar_InstancesCloneRequest"
    req
}
#' InstancesExportRequest Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Database instance export request.
#'
#' @param exportContext Contains details about the export operation
#'
#' @return InstancesExportRequest object
#'
#' @family InstancesExportRequest functions
#' @export
InstancesExportRequest <- function(exportContext = NULL) {
    req <- list(exportContext = exportContext)
    class(req) <- "gar_InstancesExportRequest"
    req
}
#' InstancesFailoverRequest Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Instance failover request.
#'
#' @param failoverContext Failover Context
#'
#' @return InstancesFailoverRequest object
#'
#' @family InstancesFailoverRequest functions
#' @export
InstancesFailoverRequest <- function(failoverContext = NULL) {
    req <- list(failoverContext = failoverContext)
    class(req) <- "gar_InstancesFailoverRequest"
    req
}
#' InstancesImportRequest Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Database instance import request.
#'
#' @param importContext Contains details about the import operation
#'
#' @return InstancesImportRequest object
#'
#' @family InstancesImportRequest functions
#' @export
InstancesImportRequest <- function(importContext = NULL) {
    req <- list(importContext = importContext)
    class(req) <- "gar_InstancesImportRequest"
    req
}
#' InstancesListResponse Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Database instances list response.
#'
#' @param items List of database instance resources
#' @param nextPageToken The continuation token, used to page through large result sets
#'
#' @return InstancesListResponse object
#'
#' @family InstancesListResponse functions
#' @export
InstancesListResponse <- function(items = NULL, nextPageToken = NULL) {
    # 'kind' must be the literal type string (was an unresolved backtick symbol).
    structure(list(items = items, kind = "sql#instancesList", nextPageToken = nextPageToken),
        class = "gar_InstancesListResponse")
}
#' InstancesRestoreBackupRequest Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Database instance restore backup request.
#'
#' @param restoreBackupContext Parameters required to perform the restore backup operation
#'
#' @return InstancesRestoreBackupRequest object
#'
#' @family InstancesRestoreBackupRequest functions
#' @export
InstancesRestoreBackupRequest <- function(restoreBackupContext = NULL) {
    req <- list(restoreBackupContext = restoreBackupContext)
    class(req) <- "gar_InstancesRestoreBackupRequest"
    req
}
#' IpConfiguration Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' IP Management configuration.
#'
#' @param authorizedNetworks The list of external networks that are allowed to connect to the instance using the IP
#' @param ipv4Enabled Whether the instance should be assigned an IP address or not
#' @param requireSsl Whether the mysqld should default to 'REQUIRE X509' for users connecting over IP
#'
#' @return IpConfiguration object
#'
#' @family IpConfiguration functions
#' @export
IpConfiguration <- function(authorizedNetworks = NULL, ipv4Enabled = NULL, requireSsl = NULL) {
    cfg <- list(authorizedNetworks = authorizedNetworks, ipv4Enabled = ipv4Enabled,
        requireSsl = requireSsl)
    class(cfg) <- "gar_IpConfiguration"
    cfg
}
#' IpMapping Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Database instance IP Mapping.
#'
#' @param ipAddress The IP address assigned
#' @param timeToRetire The due time for this IP to be retired in RFC 3339 format, for example 2012-11-15T16:19:00
#'
#' @return IpMapping object
#'
#' @family IpMapping functions
#' @export
IpMapping <- function(ipAddress = NULL, timeToRetire = NULL) {
    mapping <- list(ipAddress = ipAddress, timeToRetire = timeToRetire)
    class(mapping) <- "gar_IpMapping"
    mapping
}
#' LocationPreference Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Preferred location. This specifies where a Cloud SQL instance should preferably be located, either in a specific Compute Engine zone, or co-located with an App Engine application. Note that if the preferred location is not available, the instance will be located as close as possible within the region. Only one location may be specified.
#'
#' @param followGaeApplication The AppEngine application to follow, it must be in the same region as the Cloud SQL instance
#' @param zone The preferred Compute Engine zone (e
#'
#' @return LocationPreference object
#'
#' @family LocationPreference functions
#' @export
LocationPreference <- function(followGaeApplication = NULL, zone = NULL) {
    # 'kind' must be the literal type string (was an unresolved backtick symbol).
    structure(list(followGaeApplication = followGaeApplication, kind = "sql#locationPreference",
        zone = zone), class = "gar_LocationPreference")
}
#' MaintenanceWindow Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Maintenance window. This specifies when a v2 Cloud SQL instance should preferably be restarted for system maintenance puruposes.
#'
#' @param day day of week (1-7), starting on Monday
#' @param hour hour of day - 0 to 23
#' @param updateTrack No description
#'
#' @return MaintenanceWindow object
#'
#' @family MaintenanceWindow functions
#' @export
MaintenanceWindow <- function(day = NULL, hour = NULL, updateTrack = NULL) {
    # 'kind' must be the literal type string (was an unresolved backtick symbol).
    structure(list(day = day, hour = hour, kind = "sql#maintenanceWindow", updateTrack = updateTrack),
        class = "gar_MaintenanceWindow")
}
#' MySqlReplicaConfiguration Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Read-replica configuration specific to MySQL databases.
#'
#' @param caCertificate PEM representation of the trusted CA's x509 certificate
#' @param clientCertificate PEM representation of the slave's x509 certificate
#' @param clientKey PEM representation of the slave's private key
#' @param connectRetryInterval Seconds to wait between connect retries
#' @param dumpFilePath Path to a SQL dump file in Google Cloud Storage from which the slave instance is to be created
#' @param masterHeartbeatPeriod Interval in milliseconds between replication heartbeats
#' @param password The password for the replication connection
#' @param sslCipher A list of permissible ciphers to use for SSL encryption
#' @param username The username for the replication connection
#' @param verifyServerCertificate Whether or not to check the master's Common Name value in the certificate that it sends during the SSL handshake
#'
#' @return MySqlReplicaConfiguration object
#'
#' @family MySqlReplicaConfiguration functions
#' @export
MySqlReplicaConfiguration <- function(caCertificate = NULL, clientCertificate = NULL,
    clientKey = NULL, connectRetryInterval = NULL, dumpFilePath = NULL, masterHeartbeatPeriod = NULL,
    password = NULL, sslCipher = NULL, username = NULL, verifyServerCertificate = NULL) {
    # 'kind' must be the literal type string (was an unresolved backtick symbol).
    structure(list(caCertificate = caCertificate, clientCertificate = clientCertificate,
        clientKey = clientKey, connectRetryInterval = connectRetryInterval, dumpFilePath = dumpFilePath,
        kind = "sql#mysqlReplicaConfiguration", masterHeartbeatPeriod = masterHeartbeatPeriod,
        password = password, sslCipher = sslCipher, username = username, verifyServerCertificate = verifyServerCertificate),
        class = "gar_MySqlReplicaConfiguration")
}
#' OnPremisesConfiguration Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' On-premises instance configuration.
#'
#' @param hostPort The host and port of the on-premises instance in host:port format
#'
#' @return OnPremisesConfiguration object
#'
#' @family OnPremisesConfiguration functions
#' @export
OnPremisesConfiguration <- function(hostPort = NULL) {
    # 'kind' must be the literal type string (was an unresolved backtick symbol).
    structure(list(hostPort = hostPort, kind = "sql#onPremisesConfiguration"), class = "gar_OnPremisesConfiguration")
}
#' Operation Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' An Operations resource contains information about database instance operations such as create, delete, and restart. Operations resources are created in response to operations that were initiated; you never create them directly.
#'
#' @param endTime The time this operation finished in UTC timezone in RFC 3339 format, for example 2012-11-15T16:19:00
#' @param error If errors occurred during processing of this operation, this field will be populated
#' @param exportContext The context for export operation, if applicable
#' @param importContext The context for import operation, if applicable
#' @param insertTime The time this operation was enqueued in UTC timezone in RFC 3339 format, for example 2012-11-15T16:19:00
#' @param name An identifier that uniquely identifies the operation
#' @param operationType The type of the operation
#' @param selfLink The URI of this resource
#' @param startTime The time this operation actually started in UTC timezone in RFC 3339 format, for example 2012-11-15T16:19:00
#' @param status The status of an operation
#' @param targetId Name of the database instance related to this operation
#' @param targetLink The URI of the instance related to the operation
#' @param targetProject The project ID of the target instance related to this operation
#' @param user The email address of the user who initiated this operation
#'
#' @return Operation object
#'
#' @family Operation functions
#' @export
Operation <- function(endTime = NULL, error = NULL, exportContext = NULL, importContext = NULL,
    insertTime = NULL, name = NULL, operationType = NULL, selfLink = NULL, startTime = NULL,
    status = NULL, targetId = NULL, targetLink = NULL, targetProject = NULL, user = NULL) {
    # 'kind' must be the literal type string (was an unresolved backtick symbol).
    structure(list(endTime = endTime, error = error, exportContext = exportContext,
        importContext = importContext, insertTime = insertTime, kind = "sql#operation",
        name = name, operationType = operationType, selfLink = selfLink, startTime = startTime,
        status = status, targetId = targetId, targetLink = targetLink, targetProject = targetProject,
        user = user), class = "gar_Operation")
}
#' OperationError Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Database instance operation error.
#'
#' @param code Identifies the specific error that occurred
#' @param message Additional information about the error encountered
#'
#' @return OperationError object
#'
#' @family OperationError functions
#' @export
OperationError <- function(code = NULL, message = NULL) {
    # 'kind' must be the literal type string (was an unresolved backtick symbol).
    structure(list(code = code, kind = "sql#operationError", message = message),
        class = "gar_OperationError")
}
#' OperationErrors Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Database instance operation errors list wrapper.
#'
#' @param errors The list of errors encountered while processing this operation
#'
#' @return OperationErrors object
#'
#' @family OperationErrors functions
#' @export
OperationErrors <- function(errors = NULL) {
    # 'kind' must be the literal type string (was an unresolved backtick symbol).
    structure(list(errors = errors, kind = "sql#operationErrors"), class = "gar_OperationErrors")
}
#' OperationsListResponse Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Database instance list operations response.
#'
#' @param items List of operation resources
#' @param nextPageToken The continuation token, used to page through large result sets
#'
#' @return OperationsListResponse object
#'
#' @family OperationsListResponse functions
#' @export
OperationsListResponse <- function(items = NULL, nextPageToken = NULL) {
    # 'kind' must be the literal type string (was an unresolved backtick symbol).
    structure(list(items = items, kind = "sql#operationsList", nextPageToken = nextPageToken),
        class = "gar_OperationsListResponse")
}
#' ReplicaConfiguration Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Read-replica configuration for connecting to the master.
#'
#' @param failoverTarget Specifies if the replica is the failover target
#' @param mysqlReplicaConfiguration MySQL specific configuration when replicating from a MySQL on-premises master
#'
#' @return ReplicaConfiguration object
#'
#' @family ReplicaConfiguration functions
#' @export
ReplicaConfiguration <- function(failoverTarget = NULL, mysqlReplicaConfiguration = NULL) {
    # 'kind' must be the literal type string (was an unresolved backtick symbol).
    structure(list(failoverTarget = failoverTarget, kind = "sql#replicaConfiguration",
        mysqlReplicaConfiguration = mysqlReplicaConfiguration), class = "gar_ReplicaConfiguration")
}
#' RestoreBackupContext Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Database instance restore from backup context.
#'
#' @param backupRunId The ID of the backup run to restore from
#' @param instanceId The ID of the instance that the backup was taken from
#'
#' @return RestoreBackupContext object
#'
#' @family RestoreBackupContext functions
#' @export
RestoreBackupContext <- function(backupRunId = NULL, instanceId = NULL) {
    # 'kind' must be the literal type string (was an unresolved backtick symbol).
    structure(list(backupRunId = backupRunId, instanceId = instanceId, kind = "sql#restoreBackupContext"),
        class = "gar_RestoreBackupContext")
}
#' Settings Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Database instance settings.
#'
#' @param activationPolicy The activation policy specifies when the instance is activated; it is applicable only when the instance state is RUNNABLE
#' @param authorizedGaeApplications The App Engine app IDs that can access this instance
#' @param backupConfiguration The daily backup configuration for the instance
#' @param crashSafeReplicationEnabled Configuration specific to read replica instances
#' @param dataDiskSizeGb The size of data disk, in GB
#' @param dataDiskType The type of data disk
#' @param databaseFlags The database flags passed to the instance at startup
#' @param databaseReplicationEnabled Configuration specific to read replica instances
#' @param ipConfiguration The settings for IP Management
#' @param locationPreference The location preference settings
#' @param maintenanceWindow The maintenance window for this instance
#' @param pricingPlan The pricing plan for this instance
#' @param replicationType The type of replication this instance uses
#' @param settingsVersion The version of instance settings
#' @param storageAutoResize Configuration to increase storage size automatically
#' @param tier The tier of service for this instance, for example D1, D2
#'
#' @return Settings object
#'
#' @family Settings functions
#' @export
Settings <- function(activationPolicy = NULL, authorizedGaeApplications = NULL, backupConfiguration = NULL,
    crashSafeReplicationEnabled = NULL, dataDiskSizeGb = NULL, dataDiskType = NULL,
    databaseFlags = NULL, databaseReplicationEnabled = NULL, ipConfiguration = NULL,
    locationPreference = NULL, maintenanceWindow = NULL, pricingPlan = NULL, replicationType = NULL,
    settingsVersion = NULL, storageAutoResize = NULL, tier = NULL) {
    # 'kind' must be the literal type string (was an unresolved backtick symbol).
    structure(list(activationPolicy = activationPolicy, authorizedGaeApplications = authorizedGaeApplications,
        backupConfiguration = backupConfiguration, crashSafeReplicationEnabled = crashSafeReplicationEnabled,
        dataDiskSizeGb = dataDiskSizeGb, dataDiskType = dataDiskType, databaseFlags = databaseFlags,
        databaseReplicationEnabled = databaseReplicationEnabled, ipConfiguration = ipConfiguration,
        kind = "sql#settings", locationPreference = locationPreference, maintenanceWindow = maintenanceWindow,
        pricingPlan = pricingPlan, replicationType = replicationType, settingsVersion = settingsVersion,
        storageAutoResize = storageAutoResize, tier = tier), class = "gar_Settings")
}
#' SslCert Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' SslCerts Resource
#'
#' @param cert PEM representation
#' @param certSerialNumber Serial number, as extracted from the certificate
#' @param commonName User supplied name
#' @param createTime The time when the certificate was created in RFC 3339 format, for example 2012-11-15T16:19:00
#' @param expirationTime The time when the certificate expires in RFC 3339 format, for example 2012-11-15T16:19:00
#' @param instance Name of the database instance
#' @param selfLink The URI of this resource
#' @param sha1Fingerprint Sha1 Fingerprint
#'
#' @return SslCert object
#'
#' @family SslCert functions
#' @export
SslCert <- function(cert = NULL, certSerialNumber = NULL, commonName = NULL, createTime = NULL,
    expirationTime = NULL, instance = NULL, selfLink = NULL, sha1Fingerprint = NULL) {
    # 'kind' must be the literal type string (was an unresolved backtick symbol).
    structure(list(cert = cert, certSerialNumber = certSerialNumber, commonName = commonName,
        createTime = createTime, expirationTime = expirationTime, instance = instance,
        kind = "sql#sslCert", selfLink = selfLink, sha1Fingerprint = sha1Fingerprint),
        class = "gar_SslCert")
}
#' SslCertDetail Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' SslCertDetail.
#'
#' @param certInfo The public information about the cert
#' @param certPrivateKey The private key for the client cert, in pem format
#'
#' @return SslCertDetail object
#'
#' @family SslCertDetail functions
#' @export
SslCertDetail <- function(certInfo = NULL, certPrivateKey = NULL) {
    detail <- list(certInfo = certInfo, certPrivateKey = certPrivateKey)
    class(detail) <- "gar_SslCertDetail"
    detail
}
#' SslCertsCreateEphemeralRequest Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' SslCerts create ephemeral certificate request.
#'
#' @param public_key PEM encoded public key to include in the signed certificate
#'
#' @return SslCertsCreateEphemeralRequest object
#'
#' @family SslCertsCreateEphemeralRequest functions
#' @export
SslCertsCreateEphemeralRequest <- function(public_key = NULL) {
    req <- list(public_key = public_key)
    class(req) <- "gar_SslCertsCreateEphemeralRequest"
    req
}
#' SslCertsInsertRequest Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' SslCerts insert request.
#'
#' @param commonName User supplied name
#'
#' @return SslCertsInsertRequest object
#'
#' @family SslCertsInsertRequest functions
#' @export
SslCertsInsertRequest <- function(commonName = NULL) {
    req <- list(commonName = commonName)
    class(req) <- "gar_SslCertsInsertRequest"
    req
}
#' SslCertsInsertResponse Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' SslCert insert response.
#'
#' @param clientCert The new client certificate and private key
#' @param operation The operation to track the ssl certs insert request
#' @param serverCaCert The server Certificate Authority's certificate
#'
#' @return SslCertsInsertResponse object
#'
#' @family SslCertsInsertResponse functions
#' @export
SslCertsInsertResponse <- function(clientCert = NULL, operation = NULL, serverCaCert = NULL) {
    # 'kind' must be the literal type string (was an unresolved backtick symbol).
    structure(list(clientCert = clientCert, kind = "sql#sslCertsInsert", operation = operation,
        serverCaCert = serverCaCert), class = "gar_SslCertsInsertResponse")
}
#' SslCertsListResponse Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' SslCerts list response.
#'
#' @param items List of client certificates for the instance
#'
#' @return SslCertsListResponse object
#'
#' @family SslCertsListResponse functions
#' @export
SslCertsListResponse <- function(items = NULL) {
    # 'kind' must be the literal type string (was an unresolved backtick symbol).
    structure(list(items = items, kind = "sql#sslCertsList"), class = "gar_SslCertsListResponse")
}
#' Tier Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' A Google Cloud SQL service tier resource.
#'
#' @param DiskQuota The maximum disk size of this tier in bytes
#' @param RAM The maximum RAM usage of this tier in bytes
#' @param region The applicable regions for this tier
#' @param tier An identifier for the service tier, for example D1, D2 etc
#'
#' @return Tier object
#'
#' @family Tier functions
#' @export
Tier <- function(DiskQuota = NULL, RAM = NULL, region = NULL, tier = NULL) {
    # 'kind' must be the literal type string (was an unresolved backtick symbol).
    structure(list(DiskQuota = DiskQuota, RAM = RAM, kind = "sql#tier", region = region,
        tier = tier), class = "gar_Tier")
}
#' TiersListResponse Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Tiers list response.
#'
#' @param items List of tiers
#'
#' @return TiersListResponse object
#'
#' @family TiersListResponse functions
#' @export
TiersListResponse <- function(items = NULL) {
    # 'kind' must be the literal type string (was an unresolved backtick symbol).
    structure(list(items = items, kind = "sql#tiersList"), class = "gar_TiersListResponse")
}
#' User Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' A Cloud SQL user resource.
#'
#' @param etag HTTP 1
#' @param host The host name from which the user can connect
#' @param instance The name of the Cloud SQL instance
#' @param name The name of the user in the Cloud SQL instance
#' @param password The password for the user
#' @param project The project ID of the project containing the Cloud SQL database
#'
#' @return User object
#'
#' @family User functions
#' @export
User <- function(etag = NULL, host = NULL, instance = NULL, name = NULL, password = NULL,
    project = NULL) {
    # 'kind' must be the literal type string (was an unresolved backtick symbol).
    structure(list(etag = etag, host = host, instance = instance, kind = "sql#user",
        name = name, password = password, project = project), class = "gar_User")
}
#' UsersListResponse Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' User list response.
#'
#' @param items List of user resources in the instance
#' @param nextPageToken An identifier that uniquely identifies the operation
#'
#' @return UsersListResponse object
#'
#' @family UsersListResponse functions
#' @export
UsersListResponse <- function(items = NULL, nextPageToken = NULL) {
    # 'kind' must be the literal type string (was an unresolved backtick symbol).
    structure(list(items = items, kind = "sql#usersList", nextPageToken = nextPageToken),
        class = "gar_UsersListResponse")
}
#' Cloud SQL Administration API Objects
#' Creates and configures Cloud SQL instances, which provide fully-managed MySQL databases.
#'
#' Auto-generated code by googleAuthR::gar_create_api_objects
#' at 2016-09-03 23:48:08
#' filename: /Users/mark/dev/R/autoGoogleAPI/googlesqladminv1beta4.auto/R/sqladmin_objects.R
#' api_json: api_json
#'
#' Objects for use by the functions created by googleAuthR::gar_create_api_skeleton
#' AclEntry Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' An entry for an Access Control list.
#'
#' @param expirationTime The time when this access control entry expires in RFC 3339 format, for example 2012-11-15T16:19:00
#' @param name An optional label to identify this entry
#' @param value The whitelisted value for the access control list
#'
#' @return AclEntry object
#'
#' @family AclEntry functions
#' @export
AclEntry <- function(expirationTime = NULL, name = NULL, value = NULL) {
structure(list(expirationTime = expirationTime, kind = `sql#aclEntry`, name = name,
value = value), class = "gar_AclEntry")
}
#' BackupConfiguration Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Database instance backup configuration.
#'
#' @param binaryLogEnabled Whether binary log is enabled
#' @param enabled Whether this configuration is enabled
#' @param startTime Start time for the daily backup configuration in UTC timezone in the 24 hour format - HH:MM
#'
#' @return BackupConfiguration object
#'
#' @family BackupConfiguration functions
#' @export
BackupConfiguration <- function(binaryLogEnabled = NULL, enabled = NULL, startTime = NULL) {
    # 'kind' must be the literal type string (was an unresolved backtick symbol).
    structure(list(binaryLogEnabled = binaryLogEnabled, enabled = enabled, kind = "sql#backupConfiguration",
        startTime = startTime), class = "gar_BackupConfiguration")
}
#' BackupRun Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' A database instance backup run resource.
#'
#' @param description The description of this run, only applicable to on-demand backups
#' @param endTime The time the backup operation completed in UTC timezone in RFC 3339 format, for example 2012-11-15T16:19:00
#' @param enqueuedTime The time the run was enqueued in UTC timezone in RFC 3339 format, for example 2012-11-15T16:19:00
#' @param error Information about why the backup operation failed
#' @param id A unique identifier for this backup run
#' @param instance Name of the database instance
#' @param selfLink The URI of this resource
#' @param startTime The time the backup operation actually started in UTC timezone in RFC 3339 format, for example 2012-11-15T16:19:00
#' @param status The status of this run
#' @param type The type of this run; can be either 'AUTOMATED' or 'ON_DEMAND'
#' @param windowStartTime The start time of the backup window during which this the backup was attempted in RFC 3339 format, for example 2012-11-15T16:19:00
#'
#' @return BackupRun object
#'
#' @family BackupRun functions
#' @export
BackupRun <- function(description = NULL, endTime = NULL, enqueuedTime = NULL, error = NULL,
    id = NULL, instance = NULL, selfLink = NULL, startTime = NULL, status = NULL,
    type = NULL, windowStartTime = NULL) {
    # 'kind' must be the literal type string (was an unresolved backtick symbol).
    structure(list(description = description, endTime = endTime, enqueuedTime = enqueuedTime,
        error = error, id = id, instance = instance, kind = "sql#backupRun", selfLink = selfLink,
        startTime = startTime, status = status, type = type, windowStartTime = windowStartTime),
        class = "gar_BackupRun")
}
#' BackupRunsListResponse Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Backup run list results.
#'
#' @param items A list of backup runs in reverse chronological order of the enqueued time
#' @param nextPageToken The continuation token, used to page through large result sets
#'
#' @return BackupRunsListResponse object
#'
#' @family BackupRunsListResponse functions
#' @export
BackupRunsListResponse <- function(items = NULL, nextPageToken = NULL) {
    # 'kind' must be the literal type string (was an unresolved backtick symbol).
    structure(list(items = items, kind = "sql#backupRunsList", nextPageToken = nextPageToken),
        class = "gar_BackupRunsListResponse")
}
#' BinLogCoordinates Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Binary log coordinates.
#'
#' @param binLogFileName Name of the binary log file for a Cloud SQL instance
#' @param binLogPosition Position (offset) within the binary log file
#'
#' @return BinLogCoordinates object
#'
#' @family BinLogCoordinates functions
#' @export
BinLogCoordinates <- function(binLogFileName = NULL, binLogPosition = NULL) {
    # 'kind' must be the literal type string (was an unresolved backtick symbol).
    structure(list(binLogFileName = binLogFileName, binLogPosition = binLogPosition,
        kind = "sql#binLogCoordinates"), class = "gar_BinLogCoordinates")
}
#' CloneContext Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Database instance clone context.
#'
#' @param binLogCoordinates Binary log coordinates, if specified, indentify the the position up to which the source instance should be cloned
#' @param destinationInstanceName Name of the Cloud SQL instance to be created as a clone
#'
#' @return CloneContext object
#'
#' @family CloneContext functions
#' @export
CloneContext <- function(binLogCoordinates = NULL, destinationInstanceName = NULL) {
    # 'kind' must be the literal type string (was an unresolved backtick symbol).
    structure(list(binLogCoordinates = binLogCoordinates, destinationInstanceName = destinationInstanceName,
        kind = "sql#cloneContext"), class = "gar_CloneContext")
}
#' Database Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' A database resource inside a Cloud SQL instance.
#'
#' @param charset The MySQL charset value
#' @param collation The MySQL collation value
#' @param etag HTTP 1
#' @param instance The name of the Cloud SQL instance
#' @param name The name of the database in the Cloud SQL instance
#' @param project The project ID of the project containing the Cloud SQL database
#' @param selfLink The URI of this resource
#'
#' @return Database object
#'
#' @family Database functions
#' @export
Database <- function(charset = NULL, collation = NULL, etag = NULL, instance = NULL,
    name = NULL, project = NULL, selfLink = NULL) {
    # 'kind' must be the literal type string (was an unresolved backtick symbol).
    structure(list(charset = charset, collation = collation, etag = etag, instance = instance,
        kind = "sql#database", name = name, project = project, selfLink = selfLink),
        class = "gar_Database")
}
#' DatabaseFlags Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' MySQL flags for Cloud SQL instances.
#'
#' @param name The name of the flag
#' @param value The value of the flag
#'
#' @return DatabaseFlags object
#'
#' @family DatabaseFlags functions
#' @export
DatabaseFlags <- function(name = NULL, value = NULL) {
    # Plain name/value pair tagged with the gar_ S3 class
    out <- list(name = name, value = value)
    class(out) <- "gar_DatabaseFlags"
    out
}
#' DatabaseInstance Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' A Cloud SQL instance resource.
#'
#' @param DatabaseInstance.failoverReplica The \link{DatabaseInstance.failoverReplica} object or list of objects
#' @param backendType FIRST_GEN: Basic Cloud SQL instance that runs in a Google-managed container
#' @param currentDiskSize The current disk usage of the instance in bytes
#' @param databaseVersion The database engine type and version
#' @param etag HTTP 1
#' @param failoverReplica The name and status of the failover replica
#' @param instanceType The instance type
#' @param ipAddresses The assigned IP addresses for the instance
#' @param ipv6Address The IPv6 address assigned to the instance
#' @param masterInstanceName The name of the instance which will act as master in the replication setup
#' @param maxDiskSize The maximum disk size of the instance in bytes
#' @param name Name of the Cloud SQL instance
#' @param onPremisesConfiguration Configuration specific to on-premises instances
#' @param project The project ID of the project containing the Cloud SQL instance
#' @param region The geographical region
#' @param replicaConfiguration Configuration specific to read-replicas replicating from on-premises masters
#' @param replicaNames The replicas of the instance
#' @param selfLink The URI of this resource
#' @param serverCaCert SSL configuration
#' @param serviceAccountEmailAddress The service account email address assigned to the instance
#' @param settings The user settings
#' @param state The current serving state of the Cloud SQL instance
#' @param suspensionReason If the instance state is SUSPENDED, the reason for the suspension
#'
#' @return DatabaseInstance object
#'
#' @family DatabaseInstance functions
#' @export
DatabaseInstance <- function(DatabaseInstance.failoverReplica = NULL, backendType = NULL,
    currentDiskSize = NULL, databaseVersion = NULL, etag = NULL, failoverReplica = NULL,
    instanceType = NULL, ipAddresses = NULL, ipv6Address = NULL, masterInstanceName = NULL,
    maxDiskSize = NULL, name = NULL, onPremisesConfiguration = NULL, project = NULL,
    region = NULL, replicaConfiguration = NULL, replicaNames = NULL, selfLink = NULL,
    serverCaCert = NULL, serviceAccountEmailAddress = NULL, settings = NULL, state = NULL,
    suspensionReason = NULL) {
    # kind must be the literal API type string (was an undefined backticked symbol)
    structure(list(DatabaseInstance.failoverReplica = DatabaseInstance.failoverReplica,
        backendType = backendType, currentDiskSize = currentDiskSize, databaseVersion = databaseVersion,
        etag = etag, failoverReplica = failoverReplica, instanceType = instanceType,
        ipAddresses = ipAddresses, ipv6Address = ipv6Address, kind = "sql#instance",
        masterInstanceName = masterInstanceName, maxDiskSize = maxDiskSize, name = name,
        onPremisesConfiguration = onPremisesConfiguration, project = project, region = region,
        replicaConfiguration = replicaConfiguration, replicaNames = replicaNames,
        selfLink = selfLink, serverCaCert = serverCaCert, serviceAccountEmailAddress = serviceAccountEmailAddress,
        settings = settings, state = state, suspensionReason = suspensionReason),
        class = "gar_DatabaseInstance")
}
#' DatabaseInstance.failoverReplica Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' The name and status of the failover replica. This property is applicable only to Second Generation instances.
#'
#' @param available The availability status of the failover replica
#' @param name The name of the failover replica
#'
#' @return DatabaseInstance.failoverReplica object
#'
#' @family DatabaseInstance functions
#' @export
DatabaseInstance.failoverReplica <- function(available = NULL, name = NULL) {
    # Availability flag plus replica name, tagged with the gar_ S3 class
    out <- list(available = available, name = name)
    class(out) <- "gar_DatabaseInstance.failoverReplica"
    out
}
#' DatabasesListResponse Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Database list response.
#'
#' @param items List of database resources in the instance
#'
#' @return DatabasesListResponse object
#'
#' @family DatabasesListResponse functions
#' @export
DatabasesListResponse <- function(items = NULL) {
    # kind must be the literal API type string (was an undefined backticked symbol)
    structure(list(items = items, kind = "sql#databasesList"), class = "gar_DatabasesListResponse")
}
#' ExportContext Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Database instance export context.
#'
#' @param ExportContext.csvExportOptions The \link{ExportContext.csvExportOptions} object or list of objects
#' @param ExportContext.sqlExportOptions The \link{ExportContext.sqlExportOptions} object or list of objects
#' @param csvExportOptions Options for exporting data as CSV
#' @param databases Databases (for example, guestbook) from which the export is made
#' @param fileType The file type for the specified uri
#' @param sqlExportOptions Options for exporting data as SQL statements
#' @param uri The path to the file in Google Cloud Storage where the export will be stored
#'
#' @return ExportContext object
#'
#' @family ExportContext functions
#' @export
ExportContext <- function(ExportContext.csvExportOptions = NULL, ExportContext.sqlExportOptions = NULL,
    csvExportOptions = NULL, databases = NULL, fileType = NULL, sqlExportOptions = NULL,
    uri = NULL) {
    # kind must be the literal API type string (was an undefined backticked symbol)
    structure(list(ExportContext.csvExportOptions = ExportContext.csvExportOptions,
        ExportContext.sqlExportOptions = ExportContext.sqlExportOptions, csvExportOptions = csvExportOptions,
        databases = databases, fileType = fileType, kind = "sql#exportContext", sqlExportOptions = sqlExportOptions,
        uri = uri), class = "gar_ExportContext")
}
#' ExportContext.csvExportOptions Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Options for exporting data as CSV.
#'
#' @param selectQuery The select query used to extract the data
#'
#' @return ExportContext.csvExportOptions object
#'
#' @family ExportContext functions
#' @export
ExportContext.csvExportOptions <- function(selectQuery = NULL) {
    # Single-field options holder, tagged with the gar_ S3 class
    out <- list(selectQuery = selectQuery)
    class(out) <- "gar_ExportContext.csvExportOptions"
    out
}
#' ExportContext.sqlExportOptions Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Options for exporting data as SQL statements.
#'
#' @param schemaOnly Export only schemas
#' @param tables Tables to export, or that were exported, from the specified database
#'
#' @return ExportContext.sqlExportOptions object
#'
#' @family ExportContext functions
#' @export
ExportContext.sqlExportOptions <- function(schemaOnly = NULL, tables = NULL) {
    # Two-field options holder, tagged with the gar_ S3 class
    out <- list(schemaOnly = schemaOnly, tables = tables)
    class(out) <- "gar_ExportContext.sqlExportOptions"
    out
}
#' FailoverContext Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Database instance failover context.
#'
#' @param settingsVersion The current settings version of this instance
#'
#' @return FailoverContext object
#'
#' @family FailoverContext functions
#' @export
FailoverContext <- function(settingsVersion = NULL) {
    # kind must be the literal API type string (was an undefined backticked symbol)
    structure(list(kind = "sql#failoverContext", settingsVersion = settingsVersion),
        class = "gar_FailoverContext")
}
#' Flag Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' A Google Cloud SQL service flag resource.
#'
#' @param allowedStringValues For STRING flags, a list of strings that the value can be set to
#' @param appliesTo The database version this flag applies to
#' @param maxValue For INTEGER flags, the maximum allowed value
#' @param minValue For INTEGER flags, the minimum allowed value
#' @param name This is the name of the flag
#' @param requiresRestart Indicates whether changing this flag will trigger a database restart
#' @param type The type of the flag
#'
#' @return Flag object
#'
#' @family Flag functions
#' @export
Flag <- function(allowedStringValues = NULL, appliesTo = NULL, maxValue = NULL, minValue = NULL,
    name = NULL, requiresRestart = NULL, type = NULL) {
    # kind must be the literal API type string (was an undefined backticked symbol)
    structure(list(allowedStringValues = allowedStringValues, appliesTo = appliesTo,
        kind = "sql#flag", maxValue = maxValue, minValue = minValue, name = name,
        requiresRestart = requiresRestart, type = type), class = "gar_Flag")
}
#' FlagsListResponse Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Flags list response.
#'
#' @param items List of flags
#'
#' @return FlagsListResponse object
#'
#' @family FlagsListResponse functions
#' @export
FlagsListResponse <- function(items = NULL) {
    # kind must be the literal API type string (was an undefined backticked symbol)
    structure(list(items = items, kind = "sql#flagsList"), class = "gar_FlagsListResponse")
}
#' ImportContext Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Database instance import context.
#'
#' @param ImportContext.csvImportOptions The \link{ImportContext.csvImportOptions} object or list of objects
#' @param csvImportOptions Options for importing data as CSV
#' @param database The database (for example, guestbook) to which the import is made
#' @param fileType The file type for the specified uri
#' @param uri A path to the file in Google Cloud Storage from which the import is made
#'
#' @return ImportContext object
#'
#' @family ImportContext functions
#' @export
ImportContext <- function(ImportContext.csvImportOptions = NULL, csvImportOptions = NULL,
    database = NULL, fileType = NULL, uri = NULL) {
    # kind must be the literal API type string (was an undefined backticked symbol)
    structure(list(ImportContext.csvImportOptions = ImportContext.csvImportOptions,
        csvImportOptions = csvImportOptions, database = database, fileType = fileType,
        kind = "sql#importContext", uri = uri), class = "gar_ImportContext")
}
#' ImportContext.csvImportOptions Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Options for importing data as CSV.
#'
#' @param columns The columns to which CSV data is imported
#' @param table The table to which CSV data is imported
#'
#' @return ImportContext.csvImportOptions object
#'
#' @family ImportContext functions
#' @export
ImportContext.csvImportOptions <- function(columns = NULL, table = NULL) {
    # Columns/table pair tagged with the gar_ S3 class
    out <- list(columns = columns, table = table)
    class(out) <- "gar_ImportContext.csvImportOptions"
    out
}
#' InstancesCloneRequest Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Database instance clone request.
#'
#' @param cloneContext Contains details about the clone operation
#'
#' @return InstancesCloneRequest object
#'
#' @family InstancesCloneRequest functions
#' @export
InstancesCloneRequest <- function(cloneContext = NULL) {
    # Request wrapper around a single cloneContext field
    out <- list(cloneContext = cloneContext)
    class(out) <- "gar_InstancesCloneRequest"
    out
}
#' InstancesExportRequest Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Database instance export request.
#'
#' @param exportContext Contains details about the export operation
#'
#' @return InstancesExportRequest object
#'
#' @family InstancesExportRequest functions
#' @export
InstancesExportRequest <- function(exportContext = NULL) {
    # Request wrapper around a single exportContext field
    out <- list(exportContext = exportContext)
    class(out) <- "gar_InstancesExportRequest"
    out
}
#' InstancesFailoverRequest Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Instance failover request.
#'
#' @param failoverContext Failover Context
#'
#' @return InstancesFailoverRequest object
#'
#' @family InstancesFailoverRequest functions
#' @export
InstancesFailoverRequest <- function(failoverContext = NULL) {
    # Request wrapper around a single failoverContext field
    out <- list(failoverContext = failoverContext)
    class(out) <- "gar_InstancesFailoverRequest"
    out
}
#' InstancesImportRequest Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Database instance import request.
#'
#' @param importContext Contains details about the import operation
#'
#' @return InstancesImportRequest object
#'
#' @family InstancesImportRequest functions
#' @export
InstancesImportRequest <- function(importContext = NULL) {
    # Request wrapper around a single importContext field
    out <- list(importContext = importContext)
    class(out) <- "gar_InstancesImportRequest"
    out
}
#' InstancesListResponse Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Database instances list response.
#'
#' @param items List of database instance resources
#' @param nextPageToken The continuation token, used to page through large result sets
#'
#' @return InstancesListResponse object
#'
#' @family InstancesListResponse functions
#' @export
InstancesListResponse <- function(items = NULL, nextPageToken = NULL) {
    # kind must be the literal API type string (was an undefined backticked symbol)
    structure(list(items = items, kind = "sql#instancesList", nextPageToken = nextPageToken),
        class = "gar_InstancesListResponse")
}
#' InstancesRestoreBackupRequest Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Database instance restore backup request.
#'
#' @param restoreBackupContext Parameters required to perform the restore backup operation
#'
#' @return InstancesRestoreBackupRequest object
#'
#' @family InstancesRestoreBackupRequest functions
#' @export
InstancesRestoreBackupRequest <- function(restoreBackupContext = NULL) {
    # Request wrapper around a single restoreBackupContext field
    out <- list(restoreBackupContext = restoreBackupContext)
    class(out) <- "gar_InstancesRestoreBackupRequest"
    out
}
#' IpConfiguration Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' IP Management configuration.
#'
#' @param authorizedNetworks The list of external networks that are allowed to connect to the instance using the IP
#' @param ipv4Enabled Whether the instance should be assigned an IP address or not
#' @param requireSsl Whether the mysqld should default to 'REQUIRE X509' for users connecting over IP
#'
#' @return IpConfiguration object
#'
#' @family IpConfiguration functions
#' @export
IpConfiguration <- function(authorizedNetworks = NULL, ipv4Enabled = NULL, requireSsl = NULL) {
    # Three-field configuration holder, tagged with the gar_ S3 class
    out <- list(authorizedNetworks = authorizedNetworks, ipv4Enabled = ipv4Enabled,
        requireSsl = requireSsl)
    class(out) <- "gar_IpConfiguration"
    out
}
#' IpMapping Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Database instance IP Mapping.
#'
#' @param ipAddress The IP address assigned
#' @param timeToRetire The due time for this IP to be retired in RFC 3339 format, for example 2012-11-15T16:19:00
#'
#' @return IpMapping object
#'
#' @family IpMapping functions
#' @export
IpMapping <- function(ipAddress = NULL, timeToRetire = NULL) {
    # Address/retirement pair tagged with the gar_ S3 class
    out <- list(ipAddress = ipAddress, timeToRetire = timeToRetire)
    class(out) <- "gar_IpMapping"
    out
}
#' LocationPreference Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Preferred location. This specifies where a Cloud SQL instance should preferably be located, either in a specific Compute Engine zone, or co-located with an App Engine application. Note that if the preferred location is not available, the instance will be located as close as possible within the region. Only one location may be specified.
#'
#' @param followGaeApplication The AppEngine application to follow, it must be in the same region as the Cloud SQL instance
#' @param zone The preferred Compute Engine zone (e
#'
#' @return LocationPreference object
#'
#' @family LocationPreference functions
#' @export
LocationPreference <- function(followGaeApplication = NULL, zone = NULL) {
    # kind must be the literal API type string (was an undefined backticked symbol)
    structure(list(followGaeApplication = followGaeApplication, kind = "sql#locationPreference",
        zone = zone), class = "gar_LocationPreference")
}
#' MaintenanceWindow Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Maintenance window. This specifies when a v2 Cloud SQL instance should preferably be restarted for system maintenance purposes.
#'
#' @param day day of week (1-7), starting on Monday
#' @param hour hour of day - 0 to 23
#' @param updateTrack No description
#'
#' @return MaintenanceWindow object
#'
#' @family MaintenanceWindow functions
#' @export
MaintenanceWindow <- function(day = NULL, hour = NULL, updateTrack = NULL) {
    # kind must be the literal API type string (was an undefined backticked symbol)
    structure(list(day = day, hour = hour, kind = "sql#maintenanceWindow", updateTrack = updateTrack),
        class = "gar_MaintenanceWindow")
}
#' MySqlReplicaConfiguration Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Read-replica configuration specific to MySQL databases.
#'
#' @param caCertificate PEM representation of the trusted CA's x509 certificate
#' @param clientCertificate PEM representation of the slave's x509 certificate
#' @param clientKey PEM representation of the slave's private key
#' @param connectRetryInterval Seconds to wait between connect retries
#' @param dumpFilePath Path to a SQL dump file in Google Cloud Storage from which the slave instance is to be created
#' @param masterHeartbeatPeriod Interval in milliseconds between replication heartbeats
#' @param password The password for the replication connection
#' @param sslCipher A list of permissible ciphers to use for SSL encryption
#' @param username The username for the replication connection
#' @param verifyServerCertificate Whether or not to check the master's Common Name value in the certificate that it sends during the SSL handshake
#'
#' @return MySqlReplicaConfiguration object
#'
#' @family MySqlReplicaConfiguration functions
#' @export
MySqlReplicaConfiguration <- function(caCertificate = NULL, clientCertificate = NULL,
    clientKey = NULL, connectRetryInterval = NULL, dumpFilePath = NULL, masterHeartbeatPeriod = NULL,
    password = NULL, sslCipher = NULL, username = NULL, verifyServerCertificate = NULL) {
    # kind must be the literal API type string (was an undefined backticked symbol)
    structure(list(caCertificate = caCertificate, clientCertificate = clientCertificate,
        clientKey = clientKey, connectRetryInterval = connectRetryInterval, dumpFilePath = dumpFilePath,
        kind = "sql#mysqlReplicaConfiguration", masterHeartbeatPeriod = masterHeartbeatPeriod,
        password = password, sslCipher = sslCipher, username = username, verifyServerCertificate = verifyServerCertificate),
        class = "gar_MySqlReplicaConfiguration")
}
#' OnPremisesConfiguration Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' On-premises instance configuration.
#'
#' @param hostPort The host and port of the on-premises instance in host:port format
#'
#' @return OnPremisesConfiguration object
#'
#' @family OnPremisesConfiguration functions
#' @export
OnPremisesConfiguration <- function(hostPort = NULL) {
    # kind must be the literal API type string (was an undefined backticked symbol)
    structure(list(hostPort = hostPort, kind = "sql#onPremisesConfiguration"), class = "gar_OnPremisesConfiguration")
}
#' Operation Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' An Operations resource contains information about database instance operations such as create, delete, and restart. Operations resources are created in response to operations that were initiated; you never create them directly.
#'
#' @param endTime The time this operation finished in UTC timezone in RFC 3339 format, for example 2012-11-15T16:19:00
#' @param error If errors occurred during processing of this operation, this field will be populated
#' @param exportContext The context for export operation, if applicable
#' @param importContext The context for import operation, if applicable
#' @param insertTime The time this operation was enqueued in UTC timezone in RFC 3339 format, for example 2012-11-15T16:19:00
#' @param name An identifier that uniquely identifies the operation
#' @param operationType The type of the operation
#' @param selfLink The URI of this resource
#' @param startTime The time this operation actually started in UTC timezone in RFC 3339 format, for example 2012-11-15T16:19:00
#' @param status The status of an operation
#' @param targetId Name of the database instance related to this operation
#' @param targetLink The URI of the instance related to the operation
#' @param targetProject The project ID of the target instance related to this operation
#' @param user The email address of the user who initiated this operation
#'
#' @return Operation object
#'
#' @family Operation functions
#' @export
Operation <- function(endTime = NULL, error = NULL, exportContext = NULL, importContext = NULL,
    insertTime = NULL, name = NULL, operationType = NULL, selfLink = NULL, startTime = NULL,
    status = NULL, targetId = NULL, targetLink = NULL, targetProject = NULL, user = NULL) {
    # kind must be the literal API type string (was an undefined backticked symbol)
    structure(list(endTime = endTime, error = error, exportContext = exportContext,
        importContext = importContext, insertTime = insertTime, kind = "sql#operation",
        name = name, operationType = operationType, selfLink = selfLink, startTime = startTime,
        status = status, targetId = targetId, targetLink = targetLink, targetProject = targetProject,
        user = user), class = "gar_Operation")
}
#' OperationError Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Database instance operation error.
#'
#' @param code Identifies the specific error that occurred
#' @param message Additional information about the error encountered
#'
#' @return OperationError object
#'
#' @family OperationError functions
#' @export
OperationError <- function(code = NULL, message = NULL) {
    # kind must be the literal API type string (was an undefined backticked symbol)
    structure(list(code = code, kind = "sql#operationError", message = message),
        class = "gar_OperationError")
}
#' OperationErrors Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Database instance operation errors list wrapper.
#'
#' @param errors The list of errors encountered while processing this operation
#'
#' @return OperationErrors object
#'
#' @family OperationErrors functions
#' @export
OperationErrors <- function(errors = NULL) {
    # kind must be the literal API type string (was an undefined backticked symbol)
    structure(list(errors = errors, kind = "sql#operationErrors"), class = "gar_OperationErrors")
}
#' OperationsListResponse Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Database instance list operations response.
#'
#' @param items List of operation resources
#' @param nextPageToken The continuation token, used to page through large result sets
#'
#' @return OperationsListResponse object
#'
#' @family OperationsListResponse functions
#' @export
OperationsListResponse <- function(items = NULL, nextPageToken = NULL) {
    # kind must be the literal API type string (was an undefined backticked symbol)
    structure(list(items = items, kind = "sql#operationsList", nextPageToken = nextPageToken),
        class = "gar_OperationsListResponse")
}
#' ReplicaConfiguration Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Read-replica configuration for connecting to the master.
#'
#' @param failoverTarget Specifies if the replica is the failover target
#' @param mysqlReplicaConfiguration MySQL specific configuration when replicating from a MySQL on-premises master
#'
#' @return ReplicaConfiguration object
#'
#' @family ReplicaConfiguration functions
#' @export
ReplicaConfiguration <- function(failoverTarget = NULL, mysqlReplicaConfiguration = NULL) {
    # kind must be the literal API type string (was an undefined backticked symbol)
    structure(list(failoverTarget = failoverTarget, kind = "sql#replicaConfiguration",
        mysqlReplicaConfiguration = mysqlReplicaConfiguration), class = "gar_ReplicaConfiguration")
}
#' RestoreBackupContext Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Database instance restore from backup context.
#'
#' @param backupRunId The ID of the backup run to restore from
#' @param instanceId The ID of the instance that the backup was taken from
#'
#' @return RestoreBackupContext object
#'
#' @family RestoreBackupContext functions
#' @export
RestoreBackupContext <- function(backupRunId = NULL, instanceId = NULL) {
    # kind must be the literal API type string (was an undefined backticked symbol)
    structure(list(backupRunId = backupRunId, instanceId = instanceId, kind = "sql#restoreBackupContext"),
        class = "gar_RestoreBackupContext")
}
#' Settings Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Database instance settings.
#'
#' @param activationPolicy The activation policy specifies when the instance is activated; it is applicable only when the instance state is RUNNABLE
#' @param authorizedGaeApplications The App Engine app IDs that can access this instance
#' @param backupConfiguration The daily backup configuration for the instance
#' @param crashSafeReplicationEnabled Configuration specific to read replica instances
#' @param dataDiskSizeGb The size of data disk, in GB
#' @param dataDiskType The type of data disk
#' @param databaseFlags The database flags passed to the instance at startup
#' @param databaseReplicationEnabled Configuration specific to read replica instances
#' @param ipConfiguration The settings for IP Management
#' @param locationPreference The location preference settings
#' @param maintenanceWindow The maintenance window for this instance
#' @param pricingPlan The pricing plan for this instance
#' @param replicationType The type of replication this instance uses
#' @param settingsVersion The version of instance settings
#' @param storageAutoResize Configuration to increase storage size automatically
#' @param tier The tier of service for this instance, for example D1, D2
#'
#' @return Settings object
#'
#' @family Settings functions
#' @export
Settings <- function(activationPolicy = NULL, authorizedGaeApplications = NULL, backupConfiguration = NULL,
    crashSafeReplicationEnabled = NULL, dataDiskSizeGb = NULL, dataDiskType = NULL,
    databaseFlags = NULL, databaseReplicationEnabled = NULL, ipConfiguration = NULL,
    locationPreference = NULL, maintenanceWindow = NULL, pricingPlan = NULL, replicationType = NULL,
    settingsVersion = NULL, storageAutoResize = NULL, tier = NULL) {
    # kind must be the literal API type string (was an undefined backticked symbol)
    structure(list(activationPolicy = activationPolicy, authorizedGaeApplications = authorizedGaeApplications,
        backupConfiguration = backupConfiguration, crashSafeReplicationEnabled = crashSafeReplicationEnabled,
        dataDiskSizeGb = dataDiskSizeGb, dataDiskType = dataDiskType, databaseFlags = databaseFlags,
        databaseReplicationEnabled = databaseReplicationEnabled, ipConfiguration = ipConfiguration,
        kind = "sql#settings", locationPreference = locationPreference, maintenanceWindow = maintenanceWindow,
        pricingPlan = pricingPlan, replicationType = replicationType, settingsVersion = settingsVersion,
        storageAutoResize = storageAutoResize, tier = tier), class = "gar_Settings")
}
#' SslCert Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' SslCerts Resource
#'
#' @param cert PEM representation
#' @param certSerialNumber Serial number, as extracted from the certificate
#' @param commonName User supplied name
#' @param createTime The time when the certificate was created in RFC 3339 format, for example 2012-11-15T16:19:00
#' @param expirationTime The time when the certificate expires in RFC 3339 format, for example 2012-11-15T16:19:00
#' @param instance Name of the database instance
#' @param selfLink The URI of this resource
#' @param sha1Fingerprint Sha1 Fingerprint
#'
#' @return SslCert object
#'
#' @family SslCert functions
#' @export
SslCert <- function(cert = NULL, certSerialNumber = NULL, commonName = NULL, createTime = NULL,
    expirationTime = NULL, instance = NULL, selfLink = NULL, sha1Fingerprint = NULL) {
    # kind must be the literal API type string (was an undefined backticked symbol)
    structure(list(cert = cert, certSerialNumber = certSerialNumber, commonName = commonName,
        createTime = createTime, expirationTime = expirationTime, instance = instance,
        kind = "sql#sslCert", selfLink = selfLink, sha1Fingerprint = sha1Fingerprint),
        class = "gar_SslCert")
}
#' SslCertDetail Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' SslCertDetail.
#'
#' @param certInfo The public information about the cert
#' @param certPrivateKey The private key for the client cert, in pem format
#'
#' @return SslCertDetail object
#'
#' @family SslCertDetail functions
#' @export
SslCertDetail <- function(certInfo = NULL, certPrivateKey = NULL) {
    # Public/private certificate pair tagged with the gar_ S3 class
    out <- list(certInfo = certInfo, certPrivateKey = certPrivateKey)
    class(out) <- "gar_SslCertDetail"
    out
}
#' SslCertsCreateEphemeralRequest Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' SslCerts create ephemeral certificate request.
#'
#' @param public_key PEM encoded public key to include in the signed certificate
#'
#' @return SslCertsCreateEphemeralRequest object
#'
#' @family SslCertsCreateEphemeralRequest functions
#' @export
SslCertsCreateEphemeralRequest <- function(public_key = NULL) {
    # Request wrapper around a single public_key field
    out <- list(public_key = public_key)
    class(out) <- "gar_SslCertsCreateEphemeralRequest"
    out
}
#' SslCertsInsertRequest Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' SslCerts insert request.
#'
#' @param commonName User supplied name
#'
#' @return SslCertsInsertRequest object
#'
#' @family SslCertsInsertRequest functions
#' @export
SslCertsInsertRequest <- function(commonName = NULL) {
    # Request wrapper around a single commonName field
    out <- list(commonName = commonName)
    class(out) <- "gar_SslCertsInsertRequest"
    out
}
#' SslCertsInsertResponse Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' SslCert insert response.
#'
#' @param clientCert The new client certificate and private key
#' @param operation The operation to track the ssl certs insert request
#' @param serverCaCert The server Certificate Authority's certificate
#'
#' @return SslCertsInsertResponse object
#'
#' @family SslCertsInsertResponse functions
#' @export
SslCertsInsertResponse <- function(clientCert = NULL, operation = NULL, serverCaCert = NULL) {
    # kind must be the literal API type string (was an undefined backticked symbol)
    structure(list(clientCert = clientCert, kind = "sql#sslCertsInsert", operation = operation,
        serverCaCert = serverCaCert), class = "gar_SslCertsInsertResponse")
}
#' SslCertsListResponse Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' SslCerts list response.
#'
#' @param items List of client certificates for the instance
#'
#' @return SslCertsListResponse object
#'
#' @family SslCertsListResponse functions
#' @export
SslCertsListResponse <- function(items = NULL) {
    # kind must be the literal API type string (was an undefined backticked symbol)
    structure(list(items = items, kind = "sql#sslCertsList"), class = "gar_SslCertsListResponse")
}
#' Tier Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' A Google Cloud SQL service tier resource.
#'
#' @param DiskQuota The maximum disk size of this tier in bytes
#' @param RAM The maximum RAM usage of this tier in bytes
#' @param region The applicable regions for this tier
#' @param tier An identifier for the service tier, for example D1, D2 etc
#'
#' @return Tier object
#'
#' @family Tier functions
#' @export
Tier <- function(DiskQuota = NULL, RAM = NULL, region = NULL, tier = NULL) {
    # kind must be the literal API type string (was an undefined backticked symbol)
    structure(list(DiskQuota = DiskQuota, RAM = RAM, kind = "sql#tier", region = region,
        tier = tier), class = "gar_Tier")
}
#' TiersListResponse Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Tiers list response.
#'
#' @param items List of tiers
#'
#' @return TiersListResponse object
#'
#' @family TiersListResponse functions
#' @export
TiersListResponse <- function(items = NULL) {
    # kind must be the literal API type string (was an undefined backticked symbol)
    structure(list(items = items, kind = "sql#tiersList"), class = "gar_TiersListResponse")
}
#' User Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' A Cloud SQL user resource.
#'
#' @param etag HTTP 1
#' @param host The host name from which the user can connect
#' @param instance The name of the Cloud SQL instance
#' @param name The name of the user in the Cloud SQL instance
#' @param password The password for the user
#' @param project The project ID of the project containing the Cloud SQL database
#'
#' @return User object
#'
#' @family User functions
#' @export
User <- function(etag = NULL, host = NULL, instance = NULL, name = NULL, password = NULL,
    project = NULL) {
    # kind must be the literal API type string (was an undefined backticked symbol)
    structure(list(etag = etag, host = host, instance = instance, kind = "sql#user",
        name = name, password = password, project = project), class = "gar_User")
}
#' UsersListResponse Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' User list response.
#'
#' @param items List of user resources in the instance
#' @param nextPageToken An identifier that uniquely identifies the operation
#'
#' @return UsersListResponse object
#'
#' @family UsersListResponse functions
#' @export
UsersListResponse <- function(items = NULL, nextPageToken = NULL) {
    # kind must be the literal API type string (was an undefined backticked symbol)
    structure(list(items = items, kind = "sql#usersList", nextPageToken = nextPageToken),
        class = "gar_UsersListResponse")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/csl_date.R
\name{csl_date}
\alias{csl_date}
\alias{csl_dates}
\title{A date conforming to the CSL schema}
\usage{
csl_date(
date_parts = list(),
season = NULL,
circa = NULL,
literal = NULL,
raw = NULL,
edtf = NULL
)
csl_dates(x = list())
}
\arguments{
\item{date_parts}{A list containing one or two dates in a list. Each date is
also represented using lists in the format of \code{list(year, month, day)}.
Different precision can be achieved by providing an incomplete list:
\code{list(year, month)}. A range of dates can be specified by providing two
dates, where the first date is the start and second date is the end of the
interval.}
\item{season, circa, literal, raw, edtf}{Additional date variable properties as
described in the schema.}
\item{x}{A list of \code{csl_date()} values.}
}
\description{
This class provides helper utilities to display, sort, and select attributes
from a date in the CSL format.
}
\examples{
# Single date
csl_date(date_parts = list(list(2020,03,05)))
# Date interval
csl_date(date_parts = list(list(2020,03,05), list(2020,08,25)))
}
\seealso{
\url{https://citeproc-js.readthedocs.io/en/latest/csl-json/markup.html#date-fields}
}
| /man/csl_date.Rd | no_license | LTW-2720/vitae | R | false | true | 1,259 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/csl_date.R
\name{csl_date}
\alias{csl_date}
\alias{csl_dates}
\title{A date conforming to the CSL schema}
\usage{
csl_date(
date_parts = list(),
season = NULL,
circa = NULL,
literal = NULL,
raw = NULL,
edtf = NULL
)
csl_dates(x = list())
}
\arguments{
\item{date_parts}{A list containing one or two dates in a list. Each date is
also represented using lists in the format of \code{list(year, month, day)}.
Different precision can be achieved by providing an incomplete list:
\code{list(year, month)}. A range of dates can be specified by providing two
dates, where the first date is the start and second date is the end of the
interval.}
\item{season, circa, literal, raw, edtf}{Additional date variable properties as
described in the schema.}
\item{x}{A list of \code{csl_date()} values.}
}
\description{
This class provides helper utilities to display, sort, and select attributes
from a date in the CSL format.
}
\examples{
# Single date
csl_date(date_parts = list(list(2020,03,05)))
# Date interval
csl_date(date_parts = list(list(2020,03,05), list(2020,08,25)))
}
\seealso{
\url{https://citeproc-js.readthedocs.io/en/latest/csl-json/markup.html#date-fields}
}
|
library(roll)

### Name: roll_sum
### Title: Rolling Sums
### Aliases: roll_sum

### ** Examples

# Simulate a small panel of observations: 15 rows, 3 series.
series_count <- 3
obs_count <- 15
obs <- matrix(rnorm(obs_count * series_count), nrow = obs_count, ncol = series_count)

# Unweighted rolling sums over a 5-observation window.
result <- roll_sum(obs, 5)

# Rolling sums where the window is weighted by exponential decay.
decay_weights <- 0.9 ^ (5:1)
result <- roll_sum(obs, 5, decay_weights)
| /data/genthat_extracted_code/roll/examples/roll_sum.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 327 | r | library(roll)
### Name: roll_sum
### Title: Rolling Sums
### Aliases: roll_sum

### ** Examples

# Build a 15 x 3 matrix of standard-normal draws to demonstrate roll_sum().
p <- 3
n <- 15
x_mat <- matrix(rnorm(n * p), nrow = n, ncol = p)

# Rolling sums over a window of width 5.
result <- roll_sum(x_mat, 5)

# The same rolling sums, but with exponentially decaying window weights.
w <- 0.9 ^ (5:1)
result <- roll_sum(x_mat, 5, w)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/run_forecasters.R
\name{run_forecasters}
\alias{run_forecasters}
\title{Run forecasters in specified list and populate a global variable with prediction_cards incrementally}
\usage{
run_forecasters(
forecasters,
response,
incidence_period = c("epiweek"),
ahead,
forecast_date,
geo_type = c("county", "state", "hrr", "msa"),
n_locations = 200,
data_list
)
}
\arguments{
\item{forecasters}{a named list of forecasters, such as those returned by [get_forecaster()]}
\item{response}{the response (e.g. "usafacts_deaths_incidence_num")}
\item{incidence_period}{the incidence period (e.g. "epiweek" for
now, for all forecasters)}
\item{ahead}{the ahead parameter (e.g. 1, 2, 3, 4)}
\item{forecast_date}{the date of the forecast}
\item{geo_type}{the geographic type (e.g. "county" or "state" or
"hrr" or "msa"... but for now only the first two)}
\item{n_locations}{the number of locations (for now we will use 200
for this)}
\item{data_list}{a named list of data frames for each response/geo_type, i.e. for county/state}
}
\description{
Run forecasters in specified list and populate a global variable with prediction_cards incrementally
}
| /evalforecast/man/run_forecasters.Rd | permissive | brookslogan/covid-19-iif-blog-post-code | R | false | true | 1,234 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/run_forecasters.R
\name{run_forecasters}
\alias{run_forecasters}
\title{Run forecasters in specified list and populate a global variable with prediction_cards incrementally}
\usage{
run_forecasters(
forecasters,
response,
incidence_period = c("epiweek"),
ahead,
forecast_date,
geo_type = c("county", "state", "hrr", "msa"),
n_locations = 200,
data_list
)
}
\arguments{
\item{forecasters}{a named list of forecasters, such as those returned by [get_forecaster()]}
\item{response}{the response (e.g. "usafacts_deaths_incidence_num")}
\item{incidence_period}{the incidence period (e.g. "epiweek" for
now, for all forecasters)}
\item{ahead}{the ahead parameter (e.g. 1, 2, 3, 4)}
\item{forecast_date}{the date of the forecast}
\item{geo_type}{the geographic type (e.g. "county" or "state" or
"hrr" or "msa"... but for now only the first two)}
\item{n_locations}{the number of locations (for now we will use 200
for this)}
\item{data_list}{a named list of data frames for each response/geo_type, i.e. for county/state}
}
\description{
Run forecasters in specified list and populate a global variable with prediction_cards incrementally
}
|
#' Author:
#' Subject:
# library(tidyverse)
library(magrittr)

# Import -----------------------------------------------------------------------
# Fetch the dataset landing page, pull the href of its first .xlsx link, and
# download that workbook to disk.
base_url <- "https://data.humdata.org/dataset/e1a91ae0-292d-4434-bc75-bf863d4608ba"
page <- httr::GET(base_url)
xlsx_href <- page %>%
  xml2::read_html() %>%
  xml2::xml_find_first("//a[contains(@href, 'xlsx')]") %>%
  xml2::xml_attr("href")
download_url <- paste0("https://data.humdata.org", xlsx_href)
response <- httr::GET(download_url, httr::write_disk("arquivo.xlsx", TRUE))

# Tidy -------------------------------------------------------------------------
# Visualize --------------------------------------------------------------------
# Model ------------------------------------------------------------------------
# Export -----------------------------------------------------------------------
# readr::write_rds(d, "")
| /drafts/20201119_covid_dados.R | no_license | Tai-Rocha/lives | R | false | false | 846 | r | #' Author:
#' Subject:
# library(tidyverse)
library(magrittr)

# Import -----------------------------------------------------------------------
# Locate the first .xlsx anchor on the dataset page and save the file locally.
dataset_page_url <- "https://data.humdata.org/dataset/e1a91ae0-292d-4434-bc75-bf863d4608ba"
dataset_page <- httr::GET(dataset_page_url)
xlsx_link <- dataset_page %>%
  xml2::read_html() %>%
  xml2::xml_find_first("//a[contains(@href, 'xlsx')]") %>%
  xml2::xml_attr("href")
file_url <- paste0("https://data.humdata.org", xlsx_link)
download_response <- httr::GET(file_url, httr::write_disk("arquivo.xlsx", TRUE))

# Tidy -------------------------------------------------------------------------
# Visualize --------------------------------------------------------------------
# Model ------------------------------------------------------------------------
# Export -----------------------------------------------------------------------
# readr::write_rds(d, "")
|
library(tibble)
### Name: enframe
### Title: Converting vectors to data frames, and vice versa
### Aliases: enframe deframe
### ** Examples
# enframe() turns a vector (or list) into a two-column tibble of name/value
# pairs; for unnamed input the `name` column holds the integer positions.
enframe(1:3)
enframe(c(a = 5, b = 7))
enframe(list(one = 1, two = 2:3, three = 4:6))
# deframe() is the inverse: collapse a one- or two-column data frame back to
# a (possibly named) vector or list.
deframe(enframe(1:3))
deframe(tibble(a = 1:3))
deframe(tibble(a = as.list(1:3)))
| /data/genthat_extracted_code/tibble/examples/enframe.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 313 | r | library(tibble)
### Name: enframe
### Title: Converting vectors to data frames, and vice versa
### Aliases: enframe deframe
### ** Examples
# Vector/list -> two-column tibble (name, value); unnamed elements are
# labelled by position.
enframe(1:3)
enframe(c(a = 5, b = 7))
enframe(list(one = 1, two = 2:3, three = 4:6))
# Round-trip and data-frame inputs: deframe() collapses back to a vector/list.
deframe(enframe(1:3))
deframe(tibble(a = 1:3))
deframe(tibble(a = as.list(1:3)))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/combo_functions.R
\name{combo_int}
\alias{combo_int}
\title{Combinations of integer vector}
\usage{
combo_int(.set)
}
\arguments{
\item{.set}{Integer vector to combine}
}
\value{
List of integer vectors, one vector per combination
}
\description{
Returns a map of all non-empty combinations of set. Returns an empty list if
given an empty or invalid value.
}
\examples{
combo_int(1:5)
}
| /man/combo_int.Rd | permissive | user01/uvadsi | R | false | true | 453 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/combo_functions.R
\name{combo_int}
\alias{combo_int}
\title{Combinations of integer vector}
\usage{
combo_int(.set)
}
\arguments{
\item{.set}{Integer vector to combine}
}
\value{
List of integer vectors, one vector per combination
}
\description{
Returns a map of all non-empty combinations of set. Returns an empty list if
given an empty or invalid value.
}
\examples{
combo_int(1:5)
}
|
# Exercise 4 -- dplyr practice on the tidy Brauer (2007) yeast expression data.
# Consistency fix: the script previously mixed summarize() and summarise();
# it now uses summarise() throughout (the two are identical dplyr aliases).
# Import libraries
library(readr)
library(dplyr)
# Import data
yDat <- read_csv('../data/brauer2007_tidy.csv')
## 4.1 Which 10 biological process annotations have the most genes
## associated with them? What about molecular functions?
ex4.1.1 <-
  yDat %>%
  group_by(bp) %>%
  summarise(n = n_distinct(symbol)) %>%
  arrange(desc(n)) %>%
  head(10)
ex4.1.1
ex4.1.2 <-
  yDat %>%
  group_by(mf) %>%
  summarise(n = n_distinct(symbol)) %>%
  arrange(desc(n)) %>%
  head(10)
ex4.1.2
## 4.2 How many distinct genes are there where we know what process
## the gene is involved in but we don't know what it does?
ex4.2.1 <-
  yDat %>%
  filter(bp != "biological process unknown" &
           mf == "molecular function unknown") %>%
  select(symbol, bp) %>%
  distinct()
ex4.2.1
# Instructor's Answer: 737
# Is that correct? What if the same gene has two biological processes?
# (distinct symbol/bp pairs over-count genes annotated with several processes)
ex4.2.2 <-
  yDat %>%
  filter(bp != "biological process unknown" &
           mf == "molecular function unknown") %>%
  summarise(n_distinct(symbol))
ex4.2.2
# Correct Answer: 709
## 4.3 When the growth rate is restricted to 0.05 by limiting Glucose,
## which biological processes are the most upregulated?
## Show a sorted list with the most upregulated BPs on top,
## displaying the biological process and the average expression
## of all genes in that process rounded to two digits.
ex4.3 <-
  yDat %>%
  filter(nutrient == 'Glucose', rate == 0.05) %>%
  group_by(bp) %>%
  summarise(meanexp = mean(expression)) %>%
  mutate(meanexp = round(meanexp, 2)) %>%
  arrange(desc(meanexp))
ex4.3
## 4.4 Group the data by limiting nutrient (primarily) then by
## biological process. Get the average expression for all genes
## annotated with each process, separately for each limiting
## nutrient, where the growth rate is restricted to 0.05.
## Arrange the result to show the most upregulated processes
## on top.
ex4.4 <-
  yDat %>%
  filter(rate == 0.05) %>%
  group_by(nutrient, bp) %>%
  summarise(meanexp = mean(expression)) %>%
  arrange(desc(meanexp))
ex4.4
## 4.5 Get only the top three most upregulated biological processes
## for each limiting nutrient.
# After summarise(), ex4.4 is still grouped by nutrient, so row_number()
# restarts within each nutrient and keeps the top three rows per group.
ex4.5 <-
  ex4.4 %>%
  filter(row_number() <= 3)
ex4.5
?row_number  # opens the help page when run interactively
## 4.6 For the same groupings by limiting nutrient (primarily) then by
## biological process, summarize the correlation between rate
## and expression. Show the number of distinct genes within
## each grouping.
ex4.6 <-
  yDat %>%
  group_by(nutrient, bp) %>%
  summarise(r = cor(rate, expression), ngenes = n_distinct(symbol))
ex4.6
# What do the results from 1 distinct gene represent?
## 4.7 Continue to process the result to show only results where
## the process has at least 5 genes. Add a column corresponding
## to the absolute value of the correlation coefficient, and
## show for each nutrient the singular process with the highest
## correlation between rate and expression, regardless of
## direction
# ex4.6 is still grouped by nutrient, so after sorting by |r| the
# row_number() == 1 filter keeps the single top process per nutrient.
ex4.7 <-
  ex4.6 %>%
  filter(ngenes >= 5) %>%
  mutate(absr = abs(r)) %>%
  arrange(desc(absr)) %>%
  filter(row_number() == 1)
ex4.7
| /Intro_to_dplyr/exercise_4.R | no_license | luadam4c/BIMS-8382 | R | false | false | 3,253 | r | # Exercise 4
# dplyr practice on the tidy Brauer (2007) yeast expression dataset: each
# ex4.x object answers one exercise; the bare name on the next line prints it.
# Import libraries
library(readr)
library(dplyr)
# Import data
yDat <- read_csv('../data/brauer2007_tidy.csv')
## 4.1 Which 10 biological process annotations have the most genes
## associated with them? What about molecular functions?
ex4.1.1 <-
  yDat %>%
  group_by(bp) %>%
  summarize(n = n_distinct(symbol)) %>%
  arrange(desc(n)) %>%
  head(10)
ex4.1.1
ex4.1.2 <-
  yDat %>%
  group_by(mf) %>%
  summarize(n = n_distinct(symbol)) %>%
  arrange(desc(n)) %>%
  head(10)
ex4.1.2
## 4.2 How many distinct genes are there where we know what process
## the gene is involved in but we don't know what it does?
# Distinct symbol/bp pairs: over-counts genes annotated with several processes.
ex4.2.1 <-
  yDat %>%
  filter(bp != "biological process unknown" &
           mf == "molecular function unknown") %>%
  select(symbol, bp) %>%
  distinct()
ex4.2.1
# Instructor's Answer: 737
# Is that correct? What if the same gene has two biological processes?
ex4.2.2 <-
  yDat %>%
  filter(bp != "biological process unknown" &
           mf == "molecular function unknown") %>%
  summarize(n_distinct(symbol))
ex4.2.2
# Correct Answer: 709
## 4.3 When the growth rate is restricted to 0.05 by limiting Glucose,
## which biological processes are the most upregulated?
## Show a sorted list with the most upregulated BPs on top,
## displaying the biological process and the average expression
## of all genes in that process rounded to two digits.
ex4.3 <-
  yDat %>%
  filter(nutrient == 'Glucose', rate == 0.05) %>%
  group_by(bp) %>%
  summarise(meanexp = mean(expression)) %>%
  mutate(meanexp = round(meanexp, 2)) %>%
  arrange(desc(meanexp))
ex4.3
## 4.4 Group the data by limiting nutrient (primarily) then by
## biological process. Get the average expression for all genes
## annotated with each process, separately for each limiting
## nutrient, where the growth rate is restricted to 0.05.
## Arrange the result to show the most upregulated processes
## on top.
ex4.4 <-
  yDat %>%
  filter(rate == 0.05) %>%
  group_by(nutrient, bp) %>%
  summarise(meanexp = mean(expression)) %>%
  arrange(desc(meanexp))
ex4.4
## 4.5 Get only the top three most upregulated biological processes
## for each limiting nutrient.
# ex4.4 remains grouped by nutrient, so row_number() restarts per nutrient.
ex4.5 <-
  ex4.4 %>%
  filter(row_number() <= 3)
ex4.5
?row_number  # opens the help page when run interactively
## 4.6 For the same groupings by limiting nutrient (primarily) then by
## biological process, summarize the correlation between rate
## and expression. Show the number of distinct genes within
## each grouping.
ex4.6 <-
  yDat %>%
  group_by(nutrient, bp) %>%
  summarize(r = cor(rate, expression), ngenes = n_distinct(symbol))
ex4.6
# What do the results from 1 distinct gene represent?
## 4.7 Continue to process the result to show only results where
## the process has at least 5 genes. Add a column corresponding
## to the absolute value of the correlation coefficient, and
## show for each nutrient the singular process with the highest
## correlation between rate and expression, regardless of
## direction
# ex4.6 is still grouped by nutrient, so row_number() == 1 keeps the
# single top process per nutrient after sorting by |r|.
ex4.7 <-
  ex4.6 %>%
  filter(ngenes >= 5) %>%
  mutate(absr = abs(r)) %>%
  arrange(desc(absr)) %>%
  filter(row_number() == 1)
ex4.7
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geom-bin2d.r, R/stat-bin2d.r
\name{geom_bin2d}
\alias{geom_bin2d}
\alias{stat_bin2d}
\alias{stat_bin_2d}
\title{Add heatmap of 2d bin counts.}
\usage{
geom_bin2d(mapping = NULL, data = NULL, stat = "bin2d",
position = "identity", ..., na.rm = FALSE, show.legend = NA,
inherit.aes = TRUE)
stat_bin_2d(mapping = NULL, data = NULL, geom = "tile",
position = "identity", ..., bins = 30, binwidth = NULL, drop = TRUE,
na.rm = FALSE, show.legend = NA, inherit.aes = TRUE)
}
\arguments{
\item{mapping}{Set of aesthetic mappings created by \code{\link{aes}} or
\code{\link{aes_}}. If specified and \code{inherit.aes = TRUE} (the
default), it is combined with the default mapping at the top level of the
plot. You must supply \code{mapping} if there is no plot mapping.}
\item{data}{The data to be displayed in this layer. There are three
options:
If \code{NULL}, the default, the data is inherited from the plot
data as specified in the call to \code{\link{ggplot}}.
A \code{data.frame}, or other object, will override the plot
data. All objects will be fortified to produce a data frame. See
\code{\link{fortify}} for which variables will be created.
A \code{function} will be called with a single argument,
the plot data. The return value must be a \code{data.frame.}, and
will be used as the layer data.}
\item{position}{Position adjustment, either as a string, or the result of
a call to a position adjustment function.}
\item{...}{other arguments passed on to \code{\link{layer}}. These are
often aesthetics, used to set an aesthetic to a fixed value, like
\code{color = "red"} or \code{size = 3}. They may also be parameters
to the paired geom/stat.}
\item{na.rm}{If \code{FALSE} (the default), removes missing values with
a warning. If \code{TRUE} silently removes missing values.}
\item{show.legend}{logical. Should this layer be included in the legends?
\code{NA}, the default, includes if any aesthetics are mapped.
\code{FALSE} never includes, and \code{TRUE} always includes.}
\item{inherit.aes}{If \code{FALSE}, overrides the default aesthetics,
rather than combining with them. This is most useful for helper functions
that define both data and aesthetics and shouldn't inherit behaviour from
the default plot specification, e.g. \code{\link{borders}}.}
\item{geom, stat}{Use to override the default connection between
\code{geom_bin2d} and \code{stat_bin2d}.}
\item{bins}{numeric vector giving number of bins in both vertical and
horizontal directions. Set to 30 by default.}
\item{binwidth}{Numeric vector giving bin width in both vertical and
horizontal directions. Overrides \code{bins} if both set.}
\item{drop}{if \code{TRUE} removes all cells with 0 counts.}
}
\description{
Add heatmap of 2d bin counts.
}
\section{Aesthetics}{
\Sexpr[results=rd,stage=build]{ggplot2:::rd_aesthetics("stat", "bin2d")}
}
\examples{
d <- ggplot(diamonds, aes(x, y)) + xlim(4, 10) + ylim(4, 10)
d + geom_bin2d()
# You can control the size of the bins by specifying the number of
# bins in each direction:
d + geom_bin2d(bins = 10)
d + geom_bin2d(bins = 30)
# Or by specifying the width of the bins
d + geom_bin2d(binwidth = c(0.1, 0.1))
}
\seealso{
\code{\link{stat_binhex}} for hexagonal binning
}
| /man/geom_bin2d.Rd | no_license | sidiropoulos/ggplot2 | R | false | true | 3,325 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geom-bin2d.r, R/stat-bin2d.r
\name{geom_bin2d}
\alias{geom_bin2d}
\alias{stat_bin2d}
\alias{stat_bin_2d}
\title{Add heatmap of 2d bin counts.}
\usage{
geom_bin2d(mapping = NULL, data = NULL, stat = "bin2d",
position = "identity", ..., na.rm = FALSE, show.legend = NA,
inherit.aes = TRUE)
stat_bin_2d(mapping = NULL, data = NULL, geom = "tile",
position = "identity", ..., bins = 30, binwidth = NULL, drop = TRUE,
na.rm = FALSE, show.legend = NA, inherit.aes = TRUE)
}
\arguments{
\item{mapping}{Set of aesthetic mappings created by \code{\link{aes}} or
\code{\link{aes_}}. If specified and \code{inherit.aes = TRUE} (the
default), it is combined with the default mapping at the top level of the
plot. You must supply \code{mapping} if there is no plot mapping.}
\item{data}{The data to be displayed in this layer. There are three
options:
If \code{NULL}, the default, the data is inherited from the plot
data as specified in the call to \code{\link{ggplot}}.
A \code{data.frame}, or other object, will override the plot
data. All objects will be fortified to produce a data frame. See
\code{\link{fortify}} for which variables will be created.
A \code{function} will be called with a single argument,
the plot data. The return value must be a \code{data.frame.}, and
will be used as the layer data.}
\item{position}{Position adjustment, either as a string, or the result of
a call to a position adjustment function.}
\item{...}{other arguments passed on to \code{\link{layer}}. These are
often aesthetics, used to set an aesthetic to a fixed value, like
\code{color = "red"} or \code{size = 3}. They may also be parameters
to the paired geom/stat.}
\item{na.rm}{If \code{FALSE} (the default), removes missing values with
a warning. If \code{TRUE} silently removes missing values.}
\item{show.legend}{logical. Should this layer be included in the legends?
\code{NA}, the default, includes if any aesthetics are mapped.
\code{FALSE} never includes, and \code{TRUE} always includes.}
\item{inherit.aes}{If \code{FALSE}, overrides the default aesthetics,
rather than combining with them. This is most useful for helper functions
that define both data and aesthetics and shouldn't inherit behaviour from
the default plot specification, e.g. \code{\link{borders}}.}
\item{geom, stat}{Use to override the default connection between
\code{geom_bin2d} and \code{stat_bin2d}.}
\item{bins}{numeric vector giving number of bins in both vertical and
horizontal directions. Set to 30 by default.}
\item{binwidth}{Numeric vector giving bin width in both vertical and
horizontal directions. Overrides \code{bins} if both set.}
\item{drop}{if \code{TRUE} removes all cells with 0 counts.}
}
\description{
Add heatmap of 2d bin counts.
}
\section{Aesthetics}{
\Sexpr[results=rd,stage=build]{ggplot2:::rd_aesthetics("stat", "bin2d")}
}
\examples{
d <- ggplot(diamonds, aes(x, y)) + xlim(4, 10) + ylim(4, 10)
d + geom_bin2d()
# You can control the size of the bins by specifying the number of
# bins in each direction:
d + geom_bin2d(bins = 10)
d + geom_bin2d(bins = 30)
# Or by specifying the width of the bins
d + geom_bin2d(binwidth = c(0.1, 0.1))
}
\seealso{
\code{\link{stat_binhex}} for hexagonal binning
}
|
# Ca/Mg precipitation lab work-up (appears to be 2019-11-08 experiment):
# builds calibration curves with transmem::calibCurve() and converts aliquot
# absorbances to concentrations with transmem::signal2conc().
library(transmem)
# A-D: ratios of the form mass * concentration / total mass for four samples.
# NOTE(review): their exact meaning is undocumented here -- confirm against
# the lab notebook.
A <- 2.9576*(0.08502959)/(20.3837+2.9576)
B <- 0.3257*(1.627547)/(20.3917+0.3257)
C <- 1.2368*(1.627547)/(19.0493+1.2368)
D <- 2.4622*(1.627547)/(18.0569+2.4622)
#-----STOCK SOLUTIONS--------------------------------------------------------
StockMg.1000_4 <- 1000
StockMg.2p5_4 <- StockMg.1000_4 * 0.1300 / 50.0295
StockCa.1000_4 <- 0.1242 * 0.40043 / 50.2750 * 1000000
StockCa.10_4 <- 0.1220 * StockCa.1000_4 / 12.2309
#-----CALIBRATION CURVES-----------------------------------------------------
# Each curve: diluted-stock concentration vs. measured instrument signal.
CalCurves <- list(
  Magnessium.1 = data.frame(Conc = c(0.0000, 0.1332, 0.9991) * StockMg.2p5_4 /
                              c(6.0000, 6.1608, 6.0342),
                            Signal = c(0.000, 0.043, 0.289)),
  Calcium.1 = data.frame(Conc = c(0.0000, 0.0744, 0.2916, 0.5724, 1.8285, 2.9865, 4.2062) * StockCa.10_4 /
                           c(6.0000, 6.3447, 6.3288, 6.2018, 6.4452, 6.1708, 6.1998),
                         Signal = c(0.000, 0.003, 0.030, 0.067, 0.205, 0.350, 0.485))
)
#-----CURVE MODELS-----------------------------------------------------------
# Fit one calibration model per curve: polynomial order 2 for Mg, 1 for Ca.
Order <- c(2, 1)
CalModels <- list()
for (i in 1:2) CalModels[[i]] <- calibCurve(curve = CalCurves[[i]], order = Order[i])
names(CalModels) <- names(CalCurves)
#-----REAGENT CONCENTRATIONS-------------------------------------------------
PO4_S <- 6.608 / 132.0562 / 50.002 * 1e3
NaOH_s <-mean(c(232.7, 224.7) / 204.22 / c(0.7210, 0.6570))
# NOTE(review): 'naoh' is not defined anywhere in this script, so the next
# line fails unless it exists in the calling environment -- was 'NaOH_s'
# (or a 4-element vector of NaOH titration results) intended?
NaOHConc <- c(0.6510, 0.7794, 0.8647, 1.0015) * mean(naoh[3:4]) / c(10.1257, 10.0009, 10.0011, 10.0680)
HPO4Conc <- c(0.0644, 0.1054, 0.2464, 0.5403) * (6.608 / 132.0562 / 50.002 * 1e3) / c(10.0426, 10.0381, 10.0363, 10.0652)
#-----SAMPLE DILUTION FACTORS------------------------------------------------
dilutionsMg <- c(20027/5200, 20263/5107, 19994/5129, 19951/4939)
dilutionsCa <- rep(1, 4)
#-----ALIQUOT ABSORBANCES----------------------------------------------------
AliAbsMg <- c(0.096, 0.106, 0.039, 0.055)
AliAbsCa <- c(0.026, 0.019, 0.014, 0.056)
#-----SPECIES CONCENTRATIONS IN THE ALIQUOTS---------------------------------
# Convert absorbances to concentrations via the fitted calibration models.
AliConcMg <- signal2conc(signal = AliAbsMg, model = CalModels$Magnessium.1, dilution = dilutionsMg)
AliConcCa <- signal2conc(signal = AliAbsCa, model = CalModels$Calcium.1, dilution = dilutionsCa)
# Decisions
# NaOH: 0.15 M -> 1028 * 0.15 / NaOH6N
# HPO4: 0.005 M -> 0.005 * 1005 / PO4_S
| /19-JuneDecember/19-11-08-CaMgPrec.R | no_license | Crparedes/master-data-treatment | R | false | false | 2,386 | r | library(transmem)
# A-D: mass * concentration / total-mass ratios for four samples.
# NOTE(review): meaning undocumented here -- confirm against the lab notebook.
A <- 2.9576*(0.08502959)/(20.3837+2.9576)
B <- 0.3257*(1.627547)/(20.3917+0.3257)
C <- 1.2368*(1.627547)/(19.0493+1.2368)
D <- 2.4622*(1.627547)/(18.0569+2.4622)
#-----STOCK SOLUTIONS--------------------------------------------------------
StockMg.1000_4 <- 1000
StockMg.2p5_4 <- StockMg.1000_4 * 0.1300 / 50.0295
StockCa.1000_4 <- 0.1242 * 0.40043 / 50.2750 * 1000000
StockCa.10_4 <- 0.1220 * StockCa.1000_4 / 12.2309
#-----CALIBRATION CURVES-----------------------------------------------------
# Diluted-stock concentration vs. measured instrument signal, per analyte.
CalCurves <- list(
  Magnessium.1 = data.frame(Conc = c(0.0000, 0.1332, 0.9991) * StockMg.2p5_4 /
                              c(6.0000, 6.1608, 6.0342),
                            Signal = c(0.000, 0.043, 0.289)),
  Calcium.1 = data.frame(Conc = c(0.0000, 0.0744, 0.2916, 0.5724, 1.8285, 2.9865, 4.2062) * StockCa.10_4 /
                           c(6.0000, 6.3447, 6.3288, 6.2018, 6.4452, 6.1708, 6.1998),
                         Signal = c(0.000, 0.003, 0.030, 0.067, 0.205, 0.350, 0.485))
)
#-----CURVE MODELS-----------------------------------------------------------
# One calibration model per curve: polynomial order 2 for Mg, 1 for Ca.
Order <- c(2, 1)
CalModels <- list()
for (i in 1:2) CalModels[[i]] <- calibCurve(curve = CalCurves[[i]], order = Order[i])
names(CalModels) <- names(CalCurves)
#-----REAGENT CONCENTRATIONS-------------------------------------------------
PO4_S <- 6.608 / 132.0562 / 50.002 * 1e3
NaOH_s <-mean(c(232.7, 224.7) / 204.22 / c(0.7210, 0.6570))
# NOTE(review): 'naoh' is undefined in this script; the next line errors
# unless it exists in the calling environment -- 'NaOH_s' may be intended.
NaOHConc <- c(0.6510, 0.7794, 0.8647, 1.0015) * mean(naoh[3:4]) / c(10.1257, 10.0009, 10.0011, 10.0680)
HPO4Conc <- c(0.0644, 0.1054, 0.2464, 0.5403) * (6.608 / 132.0562 / 50.002 * 1e3) / c(10.0426, 10.0381, 10.0363, 10.0652)
#-----SAMPLE DILUTION FACTORS------------------------------------------------
dilutionsMg <- c(20027/5200, 20263/5107, 19994/5129, 19951/4939)
dilutionsCa <- rep(1, 4)
#-----ALIQUOT ABSORBANCES----------------------------------------------------
AliAbsMg <- c(0.096, 0.106, 0.039, 0.055)
AliAbsCa <- c(0.026, 0.019, 0.014, 0.056)
#-----SPECIES CONCENTRATIONS IN THE ALIQUOTS---------------------------------
# Absorbance -> concentration through the fitted calibration models.
AliConcMg <- signal2conc(signal = AliAbsMg, model = CalModels$Magnessium.1, dilution = dilutionsMg)
AliConcCa <- signal2conc(signal = AliAbsCa, model = CalModels$Calcium.1, dilution = dilutionsCa)
# Decisions
# NaOH: 0.15 M -> 1028 * 0.15 / NaOH6N
# HPO4: 0.005 M -> 0.005 * 1005 / PO4_S
|
# Code to extract balance data to share
# Walks every database / sample-size / sample combination, copies each shared
# covariate-balance file into a mirrored folder tree (with covariate metadata
# columns stripped), and accumulates a single covariateRef.csv lookup table.
library(dplyr)
databaseIds <- c("MDCR", "MDCD", "Optum EHR")
outputFolders <- paste0("d:/SmallSampleEstimationEvaluation_",
                        tolower(gsub(" ", "_", databaseIds)))
sampleSizes <- c(4000, 2000, 1000, 500, 250)
targetFolder <- "d:/SmallSampleBalance"
# Zero-row tibble with a covariateId column: schema seed for bind_rows below.
covariates <- tibble(covariateId = 1) %>%
  filter(covariateId != 1)
covariateColumns <- c("covariateName","analysisId", "conceptId", "domainId", "isBinary")
for (i in seq_along(databaseIds)) {
  message("Extracting for database ", databaseIds[i])
  for (sampleSize in sampleSizes) {
    message("- Sample size ", sampleSize)
    # Number of samples is chosen so samples * sampleSize == 20000.
    samples <- 20000 / sampleSize
    for (sample in seq_len(samples)) {
      sourceSampleFolder <- file.path(outputFolders[i],
                                      sprintf("smallSample%d", sampleSize),
                                      sprintf("Sample_%d", sample))
      targetSampleFolder <- file.path(targetFolder,
                                      gsub(" ", "_", databaseIds[i]),
                                      sprintf("SampleSize_%d", sampleSize),
                                      sprintf("Sample_%d", sample))
      if (!dir.exists(targetSampleFolder)) {
        dir.create(targetSampleFolder, recursive = TRUE)
      }
      # Keep one row per unique, non-empty shared balance file.
      fileRef <- CohortMethod::getFileReference(sourceSampleFolder)
      fileRef <- fileRef[!duplicated(fileRef$sharedBalanceFile), ]
      fileRef <- fileRef[fileRef$sharedBalanceFile != "", ]
      for (j in seq_len(nrow(fileRef))) {
        sourceBalanceFile <- file.path(sourceSampleFolder, fileRef$sharedBalanceFile[j])
        targetBalanceFile <- file.path(targetSampleFolder, sprintf("Balance_t%d_c%d_a%d.rds",
                                                                   fileRef$targetId[j],
                                                                   fileRef$comparatorId[j],
                                                                   fileRef$analysisId[j]))
        balance <- readRDS(sourceBalanceFile)
        # Record covariate metadata the first time each covariateId is seen...
        newCovariateIdx <- which(!balance$covariateId %in% covariates$covariateId)
        if (length(newCovariateIdx) > 0) {
          newCovariates <-balance[newCovariateIdx, c("covariateId", covariateColumns)]
          covariates <- bind_rows(covariates, newCovariates)
        }
        # ...then drop those columns from the shared copy to keep it small.
        balance[, covariateColumns] <- NULL
        saveRDS(balance, targetBalanceFile)
      }
    }
  }
}
readr::write_csv(covariates, file.path(targetFolder, "covariateRef.csv"))
# Reference tables describing targets, comparators, and analysis settings.
csvFileName <- system.file("NegativeControls.csv", package = "SmallSampleEstimationEvaluation")
negativeControls <- readr::read_csv(csvFileName, show_col_types = FALSE)
targets <- negativeControls %>%
  distinct(targetId, targetName)
readr::write_csv(targets, file.path(targetFolder, "targetRef.csv"))
comparators <- negativeControls %>%
  distinct(comparatorId, comparatorName)
readr::write_csv(comparators, file.path(targetFolder, "comparatorRef.csv"))
analyses <- tibble(analysisId = c(1, 2, 3, 4, 5),
                   adjustment = c("Matching", "Crude", "Stratification", "Matching", "Stratification"),
                   propensityModel = c("Local", NA, "Local", "Global", "Global"))
readr::write_csv(analyses, file.path(targetFolder, "analysisRef.csv"))
| /extras/ExtractBalanceData.R | no_license | ohdsi-studies/SmallSampleEstimationEvaluation | R | false | false | 3,258 | r | # Code to extract balance data to share
# Copies shared covariate-balance files into a mirrored folder tree (metadata
# columns removed) and builds a covariateRef.csv lookup table along the way.
library(dplyr)
databaseIds <- c("MDCR", "MDCD", "Optum EHR")
outputFolders <- paste0("d:/SmallSampleEstimationEvaluation_",
                        tolower(gsub(" ", "_", databaseIds)))
sampleSizes <- c(4000, 2000, 1000, 500, 250)
targetFolder <- "d:/SmallSampleBalance"
# Empty tibble with the covariateId column only: accumulator schema seed.
covariates <- tibble(covariateId = 1) %>%
  filter(covariateId != 1)
covariateColumns <- c("covariateName","analysisId", "conceptId", "domainId", "isBinary")
for (i in seq_along(databaseIds)) {
  message("Extracting for database ", databaseIds[i])
  for (sampleSize in sampleSizes) {
    message("- Sample size ", sampleSize)
    # samples * sampleSize == 20000 across all sample sizes.
    samples <- 20000 / sampleSize
    for (sample in seq_len(samples)) {
      sourceSampleFolder <- file.path(outputFolders[i],
                                      sprintf("smallSample%d", sampleSize),
                                      sprintf("Sample_%d", sample))
      targetSampleFolder <- file.path(targetFolder,
                                      gsub(" ", "_", databaseIds[i]),
                                      sprintf("SampleSize_%d", sampleSize),
                                      sprintf("Sample_%d", sample))
      if (!dir.exists(targetSampleFolder)) {
        dir.create(targetSampleFolder, recursive = TRUE)
      }
      # One row per unique, non-empty shared balance file.
      fileRef <- CohortMethod::getFileReference(sourceSampleFolder)
      fileRef <- fileRef[!duplicated(fileRef$sharedBalanceFile), ]
      fileRef <- fileRef[fileRef$sharedBalanceFile != "", ]
      for (j in seq_len(nrow(fileRef))) {
        sourceBalanceFile <- file.path(sourceSampleFolder, fileRef$sharedBalanceFile[j])
        targetBalanceFile <- file.path(targetSampleFolder, sprintf("Balance_t%d_c%d_a%d.rds",
                                                                   fileRef$targetId[j],
                                                                   fileRef$comparatorId[j],
                                                                   fileRef$analysisId[j]))
        balance <- readRDS(sourceBalanceFile)
        # Collect metadata for covariateIds not seen before, then strip the
        # metadata columns from the copied balance object.
        newCovariateIdx <- which(!balance$covariateId %in% covariates$covariateId)
        if (length(newCovariateIdx) > 0) {
          newCovariates <-balance[newCovariateIdx, c("covariateId", covariateColumns)]
          covariates <- bind_rows(covariates, newCovariates)
        }
        balance[, covariateColumns] <- NULL
        saveRDS(balance, targetBalanceFile)
      }
    }
  }
}
readr::write_csv(covariates, file.path(targetFolder, "covariateRef.csv"))
# Reference tables for targets, comparators, and analysis settings.
csvFileName <- system.file("NegativeControls.csv", package = "SmallSampleEstimationEvaluation")
negativeControls <- readr::read_csv(csvFileName, show_col_types = FALSE)
targets <- negativeControls %>%
  distinct(targetId, targetName)
readr::write_csv(targets, file.path(targetFolder, "targetRef.csv"))
comparators <- negativeControls %>%
  distinct(comparatorId, comparatorName)
readr::write_csv(comparators, file.path(targetFolder, "comparatorRef.csv"))
analyses <- tibble(analysisId = c(1, 2, 3, 4, 5),
                   adjustment = c("Matching", "Crude", "Stratification", "Matching", "Stratification"),
                   propensityModel = c("Local", NA, "Local", "Global", "Global"))
readr::write_csv(analyses, file.path(targetFolder, "analysisRef.csv"))
|
#' Reset Cache and Update all Local Data
#'
#' Deletes the on-disk memoise cache and, when requested, re-fetches every
#' data source so the cache is repopulated immediately.
#'
#' @param refresh_data Logical, defaults to `FALSE`. Should all data sources
#'   be refreshed once the cache has been removed?
#' @return Null
#' @export
#' @importFrom memoise cache_filesystem
#' @examples
#'
#'## Code
#'reset_cache
reset_cache <- function(refresh_data = FALSE) {
  # Remove the cache directory wholesale, then re-create a filesystem cache
  # at the same location.
  unlink(".cache", recursive = TRUE)
  cache <- memoise::cache_filesystem(".cache")
  if (refresh_data) {
    # Touch each memoised data source once so the fresh cache is warmed.
    tmp <- NCoVUtils::get_international_linelist()
    tmp <- NCoVUtils::get_who_cases()
    tmp <- NCoVUtils::get_italy_regional_cases()
  }
  invisible(NULL)
}
| /R/reset_cache.R | permissive | franzbischoff/NCoVUtils | R | false | false | 605 | r | #' Reset Cache and Update all Local Data
#' @param refresh_data Logical, defaults to `FALSE`. Should every data source
#'   be fetched again once the cache has been removed?
#' @return Null
#' @export
#' @importFrom memoise cache_filesystem
#' @examples
#'
#'## Code
#'reset_cache
reset_cache <- function(refresh_data = FALSE) {
  # Wipe the filesystem cache, then rebuild an empty one in its place.
  unlink(".cache", recursive = TRUE)
  cache <- memoise::cache_filesystem(".cache")
  # Optionally repopulate the cache by calling each data getter once.
  if (refresh_data) {
    tmp <- NCoVUtils::get_international_linelist()
    tmp <- NCoVUtils::get_who_cases()
    tmp <- NCoVUtils::get_italy_regional_cases()
  }
  return(invisible(NULL))
}
|
################################################################################
# Data analysis of COVID-19 published at: (article submitted for publication)
# date of creation: 06/27/2021 (date in US format)
# R version: 4.0.5
# script name: script.R
# aim: data analysis
# input: files from the folder 'data'
# output: files saved in the subdirectories of folder 'outputs'
# external sources: none
################################################################################
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# WARNING: On lines 253 - 255 you must choose the three measures of variable
# importance in order to keep going with the data analysis. Run the script only
# til line 250, then evaluate the results obtained and choose the three measures
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# script's parameters ----------------------------------------------------------
# you can change the parameters below according to your needs
# data related parameters
glog_data <- FALSE # set to TRUE if you want to transform the data using glog
std_data <- FALSE # set to TRUE if you want to standardize the data
redux_method <- "none" # choose between "none", "PCA", "FA"
redux_n_dim <- 4 # number of dimensions to retain in PCA or FA
# random forest related parameters
n_varibles_in_impt_plots <- 10 # choose number of variables to show in plots
n_trees <- 5000 # initial number of trees
# graphics related parameters
width_factor <- 2 # increase width_factor to save plots with larger width
height_factor <- 2 # increase height_factor to save plots with larger height
color_ramp <- RColorBrewer::brewer.pal(n = 7, name = "Blues") # heatmap color
color_border <- grey(0.4) # heatmap border color
fontsize <- 8 # heatmap font size
# load sources -----------------------------------------------------------------
source("./codes/packages.R") # load packages
source("./codes/functions.R") # load functions
# seed -------------------------------------------------------------------------
set.seed(2021) # seed for replicating the analysis
# load dataset -----------------------------------------------------------------
#my_data <- read.csv("./data/my_data.csv", header = TRUE) # original data
# NOTE(review): the read.csv above is commented out, so 'my_data' must already
# exist in the workspace (or be loaded by packages.R/functions.R) -- confirm.
my_data$Group <- factor(my_data$Group) # groups as factors
n <- ncol(my_data) # number of columns (last column is the Group factor)
if (glog_data) my_data[, -n] <- LogSt(my_data[, -n]) # apply glog
if (std_data) my_data[, -n] <- scale(my_data[, -n]) # standardize data
# descriptive measures ---------------------------------------------------------
# basic data struture
desc_data <- c(
n_obs = nrow(my_data), n_vars = ncol(my_data[, -n]),
n_in_group = table(my_data$Group)
)
desc_stat <- mvn(my_data[, -n])$Descriptives # standard descriptive statistics
desc_cor <- cor(my_data[, -n], method = "spearman") # spearman correlation
# plot desc_cor using clustering on rows and columns
cat("(New plot window) heatmap for spearman correlation matrix\n\n")
p_cor <- pheatmap(desc_cor,
clustering_method = "ward.D",
cluster_rows = TRUE, cluster_cols = TRUE,
color = color_ramp, border_color = color_border, fontsize = fontsize
)
# apply none/PCA/FA method according to redux_method ---------------------------
# perform data reduction if redux_method != "none"; in the PCA/FA branches the
# predictor columns are replaced by the component/factor scores, Group is kept
redux_data <- switch(redux_method,
  "none" = list(data = my_data, fit = "none"),
  "PCA" = {
    my_pca <- pca(my_data[, -n], redux_n_dim, rotate = "varimax")
    my_data <- data.frame(my_pca$scores, Group = my_data[, n])
    list(data = my_data, fit = fa.sort(my_pca))
  },
  "FA" = {
    my_fa <- fa(my_data[, -n], redux_n_dim, rotate = "varimax")
    my_data <- data.frame(my_fa$scores, Group = my_data[, n])
    list(data = my_data, fit = fa.sort(my_fa))
  }
)
my_data <- redux_data$data # data to be used in random forest
# plot loading matrix if redux_method != "none"
if (redux_method != "none") {
  cat("(Print) results for ", redux_method, "\n\n")
  print(redux_data$fit)
  cat("\n\n")
  # reset some parameters due to PCA or FA: at most redux_n_dim variables exist now
  n <- ncol(my_data)
  k <- n_varibles_in_impt_plots
  n_varibles_in_impt_plots <- min(k, redux_n_dim)
  if (k != n_varibles_in_impt_plots) {
    cat("(Print) n_varibles_in_impt_plots was changed to ", redux_n_dim, "\n\n")
  }
  redux_fit <- loadings(redux_data$fit) # loading matrix
  redux_sorted_load <- matrix(unlist(redux_fit), ncol = redux_n_dim)
  rownames(redux_sorted_load) <- rownames(redux_fit)
  colnames(redux_sorted_load) <- colnames(redux_fit)
  redux_vars <- rownames(redux_fit) # variables
  redux_n <- seq_len(redux_n_dim) # factors
  # dataset for ggplot: one row per (variable, factor) loading
  redux_df <- data.frame(
    Loading = c(redux_fit),
    Var = rep(redux_vars, redux_n_dim),
    Factor = paste0("Factor ", rep(redux_n, each = nrow(redux_fit)))
  )
  # ordered factor keeps the variables in loading-matrix order on the y axis
  redux_df$Var <- factor(redux_df$Var,
    levels = rownames(redux_fit),
    ordered = TRUE
  )
  p_redux <- ggplot(redux_df, aes(x = Var, y = Loading, fill = Loading)) +
    geom_bar(stat = "identity") +
    coord_flip() +
    facet_wrap(~Factor, nrow = 1) +
    scale_fill_gradient2(
      name = "Loading: ", high = "blue", mid = "white",
      low = "red", midpoint = 0, guide = "none"
    ) +
    theme_light(base_size = 12) +
    theme(
      legend.position = "top",
      legend.box.background = element_rect(colour = "black", fill = NA),
      panel.grid.minor.x = element_blank(),
      strip.text = element_text(face = "bold", colour = "black"),
      strip.background = element_rect(fill = grey(0.9), colour = "black")
    ) +
    labs(x = "", y = "Loading strength")
}
# split dataset into training and testing samples ------------------------------
# 75/25 split, stratified on Group so class proportions are preserved
data_split <- initial_split(my_data, prop = 0.75, strata = "Group")
sample_train <- training(data_split)
sample_test <- testing(data_split)
# create a 'recipe' object -----------------------------------------------------
sample_recipe <- recipe(Group ~ ., data = sample_train)
sample_prep <- sample_recipe %>% prep(training = sample_train, retain = TRUE)
# tune random forest hyperparameters (mtry) ------------------------------------
# tuneRF prints progress even with trace = FALSE, hence the capture.output wrapper
invisible(capture.output(
  mtry <- tuneRF(sample_train[, -n], as.factor(sample_train$Group),
    ntreeTry = n_trees, stepFactor = 1.5, improve = 0.01,
    trace = FALSE, plot = FALSE
  )
))
# column 2 of tuneRF's result holds the OOB error; pick the mtry with minimum error
m <- mtry[mtry[, 2] == min(mtry[, 2]), 1][1] # best value of mtry
# fit random forest model ------------------------------------------------------
rf <- rand_forest(trees = n_trees, mtry = m, mode = "classification") %>%
  set_engine("randomForest", importance = TRUE, localImp = TRUE) %>%
  fit(Group ~ ., data = juice(sample_prep))
cat("(Print) basic results from random forest model\n\n")
print(rf$fit) # show basic results for the random forest model
# defining colors for each group
# NOTE(review): only 4 colors are defined — assumes at most 4 groups; confirm
groups <- levels(sample_test$Group)
group_color <- c("#e41a1c", "#377eb8", "#4daf4a", "#984ea3")
# warning: the next plot will be displayed but not saved (save it manually)
cat("\n(New plot window) erro rates\n\n")
dev.new() # new plot window
plot(rf$fit, main = "Error rates", col = c("black", group_color)) # error rate
legend("topright",
  legend = c("OOB", groups), col = c("black", group_color), lty = 1
) # legends
# evaluate the model -----------------------------------------------------------
# roc curves: one-vs-rest ROC per group, based on predicted class probabilities
pred_for_roc_curve <- predict(rf, sample_test[, -n], type = "prob")
auc <- rep(NA, length(groups)) # vector for holding values of area under ROC
names(auc) <- groups
# warning: the next plot will be displayed but not saved (save it manually)
cat("(New plot window) ROC curves\n\n")
dev.new() # open new plot window
for (i in seq_along(groups)) {
  # Define which observations belong to class[i]
  true_values <- sample_test$Group == groups[i]
  # Assess the performance of the classifier for class[i] (ROCR objects)
  pred <- prediction(pred_for_roc_curve[, i], true_values)
  perf <- performance(pred, "tpr", "fpr")
  if (i == 1) {
    plot(perf, main = "ROC Curve", col = group_color[i])
  } else {
    plot(perf, col = group_color[i], add = TRUE)
  }
  # Calculate the area under the curve (AUC) for class[i]
  auc[i] <- unlist(performance(pred, measure = "auc")@y.values)
}
legend("bottomright", legend = groups, col = group_color, lty = 1) # legends
# confusion matrix on the held-out testing samples
pred_for_table <- predict(rf, sample_test[, -n])
confusion_mat <- table(
  observed = sample_test[, n],
  predicted = unlist(pred_for_table)
)
cat("(Print) confusion matrix based on testing samples\n\n")
print(confusion_mat)
# print() is required so the AUC values also show up when the script is source()d;
# the original bare `auc` expressions only auto-print in interactive sessions
cat("(Print) one-vs-rest AUC per group\n\n")
print(auc)
# plot main results ------------------------------------------------------------
# tree with least number of nodes (which.min returns the first index of the min)
tree_num <- which.min(rf$fit$forest$ndbigtree)
p_rf_tree <- tree_func(final_model = rf$fit, tree_num)
# measures of variable importance for the fitted random forest model -----------
cat("\n(This process can take a while) please wait ...\n\n")
impt_measures <- measure_importance(rf$fit)
# choosing best set of importance measures to use
p_choose_imp_1 <- plot_importance_ggpairs(impt_measures)
p_choose_imp_2 <- plot_importance_rankings(impt_measures)
cat("(New plot window) Importance measures (plot 1)\n\n")
dev.new() # new plot window
print(p_choose_imp_1)
cat("(New plot window) Importance measures (plot 2)\n\n")
dev.new() # new plot window
print(p_choose_imp_2)
# !!! STOP HERE AND EVALUATE THE RESULTS BEFORE CHOOSING THE THREE MEASURES !!!
# define your chosen measures replacing NULL by the measure's name
first_measure <- "gini_decrease" # ex: first_measure <- "no_of_trees"
second_measure <- "no_of_nodes" # ex: second_measure <- "no_of_nodes"
third_measure <- "mean_min_depth" # ex: third_measure <- "mean_min_depth"
cat("(Again ... this process can take a while) please wait ...\n\n")
# test if the user has chosen all three importance measures
# (bug fix: the original tested first_measure three times, so a missing second
#  or third measure was never detected; || is used because the test is scalar)
if (is.null(first_measure) || is.null(second_measure) || is.null(third_measure)) {
  stop("You did not choose the three importance measures...
       please start the whole script again")
}
# plot the chosen measures
p_imp <- plot_multi_way_importance(impt_measures,
  x_measure = first_measure,
  y_measure = second_measure,
  size_measure = third_measure,
  no_of_labels = n_varibles_in_impt_plots
) + theme(text = element_text(size = 18), axis.text = element_text(size = 18))
# plot variable depth distribution
min_depth <- min_depth_distribution(rf$fit)
p_min_depth <- plot_min_depth_distribution(min_depth, mean_sample = "top_trees") +
  theme(text = element_text(size = 18), axis.text = element_text(size = 18)) +
  scale_fill_manual(values = colorRampPalette(c("royalblue", "slategray1"))(12))
# plot interaction between pairs of variables in the random forest
impt_vars <- important_variables(impt_measures,
  k = n_varibles_in_impt_plots,
  measures = c(first_measure, second_measure, third_measure)
)
# level of interaction between variables (min depth interactions)
interaction_vars <- min_depth_interactions(rf$fit, impt_vars)
p_interaction <- plot_min_depth_interactions(interaction_vars) +
  theme(text = element_text(size = 12), axis.text = element_text(size = 9))
# generate outputs ----------------------------------------------------------
cat("(Saving plots and tables) please wait ...\n\n")
suppressWarnings(suppressMessages(source("./codes/outputs.R")))
cat("(Script finished) outputs in folder ", paste0(getwd(), "/outputs"), "\n")
#save.image("SALVAR.RData")
| /random forest/script random forest.R | no_license | lschimke/The-relationship-between-autoantibodies-targeting-GPCRs-and-the-renin-angiotensin-system-associates- | R | false | false | 11,464 | r | ################################################################################
# Data analysis of COVID-19 published at: (article submitted for publication)
# date of creation: 06/27/2021 (date in US format)
# R version: 4.0.5
# script name: script.R
# aim: data analysis
# input: files from the folder 'data'
# output: files saved in the subdirectories of folder 'outputs'
# external sources: none
################################################################################
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# WARNING: On lines 253 - 255 you must choose the three measures of variable
# importance in order to keep going with the data analysis. Run the script only
# til line 250, then evaluate the results obtained and choose the three measures
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# script's parameters ----------------------------------------------------------
# you can change the parameters below according to your needs
# data related parameters
glog_data <- FALSE # set to TRUE if you want to transform the data using glog
std_data <- FALSE # set to TRUE if you wnat to standardize the data
redux_method <- "none" # choose between "none", "PCA", "FA"
redux_n_dim <- 4 # number of dimensions to retain in PCA or FA
# random forest related parameters
n_varibles_in_impt_plots <- 10 # choose number of variables to show plots
n_trees <- 5000 # initial number of trees
# graphics related parameters
width_factor <- 2 # increase width_factor to save plots with larger width
height_factor <- 2 # increase height_factor to save plots with larger height
color_ramp <- RColorBrewer::brewer.pal(n = 7, name = "Blues") # heatmap color
color_border <- grey(0.4) # heatmap border color
fontsize <- 8 # heatmap font size
# load sources -----------------------------------------------------------------
source("./codes/packages.R") # load packages
source("./codes/functions.R") # load functions
# seed -------------------------------------------------------------------------
set.seed(2021) # seed for replicate the replicating the analysis
# load dataset -----------------------------------------------------------------
#my_data <- read.csv("./data/my_data.csv", header = TRUE) # original data
my_data$Group <- factor(my_data$Group) # groups as factors
n <- ncol(my_data) # number of columns
if (glog_data) my_data[, -n] <- LogSt(my_data[, -n]) # apply glog
if (std_data) my_data[, -n] <- scale(my_data[, -n]) # standardize data
# descriptive measures ---------------------------------------------------------
# basic data struture
desc_data <- c(
n_obs = nrow(my_data), n_vars = ncol(my_data[, -n]),
n_in_group = table(my_data$Group)
)
desc_stat <- mvn(my_data[, -n])$Descriptives # standard descriptive statistics
desc_cor <- cor(my_data[, -n], method = "spearman") # spearman correlation
# plot desc_cor using clustering on rows and columns
cat("(New plot window) heatmap for spearman correlation matrix\n\n")
p_cor <- pheatmap(desc_cor,
clustering_method = "ward.D",
cluster_rows = TRUE, cluster_cols = TRUE,
color = color_ramp, border_color = color_border, fontsize = fontsize
)
# apply none/PCA/FA method according to redux_method ---------------------------
# perform data reduction if redux_method != "none"
redux_data <- switch(redux_method,
"none" = list(data = my_data, fit = "none"),
"PCA" = {
my_pca <- pca(my_data[, -n], redux_n_dim, rotate = "varimax")
my_data <- data.frame(my_pca$scores, Group = my_data[, n])
list(data = my_data, fit = fa.sort(my_pca))
},
"FA" = {
my_fa <- fa(my_data[, -n], redux_n_dim, rotate = "varimax")
my_data <- data.frame(my_fa$scores, Group = my_data[, n])
list(data = my_data, fit = fa.sort(my_fa))
}
)
my_data <- redux_data$data # data to be used in random forest
# plot loading matrix if redux_method != "none"
if (redux_method != "none") {
cat("(Print) results for ", redux_method, "\n\n")
print(redux_data$fit)
cat("\n\n")
# reset some parameters due to PCA or FA
n <- ncol(my_data)
k <- n_varibles_in_impt_plots
n_varibles_in_impt_plots <- min(k, redux_n_dim)
if (k != n_varibles_in_impt_plots) {
cat("(Print) n_varibles_in_impt_plots was changed to ", redux_n_dim, "\n\n")
}
redux_fit <- loadings(redux_data$fit) # loading matrix
redux_sorted_load <- matrix(unlist(redux_fit), ncol = redux_n_dim)
rownames(redux_sorted_load) <- rownames(redux_fit)
colnames(redux_sorted_load) <- colnames(redux_fit)
redux_vars <- rownames(redux_fit) # variables
redux_n <- seq_len(redux_n_dim) # factors
# dataset for ggplot
redux_df <- data.frame(
Loading = c(redux_fit),
Var = rep(redux_vars, redux_n_dim),
Factor = paste0("Factor ", rep(redux_n, each = nrow(redux_fit)))
)
redux_df$Var <- factor(redux_df$Var,
levels = rownames(redux_fit),
ordered = TRUE
)
p_redux <- ggplot(redux_df, aes(x = Var, y = Loading, fill = Loading)) +
geom_bar(stat = "identity") +
coord_flip() +
facet_wrap(~Factor, nrow = 1) +
scale_fill_gradient2(
name = "Loading: ", high = "blue", mid = "white",
low = "red", midpoint = 0, guide = "none"
) +
theme_light(base_size = 12) +
theme(
legend.position = "top",
legend.box.background = element_rect(colour = "black", fill = NA),
panel.grid.minor.x = element_blank(),
strip.text = element_text(face = "bold", colour = "black"),
strip.background = element_rect(fill = grey(0.9), colour = "black")
) +
labs(x = "", y = "Loading strength")
}
# split dataset into training and testing samples ------------------------------
data_split <- initial_split(my_data, prop = 0.75, strata = "Group")
sample_train <- training(data_split)
sample_test <- testing(data_split)
# create a 'recipe' object -----------------------------------------------------
sample_recipe <- recipe(Group ~ ., data = sample_train)
sample_prep <- sample_recipe %>% prep(training = sample_train, retain = TRUE)
# tune random forest hyperparameters (mtry) ------------------------------------
invisible(capture.output(
mtry <- tuneRF(sample_train[, -n], as.factor(sample_train$Group),
ntreeTry = n_trees, stepFactor = 1.5, improve = 0.01,
trace = FALSE, plot = FALSE
)
))
m <- mtry[mtry[, 2] == min(mtry[, 2]), 1][1] # best value of mtry
# fit random forest model ------------------------------------------------------
rf <- rand_forest(trees = n_trees, mtry = m, mode = "classification") %>%
set_engine("randomForest", importance = TRUE, localImp = TRUE) %>%
fit(Group ~ ., data = juice(sample_prep))
cat("(Print) basic results from random forest model\n\n")
print(rf$fit) # show basic results for the random forest model
# defining colors for each group
groups <- levels(sample_test$Group)
group_color <- c("#e41a1c", "#377eb8", "#4daf4a", "#984ea3")
# warning: the next plot will be displayed but not saved (save it manually)
cat("\n(New plot window) erro rates\n\n")
dev.new() # new plot window
plot(rf$fit, main = "Error rates", col = c("black", group_color)) # error rate
legend("topright",
legend = c("OOB", groups), col = c("black", group_color), lty = 1
) # legends
# evaluate the model -----------------------------------------------------------
# roc curves: one-vs-rest ROC per group, based on predicted class probabilities
pred_for_roc_curve <- predict(rf, sample_test[, -n], type = "prob")
auc <- rep(NA, length(groups)) # vector for holding values of area under ROC
names(auc) <- groups
# warning: the next plot will be displayed but not saved (save it manually)
cat("(New plot window) ROC curves\n\n")
dev.new() # open new plot window
for (i in seq_along(groups)) {
  # Define which observations belong to class[i]
  true_values <- sample_test$Group == groups[i]
  # Assess the performance of the classifier for class[i] (ROCR objects)
  pred <- prediction(pred_for_roc_curve[, i], true_values)
  perf <- performance(pred, "tpr", "fpr")
  if (i == 1) {
    plot(perf, main = "ROC Curve", col = group_color[i])
  } else {
    plot(perf, col = group_color[i], add = TRUE)
  }
  # Calculate the area under the curve (AUC) for class[i]
  auc[i] <- unlist(performance(pred, measure = "auc")@y.values)
}
legend("bottomright", legend = groups, col = group_color, lty = 1) # legends
# confusion matrix on the held-out testing samples
pred_for_table <- predict(rf, sample_test[, -n])
confusion_mat <- table(
  observed = sample_test[, n],
  predicted = unlist(pred_for_table)
)
cat("(Print) confusion matrix based on testing samples\n\n")
print(confusion_mat)
# print() is required so the AUC values also show up when the script is source()d;
# the original bare `auc` expressions only auto-print in interactive sessions
cat("(Print) one-vs-rest AUC per group\n\n")
print(auc)
# plot main results ------------------------------------------------------------
# tree with least number of nodes (which.min returns the first index of the min)
tree_num <- which.min(rf$fit$forest$ndbigtree)
p_rf_tree <- tree_func(final_model = rf$fit, tree_num)
# measures of variable importance for the fitted random forest model -----------
cat("\n(This process can take a while) please wait ...\n\n")
impt_measures <- measure_importance(rf$fit)
# choosing best set of importance measures to use
p_choose_imp_1 <- plot_importance_ggpairs(impt_measures)
p_choose_imp_2 <- plot_importance_rankings(impt_measures)
cat("(New plot window) Importance measures (plot 1)\n\n")
dev.new() # new plot window
print(p_choose_imp_1)
cat("(New plot window) Importance measures (plot 2)\n\n")
dev.new() # new plot window
print(p_choose_imp_2)
# !!! STOP HERE AND EVALUATE THE RESULTS BEFORE CHOOSING THE THREE MEASURES !!!
# define your chosen measures replacing NULL by the measure's name
first_measure <- "gini_decrease" # ex: first_measure <- "no_of_trees"
second_measure <- "no_of_nodes" # ex: second_measure <- "no_of_nodes"
third_measure <- "mean_min_depth" # ex: third_measure <- "mean_min_depth"
cat("(Again ... this process can take a while) please wait ...\n\n")
# test if the user has chosen all three importance measures
# (bug fix: the original tested first_measure three times, so a missing second
#  or third measure was never detected; || is used because the test is scalar)
if (is.null(first_measure) || is.null(second_measure) || is.null(third_measure)) {
  stop("You did not choose the three importance measures...
       please start the whole script again")
}
# plot the chosen measures
p_imp <- plot_multi_way_importance(impt_measures,
  x_measure = first_measure,
  y_measure = second_measure,
  size_measure = third_measure,
  no_of_labels = n_varibles_in_impt_plots
) + theme(text = element_text(size = 18), axis.text = element_text(size = 18))
# plot variable depth distribution
min_depth <- min_depth_distribution(rf$fit)
p_min_depth <- plot_min_depth_distribution(min_depth, mean_sample = "top_trees") +
  theme(text = element_text(size = 18), axis.text = element_text(size = 18)) +
  scale_fill_manual(values = colorRampPalette(c("royalblue", "slategray1"))(12))
# plot interaction between pairs of variables in the random forest
impt_vars <- important_variables(impt_measures,
  k = n_varibles_in_impt_plots,
  measures = c(first_measure, second_measure, third_measure)
)
# level of interaction between variables (min depth interactions)
interaction_vars <- min_depth_interactions(rf$fit, impt_vars)
p_interaction <- plot_min_depth_interactions(interaction_vars) +
  theme(text = element_text(size = 12), axis.text = element_text(size = 9))
# generate outputs ----------------------------------------------------------
cat("(Saving plots and tables) please wait ...\n\n")
suppressWarnings(suppressMessages(source("./codes/outputs.R")))
cat("(Script finished) outputs in folder ", paste0(getwd(), "/outputs"), "\n")
#save.image("SALVAR.RData")
|
#Load all necessary packages installed
library(DMwR)
library(randomForest)
library(caret)
library(dummies)
library(corrplot)
library(ggplot2)
library(reshape)
#The following codes are used for generating features
#FE1: Data cleaning
train <- read.csv("J:\\train.csv") #read the train CSV file (hard-coded path)
#FE1.1: Remove outliers of revenue from training data
revenueMatrix <- subset(train, select = c(Id, revenue))
outliers.scores <- lofactor(revenueMatrix, k=15) #LOF (local outlier factor)
#index of the single most outlying observation
#(TRUE spelled out instead of T, which is reassignable)
outliers <- order(outliers.scores, decreasing = TRUE)[1]
print(outliers)
train <- train[-(outliers), ] #remove the outlier row from train
n.train <- nrow(train)
test <- read.csv("J:\\test.csv") #read test CSV file (hard-coded path)
test$revenue <- 1 #placeholder so train and test have identical columns
#NOTE(review): the next line is immediately overwritten by the rbind() below;
#comment out one of the two depending on the run mode (CV vs. prediction)
myData <- train #used when cross-validation
myData <- rbind(train, test) #used when modelling and prediction
#FE1.2: Remove loosely-correlated variables
myData <- subset(myData, select = -c(City, City.Group, P22))
#FE2: Transformation of attributes
#FE2.1: change MB to DT (merge the rare "MB" restaurant type into "DT")
myData$Type[myData$Type=="MB"] <- "DT"
myData$Type <- as.factor(myData$Type)
#Calculate 'lasting days' until 1st Jan 2015
#Open.Date is parsed as "MM/DD/YYYY" (see the strptime format below)
myData$year <- substr(as.character(myData$Open.Date),7,10)
myData$month <- substr(as.character(myData$Open.Date),1,2)
myData$day <- substr(as.character(myData$Open.Date),4,5)
myData$Date <- as.Date(strptime(myData$Open.Date, "%m/%d/%Y"))
myData$days <- as.numeric(as.Date("2015-01-01")-myData$Date)
#year/month/day/Date were only intermediates; drop them together with Open.Date
myData<-subset(myData, select= -c(year, month, day, Date, Open.Date))
#FE2.2: SqrtRoot-Log transform of revenue & lasting days
#NOTE(review): test rows have the placeholder revenue 1, which maps to 0 here
myData$revenue <- sqrt(log(myData$revenue))
myData$days <- sqrt(log(myData$days))
#FE3: Convert P-Variables to dummies
#Convert P-Variables to dummies
myData <- dummy.data.frame(myData, names=c("P14", "P15", "P16", "P17", "P18", "P24", "P25", "P26", "P27", "P30", "P31", "P32", "P33", "P34", "P35", "P36", "P37"), all=T)
#Remove 0-indicator
myData <- subset(myData, select = -c(P140, P150, P160, P170, P180, P240, P250, P260, P270, P300, P310, P320, P330, P340, P350, P360, P370))
#Convert other P-Variables to dummies
myData <- dummy.data.frame(myData, names=c("P1", "P2", "P3", "P4", "P5", "P6", "P7", "P8", "P9", "P10", "P11", "P12", "P13", "P19", "P20", "P21", "P23", "P28", "P29"), all=T)
#Remove '0' indicator for P3 & P29
myData <- subset(myData, select = -c(P30, P290))
#---------------------------------------------------------#
#The following codes are used after features are generated
#RandomForest 10-fold CV
#Only when 'train' is assigned to 'myData' (see FE1.1 above)
modelCV <- train(revenue~., data=myData, method = "rf",
                 trControl=trainControl(method="cv", number=10),
                 prox = TRUE, allowParallel = TRUE)
print(modelCV)
print(modelCV$finalModel)
#Random Forest Modelling
#Change the last 4 parameters values if necessary
set.seed(24601)
model <- randomForest(revenue~.,
                      data=myData[1:n.train,], importance=TRUE,
                      mtry = 139, ntree=73500, nPerm=40, nodesize=17)
#Make a Prediction on the held-out test rows
prediction <- predict(model, myData[-c(1:n.train), ])
#Back-transform of revenue & write the prediction output to Excel CSV
#(bug fix: the object was created as 'Submit' but used as 'submit' below,
# which raised "object 'submit' not found"; the name is now consistent)
submit <- as.data.frame(cbind(seq(0, length(prediction) - 1, by=1),
                              exp(prediction^2)))
colnames(submit)<-c("Id","Prediction")
write.csv(submit,"winningSoulution_v2_4th.csv",row.names=FALSE,
          quote=FALSE)
#---------------------------------------------------------#
#The following codes are for plotting graph
#Plot outliers score
plot(outliers.scores)
#Plot graph of variables correlation by hierarchical clustering
#Convert City, City Group & Type to numeric values
train$City <- as.numeric(train$City)
train$City.Group <- as.numeric(train$City.Group)
train$Type <- as.numeric(train$Type)
numPVar <- sapply(train, is.numeric)
correlation <- cor(train[, numPVar])
corrplot(correlation, order = "hclust")
plot(train$Type) #plot Type in train
plot(test$Type) #plot Type in test
hist(train$revenue) #histogram of revenue
#bug fix: the next two hist() calls were each missing a closing parenthesis,
#which made the script fail to parse
hist(log(train$revenue)) #histogram of Log of revenue
hist(sqrt(train$revenue)) #histogram of SqrtRoot of revenue
hist(sqrt(log(train$revenue))) #histogram of SqrtRoot-Log of revenue
colnames(myData) #check column number
#Histogram of P-Variable Cluster P14-P18, P24-P27, P30-P37
zeroVar <- c(1, 17:21, 26:29, 32:39) #column number
clusterA <- melt(myData[, zeroVar], id.vars="Id")
ggplot(clusterA, aes( x = value)) + facet_wrap(~variable, scales = "free_x") + geom_histogram()
#histogram of P-Variable Cluster P1-P13, P19-P21, P23, P28-P29
otherVar <- c(1, 4:16, 22:25, 30:31) #column number
clusterB <- melt(myData[, otherVar], id.vars="Id")
ggplot(clusterB, aes( x = value)) + facet_wrap(~variable, scales = "free_x") + geom_histogram() | /RVP_1st.r | no_license | cpld2001189/Kaggle_RestaurantRevenuePrediction | R | false | false | 4,886 | r | #Load all necessary packages installed
library(DMwR)
library(randomForest)
library(caret)
library(dummies)
library(corrplot)
library(ggplot2)
library(reshape)
#The following codes are used for generating features
#FE1: Data cleaning
train <- read.csv("J:\\train.csv") #read the train CSV file
#FE1.1: Remove outliers of revenue from training data
revenueMatrix <- subset(train, select = c(Id, revenue))
outliers.scores <- lofactor(revenueMatrix, k=15) #LOF
outliers <- order(outliers.scores, decreasing = T)[1]
print(outliers)
train <- train[-(outliers), ] #remove outliers in
n.train <- nrow(train)
test <- read.csv("J:\\test.csv") #read test CSV file
test$revenue <- 1
myData <- train #used when cross-validation
myData <- rbind(train, test) #used when modelling and prediction
#FE1.2: Remove loosely-correlated variables
myData <- subset(myData, select = -c(City, City.Group, P22))
#FE2: Transformation of attributes
#FE2.1: change MB to DT
myData$Type[myData$Type=="MB"] <- "DT"
myData$Type <- as.factor(myData$Type)
#Calculate 'lasting days' until 1st Jan 2015
myData$year <- substr(as.character(myData$Open.Date),7,10)
myData$month <- substr(as.character(myData$Open.Date),1,2)
myData$day <- substr(as.character(myData$Open.Date),4,5)
myData$Date <- as.Date(strptime(myData$Open.Date, "%m/%d/%Y"))
myData$days <- as.numeric(as.Date("2015-01-01")-myData$Date)
myData<-subset(myData, select= -c(year, month, day, Date, Open.Date))
#FE2.2: SqrtRoot-Log transform of revenue & lasting days
myData$revenue <- sqrt(log(myData$revenue))
myData$days <- sqrt(log(myData$days))
#FE3: Convert P-Variables to dummies
#Convert P-Variables to dummies
myData <- dummy.data.frame(myData, names=c("P14", "P15", "P16", "P17", "P18", "P24", "P25", "P26", "P27", "P30", "P31", "P32", "P33", "P34", "P35", "P36", "P37"), all=T)
#Remove 0-indicator
myData <- subset(myData, select = -c(P140, P150, P160, P170, P180, P240, P250, P260, P270, P300, P310, P320, P330, P340, P350, P360, P370))
#Convert other P-Variables to dummies
myData <- dummy.data.frame(myData, names=c("P1", "P2", "P3", "P4", "P5", "P6", "P7", "P8", "P9", "P10", "P11", "P12", "P13", "P19", "P20", "P21", "P23", "P28", "P29"), all=T)
#Remove '0' indicator for P3 & P29
myData <- subset(myData, select = -c(P30, P290))
#---------------------------------------------------------#
#The following codes are used after features are generated
#RandomForest 10-fold CV
#Only when 'train' is assigned to 'myData' (see FE1.1 above)
modelCV <- train(revenue~., data=myData, method = "rf",
                 trControl=trainControl(method="cv", number=10),
                 prox = TRUE, allowParallel = TRUE)
print(modelCV)
print(modelCV$finalModel)
#Random Forest Modelling
#Change the last 4 parameters values if necessary
set.seed(24601)
model <- randomForest(revenue~.,
                      data=myData[1:n.train,], importance=TRUE,
                      mtry = 139, ntree=73500, nPerm=40, nodesize=17)
#Make a Prediction on the held-out test rows
prediction <- predict(model, myData[-c(1:n.train), ])
#Back-transform of revenue & write the prediction output to Excel CSV
#(bug fix: the object was created as 'Submit' but used as 'submit' below,
# which raised "object 'submit' not found"; the name is now consistent)
submit <- as.data.frame(cbind(seq(0, length(prediction) - 1, by=1),
                              exp(prediction^2)))
colnames(submit)<-c("Id","Prediction")
write.csv(submit,"winningSoulution_v2_4th.csv",row.names=FALSE,
          quote=FALSE)
#---------------------------------------------------------#
#The following codes are for plotting graph
#Plot outliers score
plot(outliers.scores)
#Plot graph of variables correlation by hierarchical clustering
#Convert City, City Group & Type to numeric values
train$City <- as.numeric(train$City)
train$City.Group <- as.numeric(train$City.Group)
train$Type <- as.numeric(train$Type)
numPVar <- sapply(train, is.numeric)
correlation <- cor(train[, numPVar])
corrplot(correlation, order = "hclust")
plot(train$Type) #plot Type in train
plot(test$Type) #plot Type in test
hist(train$revenue) #histogram of revenue
#bug fix: the next two hist() calls were each missing a closing parenthesis,
#which made the script fail to parse
hist(log(train$revenue)) #histogram of Log of revenue
hist(sqrt(train$revenue)) #histogram of SqrtRoot of revenue
hist(sqrt(log(train$revenue))) #histogram of SqrtRoot-Log of revenue
colnames(myData) #check column number
#Histogram of P-Variable Cluster P14-P18, P24-P27, P30-P37
zeroVar <- c(1, 17:21, 26:29, 32:39) #column number
clusterA <- melt(myData[, zeroVar], id.vars="Id")
ggplot(clusterA, aes( x = value)) + facet_wrap(~variable, scales = "free_x") + geom_histogram()
#histogram of P-Variable Cluster P1-P13, P19-P21, P23, P28-P29
otherVar <- c(1, 4:16, 22:25, 30:31) #column number
clusterB <- melt(myData[, otherVar], id.vars="Id")
ggplot(clusterB, aes( x = value)) + facet_wrap(~variable, scales = "free_x") + geom_histogram()
# NOTE(review): the original script began with rm(list = ls()); clearing the
# user's workspace from inside a script is an anti-pattern, so it was removed.
# Run the script in a fresh R session instead.
# Load the raw tables (hard-coded local paths; adjust as needed).
# TRUE is spelled out instead of the reassignable shortcut T.
train_data <- read.csv("C:/Users/shrey/Downloads/public_dat/public_data/train.csv", sep=',', header = TRUE)
building_structure <- read.csv("C:/Users/shrey/Downloads/public_dat/public_data/building_structure.csv", sep=',', header = TRUE)
building_ownership <- read.csv("C:/Users/shrey/Downloads/public_dat/public_data/building_ownership.csv", sep=',', header = TRUE)
ward_demographic <- read.csv("C:/Users/shrey/Downloads/public_dat/public_data/ward_demographic_data.csv", sep=',', header = TRUE)
test_data <- read.csv("C:/Users/shrey/Downloads/public_dat/public_data/test.csv", sep=',', header = TRUE)
#=====================================================================================================
#Merging the dataset into one giant dataset for training set and testing set
# NOTE(review): the original called order(<table>$building_id) four times and
# discarded the results; order() has no side effects, so those no-op calls were
# removed (merge() does not require sorted keys).
# join the ownership attributes onto the train/test rows by building_id
train_total <- merge(train_data,building_ownership,by=c("building_id"))
# NOTE(review): the building_structure merge is commented out, so that table
# (and ward_demographic) is loaded but never used — confirm this is intended
#train_total_f <- merge(train_total, building_structure, by=c("building_id"))
test_total <- merge(test_data, building_ownership, by=c("building_id"))
#test_total_f <- merge(test_total, building_structure, by=c("building_id"))
#Removing the first column i.e. building ID as it is unrelated
# NOTE(review): drops by position — assumes building_id is column 1 after merge
train_total_f <- train_total[-c(1)]
test_total_f <- test_total[-c(1)]
#Converting each of the labels into right format i.e. categorical variables and numerical variables
summary(train_total_f)
library(dplyr)
train_total_f <- train_total_f %>% mutate_if(is.character,as.factor)
test_total_f <- test_total_f %>% mutate_if(is.character,as.factor)
#====================================================================================================
#Running Tests for just Numerical Values (Ignoring non-numerical values)
#Finding Correlation
#Chose this if using only non-character categorical variables
#correlation_train_set <- train_total_f[,-which(sapply(train_total_f, class) == "character")]
#correlation_test_set <- test_total_f[,-which(sapply(test_total_f, class) == "character")]
#Choose this if using the entire dataset
# factors are replaced by their integer codes so cor() can run on every column
correlation_train_set <- train_total_f %>% mutate_if(is.factor,as.integer)
correlation_test_set <- test_total_f %>% mutate_if(is.factor,as.integer)
#====================================================================================================
library(corrplot)
# correlation of every column with the target
# NOTE(review): bare expressions below only auto-print in interactive sessions;
# wrap in print() if this script is meant to be source()d
cor(correlation_train_set, correlation_train_set$damage_grade)
#Finding Significant correlation between some variables
cor.test(correlation_train_set$has_superstructure_mud_mortar_stone, correlation_train_set$damage_grade)
cor.test(correlation_train_set$has_superstructure_mud_mortar_brick, correlation_train_set$damage_grade)
#========================================================================================================
#Creating a decision tree for the 5 classes
library(rpart)
library(ggplot2)
library(rpart.plot)
# tree based classification
fit <- rpart(damage_grade ~ .,
method="class", data=correlation_train_set)
# plot tree
rpart.plot(fit, type =5, extra = 101, digits=-3)
summary(fit)
library(ggfortify)
correlation_train_set<-na.omit(correlation_train_set)
pca_set <- prcomp(correlation_train_set,scale. = TRUE)
summary(pca_set)
#=====================================================================================================
#Gradient booster method
library(gbm)
set.seed(123)
gbm_fit <- gbm(
formula = damage_grade ~ .,
distribution = "gaussian",
data = correlation_train_set,
n.trees = 15,
interaction.depth = 5,
shrinkage = 0.1,
n.minobsinnode = 5,
bag.fraction = .65,
train.fraction = 1,
n.cores = NULL, # will use all cores by default
verbose = FALSE
)
write.csv(gbm_fit, file="gbm.csv",row.names = FALSE)
summary(gbm_fit,
cBars = 15,
method = relative.influence, # also can use permutation.test.gbm
las = 1
)
#====================================================================================================
#Fitting a Random Forest Classifier
library(caret)
dataset <- correlation_train_set
#dataset_test <- correlation_test_set
dataset$damage_grade <- as.factor(dataset$damage_grade)
dataset <- na.omit(dataset)
library(caTools)
split = sample.split(dataset$damage_grade, SplitRatio = 0.75)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
library(randomForest)
library(caret)
classifier = randomForest(x = training_set[,-9],
y = training_set$damage_grade, ntree = 15)
#plot(classifier)
y_pred = predict(classifier, newdata = test_set)
#dataset_test <- test_data
#y_pred = predict(classifier, newdata = dataset_test)
y_pred
#Use this to write the output variable/dataframe into a csv file for further usage
#length(y_pred)
building_id <- test_total$building_id
#write.csv(data.frame(building_id, y=y_pred), file='prediction.csv', row.names=FALSE)
# Making the Confusion Matrix
cm = table(test_set[,9], y_pred)
cm
print("====================================Random Forest=====================================")
library(ggplot2)
library(lattice)
library(caret)
confusionMatrix(cm)
#For Full Dataset: Balanced Accuracy 0.78263 0.67038 0.65114 0.6811 0.7749 (For Class 1-5)
#FOr Semi Dataset: Balanced Accuracy 0.77837 0.66751 0.64551 0.6761 0.7699 (For Class 1-5)
#===================================================================================================
#Ride Lasso and ElasticNet Implementation
library(glmnet)
split = sample.split(dataset$damage_grade, SplitRatio = 0.75)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
#change alpha=1 (ridge), alpha=0 (lasso), alpha=0.5 (elasticnet)
ridge.fit <- glmnet(x=as.matrix(training_set[,-9]),y=training_set[,9],
family='multinomial',alpha=0.5)
plot(ridge.fit,xvar='lambda',label=TRUE)
nlam<-length(ridge.fit$lambda)
ridge.pred.tr<-predict(ridge.fit,newx=as.matrix(training_set[,-9]),
type = 'class')
ridge.pred.te<-predict(ridge.fit,newx=as.matrix(test_set[,-9]),
type = 'class')
ridge.train <- ridge.test <- numeric(nlam)
for (i in 1:nlam){
ridge.train[i] <- mean(!(ridge.pred.tr[,i]==training_set$damage_grade))
ridge.test[i] <- mean(!(ridge.pred.te[,i]==test_set$damage_grade))
}
#To check for output accuracy with the current model
plot(log(ridge.fit$lambda),ridge.train,type='l')
lines(log(ridge.fit$lambda),ridge.test,col='red')
lines(log(ridge.fit$lambda),rep(0,nlam),lty='dotdash')
| /test_old.R | no_license | nshreyasvi/HackZurich2020 | R | false | false | 6,539 | r | rm(list=ls())
# ---------------------------------------------------------------------------
# Earthquake building-damage analysis: load CSVs -> merge on building_id ->
# encode factors as integers -> correlations -> rpart tree -> PCA -> gbm ->
# random forest -> glmnet (ridge/lasso/elastic-net) over the lambda path.
# NOTE(review): absolute Windows paths make the script non-portable.
# ---------------------------------------------------------------------------
train_data <- read.csv("C:/Users/shrey/Downloads/public_dat/public_data/train.csv", sep=',', header = T)
building_structure <- read.csv("C:/Users/shrey/Downloads/public_dat/public_data/building_structure.csv", sep=',', header = T)
building_ownership <- read.csv("C:/Users/shrey/Downloads/public_dat/public_data/building_ownership.csv", sep=',', header = T)
ward_demographic <- read.csv("C:/Users/shrey/Downloads/public_dat/public_data/ward_demographic_data.csv", sep=',', header = T)
test_data <- read.csv("C:/Users/shrey/Downloads/public_dat/public_data/test.csv", sep=',', header = T)
#=====================================================================================================
#Merging the dataset into one giant dataset for training set and testing set
# NOTE(review): these order() results are discarded -- the calls are no-ops.
order(train_data$building_id)
order(test_data$building_id)
order(building_ownership$building_id)
order(building_structure$building_id)
train_total <- merge(train_data,building_ownership,by=c("building_id"))
#train_total_f <- merge(train_total, building_structure, by=c("building_id"))
test_total <- merge(test_data, building_ownership, by=c("building_id"))
#test_total_f <- merge(test_total, building_structure, by=c("building_id"))
#Removing the first column i.e. building ID as it is unrelated
train_total_f <- train_total[-c(1)]
test_total_f <- test_total[-c(1)]
#Converting each of the labels into right format i.e. categorical variables and numerical variables
summary(train_total_f)
library(dplyr)
train_total_f <- train_total_f %>% mutate_if(is.character,as.factor)
test_total_f <- test_total_f %>% mutate_if(is.character,as.factor)
#====================================================================================================
#Running Tests for just Numerical Values (Ignoring non-numerical values)
#Finding Correlation
#Chose this if using only non-character categorical variables
#correlation_train_set <- train_total_f[,-which(sapply(train_total_f, class) == "character")]
#correlation_test_set <- test_total_f[,-which(sapply(test_total_f, class) == "character")]
#Choose this if using the entire dataset
# NOTE(review): factor -> integer coding treats nominal categories as ordinal;
# the correlations below are rough indicators only.
correlation_train_set <- train_total_f %>% mutate_if(is.factor,as.integer)
correlation_test_set <- test_total_f %>% mutate_if(is.factor,as.integer)
#====================================================================================================
library(corrplot)
# Correlation of every column with the target damage_grade.
cor(correlation_train_set, correlation_train_set$damage_grade)
#Finding Significant correlation between some variables
cor.test(correlation_train_set$has_superstructure_mud_mortar_stone, correlation_train_set$damage_grade)
cor.test(correlation_train_set$has_superstructure_mud_mortar_brick, correlation_train_set$damage_grade)
#========================================================================================================
#Creating a decision tree for the 5 classes
library(rpart)
library(ggplot2)
library(rpart.plot)
# tree based classification
fit <- rpart(damage_grade ~ .,
method="class", data=correlation_train_set)
# plot tree
rpart.plot(fit, type =5, extra = 101, digits=-3)
summary(fit)
# PCA on the complete cases, scaled.
library(ggfortify)
correlation_train_set<-na.omit(correlation_train_set)
pca_set <- prcomp(correlation_train_set,scale. = TRUE)
summary(pca_set)
#=====================================================================================================
#Gradient booster method
# NOTE(review): distribution = "gaussian" treats the class label as a
# regression target; "multinomial" would be the classification choice.
library(gbm)
set.seed(123)
gbm_fit <- gbm(
formula = damage_grade ~ .,
distribution = "gaussian",
data = correlation_train_set,
n.trees = 15,
interaction.depth = 5,
shrinkage = 0.1,
n.minobsinnode = 5,
bag.fraction = .65,
train.fraction = 1,
n.cores = NULL, # will use all cores by default
verbose = FALSE
)
# NOTE(review): write.csv() on a gbm model object will likely fail or emit
# garbage; saveRDS() is the usual way to persist a fitted model.
write.csv(gbm_fit, file="gbm.csv",row.names = FALSE)
summary(gbm_fit,
cBars = 15,
method = relative.influence, # also can use permutation.test.gbm
las = 1
)
#====================================================================================================
#Fitting a Random Forest Classifier
library(caret)
dataset <- correlation_train_set
#dataset_test <- correlation_test_set
# Factor target => randomForest does classification.
dataset$damage_grade <- as.factor(dataset$damage_grade)
dataset <- na.omit(dataset)
library(caTools)
# Stratified 75/25 split on the class label.
split = sample.split(dataset$damage_grade, SplitRatio = 0.75)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
library(randomForest)
library(caret)
# NOTE(review): column 9 is assumed to be damage_grade here and below.
classifier = randomForest(x = training_set[,-9],
y = training_set$damage_grade, ntree = 15)
#plot(classifier)
y_pred = predict(classifier, newdata = test_set)
#dataset_test <- test_data
#y_pred = predict(classifier, newdata = dataset_test)
y_pred
#Use this to write the output variable/dataframe into a csv file for further usage
#length(y_pred)
building_id <- test_total$building_id
#write.csv(data.frame(building_id, y=y_pred), file='prediction.csv', row.names=FALSE)
# Making the Confusion Matrix
cm = table(test_set[,9], y_pred)
cm
print("====================================Random Forest=====================================")
library(ggplot2)
library(lattice)
library(caret)
confusionMatrix(cm)
#For Full Dataset: Balanced Accuracy 0.78263 0.67038 0.65114 0.6811 0.7749 (For Class 1-5)
#FOr Semi Dataset: Balanced Accuracy 0.77837 0.66751 0.64551 0.6761 0.7699 (For Class 1-5)
#===================================================================================================
#Ridge, Lasso and ElasticNet Implementation
library(glmnet)
# Fresh (unseeded) 75/25 split for the penalised models.
split = sample.split(dataset$damage_grade, SplitRatio = 0.75)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
#change alpha=0 (ridge), alpha=1 (lasso), alpha=0.5 (elasticnet)
ridge.fit <- glmnet(x=as.matrix(training_set[,-9]),y=training_set[,9],
family='multinomial',alpha=0.5)
plot(ridge.fit,xvar='lambda',label=TRUE)
# Misclassification rate along the full lambda path, train vs test.
nlam<-length(ridge.fit$lambda)
ridge.pred.tr<-predict(ridge.fit,newx=as.matrix(training_set[,-9]),
type = 'class')
ridge.pred.te<-predict(ridge.fit,newx=as.matrix(test_set[,-9]),
type = 'class')
ridge.train <- ridge.test <- numeric(nlam)
for (i in 1:nlam){
ridge.train[i] <- mean(!(ridge.pred.tr[,i]==training_set$damage_grade))
ridge.test[i] <- mean(!(ridge.pred.te[,i]==test_set$damage_grade))
}
#To check for output accuracy with the current model
plot(log(ridge.fit$lambda),ridge.train,type='l')
lines(log(ridge.fit$lambda),ridge.test,col='red')
lines(log(ridge.fit$lambda),rep(0,nlam),lty='dotdash')
|
# ---------------------------------------------------------------------------
# Loan default (m13) prediction: EDA bar/scatter plots -> IQR outlier capping
# -> date & factor-level recoding -> logistic regression -> ROC/AUC ->
# submission CSV.
# NOTE(review): absolute Windows paths ("F:/...") make this non-portable.
# ---------------------------------------------------------------------------
library(dplyr)
library(ggplot2)
train<-read.csv("F:/Hackathons/Loan Prediction/train.csv", stringsAsFactors = FALSE)
str(train)
test<-read.csv("F:/Hackathons/Loan Prediction/test.csv", stringsAsFactors = FALSE)
str(test$first_payment_date)
submission<-read.csv("F:/Hackathons/Loan Prediction/sample_submission.csv")
###Categorical###
# Bar charts: total m13 count per level of each categorical predictor.
ggplot(train, aes(source, m13)) + geom_bar(stat = "identity", color = "purple") +
ggtitle("source vs m13")
###X has highest impact on m13 followed by Y and Z#####
ggplot(train, aes(financial_institution, m13)) + geom_bar(stat = "identity", color = "purple") +
theme(axis.text.x = element_text(angle = 70, vjust = 0.5, color = "navy")) + xlab("financial_institution") + ylab("m13")+
ggtitle("financial_institution vs m13")
###Other followed by Browning hat has highest impact on m13#####
ggplot(train, aes(origination_date, m13)) + geom_bar(stat = "identity", color = "purple") +
ggtitle("origination_date vs m13")
###2012-01-01 has highest impact on m13 followed by 2012-02-01 and then 2012-03-01 #####
ggplot(train, aes(first_payment_date, m13)) + geom_bar(stat = "identity", color = "purple") +
ggtitle("first_payment_date vs m13")
###03/2012 has highest impact on m13 followed by 04/2012 and then 05/2012 #####
ggplot(train, aes(loan_purpose, m13)) + geom_bar(stat = "identity", color = "purple") +
xlab("loan_purpose") + ylab("m13") + ggtitle("loan_purpose vs m13")
####B12, followed by A23 and C86######
ggplot(train, aes(insurance_percent, m13)) + geom_bar(stat = "identity", color = "purple") +
xlab("insurance_percent") + ylab("m13") + ggtitle("insurance_percent vs m13")
####0 followed by 30 and then 25######
ggplot(train, aes(insurance_type, m13)) + geom_bar(stat = "identity", color = "purple") +
xlab("insurance_type") + ylab("m13") + ggtitle("insurance_type vs m13")
####-0.5 to 0.5 has max impact on m13 and cover more than 95%######
###Continuous###
# NOTE(review): aes() maps m13 to x and interest_rate to y here, but the axis
# labels claim the opposite -- labels are swapped for this plot.
ggplot(train, aes(x= m13, y = interest_rate)) + geom_point(size = 2.5, color="navy") +
xlab("interest_rate") + ylab("m13") + ggtitle("interest_rate vs m13")
####-0.5 to 0.5 has max impact on m13 and cover more than 95%######
ggplot(train, aes(x= interest_rate, y = m13)) + geom_point(size = 2.5, color="navy") +
xlab("interest_rate") + ylab("m13") + ggtitle("interest_rate vs m13")
ggplot(train, aes(x= unpaid_principal_bal, y = m13)) + geom_point(size = 2.5, color="navy") +
xlab("unpaid_principal_bal") + ylab("m13") + ggtitle("unpaid_principal_bal vs m13")
ggplot(train, aes(x= loan_term, y = m13)) + geom_point(size = 2.5, color="navy") +
xlab("loan_term") + ylab("m13") + ggtitle("loan_term vs m13")
ggplot(train, aes(x= loan_to_value, y = m13)) + geom_point(size = 2.5, color="navy") +
xlab("loan_to_value") + ylab("m13") + ggtitle("loan_to_value vs m13")
ggplot(train, aes(x= debt_to_income_ratio, y = m13)) + geom_point(size = 2.5, color="navy") +
xlab("debt_to_income_ratio") + ylab("m13") + ggtitle("debt_to_income_ratio vs m13")
ggplot(train, aes(x= borrower_credit_score, y = m13)) + geom_point(size = 2.5, color="navy") +
xlab("borrower_credit_score") + ylab("m13") + ggtitle("borrower_credit_score vs m13")
# NOTE(review): the title string below contains a typo ("co.borrower_cre m,
# dit_score"); left as-is here since it is runtime output text.
ggplot(train, aes(x= co.borrower_credit_score, y = m13)) + geom_point(size = 2.5, color="navy") +
xlab("co.borrower_credit_score") + ylab("m13") + ggtitle("co.borrower_cre m, dit_score vs m13")
table(train$m13)
# Stack train and test; test rows get a placeholder m13 of 0 so rbind works.
# NOTE(review): these dummy-labelled test rows later end up in the model's
# training split (see the random split below) -- confirm this is intended,
# as it injects fake labels into training.
test$m13<-0
data<-rbind(train,test)
sum(is.na(data))
str(data)
names(data)
# `list` holds the numeric column names to box-plot (shadows base::list).
list<-names(data[,-c(1,2,3,7,8,13)])
unique(data$m12)
par(mfrow=c(4,7), mar=c(1,1,1,1))
for(i in 1:length(list)){
boxplot(data[,list[i]], main = list[i])
}
#########Outlier Treatment############
# Tukey-style capping: values beyond Q1/Q3 +/- 1.5*IQR are replaced with the
# 5th / 95th percentile. Repeated per variable; only the offending tail is
# capped for some variables.
dev.off()
summary(data$interest_rate)
x1 <- data$interest_rate
qnt <- quantile(x1, probs=c(.25, .75), na.rm = T)
caps <- quantile(x1, probs=c(.05, .95), na.rm = T)
H <- 1.5 * IQR(x1, na.rm = T)
x1[x1 < (qnt[1] - H)] <- caps[1]
x1[x1 > (qnt[2] + H)] <- caps[2]
boxplot(x1)
data$interest_rate <- x1
summary(data$interest_rate)
####
summary(data$unpaid_principal_bal)
boxplot(data$unpaid_principal_bal)
x2 <- data$unpaid_principal_bal
qnt <- quantile(x2, probs=c(.25, .75), na.rm = T)
caps <- quantile(x2, probs=c(.05, .95), na.rm = T)
H <- 1.5 * IQR(x2, na.rm = T)
x2[x2 > (qnt[2] + H)] <- caps[2]
boxplot(x2)
data$unpaid_principal_bal <- x2
summary(data$unpaid_principal_bal)
####
summary(data$loan_to_value)
boxplot(data$loan_to_value)
x3 <- data$loan_to_value
qnt <- quantile(x3, probs=c(.25, .75), na.rm = T)
caps <- quantile(x3, probs=c(.05, .95), na.rm = T)
H <- 1.5 * IQR(x3, na.rm = T)
x3[x3 < (qnt[1] - H)] <- caps[1]
boxplot(x3)
data$loan_to_value <- x3
summary(data$loan_to_value)
#####
summary(data$borrower_credit_score)
boxplot(data$borrower_credit_score)
x4 <- data$borrower_credit_score
qnt <- quantile(x4, probs=c(.25, .75), na.rm = T)
caps <- quantile(x4, probs=c(.05, .95), na.rm = T)
H <- 1.5 * IQR(x4, na.rm = T)
x4[x4 < (qnt[1] - H)] <- caps[1]
boxplot(x4)
data$borrower_credit_score <- x4
summary(data$borrower_credit_score)
###############
summary(data$insurance_percent)
boxplot(data$insurance_percent)
x5 <- data$insurance_percent
qnt <- quantile(x5, probs=c(.25, .75), na.rm = T)
caps <- quantile(x5, probs=c(.05, .95), na.rm = T)
H <- 1.5 * IQR(x5, na.rm = T)
x5[x5 > (qnt[2] + H)] <- caps[2]
boxplot(x5)
data$insurance_percent <- x5
summary(data$insurance_percent)
###
# Re-plot boxplots after capping to confirm the tails are gone.
list<-names(data[,-c(1,2,3,7,8,13)])
for(i in 1:length(list)){
boxplot(data[,list[i]], main = list[i])
}
list
######Formatting dates########
# Normalise the two date columns to a single representation each.
unique(data$origination_date)
data$origination_date[data$origination_date=="01/01/12"]<-"2012-01-01"
data$origination_date[data$origination_date=="01/02/12"]<-"2012-02-01"
data$origination_date[data$origination_date=="01/03/12"]<-"2012-03-01"
unique(data$first_payment_date)
data$first_payment_date[data$first_payment_date=="Apr-12"]<-"04/2012"
data$first_payment_date[data$first_payment_date=="Mar-12"]<-"03/2012"
data$first_payment_date[data$first_payment_date=="May-12"]<-"05/2012"
data$first_payment_date[data$first_payment_date=="Feb-12"]<-"02/2012"
#######Binning and Creating dummy Variable######
unique(data$m13)
unique(data$financial_institution)
# Per-institution default rate: n = defaults (m13 == 1), N = total loans.
data%>%count(m13,levels=financial_institution)%>%filter(m13==1)->datC1
datC1$N<-unclass(data%>%filter(financial_institution%in%datC1$levels)%>%count(financial_institution))[[2]]
datC1$m13Perc<-datC1$n/datC1$N
datC1$Var.Name<-rep("financial_institution",nrow(datC1))
# NOTE(review): sorting m13Perc on its own detaches it from the other
# columns of datC1 -- after this line rows of datC1 no longer align.
datC1$m13Perc<-sort(datC1$m13Perc, decreasing = FALSE)
# Collapse financial_institution into 3 ordinal bins (0/1/2).
# NOTE(review): the condition tests =="OTHER" twice -- one comparison is
# redundant or a different level was intended.
data$financial_institution<-ifelse(data$financial_institution=="Anderson-Taylor"|data$financial_institution=="Chapman-Mcmahon"|data$financial_institution=="Browning-Hart"|data$financial_institution=="Cole, Brooks and Vincent"|
data$financial_institution=="Edwards-Hoffman",0,ifelse(data$financial_institution=="Martinez, Duffy and Bird"|data$financial_institution=="Miller, Mcclure and Allen"|data$financial_institution=="Nicholson Group"|
data$financial_institution=="OTHER"|data$financial_institution=="OTHER"|data$financial_institution=="Richards-Walters",1,2))
unique(data$financial_institution)
###Train and Test###
set.seed(200)
# Random row split of the *combined* train+test frame; the 0.7639215 ratio
# presumably reproduces the original train-set size -- TODO confirm.
index<-sample(nrow(data), 0.7639215*nrow(data), replace = F)
train1<-data[index,]
test1<-data[-index,]
colnames(data)
####Model Building####
# Full logistic regression on all predictors (column 1 dropped).
mod<-glm(m13~., data = train1[,-1], family = "binomial")
summary(mod)
#####
mod1<-glm(m13~source+interest_rate+unpaid_principal_bal+number_of_borrowers+
debt_to_income_ratio+borrower_credit_score+co.borrower_credit_score+
m1+m5+m9+m10+m11+m12, data = train1, family = "binomial")
summary(mod1)
########
unique(data$source)
# One-hot encode source (X is the implicit baseline).
train1$source_Y<-ifelse(train1$source=="Y", 1,0)
train1$source_Z<-ifelse(train1$source=="Z", 1,0)
test1$source_Y<-ifelse(test1$source=="Y", 1,0)
test1$source_Z<-ifelse(test1$source=="Z", 1,0)
#########
mod3<-glm(m13~source_Y+source_Z+interest_rate+unpaid_principal_bal+number_of_borrowers+
debt_to_income_ratio+borrower_credit_score+co.borrower_credit_score+
m1+m5+m9+m10+m11+m12, data = train1, family = "binomial")
summary(mod3)
# mod4: reduced model after dropping non-significant terms.
mod4<-glm(m13~source_Y+source_Z+interest_rate+unpaid_principal_bal+co.borrower_credit_score+
debt_to_income_ratio+borrower_credit_score+
m1+m5+m9+m10+m12, data = train1, family = "binomial")
summary(mod4)
######
# NOTE(review): vif() lives in the car package, which is never loaded here.
vif(mod4)
predicted<-mod4$fitted.values
head(predicted)
table(train1$m13)/nrow(train1)
# 0.0055 cutoff ~ positive-class prevalence from the table above.
predbkt<-ifelse(predicted>=0.0055,1,0)
table(predbkt,train1$m13)
library(ROCR)
pred<-prediction(predicted, train1$m13)
perf<-performance(pred, "tpr", "fpr")
plot(perf)
abline(0,1)
auc<-performance(pred, "auc")
auc
# Score the held-out split and write the submission.
# NOTE(review): pr has length nrow(test1) (a random subset of the combined
# data), while submission presumably has one row per original test loan --
# lengths may not match; verify before submitting.
pr<-predict(mod4, test1[,-1], type = "response")
submission$m13=ifelse(pr>=0.0055,1,0)
write.csv(submission, "Loan_Prediction_Submit.csv", row.names = F)
#######
| /Loan Prediction.R | no_license | MithleshLabroo/Loan-Prediction | R | false | false | 9,196 | r | library(dplyr)
library(ggplot2)
train<-read.csv("F:/Hackathons/Loan Prediction/train.csv", stringsAsFactors = FALSE)
str(train)
test<-read.csv("F:/Hackathons/Loan Prediction/test.csv", stringsAsFactors = FALSE)
str(test$first_payment_date)
submission<-read.csv("F:/Hackathons/Loan Prediction/sample_submission.csv")
###Categorical###
ggplot(train, aes(source, m13)) + geom_bar(stat = "identity", color = "purple") +
ggtitle("source vs m13")
###X has highest impact on m13 followed by Y and Z#####
ggplot(train, aes(financial_institution, m13)) + geom_bar(stat = "identity", color = "purple") +
theme(axis.text.x = element_text(angle = 70, vjust = 0.5, color = "navy")) + xlab("financial_institution") + ylab("m13")+
ggtitle("financial_institution vs m13")
###Other followed by Browning hat has highest impact on m13#####
ggplot(train, aes(origination_date, m13)) + geom_bar(stat = "identity", color = "purple") +
ggtitle("origination_date vs m13")
###2012-01-01 has highest impact on m13 followed by 2012-02-01 and then 2012-03-01 #####
ggplot(train, aes(first_payment_date, m13)) + geom_bar(stat = "identity", color = "purple") +
ggtitle("first_payment_date vs m13")
###03/2012 has highest impact on m13 followed by 04/2012 and then 05/2012 #####
ggplot(train, aes(loan_purpose, m13)) + geom_bar(stat = "identity", color = "purple") +
xlab("loan_purpose") + ylab("m13") + ggtitle("loan_purpose vs m13")
####B12, followed by A23 and C86######
ggplot(train, aes(insurance_percent, m13)) + geom_bar(stat = "identity", color = "purple") +
xlab("insurance_percent") + ylab("m13") + ggtitle("insurance_percent vs m13")
####0 followed by 30 and then 25######
ggplot(train, aes(insurance_type, m13)) + geom_bar(stat = "identity", color = "purple") +
xlab("insurance_type") + ylab("m13") + ggtitle("insurance_type vs m13")
####-0.5 to 0.5 has max impact on m13 and cover more than 95%######
###Continuous###
ggplot(train, aes(x= m13, y = interest_rate)) + geom_point(size = 2.5, color="navy") +
xlab("interest_rate") + ylab("m13") + ggtitle("interest_rate vs m13")
####-0.5 to 0.5 has max impact on m13 and cover more than 95%######
ggplot(train, aes(x= interest_rate, y = m13)) + geom_point(size = 2.5, color="navy") +
xlab("interest_rate") + ylab("m13") + ggtitle("interest_rate vs m13")
ggplot(train, aes(x= unpaid_principal_bal, y = m13)) + geom_point(size = 2.5, color="navy") +
xlab("unpaid_principal_bal") + ylab("m13") + ggtitle("unpaid_principal_bal vs m13")
ggplot(train, aes(x= loan_term, y = m13)) + geom_point(size = 2.5, color="navy") +
xlab("loan_term") + ylab("m13") + ggtitle("loan_term vs m13")
ggplot(train, aes(x= loan_to_value, y = m13)) + geom_point(size = 2.5, color="navy") +
xlab("loan_to_value") + ylab("m13") + ggtitle("loan_to_value vs m13")
ggplot(train, aes(x= debt_to_income_ratio, y = m13)) + geom_point(size = 2.5, color="navy") +
xlab("debt_to_income_ratio") + ylab("m13") + ggtitle("debt_to_income_ratio vs m13")
ggplot(train, aes(x= borrower_credit_score, y = m13)) + geom_point(size = 2.5, color="navy") +
xlab("borrower_credit_score") + ylab("m13") + ggtitle("borrower_credit_score vs m13")
ggplot(train, aes(x= co.borrower_credit_score, y = m13)) + geom_point(size = 2.5, color="navy") +
xlab("co.borrower_credit_score") + ylab("m13") + ggtitle("co.borrower_cre m, dit_score vs m13")
table(train$m13)
test$m13<-0
data<-rbind(train,test)
sum(is.na(data))
str(data)
names(data)
list<-names(data[,-c(1,2,3,7,8,13)])
unique(data$m12)
par(mfrow=c(4,7), mar=c(1,1,1,1))
for(i in 1:length(list)){
boxplot(data[,list[i]], main = list[i])
}
#########Outlier Treatment############
dev.off()
summary(data$interest_rate)
x1 <- data$interest_rate
qnt <- quantile(x1, probs=c(.25, .75), na.rm = T)
caps <- quantile(x1, probs=c(.05, .95), na.rm = T)
H <- 1.5 * IQR(x1, na.rm = T)
x1[x1 < (qnt[1] - H)] <- caps[1]
x1[x1 > (qnt[2] + H)] <- caps[2]
boxplot(x1)
data$interest_rate <- x1
summary(data$interest_rate)
####
summary(data$unpaid_principal_bal)
boxplot(data$unpaid_principal_bal)
x2 <- data$unpaid_principal_bal
qnt <- quantile(x2, probs=c(.25, .75), na.rm = T)
caps <- quantile(x2, probs=c(.05, .95), na.rm = T)
H <- 1.5 * IQR(x2, na.rm = T)
x2[x2 > (qnt[2] + H)] <- caps[2]
boxplot(x2)
data$unpaid_principal_bal <- x2
summary(data$unpaid_principal_bal)
####
summary(data$loan_to_value)
boxplot(data$loan_to_value)
x3 <- data$loan_to_value
qnt <- quantile(x3, probs=c(.25, .75), na.rm = T)
caps <- quantile(x3, probs=c(.05, .95), na.rm = T)
H <- 1.5 * IQR(x3, na.rm = T)
x3[x3 < (qnt[1] - H)] <- caps[1]
boxplot(x3)
data$loan_to_value <- x3
summary(data$loan_to_value)
#####
summary(data$borrower_credit_score)
boxplot(data$borrower_credit_score)
x4 <- data$borrower_credit_score
qnt <- quantile(x4, probs=c(.25, .75), na.rm = T)
caps <- quantile(x4, probs=c(.05, .95), na.rm = T)
H <- 1.5 * IQR(x4, na.rm = T)
x4[x4 < (qnt[1] - H)] <- caps[1]
boxplot(x4)
data$borrower_credit_score <- x4
summary(data$borrower_credit_score)
###############
summary(data$insurance_percent)
boxplot(data$insurance_percent)
x5 <- data$insurance_percent
qnt <- quantile(x5, probs=c(.25, .75), na.rm = T)
caps <- quantile(x5, probs=c(.05, .95), na.rm = T)
H <- 1.5 * IQR(x5, na.rm = T)
x5[x5 > (qnt[2] + H)] <- caps[2]
boxplot(x5)
data$insurance_percent <- x5
summary(data$insurance_percent)
###
list<-names(data[,-c(1,2,3,7,8,13)])
for(i in 1:length(list)){
boxplot(data[,list[i]], main = list[i])
}
list
######Formatting dates########
unique(data$origination_date)
data$origination_date[data$origination_date=="01/01/12"]<-"2012-01-01"
data$origination_date[data$origination_date=="01/02/12"]<-"2012-02-01"
data$origination_date[data$origination_date=="01/03/12"]<-"2012-03-01"
unique(data$first_payment_date)
data$first_payment_date[data$first_payment_date=="Apr-12"]<-"04/2012"
data$first_payment_date[data$first_payment_date=="Mar-12"]<-"03/2012"
data$first_payment_date[data$first_payment_date=="May-12"]<-"05/2012"
data$first_payment_date[data$first_payment_date=="Feb-12"]<-"02/2012"
#######Binning and Creating dummy Variable######
unique(data$m13)
unique(data$financial_institution)
data%>%count(m13,levels=financial_institution)%>%filter(m13==1)->datC1
datC1$N<-unclass(data%>%filter(financial_institution%in%datC1$levels)%>%count(financial_institution))[[2]]
datC1$m13Perc<-datC1$n/datC1$N
datC1$Var.Name<-rep("financial_institution",nrow(datC1))
datC1$m13Perc<-sort(datC1$m13Perc, decreasing = FALSE)
data$financial_institution<-ifelse(data$financial_institution=="Anderson-Taylor"|data$financial_institution=="Chapman-Mcmahon"|data$financial_institution=="Browning-Hart"|data$financial_institution=="Cole, Brooks and Vincent"|
data$financial_institution=="Edwards-Hoffman",0,ifelse(data$financial_institution=="Martinez, Duffy and Bird"|data$financial_institution=="Miller, Mcclure and Allen"|data$financial_institution=="Nicholson Group"|
data$financial_institution=="OTHER"|data$financial_institution=="OTHER"|data$financial_institution=="Richards-Walters",1,2))
unique(data$financial_institution)
###Train and Test###
set.seed(200)
index<-sample(nrow(data), 0.7639215*nrow(data), replace = F)
train1<-data[index,]
test1<-data[-index,]
colnames(data)
####Model Building####
mod<-glm(m13~., data = train1[,-1], family = "binomial")
summary(mod)
#####
mod1<-glm(m13~source+interest_rate+unpaid_principal_bal+number_of_borrowers+
debt_to_income_ratio+borrower_credit_score+co.borrower_credit_score+
m1+m5+m9+m10+m11+m12, data = train1, family = "binomial")
summary(mod1)
########
unique(data$source)
train1$source_Y<-ifelse(train1$source=="Y", 1,0)
train1$source_Z<-ifelse(train1$source=="Z", 1,0)
test1$source_Y<-ifelse(test1$source=="Y", 1,0)
test1$source_Z<-ifelse(test1$source=="Z", 1,0)
#########
mod3<-glm(m13~source_Y+source_Z+interest_rate+unpaid_principal_bal+number_of_borrowers+
debt_to_income_ratio+borrower_credit_score+co.borrower_credit_score+
m1+m5+m9+m10+m11+m12, data = train1, family = "binomial")
summary(mod3)
mod4<-glm(m13~source_Y+source_Z+interest_rate+unpaid_principal_bal+co.borrower_credit_score+
debt_to_income_ratio+borrower_credit_score+
m1+m5+m9+m10+m12, data = train1, family = "binomial")
summary(mod4)
######
vif(mod4)
predicted<-mod4$fitted.values
head(predicted)
table(train1$m13)/nrow(train1)
predbkt<-ifelse(predicted>=0.0055,1,0)
table(predbkt,train1$m13)
library(ROCR)
pred<-prediction(predicted, train1$m13)
perf<-performance(pred, "tpr", "fpr")
plot(perf)
abline(0,1)
auc<-performance(pred, "auc")
auc
pr<-predict(mod4, test1[,-1], type = "response")
submission$m13=ifelse(pr>=0.0055,1,0)
write.csv(submission, "Loan_Prediction_Submit.csv", row.names = F)
#######
|
# ---------------------------------------------------------------------------
# Export the true simulation coefficients (outcome and exposure models) as a
# LaTeX table, then plot the outcome-model betas grouped by variable type.
# Assumes `plas_sims` (with $TrueOutBeta and $TrueExpBeta) already exists in
# the workspace -- it is not created by this script.
# ---------------------------------------------------------------------------
library(xtable)
setwd("~/Desktop/HuangGroup/cvtmle_plasmode/Code")
# Drop element 2 of the outcome betas (presumably the treatment term --
# TODO confirm) and store as a one-column data frame named "beta".
out.beta <- data.frame(plas_sims$TrueOutBeta[-2])
colnames(out.beta) <- "beta"
exp.beta <- data.frame(plas_sims$TrueExpBeta)
# exp.beta[12:21,] <- exp.beta[7:16,]
# exp.beta[7:11,] <- 0
exp.beta
# Side-by-side table: outcome-regression vs propensity-score coefficients.
beta.all <- cbind(out.beta, exp.beta)
colnames(beta.all) <- c("OR Coef", "PS Coef")
library(xtable)  # NOTE(review): already loaded above; redundant.
xtable(beta.all)
library(ggplot2)
# Quick base-graphics look at the non-intercept betas.
plot(out.beta[-1,1])
rownames(out.beta)
# Give var group labels and index. Include (Intercept)
# 1:"fixed", 2:"1st", 3:"int"
A.var.grp.map <- c("fixed", "1st", "int")
# 6 fixed + 36 first-order + 10 interaction entries = 52 total.
# NOTE(review): this hard-coded layout must equal nrow(out.beta); it breaks
# silently if the simulation design changes -- confirm the counts.
A.var.grp.idx <- c(rep(1, 6),rep(2, 40-4),rep(3,10))
A.var.grp <- A.var.grp.map[A.var.grp.idx]
A.var.grp <- as.factor(A.var.grp)
out.beta$A.var.grp <- A.var.grp
# Scatter of non-intercept betas coloured by variable group, with a dashed
# zero reference line; x tick labels suppressed (index is not meaningful).
g0 = ggplot(out.beta[-1,], aes(x = (1:(nrow(out.beta)-1)), y = beta,color = A.var.grp))+
geom_point(size = 1) + geom_hline(yintercept=0, linetype="dashed",
color = "black")+
ggtitle("True Parameters in Scenario A, OR") +
xlab("Variables") + ylab("Parameters") +
theme(axis.text.x=element_blank(),
axis.ticks.x=element_blank())
g0
# ggsave() writes the last displayed plot (g0) to the Plot/ directory.
ggsave(paste0("~/Desktop/HuangGroup/cvtmle_plasmode/Code/Plot/","A-parameter-plot.jpeg"))
| /Code/OutputCoeff.R | no_license | mengeks/drml-plasmode | R | false | false | 1,200 | r | library(xtable)
setwd("~/Desktop/HuangGroup/cvtmle_plasmode/Code")
out.beta <- data.frame(plas_sims$TrueOutBeta[-2])
colnames(out.beta) <- "beta"
exp.beta <- data.frame(plas_sims$TrueExpBeta)
# exp.beta[12:21,] <- exp.beta[7:16,]
# exp.beta[7:11,] <- 0
exp.beta
beta.all <- cbind(out.beta, exp.beta)
colnames(beta.all) <- c("OR Coef", "PS Coef")
library(xtable)
xtable(beta.all)
library(ggplot2)
plot(out.beta[-1,1])
rownames(out.beta)
# Give var group labels and index. Include (Intercept)
# 1:"fixed", 2:"1st", 3:"int"
A.var.grp.map <- c("fixed", "1st", "int")
A.var.grp.idx <- c(rep(1, 6),rep(2, 40-4),rep(3,10))
A.var.grp <- A.var.grp.map[A.var.grp.idx]
A.var.grp <- as.factor(A.var.grp)
out.beta$A.var.grp <- A.var.grp
g0 = ggplot(out.beta[-1,], aes(x = (1:(nrow(out.beta)-1)), y = beta,color = A.var.grp))+
geom_point(size = 1) + geom_hline(yintercept=0, linetype="dashed",
color = "black")+
ggtitle("True Parameters in Scenario A, OR") +
xlab("Variables") + ylab("Parameters") +
theme(axis.text.x=element_blank(),
axis.ticks.x=element_blank())
g0
ggsave(paste0("~/Desktop/HuangGroup/cvtmle_plasmode/Code/Plot/","A-parameter-plot.jpeg"))
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 20830
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 20830
c
c Input Parameter (command line, file):
c input filename QBFLIB/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#124.A#48.c#.w#7.s#35.asp.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 7115
c no.of clauses 20830
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 20830
c
c QBFLIB/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#124.A#48.c#.w#7.s#35.asp.qdimacs 7115 20830 E1 [] 0 124 6991 20830 NONE
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#124.A#48.c#.w#7.s#35.asp/ctrl.e#1.a#3.E#124.A#48.c#.w#7.s#35.asp.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 732 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 20830
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 20830
c
c Input Parameter (command line, file):
c input filename QBFLIB/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#124.A#48.c#.w#7.s#35.asp.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 7115
c no.of clauses 20830
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 20830
c
c QBFLIB/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#124.A#48.c#.w#7.s#35.asp.qdimacs 7115 20830 E1 [] 0 124 6991 20830 NONE
|
# NOTE(review): the roxygen title below says "mcnet" but @name declares
# nof1-package -- confirm which package name is intended.
#' mcnet: A package for N of 1 study analysis using Bayesian methods
#'
#' A package for running N of 1 study trials.
#'
#' An N of 1 trial is a clinical trial in which a single patient is the entire trial, a single case study.
#' This package was originally developed as the analysis tool for a PCORI grant.
#' It is designed for N of 1 trials and can fit Bayesian versions of linear regression, logistic/ordinal regression, and Poisson regression.
#' The package also includes a number of plotting tools for visualization.
#'
#' @docType package
#' @name nof1-package
NULL
# Package-level imports below generate NAMESPACE import directives.
#' @import coda
#' @import MASS
#' @import ggplot2
#' @import scales
#' @import RColorBrewer
#' @import tidyr
#' @import dplyr
#' @import reshape2
#' @import splines
NULL
| /R/nof1.R | no_license | jiabei-yang/nof1ins | R | false | false | 783 | r | #' mcnet: A package for N of 1 study analysis using Bayesian methods
#'
#' A package for running N of 1 study trials
#'
#' An N of 1 trial is a clinical trial in which a single patient is the entire trial, a single case study.
#' The main purpose of this package was to serve as an analysis tool for one of the PCORI grants we were working with.
#' It is designed for N of 1 trials and can fit Bayesian versions of linear regression, logistic/ordinal regression, and Poisson regression.
#' The package includes a number of different plotting tools for visualization.
#'
#' @docType package
#' @name nof1-package
NULL
# Package-wide imports; attaching them to NULL makes roxygen2 write them
# into the NAMESPACE file.
#' @import coda
#' @import MASS
#' @import ggplot2
#' @import scales
#' @import RColorBrewer
#' @import tidyr
#' @import dplyr
#' @import reshape2
#' @import splines
NULL
|
# Copyright 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
#' @title Perform a flow frequency analysis on annual statistics
#'
#' @description Performs a frequency analysis on annual peak instantaneous flow statistics extracted from a HYDAT database.
#' Calculates the statistics from annual minimum or maximum instantaneous peaks for all years of record, unless specified.
#' Function will calculate using all values in the provided data (no grouped
#' analysis). Analysis methodology replicates that from \href{http://www.hec.usace.army.mil/software/hec-ssp/}{HEC-SSP}.
#'
#' @inheritParams compute_frequency_analysis
#' @inheritParams compute_annual_frequencies
#' @param station_number A character string vector of seven digit Water Survey of Canada station numbers (e.g. \code{"08NM116"}) of
#' which to extract annual peak minimum or maximum instantaneous streamflow data from a HYDAT database. Requires \code{tidyhydat}
#' package and a HYDAT database.
#'
#' @return A list with the following elements:
#' \item{Freq_Analysis_Data}{Data frame with computed annual summary statistics used in analysis.}
#' \item{Freq_Plot_Data}{Data frame with co-ordinates used in frequency plot.}
#' \item{Freq_Plot}{ggplot2 object with frequency plot}
#' \item{Freq_Fitting}{List of fitted objects from fitdistrplus.}
#' \item{Freq_Fitted_Quantiles}{Data frame with fitted quantiles.}
#'
#' @seealso \code{\link{compute_frequency_analysis}}
#'
#' @examples
#' \dontrun{
#'
#' # Working examples (see arguments for further analysis options):
#'
#' # Compute an annual peak frequency analysis using default arguments (instantaneous lows)
#' results <- compute_hydat_peak_frequencies(station_number = "08NM116",
#' start_year = 1980,
#' end_year = 2010)
#'
#' # Compute an annual peak frequency analysis using default arguments (instantaneous highs)
#' results <- compute_hydat_peak_frequencies(station_number = "08NM116",
#' start_year = 1980,
#' end_year = 2010,
#' use_max = TRUE)
#'
#' }
#' @export
compute_hydat_peak_frequencies <- function(station_number,
                                           use_max = FALSE,
                                           use_log = FALSE,
                                           prob_plot_position = c("weibull", "median", "hazen"),
                                           prob_scale_points = c(.9999, .999, .99, .9, .5, .2, .1, .02, .01, .001, .0001),
                                           fit_distr = c("PIII", "weibull"),
                                           fit_distr_method = ifelse(fit_distr == "PIII", "MOM", "MLE"),
                                           fit_quantiles = c(.975, .99, .98, .95, .90, .80, .50, .20, .10, .05, .01),
                                           start_year,
                                           end_year,
                                           exclude_years,
                                           plot_curve = TRUE){

  # replicate the frequency analysis of the HEC-SSP program
  # refer to Chapter 7 of the user manual

  ## ARGUMENT CHECKS
  ## ---------------

  # Missing optional arguments default to an all-inclusive filter: the
  # 0..9999 year window keeps every record and NULL excludes nothing.
  if (missing(station_number)) {
    station_number <- NULL
  }
  if (missing(start_year)) {
    start_year <- 0
  }
  if (missing(end_year)) {
    end_year <- 9999
  }
  if (missing(exclude_years)) {
    exclude_years <- NULL
  }

  # years_checks() is a package-internal validator defined elsewhere in
  # this package; it stops on malformed year arguments.
  years_checks(start_year, end_year, exclude_years)

  if (!is.logical(use_log))
    stop("use_log must be logical (TRUE/FALSE).", call. = FALSE)
  if (!is.logical(use_max))
    stop("use_max must be logical (TRUE/FALSE).", call. = FALSE)
  if (!all(prob_plot_position %in% c("weibull","median","hazen")))
    stop("prob_plot_position must be one of weibull, median, or hazen.", call. = FALSE)
  if (!is.numeric(prob_scale_points))
    stop("prob_scale_points must be numeric and between 0 and 1 (not inclusive).", call. = FALSE)
  if (!all(prob_scale_points > 0 & prob_scale_points < 1))
    stop("prob_scale_points must be numeric and between 0 and 1 (not inclusive).", call. = FALSE)
  if (!all(fit_distr %in% c("weibull", "PIII")))
    stop("fit_distr must be one of weibull or PIII.", call. = FALSE)
  if (!is.numeric(fit_quantiles))
    stop("fit_quantiles must be numeric and between 0 and 1 (not inclusive).", call. = FALSE)
  if (!all(fit_quantiles > 0 & fit_quantiles < 1))
    stop("fit_quantiles must be numeric and between 0 and 1 (not inclusive).", call. = FALSE)
  # Only the first element of fit_distr / fit_distr_method is used below;
  # these two checks guard unsupported combinations of them.
  if (fit_distr[1] == 'weibull' & use_log)
    stop("Cannot fit Weibull distribution on log-scale.", call. = FALSE)
  if (fit_distr[1] != "PIII" & fit_distr_method[1] == "MOM")
    stop('MOM only can be used with PIII distribution.', call. = FALSE)

  # Station lookups require a local HYDAT SQLite database previously
  # downloaded with tidyhydat::download_hydat().
  if (!file.exists(file.path(tidyhydat::hy_dir(),"HYDAT.sqlite3")))
    stop("A HYDAT database has not been downloaded yet using the tidyhydat::download_hydat() function.
         Download HYDAT before using station_number argument.", call. = FALSE)
  if (is.null(station_number)) stop("A station_number must be provided.", call. = FALSE)
  if (length(station_number) != 1) stop("Only one station_number can be provided for this function.", call. = FALSE)
  if (!all(station_number %in% dplyr::pull(suppressMessages(tidyhydat::hy_stations()[1]))))
    stop("station_number listed does not exist in HYDAT.", call. = FALSE)

  # Get peak data
  # hy_annual_instant_peaks() returns flow and level peaks; keep flow only,
  # then keep the requested extreme (MAX or MIN), failing with a specific
  # message at each narrowing step.
  inst_peaks <- suppressMessages(suppressWarnings(tidyhydat::hy_annual_instant_peaks(station_number)))
  if (nrow(inst_peaks) == 0) stop("No peak data available for this station_number.", call. = FALSE)

  inst_peaks <- dplyr::filter(inst_peaks, Parameter == "Flow")
  if (nrow(inst_peaks) == 0) stop("No peak flow data available for this station_number.", call. = FALSE)

  inst_peaks <- dplyr::filter(inst_peaks, PEAK_CODE == ifelse(use_max, "MAX", "MIN"))
  if (use_max & nrow(inst_peaks) == 0) stop("No maximum peak flow data available for this station_number.", call. = FALSE)
  if (!use_max & nrow(inst_peaks) == 0) stop("No minimum peak flow data available for this station_number.", call. = FALSE)

  # Reduce to the three columns compute_frequency_analysis() expects:
  # an event identifier (Year), a measure label, and the peak value.
  inst_peaks$Year <- lubridate::year(inst_peaks$Date)
  inst_peaks <- dplyr::select(inst_peaks, Year, Measure = PEAK_CODE, Value)
  inst_peaks <- dplyr::mutate(inst_peaks, Measure = paste0("Instantaneous ", ifelse(use_max,"Maximum", "Minimum")))

  # Filter peak data
  inst_peaks <- inst_peaks[ inst_peaks$Year >= start_year & inst_peaks$Year <= end_year,]
  inst_peaks <- dplyr::filter(inst_peaks, !(Year %in% exclude_years))

  # Data checks
  # Fitting a distribution needs at least 3 annual observations.
  if (nrow(inst_peaks) < 3) stop(paste0("Need at least 3 years of observations for analysis. There are only ",
                                        nrow(inst_peaks),
                                        " years available."), call. = FALSE)

  Q_stat <- inst_peaks

  ## COMPUTE THE ANALYSIS
  ## -------------------------------

  analysis <- compute_frequency_analysis(data = Q_stat,
                                         events = "Year",
                                         values = "Value",
                                         measures = "Measure",
                                         use_max = use_max,
                                         use_log = use_log,
                                         prob_plot_position = prob_plot_position,
                                         prob_scale_points = prob_scale_points,
                                         fit_distr = fit_distr,
                                         fit_distr_method = fit_distr_method,
                                         fit_quantiles = fit_quantiles,
                                         plot_curve = plot_curve)

  return(analysis)

}
| /R/compute_hydat_peak_frequencies.R | permissive | gravitytrope/fasstr | R | false | false | 8,535 | r | # Copyright 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
#' @title Perform a flow frequency analysis on annual statistics
#'
#' @description Performs a volume frequency analysis on annual statistics from a streamflow dataset. Calculates the statistics from all
#' daily discharge values from all years, unless specified. Function will calculate using all values in the provided data (no grouped
#' analysis). Analysis methodology replicates that from \href{http://www.hec.usace.army.mil/software/hec-ssp/}{HEC-SSP}.
#'
#' @inheritParams compute_frequency_analysis
#' @inheritParams compute_annual_frequencies
#' @param station_number A character string vector of seven digit Water Survey of Canada station numbers (e.g. \code{"08NM116"}) of
#' which to extract annual peak minimum or maximum instantaneous streamflow data from a HYDAT database. Requires \code{tidyhydat}
#' package and a HYDAT database.
#'
#' @return A list with the following elements:
#' \item{Freq_Analysis_Data}{Data frame with computed annual summary statistics used in analysis.}
#' \item{Freq_Plot_Data}{Data frame with co-ordinates used in frequency plot.}
#' \item{Freq_Plot}{ggplot2 object with frequency plot}
#' \item{Freq_Fitting}{List of fitted objects from fitdistrplus.}
#' \item{Freq_Fitted_Quantiles}{Data frame with fitted quantiles.}
#'
#' @seealso \code{\link{compute_frequency_analysis}}
#'
#' @examples
#' \dontrun{
#'
#' # Working examples (see arguments for further analysis options):
#'
#' # Compute an annual peak frequency analysis using default arguments (instantaneous lows)
#' results <- compute_hydat_peak_frequencies(station_number = "08NM116",
#' start_year = 1980,
#' end_year = 2010)
#'
#' # Compute an annual peak frequency analysis using default arguments (instantaneous highs)
#' results <- compute_hydat_peak_frequencies(station_number = "08NM116",
#' start_year = 1980,
#' end_year = 2010,
#' use_max = TRUE)
#'
#' }
#' @export
compute_hydat_peak_frequencies <- function(station_number,
                                           use_max = FALSE,
                                           use_log = FALSE,
                                           prob_plot_position = c("weibull", "median", "hazen"),
                                           prob_scale_points = c(.9999, .999, .99, .9, .5, .2, .1, .02, .01, .001, .0001),
                                           fit_distr = c("PIII", "weibull"),
                                           fit_distr_method = ifelse(fit_distr == "PIII", "MOM", "MLE"),
                                           fit_quantiles = c(.975, .99, .98, .95, .90, .80, .50, .20, .10, .05, .01),
                                           start_year,
                                           end_year,
                                           exclude_years,
                                           plot_curve = TRUE){

  # replicate the frequency analysis of the HEC-SSP program
  # refer to Chapter 7 of the user manual

  ## ARGUMENT CHECKS
  ## ---------------

  # Missing optional arguments default to an all-inclusive filter: the
  # 0..9999 year window keeps every record and NULL excludes nothing.
  if (missing(station_number)) {
    station_number <- NULL
  }
  if (missing(start_year)) {
    start_year <- 0
  }
  if (missing(end_year)) {
    end_year <- 9999
  }
  if (missing(exclude_years)) {
    exclude_years <- NULL
  }

  # years_checks() is a package-internal validator defined elsewhere in
  # this package; it stops on malformed year arguments.
  years_checks(start_year, end_year, exclude_years)

  if (!is.logical(use_log))
    stop("use_log must be logical (TRUE/FALSE).", call. = FALSE)
  if (!is.logical(use_max))
    stop("use_max must be logical (TRUE/FALSE).", call. = FALSE)
  if (!all(prob_plot_position %in% c("weibull","median","hazen")))
    stop("prob_plot_position must be one of weibull, median, or hazen.", call. = FALSE)
  if (!is.numeric(prob_scale_points))
    stop("prob_scale_points must be numeric and between 0 and 1 (not inclusive).", call. = FALSE)
  if (!all(prob_scale_points > 0 & prob_scale_points < 1))
    stop("prob_scale_points must be numeric and between 0 and 1 (not inclusive).", call. = FALSE)
  if (!all(fit_distr %in% c("weibull", "PIII")))
    stop("fit_distr must be one of weibull or PIII.", call. = FALSE)
  if (!is.numeric(fit_quantiles))
    stop("fit_quantiles must be numeric and between 0 and 1 (not inclusive).", call. = FALSE)
  if (!all(fit_quantiles > 0 & fit_quantiles < 1))
    stop("fit_quantiles must be numeric and between 0 and 1 (not inclusive).", call. = FALSE)
  # Only the first element of fit_distr / fit_distr_method is used below;
  # these two checks guard unsupported combinations of them.
  if (fit_distr[1] == 'weibull' & use_log)
    stop("Cannot fit Weibull distribution on log-scale.", call. = FALSE)
  if (fit_distr[1] != "PIII" & fit_distr_method[1] == "MOM")
    stop('MOM only can be used with PIII distribution.', call. = FALSE)

  # Station lookups require a local HYDAT SQLite database previously
  # downloaded with tidyhydat::download_hydat().
  if (!file.exists(file.path(tidyhydat::hy_dir(),"HYDAT.sqlite3")))
    stop("A HYDAT database has not been downloaded yet using the tidyhydat::download_hydat() function.
         Download HYDAT before using station_number argument.", call. = FALSE)
  if (is.null(station_number)) stop("A station_number must be provided.", call. = FALSE)
  if (length(station_number) != 1) stop("Only one station_number can be provided for this function.", call. = FALSE)
  if (!all(station_number %in% dplyr::pull(suppressMessages(tidyhydat::hy_stations()[1]))))
    stop("station_number listed does not exist in HYDAT.", call. = FALSE)

  # Get peak data
  # hy_annual_instant_peaks() returns flow and level peaks; keep flow only,
  # then keep the requested extreme (MAX or MIN), failing with a specific
  # message at each narrowing step.
  inst_peaks <- suppressMessages(suppressWarnings(tidyhydat::hy_annual_instant_peaks(station_number)))
  if (nrow(inst_peaks) == 0) stop("No peak data available for this station_number.", call. = FALSE)

  inst_peaks <- dplyr::filter(inst_peaks, Parameter == "Flow")
  if (nrow(inst_peaks) == 0) stop("No peak flow data available for this station_number.", call. = FALSE)

  inst_peaks <- dplyr::filter(inst_peaks, PEAK_CODE == ifelse(use_max, "MAX", "MIN"))
  if (use_max & nrow(inst_peaks) == 0) stop("No maximum peak flow data available for this station_number.", call. = FALSE)
  if (!use_max & nrow(inst_peaks) == 0) stop("No minimum peak flow data available for this station_number.", call. = FALSE)

  # Reduce to the three columns compute_frequency_analysis() expects:
  # an event identifier (Year), a measure label, and the peak value.
  inst_peaks$Year <- lubridate::year(inst_peaks$Date)
  inst_peaks <- dplyr::select(inst_peaks, Year, Measure = PEAK_CODE, Value)
  inst_peaks <- dplyr::mutate(inst_peaks, Measure = paste0("Instantaneous ", ifelse(use_max,"Maximum", "Minimum")))

  # Filter peak data
  inst_peaks <- inst_peaks[ inst_peaks$Year >= start_year & inst_peaks$Year <= end_year,]
  inst_peaks <- dplyr::filter(inst_peaks, !(Year %in% exclude_years))

  # Data checks
  # Fitting a distribution needs at least 3 annual observations.
  if (nrow(inst_peaks) < 3) stop(paste0("Need at least 3 years of observations for analysis. There are only ",
                                        nrow(inst_peaks),
                                        " years available."), call. = FALSE)

  Q_stat <- inst_peaks

  ## COMPUTE THE ANALYSIS
  ## -------------------------------

  analysis <- compute_frequency_analysis(data = Q_stat,
                                         events = "Year",
                                         values = "Value",
                                         measures = "Measure",
                                         use_max = use_max,
                                         use_log = use_log,
                                         prob_plot_position = prob_plot_position,
                                         prob_scale_points = prob_scale_points,
                                         fit_distr = fit_distr,
                                         fit_distr_method = fit_distr_method,
                                         fit_quantiles = fit_quantiles,
                                         plot_curve = plot_curve)

  return(analysis)

}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/settings.R
\name{cache_dir}
\alias{cache_dir}
\title{Settings for the GenomicDataCommons package}
\usage{
cache_dir()
}
\description{
Settings for the GenomicDataCommons package
}
| /man/settings.Rd | no_license | economistgame/GenomicDataCommons | R | false | true | 258 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/settings.R
\name{cache_dir}
\alias{cache_dir}
\title{Settings for the GenomicDataCommons package}
\usage{
cache_dir()
}
\description{
Settings for the GenomicDataCommons package
}
|
\name{ptf.wosten.n}
\alias{ptf.wosten.n}
\title{Wosten et al. 1999 PTF for van Genuchten 1980 n parameter.}
\description{Continuous pedotransfer functions to predict the n parameter
of the Van Genuchten water retention function
of a soil after its clay and silt content, bulk density,
organic matter content and topsoil or subsoil qualifier. }
\usage{ptf.wosten.n(clay, bulkD, silt, om, topSoil)}
\arguments{
\item{clay}{Vector of numericals. Clay content [\%] of each soil layer /
horizon. 0-2 micrometers.}
\item{bulkD}{Vector of numericals. Bulk density [kg.dm-3] of each soil
layer / horizon (unit not given in the article!). }
\item{silt}{Vector of numericals. Silt content [\%] of each soil layer /
horizon. 2-50 micrometers. }
\item{om}{Vector of numericals. Organic matter content [\%] of each soil layer /
horizon. }
\item{topSoil}{Vector of 0 or 1 integers. Set to 1 if the layer is a topsoil,
and to 0 if it is a subsoil, for each layer / horizon.}
}
\value{The function returns a vector of n values of the same
length as the vector of value provided to each parameter.
Unit of n is [-] dimensionless.
n (validation / calibration?) R2 is 54\%.}
\references{Wosten J.H.M., Lilly A., Nemes A.,
Le Bas C., 1999. Development and use of a database of hydraulic
properties of European soils. Geoderma 90:169-185.}
\author{Julien Moeys [aut, cre], Till Francke [ctb]}
\seealso{\code{\link{ptf.wosten}}, }
\examples{require( "soilwaterptf" )
# Example with the properties of the Footprint soil type P22i:
ptf.wosten.n(
# layer no: 1 2 3 4 5 6 7 7 8
clay = c( 15, 15, 16, 16, 20, 20, 25, 25, 14),
bulkD = c(1.296,1.44,1.48,1.48,1.51,1.51,1.55,1.55,1.56),
silt = c( 43, 43, 40, 40, 39, 39, 38, 38, 33),
om = c( 2,1.23, 0.7, 0.7, 0.5, 0.5, 0.4, 0.4, 0.3) * 1.724,
topSoil = c( 1, 1, 0, 0, 0, 0, 0, 0, 0)
) #}
| /pkg/soilwaterptf/man/ptf.wosten.n.Rd | no_license | r-forge/soilwater | R | false | false | 2,043 | rd | \name{ptf.wosten.n}
\alias{ptf.wosten.n}
\title{Wosten et al. 1999 PTF for van Genuchten 1980 n parameter.}
\description{Continuous pedotransfer functions to predict the n parameter
of the Van Genuchten water retention function
of a soil after its clay and silt content, bulk density,
organic matter content and topsoil or subsoil qualifier. }
\usage{ptf.wosten.n(clay, bulkD, silt, om, topSoil)}
\arguments{
\item{clay}{Vector of numericals. Clay content [\%] of each soil layer /
horizon. 0-2 micrometers.}
\item{bulkD}{Vector of numericals. Bulk density [kg.dm-3] of each soil
layer / horizon (unit not given in the article!). }
\item{silt}{Vector of numericals. Silt content [\%] of each soil layer /
horizon. 2-50 micrometers. }
\item{om}{Vector of numericals. Organic matter content [\%] of each soil layer /
horizon. }
\item{topSoil}{Vector of 0 or 1 integers. Set to 1 if the layer is a topsoil,
and to 0 if it is a subsoil, for each layer / horizon.}
}
\value{The function returns a vector of n values of the same
length as the vector of value provided to each parameter.
Unit of n is [-] dimensionless.
n (validation / calibration?) R2 is 54\%.}
\references{Wosten J.H.M., Lilly A., Nemes A.,
Le Bas C., 1999. Development and use of a database of hydraulic
properties of European soils. Geoderma 90:169-185.}
\author{Julien Moeys [aut, cre], Till Francke [ctb]}
\seealso{\code{\link{ptf.wosten}}, }
\examples{require( "soilwaterptf" )
# Example with the properties of the Footprint soil type P22i:
ptf.wosten.n(
# layer no: 1 2 3 4 5 6 7 7 8
clay = c( 15, 15, 16, 16, 20, 20, 25, 25, 14),
bulkD = c(1.296,1.44,1.48,1.48,1.51,1.51,1.55,1.55,1.56),
silt = c( 43, 43, 40, 40, 39, 39, 38, 38, 33),
om = c( 2,1.23, 0.7, 0.7, 0.5, 0.5, 0.4, 0.4, 0.3) * 1.724,
topSoil = c( 1, 1, 0, 0, 0, 0, 0, 0, 0)
) #}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aiplatform_objects.R
\name{GoogleCloudAiplatformV1UpdateTensorboardOperationMetadata}
\alias{GoogleCloudAiplatformV1UpdateTensorboardOperationMetadata}
\title{GoogleCloudAiplatformV1UpdateTensorboardOperationMetadata Object}
\usage{
GoogleCloudAiplatformV1UpdateTensorboardOperationMetadata(
genericMetadata = NULL
)
}
\arguments{
\item{genericMetadata}{Operation metadata for Tensorboard}
}
\value{
GoogleCloudAiplatformV1UpdateTensorboardOperationMetadata object
}
\description{
GoogleCloudAiplatformV1UpdateTensorboardOperationMetadata Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Details of operations that perform update Tensorboard.
}
\concept{GoogleCloudAiplatformV1UpdateTensorboardOperationMetadata functions}
| /googleaiplatformv1.auto/man/GoogleCloudAiplatformV1UpdateTensorboardOperationMetadata.Rd | no_license | justinjm/autoGoogleAPI | R | false | true | 841 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aiplatform_objects.R
\name{GoogleCloudAiplatformV1UpdateTensorboardOperationMetadata}
\alias{GoogleCloudAiplatformV1UpdateTensorboardOperationMetadata}
\title{GoogleCloudAiplatformV1UpdateTensorboardOperationMetadata Object}
\usage{
GoogleCloudAiplatformV1UpdateTensorboardOperationMetadata(
genericMetadata = NULL
)
}
\arguments{
\item{genericMetadata}{Operation metadata for Tensorboard}
}
\value{
GoogleCloudAiplatformV1UpdateTensorboardOperationMetadata object
}
\description{
GoogleCloudAiplatformV1UpdateTensorboardOperationMetadata Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Details of operations that perform update Tensorboard.
}
\concept{GoogleCloudAiplatformV1UpdateTensorboardOperationMetadata functions}
|
# Serial-correlation and heteroskedasticity diagnostics for a labor-supply
# regression, with HAC (Newey-West) and Prais-Winsten corrections.
#
# Data: NLS labor-supply panel (lnhr = log annual hours, lnwg = log wage,
# kids = number of children), fetched from the Rdatasets mirror.

library(DataCombine)  # slide(): within-group lagged variables
library(lmtest)       # dwtest(), bgtest(), bptest(), coeftest()
library(sandwich)     # NeweyWest() HAC covariance estimator
# Install prais only when absent; unconditionally calling install.packages()
# in a script re-downloads the package on every run.
if (!requireNamespace("prais", quietly = TRUE)) install.packages("prais")
library(prais)        # prais.winsten() FGLS estimator

dat <- read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/Ecdat/LaborSupply.csv",
                header = TRUE)
head(dat)

# Pooled OLS: log hours on log wage and number of kids.
mod <- lm(lnhr ~ lnwg + kids, data = dat)
summary(mod)

# Check for serial correlation in residuals: regress the residuals on their
# within-individual (GroupVar = "id") lag.
dat$resids <- mod$residuals
dat <- slide(dat, Var = "resids", TimeVar = "year",
             GroupVar = "id", NewVar = "lag_resids")
plot(resids ~ lag_resids, data = dat)
# No-intercept regression: the slope estimates the AR(1) coefficient rho.
summary(lm(resids ~ -1 + lag_resids, data = dat))

# Breusch-Godfrey test for serial correlation (fill = NA pads panel gaps).
bgtest(mod, type = "Chisq", fill = NA)

# Do we have heteroskedasticity? Breusch-Pagan (non-studentized) test.
bptest(mod, studentize = FALSE)

# Correct for both with Newey-West HAC robust SEs.
mod_2 <- NeweyWest(mod, lag = 1, prewhite = FALSE)
coeftest(mod, mod_2)
# Or equivalently, pass the vcov estimator inline:
coeftest(mod, vcov = NeweyWest(mod, lag = 1, prewhite = FALSE))
# Compare to plain OLS standard errors.
summary(mod)

# Prais-Winsten transformation -------------------------------------------
# One-shot estimation where we supply rho derived from the Durbin-Watson
# statistic (rho-hat = 1 - d/2). Computing rho from the test object replaces
# the previously hard-coded d = 1.1279 and generalizes to any dataset.
dw <- dwtest(mod, alternative = "two.sided")
dw
rho <- 1 - unname(dw$statistic) / 2
mod_pw <- prais.winsten(mod, data = dat, rho = rho)
mod_pw

# Now iterate to estimate rho instead of supplying it.
mod_pw_it <- prais.winsten(mod, data = dat, iter = 50)
mod_pw_it
vcov(mod)
| /serial_cor.R | no_license | tristinb/ps-research-methods | R | false | false | 1,182 | r |
# Serial-correlation and heteroskedasticity diagnostics for a labor-supply
# regression (NLS panel: lnhr = log hours, lnwg = log wage, kids).
dat <- read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/Ecdat/LaborSupply.csv", header=T)
head(dat)
# Pooled OLS of log hours on log wage and number of kids.
mod <- lm(lnhr~lnwg + kids, data=dat)
summary(mod)
# Check for serial correlation in residuals
# slide() creates a within-individual (GroupVar = "id") lag of the residuals.
dat$resids <- mod$residuals
library(DataCombine)
dat <- slide(dat, Var="resids", TimeVar="year",
             GroupVar="id",NewVar = "lag_resids")
plot(resids~lag_resids,data=dat)
# No-intercept regression: the slope estimates the AR(1) coefficient rho.
summary(lm(resids~-1+lag_resids, data=dat))
# Breusch-Godfrey test; fill = NA pads missing panel lags.
library(lmtest)
bgtest(mod,type="Chisq",fill=NA)
# Do we have heteroskedasticity?
# NOTE(review): F is the reassignable alias; prefer FALSE.
bptest(mod, studentize = F)
#Correct for both with Newey-West HAC robust SEs
library("sandwich")
mod_2 <- NeweyWest(mod,lag=1,prewhite=FALSE)
coeftest(mod,mod_2)
# Or
coeftest(mod, vcov=NeweyWest(mod, lag=1, prewhite=FALSE))
#compare to OLS
summary(mod)
#Prais-Winsten transformation
# NOTE(review): install.packages() inside a script re-downloads on every run;
# consider guarding with requireNamespace().
install.packages("prais")
library("prais")
#One-shot estimation where we supply value of rho
# Use DW test to get value of rho (rho-hat = 1 - d/2)
dwtest(mod,alternative="two.sided")
# 1.1279 is the DW statistic printed by the test above (hard-coded).
rho <- 1- 1.1279/2
mod_pw <- prais.winsten(mod,data=dat,rho=rho)
mod_pw
# Now iterate to calculate rho
mod_pw_it <- prais.winsten(mod, data=dat, iter=50)
mod_pw_it
vcov(mod)
|
#' Basic Data Import for Water Flow Data
#'
#' Imports data from user-supplied data file. Specifically used to import water flow data for use in the WRTDS package.
#' For WRTDS usage, the first column is expected to be dates, the second column measured values.
#' The third column is optional, it contains any remark codes.
#'
#' @param filePath string specifying the path to the file
#' @param fileName string name of file to open
#' @param hasHeader logical true if the first row of data is the column headers
#' @param separator string character that separates data cells
#' @keywords data import file
#' @return retval dataframe with dateTime, value, and code columns
#' @export
#' @examples
#' # Examples of how to use getDataFromFile:
#' # Change the file path and file name to something meaningful:
#' #filePath <- '~/RData/' # Sample format
#' fileName <- 'ChoptankRiverFlow.txt'
#' #getDataFromFile(filePath,fileName, separator="\t")
getDataFromFile <- function (filePath,fileName,hasHeader=TRUE,separator=","){
  # Build the full path by direct concatenation; the caller's filePath must
  # therefore end with a path separator (see the roxygen example above).
  totalPath <- paste(filePath,fileName,sep="");
  # Read everything as character first so remark codes and malformed numbers
  # survive import; values are coerced explicitly below.
  tmp <- read.delim(
    totalPath,
    header = hasHeader,
    sep=separator,
    colClasses=c('character'),
    fill = TRUE,
    comment.char="#")
  retval <- as.data.frame(tmp, stringsAsFactors=FALSE)
  # Name columns positionally: dateTime, value, and (optionally) remark code.
  # NOTE(review): other column counts fall through unnamed, which errors
  # cryptically at the as.Date() call below.
  if(ncol(retval) == 2){
    names(retval) <- c('dateTime', 'value')
  } else if (ncol(retval) == 3){
    names(retval) <- c('dateTime', 'value', 'code')
  }
  # dateFormatCheck() (defined elsewhere in this package) returns TRUE for
  # ISO yyyy-mm-dd dates; otherwise assume US-style mm/dd/yyyy.
  if(dateFormatCheck(retval$dateTime)){
    retval$dateTime <- as.Date(retval$dateTime)
  } else {
    retval$dateTime <- as.Date(retval$dateTime,format="%m/%d/%Y")
  }
  # Non-numeric values become NA with a coercion warning.
  retval$value <- as.numeric(retval$value)
  return (retval)
} | /R/getDataFromFile.r | permissive | jlthomps/dataRetrieval | R | false | false | 1,680 | r | #' Basic Data Import for Water Flow Data
#'
#' Imports data from user-supplied data file. Specifically used to import water flow data for use in the WRTDS package.
#' For WRTDS usage, the first column is expected to be dates, the second column measured values.
#' The third column is optional, it contains any remark codes.
#'
#' @param filePath string specifying the path to the file
#' @param fileName string name of file to open
#' @param hasHeader logical true if the first row of data is the column headers
#' @param separator string character that separates data cells
#' @keywords data import file
#' @return retval dataframe with dateTime, value, and code columns
#' @export
#' @examples
#' # Examples of how to use getDataFromFile:
#' # Change the file path and file name to something meaningful:
#' #filePath <- '~/RData/' # Sample format
#' fileName <- 'ChoptankRiverFlow.txt'
#' #getDataFromFile(filePath,fileName, separator="\t")
getDataFromFile <- function (filePath,fileName,hasHeader=TRUE,separator=","){
totalPath <- paste(filePath,fileName,sep="");
tmp <- read.delim(
totalPath,
header = hasHeader,
sep=separator,
colClasses=c('character'),
fill = TRUE,
comment.char="#")
retval <- as.data.frame(tmp, stringsAsFactors=FALSE)
if(ncol(retval) == 2){
names(retval) <- c('dateTime', 'value')
} else if (ncol(retval) == 3){
names(retval) <- c('dateTime', 'value', 'code')
}
if(dateFormatCheck(retval$dateTime)){
retval$dateTime <- as.Date(retval$dateTime)
} else {
retval$dateTime <- as.Date(retval$dateTime,format="%m/%d/%Y")
}
retval$value <- as.numeric(retval$value)
return (retval)
} |
# ShinyBuilder dashboard editor UI.
# Layout: script/CSS includes, a top navbar (dashboard selector + File/Edit
# menus), an empty gridster canvas that widgets are added to client-side,
# and four Bootstrap modals (delete, new, save-as, query editor).
# Depends on globals defined in global.R: sb_dir, available_dashboards,
# db_list, and stringr's str_c.
shinyUI(fluidPage(
#Includes: TinyMCE rich-text editor, gridster drag/drop bindings, and the
#Google Charts loader, each wired up by the listed local JS shims.
tags$head(tags$script(src = "//tinymce.cachefly.net/4.0/tinymce.min.js")),
tags$head(tags$script(src = 'shinyMCE/shiny-tinymce-bindings.js')),
includeScript(str_c(sb_dir, 'www/shiny-gridster-bindings.js')),
includeScript(str_c(sb_dir, 'www/json2.js')),
tags$head(tags$script(src = "//www.google.com/jsapi")),
includeScript(str_c(sb_dir, 'www/googleChart_init.js')),
#Navbar: brand, dashboard selector, and File/Edit dropdown menus. Anchors
#carry the action-button classes so clicks reach Shiny as input events.
div(class="navbar navbar-static-top navbar",
div(class = 'navbar-inner',
span(class = 'brand pull-left', list('Shiny Builder')),
column(3, selectInput('sel_dashboard', NULL, choices = available_dashboards)),
#File menu: save, save-as, new, and delete (the last three open modals).
withTags(
ul(class = "nav",
li(class = "dropdown",
a(class="dropdown-toggle", "data-toggle" = "dropdown", 'File', b(class = "caret")),
ul(class = "dropdown-menu",
li(a(id="save_dash_btn", class="action-button shiny-input-bound", icon('floppy-o'), 'Save')),
li(class = "divider"),
li(a(id="save_as_modal_btn", 'data-toggle' = "modal", 'data-target' = '#save_as_modal', icon('floppy-o'), 'Save As')),
li(a(id="new_dash_modal_btn", 'data-toggle' = "modal", 'data-target' = '#new_dash_modal', icon('dashboard'), 'New Dashboard')),
li(a(id="delete_dash_modal_btn", class = 'action-button', 'data-toggle' = "modal", 'data-target' = '#delete_modal', icon('trash-o'), 'Delete Dashboard'))
)
)
)),
#Edit menu: add a chart or text widget to the grid.
withTags(
ul(class = "nav",
li(class = "dropdown",
a(class="dropdown-toggle", "data-toggle" = "dropdown", 'Edit', b(class = "caret")),
ul(class = "dropdown-menu",
li(a(id="addChart", class="action-button shiny-input-bound", icon('bar-chart-o'), 'Add Chart')),
li(a(id="addText", class="action-button shiny-input-bound", icon('bars'), 'Add Text Area'))
)
)
))
)
),
#Gridster frame: empty draggable grid; widgets are inserted client-side.
br(),
fluidRow(gridster(id = 'gridster_frame', marginx = 10, marginy = 10, width = 100, height = 50)),
hr(),
#'Delete' modal: confirmation dialog; the primary button fires
#input$delete_dash_btn on the server.
div(id = 'delete_modal', class = 'modal hide',
div(class = 'modal-header',
tags$div(class = 'button', class = 'close', 'data-dismiss' = 'modal', 'aria-hidden'='true', 'x'),
h3('Confirm Deletion')),
div(class = 'modal-body',
p('Are you sure you want to delete this dashboard? This operation cannot be undone.')),
div(class = 'modal-footer',
HTML('<button type="button" data-dismiss="modal" class="btn">Cancel</button>
<button type="button" data-dismiss="modal" class="btn btn-primary action-button" id="delete_dash_btn">Delete Dashboard</button>'))
),
#'New Dashboard' Modal: prompts for a title, then fires input$new_dash_btn.
div(id = 'new_dash_modal', class = 'modal hide',
div(class = 'modal-header', tags$div(class = 'button', class = 'close', 'data-dismiss' = 'modal', 'aria-hidden'='true', 'x'), h3('New Dashboard')),
div(class = 'modal-body',
p('Enter a new dashboard title:'),
textInput('new_dash_file_name', label = NULL, value = '')
),
div(class = 'modal-footer',
HTML('<button type="button" data-dismiss="modal" class="btn">Close</button>
<button type="button" data-dismiss="modal" class="btn btn-primary action-button" id="new_dash_btn">Create Dashboard</button>')
)
),
#'Save As' Modal: prompts for a title, then fires input$save_as_dash_btn.
div(id = 'save_as_modal', class = 'modal hide',
div(class = 'modal-header', tags$div(class = 'button', class = 'close', 'data-dismiss' = 'modal', 'aria-hidden'='true', 'x'), h3('Save As')),
div(class = 'modal-body',
p('Enter a new dashboard title:'),
textInput('save_as_file_name', label = NULL, value = '')
),
div(class = 'modal-footer',
HTML('<button type="button" data-dismiss="modal" class="btn">Close</button>
<button type="button" data-dismiss="modal" class="btn btn-primary action-button" id="save_as_dash_btn">Save Dashboard</button>')
)
),
#Query Editor Modal: full-width dialog with an Ace SQL editor on the left
#and a live table preview on the right. The hidden active_chart_id input
#tells the server which chart the edited query belongs to.
div(id = 'full-width', class = 'modal container hide', style = 'width: 100%; margin: auto; left: 0', tabindex = '-1',
div(class = 'modal-header',
HTML('<button type="button" class="close" data-dismiss="modal" aria-hidden="true">x</button>'),
fluidRow(column(6,h3('Edit Query')), column(5,h3('Table Preview')))
),
div(class = 'modal-body',
fluidRow(
column(6, aceEditor("code",
mode="sql",
height = "300px",
value=''
),
HTML('<button class="btn btn-primary action-button shiny-bound-input" id="update_preview">Update Preview</button>'),
selectInput('selected_db', label = NULL, choices = names(db_list)),
tags$input(id = 'active_chart_id', type = 'text', value = '', class = 'shiny-bound-input', style = 'visibility: hidden; z-index: -1')
),
column(6, dataTableOutput("output_tbl"))
)
),
div(class = 'modal-footer',
HTML('<button type="button" data-dismiss="modal" class="btn">Cancel</button>
<button type="button" data-dismiss="modal" class="btn btn-primary action-button" id="save_changes">Save Query</button>')
)
),
#Main Stylesheet
includeCSS(str_c(sb_dir, '/www/main.css'))
))
| /inst/ui.R | permissive | mul118/ShinyBuilder | R | false | false | 5,619 | r | shinyUI(fluidPage(
#Includes
tags$head(tags$script(src = "//tinymce.cachefly.net/4.0/tinymce.min.js")),
tags$head(tags$script(src = 'shinyMCE/shiny-tinymce-bindings.js')),
includeScript(str_c(sb_dir, 'www/shiny-gridster-bindings.js')),
includeScript(str_c(sb_dir, 'www/json2.js')),
tags$head(tags$script(src = "//www.google.com/jsapi")),
includeScript(str_c(sb_dir, 'www/googleChart_init.js')),
#Navbar
div(class="navbar navbar-static-top navbar",
div(class = 'navbar-inner',
span(class = 'brand pull-left', list('Shiny Builder')),
column(3, selectInput('sel_dashboard', NULL, choices = available_dashboards)),
#File
withTags(
ul(class = "nav",
li(class = "dropdown",
a(class="dropdown-toggle", "data-toggle" = "dropdown", 'File', b(class = "caret")),
ul(class = "dropdown-menu",
li(a(id="save_dash_btn", class="action-button shiny-input-bound", icon('floppy-o'), 'Save')),
li(class = "divider"),
li(a(id="save_as_modal_btn", 'data-toggle' = "modal", 'data-target' = '#save_as_modal', icon('floppy-o'), 'Save As')),
li(a(id="new_dash_modal_btn", 'data-toggle' = "modal", 'data-target' = '#new_dash_modal', icon('dashboard'), 'New Dashboard')),
li(a(id="delete_dash_modal_btn", class = 'action-button', 'data-toggle' = "modal", 'data-target' = '#delete_modal', icon('trash-o'), 'Delete Dashboard'))
)
)
)),
#Edit
withTags(
ul(class = "nav",
li(class = "dropdown",
a(class="dropdown-toggle", "data-toggle" = "dropdown", 'Edit', b(class = "caret")),
ul(class = "dropdown-menu",
li(a(id="addChart", class="action-button shiny-input-bound", icon('bar-chart-o'), 'Add Chart')),
li(a(id="addText", class="action-button shiny-input-bound", icon('bars'), 'Add Text Area'))
)
)
))
)
),
#Gridster frame
br(),
fluidRow(gridster(id = 'gridster_frame', marginx = 10, marginy = 10, width = 100, height = 50)),
hr(),
#'Delete' modal
div(id = 'delete_modal', class = 'modal hide',
div(class = 'modal-header',
tags$div(class = 'button', class = 'close', 'data-dismiss' = 'modal', 'aria-hidden'='true', 'x'),
h3('Confirm Deletion')),
div(class = 'modal-body',
p('Are you sure you want to delete this dashboard? This operation cannot be undone.')),
div(class = 'modal-footer',
HTML('<button type="button" data-dismiss="modal" class="btn">Cancel</button>
<button type="button" data-dismiss="modal" class="btn btn-primary action-button" id="delete_dash_btn">Delete Dashboard</button>'))
),
#'New Dashboard' Modal
div(id = 'new_dash_modal', class = 'modal hide',
div(class = 'modal-header', tags$div(class = 'button', class = 'close', 'data-dismiss' = 'modal', 'aria-hidden'='true', 'x'), h3('New Dashboard')),
div(class = 'modal-body',
p('Enter a new dashboard title:'),
textInput('new_dash_file_name', label = NULL, value = '')
),
div(class = 'modal-footer',
HTML('<button type="button" data-dismiss="modal" class="btn">Close</button>
<button type="button" data-dismiss="modal" class="btn btn-primary action-button" id="new_dash_btn">Create Dashboard</button>')
)
),
#'Save As' Modal
div(id = 'save_as_modal', class = 'modal hide',
div(class = 'modal-header', tags$div(class = 'button', class = 'close', 'data-dismiss' = 'modal', 'aria-hidden'='true', 'x'), h3('Save As')),
div(class = 'modal-body',
p('Enter a new dashboard title:'),
textInput('save_as_file_name', label = NULL, value = '')
),
div(class = 'modal-footer',
HTML('<button type="button" data-dismiss="modal" class="btn">Close</button>
<button type="button" data-dismiss="modal" class="btn btn-primary action-button" id="save_as_dash_btn">Save Dashboard</button>')
)
),
#Query Editor Modal
div(id = 'full-width', class = 'modal container hide', style = 'width: 100%; margin: auto; left: 0', tabindex = '-1',
div(class = 'modal-header',
HTML('<button type="button" class="close" data-dismiss="modal" aria-hidden="true">x</button>'),
fluidRow(column(6,h3('Edit Query')), column(5,h3('Table Preview')))
),
div(class = 'modal-body',
fluidRow(
column(6, aceEditor("code",
mode="sql",
height = "300px",
value=''
),
HTML('<button class="btn btn-primary action-button shiny-bound-input" id="update_preview">Update Preview</button>'),
selectInput('selected_db', label = NULL, choices = names(db_list)),
tags$input(id = 'active_chart_id', type = 'text', value = '', class = 'shiny-bound-input', style = 'visibility: hidden; z-index: -1')
),
column(6, dataTableOutput("output_tbl"))
)
),
div(class = 'modal-footer',
HTML('<button type="button" data-dismiss="modal" class="btn">Cancel</button>
<button type="button" data-dismiss="modal" class="btn btn-primary action-button" id="save_changes">Save Query</button>')
)
),
#Main Stylesheet
includeCSS(str_c(sb_dir, '/www/main.css'))
))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/text.R
\name{dramaTail}
\alias{dramaTail}
\title{Extract section}
\usage{
dramaTail(input, by = c("Act", "Scene"), op = "==", n = 1)
}
\arguments{
\item{input}{Segmented text (can be multiple texts)}
\item{by}{Act or Scene, or matching substring}
\item{op}{Whether to extract exactly one or more than one}
\item{n}{The number of segments to extract}
}
\description{
Extracts a sub segment of the text(s).
The result is an empty table if more scenes or acts
are given than exist in the play. In this case, a
warning is printed.
}
\examples{
data(rksp.0)
# Extract the second last scene
dramaTail(rksp.0$mtext, by="Scene", op="==", n=2)
}
| /man/dramaTail.Rd | permissive | gitter-badger/DramaAnalysis | R | false | true | 718 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/text.R
\name{dramaTail}
\alias{dramaTail}
\title{Extract section}
\usage{
dramaTail(input, by = c("Act", "Scene"), op = "==", n = 1)
}
\arguments{
\item{input}{Segmented text (can be multiple texts)}
\item{by}{Act or Scene, or matching substring}
\item{op}{Whether to extract exactly one or more than one}
\item{n}{The number of segments to extract}
}
\description{
Extracts a sub segment of the text(s).
The result is an empty table if more scenes or acts
are given than exist in the play. In this case, a
warning is printed.
}
\examples{
data(rksp.0)
# Extract the second last scene
dramaTail(rksp.0$mtext, by="Scene", op="==", n=2)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{merge_clusters}
\alias{merge_clusters}
\title{Merge clusters of similar values.}
\usage{
merge_clusters(clusters, keys_vect, vect, keys_vect_sub, vect_sub)
}
\arguments{
\item{clusters}{character vector}
\item{keys_vect}{character vector}
\item{vect}{character vector}
\item{keys_vect_sub}{character vector}
\item{vect_sub}{character vector}
}
\description{
Function that performs all merges related to input value clusters.
}
| /man/merge_clusters.Rd | no_license | Henri-Lo/refinr | R | false | true | 529 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{merge_clusters}
\alias{merge_clusters}
\title{Merge clusters of similar values.}
\usage{
merge_clusters(clusters, keys_vect, vect, keys_vect_sub, vect_sub)
}
\arguments{
\item{clusters}{character vector}
\item{keys_vect}{character vector}
\item{vect}{character vector}
\item{keys_vect_sub}{character vector}
\item{vect_sub}{character vector}
}
\description{
Function that performs all merges related to input value clusters.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BchronDensityFast.R
\name{BchronDensityFast}
\alias{BchronDensityFast}
\title{Non-parametric phase model (faster version)}
\usage{
BchronDensityFast(
ages,
ageSds,
calCurves,
pathToCalCurves = system.file("data", package = "Bchron"),
dfs = rep(100, length(ages)),
samples = 2000,
G = 30
)
}
\arguments{
\item{ages}{A vector of ages (most likely 14C)}
\item{ageSds}{A vector of 1-sigma values for the ages given above}
\item{calCurves}{A vector of values containing either \code{intcal20}, \code{shcal20}, \code{marine20}, or \code{normal} (older calibration curves such as intcal13 are also supported). Should be the same length the number of ages supplied. Non-standard calibration curves can be used provided they are supplied in the same format as those previously mentioned and are placed in the same directory. Normal indicates a normally-distributed (non-14C) age.}
\item{pathToCalCurves}{File path to where the calibration curves are located. Defaults to the system directory where the 3 standard calibration curves are stored.}
\item{dfs}{Degrees-of-freedom values for the t-distribution associated with the calibration calculation. A large value indicates Gaussian distributions assumed for the 14C ages}
\item{samples}{Number of samples of calibrated dates required}
\item{G}{Number of Gaussian mixture components}
}
\value{
An object of class \code{BchronDensityRunFast} with the following components:
\item{out}{The output from the run of \code{\link{densityMclust}} with the given number of mixture components}
\item{calAges}{The calibrated ages from the \code{\link{BchronDensity}} function}
}
\description{
This function runs a non-parametric phase model on 14C and non-14C ages via Gaussian Mixture density estimation through the mclust package
}
\details{
This is a faster approximate version of \code{\link{BchronDensity}} that uses the \code{\link{densityMclust}} function to compute the Gaussian mixtures for a set of calibrated ages. The method is an approximation as it does not fit a fully Bayesian model as \code{\link{BchronDensity}} does. It is designed to be a probabilistic version of the Oxcal SUM command which takes calibrated ages and sums the probability distributions with the aim of estimating activity through age as a proxy.
}
\examples{
\donttest{
# Read in some data from Sluggan Moss
data(Sluggan)
# Run the model
SlugDensFast <- with(
Sluggan,
BchronDensityFast(
ages = ages,
ageSds = ageSds,
calCurves = calCurves
)
)
# plot it
plot(SlugDensFast)
}
}
\seealso{
\code{\link{Bchronology}}, \code{\link{BchronCalibrate}}, \code{\link{BchronRSL}}, \code{\link{BchronDensity}} for a slower exact version of this function
}
| /man/BchronDensityFast.Rd | no_license | andrewcparnell/Bchron | R | false | true | 2,778 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BchronDensityFast.R
\name{BchronDensityFast}
\alias{BchronDensityFast}
\title{Non-parametric phase model (faster version)}
\usage{
BchronDensityFast(
ages,
ageSds,
calCurves,
pathToCalCurves = system.file("data", package = "Bchron"),
dfs = rep(100, length(ages)),
samples = 2000,
G = 30
)
}
\arguments{
\item{ages}{A vector of ages (most likely 14C)}
\item{ageSds}{A vector of 1-sigma values for the ages given above}
\item{calCurves}{A vector of values containing either \code{intcal20}, \code{shcal20}, \code{marine20}, or \code{normal} (older calibration curves such as intcal13 are also supported). Should be the same length the number of ages supplied. Non-standard calibration curves can be used provided they are supplied in the same format as those previously mentioned and are placed in the same directory. Normal indicates a normally-distributed (non-14C) age.}
\item{pathToCalCurves}{File path to where the calibration curves are located. Defaults to the system directory where the 3 standard calibration curves are stored.}
\item{dfs}{Degrees-of-freedom values for the t-distribution associated with the calibration calculation. A large value indicates Gaussian distributions assumed for the 14C ages}
\item{samples}{Number of samples of calibrated dates required}
\item{G}{Number of Gaussian mixture components}
}
\value{
An object of class \code{BchronDensityRunFast} with the following components:
\item{out}{The output from the run of \code{\link{densityMclust}} with the given number of mixture components}
\item{calAges}{The calibrated ages from the \code{\link{BchronDensity}} function}
}
\description{
This function runs a non-parametric phase model on 14C and non-14C ages via Gaussian Mixture density estimation through the mclust package
}
\details{
This is a faster approximate version of \code{\link{BchronDensity}} that uses the \code{\link{densityMclust}} function to compute the Gaussian mixtures for a set of calibrated ages. The method is an approximation as it does not fit a fully Bayesian model as \code{\link{BchronDensity}} does. It is designed to be a probabilistic version of the Oxcal SUM command which takes calibrated ages and sums the probability distributions with the aim of estimating activity through age as a proxy.
}
\examples{
\donttest{
# Read in some data from Sluggan Moss
data(Sluggan)
# Run the model
SlugDensFast <- with(
Sluggan,
BchronDensityFast(
ages = ages,
ageSds = ageSds,
calCurves = calCurves
)
)
# plot it
plot(SlugDensFast)
}
}
\seealso{
\code{\link{Bchronology}}, \code{\link{BchronCalibrate}}, \code{\link{BchronRSL}}, \code{\link{BchronDensity}} for a slower exact version of this function
}
|
### Quadratic Programming
# minimize in x: f'*x + 0.5*x'*H*x
# subject to: A*x <= b
# subject to: Aeq*x == beq
# x >= lb
# x <= ub
### Quadratic Programming (MATLAB-style interface)
# minimize in x: f'*x + 0.5*x'*H*x
# subject to: A*x <= b
# subject to: Aeq*x == beq
# x >= lb
# x <= ub
#
#' Solve a quadratic program with a MATLAB-compatible signature.
#'
#' @param H Symmetric positive-definite matrix of the quadratic term.
#' @param f Vector of the linear term of the objective.
#' @param A,b Inequality constraints A x <= b (optional).
#' @param Aeq,beq Equality constraints Aeq x == beq (optional).
#' @param lb,ub Element-wise lower/upper bounds on x (optional).
#' @param x0,options Accepted for interface compatibility; currently ignored.
#' @param solver Accepted for interface compatibility; only "quadprog" is supported.
#' @return A list with component \code{x}, the minimizer.
quadprog <- function(H, f, A=NULL, b=NULL, Aeq=NULL, beq=NULL, lb=NULL, ub=NULL, x0=NULL, options=NULL, solver="quadprog") {
  # currently only quadprog is supported
  solver <- "quadprog"
  if (!requireNamespace("quadprog", quietly = TRUE)) {
    stop("package 'quadprog' is required", call. = FALSE)
  }
  # Build the constraint system row-wise in the order solve.QP expects:
  # equality rows first (counted by meq), then inequality rows.
  # solve.QP enforces A' x >= b0, so "<=" constraints are negated.
  # Starting from NULL (rbind(NULL, x) == x, c(NULL, v) == v) replaces the
  # original exists("Amat")/exists("bvec") checks, which could silently pick
  # up same-named variables from an enclosing environment.
  Amat <- NULL
  bvec <- NULL
  if (!is.null(Aeq)) {
    Amat <- rbind(Amat, Aeq)
    bvec <- c(bvec, beq)
  }
  if (!is.null(A)) {
    Amat <- rbind(Amat, -A)
    bvec <- c(bvec, -b)
  }
  # number of equality constraints (the leading rows of the system)
  meq <- length(beq)
  # Bounds become identity-matrix rows: x >= lb and -x >= -ub.
  if (!is.null(lb)) {
    Amat <- rbind(Amat, diag(length(lb)))
    bvec <- c(bvec, lb)
  }
  if (!is.null(ub)) {
    Amat <- rbind(Amat, -diag(length(ub)))
    bvec <- c(bvec, -ub)
  }
  # solve.QP minimizes -d'x + 0.5*x'*D*x, so the linear term must be
  # negated to realize the documented objective f'x + 0.5*x'*H*x.
  sol <- quadprog::solve.QP(H, -f, t(Amat), bvec, meq)
  # return solution in MATLAB-like shape
  list(x = sol$solution)
}
| /R/quadprog.R | no_license | mcordero01/modopt | R | false | false | 1,282 | r | ### Quadratic Programming
# minimize in x: f'*x + 0.5*x'*H*x
# subject to: A*x <= b
# subject to: Aeq*x == beq
# x >= lb
# x <= ub
# Solve a quadratic program with a MATLAB-like signature, delegating to
# quadprog::solve.QP. x0, options and solver are accepted for interface
# compatibility only; they are never used below.
quadprog <- function(H, f, A=NULL, b=NULL, Aeq=NULL, beq=NULL, lb=NULL, ub=NULL, x0=NULL, options=NULL, solver="quadprog") {
# currently only quadprog is supported
solver <- "quadprog"
# require quadprog
# NOTE(review): require() only warns and returns FALSE when the package is
# missing, so a later solve.QP call would fail less clearly — consider
# requireNamespace() with an explicit stop().
require("quadprog")
# build matrix A
# Equality rows are stacked first (solve.QP counts them via meq below);
# inequality rows are negated because solve.QP enforces A' x >= b0.
# NOTE(review): exists("Amat")/exists("bvec") search enclosing environments
# by default, so a global variable named Amat or bvec could be silently
# rbind-ed into the constraint system — TODO confirm and guard.
if (!is.null(Aeq)) { Amat <- Aeq }
if (!is.null(A)) {if (exists("Amat")) { Amat <- rbind(Amat, -A) } else { Amat <- -A } }
# build vector b
if (!is.null(beq)) { bvec <- beq }
if (!is.null(b)) {if (exists("bvec")) { bvec <- c(bvec, -b) } else { bvec <- -b } }
# specify number of equality constraints
meq <- length(beq)
# add lower and upper bounds
# Each bound becomes one identity row: x[i] >= lb[i], and -x[i] >= -ub[i].
if (!is.null(lb)) {
for(i in 1:length(lb)) {
lhs_lb = rep(0, length(lb))
lhs_lb[i] = 1
Amat <- rbind(Amat, lhs_lb)
bvec <- c(bvec, lb[i])
}
}
if (!is.null(ub)) {
for(i in 1:length(ub)) {
lhs_ub = rep(0, length(ub))
lhs_ub[i] = 1
Amat <- rbind(Amat, -lhs_ub)
bvec <- c(bvec, -ub[i])
}
}
# use quadprog
# NOTE(review): solve.QP minimizes -d'x + 0.5*x'*D*x; passing f unnegated
# therefore minimizes -f'x + 0.5*x'*H*x, which conflicts with the header
# comment "minimize f'*x + 0.5*x'*H*x" — confirm the intended sign.
sol <- solve.QP(H, f, t(Amat), bvec, meq)
# return solution
result <- list()
result$x <- sol$solution
return(result)
}
|
#' Simulate first differences for negative binomial models
#'
#' This function uses the \code{computeNegBinEV} function to simulate expected values, predicted values, and first differences for negative binomial models.
#' @param data imputed datasets in amelia object
#' @param x a vector of values for predictors
#' @param x1 a second vector of values for predictors
#' @param coef a matrix of coefficients
#' @param vcov a list of variance covariance matrices
#' @param num number of simulations. Default = 10000.
#' @param theta a vector of theta parameters from each fitted negative binomial model results
#' @export
qiNegBin <- function(data = NULL, x = NULL, x1 = NULL, coef = NULL, vcov = NULL, num = 10000, theta = NULL) {
  # Simulate the quantities of interest once per covariate profile.
  sims_x  <- computeNegBinEV(data, x,  coef, vcov, num, theta)
  sims_x1 <- computeNegBinEV(data, x1, coef, vcov, num, theta)
  # Assemble the named result list: expected values, predicted values,
  # and the first difference between the two profiles.
  out <- list(
    sims_x$ev,
    sims_x1$ev,
    sims_x$pv,
    sims_x1$pv,
    sims_x1$ev - sims_x$ev
  )
  names(out) <- c(
    "Expected Values: E(Y|X)",
    "Expected Values: E(Y|X1)",
    "Predicted Values: Y|X",
    "Predicted Values: Y|X1",
    "First Differences: E(Y|X1) - E(Y|X)"
  )
  out
}
| /R/qiNegBin.R | no_license | stevenliaotw/sltools | R | false | false | 1,162 | r | #' Simulate first differences for negative binomial models
#'
#' This function uses the \code{computeNegBinEV} function to simulate expected values, predicted values, and first differences for negative binomial models.
#' @param data imputed datasets in amelia object
#' @param x a vector of values for predictors
#' @param x1 a second vector of values for predictors
#' @param coef a matrix of coefficients
#' @param vcov a list of variance covariance matrices
#' @param num number of simulations. Default = 10000.
#' @param theta a vector of theta parameters from each fitted negative binomial model results
#' @export
qiNegBin <- function(data = NULL, x = NULL, x1 = NULL, coef = NULL, vcov = NULL, num = 10000, theta = NULL) {
qi1 <- computeNegBinEV(data, x, coef, vcov, num, theta)
qi2 <- computeNegBinEV(data, x1, coef, vcov, num, theta)
# Return quantities of interest, paired off with their titles
list("Expected Values: E(Y|X)" = qi1$ev,
"Expected Values: E(Y|X1)" = qi2$ev,
"Predicted Values: Y|X" = qi1$pv,
"Predicted Values: Y|X1" = qi2$pv,
"First Differences: E(Y|X1) - E(Y|X)" = qi2$ev - qi1$ev
)
}
|
library(caret)
# Load the feature-set-6 diabetes data; columns 1:3 are standardized in place
# (zero mean, unit variance) before model fitting.
dataset6=read.csv('diabetes-f6.csv')
dataset6[,1:3]=scale(dataset6[,1:3])
# Shared resampling scheme and selection metric for every model below:
# 10-fold cross-validation repeated 10 times, compared on Accuracy.
control <- trainControl(method="repeatedcv", number=10, repeats=10)
metric <- "Accuracy"
# Fit one caret model on the shared `dataset6` using the globally defined
# `metric` and `control` objects.
# x: caret method name (e.g. "glm", "lda"). The fixed seed makes the CV
# folds identical across methods, so the fits are comparable via resamples().
rf6<- function(x){
set.seed(7)
return(train(as.character(Outcome)~., data=dataset6, metric=metric, method=x, trControl=control))
}
# Fit the full battery of 36 classifier families under the identical
# repeated-CV scheme defined above; each call re-seeds, so all models see
# the same folds.
fit.gamboost6=rf6("gamboost")
fit.regLogistic6=rf6("regLogistic")
fit.multinom6=rf6("multinom")
fit.bayesglm6=rf6("bayesglm")
fit.plr6=rf6("plr")
fit.glm6=rf6("glm")
fit.gpls6=rf6("gpls")
fit.svmLinear36=rf6("svmLinear3")
fit.sparseLDA6=rf6("sparseLDA")
fit.pda6=rf6("pda")
fit.lda6=rf6("lda")
fit.LMT6=rf6("LMT")
fit.RFlda6=rf6("RFlda")
fit.fda6=rf6("fda")
fit.kernelpls6=rf6("kernelpls")
fit.sda6=rf6("sda")
fit.sdwd6=rf6("sdwd")
fit.glmboost6=rf6("glmboost")
fit.pda26=rf6("pda2")
fit.gbm6=rf6("gbm")
fit.gcvEarth6=rf6("gcvEarth")
fit.svmLinear6=rf6("svmLinear")
fit.glmStepAIC6=rf6("glmStepAIC")
fit.gamLoess6=rf6("gamLoess")
fit.loclda6=rf6("loclda")
fit.hdda6=rf6("hdda")
fit.parRF6=rf6("parRF")
fit.Mlda6=rf6("Mlda")
fit.svmRadial6=rf6("svmRadial")
fit.c56=rf6("C5.0")
fit.mlp6=rf6("mlp")
fit.mda6=rf6("mda")
fit.nb6=rf6("nb")
fit.rpart26=rf6("rpart2")
fit.c5rules6=rf6("C5.0Rules")
fit.treebag6=rf6("treebag")
# Collect the matched cross-validation results for paired comparison of all
# models (same folds, same metric).
results6=resamples(list(GAMBOOST=fit.gamboost6,REGLOGISTIC=fit.regLogistic6,MULTINOM=fit.multinom6,BAYESGLM=fit.bayesglm6,
PLR=fit.plr6,GLM=fit.glm6,GPLS=fit.gpls6,SVMLINEAR3=fit.svmLinear36,SPARSELDA=fit.sparseLDA6,
PDA=fit.pda6,LDA=fit.lda6,LMT=fit.LMT6,RFLDA=fit.RFlda6,FDA=fit.fda6,KERNELPLS=fit.kernelpls6,
SDA=fit.sda6,SDWD=fit.sdwd6,GLMBOOST=fit.glmboost6,PDA2=fit.pda26,GBM=fit.gbm6,GCVEARTH=fit.gcvEarth6,
SVMLINEAR=fit.svmLinear6,GLMSTEPAIC=fit.glmStepAIC6,GAMLOESS=fit.gamLoess6,LOCLDA=fit.loclda6,
HDDA=fit.hdda6,PARRF=fit.parRF6,MLDA=fit.Mlda6,SVMRADIAL=fit.svmRadial6,C5=fit.c56,MLP=fit.mlp6,
MDA=fit.mda6,NB=fit.nb6,RPART2=fit.rpart26,C5RULES=fit.c5rules6,TREEBAG=fit.treebag6))
# Summary table and box-and-whisker comparison of accuracies across models.
summary(results6)
bwplot(results6) | /Backup/ROCCalculation/Test6.R | no_license | shahriariit/DiabetesAnalysis | R | false | false | 2,113 | r | library(caret)
dataset6=read.csv('diabetes-f6.csv')
dataset6[,1:3]=scale(dataset6[,1:3])
control <- trainControl(method="repeatedcv", number=10, repeats=10)
metric <- "Accuracy"
rf6<- function(x){
set.seed(7)
return(train(as.character(Outcome)~., data=dataset6, metric=metric, method=x, trControl=control))
}
fit.gamboost6=rf6("gamboost")
fit.regLogistic6=rf6("regLogistic")
fit.multinom6=rf6("multinom")
fit.bayesglm6=rf6("bayesglm")
fit.plr6=rf6("plr")
fit.glm6=rf6("glm")
fit.gpls6=rf6("gpls")
fit.svmLinear36=rf6("svmLinear3")
fit.sparseLDA6=rf6("sparseLDA")
fit.pda6=rf6("pda")
fit.lda6=rf6("lda")
fit.LMT6=rf6("LMT")
fit.RFlda6=rf6("RFlda")
fit.fda6=rf6("fda")
fit.kernelpls6=rf6("kernelpls")
fit.sda6=rf6("sda")
fit.sdwd6=rf6("sdwd")
fit.glmboost6=rf6("glmboost")
fit.pda26=rf6("pda2")
fit.gbm6=rf6("gbm")
fit.gcvEarth6=rf6("gcvEarth")
fit.svmLinear6=rf6("svmLinear")
fit.glmStepAIC6=rf6("glmStepAIC")
fit.gamLoess6=rf6("gamLoess")
fit.loclda6=rf6("loclda")
fit.hdda6=rf6("hdda")
fit.parRF6=rf6("parRF")
fit.Mlda6=rf6("Mlda")
fit.svmRadial6=rf6("svmRadial")
fit.c56=rf6("C5.0")
fit.mlp6=rf6("mlp")
fit.mda6=rf6("mda")
fit.nb6=rf6("nb")
fit.rpart26=rf6("rpart2")
fit.c5rules6=rf6("C5.0Rules")
fit.treebag6=rf6("treebag")
results6=resamples(list(GAMBOOST=fit.gamboost6,REGLOGISTIC=fit.regLogistic6,MULTINOM=fit.multinom6,BAYESGLM=fit.bayesglm6,
PLR=fit.plr6,GLM=fit.glm6,GPLS=fit.gpls6,SVMLINEAR3=fit.svmLinear36,SPARSELDA=fit.sparseLDA6,
PDA=fit.pda6,LDA=fit.lda6,LMT=fit.LMT6,RFLDA=fit.RFlda6,FDA=fit.fda6,KERNELPLS=fit.kernelpls6,
SDA=fit.sda6,SDWD=fit.sdwd6,GLMBOOST=fit.glmboost6,PDA2=fit.pda26,GBM=fit.gbm6,GCVEARTH=fit.gcvEarth6,
SVMLINEAR=fit.svmLinear6,GLMSTEPAIC=fit.glmStepAIC6,GAMLOESS=fit.gamLoess6,LOCLDA=fit.loclda6,
HDDA=fit.hdda6,PARRF=fit.parRF6,MLDA=fit.Mlda6,SVMRADIAL=fit.svmRadial6,C5=fit.c56,MLP=fit.mlp6,
MDA=fit.mda6,NB=fit.nb6,RPART2=fit.rpart26,C5RULES=fit.c5rules6,TREEBAG=fit.treebag6))
summary(results6)
bwplot(results6) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/misc_and_utility.R
\name{umx_rename}
\alias{umx_rename}
\title{umx_rename}
\usage{
umx_rename(
data,
from = NULL,
to = NULL,
regex = NULL,
test = FALSE,
old = "deprecated",
replace = "deprecated"
)
}
\arguments{
\item{data}{The dataframe in which to rename variables}
\item{from}{List of existing names that will be found and replaced by the contents of replace. (optional: Defaults to NULL).}
\item{to}{If used alone, a named collection of c(oldName = "newName") pairs.
OR, if "from" is a list of existing names, the list of new names)
OR, if "regex" is a regular expression, the replace string)}
\item{regex}{Regular expression with matches will be replaced using replace as the replace string. (Optional: Defaults to NULL).}
\item{test}{Whether to report a "dry run", not changing anything. (Default = FALSE).}
\item{old}{deprecated: use from}
\item{replace}{deprecated: use to}
}
\value{
\itemize{
\item dataframe with columns renamed.
}
}
\description{
Returns a dataframe with variables renamed as desired.
}
\details{
Unlike similar functions in other packages, it checks that the variables exist, and that the new names do not.
Importantly, it also supports \link[=regex]{regular expressions}. This allows you to find and replace
text based on patterns and replacements. so to change "replacement" to "in place",
\verb{grep=re(place)ment}, \verb{replace= in \\\\1}.
\emph{note}:To use replace list, you must say c(old = "new"), not c(old -> "new")
}
\examples{
tmp = mtcars
tmp = umx_rename(tmp, to = c(cyl = "cylinder"))
# let's check cyl has been changed to cylinder...
namez(tmp, "c")
# Alternate style: from->to, first with a test-run
# Dry run
tmp = umx_rename(tmp, from = "disp", to = "displacement", test= TRUE)
# Actually do it
tmp = umx_rename(tmp, from = c("disp"), to = c("displacement"))
umx_check_names("displacement", data = tmp, die = TRUE)
namez(tmp, "disp")
# This will warn that "disp" does not exist (anymore)
new = c("auto", "displacement", "rear_axle_ratio")
tmp = umx_rename(tmp, from = c("am", "disp", "drat"), to = new)
namez(tmp, "a") # still updated am to auto (and rear_axle_ratio)
# Test using regex (in this case to revert "displacement" to "disp")
tmp = umx_rename(tmp, regex = "lacement", to = "", test= TRUE)
tmp = umx_rename(tmp, regex = "lacement", to = "") # revert to disp
umx_names(tmp, "^d") # all names beginning with a d
# advanced: checking deprecated format handled...
tmp = umx_rename(tmp, old = c("am", "disp", "drat"), replace = new)
}
\seealso{
\link{namez} to filter (and replace) names, Also \link{umx_check_names} to check for existence of names in a dataframe.
Other Data Functions:
\code{\link{umxFactor}()},
\code{\link{umxHetCor}()},
\code{\link{umx_as_numeric}()},
\code{\link{umx_cont_2_quantiles}()},
\code{\link{umx_lower2full}()},
\code{\link{umx_make_MR_data}()},
\code{\link{umx_make_TwinData}()},
\code{\link{umx_make_fake_data}()},
\code{\link{umx_make_raw_from_cov}()},
\code{\link{umx_polychoric}()},
\code{\link{umx_polypairwise}()},
\code{\link{umx_polytriowise}()},
\code{\link{umx_read_lower}()},
\code{\link{umx_read_prolific_demog}()},
\code{\link{umx_reorder}()},
\code{\link{umx_select_valid}()},
\code{\link{umx_stack}()},
\code{\link{umx}}
}
\concept{Data Functions}
| /man/umx_rename.Rd | no_license | jishanling/umx | R | false | true | 3,357 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/misc_and_utility.R
\name{umx_rename}
\alias{umx_rename}
\title{umx_rename}
\usage{
umx_rename(
data,
from = NULL,
to = NULL,
regex = NULL,
test = FALSE,
old = "deprecated",
replace = "deprecated"
)
}
\arguments{
\item{data}{The dataframe in which to rename variables}
\item{from}{List of existing names that will be found and replaced by the contents of replace. (optional: Defaults to NULL).}
\item{to}{If used alone, a named collection of c(oldName = "newName") pairs.
OR, if "from" is a list of existing names, the list of new names)
OR, if "regex" is a regular expression, the replace string)}
\item{regex}{Regular expression with matches will be replaced using replace as the replace string. (Optional: Defaults to NULL).}
\item{test}{Whether to report a "dry run", not changing anything. (Default = FALSE).}
\item{old}{deprecated: use from}
\item{replace}{deprecated: use to}
}
\value{
\itemize{
\item dataframe with columns renamed.
}
}
\description{
Returns a dataframe with variables renamed as desired.
}
\details{
Unlike similar functions in other packages, it checks that the variables exist, and that the new names do not.
Importantly, it also supports \link[=regex]{regular expressions}. This allows you to find and replace
text based on patterns and replacements. so to change "replacement" to "in place",
\verb{grep=re(place)ment}, \verb{replace= in \\\\1}.
\emph{note}:To use replace list, you must say c(old = "new"), not c(old -> "new")
}
\examples{
tmp = mtcars
tmp = umx_rename(tmp, to = c(cyl = "cylinder"))
# let's check cyl has been changed to cylinder...
namez(tmp, "c")
# Alternate style: from->to, first with a test-run
# Dry run
tmp = umx_rename(tmp, from = "disp", to = "displacement", test= TRUE)
# Actually do it
tmp = umx_rename(tmp, from = c("disp"), to = c("displacement"))
umx_check_names("displacement", data = tmp, die = TRUE)
namez(tmp, "disp")
# This will warn that "disp" does not exist (anymore)
new = c("auto", "displacement", "rear_axle_ratio")
tmp = umx_rename(tmp, from = c("am", "disp", "drat"), to = new)
namez(tmp, "a") # still updated am to auto (and rear_axle_ratio)
# Test using regex (in this case to revert "displacement" to "disp")
tmp = umx_rename(tmp, regex = "lacement", to = "", test= TRUE)
tmp = umx_rename(tmp, regex = "lacement", to = "") # revert to disp
umx_names(tmp, "^d") # all names beginning with a d
# advanced: checking deprecated format handled...
tmp = umx_rename(tmp, old = c("am", "disp", "drat"), replace = new)
}
\seealso{
\link{namez} to filter (and replace) names, Also \link{umx_check_names} to check for existence of names in a dataframe.
Other Data Functions:
\code{\link{umxFactor}()},
\code{\link{umxHetCor}()},
\code{\link{umx_as_numeric}()},
\code{\link{umx_cont_2_quantiles}()},
\code{\link{umx_lower2full}()},
\code{\link{umx_make_MR_data}()},
\code{\link{umx_make_TwinData}()},
\code{\link{umx_make_fake_data}()},
\code{\link{umx_make_raw_from_cov}()},
\code{\link{umx_polychoric}()},
\code{\link{umx_polypairwise}()},
\code{\link{umx_polytriowise}()},
\code{\link{umx_read_lower}()},
\code{\link{umx_read_prolific_demog}()},
\code{\link{umx_reorder}()},
\code{\link{umx_select_valid}()},
\code{\link{umx_stack}()},
\code{\link{umx}}
}
\concept{Data Functions}
|
# tune.R - DESC
# /tune.R
# Copyright European Union, 2017
# Author: Iago Mosqueira (EC JRC) <iago.mosqueira@ec.europa.eu>
#
# Distributed under the terms of the European Union Public Licence (EUPL) V.1.1.
library(ioalbmse)
library(doParallel)
registerDoParallel(4)
# --- SETUP
# data
data(indicators)
data(oms)
# PREPARE cpue
# Extend the observed CPUE index to 2040 and replicate it over 200 iterations.
cpue <- propagate(window(ocpue$index, end=2040), 200)
cpuesel <- ocpue$sel.pattern[-1,'2014']
# NOTE(review): this data-derived oemparams is immediately overwritten by the
# fixed FLPar(sd=0.3, b=0) below — confirm which parameterization is intended
# and delete the unused one.
oemparams <- FLPar(sd=c(sqrt(yearVars(ocpue$index.res))),
b=c(apply(osr$residuals, c(1,3:6), function(x) acf(x, plot=FALSE)$acf[2])))
oemparams <- FLPar(sd=0.3, b=0)
# GENERATE SR residuals
# NOTE(review): this sres is also overwritten below; however, removing the
# call is not obviously safe — rlnoise(..., seed=2017) presumably touches the
# RNG state that later sample() calls depend on. Verify before cleaning up.
sres <- rlnoise(200, len=FLQuant(0, dimnames=dimnames(cpue)), sd=oemparams$sd, b=oemparams$b, seed=2017)
# Per-iteration residual sd and lag-1 autocorrelation from the fitted SR model.
resd <- c(sqrt(yearVars(osr$residuals)))
rerho <- c(apply(osr$residuals, c(1,3:6), function(x) acf(x, plot=FALSE)$acf[2]))
# Rebuild sres over the projection window (2014+) and fill each iteration
# with autocorrelated lognormal noise.
sres <- FLQuant(0, dimnames=dimnames(window(stock(omp), start=2014)))
for(i in seq(dim(sres)[6]))
sres[,,,,,i] <- rlnoise(1, sres[,,,,,i], sd=resd[1], b=rerho[i])
# Install the generated residuals into the stock-recruit object used by the MSE.
osr@.Data[[1]] <- sres
# ARGUMENTS
years <- seq(2014, 2034, by=2)
outyears <- years[1] + c(1, 5, 10, 20)
# --- TUNE 1: P(B > B_MSY) = 0.5, 20 y mean
grid <- list(lambda=seq(0.5, 1.50, length=5),
dltac=seq(0.10, 0.30, length=3), dhtac=seq(0.10, 0.30, length=3),
Dlimit=seq(0.10, 0.20, length=2), Dtarget=seq(0.30, 0.50, length=3))
system.time(rgPT <- doRuns(msePT, grid=grid, omp=omp, sr=osr, sa=FALSE,
cpue=cpue, cpuesel=cpuesel, years=years, oemparams=FLPar(sd=0.3, b=0)))
save(rgPT, file="pt/grid_msePT_5000.RData", compress="xz")
# COMPUTE P(SB_2015-2034 > SBMSY)
tun1 <- unlist(lapply(rgPT,
function(x) sum(c(ssb(x)[,ac(2015:2034)]) > c(rpts$SBMSY)) / (length(ssb(x)[,ac(2015:2034)]))))
# SUBSET runs with 0.48 > P < 0.25
idx1 <- names(tun1)[tun1 < 0.501 & tun1 > 0.499]
rg1pt <- rgPT[sample(idx1, 4)]
prg1pt <- performance(rg1pt, indicators, rpts, years=2034, grid=grid, mp="PT")
qrg1pt <- performance(rg1pt, indicators, rpts, years=2034,
probs=c(0.1, 0.25, 0.50, 0.75, 0.90), grid=grid,mp="PT")
save(rg1pt, prg1pt, qrg1pt, file="pt/rg1_msePT.RData", compress="xz")
# COMPUTE P(SB_2015-2034 > SBMSY & F_2015-2034 < FMSY)
tun2 <- unlist(lapply(rgPT, function(x)
sum(c(ssb(x)[,ac(2015:2034)]) > c(rpts$SBMSY) & c(fbar(x)[,ac(2015:2034)]) < c(rpts$FMSY)) /
(length(ssb(x)[,ac(2015:2034)]))))
# SUBSET runs with 0.72 > P < 0.78
idx2 <- names(tun2)[tun2 < 0.751 & tun2 > 0.749]
rg2pt <- rgPT[sample(idx2, 4)]
prg2pt <- performance(rg2pt, indicators, rpts, years=outyears, grid=grid, mp="PT")
qrg2pt <- performance(rg2pt, indicators, rpts, years=outyears,
probs=c(0.1, 0.25, 0.50, 0.75, 0.90), grid=grid, mp="PT")
save(rg2pt, prg2pt, qrg2pt, file="pt/rg2_msePT.RData", compress="xz")
# TUNE 2: P(kobe=green) = 0.75, 20 y mean
# COMBINE datasets
# TUNING 1
load("pt/tun1_msePT.RData")
load("ind/tun1_mseIndex.RData")
names(rg1pt) <- paste("PT_", names(rg1pt))
names(rg1ind) <- paste("IND_", names(rg1ind))
runs <- FLStocks(c(rg1ind, rg1pt))
perf <- rbind(prg1ind[,1:7], prg1pt[,1:7])
qperf <- rbind(qrg1ind[,1:10], qrg1pt[,1:10])
save(runs, perf, qperf, file="tun1.RData", compress="xz")
# TUNING 2
load("pt/tun2_msePT.RData")
load("ind/tun2_mseIndex.RData")
names(rg2pt) <- paste("PT_", names(rg2pt))
names(rg2ind) <- paste("IND_", names(rg2ind))
runs <- FLStocks(c(rg2ind, rg2pt))
perf <- rbind(prg2ind[,1:7], prg2pt[,1:7])
qperf <- rbind(qrg2ind[,1:10], qrg2pt[,1:10])
save(runs, perf, qperf, file="tun2.RData", compress="xz")
| /ioalbmse/exec/tune.R | no_license | pl202/ALB | R | false | false | 3,503 | r | # tune.R - DESC
# /tune.R
# Copyright European Union, 2017
# Author: Iago Mosqueira (EC JRC) <iago.mosqueira@ec.europa.eu>
#
# Distributed under the terms of the European Union Public Licence (EUPL) V.1.1.
# Tunes a Pella-Tomlinson biomass-dynamics management procedure (msePT) over a
# grid of HCR control parameters, selects runs matching target probabilities,
# and combines performance tables with those of an index-based MP.
library(ioalbmse)
library(doParallel)
# Grid evaluations are run on 4 parallel workers
registerDoParallel(4)
# --- SETUP
# data
data(indicators)
data(oms)
# PREPARE cpue: extend the observed index to 2040 and expand to 200 iterations
cpue <- propagate(window(ocpue$index, end=2040), 200)
cpuesel <- ocpue$sel.pattern[-1,'2014']
# Observation-error-model parameters estimated from the index residuals:
# sd from year variances, b (autocorrelation) from lag-1 ACF of SR residuals
oemparams <- FLPar(sd=c(sqrt(yearVars(ocpue$index.res))),
b=c(apply(osr$residuals, c(1,3:6), function(x) acf(x, plot=FALSE)$acf[2])))
# NOTE(review): the line below overwrites the estimates above with fixed
# values (sd=0.3, b=0); the estimation step is effectively dead code.
oemparams <- FLPar(sd=0.3, b=0)
# GENERATE SR residuals
sres <- rlnoise(200, len=FLQuant(0, dimnames=dimnames(cpue)), sd=oemparams$sd, b=oemparams$b, seed=2017)
# Per-iteration sd and lag-1 autocorrelation of the SR residuals
resd <- c(sqrt(yearVars(osr$residuals)))
rerho <- c(apply(osr$residuals, c(1,3:6), function(x) acf(x, plot=FALSE)$acf[2]))
# NOTE(review): sres is rebuilt here iteration by iteration, discarding the
# rlnoise() call above; also note sd=resd[1] uses only the FIRST iteration's
# sd for every iteration — confirm whether resd[i] was intended.
sres <- FLQuant(0, dimnames=dimnames(window(stock(omp), start=2014)))
for(i in seq(dim(sres)[6]))
sres[,,,,,i] <- rlnoise(1, sres[,,,,,i], sd=resd[1], b=rerho[i])
osr@.Data[[1]] <- sres
# ARGUMENTS
# Management cycle every 2 years over 2014-2034; report at years 1, 5, 10, 20
years <- seq(2014, 2034, by=2)
outyears <- years[1] + c(1, 5, 10, 20)
# --- TUNE 1: P(B > B_MSY) = 0.5, 20 y mean
# Grid of PT HCR control parameters (5 x 3 x 3 x 2 x 3 = 270 combinations)
grid <- list(lambda=seq(0.5, 1.50, length=5),
dltac=seq(0.10, 0.30, length=3), dhtac=seq(0.10, 0.30, length=3),
Dlimit=seq(0.10, 0.20, length=2), Dtarget=seq(0.30, 0.50, length=3))
system.time(rgPT <- doRuns(msePT, grid=grid, omp=omp, sr=osr, sa=FALSE,
cpue=cpue, cpuesel=cpuesel, years=years, oemparams=FLPar(sd=0.3, b=0)))
save(rgPT, file="pt/grid_msePT_5000.RData", compress="xz")
# COMPUTE P(SB_2015-2034 > SBMSY)
# For each grid run, the proportion of iteration-year cells with SSB > SBMSY
tun1 <- unlist(lapply(rgPT,
function(x) sum(c(ssb(x)[,ac(2015:2034)]) > c(rpts$SBMSY)) / (length(ssb(x)[,ac(2015:2034)]))))
# SUBSET runs with P ~ 0.50, i.e. 0.499 < P < 0.501
# NOTE(review): the original comment read "0.48 > P < 0.25", which does not
# match the code below; the actual selection window is (0.499, 0.501).
idx1 <- names(tun1)[tun1 < 0.501 & tun1 > 0.499]
# Keep 4 randomly sampled runs from the tuned subset
rg1pt <- rgPT[sample(idx1, 4)]
# Performance tables: point indicators and selected quantiles at 2034
prg1pt <- performance(rg1pt, indicators, rpts, years=2034, grid=grid, mp="PT")
qrg1pt <- performance(rg1pt, indicators, rpts, years=2034,
probs=c(0.1, 0.25, 0.50, 0.75, 0.90), grid=grid,mp="PT")
save(rg1pt, prg1pt, qrg1pt, file="pt/rg1_msePT.RData", compress="xz")
# --- TUNE 2: select runs hitting the Kobe-green target ---
# COMPUTE P(SB_2015-2034 > SBMSY & F_2015-2034 < FMSY)
tun2 <- unlist(lapply(rgPT, function(x)
sum(c(ssb(x)[,ac(2015:2034)]) > c(rpts$SBMSY) & c(fbar(x)[,ac(2015:2034)]) < c(rpts$FMSY)) /
(length(ssb(x)[,ac(2015:2034)]))))
# SUBSET runs with P ~ 0.75, i.e. 0.749 < P < 0.751
# NOTE(review): the original comment said "0.72 > P < 0.78", which does not
# match the code below; the actual selection window is (0.749, 0.751).
idx2 <- names(tun2)[tun2 < 0.751 & tun2 > 0.749]
rg2pt <- rgPT[sample(idx2, 4)]
prg2pt <- performance(rg2pt, indicators, rpts, years=outyears, grid=grid, mp="PT")
qrg2pt <- performance(rg2pt, indicators, rpts, years=outyears,
probs=c(0.1, 0.25, 0.50, 0.75, 0.90), grid=grid, mp="PT")
save(rg2pt, prg2pt, qrg2pt, file="pt/rg2_msePT.RData", compress="xz")
# TUNE 2: P(kobe=green) = 0.75, 20 y mean
# COMBINE datasets: merge PT and index-based MP results per tuning level
# TUNING 1
load("pt/tun1_msePT.RData")
load("ind/tun1_mseIndex.RData")
# NOTE(review): paste() defaults to sep=" ", so names come out as "PT_ run";
# paste0() was probably intended — confirm against downstream consumers.
names(rg1pt) <- paste("PT_", names(rg1pt))
names(rg1ind) <- paste("IND_", names(rg1ind))
runs <- FLStocks(c(rg1ind, rg1pt))
perf <- rbind(prg1ind[,1:7], prg1pt[,1:7])
qperf <- rbind(qrg1ind[,1:10], qrg1pt[,1:10])
save(runs, perf, qperf, file="tun1.RData", compress="xz")
# TUNING 2
load("pt/tun2_msePT.RData")
load("ind/tun2_mseIndex.RData")
# Same naming caveat as above: sep=" " leaves a space after the prefix.
names(rg2pt) <- paste("PT_", names(rg2pt))
names(rg2ind) <- paste("IND_", names(rg2ind))
runs <- FLStocks(c(rg2ind, rg2pt))
perf <- rbind(prg2ind[,1:7], prg2pt[,1:7])
qperf <- rbind(qrg2ind[,1:10], qrg2pt[,1:10])
save(runs, perf, qperf, file="tun2.RData", compress="xz")
|
# Example: habitat percentage within a 100 m buffer, computed in parallel
# through GRASS GIS. Requires a local GRASS installation plus the terra and
# rgrass packages; creates and removes an on-disk GRASS database ("grassdb").
library(lsmetrics)
library(terra)
# read habitat data: toy binary landscape (habitat/non-habitat raster)
r <- lsmetrics::lsm_toy_landscape(type = "binary")
# plot the binary landscape with cell and patch outlines, plus cell values
plot(r, legend = FALSE, axes = FALSE, main = "Binary habitat")
plot(as.polygons(r, dissolve = FALSE), lwd = .1, add = TRUE)
plot(as.polygons(r), add = TRUE)
text(r)
# find grass: ask the grass binary for its installation path (POSIX systems)
path_grass <- system("grass --config path", inter = TRUE) # windows users need to find the grass gis path installation, e.g. "C:/Program Files/GRASS GIS 8.3"
# create grassdb: a fresh GRASS location whose region matches the raster r
rgrass::initGRASS(gisBase = path_grass,
SG = r,
gisDbase = "grassdb",
location = "newLocation",
mapset = "PERMANENT",
override = TRUE)
# import raster from r to grass ("o": override projection check)
rgrass::write_RAST(x = r, flags = c("o", "overwrite"), vname = "r")
# percentage: habitat share within a 100 m buffer, split over a 1000 m grid
# and processed on 5 parallel GRASS processes; the tiling grid is kept
# (grid_delete = FALSE) so it can be read back and plotted below
lsmetrics::lsm_percentage_parallel(input = "r", buffer_radius = 100, grid_size = 1000, grid_delete = FALSE, nprocs = 5)
# files: uncomment to list the rasters/vectors created in the GRASS database
# rgrass::execGRASS(cmd = "g.list", type = "raster")
# rgrass::execGRASS(cmd = "g.list", type = "vector")
# import from grass to r: the tiling grid and the percentage raster
# NOTE(review): names "grid_sel" and "r_pct_buf100" are assumed to be the
# outputs written by lsm_percentage_parallel() — confirm against the package.
v <- rgrass::read_VECT("grid_sel", flags = "quiet")
r_pct_buf100 <- rgrass::read_RAST("r_pct_buf100", flags = "quiet", return_format = "terra")
# plot the result with patch outlines and the processing grid overlaid
plot(r_pct_buf100, legend = FALSE, axes = FALSE, main = "Habitat percentage (buffer 100 m)")
plot(as.polygons(r, dissolve = FALSE), lwd = .1, add = TRUE)
plot(as.polygons(r), add = TRUE)
plot(v, lwd = 3, border = "blue", add = TRUE)
text(v, cex = 3, col = "blue")
text(r_pct_buf100, cex = .75)
# delete grassdb: remove the temporary GRASS database from disk
unlink("grassdb", recursive = TRUE)
| /examples/lsm_percentage_parallel_example.R | no_license | mauriciovancine/lsmetrics | R | false | false | 1,570 | r | library(lsmetrics)
# Example (continued): habitat percentage within a 100 m buffer, computed in
# parallel through GRASS GIS. Requires a local GRASS installation plus the
# terra and rgrass packages; creates/removes an on-disk database ("grassdb").
library(terra)
# read habitat data: toy binary landscape (habitat/non-habitat raster)
r <- lsmetrics::lsm_toy_landscape(type = "binary")
# plot the binary landscape with cell and patch outlines, plus cell values
plot(r, legend = FALSE, axes = FALSE, main = "Binary habitat")
plot(as.polygons(r, dissolve = FALSE), lwd = .1, add = TRUE)
plot(as.polygons(r), add = TRUE)
text(r)
# find grass: ask the grass binary for its installation path (POSIX systems)
path_grass <- system("grass --config path", inter = TRUE) # windows users need to find the grass gis path installation, e.g. "C:/Program Files/GRASS GIS 8.3"
# create grassdb: a fresh GRASS location whose region matches the raster r
rgrass::initGRASS(gisBase = path_grass,
SG = r,
gisDbase = "grassdb",
location = "newLocation",
mapset = "PERMANENT",
override = TRUE)
# import raster from r to grass ("o": override projection check)
rgrass::write_RAST(x = r, flags = c("o", "overwrite"), vname = "r")
# percentage: habitat share within a 100 m buffer, split over a 1000 m grid
# and processed on 5 parallel GRASS processes; the tiling grid is kept
# (grid_delete = FALSE) so it can be read back and plotted below
lsmetrics::lsm_percentage_parallel(input = "r", buffer_radius = 100, grid_size = 1000, grid_delete = FALSE, nprocs = 5)
# files: uncomment to list the rasters/vectors created in the GRASS database
# rgrass::execGRASS(cmd = "g.list", type = "raster")
# rgrass::execGRASS(cmd = "g.list", type = "vector")
# import from grass to r: the tiling grid and the percentage raster
# NOTE(review): names "grid_sel" and "r_pct_buf100" are assumed to be the
# outputs written by lsm_percentage_parallel() — confirm against the package.
v <- rgrass::read_VECT("grid_sel", flags = "quiet")
r_pct_buf100 <- rgrass::read_RAST("r_pct_buf100", flags = "quiet", return_format = "terra")
# plot the result with patch outlines and the processing grid overlaid
plot(r_pct_buf100, legend = FALSE, axes = FALSE, main = "Habitat percentage (buffer 100 m)")
plot(as.polygons(r, dissolve = FALSE), lwd = .1, add = TRUE)
plot(as.polygons(r), add = TRUE)
plot(v, lwd = 3, border = "blue", add = TRUE)
text(v, cex = 3, col = "blue")
text(r_pct_buf100, cex = .75)
# delete grassdb: remove the temporary GRASS database from disk
unlink("grassdb", recursive = TRUE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rtemisInit.R
\name{errorSummary}
\alias{errorSummary}
\title{\code{rtemis-internals}: \code{errorSummary}}
\usage{
errorSummary(error, mod.name = NULL, pre = NULL)
}
\description{
Print fit and validation \code{modError} summaries.
}
\keyword{internal}
| /man/errorSummary.Rd | no_license | bakaibaiazbekov/rtemis | R | false | true | 313 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rtemisInit.R
\name{errorSummary}
\alias{errorSummary}
\title{\code{rtemis-internals}: \code{errorSummary}}
\usage{
errorSummary(error, mod.name = NULL, pre = NULL)
}
\description{
Print fit and validation \code{modError} summaries.
}
\keyword{internal}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.