content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Fuzzer-generated regression input (AFL / valgrind harness) for
# multivariance:::match_rows. Replays a fixed argument list with extreme
# denormal doubles to reproduce a previously flagged memory issue.
# A is a 1x3 matrix of subnormal values, B a 1x1 zero matrix.
testlist <- list(A = structure(c(1.38997190089718e-309, 3.81575932257023e-236, 3.81571458063891e-236), .Dim = c(1L, 3L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) | /multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613126055-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 226 | r | testlist <- list(A = structure(c(1.38997190089718e-309, 3.81575932257023e-236, 3.81571458063891e-236), .Dim = c(1L, 3L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) |
# Build a follower sub-network among the retweeters of one tweet (from locally
# cached Twitter data) and rank its members by alpha centrality.
# NOTE(review): relies on objects defined elsewhere -- `rters` (data frame of
# user records with columns id and screenName) and `fusion` (the seed account)
# -- plus the networkD3 and igraph packages; confirm they are loaded first.
data_path <- "../server_get/twitterdata"
# IDs of the users who retweeted the given tweet (one cached CSV per tweet id).
rter_id <- read.csv(paste(data_path, "tweets", "612121494232145920", sep = "/"))$id
# 609418768578674688
# 628783838190305280
# Collect the user records matching those ids.
# NOTE(review): grows `rter` with rbind inside the loop (quadratic); acceptable
# for the small id lists used here.
rter <- c()
for(id in rter_id) {
rter <- rbind(rter, rters[rters$id == id,])
}
# Keep only retweeters whose friend lists are cached on disk, with the seed
# account's id placed first so later edges can point back to it.
rter_id <- c(fusion$id, rev(intersect(rter_id, dir(paste(data_path, "friends", sep = "/")))))
print(head(rter_id))
# Edge list construction: user i "follows" an earlier user j whenever j's id
# appears in i's cached friends file.
inDegrees <- c()
for(i in 2:length(rter_id)) {
inDegree <- intersect(rter_id[1:(i-1)],
read.csv(paste(data_path, "friends", rter_id[i], sep = "/"))$id)
if(length(inDegree)) {
inDegrees <- rbind(inDegrees, cbind(inDegree, rep(rter_id[i], length(inDegree))))
}
}
# Translate numeric ids to screen names for display.
inDegrees <- data.frame(matrix(sapply(inDegrees, function(x) rters$screenName[rters$id == x]), ncol = 2))
# Interactive D3 view of the network, then alpha centrality of the directed
# graph (edge direction reversed so influence flows toward the followed user).
simpleNetwork(inDegrees)
centra <- sort(alpha_centrality(graph(t(inDegrees[,c(2,1)])), alpha = 1), decreasing = TRUE)
centra[2]
names(centra)
sort(centra, decreasing = TRUE)
print(head(data.frame(Name = names(centra), Centrality = centra)))
| /visualization_app/shinyApp/test.R | no_license | latuji/fusion_capstone_project | R | false | false | 1,026 | r | data_path <- "../server_get/twitterdata"
rter_id <- read.csv(paste(data_path, "tweets", "612121494232145920", sep = "/"))$id
# 609418768578674688
# 628783838190305280
rter <- c()
for(id in rter_id) {
rter <- rbind(rter, rters[rters$id == id,])
}
rter_id <- c(fusion$id, rev(intersect(rter_id, dir(paste(data_path, "friends", sep = "/")))))
print(head(rter_id))
inDegrees <- c()
for(i in 2:length(rter_id)) {
inDegree <- intersect(rter_id[1:(i-1)],
read.csv(paste(data_path, "friends", rter_id[i], sep = "/"))$id)
if(length(inDegree)) {
inDegrees <- rbind(inDegrees, cbind(inDegree, rep(rter_id[i], length(inDegree))))
}
}
inDegrees <- data.frame(matrix(sapply(inDegrees, function(x) rters$screenName[rters$id == x]), ncol = 2))
simpleNetwork(inDegrees)
centra <- sort(alpha_centrality(graph(t(inDegrees[,c(2,1)])), alpha = 1), decreasing = TRUE)
centra[2]
names(centra)
sort(centra, decreasing = TRUE)
print(head(data.frame(Name = names(centra), Centrality = centra)))
|
"concareaExampleKola" <-
function(x, y, z, zname = deparse(substitute(z)), caname = deparse(substitute(z)), borders="bordersKola", logx = FALSE,
ifjit = FALSE, ifrev = FALSE, ngrid = 100, ncp = 0, xlim = NULL, xcoord = "Easting", ycoord =
"Northing", ifbw = FALSE, x.logfinetick=c(2,5,10),y.logfinetick=c(2,5,10))
{
# Concentration-area (C-A) plot for the Kola project data, after the wrapper
# originally written by Graeme Bonham-Carter, April 2004.
#
# Inputs: x, y are planar coordinates and z the measured value at (x, y).
# The value surface is interpolated on a regular grid (cell size
# (max(x)-min(x))/ngrid, default ngrid = 100) with MBA::mba.surf; area is
# taken to be proportional to the count of grid cells, so to be a reasonable
# model the data points should be spread 'evenly' over the plane.
# If logx = TRUE the data are log10-transformed before interpolation.
# If ifrev = TRUE the empirical C-A function is plotted from lowest value to
# highest. The plot x-axes are labelled with zname ("" suppresses the label).
# Produces four plots: CP plot of the raw data, CP plot of the gridded data,
# a grey-scale map with a hand-placed legend, and the C-A plot itself.
#
# NOTE(review): the arguments ifjit, ncp, caname, xcoord, ycoord and ifbw are
# retained for interface compatibility with older versions but are not used
# by this implementation.
#
# Example: rg.caplot(UTME/1000,UTMN/1000,Cu,zname="",caname="Cu (mg/kg) in O-horizon soil",
#          logx=TRUE,ifrev=TRUE,xcoord="Kola Project UTM Easting (km)",
#          ycoord="Kola Project UTM Northing (km)")
#
# FIX: register the par() restore exactly once, against the parameters in
# force on entry. The original re-ran `oldpar <- par(...); on.exit(...)`
# twice more below, which overwrote this handler and left the caller with
# the intermediate mar=c(1.5,1.5,1.5,1.5) margins on exit.
oldpar <- par(mar=c(4,6,4,2))
on.exit(par(oldpar))
u <- na.exclude(cbind(x, y, abs(z)))
# Grid spacing and grid node coordinates.
dx <- (max(u[, 1]) - min(u[, 1]))/ngrid
xo <- seq(from = min(u[, 1]), to = max(u[, 1]), by = dx)
yo <- seq(from = min(u[, 2]), to = max(u[, 2]), by = dx)
zlgnd <- deparse(substitute(z))
if(logx) {
u[, 3] <- log10(u[, 3])
zlgnd <- paste("Log10\n", deparse(substitute(z)))
}
# Multilevel B-spline interpolation of the (possibly logged) values.
new <- mba.surf(cbind(u[, 1], u[, 2], u[, 3]), no.X=length(xo), no.Y=length(yo),
n=1,m=1,extend=TRUE)
if (is.null(borders)){
# No border polygon: every interpolated grid cell belongs to the map.
whichdraw <- matrix(as.vector(new$xyz.est$z), nrow=length(xo))
# FIX: the original indexed `in.poly` here although it is only created in
# the else-branch, so borders=NULL failed with "object 'in.poly' not
# found". Without a border polygon all grid cells enter the C-A counts.
znew <- as.vector(whichdraw)
}
else {
bord <- get(eval(borders))
# Flag grid nodes inside the border polygon; cells outside are zeroed for
# the image and excluded from the C-A computation.
in.poly=polygrid(new$xyz.est$x,new$xyz.est$y,borders=cbind(bord$x,bord$y),vec.inout=TRUE)
whichdraw=matrix(as.vector(new$xyz.est$z)*in.poly$vec.inout, nrow=length(xo))
znew <- whichdraw[in.poly$vec.inout==TRUE]
}
if(logx)
znew <- 10^znew
xlim <- range(znew)
# CP plots of the original observations and of the gridded values.
# NOTE(review): the log10() applied to the axis limits and to znew assumes
# the working values live on a log10 scale, as they do for the Kola example;
# confirm before reusing with logx = FALSE.
qpplot.das(u[,3],qdist=qnorm,xlab=zname,xlim=log10(xlim),ylim=qnorm(c(0.0001,0.9999)),
ylab="Cumulative probability [%]", pch=3,cex=0.7, logx=logx,
logfinetick=x.logfinetick,logfinelab=x.logfinetick,line=FALSE,cex.lab=1.2)
title(paste("Original data (n = ",length(u[,3]),")",sep=""))
qpplot.das(log10(znew),qdist=qnorm,xlab=zname,xlim=log10(xlim),ylim=qnorm(c(0.0001,0.9999)),
ylab="Cumulative probability [%]", pch=3,cex=0.7, logx=logx,
logfinetick=x.logfinetick,logfinelab=x.logfinetick,line=FALSE,cex.lab=1.2)
title(paste("Gridded data (n = ",length(znew),")",sep=""))
# Grey-scale map of the interpolated surface on the Kola background.
im.br=quantile(u[,3],seq(from=0,to=1,by=0.01))
im.col=gray(seq(from=0.1,to=0.9,length=length(im.br)-1))
par(mar=c(1.5,1.5,1.5,1.5))
plot(u[,1],u[,2],frame.plot=FALSE,xaxt="n",yaxt="n",xlab="",ylab="",type="n")
image(new$xyz.est$x,new$xyz.est$y,whichdraw,breaks=im.br,col=im.col, add = TRUE,cex.lab=1.2)
plotbg(map.col=c("gray","gray","gray","gray"),map.lwd=c(1,1,1,1),add.plot=TRUE)
# Hand-placed legend; coordinates are in Kola project UTM metres.
leg.ypos=seq(from=77.7e5,to=78.8e5,length=100)
rect(rep(7.8e5,99),leg.ypos[1:99],rep(8.0e5,99),leg.ypos[2:100],col=im.col,border=FALSE)
rect(7.8e5,leg.ypos[1],8.0e5,leg.ypos[100],border=1)
leg.ypos=seq(from=77.7e5,to=78.8e5,length=5)
text(rep(7.8e5,8),leg.ypos,round(100*c(0,0.25,0.50,0.75,1),2),pos=2,cex=0.8)
# Back-transformed quartiles for the legend (assumes log10-scaled input).
im.br=quantile(10^(u[,3]),c(0,0.25,0.50,0.75,1))
text(rep(8.7e5,8),leg.ypos,round(im.br,2),pos=2,cex=0.8)
# FIX: the two header labels were accidentally drawn twice in the original
# (copy-pasted statements); draw each exactly once.
text(7.4e5,79e5,"Percentile",cex=0.8)
text(8.35e5,79e5,"mg/kg",cex=0.8)
# Concentration-area plot:
par(mar=c(4,6,4,2))
conc <- znew[order(znew)]
cumarea <- seq(1, length(znew))/length(znew) * 100
if(!ifrev) {
conc <- rev(conc)
plot(conc, cumarea, log = "xy", xlab = zname, ylab =
"% Cumulative area > values on x-axis",
main=paste("Concentration-area plot (n = ",length(conc),")",sep=""),
xlim = xlim, pch = 3,cex.lab=1.2,xaxt="n",yaxt="n")
}
else plot(conc, cumarea, log = "xy", xlab = zname, ylab =
"% Cumulative area < values on x-axis",
main=paste("Concentration-area plot (n = ",length(conc),")",sep=""),
xlim = xlim, pch = 3,cex.lab=1.2,xaxt="n",yaxt="n")
# Log-axis tick marks at the requested fine subdivisions (2, 5, 10 by default)
# across 100 decades.
axis(1,at=(a<-sort(c((10^(-50:50))%*%t(x.logfinetick)))),labels=a)
axis(2,at=(a<-sort(c((10^(-50:50))%*%t(y.logfinetick)))),labels=a)
# Grid:
abline(v=sort(c((10^(-50:50)%*%t(x.logfinetick)))),lty=3,col=gray(0.5))
abline(h=sort(c((10^(-50:50)%*%t(y.logfinetick)))),lty=3,col=gray(0.5))
invisible()
}
| /R/concareaExampleKola.R | no_license | cran/StatDA | R | false | false | 5,982 | r | "concareaExampleKola" <-
function(x, y, z, zname = deparse(substitute(z)), caname = deparse(substitute(z)), borders="bordersKola", logx = FALSE,
ifjit = FALSE, ifrev = FALSE, ngrid = 100, ncp = 0, xlim = NULL, xcoord = "Easting", ycoord =
"Northing", ifbw = FALSE, x.logfinetick=c(2,5,10),y.logfinetick=c(2,5,10))
{
# Original wrapper written by Graeme Bonham-Carter, April 2004, to prepare a concentration-
# area plot as in GEODAS; input data consist of x, y & z, where x & y are coordinates on a
# plane, and z is the measured value at point (x,y). The function uses the interpolation
# routine in S+ and assumes that area is proportional to the count of grid points. To be a
# reasonable model the data points should be 'evenly' spread over the plane. Interpolated
# values outside the convex hull of observed data points are set to NA. The interpolated
# grid size is computed as (max(x) - min(x))/ngrid, with a default value of 100 for ngrid.
# The user is prompted for the upper-left and bottom-right corners of a legend panel.
# If logx = T the data are log-transformed prior to the interpolation step. If ifjit = T
# the x and y coordinates are jittered so that no duplicate locations exist, which can
# cause function interp to fail. If ifrev= T the empirical concentration-area function is
# plotted from lowest value to highest. Triangulation is used if ncp = 0 (default), values
# of 2 and above result in partial derivatives being used and increased smoothing. The
# plot x-axes are labelled with zname, this can be set to "" and no label is plotted. The
# interpolated 'map' may be titled, often with the text that would be used as the x-axis
# label; if no title is required set caname = "".
#
# Example: rg.caplot(UTME/1000,UTMN/1000,Cu,zname="",caname="Cu (mg/kg) in O-horizon soil",
# logx=TRUE,ifrev=TRUE,xcoord="Kola Project UTM Easting (km)",
# ycoord="Kola Project UTM Northing (km)")
#
# If a "black and white" image is required for monochrome publication set ifbw = T.
#
oldpar <- par(mar=c(4,6,4,2))
on.exit(par(oldpar))
u <- na.exclude(cbind(x, y, abs(z)))
dx <- (max(u[, 1]) - min(u[, 1]))/ngrid
xo <- seq(from = min(u[, 1]), to = max(u[, 1]), by = dx)
yo <- seq(from = min(u[, 2]), to = max(u[, 2]), by = dx)
zlgnd <- deparse(substitute(z))
if(logx) {
u[, 3] <- log10(u[, 3])
zlgnd <- paste("Log10\n", deparse(substitute(z)))
}
#new <- interp.new(u[, 1], u[, 2], u[, 3], xo, yo, duplicate="median",extrap=TRUE)
new <- mba.surf(cbind(u[, 1], u[, 2], u[, 3]), no.X=length(xo), no.Y=length(yo),
n=1,m=1,extend=TRUE)
if (is.null(borders)){
#whichdraw <- matrix(as.vector(new$z), nrow=length(xo))
whichdraw <- matrix(as.vector(new$xyz.est$z), nrow=length(xo))
}
else {
bord <- get(eval(borders))
#in.poly=polygrid(new$x,new$y,borders=cbind(bord$x,bord$y),vec.inout=TRUE)
#whichdraw=matrix(as.vector(new$z)*in.poly$vec.inout, nrow=length(xo))
in.poly=polygrid(new$xyz.est$x,new$xyz.est$y,borders=cbind(bord$x,bord$y),vec.inout=TRUE)
whichdraw=matrix(as.vector(new$xyz.est$z)*in.poly$vec.inout, nrow=length(xo))
}
znew <- whichdraw[in.poly$vec.inout==TRUE]
if(logx)
znew <- 10^znew
xlim <- range(znew)
qpplot.das(u[,3],qdist=qnorm,xlab=zname,xlim=log10(xlim),ylim=qnorm(c(0.0001,0.9999)),
ylab="Cumulative probability [%]", pch=3,cex=0.7, logx=logx,
logfinetick=x.logfinetick,logfinelab=x.logfinetick,line=FALSE,cex.lab=1.2)
title(paste("Original data (n = ",length(u[,3]),")",sep=""))
qpplot.das(log10(znew),qdist=qnorm,xlab=zname,xlim=log10(xlim),ylim=qnorm(c(0.0001,0.9999)),
ylab="Cumulative probability [%]", pch=3,cex=0.7, logx=logx,
logfinetick=x.logfinetick,logfinelab=x.logfinetick,line=FALSE,cex.lab=1.2)
title(paste("Gridded data (n = ",length(znew),")",sep=""))
# plot map
# generate plot with background
im.br=quantile(u[,3],seq(from=0,to=1,by=0.01))
im.col=gray(seq(from=0.1,to=0.9,length=length(im.br)-1))
oldpar <- par(mar=c(1.5,1.5,1.5,1.5))
on.exit(par(oldpar))
plot(u[,1],u[,2],frame.plot=FALSE,xaxt="n",yaxt="n",xlab="",ylab="",type="n")
#image(new$x,new$y,whichdraw,breaks=im.br,col=im.col, add = TRUE,cex.lab=1.2)
image(new$xyz.est$x,new$xyz.est$y,whichdraw,breaks=im.br,col=im.col, add = TRUE,cex.lab=1.2)
plotbg(map.col=c("gray","gray","gray","gray"),map.lwd=c(1,1,1,1),add.plot=TRUE)
leg.ypos=seq(from=77.7e5,to=78.8e5,length=100)
rect(rep(7.8e5,99),leg.ypos[1:99],rep(8.0e5,99),leg.ypos[2:100],col=im.col,border=FALSE)
rect(7.8e5,leg.ypos[1],8.0e5,leg.ypos[100],border=1)
leg.ypos=seq(from=77.7e5,to=78.8e5,length=5)
text(rep(7.8e5,8),leg.ypos,round(100*c(0,0.25,0.50,0.75,1),2),pos=2,cex=0.8)
im.br=quantile(10^(u[,3]),c(0,0.25,0.50,0.75,1))
text(rep(8.7e5,8),leg.ypos,round(im.br,2),pos=2,cex=0.8)
text(7.4e5,79e5,"Percentile",cex=0.8)
text(8.35e5,79e5,"mg/kg",cex=0.8)
text(7.4e5,79e5,"Percentile",cex=0.8)
text(8.35e5,79e5,"mg/kg",cex=0.8)
# Concentration area plot:
oldpar <- par(mar=c(4,6,4,2))
on.exit(par(oldpar))
conc <- znew[order(znew)]
cumarea <- seq(1, length(znew))/length(znew) * 100
if(!ifrev) {
conc <- rev(conc)
plot(conc, cumarea, log = "xy", xlab = zname, ylab =
"% Cumulative area > values on x-axis",
main=paste("Concentration-area plot (n = ",length(conc),")",sep=""),
xlim = xlim, pch = 3,cex.lab=1.2,xaxt="n",yaxt="n")
}
else plot(conc, cumarea, log = "xy", xlab = zname, ylab =
"% Cumulative area < values on x-axis",
main=paste("Concentration-area plot (n = ",length(conc),")",sep=""),
xlim = xlim, pch = 3,cex.lab=1.2,xaxt="n",yaxt="n")
axis(1,at=(a<-sort(c((10^(-50:50))%*%t(x.logfinetick)))),labels=a)
axis(2,at=(a<-sort(c((10^(-50:50))%*%t(y.logfinetick)))),labels=a)
# Grid:
abline(v=sort(c((10^(-50:50)%*%t(x.logfinetick)))),lty=3,col=gray(0.5))
abline(h=sort(c((10^(-50:50)%*%t(y.logfinetick)))),lty=3,col=gray(0.5))
invisible()
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spdep-tidiers.R
\name{glance.sarlm}
\alias{glance.sarlm}
\title{Glance at a(n) spatialreg object}
\usage{
\method{glance}{sarlm}(x, ...)
}
\arguments{
\item{x}{An object returned from \code{\link[spatialreg:ML_models]{spatialreg::lagsarlm()}}
or \code{\link[spatialreg:ML_models]{spatialreg::errorsarlm()}}.}
\item{...}{Additional arguments. Not used. Needed to match generic
signature only. \strong{Cautionary note:} Misspelled arguments will be
absorbed in \code{...}, where they will be ignored. If the misspelled
argument has a default value, the default value will be used.
For example, if you pass \code{conf.lvel = 0.9}, all computation will
proceed using \code{conf.level = 0.95}. Two exceptions here are:
\itemize{
\item \code{tidy()} methods will warn when supplied an \code{exponentiate} argument if
it will be ignored.
\item \code{augment()} methods will warn when supplied a \code{newdata} argument if it
will be ignored.
}}
}
\description{
Glance accepts a model object and returns a \code{\link[tibble:tibble]{tibble::tibble()}}
with exactly one row of model summaries. The summaries are typically
goodness of fit measures, p-values for hypothesis tests on residuals,
or model convergence information.
Glance never returns information from the original call to the modeling
function. This includes the name of the modeling function or any
arguments passed to the modeling function.
Glance does not calculate summary measures. Rather, it farms out these
computations to appropriate methods and gathers the results together.
Sometimes a goodness of fit measure will be undefined. In these cases
the measure will be reported as \code{NA}.
Glance returns the same number of columns regardless of whether the
model matrix is rank-deficient or not. If so, entries in columns
that no longer have a well-defined value are filled in with an \code{NA}
of the appropriate type.
}
\examples{
\dontshow{if ((rlang::is_installed("spdep") & rlang::is_installed("spatialreg") && identical(Sys.getenv("NOT_CRAN"), "true"))) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
# load libraries for models and data
library(spatialreg)
library(spdep)
# load data
data(oldcol, package = "spdep")
listw <- nb2listw(COL.nb, style = "W")
# fit model
crime_sar <-
lagsarlm(CRIME ~ INC + HOVAL,
data = COL.OLD,
listw = listw,
method = "eigen"
)
# summarize model fit with tidiers
tidy(crime_sar)
tidy(crime_sar, conf.int = TRUE)
glance(crime_sar)
augment(crime_sar)
# fit another model
crime_sem <- errorsarlm(CRIME ~ INC + HOVAL, data = COL.OLD, listw)
# summarize model fit with tidiers
tidy(crime_sem)
tidy(crime_sem, conf.int = TRUE)
glance(crime_sem)
augment(crime_sem)
# fit another model
crime_sac <- sacsarlm(CRIME ~ INC + HOVAL, data = COL.OLD, listw)
# summarize model fit with tidiers
tidy(crime_sac)
tidy(crime_sac, conf.int = TRUE)
glance(crime_sac)
augment(crime_sac)
\dontshow{\}) # examplesIf}
}
\seealso{
\code{\link[=glance]{glance()}}, \code{\link[spatialreg:ML_models]{spatialreg::lagsarlm()}}, \code{\link[spatialreg:ML_models]{spatialreg::errorsarlm()}},
\code{\link[spatialreg:ML_models]{spatialreg::sacsarlm()}}
Other spatialreg tidiers:
\code{\link{augment.sarlm}()},
\code{\link{tidy.sarlm}()}
}
\concept{spatialreg tidiers}
\value{
A \code{\link[tibble:tibble]{tibble::tibble()}} with exactly one row and columns:
\item{AIC}{Akaike's Information Criterion for the model.}
\item{BIC}{Bayesian Information Criterion for the model.}
\item{deviance}{Deviance of the model.}
\item{logLik}{The log-likelihood of the model. [stats::logLik()] may be a useful reference.}
\item{nobs}{Number of observations used.}
}
| /man/glance.sarlm.Rd | permissive | tidymodels/broom | R | false | true | 3,764 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spdep-tidiers.R
\name{glance.sarlm}
\alias{glance.sarlm}
\title{Glance at a(n) spatialreg object}
\usage{
\method{glance}{sarlm}(x, ...)
}
\arguments{
\item{x}{An object returned from \code{\link[spatialreg:ML_models]{spatialreg::lagsarlm()}}
or \code{\link[spatialreg:ML_models]{spatialreg::errorsarlm()}}.}
\item{...}{Additional arguments. Not used. Needed to match generic
signature only. \strong{Cautionary note:} Misspelled arguments will be
absorbed in \code{...}, where they will be ignored. If the misspelled
argument has a default value, the default value will be used.
For example, if you pass \code{conf.lvel = 0.9}, all computation will
proceed using \code{conf.level = 0.95}. Two exceptions here are:
\itemize{
\item \code{tidy()} methods will warn when supplied an \code{exponentiate} argument if
it will be ignored.
\item \code{augment()} methods will warn when supplied a \code{newdata} argument if it
will be ignored.
}}
}
\description{
Glance accepts a model object and returns a \code{\link[tibble:tibble]{tibble::tibble()}}
with exactly one row of model summaries. The summaries are typically
goodness of fit measures, p-values for hypothesis tests on residuals,
or model convergence information.
Glance never returns information from the original call to the modeling
function. This includes the name of the modeling function or any
arguments passed to the modeling function.
Glance does not calculate summary measures. Rather, it farms out these
computations to appropriate methods and gathers the results together.
Sometimes a goodness of fit measure will be undefined. In these cases
the measure will be reported as \code{NA}.
Glance returns the same number of columns regardless of whether the
model matrix is rank-deficient or not. If so, entries in columns
that no longer have a well-defined value are filled in with an \code{NA}
of the appropriate type.
}
\examples{
\dontshow{if ((rlang::is_installed("spdep") & rlang::is_installed("spatialreg") && identical(Sys.getenv("NOT_CRAN"), "true"))) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
# load libraries for models and data
library(spatialreg)
library(spdep)
# load data
data(oldcol, package = "spdep")
listw <- nb2listw(COL.nb, style = "W")
# fit model
crime_sar <-
lagsarlm(CRIME ~ INC + HOVAL,
data = COL.OLD,
listw = listw,
method = "eigen"
)
# summarize model fit with tidiers
tidy(crime_sar)
tidy(crime_sar, conf.int = TRUE)
glance(crime_sar)
augment(crime_sar)
# fit another model
crime_sem <- errorsarlm(CRIME ~ INC + HOVAL, data = COL.OLD, listw)
# summarize model fit with tidiers
tidy(crime_sem)
tidy(crime_sem, conf.int = TRUE)
glance(crime_sem)
augment(crime_sem)
# fit another model
crime_sac <- sacsarlm(CRIME ~ INC + HOVAL, data = COL.OLD, listw)
# summarize model fit with tidiers
tidy(crime_sac)
tidy(crime_sac, conf.int = TRUE)
glance(crime_sac)
augment(crime_sac)
\dontshow{\}) # examplesIf}
}
\seealso{
\code{\link[=glance]{glance()}}, \code{\link[spatialreg:ML_models]{spatialreg::lagsarlm()}}, \code{\link[spatialreg:ML_models]{spatialreg::errorsarlm()}},
\code{\link[spatialreg:ML_models]{spatialreg::sacsarlm()}}
Other spatialreg tidiers:
\code{\link{augment.sarlm}()},
\code{\link{tidy.sarlm}()}
}
\concept{spatialreg tidiers}
\value{
A \code{\link[tibble:tibble]{tibble::tibble()}} with exactly one row and columns:
\item{AIC}{Akaike's Information Criterion for the model.}
\item{BIC}{Bayesian Information Criterion for the model.}
\item{deviance}{Deviance of the model.}
\item{logLik}{The log-likelihood of the model. [stats::logLik()] may be a useful reference.}
\item{nobs}{Number of observations used.}
}
|
\name{decomma}
\alias{decomma}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Remove Commas from a Character Vector
}
\description{
Strips every comma from each element of \code{x} with \code{gsub}, e.g. to
clean numbers formatted with thousands separators ("1,234" becomes "1234")
before coercion with \code{as.numeric}.
}
\usage{
decomma(x)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
A character vector (or object coercible to character by \code{gsub}) from
which all commas are to be removed.
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
A character vector of the same length as \code{x} with every comma removed.
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (x)
{
x <- gsub(",", "", x)
return(x)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
| /man/decomma.Rd | no_license | Libardo1/eeptools | R | false | false | 1,271 | rd | \name{decomma}
\alias{decomma}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
%% ~~function to do ... ~~
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
decomma(x)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
%% ~~Describe \code{x} here~~
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (x)
{
x <- gsub(",", "", x)
return(x)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
#
# U s e f u l f o r M a n u a l D e b u g g i n g
#
# NOTE: When running locally, make sure to turn off the setwd call at the start of each routine.
# and make sure that the file name location is one that you want.
#
# Running full plot for splined texture
# generateSplinedPlotTexture(sandClayStones@horizons$id, sandClayStones@horizons$name, sandClayStones@horizons$top, sandClayStones@horizons$bottom, sandClayStones@horizons$value, 'B', 0)
#
# Running shallow plot for splined texture
# generateSplinedPlotTexture(smallerSandClayStones$id, smallerSandClayStones$name, smallerSandClayStones$top, smallerSandClayStones$bottom, smallerSandClayStones$value, 'F', 0)
#
# Running full plot for splined water
#
# Running shallow plot for splined water
# generateSplinedPlotWater(smallerWaterValues$id, smallerWaterValues$name, smallerWaterValues$horizonTop, smallerWaterValues$horizonBottom, smallerWaterValues$value, 'F', 0)
#
# Running full plot for splined water
# generateSplinedPlotWater(waterValues$id, waterValues$name, waterValues$horizonTop, waterValues$horizonBottom, waterValues$value, 'B', 0)
#' @title generateAqpPlot
#' @description Render a soil-profile sketch with the aqp package and write it
#'   to a temporary PNG file.
#' @param id Profile identifier, one value per horizon row.
#' @param name Horizon name for each row.
#' @param top Upper depth of each horizon.
#' @param bottom Lower depth of each horizon.
#' @param value Numeric value (percent) used to colour each horizon.
#' @return Path of the PNG file that was written.
#' @export
generateAqpPlot <- function (id, name, top, bottom, value) {
siblingParams <- data.frame(id, name, top, bottom, value, stringsAsFactors=FALSE)
# Promote the data frame to an aqp SoilProfileCollection keyed by id with
# top/bottom as the depth columns.
aqp::depths(siblingParams) <- id ~ top + bottom
# 10-colour ramp interpolated from the ColorBrewer PuOr palette.
PuOrColours <- c("#7f3b08", "#b35806", "#e08214", "#fdb863", "#fee0b6", "#d8daeb",
"#b2abd2", "#8073ac", "#542788", "#2d004b" )
colourRamp11 <- colorRampPalette(PuOrColours)(10)
# NOTE(review): `tmpD` is not defined in this file -- presumably a
# package-level temporary directory; confirm it is set before calling.
fileName <- tempfile(fileext='.png',tmpdir=tmpD)
png(fileName, width=480,height=480,units="px",pointsize=12)
par(mar=c(3.0, 0, 8.0, 0))
plotSPC(siblingParams, name='name', color='value', col.label='Percent',
col.palette=colourRamp11, axis.line.offset=-6, x.idx.offset=0.5, max.depth=100)
# addVolumeFraction(siblingParams, 'stones')
title(name, line=3.5, cex.main=1.6)
# Flush and close the PNG device before returning its path.
dev.flush()
dev.off()
return(fileName)
}
#' @title testPlot
#' @description Minimal debugging helper: forwards its argument unchanged to
#'   \code{graphics::plot()}, e.g. to check that a plotting device is working.
#' @param n Object to plot.
#' @export
testPlot <- function(n){
graphics::plot(n)
}
#' @title generateBlockPlotWater
#' @description Draw a horizon-by-horizon water-retention "block" diagram: for
#'   each functional horizon a horizontal bar is split, left to right, into
#'   stones, fine earth, unavailable water, available water and air fractions.
#' @param id Measurement type per row; expected values are "Field capacity",
#'   "Wilting point", "Total porosity" and "Stones".
#' @param name Horizon names (read from the "Field capacity" rows).
#' @param top Horizon upper depths in cm.
#' @param bottom Horizon lower depths in cm.
#' @param value Measured percentage for each (id, horizon) pair.
#' @param runner Unused; retained for interface compatibility.
#' @export
generateBlockPlotWater <- function(id, name, top, bottom, value,runner=T) {
# Horizon names and count come from the 'Field capacity' rows; the other
# measurement vectors are assumed to share the same horizon order.
fhNames <- name[id == 'Field capacity']
numFhs <- length(fhNames)
wpValues <- value[id == 'Wilting point']
fcValues <- value[id == 'Field capacity']
tpValues <- value[id == 'Total porosity']
stonesValues <- value[id == 'Stones']
# Water contents are reported for the fine-earth fraction; rescale them to
# whole-soil percentages by discounting the stone fraction.
nonStoneValueAdjustment <- (100 - stonesValues) / 100
wpValues <- wpValues * nonStoneValueAdjustment;
fcValues <- fcValues * nonStoneValueAdjustment;
tpValues <- tpValues * nonStoneValueAdjustment;
# Derived fractions: available water (FC - WP), air (porosity - FC),
# unavailable water (WP) and solid fine earth (remainder after porosity and
# stones).
awValues <- fcValues - wpValues
airValues <- tpValues - fcValues
unavailValues <- wpValues
earthValues <- 100 - tpValues - stonesValues
# Cumulative left/right edges of the stacked segments, as proportions of the
# bar width in [0, 1].
stonesXmin <- rep(0, numFhs)
stonesXmax <- stonesValues / 100.0
earthXmin <- stonesXmax
earthXmax <- (stonesValues + earthValues) / 100.0
unavailXmin <- earthXmax
unavailXmax <- (stonesValues + earthValues + unavailValues) / 100.0
availXmin <- unavailXmax
availXmax <- (stonesValues + earthValues + unavailValues + awValues) / 100.0
airXmin <- availXmax
airXmax <- rep(1.0, numFhs)
plot.new()
# Fix the user coordinate system to [0,1] x [0,1] with the y direction
# flipped (usr=c(0,1,1,0)); xpd=TRUE lets the legend sit outside the plot.
par(mar=c(2,1,6,2), xpd=TRUE, las=1, usr=c(0,1,1,0))
# Map depths (cm) onto [0,1].
# NOTE(review): this scaling and the legend/axis constants below assume a
# 0-100 cm profile; confirm for deeper profiles.
fhBottoms <- 1 - (bottom[1:numFhs] / 100.0)
fhTops <- 1 - (top[1:numFhs] / 100.0)
stonesColour <- rgb(84,84,84,maxColorValue=255)
earthColour <- rgb(128,64,64,maxColorValue=255)
unavailColour <- rgb(255,128,64,maxColorValue=255)
availColour <- rgb(0,128,255,maxColorValue=255)
airColour <- rgb(234,234,234,maxColorValue=255)
# Title and two-row legend, hand-placed above the plot region.
text(x=0.5,y=-0.15, labels=c("Water Retention"), font=2, cex=1.2)
legend(0.35,-0.16,title="",legend=c("Stones","Fine earth", "Air"),
horiz=TRUE,fill=c(stonesColour,earthColour,airColour), cex=0.8, bty="n", x.intersp=0.4)
legend(0.33,-0.12,title="", legend=c("Unavailable water","Available water"),horiz=TRUE,
fill=c(unavailColour,availColour, airColour), cex=0.8, bty="n", x.intersp=0.4)
# One rect() call per fraction draws that segment for every horizon at once.
rect(xleft=stonesXmin,ybottom=fhBottoms,xright=stonesXmax,ytop=fhTops, col=stonesColour)
rect(xleft=earthXmin,ybottom=fhBottoms,xright=earthXmax,ytop=fhTops, col=earthColour)
rect(xleft=unavailXmin,ybottom=fhBottoms,xright=unavailXmax,ytop=fhTops, col=unavailColour)
rect(xleft=availXmin,ybottom=fhBottoms,xright=availXmax,ytop=fhTops, col=availColour)
rect(xleft=airXmin,ybottom=fhBottoms,xright=airXmax,ytop=fhTops,col=airColour)
# Depth axis (right) in 10 cm steps and a percentage axis (bottom).
axis(4,at=c(0,.1,.2,.3,.4,.5,.6,.7,.8,.9,1),labels=c('0 cm','10 cm', '20 cm', '30 cm','40 cm','50 cm',
'60 cm','70 cm','80 cm','90 cm', '100 cm'), tcl=-.2, hadj=0.6, padj=0.2, cex.axis=0.5)
axis(1, at=c(0,0.25,0.5,0.75,1.00), labels=c("0%","25%","50%","75%","100%"), tcl=-.2, mgp=c(3,.1,.5),
hadj=0.3, cex.axis=0.5)
invisible()
}
#' @title g e n e r a t e S p l i n e d P l o t W a t e r
#' @description Generate a water plot using splining. Draws the water
#' profile (stones, fine earth, unavailable water, available water, air)
#' as stacked polygons against depth, after smoothing the per-horizon
#' values to 1 cm slices with a mass-preserving spline (GSIF::mpspline).
#' @param id character vector of measurement labels ('Field capacity',
#' 'Wilting point', 'Total porosity', 'Stones'); parallel to the other args.
#' @param name functional-horizon names (parallel to id).
#' @param top,bottom horizon depth limits; treated as cm, with the plot
#' spanning 0-100 cm.
#' @param value measurement values (percentages; parallel to id).
#' @param rootBarrier when 'F' or 'M', a hatched "Rock" band is drawn from
#' the deepest horizon down to 100 cm.
#' @param rootBarrierFHNum unused in this implementation.
#' @export
generateSplinedPlotWater <- function(id, name, top, bottom, value, rootBarrier, rootBarrierFHNum) {
# The 'Field capacity' rows define the functional horizons (names/depths).
fhNames <- name[id == 'Field capacity']
fhTops <- top[id == 'Field capacity']
fhBottoms <- bottom[id == 'Field capacity']
numFhs <- length(fhNames)
# Depth of the deepest horizon; the spline yields one value per 1 cm slice,
# so each outline is padded to bottom+1 points (depths 0..bottom).
bottom <- fhBottoms[numFhs]
sideLength <- bottom + 1
wpValues <- value[id == 'Wilting point']
fcValues <- value[id == 'Field capacity']
tpValues <- value[id == 'Total porosity']
stonesValues <- value[id == 'Stones']
# Rescale the water measurements to the non-stone share of each horizon.
nonStoneValueAdjustment <- (100 - stonesValues) / 100
wpValues <- wpValues * nonStoneValueAdjustment;
fcValues <- fcValues * nonStoneValueAdjustment;
tpValues <- tpValues * nonStoneValueAdjustment;
# Band widths: available water (FC-WP) and air-filled porosity (TP-FC).
awValues <- fcValues - wpValues
airValues <- tpValues - fcValues
unavailValues <- wpValues
earthValues <- 100 - tpValues - stonesValues
# Convert the widths into cumulative (stacked) percentages, left to right:
# stones | earth | unavailable | available | air.
earthValues <- earthValues + stonesValues
unavailValues <- unavailValues + earthValues
availValues <- awValues + unavailValues
airValues <- airValues + availValues
#library(aqp)
# Build one aqp SoilProfileCollection per component so GSIF::mpspline can
# smooth its cumulative percentage over depth.
stonesProf <- data.frame(id=rep("Stones",times=numFhs), name=fhNames, top=fhTops,
bottom=fhBottoms, value=stonesValues, stringsAsFactors=FALSE)
aqp::depths(stonesProf) <- id ~ top + bottom
earthProf <- data.frame(id=rep("Earth",times=numFhs), name=fhNames, top=fhTops,
bottom=fhBottoms, value=earthValues, stringsAsFactors=FALSE)
aqp::depths(earthProf) <- id ~ top + bottom
unavailProf <- data.frame(id=rep("Unavail",times=numFhs), names=fhNames, top=fhTops,
bottom=fhBottoms, value=unavailValues, stringsAsFactors=FALSE)
aqp::depths(unavailProf) <- id ~ top + bottom
availProf <- data.frame(id=rep("Avail",times=numFhs), names=fhNames, top=fhTops,bottom=fhBottoms, value=availValues, stringsAsFactors=FALSE)
aqp::depths(availProf) <- id ~ top + bottom
airProf <- data.frame(id=rep("Air",times=numFhs), names=fhNames,top=fhTops,
bottom=fhBottoms, value=airValues, stringsAsFactors=FALSE)
aqp::depths(airProf) <- id ~ top + bottom
# Spline each outline, clamped (vlow/vhigh) to the observed value range.
stonesSpline <- GSIF::mpspline(stonesProf, var.name="value", vlow=min(stonesProf@horizons$value),
vhigh=max(stonesProf@horizons$value),lam=0.1)
# Drop NA slices (beyond the profile) and repeat the first slice so the
# outline length matches yPlotPoints below (assumes the profile starts at 0 cm).
xPlotStones <- stonesSpline$var.1cm[!is.na(stonesSpline$var.1cm)]
xPlotStones <- c(xPlotStones[1], xPlotStones)
earthSpline <- GSIF::mpspline(earthProf, var.name="value", vlow=min(earthProf@horizons$value),
vhigh=max(earthProf@horizons$value),lam=0.1)
xPlotEarth <- earthSpline$var.1cm[!is.na(earthSpline$var.1cm)]
xPlotEarth <- c(xPlotEarth[1], xPlotEarth)
unavailSpline <- GSIF::mpspline(unavailProf, var.name="value", vlow=min(unavailProf@horizons$value),
vhigh=max(unavailProf@horizons$value),lam=0.1)
xPlotUnavail <- unavailSpline$var.1cm[!is.na(unavailSpline$var.1cm )]
xPlotUnavail <- c(xPlotUnavail[1], xPlotUnavail)
availSpline <- GSIF::mpspline(availProf, var.name="value", vlow=min(availProf@horizons$value),
vhigh=max(availProf@horizons$value),lam=0.1)
xPlotAvail <- availSpline$var.1cm[!is.na(availSpline$var.1cm)]
xPlotAvail <- c(xPlotAvail[1], xPlotAvail)
# y path shared by every polygon: walk down the profile then back up (0-1 scale).
yPlotPoints <- c(seq(0,bottom,1),seq(bottom,0,-1)) / 100.0
# Each band is bounded by the previous cumulative outline (left edge) and
# its own outline (right edge); air fills out to 100%.
stonesPolyXs <- generatePolygonXValues(rep(0.0,times=sideLength), xPlotStones)
earthPolyXs <- generatePolygonXValues(xPlotStones,xPlotEarth)
unavailPolyXs <- generatePolygonXValues(xPlotEarth,xPlotUnavail)
availPolyXs <- generatePolygonXValues(xPlotUnavail,xPlotAvail)
airPolyXs <- generatePolygonXValues(xPlotAvail, rep(100.0,times=sideLength))
stonesColour <- rgb(84,84,84,maxColorValue=255)
earthColour <- rgb(128,64,64,maxColorValue=255)
unavailColour <- rgb(255,128,64,maxColorValue=255)
availColour <- rgb(0,128,255,maxColorValue=255)
airColour <- rgb(234,234,234,maxColorValue=255)
plot.new()
# usr=c(0,1,1,0) flips the y axis so depth increases down the page; xpd=TRUE
# lets the title/legends draw in the top margin.
par(mar=c(2,1,6,2), xpd=TRUE, las=1, usr=c(0,1,1,0))
text(x=0.5,y=-0.15, labels=c("Water Retention"), font=2, cex=1.2)
legend(0.35,-0.16,title="",legend=c("Stones","Fine earth", "Air"),
horiz=TRUE,fill=c(stonesColour,earthColour,airColour), cex=0.8, bty="n", x.intersp=0.4)
legend(0.33,-0.12,title="", legend=c("Unavailable water","Available water"),horiz=TRUE,
fill=c(unavailColour,availColour, airColour), cex=0.8, bty="n", x.intersp=0.4)
# Depth axis on the right, percentage axis along the bottom.
axis(4,at=c(0,.1,.2,.3,.4,.5,.6,.7,.8,.9,1),labels=c('0 cm','10 cm', '20 cm', '30 cm','40 cm','50 cm',
'60 cm','70 cm','80 cm','90 cm', '100 cm'), tcl=-.2, hadj=0.6, padj=0.2, cex.axis=0.5)
axis(1, at=c(0,0.25,0.5,0.75,1.00), labels=c("0%","25%","50%","75%","100%"), tcl=-.2, mgp=c(3,.1,.5),
hadj=0.3, cex.axis=0.5)
polygon(stonesPolyXs, yPlotPoints, col=stonesColour)
polygon(earthPolyXs, yPlotPoints, col=earthColour)
polygon(unavailPolyXs, yPlotPoints, col=unavailColour)
polygon(availPolyXs, yPlotPoints, col=availColour)
polygon(airPolyXs, yPlotPoints, col=airColour)
# 'F'/'M' root barriers: mask everything below the profile with a hatched
# rock band and label it.
if(rootBarrier == 'F' || rootBarrier == 'M') {
rect(xleft=0,ybottom=1.0,xright=1.0,ytop=bottom/100.0, col=stonesColour)
rect(xleft=0,ybottom=1.0,xright=1.0,ytop=bottom/100.0, density=20, col="black")
rect(xleft=0,ybottom=1.0,xright=1.0,ytop=bottom/100.0, density=20, angle=135, col="black")
shadowtext(0.5, (1 + (bottom/100)) / 2, "Rock", col="black", bg=stonesColour, cex=0.5)
}
invisible()
}
#' @title g e n e r a t e S p l i n e d P l o t T e x t u r e
#' @description Generate a texture plot using splining. Draws the texture
#' profile (clay, silt, sand, stones) as stacked polygons against depth,
#' after smoothing the per-horizon values to 1 cm slices with a
#' mass-preserving spline (GSIF::mpspline).
#' @param id character vector of measurement labels ('Clay', 'Sand',
#' 'Stones'); parallel to the other args.
#' @param name functional-horizon names (parallel to id).
#' @param top,bottom horizon depth limits; treated as cm, with the plot
#' spanning 0-100 cm.
#' @param value measurement values (percentages; parallel to id).
#' @param rootBarrier when 'F' or 'M', a hatched "Rock" band is drawn from
#' the deepest horizon down to 100 cm.
#' @param rootBarrierFHNum unused in this implementation.
#' @export
generateSplinedPlotTexture <- function(id, name, top, bottom, value, rootBarrier, rootBarrierFHNum) {
# The 'Clay' rows define the functional horizons (names/depths).
fhNames <- name[id == 'Clay']
fhTops <- top[id == 'Clay']
fhBottoms <- bottom[id == 'Clay']
numFhs <- length(fhNames)
# Depth of the deepest horizon; each splined outline is padded to bottom+1 points.
bottom <- fhBottoms[numFhs]
sideLength <- bottom + 1
clayValues <- value[id == 'Clay']
sandValues <- value[id == 'Sand']
# Silt is the remainder of the fine-earth fraction.
siltValues <- 100 - sandValues - clayValues
stonesValues <- value[id == 'Stones']
# Rescale the texture fractions to the non-stone share of each horizon.
nonStoneValueAdjustment <- (100 - stonesValues) / 100
clayValues <- clayValues * nonStoneValueAdjustment
siltValues <- siltValues * nonStoneValueAdjustment
sandValues <- sandValues * nonStoneValueAdjustment
# Convert widths to cumulative (stacked) percentages: clay | silt | sand,
# with stones filling the remainder up to 100.
siltValues <- siltValues + clayValues
sandValues <- sandValues + siltValues
# Build one aqp SoilProfileCollection per component for GSIF::mpspline.
clayProf <- data.frame(id=rep("Clay",times=numFhs), name=fhNames, top=fhTops,
bottom=fhBottoms, value=clayValues, stringsAsFactors=FALSE)
aqp::depths(clayProf) <- id ~ top + bottom
siltProf <- data.frame(id=rep("Silt",times=numFhs), name=fhNames, top=fhTops,
bottom=fhBottoms, value=siltValues, stringsAsFactors=FALSE)
aqp::depths(siltProf) <- id ~ top + bottom
sandProf <- data.frame(id=rep("Sand",times=numFhs), names=fhNames, top=fhTops,
bottom=fhBottoms, value=sandValues, stringsAsFactors=FALSE)
aqp::depths(sandProf) <- id ~ top + bottom
# Spline each cumulative outline, clamped (vlow/vhigh) to the observed
# range; drop NA slices and repeat the first slice so the outline length
# matches yPlotPoints below.
claySpline <- GSIF::mpspline(clayProf, var.name="value", vlow=min(clayProf@horizons$value),
vhigh=max(clayProf@horizons$value),lam=0.1)
xPlotClay <- claySpline$var.1cm[!is.na(claySpline$var.1cm)]
xPlotClay <- c(xPlotClay[1], xPlotClay)
siltSpline <- GSIF::mpspline(siltProf, var.name="value", vlow=min(siltProf@horizons$value),
vhigh=max(siltProf@horizons$value),lam=0.1)
xPlotSilt <- siltSpline$var.1cm[!is.na(siltSpline$var.1cm)]
xPlotSilt <- c(xPlotSilt[1], xPlotSilt)
sandSpline <- GSIF::mpspline(sandProf, var.name="value", vlow=min(sandProf@horizons$value),
vhigh=max(sandProf@horizons$value),lam=0.1)
xPlotSand <- sandSpline$var.1cm[!is.na(sandSpline$var.1cm)]
xPlotSand <- c(xPlotSand[1], xPlotSand)
# y path shared by every polygon: walk down the profile then back up (0-1 scale).
yPlotPoints <- c(seq(0,bottom,1),seq(bottom,0,-1)) / 100.0
# Each band is bounded by the previous cumulative outline (left edge) and
# its own outline (right edge); stones fill from sand out to 100%.
clayPolyXs <- generatePolygonXValues(rep(0.0,times=sideLength), xPlotClay)
siltPolyXs <- generatePolygonXValues(xPlotClay,xPlotSilt)
sandPolyXs <- generatePolygonXValues(xPlotSilt,xPlotSand)
stonePolyXs <- generatePolygonXValues(xPlotSand, rep(100.0,times=sideLength))
siltColour <- rgb(255,184,113,maxColorValue=255)
clayColour <- rgb(182,73,82,maxColorValue=255)
sandColour <- rgb(211,236,155,maxColorValue=255)
stonesColour <- rgb(128,128,128,maxColorValue=255)
plot.new()
# usr=c(0,1,1,0) flips the y axis so depth increases down the page.
par(mar=c(2,1,6,2), xpd=TRUE, las=1, usr=c(0,1,1,0), lwd=0.8)
text(x=0.5,y=-0.092, labels=c("Texture"), font=2, cex=1.2)
legend(x="top",title="",inset=c(0,-.12),legend=c("Clay","Silt","Sand","Stones"),horiz=TRUE,
fill=c(clayColour,siltColour,sandColour, stonesColour), bty="n", cex=0.8, x.intersp=0.4)
# Depth axis on the right, percentage axis along the bottom.
axis(4,at=c(0,.1,.2,.3,.4,.5,.6,.7,.8,.9,1),labels=c('0 cm','10 cm', '20 cm', '30 cm','40 cm','50 cm',
'60 cm','70 cm','80 cm','90 cm', '100 cm'), tcl=-.2, hadj=0.6, padj=0.2, cex.axis=0.5)
axis(1, at=c(0,0.25,0.5,0.75,1.00), labels=c("0%","25%","50%","75%","100%"), tcl=-.2, mgp=c(3,.1,.5),
hadj=0.3, cex.axis=0.5)
polygon(clayPolyXs, yPlotPoints, col=clayColour)
polygon(siltPolyXs, yPlotPoints, col=siltColour)
polygon(sandPolyXs, yPlotPoints, col=sandColour)
polygon(stonePolyXs, yPlotPoints, col=stonesColour)
# 'F'/'M' root barriers: mask everything below the profile with a hatched
# rock band and label it.
if(rootBarrier == 'F' || rootBarrier == 'M') {
rect(xleft=0,ybottom=1.0,xright=1.0,ytop=bottom/100.0, col=stonesColour)
rect(xleft=0,ybottom=1.0,xright=1.0,ytop=bottom/100.0, density=20, col="black")
rect(xleft=0,ybottom=1.0,xright=1.0,ytop=bottom/100.0, density=20, angle=135, col="black")
shadowtext(0.5, (1 + (bottom/100)) / 2, "Rock", col="black", bg=stonesColour, cex=0.5)
}
invisible()
}
#' @title g e n e r a t e B l o c k P l o t T e x t u r e
#' @description Generate a texture block plot: clay/silt/sand/stones as
#' stacked horizontal bars, one bar per functional horizon, against a
#' 0-100 cm depth axis.
#' @param id character vector of measurement labels ('Clay', 'Sand',
#' 'Stones'); parallel to the other args.
#' @param name functional-horizon names (parallel to id).
#' @param top,bottom horizon depth limits (cm; the plot spans 0-100 cm).
#' @param value measurement values (percentages; parallel to id).
#' @export
generateBlockPlotTexture <- function(id, name, top, bottom, value) {
  numFhs <- length(name[id == 'Clay'])
  clayValues <- value[id == 'Clay']
  sandValues <- value[id == 'Sand']
  # Silt is the remainder of the fine-earth fraction.
  siltValues <- 100 - sandValues - clayValues
  stonesValues <- value[id == 'Stones']
  # Rescale the texture fractions to the non-stone share of each horizon.
  nonStoneValueAdjustment <- (100 - stonesValues) / 100
  clayValues <- clayValues * nonStoneValueAdjustment
  sandValues <- sandValues * nonStoneValueAdjustment
  siltValues <- siltValues * nonStoneValueAdjustment
  # Stack the bars left-to-right on a 0-1 x scale:
  # clay | silt | sand | stones (stones fill out to 100%).
  clayXmin <- rep(0, numFhs)
  clayXmax <- clayValues / 100.0
  siltXmin <- clayXmax
  siltXmax <- (clayValues + siltValues) / 100.0
  sandXmin <- siltXmax
  sandXmax <- (sandValues + siltValues + clayValues) / 100.0
  stonesXmin <- sandXmax
  stonesXmax <- rep(1.0, numFhs)
  plot.new()
  # usr=c(0,1,1,0) flips the y axis so depth increases down the page.
  par(mar=c(2,1,6,2), xpd=TRUE, las=1, usr=c(0,1,1,0))
  fhBottoms <- (bottom[1:numFhs] / 100.0)
  fhTops <- (top[1:numFhs] / 100.0)
  siltColour <- rgb(255,184,113,maxColorValue=255)
  clayColour <- rgb(182,73,82,maxColorValue=255)
  sandColour <- rgb(211,236,155,maxColorValue=255)
  stonesColour <- rgb(128,128,128,maxColorValue=255)
  text(x=0.5,y=-0.092, labels=c("Texture"), font=2, cex=1.2)
  legend(x="top",title="",inset=c(0,-.12),legend=c("Clay","Silt","Sand","Stones"),horiz=TRUE,
         fill=c(clayColour,siltColour,sandColour, stonesColour), bty="n", cex=0.8, x.intersp=0.4)
  rect(xleft=sandXmin,ybottom=fhBottoms,xright=sandXmax,ytop=fhTops, col=sandColour)
  rect(xleft=siltXmin,ybottom=fhBottoms,xright=siltXmax,ytop=fhTops, col=siltColour)
  rect(xleft=clayXmin,ybottom=fhBottoms,xright=clayXmax,ytop=fhTops, col=clayColour)
  rect(xleft=stonesXmin,ybottom=fhBottoms,xright=stonesXmax,ytop=fhTops,col=stonesColour)
  # Depth axis on the right. Drawn once: the original called axis(4, ...)
  # twice with different cex.axis values, over-plotting the tick labels.
  axis(4,at=c(0,.1,.2,.3,.4,.5,.6,.7,.8,.9,1),labels=c('0 cm','10 cm', '20 cm', '30 cm','40 cm','50 cm',
       '60 cm','70 cm','80 cm','90 cm', '100 cm'), tcl=-.2, hadj=0.6, padj=0.2, cex.axis=0.5)
  axis(1, at=c(0,0.25,0.5,0.75,1.00), labels=c("0%","25%","50%","75%","100%"), tcl=-.2, mgp=c(3,.1,.5),
       hadj=0.3, cex.axis=0.5)
  invisible()
}
######################################## HELPER FUNCTIONS ##################################################
#
# g e n e r a t e P o l y g o n X V a l u e s
#
# A helper function to combine a left hand side spline, with a right hand side spline, so that
# they are merged into a polygon. Make sure to clip the polygon at 0 and 100.
#
generatePolygonXValues <- function(leftSideX, rightSideX, yValues = NULL) {
  # yValues is unused; it now defaults to NULL so two-argument calls (the
  # only form used in this file) are valid even if the argument is forced.
  # Clamp both outlines into the valid percentage range [0, 100].
  leftSideX <- pmax(0, pmin(leftSideX, 100.0))
  rightSideX <- pmax(0, pmin(rightSideX, 100.0))
  # Walk down the left edge then back up the reversed right edge, and
  # rescale from percent to the 0-1 plot coordinates.
  c(leftSideX, rev(rightSideX)) / 100.0
}
#
# s h a d o w t e x t
#
# A helper routine to provide a shadow around drawn text.
#
shadowtext <- function(x, y=NULL, labels, col='white', bg='black',
                       theta= seq(0, 2*pi, length.out=50), r=0.1, ... ) {
  # Draw `labels` with a halo: stamp the text repeatedly in the background
  # colour, offset around a small circle of radius r (in character units),
  # then draw the foreground text once on top at the exact position.
  coords <- xy.coords(x, y)
  offset_x <- r * strwidth('A')
  offset_y <- r * strheight('A')
  for (angle in theta) {
    text(coords$x + cos(angle) * offset_x,
         coords$y + sin(angle) * offset_y,
         labels, col = bg, ...)
  }
  text(coords$x, coords$y, labels, col = col, ...)
}
#
# r e t r i e v e I m a g e
#
# Retrieve the named image with the specified file size.
#
retrieveImage <- function(filename) {
  # Read the named image file into a raw vector ("blob") and delete the
  # file. Returns the raw bytes.
  # Fail fast with a clear message; otherwise file.info() yields an NA
  # size and readBin() errors cryptically.
  if (!file.exists(filename)) {
    stop("Image file not found: ", filename, call. = FALSE)
  }
  fileSize <- file.info(filename)$size
  # Debug logging of the file being served.
  print(filename)
  print(fileSize)
  # Remove the temporary image even if readBin() fails part-way.
  on.exit(unlink(filename), add = TRUE)
  blob <- readBin(filename, 'raw', fileSize)
  return(blob)
}
| /R/soil-diagram-generator.R | no_license | pascal082/aqpplot | R | false | false | 19,116 | r | #
# U s e f u l f o r M a n u a l D e b u g g i n g
#
# NOTE: When running locally, make sure to turn off the setwd call at the start of each routine.
# and make sure that the file name location is one that you want.
#
# Running full plot for splined texture
# generateSplinedPlotTexture(sandClayStones@horizons$id, sandClayStones@horizons$name, sandClayStones@horizons$top, sandClayStones@horizons$bottom, sandClayStones@horizons$value, 'B', 0)
#
# Running shallow plot for splined texture
# generateSplinedPlotTexture(smallerSandClayStones$id, smallerSandClayStones$name, smallerSandClayStones$top, smallerSandClayStones$bottom, smallerSandClayStones$value, 'F', 0)
#
# Running full plot for splined water
#
# Running shallow plot for splined water
# generateSplinedPlotWater(smallerWaterValues$id, smallerWaterValues$name, smallerWaterValues$horizonTop, smallerWaterValues$horizonBottom, smallerWaterValues$value, 'F', 0)
#
# Running full plot for splined water
# generateSplinedPlotWater(waterValues$id, waterValues$name, waterValues$horizonTop, waterValues$horizonBottom, waterValues$value, 'B', 0)
#' @title g e n e r a t e A q p P l o t
#' @description g e n e r a t e A q p P l o t using the aqp package.
#' Renders a soil profile sketch with plotSPC into a temporary PNG file
#' and returns the file name.
#' @param id profile identifier(s); parallel to the other vectors.
#' @param name horizon names.
#' @param top,bottom horizon depth limits.
#' @param value values used to colour the horizons (legend label 'Percent').
#' @return path of the PNG file that was written.
#' @export
generateAqpPlot <- function (id, name, top, bottom, value) {
siblingParams <- data.frame(id, name, top, bottom, value, stringsAsFactors=FALSE)
# Promote the data frame to an aqp SoilProfileCollection keyed on id/top/bottom.
aqp::depths(siblingParams) <- id ~ top + bottom
# PuOr diverging palette (10 colours), interpolated into the colour ramp.
PuOrColours <- c("#7f3b08", "#b35806", "#e08214", "#fdb863", "#fee0b6", "#d8daeb",
"#b2abd2", "#8073ac", "#542788", "#2d004b" )
colourRamp11 <- colorRampPalette(PuOrColours)(10)
# NOTE(review): tmpD is not defined in this file -- assumed to be a
# package-level temp directory set elsewhere; confirm before reuse.
fileName <- tempfile(fileext='.png',tmpdir=tmpD)
png(fileName, width=480,height=480,units="px",pointsize=12)
par(mar=c(3.0, 0, 8.0, 0))
# NOTE(review): plotSPC is called unqualified -- presumably aqp::plotSPC
# with the package attached; confirm.
plotSPC(siblingParams, name='name', color='value', col.label='Percent',
col.palette=colourRamp11, axis.line.offset=-6, x.idx.offset=0.5, max.depth=100)
# addVolumeFraction(siblingParams, 'stones')
title(name, line=3.5, cex.main=1.6)
dev.flush()
dev.off()
return(fileName)
}
#' @description Thin convenience wrapper for quick manual plot checks.
#' @param n object handed straight to graphics::plot().
#' @export
testPlot <- function(n){
  graphics::plot(x = n)
}
#' @title g e n e r a t e B l o c k P l o t W a t e r
#' @description Draw the water-retention profile as stacked horizontal bars
#' (stones | fine earth | unavailable water | available water | air), one
#' bar per functional horizon, against a 0-100 cm depth axis.
#' @param id character vector of measurement labels ('Field capacity',
#' 'Wilting point', 'Total porosity', 'Stones'); parallel to the other args.
#' @param name functional-horizon names (parallel to id).
#' @param top,bottom horizon depth limits (cm; the plot spans 0-100 cm).
#' @param value measurement values (percentages; parallel to id).
#' @param runner unused; retained for interface compatibility.
#' @export
generateBlockPlotWater <- function(id, name, top, bottom, value, runner=TRUE) {
  fhNames <- name[id == 'Field capacity']
  numFhs <- length(fhNames)
  wpValues <- value[id == 'Wilting point']
  fcValues <- value[id == 'Field capacity']
  tpValues <- value[id == 'Total porosity']
  stonesValues <- value[id == 'Stones']
  # Rescale the water measurements to the non-stone share of each horizon.
  nonStoneValueAdjustment <- (100 - stonesValues) / 100
  wpValues <- wpValues * nonStoneValueAdjustment
  fcValues <- fcValues * nonStoneValueAdjustment
  tpValues <- tpValues * nonStoneValueAdjustment
  # Band widths: available water (FC-WP), air-filled porosity (TP-FC),
  # unavailable water (WP) and the remaining fine-earth solid fraction.
  awValues <- fcValues - wpValues
  airValues <- tpValues - fcValues
  unavailValues <- wpValues
  earthValues <- 100 - tpValues - stonesValues
  # Stack the bands left-to-right on a 0-1 x scale.
  stonesXmin <- rep(0, numFhs)
  stonesXmax <- stonesValues / 100.0
  earthXmin <- stonesXmax
  earthXmax <- (stonesValues + earthValues) / 100.0
  unavailXmin <- earthXmax
  unavailXmax <- (stonesValues + earthValues + unavailValues) / 100.0
  availXmin <- unavailXmax
  availXmax <- (stonesValues + earthValues + unavailValues + awValues) / 100.0
  airXmin <- availXmax
  airXmax <- rep(1.0, numFhs)
  plot.new()
  # usr=c(0,1,1,0) flips the y axis so depth increases down the page.
  par(mar=c(2,1,6,2), xpd=TRUE, las=1, usr=c(0,1,1,0))
  # With the flipped axis, depth d maps directly to y = d/100. The original
  # used 1 - d/100 here, which drew the horizons inverted relative to the
  # depth labels and to every other plotting function in this file.
  fhBottoms <- (bottom[1:numFhs] / 100.0)
  fhTops <- (top[1:numFhs] / 100.0)
  stonesColour <- rgb(84,84,84,maxColorValue=255)
  earthColour <- rgb(128,64,64,maxColorValue=255)
  unavailColour <- rgb(255,128,64,maxColorValue=255)
  availColour <- rgb(0,128,255,maxColorValue=255)
  airColour <- rgb(234,234,234,maxColorValue=255)
  text(x=0.5,y=-0.15, labels=c("Water Retention"), font=2, cex=1.2)
  legend(0.35,-0.16,title="",legend=c("Stones","Fine earth", "Air"),
         horiz=TRUE,fill=c(stonesColour,earthColour,airColour), cex=0.8, bty="n", x.intersp=0.4)
  # Second legend row: two labels, two fills (the original passed a stray
  # third fill colour here).
  legend(0.33,-0.12,title="", legend=c("Unavailable water","Available water"),horiz=TRUE,
         fill=c(unavailColour,availColour), cex=0.8, bty="n", x.intersp=0.4)
  rect(xleft=stonesXmin,ybottom=fhBottoms,xright=stonesXmax,ytop=fhTops, col=stonesColour)
  rect(xleft=earthXmin,ybottom=fhBottoms,xright=earthXmax,ytop=fhTops, col=earthColour)
  rect(xleft=unavailXmin,ybottom=fhBottoms,xright=unavailXmax,ytop=fhTops, col=unavailColour)
  rect(xleft=availXmin,ybottom=fhBottoms,xright=availXmax,ytop=fhTops, col=availColour)
  rect(xleft=airXmin,ybottom=fhBottoms,xright=airXmax,ytop=fhTops,col=airColour)
  axis(4,at=c(0,.1,.2,.3,.4,.5,.6,.7,.8,.9,1),labels=c('0 cm','10 cm', '20 cm', '30 cm','40 cm','50 cm',
       '60 cm','70 cm','80 cm','90 cm', '100 cm'), tcl=-.2, hadj=0.6, padj=0.2, cex.axis=0.5)
  axis(1, at=c(0,0.25,0.5,0.75,1.00), labels=c("0%","25%","50%","75%","100%"), tcl=-.2, mgp=c(3,.1,.5),
       hadj=0.3, cex.axis=0.5)
  invisible()
}
#' @title g e n e r a t e S p l i n e d P l o t W a t e r
#' @description Generate a water plot using splining. Draws the water
#' profile (stones, fine earth, unavailable water, available water, air)
#' as stacked polygons against depth, after smoothing the per-horizon
#' values to 1 cm slices with a mass-preserving spline (GSIF::mpspline).
#' @param id character vector of measurement labels ('Field capacity',
#' 'Wilting point', 'Total porosity', 'Stones'); parallel to the other args.
#' @param name functional-horizon names (parallel to id).
#' @param top,bottom horizon depth limits; treated as cm, with the plot
#' spanning 0-100 cm.
#' @param value measurement values (percentages; parallel to id).
#' @param rootBarrier when 'F' or 'M', a hatched "Rock" band is drawn from
#' the deepest horizon down to 100 cm.
#' @param rootBarrierFHNum unused in this implementation.
#' @export
generateSplinedPlotWater <- function(id, name, top, bottom, value, rootBarrier, rootBarrierFHNum) {
# The 'Field capacity' rows define the functional horizons (names/depths).
fhNames <- name[id == 'Field capacity']
fhTops <- top[id == 'Field capacity']
fhBottoms <- bottom[id == 'Field capacity']
numFhs <- length(fhNames)
# Depth of the deepest horizon; the spline yields one value per 1 cm slice,
# so each outline is padded to bottom+1 points (depths 0..bottom).
bottom <- fhBottoms[numFhs]
sideLength <- bottom + 1
wpValues <- value[id == 'Wilting point']
fcValues <- value[id == 'Field capacity']
tpValues <- value[id == 'Total porosity']
stonesValues <- value[id == 'Stones']
# Rescale the water measurements to the non-stone share of each horizon.
nonStoneValueAdjustment <- (100 - stonesValues) / 100
wpValues <- wpValues * nonStoneValueAdjustment;
fcValues <- fcValues * nonStoneValueAdjustment;
tpValues <- tpValues * nonStoneValueAdjustment;
# Band widths: available water (FC-WP) and air-filled porosity (TP-FC).
awValues <- fcValues - wpValues
airValues <- tpValues - fcValues
unavailValues <- wpValues
earthValues <- 100 - tpValues - stonesValues
# Convert the widths into cumulative (stacked) percentages, left to right:
# stones | earth | unavailable | available | air.
earthValues <- earthValues + stonesValues
unavailValues <- unavailValues + earthValues
availValues <- awValues + unavailValues
airValues <- airValues + availValues
#library(aqp)
# Build one aqp SoilProfileCollection per component so GSIF::mpspline can
# smooth its cumulative percentage over depth.
stonesProf <- data.frame(id=rep("Stones",times=numFhs), name=fhNames, top=fhTops,
bottom=fhBottoms, value=stonesValues, stringsAsFactors=FALSE)
aqp::depths(stonesProf) <- id ~ top + bottom
earthProf <- data.frame(id=rep("Earth",times=numFhs), name=fhNames, top=fhTops,
bottom=fhBottoms, value=earthValues, stringsAsFactors=FALSE)
aqp::depths(earthProf) <- id ~ top + bottom
unavailProf <- data.frame(id=rep("Unavail",times=numFhs), names=fhNames, top=fhTops,
bottom=fhBottoms, value=unavailValues, stringsAsFactors=FALSE)
aqp::depths(unavailProf) <- id ~ top + bottom
availProf <- data.frame(id=rep("Avail",times=numFhs), names=fhNames, top=fhTops,bottom=fhBottoms, value=availValues, stringsAsFactors=FALSE)
aqp::depths(availProf) <- id ~ top + bottom
airProf <- data.frame(id=rep("Air",times=numFhs), names=fhNames,top=fhTops,
bottom=fhBottoms, value=airValues, stringsAsFactors=FALSE)
aqp::depths(airProf) <- id ~ top + bottom
# Spline each outline, clamped (vlow/vhigh) to the observed value range.
stonesSpline <- GSIF::mpspline(stonesProf, var.name="value", vlow=min(stonesProf@horizons$value),
vhigh=max(stonesProf@horizons$value),lam=0.1)
# Drop NA slices (beyond the profile) and repeat the first slice so the
# outline length matches yPlotPoints below (assumes the profile starts at 0 cm).
xPlotStones <- stonesSpline$var.1cm[!is.na(stonesSpline$var.1cm)]
xPlotStones <- c(xPlotStones[1], xPlotStones)
earthSpline <- GSIF::mpspline(earthProf, var.name="value", vlow=min(earthProf@horizons$value),
vhigh=max(earthProf@horizons$value),lam=0.1)
xPlotEarth <- earthSpline$var.1cm[!is.na(earthSpline$var.1cm)]
xPlotEarth <- c(xPlotEarth[1], xPlotEarth)
unavailSpline <- GSIF::mpspline(unavailProf, var.name="value", vlow=min(unavailProf@horizons$value),
vhigh=max(unavailProf@horizons$value),lam=0.1)
xPlotUnavail <- unavailSpline$var.1cm[!is.na(unavailSpline$var.1cm )]
xPlotUnavail <- c(xPlotUnavail[1], xPlotUnavail)
availSpline <- GSIF::mpspline(availProf, var.name="value", vlow=min(availProf@horizons$value),
vhigh=max(availProf@horizons$value),lam=0.1)
xPlotAvail <- availSpline$var.1cm[!is.na(availSpline$var.1cm)]
xPlotAvail <- c(xPlotAvail[1], xPlotAvail)
# y path shared by every polygon: walk down the profile then back up (0-1 scale).
yPlotPoints <- c(seq(0,bottom,1),seq(bottom,0,-1)) / 100.0
# Each band is bounded by the previous cumulative outline (left edge) and
# its own outline (right edge); air fills out to 100%.
stonesPolyXs <- generatePolygonXValues(rep(0.0,times=sideLength), xPlotStones)
earthPolyXs <- generatePolygonXValues(xPlotStones,xPlotEarth)
unavailPolyXs <- generatePolygonXValues(xPlotEarth,xPlotUnavail)
availPolyXs <- generatePolygonXValues(xPlotUnavail,xPlotAvail)
airPolyXs <- generatePolygonXValues(xPlotAvail, rep(100.0,times=sideLength))
stonesColour <- rgb(84,84,84,maxColorValue=255)
earthColour <- rgb(128,64,64,maxColorValue=255)
unavailColour <- rgb(255,128,64,maxColorValue=255)
availColour <- rgb(0,128,255,maxColorValue=255)
airColour <- rgb(234,234,234,maxColorValue=255)
plot.new()
# usr=c(0,1,1,0) flips the y axis so depth increases down the page; xpd=TRUE
# lets the title/legends draw in the top margin.
par(mar=c(2,1,6,2), xpd=TRUE, las=1, usr=c(0,1,1,0))
text(x=0.5,y=-0.15, labels=c("Water Retention"), font=2, cex=1.2)
legend(0.35,-0.16,title="",legend=c("Stones","Fine earth", "Air"),
horiz=TRUE,fill=c(stonesColour,earthColour,airColour), cex=0.8, bty="n", x.intersp=0.4)
legend(0.33,-0.12,title="", legend=c("Unavailable water","Available water"),horiz=TRUE,
fill=c(unavailColour,availColour, airColour), cex=0.8, bty="n", x.intersp=0.4)
# Depth axis on the right, percentage axis along the bottom.
axis(4,at=c(0,.1,.2,.3,.4,.5,.6,.7,.8,.9,1),labels=c('0 cm','10 cm', '20 cm', '30 cm','40 cm','50 cm',
'60 cm','70 cm','80 cm','90 cm', '100 cm'), tcl=-.2, hadj=0.6, padj=0.2, cex.axis=0.5)
axis(1, at=c(0,0.25,0.5,0.75,1.00), labels=c("0%","25%","50%","75%","100%"), tcl=-.2, mgp=c(3,.1,.5),
hadj=0.3, cex.axis=0.5)
polygon(stonesPolyXs, yPlotPoints, col=stonesColour)
polygon(earthPolyXs, yPlotPoints, col=earthColour)
polygon(unavailPolyXs, yPlotPoints, col=unavailColour)
polygon(availPolyXs, yPlotPoints, col=availColour)
polygon(airPolyXs, yPlotPoints, col=airColour)
# 'F'/'M' root barriers: mask everything below the profile with a hatched
# rock band and label it.
if(rootBarrier == 'F' || rootBarrier == 'M') {
rect(xleft=0,ybottom=1.0,xright=1.0,ytop=bottom/100.0, col=stonesColour)
rect(xleft=0,ybottom=1.0,xright=1.0,ytop=bottom/100.0, density=20, col="black")
rect(xleft=0,ybottom=1.0,xright=1.0,ytop=bottom/100.0, density=20, angle=135, col="black")
shadowtext(0.5, (1 + (bottom/100)) / 2, "Rock", col="black", bg=stonesColour, cex=0.5)
}
invisible()
}
#' @title g e n e r a t e S p l i n e d P l o t T e x t u r e
#' @description Generate a texture plot using splining. Draws the texture
#' profile (clay, silt, sand, stones) as stacked polygons against depth,
#' after smoothing the per-horizon values to 1 cm slices with a
#' mass-preserving spline (GSIF::mpspline).
#' @param id character vector of measurement labels ('Clay', 'Sand',
#' 'Stones'); parallel to the other args.
#' @param name functional-horizon names (parallel to id).
#' @param top,bottom horizon depth limits; treated as cm, with the plot
#' spanning 0-100 cm.
#' @param value measurement values (percentages; parallel to id).
#' @param rootBarrier when 'F' or 'M', a hatched "Rock" band is drawn from
#' the deepest horizon down to 100 cm.
#' @param rootBarrierFHNum unused in this implementation.
#' @export
generateSplinedPlotTexture <- function(id, name, top, bottom, value, rootBarrier, rootBarrierFHNum) {
# The 'Clay' rows define the functional horizons (names/depths).
fhNames <- name[id == 'Clay']
fhTops <- top[id == 'Clay']
fhBottoms <- bottom[id == 'Clay']
numFhs <- length(fhNames)
# Depth of the deepest horizon; each splined outline is padded to bottom+1 points.
bottom <- fhBottoms[numFhs]
sideLength <- bottom + 1
clayValues <- value[id == 'Clay']
sandValues <- value[id == 'Sand']
# Silt is the remainder of the fine-earth fraction.
siltValues <- 100 - sandValues - clayValues
stonesValues <- value[id == 'Stones']
# Rescale the texture fractions to the non-stone share of each horizon.
nonStoneValueAdjustment <- (100 - stonesValues) / 100
clayValues <- clayValues * nonStoneValueAdjustment
siltValues <- siltValues * nonStoneValueAdjustment
sandValues <- sandValues * nonStoneValueAdjustment
# Convert widths to cumulative (stacked) percentages: clay | silt | sand,
# with stones filling the remainder up to 100.
siltValues <- siltValues + clayValues
sandValues <- sandValues + siltValues
# Build one aqp SoilProfileCollection per component for GSIF::mpspline.
clayProf <- data.frame(id=rep("Clay",times=numFhs), name=fhNames, top=fhTops,
bottom=fhBottoms, value=clayValues, stringsAsFactors=FALSE)
aqp::depths(clayProf) <- id ~ top + bottom
siltProf <- data.frame(id=rep("Silt",times=numFhs), name=fhNames, top=fhTops,
bottom=fhBottoms, value=siltValues, stringsAsFactors=FALSE)
aqp::depths(siltProf) <- id ~ top + bottom
sandProf <- data.frame(id=rep("Sand",times=numFhs), names=fhNames, top=fhTops,
bottom=fhBottoms, value=sandValues, stringsAsFactors=FALSE)
aqp::depths(sandProf) <- id ~ top + bottom
# Spline each cumulative outline, clamped (vlow/vhigh) to the observed
# range; drop NA slices and repeat the first slice so the outline length
# matches yPlotPoints below.
claySpline <- GSIF::mpspline(clayProf, var.name="value", vlow=min(clayProf@horizons$value),
vhigh=max(clayProf@horizons$value),lam=0.1)
xPlotClay <- claySpline$var.1cm[!is.na(claySpline$var.1cm)]
xPlotClay <- c(xPlotClay[1], xPlotClay)
siltSpline <- GSIF::mpspline(siltProf, var.name="value", vlow=min(siltProf@horizons$value),
vhigh=max(siltProf@horizons$value),lam=0.1)
xPlotSilt <- siltSpline$var.1cm[!is.na(siltSpline$var.1cm)]
xPlotSilt <- c(xPlotSilt[1], xPlotSilt)
sandSpline <- GSIF::mpspline(sandProf, var.name="value", vlow=min(sandProf@horizons$value),
vhigh=max(sandProf@horizons$value),lam=0.1)
xPlotSand <- sandSpline$var.1cm[!is.na(sandSpline$var.1cm)]
xPlotSand <- c(xPlotSand[1], xPlotSand)
# y path shared by every polygon: walk down the profile then back up (0-1 scale).
yPlotPoints <- c(seq(0,bottom,1),seq(bottom,0,-1)) / 100.0
# Each band is bounded by the previous cumulative outline (left edge) and
# its own outline (right edge); stones fill from sand out to 100%.
clayPolyXs <- generatePolygonXValues(rep(0.0,times=sideLength), xPlotClay)
siltPolyXs <- generatePolygonXValues(xPlotClay,xPlotSilt)
sandPolyXs <- generatePolygonXValues(xPlotSilt,xPlotSand)
stonePolyXs <- generatePolygonXValues(xPlotSand, rep(100.0,times=sideLength))
siltColour <- rgb(255,184,113,maxColorValue=255)
clayColour <- rgb(182,73,82,maxColorValue=255)
sandColour <- rgb(211,236,155,maxColorValue=255)
stonesColour <- rgb(128,128,128,maxColorValue=255)
plot.new()
# usr=c(0,1,1,0) flips the y axis so depth increases down the page.
par(mar=c(2,1,6,2), xpd=TRUE, las=1, usr=c(0,1,1,0), lwd=0.8)
text(x=0.5,y=-0.092, labels=c("Texture"), font=2, cex=1.2)
legend(x="top",title="",inset=c(0,-.12),legend=c("Clay","Silt","Sand","Stones"),horiz=TRUE,
fill=c(clayColour,siltColour,sandColour, stonesColour), bty="n", cex=0.8, x.intersp=0.4)
# Depth axis on the right, percentage axis along the bottom.
axis(4,at=c(0,.1,.2,.3,.4,.5,.6,.7,.8,.9,1),labels=c('0 cm','10 cm', '20 cm', '30 cm','40 cm','50 cm',
'60 cm','70 cm','80 cm','90 cm', '100 cm'), tcl=-.2, hadj=0.6, padj=0.2, cex.axis=0.5)
axis(1, at=c(0,0.25,0.5,0.75,1.00), labels=c("0%","25%","50%","75%","100%"), tcl=-.2, mgp=c(3,.1,.5),
hadj=0.3, cex.axis=0.5)
polygon(clayPolyXs, yPlotPoints, col=clayColour)
polygon(siltPolyXs, yPlotPoints, col=siltColour)
polygon(sandPolyXs, yPlotPoints, col=sandColour)
polygon(stonePolyXs, yPlotPoints, col=stonesColour)
# 'F'/'M' root barriers: mask everything below the profile with a hatched
# rock band and label it.
if(rootBarrier == 'F' || rootBarrier == 'M') {
rect(xleft=0,ybottom=1.0,xright=1.0,ytop=bottom/100.0, col=stonesColour)
rect(xleft=0,ybottom=1.0,xright=1.0,ytop=bottom/100.0, density=20, col="black")
rect(xleft=0,ybottom=1.0,xright=1.0,ytop=bottom/100.0, density=20, angle=135, col="black")
shadowtext(0.5, (1 + (bottom/100)) / 2, "Rock", col="black", bg=stonesColour, cex=0.5)
}
invisible()
}
#' @title g e n e r a t e B l o c k P l o t T e x t u r e
#' @description Generate a texture block plot: clay/silt/sand/stones as
#' stacked horizontal bars, one bar per functional horizon, against a
#' 0-100 cm depth axis.
#' @param id character vector of measurement labels ('Clay', 'Sand',
#' 'Stones'); parallel to the other args.
#' @param name functional-horizon names (parallel to id).
#' @param top,bottom horizon depth limits (cm; the plot spans 0-100 cm).
#' @param value measurement values (percentages; parallel to id).
#' @export
generateBlockPlotTexture <- function(id, name, top, bottom, value) {
  numFhs <- length(name[id == 'Clay'])
  clayValues <- value[id == 'Clay']
  sandValues <- value[id == 'Sand']
  # Silt is the remainder of the fine-earth fraction.
  siltValues <- 100 - sandValues - clayValues
  stonesValues <- value[id == 'Stones']
  # Rescale the texture fractions to the non-stone share of each horizon.
  nonStoneValueAdjustment <- (100 - stonesValues) / 100
  clayValues <- clayValues * nonStoneValueAdjustment
  sandValues <- sandValues * nonStoneValueAdjustment
  siltValues <- siltValues * nonStoneValueAdjustment
  # Stack the bars left-to-right on a 0-1 x scale:
  # clay | silt | sand | stones (stones fill out to 100%).
  clayXmin <- rep(0, numFhs)
  clayXmax <- clayValues / 100.0
  siltXmin <- clayXmax
  siltXmax <- (clayValues + siltValues) / 100.0
  sandXmin <- siltXmax
  sandXmax <- (sandValues + siltValues + clayValues) / 100.0
  stonesXmin <- sandXmax
  stonesXmax <- rep(1.0, numFhs)
  plot.new()
  # usr=c(0,1,1,0) flips the y axis so depth increases down the page.
  par(mar=c(2,1,6,2), xpd=TRUE, las=1, usr=c(0,1,1,0))
  fhBottoms <- (bottom[1:numFhs] / 100.0)
  fhTops <- (top[1:numFhs] / 100.0)
  siltColour <- rgb(255,184,113,maxColorValue=255)
  clayColour <- rgb(182,73,82,maxColorValue=255)
  sandColour <- rgb(211,236,155,maxColorValue=255)
  stonesColour <- rgb(128,128,128,maxColorValue=255)
  text(x=0.5,y=-0.092, labels=c("Texture"), font=2, cex=1.2)
  legend(x="top",title="",inset=c(0,-.12),legend=c("Clay","Silt","Sand","Stones"),horiz=TRUE,
         fill=c(clayColour,siltColour,sandColour, stonesColour), bty="n", cex=0.8, x.intersp=0.4)
  rect(xleft=sandXmin,ybottom=fhBottoms,xright=sandXmax,ytop=fhTops, col=sandColour)
  rect(xleft=siltXmin,ybottom=fhBottoms,xright=siltXmax,ytop=fhTops, col=siltColour)
  rect(xleft=clayXmin,ybottom=fhBottoms,xright=clayXmax,ytop=fhTops, col=clayColour)
  rect(xleft=stonesXmin,ybottom=fhBottoms,xright=stonesXmax,ytop=fhTops,col=stonesColour)
  # Depth axis on the right. Drawn once: the original called axis(4, ...)
  # twice with different cex.axis values, over-plotting the tick labels.
  axis(4,at=c(0,.1,.2,.3,.4,.5,.6,.7,.8,.9,1),labels=c('0 cm','10 cm', '20 cm', '30 cm','40 cm','50 cm',
       '60 cm','70 cm','80 cm','90 cm', '100 cm'), tcl=-.2, hadj=0.6, padj=0.2, cex.axis=0.5)
  axis(1, at=c(0,0.25,0.5,0.75,1.00), labels=c("0%","25%","50%","75%","100%"), tcl=-.2, mgp=c(3,.1,.5),
       hadj=0.3, cex.axis=0.5)
  invisible()
}
######################################## HELPER FUNCTIONS ##################################################
#
# g e n e r a t e P o l y g o n X V a l u e s
#
# A helper function to combine a left hand side spline, with a right hand side spline, so that
# they are merged into a polygon. Make sure to clip the polygon at 0 and 100.
#
generatePolygonXValues <- function(leftSideX, rightSideX, yValues = NULL) {
  # yValues is unused; it now defaults to NULL so two-argument calls (the
  # only form used in this file) are valid even if the argument is forced.
  # Clamp both outlines into the valid percentage range [0, 100].
  leftSideX <- pmax(0, pmin(leftSideX, 100.0))
  rightSideX <- pmax(0, pmin(rightSideX, 100.0))
  # Walk down the left edge then back up the reversed right edge, and
  # rescale from percent to the 0-1 plot coordinates.
  c(leftSideX, rev(rightSideX)) / 100.0
}
#
# s h a d o w t e x t
#
# A helper routine to provide a shadow around drawn text.
#
shadowtext <- function(x, y=NULL, labels, col='white', bg='black',
                       theta= seq(0, 2*pi, length.out=50), r=0.1, ... ) {
  # Draw `labels` with a halo: stamp the text repeatedly in the background
  # colour, offset around a small circle of radius r (in character units),
  # then draw the foreground text once on top at the exact position.
  coords <- xy.coords(x, y)
  offset_x <- r * strwidth('A')
  offset_y <- r * strheight('A')
  for (angle in theta) {
    text(coords$x + cos(angle) * offset_x,
         coords$y + sin(angle) * offset_y,
         labels, col = bg, ...)
  }
  text(coords$x, coords$y, labels, col = col, ...)
}
#
# r e t r i e v e I m a g e
#
# Retrieve the named image with the specified file size.
#
retrieveImage <- function(filename) {
  # Read the named image file into a raw vector ("blob") and delete the
  # file. Returns the raw bytes.
  # Fail fast with a clear message; otherwise file.info() yields an NA
  # size and readBin() errors cryptically.
  if (!file.exists(filename)) {
    stop("Image file not found: ", filename, call. = FALSE)
  }
  fileSize <- file.info(filename)$size
  # Debug logging of the file being served.
  print(filename)
  print(fileSize)
  # Remove the temporary image even if readBin() fails part-way.
  on.exit(unlink(filename), add = TRUE)
  blob <- readBin(filename, 'raw', fileSize)
  return(blob)
}
|
# October 26, 2018
#' Class \code{ExpSurrogate}
#'
#' Exponential surrogate for 0/1 loss.
#'
#' @name ExpSurrogate-class
#'
#' @keywords internal
#'
#' @include L_Surrogate.R
# ExpSurrogate adds no slots of its own; it exists so S4 dispatch can pick
# the exponential-loss versions of .phiFunc/.dPhiFunc below. The parent
# "Surrogate" class is defined in L_Surrogate.R (see @include above).
setClass(Class = "ExpSurrogate", contains = "Surrogate")
##########
## METHODS
##########
#' Methods Available for Objects of Class \code{ExpSurrogate}
#'
#' @name ExpSurrogate-methods
#'
#' @keywords internal
NULL
#' \code{.phiFunc}
#' calculates exponential surrogate loss-function
#'
#' @rdname ExpSurrogate-methods
setMethod(f = ".phiFunc",
          signature = c(surrogate = "ExpSurrogate"),
          definition = function(surrogate, u) {
            # Exponential surrogate loss: phi(u) = exp(-u).
            exp(-u)
          })
#' \code{.dPhiFunc}
#' calculates derivative of exponential surrogate loss-function
#'
#' @rdname ExpSurrogate-methods
setMethod(f = ".dPhiFunc",
          signature = c(surrogate = "ExpSurrogate"),
          definition = function(surrogate, u, du) {
            # Chain rule for the exponential surrogate:
            # d/dtheta exp(-u) = -exp(-u) * du.
            -exp(-u) * du
          })
| /R/L_ExpSurrogate.R | no_license | cran/DynTxRegime | R | false | false | 1,030 | r | # October 26, 2018
#' Class \code{ExpSurrogate}
#'
#' Exponential surrogate for 0/1 loss.
#'
#' @name ExpSurrogate-class
#'
#' @keywords internal
#'
#' @include L_Surrogate.R
# ExpSurrogate adds no slots of its own; it exists so S4 dispatch can pick
# the exponential-loss versions of .phiFunc/.dPhiFunc below. The parent
# "Surrogate" class is defined in L_Surrogate.R (see @include above).
setClass(Class = "ExpSurrogate", contains = "Surrogate")
##########
## METHODS
##########
#' Methods Available for Objects of Class \code{ExpSurrogate}
#'
#' @name ExpSurrogate-methods
#'
#' @keywords internal
NULL
#' \code{.phiFunc}
#' calculates exponential surrogate loss-function
#'
#' @rdname ExpSurrogate-methods
setMethod(f = ".phiFunc",
          signature = c(surrogate = "ExpSurrogate"),
          definition = function(surrogate, u) {
            # Exponential surrogate loss: phi(u) = exp(-u).
            exp(-u)
          })
#' \code{.dPhiFunc}
#' calculates derivative of exponential surrogate loss-function
#'
#' @rdname ExpSurrogate-methods
setMethod(f = ".dPhiFunc",
          signature = c(surrogate = "ExpSurrogate"),
          definition = function(surrogate, u, du) {
            # Chain rule for the exponential surrogate:
            # d/dtheta exp(-u) = -exp(-u) * du.
            -exp(-u) * du
          })
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/network_window.R
\name{network_window}
\alias{network_window}
\title{Create network window zones}
\usage{
network_window(adjacency_matrix, dist_matrix, type, cluster_max)
}
\arguments{
\item{adjacency_matrix}{A boolean matrix, with element (\emph{i},\emph{j}) set
to TRUE if location \emph{j} is adjacent to location \emph{i}.}
\item{dist_matrix}{Distance matrix}
\item{type}{Currently, "connected_B" only.}
\item{cluster_max}{Maximum cluster size.
If this value is reached, the zone will not be expanded any further.
It's a good idea to keep it to the number of stops on the line you're
dealing with.}
}
\description{
Create network window zones
}
| /man/network_window.Rd | permissive | uribo/ssrn | R | false | true | 735 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/network_window.R
\name{network_window}
\alias{network_window}
\title{Create network window zones}
\usage{
network_window(adjacency_matrix, dist_matrix, type, cluster_max)
}
\arguments{
\item{adjacency_matrix}{A boolean matrix, with element (\emph{i},\emph{j}) set
to TRUE if location \emph{j} is adjacent to location \emph{i}.}
\item{dist_matrix}{Distance matrix}
\item{type}{Currently, "connected_B" only.}
\item{cluster_max}{Maximum cluster size.
If this value is reached, the zone will not be expanded any further.
It's a good idea to keep it to the number of stops on the line you're
dealing with.}
}
\description{
Create network window zones
}
|
# Unroot the phylogenetic tree stored in 9045_0.txt; the write.tree() call
# that follows this block saves the unrooted tree to 9045_0_unrooted.txt.
library(ape)
testtree <- read.tree("9045_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="9045_0_unrooted.txt") | /codeml_files/newick_trees_processed/9045_0/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 135 | r | library(ape)
# Read the Newick tree, unroot it, and write the result back out.
phylo <- read.tree("9045_0.txt")
phylo_unrooted <- unroot(phylo)
write.tree(phylo_unrooted, file = "9045_0_unrooted.txt")
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/location.R
\name{pull_location}
\alias{pull_location}
\title{Read an HDX location}
\usage{
pull_location(identifier = NULL, include_datasets = FALSE,
configuration = NULL, ...)
}
\arguments{
\item{identifier}{Character, the location uuid}
\item{include_datasets}{Logical (default \code{FALSE}); whether to also include the location's datasets}
\item{configuration}{Configuration, a configuration object}
\item{...}{Extra parameters}
}
\value{
Location
}
\description{
Read an HDX location
}
\examples{
\dontrun{
#Setting the config to use HDX default server
set_rhdx_config()
res <- pull_location("mli")
res
}
}
| /man/pull_location.Rd | permissive | bmpacifique/rhdx | R | false | true | 586 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/location.R
\name{pull_location}
\alias{pull_location}
\title{Read an HDX location}
\usage{
pull_location(identifier = NULL, include_datasets = FALSE,
configuration = NULL, ...)
}
\arguments{
\item{identifier}{Character, location uuid}
\item{include_datasets}{Logical, whether to also include the datasets available for the location (default \code{FALSE})}
\item{configuration}{Configuration, a configuration object}
\item{...}{Extra parameters}
}
\value{
Location
}
\description{
Read an HDX location
}
\examples{
\dontrun{
#Setting the config to use HDX default server
set_rhdx_config()
res <- pull_location("mli")
res
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shape_measures.R
\name{LER}
\alias{LER}
\title{Life Expectancy Ratio}
\usage{
LER(x, nx, ex, harmonized)
}
\description{
Life Expectancy Ratio
}
\keyword{internal}
| /man/LER.Rd | no_license | jschoeley/pash | R | false | true | 242 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shape_measures.R
\name{LER}
\alias{LER}
\title{Life Expectancy Ratio}
\usage{
LER(x, nx, ex, harmonized)
}
\description{
Life Expectancy Ratio
}
\keyword{internal}
|
### Use mvabund and model selection to determine which wood traits best explain variation in OTU abundances
#- Author: Marissa Lee
#- Type: modified project code
#- Test data source: https://github.com/Zanne-Lab/woodEndophytes
# -------------------------------------------------------------------#
# A bit of intro into using mvabund...
# this is largely pinched from http://environmentalcomputing.net/introduction-to-mvabund/
# also see David Warton's mvabund video that was paired with his article in Methods in Ecology and Evolution (http://eco-stats.blogspot.com/2012/03/introducing-mvabund-package-and-why.html)
## How does this method differ from other multivariate analyses?
# Many commonly used analyses for multivariate data sets (e.g. PERMANOVA, ANOSIM, CCA, RDA etc.) are "distance-based analyses".
# This means the first step of the analysis is to calculate a measure of similarity between each pair of samples, thus converting a multivariate dataset into a univariate one.
## Why is this a problem?
# 1) Low statisical power, except for variables with high variance. This means that for variables which are less variable, the analyses are less likely to detect a treatment effect.
# 2) Does not account for a very important property of multivariate data, which is the mean-variance relationship. Typically, in multivariate datasets like species-abundance data sets, counts for rare species will have many zeros with little variance, and the higher counts for more abundant species will be more variable.
## What is the solution?
# Include an assumption of a mean-variance relationship.
# Fit a single generalised linear model (GLM) to each response variable (or all response variables with site as a row effect if composition = TRUE) with a common set of predictor variables.
# We can then use resampling to test for significant community level or species level responses to our predictors.
# Plus, model-based framework makes it easier to check our assumptions and interpret uncertainty around our findings
# -------------------------------------------------------------------#
# load libraries
# library() fails fast if a package is missing, whereas require() only
# returns FALSE and lets the script crash later with a confusing error.
library(mvabund)
library(dplyr) # this has handy dataframe manipulation tools
library(tidyr) # ditto
library(ggplot2)
# -------------------------------------------------------------------#
# load data
# this dataset has been modified and simplified... the original version has not yet been published
exdata <- readRDS(file = "data/woodEndophyte_exData.RData")
# here's the OTU matrix
otus <- exdata[['otus']]
head(otus)
# the OTU matrix has been minimally processed
# it has been quality checked to remove failed samples (i.e. abundance of reads below a threshold) and remove OTUs that clearly are not fungal
dim(otus)
# there are 50 samples (rows) and 1000 OTUS (cols)
# here's the covariate dataframe (with sample metadata too)
covariates <- exdata[['covariates']]
head(covariates)
# Trim out rare OTUs because they don't contribute much information:
# keep only OTUs present (count > 0) in more than minPerc % of the samples.
minPerc <- 20
numSamps <- nrow(otus)
minSamps <- floor(numSamps * (minPerc / 100))
# colSums() is the vectorized equivalent of apply(otus > 0, 2, sum):
# the number of samples in which each OTU was observed.
samplesWithOTU <- colSums(otus > 0)
otus <- otus[, samplesWithOTU > minSamps]
dim(otus)
# now we have 55 OTUs
# select environmental variables you'd like to use to explain OTU composition/abundances
covariates %>%
select(size, waterperc, density) -> envVars
# -------------------------------------------------------------------#
# Check out how the raw data are distributed
df <- data.frame(samp = row.names(otus), otus)
df %>%
gather(key = "OTU", value = "num.reads", -samp) -> tmp
ggplot(tmp, aes(x = OTU, y = num.reads)) +
geom_point() +
coord_flip()
# It looks like some OTUs are much more abundant and variable than others
# It's probably a good idea to check our mean-variance relationship then... first let's put this data into mvabund-readable format
# -------------------------------------------------------------------#
# Put data in mvabund-friendly format
fung <- list(abund = otus, x = envVars)
Dat <- mvabund(fung$abund, row.names=row.names(fung$abund)) # so R knows to treat Dat as multivariate abundance
size <- factor(fung$x$size)
waterperc <- fung$x$waterperc
density <- fung$x$density
# -------------------------------------------------------------------#
# Check out the mean-variance relationship
meanvar.plot(Dat, xlab="Mean OTU reads", ylab="Variance in OTU reads") # with log scale
# -------------------------------------------------------------------#
# Specify mvabund models
## Data structure considerations:
# Mean and variance are positively correlated in our data...
# We can deal with this relationship by choosing a family of GLMs with an appropriate mean-variance assumption
# default family = "negative binomial", assumes a quadratic mean-variance relationship and a log-linear relationship between the response variables and any continuous variables
# composition = TRUE/ FALSE (default) fits a separate model to each species. TRUE fits a single model to all variables, including site as a row effect, such that all other terms model relative abundance (compositional effects).
## Biological questions:
# (1) Are endophyte compositions shaped by each of the following host habitat characteristics:
# stem diamter size (categorical)
# host species density (continuous)
# water content (continuous)
# (2) Assuming that size is a key factor, which is more important is shaping the community -- density or water content?
# -------------------------------------------------------------------#
# Approach for Q1:
# We can test the multivariate hypothesis of whether species composition varied across the habitats by using the anova function.
# This gives an analysis of deviance table where we use likelihood ratio tests and resampled p values to look for a significant effect of Habitat on the community data.
ft.size <- manyglm(Dat ~ size, family="negative.binomial", composition = T)
#ft.density <- manyglm(Dat ~ density, family="negative.binomial", composition = T)
#ft.waterperc <- manyglm(Dat ~ waterperc, family="negative.binomial", composition = T)
# check out what is the in the fitted model object
str(ft.size)
ft.size$coefficients
ft.size$stderr.coefficients
# check residuals
# if you have a linear relationship or fan shape, you might be modeling the wrong distribution
# each color is associated with an OTU (and there aren't enough colors)
plot(ft.size, which=1, cex=0.5, caption = "", xlab = "")
#plot(ft.density, which=1, cex=0.5, caption = "", xlab = "")
#plot(ft.waterperc, which=1, cex=0.5, caption = "", xlab = "")
# compute the analysis of deviance table -- be careful this can take a while
# resamp = pit.trap (default); Method of resamping the distribution that accounts for correlation in testing
# test = LR (default); Likelihood-Ratio test
# p.uni = none (default); Calculate univariate (e.g. each OTU) test stats and their p-values
# nBoot = 999 (default); Number of bootstrap iterations
an.size <- anova(ft.size, resamp = "pit.trap", test = "LR", p.uni="none",
nBoot = 99, show.time = "all")
an.size
#We can see from this table that there is a significant effect of size (LRT = 153.1, P = 0.01),
# meaning that the OTU composition clearly differs between the small and large wood stems.
#To examine this further, and see which OTUs are more likely to be found on which size stems,
# we can run univariate tests for each OTU separately...
# This is done by using the p.uni="adjusted" argument in the anova function.
# The "adjusted" part of the argument refers to the resampling method used to compute the p values, taking into account the correlation between the response variables.
an.size.uni <- anova(ft.size, resamp = "pit.trap", test = "LR", p.uni="adjusted",
nBoot = 99, show.time = "all")
an.size.uni
# many individual OTUs are not sensitive to stem size, but a few are like ITSall_OTUa_7145
# now let's figure out whether ITSall_OTUa_7145 increases/decreases with stem size by looking the coefficient estimate
ft.size$coefficients[,'ITSall_OTUa_7145']
# plot the real data to confirm
df <- data.frame(samp = row.names(otus), otus)
df %>%
gather(key = "OTU", value = "num.reads", -samp) %>%
filter(OTU == "ITSall_OTUa_7145") %>%
rename('seq_sampName'='samp') %>%
left_join(covariates) -> tmp
ggplot(tmp, aes(x = size, y = num.reads)) +
geom_jitter(width = .2)
# -------------------------------------------------------------------#
# Approach for Q2: Compare the AIC value of candidate model to that of the base model
ft.base <- manyglm(Dat ~ size, family="negative.binomial", composition = T)
ft.m.density <- manyglm(Dat ~ size + density, family="negative.binomial", composition = T)
ft.m.waterperc <- manyglm(Dat ~ size + waterperc, family="negative.binomial", composition = T)
mod.list <- list(base = ft.base,
density = ft.m.density,
waterperc = ft.m.waterperc)
# check residuals
# each color is associated with an OTU (and there aren't enough colors)
plot(ft.base, which=1, cex=0.5, caption = "", xlab = "")
plot(ft.m.density, which=1, cex=0.5, caption = "", xlab = "")
plot(ft.m.waterperc, which=1, cex=0.5, caption = "", xlab = "")
#calculate the total AIC for each model
aic.list <- lapply(mod.list, function(x){
sum(AIC(x)) # sum AIC values for each OTUId
})
aic.list
bic.list <- lapply(mod.list, function(x){
sum(BIC(x)) # sum AIC values for each OTUId
})
bic.list
#calculate deltaAIC = base AIC - candidate AIC
baseAIC <- aic.list[['base']]
aic.df <- data.frame(modelName = names(unlist(aic.list)),
AIC = unlist(aic.list))
aic.df %>%
mutate(deltaAIC_base_candidate = baseAIC - AIC) -> aic.df
aic.df
# lower AIC indicates a better model fit
# so, adding waterperc to the model increases the model fit (decreases the AIC) whereas adding density decreases the model fit (increases the AIC)
| /code/modelselection_mvabund.R | no_license | neiljun/tutorials_communityAnalyses | R | false | false | 9,915 | r |
### Use mvabund and model selection to determine which wood traits best explain variation in OTU abundances
#- Author: Marissa Lee
#- Type: modified project code
#- Test data source: https://github.com/Zanne-Lab/woodEndophytes
# -------------------------------------------------------------------#
# A bit of intro into using mvabund...
# this is largely pinched from http://environmentalcomputing.net/introduction-to-mvabund/
# also see David Warton's mvabund video that was paired with his article in Methods in Ecology and Evolution (http://eco-stats.blogspot.com/2012/03/introducing-mvabund-package-and-why.html)
## How does this method differ from other multivariate analyses?
# Many commonly used analyses for multivariate data sets (e.g. PERMANOVA, ANOSIM, CCA, RDA etc.) are "distance-based analyses".
# This means the first step of the analysis is to calculate a measure of similarity between each pair of samples, thus converting a multivariate dataset into a univariate one.
## Why is this a problem?
# 1) Low statisical power, except for variables with high variance. This means that for variables which are less variable, the analyses are less likely to detect a treatment effect.
# 2) Does not account for a very important property of multivariate data, which is the mean-variance relationship. Typically, in multivariate datasets like species-abundance data sets, counts for rare species will have many zeros with little variance, and the higher counts for more abundant species will be more variable.
## What is the solution?
# Include an assumption of a mean-variance relationship.
# Fit a single generalised linear model (GLM) to each response variable (or all response variables with site as a row effect if composition = TRUE) with a common set of predictor variables.
# We can then use resampling to test for significant community level or species level responses to our predictors.
# Plus, model-based framework makes it easier to check our assumptions and interpret uncertainty around our findings
# -------------------------------------------------------------------#
# load libraries
# library() fails fast if a package is missing, whereas require() only
# returns FALSE and lets the script crash later with a confusing error.
library(mvabund)
library(dplyr) # this has handy dataframe manipulation tools
library(tidyr) # ditto
library(ggplot2)
# -------------------------------------------------------------------#
# load data
# this dataset has been modified and simplified... the original version has not yet been published
exdata <- readRDS(file = "data/woodEndophyte_exData.RData")
# here's the OTU matrix
otus <- exdata[['otus']]
head(otus)
# the OTU matrix has been minimally processed
# it has been quality checked to remove failed samples (i.e. abundance of reads below a threshold) and remove OTUs that clearly are not fungal
dim(otus)
# there are 50 samples (rows) and 1000 OTUS (cols)
# here's the covariate dataframe (with sample metadata too)
covariates <- exdata[['covariates']]
head(covariates)
# Trim out rare OTUs because they don't contribute much information:
# keep only OTUs present (count > 0) in more than minPerc % of the samples.
minPerc <- 20
numSamps <- nrow(otus)
minSamps <- floor(numSamps * (minPerc / 100))
# colSums() is the vectorized equivalent of apply(otus > 0, 2, sum):
# the number of samples in which each OTU was observed.
samplesWithOTU <- colSums(otus > 0)
otus <- otus[, samplesWithOTU > minSamps]
dim(otus)
# now we have 55 OTUs
# select environmental variables you'd like to use to explain OTU composition/abundances
covariates %>%
select(size, waterperc, density) -> envVars
# -------------------------------------------------------------------#
# Check out how the raw data are distributed
df <- data.frame(samp = row.names(otus), otus)
df %>%
gather(key = "OTU", value = "num.reads", -samp) -> tmp
ggplot(tmp, aes(x = OTU, y = num.reads)) +
geom_point() +
coord_flip()
# It looks like some OTUs are much more abundant and variable than others
# It's probably a good idea to check our mean-variance relationship then... first let's put this data into mvabund-readable format
# -------------------------------------------------------------------#
# Put data in mvabund-friendly format
fung <- list(abund = otus, x = envVars)
Dat <- mvabund(fung$abund, row.names=row.names(fung$abund)) # so R knows to treat Dat as multivariate abundance
size <- factor(fung$x$size)
waterperc <- fung$x$waterperc
density <- fung$x$density
# -------------------------------------------------------------------#
# Check out the mean-variance relationship
meanvar.plot(Dat, xlab="Mean OTU reads", ylab="Variance in OTU reads") # with log scale
# -------------------------------------------------------------------#
# Specify mvabund models
## Data structure considerations:
# Mean and variance are positively correlated in our data...
# We can deal with this relationship by choosing a family of GLMs with an appropriate mean-variance assumption
# default family = "negative binomial", assumes a quadratic mean-variance relationship and a log-linear relationship between the response variables and any continuous variables
# composition = TRUE/ FALSE (default) fits a separate model to each species. TRUE fits a single model to all variables, including site as a row effect, such that all other terms model relative abundance (compositional effects).
## Biological questions:
# (1) Are endophyte compositions shaped by each of the following host habitat characteristics:
# stem diamter size (categorical)
# host species density (continuous)
# water content (continuous)
# (2) Assuming that size is a key factor, which is more important is shaping the community -- density or water content?
# -------------------------------------------------------------------#
# Approach for Q1:
# We can test the multivariate hypothesis of whether species composition varied across the habitats by using the anova function.
# This gives an analysis of deviance table where we use likelihood ratio tests and resampled p values to look for a significant effect of Habitat on the community data.
ft.size <- manyglm(Dat ~ size, family="negative.binomial", composition = T)
#ft.density <- manyglm(Dat ~ density, family="negative.binomial", composition = T)
#ft.waterperc <- manyglm(Dat ~ waterperc, family="negative.binomial", composition = T)
# check out what is the in the fitted model object
str(ft.size)
ft.size$coefficients
ft.size$stderr.coefficients
# check residuals
# if you have a linear relationship or fan shape, you might be modeling the wrong distribution
# each color is associated with an OTU (and there aren't enough colors)
plot(ft.size, which=1, cex=0.5, caption = "", xlab = "")
#plot(ft.density, which=1, cex=0.5, caption = "", xlab = "")
#plot(ft.waterperc, which=1, cex=0.5, caption = "", xlab = "")
# compute the analysis of deviance table -- be careful this can take a while
# resamp = pit.trap (default); Method of resamping the distribution that accounts for correlation in testing
# test = LR (default); Likelihood-Ratio test
# p.uni = none (default); Calculate univariate (e.g. each OTU) test stats and their p-values
# nBoot = 999 (default); Number of bootstrap iterations
an.size <- anova(ft.size, resamp = "pit.trap", test = "LR", p.uni="none",
nBoot = 99, show.time = "all")
an.size
#We can see from this table that there is a significant effect of size (LRT = 153.1, P = 0.01),
# meaning that the OTU composition clearly differs between the small and large wood stems.
#To examine this further, and see which OTUs are more likely to be found on which size stems,
# we can run univariate tests for each OTU separately...
# This is done by using the p.uni="adjusted" argument in the anova function.
# The "adjusted" part of the argument refers to the resampling method used to compute the p values, taking into account the correlation between the response variables.
an.size.uni <- anova(ft.size, resamp = "pit.trap", test = "LR", p.uni="adjusted",
nBoot = 99, show.time = "all")
an.size.uni
# many individual OTUs are not sensitive to stem size, but a few are like ITSall_OTUa_7145
# now let's figure out whether ITSall_OTUa_7145 increases/decreases with stem size by looking the coefficient estimate
ft.size$coefficients[,'ITSall_OTUa_7145']
# plot the real data to confirm
df <- data.frame(samp = row.names(otus), otus)
df %>%
gather(key = "OTU", value = "num.reads", -samp) %>%
filter(OTU == "ITSall_OTUa_7145") %>%
rename('seq_sampName'='samp') %>%
left_join(covariates) -> tmp
ggplot(tmp, aes(x = size, y = num.reads)) +
geom_jitter(width = .2)
# -------------------------------------------------------------------#
# Approach for Q2: Compare the AIC value of candidate model to that of the base model
ft.base <- manyglm(Dat ~ size, family="negative.binomial", composition = T)
ft.m.density <- manyglm(Dat ~ size + density, family="negative.binomial", composition = T)
ft.m.waterperc <- manyglm(Dat ~ size + waterperc, family="negative.binomial", composition = T)
mod.list <- list(base = ft.base,
density = ft.m.density,
waterperc = ft.m.waterperc)
# check residuals
# each color is associated with an OTU (and there aren't enough colors)
plot(ft.base, which=1, cex=0.5, caption = "", xlab = "")
plot(ft.m.density, which=1, cex=0.5, caption = "", xlab = "")
plot(ft.m.waterperc, which=1, cex=0.5, caption = "", xlab = "")
#calculate the total AIC for each model
aic.list <- lapply(mod.list, function(x){
sum(AIC(x)) # sum AIC values for each OTUId
})
aic.list
bic.list <- lapply(mod.list, function(x){
sum(BIC(x)) # sum AIC values for each OTUId
})
bic.list
#calculate deltaAIC = base AIC - candidate AIC
baseAIC <- aic.list[['base']]
aic.df <- data.frame(modelName = names(unlist(aic.list)),
AIC = unlist(aic.list))
aic.df %>%
mutate(deltaAIC_base_candidate = baseAIC - AIC) -> aic.df
aic.df
# lower AIC indicates a better model fit
# so, adding waterperc to the model increases the model fit (decreases the AIC) whereas adding density decreases the model fit (increases the AIC)
|
# Authors: Clémentine Decamps, UGA
# clementine.decamps@univ-grenoble-alpes.fr
#
#---------------------------------------------
#'detect_zero_value
#'
#' This function detects genes whose expression is at or below `min` in at
#' least `threshold` (as a proportion) of the samples, in both the control
#' and the cancer conditions. NA expression values are not considered when
#' computing the proportions.
#'
#'@param controls A matrix with data to analyze (genes in rows, samples in columns).
#'@param cancer_data A matrix with the other condition's data (same genes in rows).
#'@param threshold The maximum proportion of expression under min tolerated for each gene.
#'@param min The minimum value accepted.
#'
#'@return A logical vector with TRUE for the genes to exclude.
#'
#'@example examples/ex_detect_zero_value.R
#'
#'@export
detect_zero_value = function(controls, cancer_data, threshold, min = 0) {
  stopifnot(nrow(controls) == nrow(cancer_data))
  # Proportion of values <= min per gene, ignoring NAs in BOTH matrices.
  # The previous implementation only tested anyNA(controls): NAs present
  # only in cancer_data then propagated into the comparison and made the
  # per-gene `if` fail with "missing value where TRUE/FALSE needed".
  percentCtrl = rowMeans(controls <= min, na.rm = TRUE)
  percentCancer = rowMeans(cancer_data <= min, na.rm = TRUE)
  values0 = percentCancer >= threshold & percentCtrl >= threshold
  # A gene that is all-NA in one condition yields NaN above, hence NA here;
  # be conservative and do not flag it for exclusion.
  values0[is.na(values0)] = FALSE
  print(paste0(sum(values0), " genes have less than ", min, " counts in ", threshold*100, " % of the samples."))
  return(values0)
}
# Authors: Clémentine Decamps, UGA
# clementine.decamps@univ-grenoble-alpes.fr
#
#---------------------------------------------
#'detect_na_value
#'
#' Detects probes whose value is undefined (NA) in at least `threshold`
#' (as a proportion) of the samples, in both the control and the cancer
#' conditions.
#'
#'@param controls A matrix with data to analyze.
#'@param cancer_data A matrix with the other condition's data to analyze.
#'@param threshold The maximum proportion of NA tolerated for each probe.
#'
#'@return A logical vector with TRUE for the probes to exclude.
#'
#'@export
detect_na_value = function(controls, cancer_data, threshold) {
  # Number of missing values per probe (row) in each condition.
  count_na = function(mat) rowSums(is.na(mat))
  # Flag probes that are too sparse in BOTH conditions.
  flagged = count_na(controls) >= threshold * ncol(controls) &
    count_na(cancer_data) >= threshold * ncol(cancer_data)
  print(paste0(sum(flagged), " probes are NA in at least ", threshold*100, " % of the samples."))
  flagged
}
# Authors: Clémentine Decamps, UGA
# clementine.decamps@univ-grenoble-alpes.fr
#
#---------------------------------------------
#' compute_down_and_up_list
#'
#' For every gene, ranks the other genes and finds those which are
#' consistently lower expressed (D) or higher expressed (U) across the
#' control samples. Same result as "find_D_U_ctrl_size" but faster,
#' because the ranking is delegated to compiled code (compute_DU_cpp).
#'
#'@param controls A matrix with the gene expressions for each patient
#'(genes in rows, patients in columns).
#'@param threshold The proportion of expression that must be in the conditions.
#'@param s_max The maximum number of down and up-expressed genes kept for each gene (default 50).
#'
#'@return This function returns a list of two logical matrices :
#'the D matrix, with TRUE if the row gene has a lower expression than the column gene,
#'and the U Matrix with TRUE if the row gene has a higher expression than the column gene.
#'
#'@example examples/ex_compute_down_and_up_list.R
#'
#'@export
compute_down_and_up_list = function (controls, threshold, s_max = 50){
  print("Computing down and up-expressed genes")
  # Delegate the pairwise down/up computation to the compiled rcpp routine.
  DU = penda::compute_DU_cpp(controls, threshold)
  # Flatten the structure returned by the compiled routine and label both
  # dimensions with the names in DU$n (row order == column order).
  genes_U = unlist(DU$U)
  dimnames(genes_U) = list(DU$n, DU$n)
  genes_D = unlist(DU$D)
  dimnames(genes_D) = list(DU$n, DU$n)
  # Median expression of each gene across controls, used below to decide
  # which genes are "closest" to the current gene.
  median_gene = apply(controls, 1, median)
  print("Size restriction")
  # For each gene g (column i), if its D or U list exceeds s_max, keep only
  # the s_max genes whose median is closest to g: the highest medians among
  # the down-expressed genes, the lowest among the up-expressed ones.
  # NOTE(review): re-indexing by names(sort_median) assumes gene names are
  # unique -- TODO confirm upstream.
  for (i in 1:ncol(genes_D)){
    d_genes = median_gene[genes_D[,i]==1]
    u_genes = median_gene[genes_U[,i]==1]
    if (length(d_genes) > s_max){
      sort_median = sort(d_genes)
      # Keep the s_max largest medians (the tail of the sorted vector).
      sort_median = sort_median[(length(d_genes) - (s_max-1)) : length(d_genes)]
      genes_D[,i] = FALSE
      genes_D[names(sort_median),i] = TRUE
    }
    if (length(u_genes) > s_max){
      sort_median = sort(u_genes)
      # Keep the s_max smallest medians (the head of the sorted vector).
      sort_median = sort_median[1 : s_max]
      genes_U[,i] = FALSE
      genes_U[names(sort_median),i] = TRUE
    }
  }
  # Free intermediate copies before returning the (possibly large) matrices.
  gc()
  return(list(D = genes_D, U = genes_U))
}
| /R/ranking.R | no_license | privefl/penda | R | false | false | 5,027 | r | # Authors: Clémentine Decamps, UGA
# clementine.decamps@univ-grenoble-alpes.fr
#
#---------------------------------------------
#'detect_zero_value
#'
#' This function detects genes whose expression is at or below `min` in at
#' least `threshold` (as a proportion) of the samples, in both the control
#' and the cancer conditions. NA expression values are not considered when
#' computing the proportions.
#'
#'@param controls A matrix with data to analyze (genes in rows, samples in columns).
#'@param cancer_data A matrix with the other condition's data (same genes in rows).
#'@param threshold The maximum proportion of expression under min tolerated for each gene.
#'@param min The minimum value accepted.
#'
#'@return A logical vector with TRUE for the genes to exclude.
#'
#'@example examples/ex_detect_zero_value.R
#'
#'@export
detect_zero_value = function(controls, cancer_data, threshold, min = 0) {
  stopifnot(nrow(controls) == nrow(cancer_data))
  # Proportion of values <= min per gene, ignoring NAs in BOTH matrices.
  # The previous implementation only tested anyNA(controls): NAs present
  # only in cancer_data then propagated into the comparison and made the
  # per-gene `if` fail with "missing value where TRUE/FALSE needed".
  percentCtrl = rowMeans(controls <= min, na.rm = TRUE)
  percentCancer = rowMeans(cancer_data <= min, na.rm = TRUE)
  values0 = percentCancer >= threshold & percentCtrl >= threshold
  # A gene that is all-NA in one condition yields NaN above, hence NA here;
  # be conservative and do not flag it for exclusion.
  values0[is.na(values0)] = FALSE
  print(paste0(sum(values0), " genes have less than ", min, " counts in ", threshold*100, " % of the samples."))
  return(values0)
}
# Authors: Clémentine Decamps, UGA
# clementine.decamps@univ-grenoble-alpes.fr
#
#---------------------------------------------
#'detect_na_value
#'
#' Detects probes whose value is undefined (NA) in at least `threshold`
#' (as a proportion) of the samples, in both the control and the cancer
#' conditions.
#'
#'@param controls A matrix with data to analyze.
#'@param cancer_data A matrix with the other condition's data to analyze.
#'@param threshold The maximum proportion of NA tolerated for each probe.
#'
#'@return A logical vector with TRUE for the probes to exclude.
#'
#'@export
detect_na_value = function(controls, cancer_data, threshold) {
  # Number of missing values per probe (row) in each condition.
  count_na = function(mat) rowSums(is.na(mat))
  # Flag probes that are too sparse in BOTH conditions.
  flagged = count_na(controls) >= threshold * ncol(controls) &
    count_na(cancer_data) >= threshold * ncol(cancer_data)
  print(paste0(sum(flagged), " probes are NA in at least ", threshold*100, " % of the samples."))
  flagged
}
# Authors: Clémentine Decamps, UGA
# clementine.decamps@univ-grenoble-alpes.fr
#
#---------------------------------------------
#' compute_down_and_up_list
#'
#' For every gene, ranks the other genes and finds those which are
#' consistently lower expressed (D) or higher expressed (U) across the
#' control samples. Same result as "find_D_U_ctrl_size" but faster,
#' because the ranking is delegated to compiled code (compute_DU_cpp).
#'
#'@param controls A matrix with the gene expressions for each patient
#'(genes in rows, patients in columns).
#'@param threshold The proportion of expression that must be in the conditions.
#'@param s_max The maximum number of down and up-expressed genes kept for each gene (default 50).
#'
#'@return This function returns a list of two logical matrices :
#'the D matrix, with TRUE if the row gene has a lower expression than the column gene,
#'and the U Matrix with TRUE if the row gene has a higher expression than the column gene.
#'
#'@example examples/ex_compute_down_and_up_list.R
#'
#'@export
compute_down_and_up_list = function (controls, threshold, s_max = 50){
  print("Computing down and up-expressed genes")
  # Delegate the pairwise down/up computation to the compiled rcpp routine.
  DU = penda::compute_DU_cpp(controls, threshold)
  # Flatten the structure returned by the compiled routine and label both
  # dimensions with the names in DU$n (row order == column order).
  genes_U = unlist(DU$U)
  dimnames(genes_U) = list(DU$n, DU$n)
  genes_D = unlist(DU$D)
  dimnames(genes_D) = list(DU$n, DU$n)
  # Median expression of each gene across controls, used below to decide
  # which genes are "closest" to the current gene.
  median_gene = apply(controls, 1, median)
  print("Size restriction")
  # For each gene g (column i), if its D or U list exceeds s_max, keep only
  # the s_max genes whose median is closest to g: the highest medians among
  # the down-expressed genes, the lowest among the up-expressed ones.
  # NOTE(review): re-indexing by names(sort_median) assumes gene names are
  # unique -- TODO confirm upstream.
  for (i in 1:ncol(genes_D)){
    d_genes = median_gene[genes_D[,i]==1]
    u_genes = median_gene[genes_U[,i]==1]
    if (length(d_genes) > s_max){
      sort_median = sort(d_genes)
      # Keep the s_max largest medians (the tail of the sorted vector).
      sort_median = sort_median[(length(d_genes) - (s_max-1)) : length(d_genes)]
      genes_D[,i] = FALSE
      genes_D[names(sort_median),i] = TRUE
    }
    if (length(u_genes) > s_max){
      sort_median = sort(u_genes)
      # Keep the s_max smallest medians (the head of the sorted vector).
      sort_median = sort_median[1 : s_max]
      genes_U[,i] = FALSE
      genes_U[names(sort_median),i] = TRUE
    }
  }
  # Free intermediate copies before returning the (possibly large) matrices.
  gc()
  return(list(D = genes_D, U = genes_U))
}
|
# Exploratory analysis of the `ccdf` credit data (created elsewhere).
# Row-wise maxima of the six amount-owed and loan-amount columns.
owe_cols <- c('Amount_owe1', 'Amount_owe2', 'Amount_owe3',
              'Amount_owe4', 'Amount_owe5', 'Amount_owe6')
loan_cols <- c('Loan_amount1', 'Loan_amount2', 'Loan_amount3',
               'Loan_amount4', 'Loan_amount5', 'Loan_amount6')
ccdf$maxowe <- apply(ccdf[, owe_cols], 1, max)
ccdf$maxloan <- apply(ccdf[, loan_cols], 1, max)

# Distribution of the maximum amount owed, truncated at 20k.
ggplot(data = ccdf, aes(x = maxowe)) +
  geom_histogram(binwidth = 500) +
  scale_x_continuous(limits = c(-500,20000))

# Most frequent values of the two maxima.
sort(table(ccdf$maxowe), decreasing = TRUE)
sort(table(ccdf$maxloan), decreasing = TRUE)

# Number of loans, split by Ever30plus_n12MTH (presumably an ever-30+-days
# delinquency flag over 12 months — confirm against the data dictionary).
ggplot(data = ccdf, aes(x = nloans, fill = Ever30plus_n12MTH)) +
  geom_histogram(position = 'dodge', binwidth = 1) +
  scale_x_continuous(breaks = seq(0,100,1))

# Utilisation ratio, split by the same flag.
ggplot(data = ccdf, aes(x = stdUtil123, fill = Ever30plus_n12MTH)) +
  geom_histogram(binwidth = 0.01) +
  scale_x_continuous(limits = c(-0.01,1.05))

table(ccdf$stdUtil1to6)
| /temp2.R | no_license | pobch/R_NCB | R | false | false | 951 | r | ccdf$maxowe = apply(ccdf[, c('Amount_owe1', 'Amount_owe2', 'Amount_owe3',
'Amount_owe4', 'Amount_owe5', 'Amount_owe6')],
1,
max)
# Row-wise maximum of the six loan-amount columns (duplicate copy of the
# exploratory analysis above; `ccdf` is created elsewhere).
ccdf$maxloan = apply(ccdf[, c('Loan_amount1', 'Loan_amount2', 'Loan_amount3',
'Loan_amount4', 'Loan_amount5', 'Loan_amount6')],
1,
max)
# Distribution of the maximum amount owed, truncated at 20k.
ggplot(data = ccdf, aes(x = maxowe)) +
geom_histogram(binwidth = 500) +
scale_x_continuous(limits = c(-500,20000))
# Most frequent values of the two maxima.
sort(table(ccdf$maxowe), decreasing = T)
sort(table(ccdf$maxloan), decreasing = T)
# Number of loans, split by Ever30plus_n12MTH (presumably a 30+-days
# delinquency flag over 12 months — confirm against the data dictionary).
ggplot(data = ccdf, aes(x = nloans, fill = Ever30plus_n12MTH)) +
geom_histogram(position = 'dodge', binwidth = 1) +
scale_x_continuous(breaks = seq(0,100,1))
# Utilisation ratio, split by the same flag.
ggplot(data = ccdf, aes(x=stdUtil123, fill = Ever30plus_n12MTH)) +
geom_histogram(binwidth = 0.01) +
scale_x_continuous(limits = c(-0.01,1.05))
table(ccdf$stdUtil1to6)
|
library(shiny)

# Sidebar: illustration, documentation link and the four predictor inputs
# (infant status, mean size, shucked weight, shell weight).
abalone_sidebar <- sidebarPanel(
  img(src="AbaloneMeatByJackLikins.jpg",height=175,width=245),
  a("Documentation", href="doc.html", target="_blank"),
  h3('Inputs'),
  radioButtons("infantNonInfant", label = "Infant/Not-Infant",
               choices = list("Infant" = 1, "Non-Infant" = 2), selected = 1),
  numericInput('meanSize','Mean Size (0.1 to 1)',NULL,min=0.1, max=1, step=0.1),
  numericInput('shuckedWeight','Shucked Weight (0.1 to 1.5)',NULL,min=0.1, max=1.5, step=0.1),
  numericInput('shellWeight','Shell Weight (0.1 to 1.1)',NULL,min=0.1, max=1.1, step=0.1),
  actionButton('goButton','Go!')
)

# Main panel: predicted age, a fortune message and a plot output.
abalone_main <- mainPanel(
  h4('Predicted age'),
  verbatimTextOutput("prediction"),
  h4('Your fortune'),
  verbatimTextOutput("fortune"),
  h4('Your abalone'),
  plotOutput("newRings")
)

shinyUI(
  pageWithSidebar(
    headerPanel('Predict Age of Abalone'),
    abalone_sidebar,
    abalone_main
  )
)
| /ui.R | no_license | calvins/DataProducts_AbaloneApp | R | false | false | 1,050 | r | library(shiny)
shinyUI(
  pageWithSidebar(
    # Page title.
    headerPanel('Predict Age of Abalone'),
    # Sidebar: illustration, documentation link and the four predictor
    # inputs (infant status, mean size, shucked weight, shell weight).
    sidebarPanel(
      img(src="AbaloneMeatByJackLikins.jpg",height=175,width=245),
      a("Documentation", href="doc.html", target="_blank"),
      h3('Inputs'),
      radioButtons("infantNonInfant", label = "Infant/Not-Infant",
                   choices = list("Infant" = 1, "Non-Infant" = 2), selected = 1),
      numericInput('meanSize','Mean Size (0.1 to 1)',NULL,min=0.1, max=1, step=0.1),
      numericInput('shuckedWeight','Shucked Weight (0.1 to 1.5)',NULL,min=0.1, max=1.5, step=0.1),
      numericInput('shellWeight','Shell Weight (0.1 to 1.1)',NULL,min=0.1, max=1.1, step=0.1),
      # Action button, presumably gating the prediction in server.R — confirm.
      actionButton('goButton','Go!')
    ),
    # Main panel: predicted age, a fortune message and a plot output.
    mainPanel(
      h4('Predicted age'),
      verbatimTextOutput("prediction"),
      h4('Your fortune'),
      verbatimTextOutput("fortune"),
      h4('Your abalone'),
      plotOutput("newRings")
    )
  )
)
|
#######################################################################################
### Purpose: Estimate Sex-ratio and crosswalks for GBD2019
#######################################################################################

## ---- User configuration (T/F literals replaced by TRUE/FALSE, which cannot
## ---- be reassigned) -----------------------------------------------------------------
bundle_id <- 159                            # Epi database bundle to crosswalk
acause <- "mental_unipolar_mdd"             # GBD cause processed by this script
# Study-level covariates used in the MR-BRT crosswalk model.
covariates <- c("cv_recall_1yr", "cv_symptom_scales", "cv_whs", "cv_lay_interviewer")
uses_csmr <- FALSE                          # Does the bundle include CSMR estimates?
test_sex_by_super_region <- FALSE           # Add super-region covariates to the sex model?
crosswalk_pairs <- 'FILEPATH.csv'           # Matched alternative/reference pairs
age_sex_split_estimates <- "FILEPATH.xlsx"  # Estimates already split by study sex-ratio
need_to_age_split <- FALSE
need_save_bundle_version <- 7118 # Set as true to save bundle version, otherwise specify bundle version here
sex_ratio_by_age <- TRUE                    # Model the sex ratio as a function of age?

library(data.table)
library(openxlsx)
library(msm)
source("FILEPATH.R")
source("FILEPATH.R")
source("FILEPATH.R")
source("FILEPATH.R")
source("FILEPATH.R")
source("FILEPATH.R")
source("FILEPATH.R")
source("FILEPATH.R")
## Get latest review sheet ##
# need_save_bundle_version is TRUE to create a fresh bundle version, or an
# existing bundle_version_id to reuse one (here it holds an id, so == T is FALSE).
if(need_save_bundle_version == T){
v_id <- save_bundle_version(bundle_id, "step2")$bundle_version_id
} else {
v_id <- need_save_bundle_version
}
review_sheet <- get_bundle_version(v_id)
## Flag if age-split by regional pattern estimates exist ##
# NOTE(review): this only prints a warning; it does not halt the script.
if(length(review_sheet[(grepl("age-split child", specificity)),unique(nid)]) > 0){
print(paste0("STOP! The following nid still has age-split estimates by regional pattern in your bundle version: ", review_sheet[(grepl("age-split child", specificity)),unique(nid)]))
}
## Remove excluded estimates ##
# Missing group_review is treated as "keep"; only group_review == 1 rows survive.
review_sheet[is.na(group_review), group_review := 1]
review_sheet <- review_sheet[group_review == 1, ]
# All remaining rows start as reference-definition data.
review_sheet[, study_covariate := "ref"]
# Back-fill missing standard errors:
# - from the 95% uncertainty interval width when lower/upper are present,
# - for prevalence, from a binomial approximation with a continuity term,
# - for incidence/remission, from a small-count (<= 5 cases) rule.
review_sheet[is.na(standard_error) & !is.na(lower), standard_error := (upper - lower) / (qnorm(0.975,0,1)*2)]
review_sheet[is.na(standard_error) & measure == "prevalence", standard_error := sqrt(1/sample_size * mean * (1-mean) + 1/(4*sample_size^2)*qnorm(0.975,0,1)^2)]
review_sheet[is.na(standard_error) & measure %in% c("incidence", "remission"), standard_error := ifelse(mean*sample_size <= 5, ((5-mean*sample_size) / sample_size + mean * sample_size * sqrt(5/sample_size^2))/5, ((mean*sample_size)^0.5)/sample_size)]
##### Estimate and apply sex-ratios -----------------------------------------------------------------------
## Create paired dataset where each row is a sex pair ##
# Male and female estimates from the same study/location/site/year/age/measure
# (and identical study covariates) are matched into one row per pair.
match_columns <- c("nid", "age_start", "age_end", "location_id", "site_memo", "year_start", "year_end", "measure", covariates)
males <- review_sheet[sex == "Male" & is_outlier == 0, c(match_columns, "mean", "standard_error"), with = FALSE]
females <- review_sheet[sex == "Female" & is_outlier == 0, c(match_columns, "mean", "standard_error"), with = FALSE]
setnames(males, "mean", "mean_m")
setnames(males, "standard_error", "se_m")
setnames(females, "mean", "mean_f")
setnames(females, "standard_error", "se_f")
sex_ratios <- merge(males, females, by = match_columns)
## Match on regions ##
locations <- get_location_metadata(location_set_id=9)
sex_ratios <- merge(sex_ratios, locations[,.(location_id, region_id, region_name, super_region_id, super_region_name)], by = "location_id")
sex_ratios <- sex_ratios[!(measure %in% c("mtspecific")), ]
# Male/female ratio with a delta-method standard error, plus age/year midpoints.
sex_ratios[, `:=` (ratio = mean_m / mean_f, se = sqrt(((mean_m^2 / mean_f^2) * ((se_m^2) / (mean_m^2) + (se_f^2) / (mean_f^2)))),
                   mid_age = (age_start + age_end) / 2, mid_year = (year_start + year_end) / 2)]
mean_mid_age <- mean(sex_ratios$mid_age)
# Mean-centred mid age, used for the age interaction below.
sex_ratios[, mc_mid_age := mid_age - mean(mid_age)]
# MR-BRT is fitted in log space.
sex_ratios[, log_ratio := log(ratio)]
sex_ratios[, log_ratio_se := deltamethod(~log(x1), ratio, se^2), by = c("ratio", "se")]
table(sex_ratios[!is.na(ratio) & ratio != 0, measure])
# Create measure CVs (one 0/1 indicator column per measure in the data)
if(isTRUE(uses_csmr)){sex_ratios <- sex_ratios[!(measure %in% c("mtstandard"))]}
measures <- unique(sex_ratios$measure)
for(m in measures){
  sex_ratios[, paste0("cv_", m) := ifelse(measure == m, 1, 0)]
}
table(sex_ratios$super_region_name)
# Create geographic CVs (one indicator column per super region) #
sex_locations <- unique(sex_ratios$super_region_id)
for(r in sex_locations){
  sex_ratios[, paste0("cv_", r) := ifelse(super_region_id == r, 1, 0)]
}
# Create covlist
# NB: loop variable renamed from `c` to avoid shadowing base::c().
for(cv_name in paste0("cv_", measures)){
  cov <- cov_info(cv_name, "X")
  if(cv_name == paste0("cv_", measures)[1]){
    cov_list <- list(cov)
  } else {
    cov_list <- c(cov_list, list(cov))
  }
}
table(sex_ratios$super_region_name)
if(isTRUE(test_sex_by_super_region)){
  for(cv_name in paste0("cv_", sex_locations[sex_locations != 64])){ # set 64 (High income) as reference
    cov <- cov_info(cv_name, "X")
    cov_list <- c(cov_list, list(cov))
  }
}
# Age interaction: centred mid age, active only on prevalence rows.
sex_ratios[, prev_by_mid_age := cv_prevalence * mc_mid_age]
if(isTRUE(sex_ratio_by_age)){ cov_list <- c(cov_list, list(cov_info("prev_by_mid_age", "X")))}
dir.create(file.path(paste0("FILEPATH", acause, "/")), showWarnings = FALSE)
## Run MR-BRT ##
# Log sex ratio modelled on the measure indicators (and optionally the age
# interaction), with 10% trimming and random effects on nid.
model <- run_mr_brt(
  output_dir = paste0("FILEPATH", acause, "/"),
  model_label = "sex",
  data = sex_ratios[!is.na(ratio) & ratio != 0 & ratio != Inf,],
  mean_var = "log_ratio",
  se_var = "log_ratio_se",
  covs = cov_list,
  remove_x_intercept = TRUE,
  method = "trim_maxL",
  trim_pct = 0.1,
  study_id = "nid",
  overwrite_previous = TRUE,
  lasso = FALSE)
# Coefficient table with 95% CIs and a significance flag.
sex_coefs <- data.table(load_mr_brt_outputs(model)$model_coef)
sex_coefs[, `:=` (lower = beta_soln - sqrt(beta_var)*qnorm(0.975, 0, 1), upper = beta_soln + sqrt(beta_var)*qnorm(0.975, 0, 1))]
sex_coefs[, `:=` (sig = ifelse(lower * upper > 0, "Yes", "No"))]
sex_coefs
check_for_outputs(model)
# Prediction frame: every 0/1 combination of the measure CVs, built with
# do.call(expand.grid, ...) instead of the previous eval(parse(text = ...)).
measure_cv_grid <- setNames(rep(list(c(0, 1)), length(measures)), paste0("cv_", measures))
if(!isTRUE(sex_ratio_by_age)){
  sex_ratio <- do.call(expand.grid, measure_cv_grid)
  sex_ratio <- as.data.table(predict_mr_brt(model, newdata = sex_ratio)["model_summaries"])
  names(sex_ratio) <- gsub("model_summaries.", "", names(sex_ratio))
  names(sex_ratio) <- gsub("X_", "", names(sex_ratio))
} else {
  # Also expand over centred mid ages 0-100 (plus 0 for the no-age row).
  sex_ratio <- do.call(expand.grid, c(measure_cv_grid, list(prev_by_mid_age = c((0-mean_mid_age):(100-mean_mid_age), 0))))
  sex_ratio <- as.data.table(predict_mr_brt(model, newdata = sex_ratio)["model_summaries"])
  names(sex_ratio) <- gsub("model_summaries.", "", names(sex_ratio))
  names(sex_ratio) <- gsub("X_", "", names(sex_ratio))
  sex_ratio <- rbind(sex_ratio[cv_relrisk == 1 & cv_prevalence == 0 & prev_by_mid_age == 0, ], sex_ratio[cv_relrisk == 0 & cv_prevalence == 1, ])
}
# Label each prediction row by its active CVs. Rows with zero or several
# active measures get labels outside `measures` and are dropped below.
# (Fixed: the original paste0(measure, ", ") dropped `m` from the label;
# the final filtered result is unchanged, since such rows are removed.)
sex_ratio[, measure := ""]
for(m in names(sex_ratio)[names(sex_ratio) %like% "cv_"]){
  sex_ratio[get(m) == 1, measure := ifelse(measure != "", paste0(measure, ", ", m), m)]
}
sex_ratio[, measure := gsub("cv_", "", measure)]
sex_ratio <- sex_ratio[measure %in% measures,]
# Back-transform to normal space.
sex_ratio[, `:=` (ratio = exp(Y_mean), ratio_se = (exp(Y_mean_hi) - exp(Y_mean_lo))/(2*qnorm(0.975,0,1)))]
sex_ratio[, (c(paste0("cv_", measures), "Y_mean", "Z_intercept", "Y_negp", "Y_mean_lo", "Y_mean_hi", "Y_mean_fe", "Y_negp_fe", "Y_mean_lo_fe", "Y_mean_hi_fe")) := NULL]
if(isTRUE(sex_ratio_by_age)){
  # Un-centre the age axis and keep one ratio per measure/age.
  sex_ratio[, `:=` (mid_age = prev_by_mid_age+mean_mid_age, prev_by_mid_age = NULL)]
  sex_ratio[, mid_age := round(mid_age)]
  sex_ratio <- unique(sex_ratio, by = c("measure", "mid_age"))
  # relrisk has a single age-constant ratio, replicated over ages 0-100.
  sex_ratio <- rbind(sex_ratio[measure == "prevalence", ], data.table(measure = "relrisk", ratio = sex_ratio[measure == "relrisk", ratio],
                                                                      ratio_se = sex_ratio[measure == "relrisk", ratio_se], mid_age = c(0:100)))
}
write.csv(sex_ratio, paste0("FILEPATH", acause, "FILEPATH.csv"),row.names=F)
## Load in estimates that are age-sex split using the study sex-ratio
# The sheet carries an `age_sex_split` column: -1 marks parent (pre-split)
# rows, 1 marks the split child rows.
age_sex_split <- data.table(read.xlsx(age_sex_split_estimates))
sex_parents <- age_sex_split[age_sex_split == -1 & sex != "Both", seq]
age_parents <- age_sex_split[age_sex_split == -1 & sex == "Both", seq]
# NOTE(review): the result of this expression is neither assigned nor printed
# when the script is sourced — likely a leftover inspection line.
review_sheet[seq %in% c(sex_parents, age_parents) & group_review == 0,]
# Children inherit the outlier status of their parent rows.
outlier_agesexsplit <- review_sheet[seq %in% c(sex_parents, age_parents) & is_outlier == 1, seq]
age_sex_split <- age_sex_split[age_sex_split == 1, ]
age_sex_split[age_parent %in% outlier_agesexsplit, is_outlier := 1]
age_sex_split[sex_parent %in% outlier_agesexsplit, is_outlier := 1]
age_sex_split[, seq := NA]
## Crosswalk both-sex data ##
review_sheet_both <- review_sheet[sex == "Both" & !(seq %in% age_parents), ]
review_sheet_both[, `:=` (crosswalk_parent_seq = NA)]
review_sheet_both[, mid_age := round((age_start + age_end) / 2)]
# Populations by sex and 5-year GBD age group, needed to split both-sex rows.
population <- get_population(location_id = unique(review_sheet_both$location_id), decomp_step = 'step2', age_group_id = c(1, 6:20, 30:32, 235), sex_id = c(1, 2), year_id = seq(min(review_sheet_both$year_start), max(review_sheet_both$year_end)))
# Parse age bounds out of group names of the form "X to Y" (tokens 1 and 3);
# then fix the two groups that do not follow the pattern (<5 and 95 plus).
age_ids <- get_ids('age_group')[age_group_id %in% c(1, 6:20, 30:32, 235),]
suppressWarnings(age_ids[, `:=` (age_start = as.numeric(unlist(strsplit(age_group_name, " "))[1]), age_end = as.numeric(unlist(strsplit(age_group_name, " "))[3])), by = "age_group_id"])
age_ids[age_group_id == 1, `:=` (age_start = 0, age_end = 4)]
age_ids[age_group_id == 235, `:=` (age_end = 99)]
population <- merge(population, age_ids, by = "age_group_id")
# Attach the predicted male/female ratio to every both-sex row (by age too
# when the sex model includes the age interaction).
if(sex_ratio_by_age == T){
review_sheet_both <- merge(review_sheet_both, sex_ratio, by = c("measure", "mid_age"))
} else {
review_sheet_both <- merge(review_sheet_both, sex_ratio, by = "measure")
}
# Round each row's age range to 5-year bounds, widening degenerate ranges so
# at least one full group is covered; age_end_r then ends in 4 or 9.
review_sheet_both[, `:=` (mid_age = (age_start + age_end) / 2, age_start_r = round(age_start/5)*5, age_end_r = round(age_end/5)*5)]
review_sheet_both[age_start_r == age_end_r & mid_age < age_start_r, age_start_r := age_start_r - 5]
review_sheet_both[age_start_r == age_end_r & mid_age >= age_start_r, age_end_r := age_end_r + 5]
review_sheet_both[, age_end_r := age_end_r - 1]
# Aggregate population counts over an age range, a year range and one sex.
#
# l        : location_id
# a_s, a_e : first/last age of the range (a_s a multiple of 5, a_e ending in
#            4 or 9, as produced by age_start_r / age_end_r above)
# y_s, y_e : first/last year
# s        : sex_id (1 = male, 2 = female)
#
# Selects the 5-year age groups fully contained in [a_s, a_e] and sums their
# population. The original bounds `c(a_s:a_e-4)` / `c(a_s+4:a_e)` parsed as
# (a_s:a_e)-4 and a_s+(4:a_e); for the groups in `age_ids` (all of which
# satisfy age_end = age_start + 4) the explicit parentheses below select
# exactly the same groups, with the intended meaning made clear.
pop_agg <- function(l, a_s, a_e, y_s, y_e, s){
a_ids <- age_ids[age_start %in% a_s:(a_e - 4) & age_end %in% (a_s + 4):a_e, age_group_id]
pop <- population[location_id == l & age_group_id %in% a_ids & sex_id == s & year_id %in% c(y_s:y_e), sum(population)]
return(pop)
}
# Aggregate male/female/both-sex populations over each row's rounded age range.
review_sheet_both[, pop_m := pop_agg(location_id, age_start_r, age_end_r, year_start, year_end, s = 1), by = "seq"]
review_sheet_both[, pop_f := pop_agg(location_id, age_start_r, age_end_r, year_start, year_end, s = 2), by = "seq"]
review_sheet_both[, pop_b := pop_m + pop_f]
# Female estimate: mean_f = mean_b * pop_b / (pop_f + ratio * pop_m), with a
# delta-method standard error combining the input SE and the ratio SE.
review_sheet_female <- copy(review_sheet_both)
review_sheet_female[, `:=` (sex = "Female", mean_n = mean * (pop_b), mean_d =(pop_f + ratio * pop_m),
var_n = (standard_error^2 * pop_b^2), var_d = ratio_se^2 * pop_m^2)]
review_sheet_female[, `:=` (mean = mean_n / mean_d, standard_error = sqrt(((mean_n^2) / (mean_d^2)) * (var_n / (mean_n^2) + var_d / (mean_d^2))))]
review_sheet_female[, `:=` (study_covariate = "sex", crosswalk_parent_seq = seq, seq = NA)]
# Male estimate: mean_m = mean_b * pop_b / (pop_m + (1/ratio) * pop_f).
review_sheet_male <- copy(review_sheet_both)
review_sheet_male[, `:=` (sex = "Male", mean_n = mean * (pop_b), mean_d =(pop_m + (1/ratio) * pop_f),
var_n = (standard_error^2 * pop_b^2), var_d = ratio_se^2 * pop_f^2)]
review_sheet_male[, `:=` (mean = mean_n / mean_d, standard_error = sqrt(((mean_n^2) / (mean_d^2)) * (var_n / (mean_n^2) + var_d / (mean_d^2))))]
review_sheet_male[, `:=` (study_covariate = "sex", crosswalk_parent_seq = seq, seq = NA)]
# Recombine the split rows with the already sex-specific data, then drop the
# helper columns.
review_sheet_final <- rbind(review_sheet_male, review_sheet_female, review_sheet[sex != "Both",], fill = T)
col_remove <- c("mid_age", "age_start_r", "age_end_r", "pop_m", "pop_f", "pop_b", "mean_n", "mean_d", "var_n", "var_d", "ratio", "ratio_se")
review_sheet_final[, (col_remove) := NULL]
## Re-add estimates that are age-sex split using the study sex-ratio
setnames(age_sex_split, "age_parent", "crosswalk_parent_seq")
age_sex_split[, `:=` (study_covariate = "sex", sex_parent = NULL, seq = NULL)]
age_sex_split[, `:=` (seq = NA)] #
# The parents of study-split estimates are replaced by their children.
review_sheet_final <- review_sheet_final[!(seq %in% c(sex_parents, age_parents)),]
review_sheet_final <- rbind(review_sheet_final, age_sex_split, fill = T)
# 3.92 = 2 * 1.96, the width of a 95% uncertainty interval.
review_sheet_final[is.na(standard_error), standard_error := (upper-lower) / 3.92]
##### Estimate and apply study-level covariates -----------------------------------------------------------------------
# The MR-BRT design-matrix dummies are named d_*, matching the pair file.
covariates <- gsub("cv_", "d_", covariates)
# NB: loop variable renamed from `c` to avoid shadowing base::c().
for(cov_name in covariates){
  cov <- cov_info(cov_name, "X")
  if(cov_name == covariates[1]){
    cov_list <- list(cov)
  } else {
    cov_list <- c(cov_list, list(cov))
  }
}
# Matched alternative/reference pairs: log ratio of the alternative to the
# reference estimate, with a delta-method standard error.
crosswalk_pairs_data <- fread(crosswalk_pairs)
crosswalk_pairs_data[, ratio := a_mean / r_mean]
crosswalk_pairs_data[, se := sqrt(((a_mean^2 / r_mean^2) * ((a_se^2) / (a_mean^2) + (r_se^2) / (r_mean^2))))]
crosswalk_pairs_data[, log_effect_size := log(ratio)]
crosswalk_pairs_data[, log_effect_size_se := deltamethod(~log(x1), ratio, se^2), by = c("ratio", "se")]
dir.create(file.path(paste0("FILEPATH", acause, "/outputs/")), showWarnings = FALSE)
# Crosswalk model: log effect size on the study covariates, 10% trimming,
# random effects by matched-pair id.
crosswalk_fit <- run_mr_brt(
  output_dir = paste0("FILEPATH", acause, "/outputs/"),
  model_label = "mdd",
  data = crosswalk_pairs_data[!is.na(log_effect_size_se)],
  mean_var = "log_effect_size",
  se_var = "log_effect_size_se",
  covs = cov_list,
  remove_x_intercept = TRUE,
  method = "trim_maxL",
  trim_pct = 0.1,
  study_id = "id",
  overwrite_previous = TRUE,
  lasso = FALSE)
check_for_outputs(crosswalk_fit)
# Predictions for every 0/1 combination of the study covariates, built with
# do.call(expand.grid, ...) instead of the previous eval(parse(text = ...)).
predicted <- do.call(expand.grid, setNames(rep(list(c(0, 1)), length(covariates)), covariates))
predicted <- as.data.table(predict_mr_brt(crosswalk_fit, newdata = predicted)["model_summaries"])
names(predicted) <- gsub("model_summaries.", "", names(predicted))
names(predicted) <- gsub("X_d_", "cv_", names(predicted))
# Standard error of the log adjustment, from the prediction interval width.
predicted[, `:=` (Y_se = (Y_mean_hi - Y_mean_lo)/(2*qnorm(0.975,0,1)))]
crosswalk_reporting <- copy(predicted) # for reporting later
predicted[, (c("Z_intercept", "Y_negp", "Y_mean_lo", "Y_mean_hi", "Y_mean_fe", "Y_negp_fe", "Y_mean_lo_fe", "Y_mean_hi_fe")) := NULL]
# Attach the predicted adjustment for each row's covariate combination.
review_sheet_final <- merge(review_sheet_final, predicted, by=gsub("d_", "cv_", covariates))
# Adjust in log space: subtract the predicted log effect and widen the SE.
review_sheet_final[, `:=` (log_mean = log(mean), log_se = deltamethod(~log(x1), mean, standard_error^2)), by = c("mean", "standard_error")]
# predicted[1, Y_mean] is the reference combination (all covariates 0), so
# only non-reference, non-zero rows are adjusted.
review_sheet_final[Y_mean != predicted[1,Y_mean] & mean != 0, `:=` (log_mean = log_mean - Y_mean, log_se = sqrt(log_se^2 + Y_se^2))]
review_sheet_final[Y_mean != predicted[1,Y_mean] & mean != 0, `:=` (mean = exp(log_mean), standard_error = deltamethod(~exp(x1), log_mean, log_se^2)), by = c("log_mean", "log_se")]
review_sheet_final[Y_mean != predicted[1,Y_mean] & mean != 0, `:=` (cases = NA, lower = NA, upper = NA)]
review_sheet_final[Y_mean != predicted[1,Y_mean] & mean != 0 & is.na(crosswalk_parent_seq), `:=` (crosswalk_parent_seq = seq, seq = NA)]
# Record which covariates were applied on each row.
# NOTE(review): `c` is used as the loop variable, shadowing base::c();
# function lookup still finds base::c, but consider renaming.
for(c in covariates){
c <- gsub("d_", "cv_", c)
review_sheet_final[get(c) == 1, study_covariate := ifelse(is.na(study_covariate) | study_covariate == "ref", gsub("cv_", "", c), paste0(study_covariate, ", ", gsub("cv_", "", c)))]
}
review_sheet_final[, (c("Y_mean", "Y_se", "log_mean", "log_se")) := NULL]
# For upload validation #
review_sheet_final[study_covariate != "ref", `:=` (lower = NA, upper = NA, cases = NA, sample_size = NA)]
review_sheet_final[is.na(lower), uncertainty_type_value := NA]
review_sheet_final <- review_sheet_final[location_id != 95,]
review_sheet_final <- review_sheet_final[group_review == 1, ]
crosswalk_save_folder <- paste0("FILEPATH", acause, "/", bundle_id, "FILEPATH")
dir.create(file.path(crosswalk_save_folder), showWarnings = FALSE)
crosswalk_save_file <- paste0(crosswalk_save_folder, "crosswalk_", Sys.Date(), ".xlsx")
write.xlsx(review_sheet_final, crosswalk_save_file, sheetName = "extraction")
##### Upload crosswalked dataset to database -----------------------------------------------------------------------
save_crosswalk_version(v_id, crosswalk_save_file, description = "Global sex-ratio & MR-BRT crosswalks applied #1")
## Save study-level covariates to a csv for later reporting ##
crosswalk_reporting[, covariate := ""]
for(c in names(crosswalk_reporting)[names(crosswalk_reporting) %like% "cv_"]){
# NOTE(review): when `covariate` is already non-empty this keeps only
# "<old>, " and drops `c`; such multi-covariate labels are filtered out by
# the %in% below, but the appended name looks like it was meant to be kept.
crosswalk_reporting[get(c) == 1, covariate := ifelse(covariate != "", paste0(covariate, ", "), c)]
}
crosswalk_reporting[, covariate := gsub("cv_", "", covariate)]
# Keep only single-covariate rows; report betas in log and exponentiated form.
crosswalk_reporting <- crosswalk_reporting[covariate %in% substring(covariates, 3, nchar(covariates)),]
crosswalk_reporting <- crosswalk_reporting[,.(covariate, beta = Y_mean, beta_low = Y_mean_lo, beta_high = Y_mean_hi,
exp_beta = exp(Y_mean), exp_beta_low = exp(Y_mean_lo), exp_beta_high = exp(Y_mean_hi))]
write.csv(crosswalk_reporting, paste0("FILEPATH", acause, "FILEPATH.csv"),row.names=F)
| /gbd_2019/nonfatal_code/mental_unipolar_mdd/mr_brt_crosswalks/crosswalking_mdd_cleaned.R | no_license | Nermin-Ghith/ihme-modeling | R | false | false | 16,489 | r | #######################################################################################
### Purpose: Estimate Sex-ratio and crosswalks for GBD2019
#######################################################################################
bundle_id <- 159
acause <-"mental_unipolar_mdd"
covariates <- c("cv_recall_1yr", "cv_symptom_scales" , "cv_whs", "cv_lay_interviewer")
uses_csmr <- F
test_sex_by_super_region <- F
crosswalk_pairs <- 'FILEPATH.csv'
age_sex_split_estimates <- "FILEPATH.xlsx"
need_to_age_split <- F
need_save_bundle_version <- 7118 # Set as true to save bundle version, otherwise specify bundle version here
sex_ratio_by_age <- T
library(data.table)
library(openxlsx)
library(msm)
source("FILEPATH.R")
source("FILEPATH.R")
source("FILEPATH.R")
source("FILEPATH.R")
source("FILEPATH.R")
source("FILEPATH.R")
source("FILEPATH.R")
source("FILEPATH.R")
## Get latest review sheet ##
if(need_save_bundle_version == T){
v_id <- save_bundle_version(bundle_id, "step2")$bundle_version_id
} else {
v_id <- need_save_bundle_version
}
review_sheet <- get_bundle_version(v_id)
## Flag if age-split by regional pattern estimates exist ##
if(length(review_sheet[(grepl("age-split child", specificity)),unique(nid)]) > 0){
print(paste0("STOP! The following nid still has age-split estimates by regional pattern in your bundle version: ", review_sheet[(grepl("age-split child", specificity)),unique(nid)]))
}
## Remove excluded estimates ##
review_sheet[is.na(group_review), group_review := 1]
review_sheet <- review_sheet[group_review == 1, ]
review_sheet[, study_covariate := "ref"]
review_sheet[is.na(standard_error) & !is.na(lower), standard_error := (upper - lower) / (qnorm(0.975,0,1)*2)]
review_sheet[is.na(standard_error) & measure == "prevalence", standard_error := sqrt(1/sample_size * mean * (1-mean) + 1/(4*sample_size^2)*qnorm(0.975,0,1)^2)]
review_sheet[is.na(standard_error) & measure %in% c("incidence", "remission"), standard_error := ifelse(mean*sample_size <= 5, ((5-mean*sample_size) / sample_size + mean * sample_size * sqrt(5/sample_size^2))/5, ((mean*sample_size)^0.5)/sample_size)]
##### Estimate and apply sex-ratios -----------------------------------------------------------------------
## Create paired dataset where each row is a sex pair ##
match_columns <- c("nid", "age_start", "age_end", "location_id", "site_memo", "year_start", "year_end", "measure", covariates)
males <- review_sheet[sex == "Male" & is_outlier == 0, c(match_columns, "mean", "standard_error"), with = F]
females <- review_sheet[sex == "Female" & is_outlier == 0, c(match_columns, "mean", "standard_error"), with = F]
setnames(males, "mean", "mean_m")
setnames(males, "standard_error", "se_m")
setnames(females, "mean", "mean_f")
setnames(females, "standard_error", "se_f")
sex_ratios <- merge(males, females, by = match_columns)
## Match on regions ##
locations <- get_location_metadata(location_set_id=9)
sex_ratios <- merge(sex_ratios, locations[,.(location_id, region_id, region_name, super_region_id, super_region_name)], by = "location_id")
sex_ratios<- sex_ratios[!(measure %in% c("mtspecific")), ]
sex_ratios[, `:=` (ratio = mean_m / mean_f, se = sqrt(((mean_m^2 / mean_f^2) * ((se_m^2) / (mean_m^2) + (se_f^2) / (mean_f^2)))),
mid_age = (age_start + age_end) / 2, mid_year = (year_start + year_end) / 2)]
mean_mid_age <- mean(sex_ratios$mid_age)
sex_ratios[, mc_mid_age := mid_age - mean(mid_age)]
sex_ratios[, log_ratio := log(ratio)]
sex_ratios[, log_ratio_se := deltamethod(~log(x1), ratio, se^2), by = c("ratio", "se")]
table(sex_ratios[!is.na(ratio) & ratio != 0, measure])
# Create measure CVs
if(uses_csmr == T){sex_ratios <- sex_ratios[!(measure %in% c("mtstandard"))]}
measures <- unique(sex_ratios$measure)
for(m in measures){
sex_ratios[, paste0("cv_", m) := ifelse(measure == m, 1, 0)]
}
table(sex_ratios$super_region_name)
# Create geographic CVs #
sex_locations <- unique(sex_ratios$super_region_id)
for(r in sex_locations){
sex_ratios[, paste0("cv_", r) := ifelse(super_region_id == r, 1, 0)]
}
# Create covlist
for(c in paste0("cv_", measures)){
cov <- cov_info(c, "X")
if(c == paste0("cv_", measures)[1]){
cov_list <- list(cov)
} else {
cov_list <- c(cov_list, list(cov))
}
}
table(sex_ratios$super_region_name)
if(test_sex_by_super_region == T){
for(c in paste0("cv_", sex_locations[sex_locations != 64])){ # set 64 (High income) as reference
cov <- cov_info(c, "X")
cov_list <- c(cov_list, list(cov))
}
}
sex_ratios[, prev_by_mid_age := cv_prevalence * mc_mid_age]
if(sex_ratio_by_age == T){ cov_list <- c(cov_list, list(cov_info("prev_by_mid_age", "X")))}
dir.create(file.path(paste0("FILEPATH", acause, "/")), showWarnings = FALSE)
## Run MR-BRT ##
model <- run_mr_brt(
output_dir = paste0("FILEPATH", acause, "/"),
model_label = "sex",
data = sex_ratios[!is.na(ratio) & ratio != 0 & ratio != Inf,],
mean_var = "log_ratio",
se_var = "log_ratio_se",
covs = cov_list,
remove_x_intercept = T,
method = "trim_maxL",
trim_pct = 0.1,
study_id = "nid",
overwrite_previous = TRUE,
lasso = F)
sex_coefs <- data.table(load_mr_brt_outputs(model)$model_coef)
sex_coefs[, `:=` (lower = beta_soln - sqrt(beta_var)*qnorm(0.975, 0, 1), upper = beta_soln + sqrt(beta_var)*qnorm(0.975, 0, 1))]
sex_coefs[, `:=` (sig = ifelse(lower * upper > 0, "Yes", "No"))]
sex_coefs
check_for_outputs(model)
if(sex_ratio_by_age == F){
eval(parse(text = paste0("sex_ratio <- expand.grid(", paste0(paste0("cv_", measures), "=c(0, 1)", collapse = ", "), ")")))
sex_ratio <- as.data.table(predict_mr_brt(model, newdata = sex_ratio)["model_summaries"])
names(sex_ratio) <- gsub("model_summaries.", "", names(sex_ratio))
names(sex_ratio) <- gsub("X_", "", names(sex_ratio))
} else {
eval(parse(text = paste0("sex_ratio <- expand.grid(", paste0(paste0("cv_", measures), "=c(0, 1)", collapse = ", "), ", prev_by_mid_age = c((0-mean_mid_age):(100-mean_mid_age), 0))")))
sex_ratio <- as.data.table(predict_mr_brt(model, newdata = sex_ratio)["model_summaries"])
names(sex_ratio) <- gsub("model_summaries.", "", names(sex_ratio))
names(sex_ratio) <- gsub("X_", "", names(sex_ratio))
sex_ratio <- rbind(sex_ratio[cv_relrisk == 1 & cv_prevalence == 0 & prev_by_mid_age == 0, ], sex_ratio[cv_relrisk == 0 & cv_prevalence == 1, ])
}
sex_ratio[, measure := ""]
for(m in names(sex_ratio)[names(sex_ratio) %like% "cv_"]){
sex_ratio[get(m) == 1, measure := ifelse(measure != "", paste0(measure, ", "), m)]
}
sex_ratio[, measure := gsub("cv_", "", measure)]
sex_ratio <- sex_ratio[measure %in% measures,]
sex_ratio[, `:=` (ratio = exp(Y_mean), ratio_se = (exp(Y_mean_hi) - exp(Y_mean_lo))/(2*qnorm(0.975,0,1)))]
sex_ratio[, (c(paste0("cv_", measures), "Y_mean", "Z_intercept", "Y_negp", "Y_mean_lo", "Y_mean_hi", "Y_mean_fe", "Y_negp_fe", "Y_mean_lo_fe", "Y_mean_hi_fe")) := NULL]
if(sex_ratio_by_age == T){
sex_ratio[, `:=` (mid_age = prev_by_mid_age+mean_mid_age, prev_by_mid_age = NULL)]
sex_ratio[, mid_age := round(mid_age)]
sex_ratio <- unique(sex_ratio, by = c("measure", "mid_age"))
sex_ratio <- rbind(sex_ratio[measure == "prevalence", ], data.table(measure = "relrisk", ratio = sex_ratio[measure == "relrisk", ratio],
ratio_se = sex_ratio[measure == "relrisk", ratio_se], mid_age = c(0:100)))
}
write.csv(sex_ratio, paste0("FILEPATH", acause, "FILEPATH.csv"),row.names=F)
## Load in estimates that are age-sex split using the study sex-ratio
age_sex_split <- data.table(read.xlsx(age_sex_split_estimates))
sex_parents <- age_sex_split[age_sex_split == -1 & sex != "Both", seq]
age_parents <- age_sex_split[age_sex_split == -1 & sex == "Both", seq]
review_sheet[seq %in% c(sex_parents, age_parents) & group_review == 0,]
outlier_agesexsplit <- review_sheet[seq %in% c(sex_parents, age_parents) & is_outlier == 1, seq]
age_sex_split <- age_sex_split[age_sex_split == 1, ]
age_sex_split[age_parent %in% outlier_agesexsplit, is_outlier := 1]
age_sex_split[sex_parent %in% outlier_agesexsplit, is_outlier := 1]
age_sex_split[, seq := NA]
## Crosswalk both-sex data ##
review_sheet_both <- review_sheet[sex == "Both" & !(seq %in% age_parents), ]
review_sheet_both[, `:=` (crosswalk_parent_seq = NA)]
review_sheet_both[, mid_age := round((age_start + age_end) / 2)]
population <- get_population(location_id = unique(review_sheet_both$location_id), decomp_step = 'step2', age_group_id = c(1, 6:20, 30:32, 235), sex_id = c(1, 2), year_id = seq(min(review_sheet_both$year_start), max(review_sheet_both$year_end)))
age_ids <- get_ids('age_group')[age_group_id %in% c(1, 6:20, 30:32, 235),]
suppressWarnings(age_ids[, `:=` (age_start = as.numeric(unlist(strsplit(age_group_name, " "))[1]), age_end = as.numeric(unlist(strsplit(age_group_name, " "))[3])), by = "age_group_id"])
age_ids[age_group_id == 1, `:=` (age_start = 0, age_end = 4)]
age_ids[age_group_id == 235, `:=` (age_end = 99)]
population <- merge(population, age_ids, by = "age_group_id")
if(sex_ratio_by_age == T){
review_sheet_both <- merge(review_sheet_both, sex_ratio, by = c("measure", "mid_age"))
} else {
review_sheet_both <- merge(review_sheet_both, sex_ratio, by = "measure")
}
review_sheet_both[, `:=` (mid_age = (age_start + age_end) / 2, age_start_r = round(age_start/5)*5, age_end_r = round(age_end/5)*5)]
review_sheet_both[age_start_r == age_end_r & mid_age < age_start_r, age_start_r := age_start_r - 5]
review_sheet_both[age_start_r == age_end_r & mid_age >= age_start_r, age_end_r := age_end_r + 5]
review_sheet_both[, age_end_r := age_end_r - 1]
pop_agg <- function(l, a_s, a_e, y_s, y_e, s){
a_ids <- age_ids[age_start %in% c(a_s:a_e-4) & age_end %in% c(a_s+4:a_e), age_group_id]
pop <- population[location_id == l & age_group_id %in% a_ids & sex_id == s & year_id %in% c(y_s:y_e),sum(population)]
return(pop)
}
review_sheet_both[, pop_m := pop_agg(location_id, age_start_r, age_end_r, year_start, year_end, s = 1), by = "seq"]
review_sheet_both[, pop_f := pop_agg(location_id, age_start_r, age_end_r, year_start, year_end, s = 2), by = "seq"]
review_sheet_both[, pop_b := pop_m + pop_f]
review_sheet_female <- copy(review_sheet_both)
review_sheet_female[, `:=` (sex = "Female", mean_n = mean * (pop_b), mean_d =(pop_f + ratio * pop_m),
var_n = (standard_error^2 * pop_b^2), var_d = ratio_se^2 * pop_m^2)]
review_sheet_female[, `:=` (mean = mean_n / mean_d, standard_error = sqrt(((mean_n^2) / (mean_d^2)) * (var_n / (mean_n^2) + var_d / (mean_d^2))))]
review_sheet_female[, `:=` (study_covariate = "sex", crosswalk_parent_seq = seq, seq = NA)]
review_sheet_male <- copy(review_sheet_both)
review_sheet_male[, `:=` (sex = "Male", mean_n = mean * (pop_b), mean_d =(pop_m + (1/ratio) * pop_f),
var_n = (standard_error^2 * pop_b^2), var_d = ratio_se^2 * pop_f^2)]
review_sheet_male[, `:=` (mean = mean_n / mean_d, standard_error = sqrt(((mean_n^2) / (mean_d^2)) * (var_n / (mean_n^2) + var_d / (mean_d^2))))]
review_sheet_male[, `:=` (study_covariate = "sex", crosswalk_parent_seq = seq, seq = NA)]
review_sheet_final <- rbind(review_sheet_male, review_sheet_female, review_sheet[sex != "Both",], fill = T)
col_remove <- c("mid_age", "age_start_r", "age_end_r", "pop_m", "pop_f", "pop_b", "mean_n", "mean_d", "var_n", "var_d", "ratio", "ratio_se")
review_sheet_final[, (col_remove) := NULL]
## Re-add estimates that are age-sex split using the study sex-ratio
setnames(age_sex_split, "age_parent", "crosswalk_parent_seq")
age_sex_split[, `:=` (study_covariate = "sex", sex_parent = NULL, seq = NULL)]
age_sex_split[, `:=` (seq = NA)] #
review_sheet_final <- review_sheet_final[!(seq %in% c(sex_parents, age_parents)),]
review_sheet_final <- rbind(review_sheet_final, age_sex_split, fill = T)
review_sheet_final[is.na(standard_error), standard_error := (upper-lower) / 3.92]
##### Estimate and apply study-level covariates -----------------------------------------------------------------------
# Bundle covariates come in as cv_*; MR-BRT expects d_* design-matrix names.
covariates <- gsub("cv_", "d_", covariates)
# One MR-BRT covariate specification per study covariate.  lapply replaces the
# old grow-a-list loop, which special-cased the first element (and would have
# reset the list if the first name ever recurred); the result is the same list
# of cov_info objects in the same order.
cov_list <- lapply(covariates, function(c) cov_info(c, "X"))
# ---- Within-study crosswalk pairs: alternative vs reference case definition ----
crosswalk_pairs_data <- fread(crosswalk_pairs)
# Effect size is the alternative/reference mean ratio; its SE comes from the
# delta method for a ratio of two means.
crosswalk_pairs_data[, ratio := a_mean / r_mean]
crosswalk_pairs_data[, se := sqrt(((a_mean^2 / r_mean^2) * ((a_se^2) / (a_mean^2) + (r_se^2) / (r_mean^2))))]
# MR-BRT is fit in log space: log-ratio plus its delta-method SE
# (deltamethod is presumably msm::deltamethod, loaded upstream -- confirm).
crosswalk_pairs_data[, log_effect_size := log(ratio)]
crosswalk_pairs_data[, log_effect_size_se := deltamethod(~log(x1), ratio, se^2), by = c("ratio", "se")]
dir.create(file.path(paste0("FILEPATH", acause, "/outputs/")), showWarnings = FALSE)
# Fit the MR-BRT crosswalk on the log effect sizes: no global intercept
# (remove_x_intercept), one fixed effect per study covariate, 10%
# likelihood-based trimming, random effect by study id.
crosswalk_fit <- run_mr_brt(
  output_dir = paste0("FILEPATH", acause, "/outputs/"),
  model_label = "mdd",
  data = crosswalk_pairs_data[!is.na(log_effect_size_se)],
  mean_var = "log_effect_size",
  se_var = "log_effect_size_se",
  covs = cov_list,
  remove_x_intercept = TRUE,
  method = "trim_maxL",
  trim_pct = 0.1,
  study_id = "id",
  overwrite_previous = TRUE,
  lasso = FALSE)
check_for_outputs(crosswalk_fit)
# Prediction grid: every 0/1 combination of the study covariates.
# do.call(expand.grid, <named list>) replaces the previous
# eval(parse(text = ...)) string construction -- same expand.grid call,
# same column names and order, no code generation.
predicted <- do.call(expand.grid, setNames(rep(list(c(0, 1)), length(covariates)), covariates))
predicted <- as.data.table(predict_mr_brt(crosswalk_fit, newdata = predicted)["model_summaries"])
# Strip the "model_summaries." prefix as a fixed string (the "." was
# previously an unescaped regex wildcard), then map the design-matrix names
# back to the bundle's cv_* naming.
names(predicted) <- gsub("model_summaries.", "", names(predicted), fixed = TRUE)
names(predicted) <- gsub("X_d_", "cv_", names(predicted))
# SE recovered from the 95% UI width.
predicted[, `:=` (Y_se = (Y_mean_hi - Y_mean_lo)/(2*qnorm(0.975,0,1)))]
crosswalk_reporting <- copy(predicted) # for reporting later
predicted[, (c("Z_intercept", "Y_negp", "Y_mean_lo", "Y_mean_hi", "Y_mean_fe", "Y_negp_fe", "Y_mean_lo_fe", "Y_mean_hi_fe")) := NULL]
# Attach each row's predicted adjustment (Y_mean, Y_se) by its covariate
# pattern, then shift non-reference rows in log space.
review_sheet_final <- merge(review_sheet_final, predicted, by=gsub("d_", "cv_", covariates))
review_sheet_final[, `:=` (log_mean = log(mean), log_se = deltamethod(~log(x1), mean, standard_error^2)), by = c("mean", "standard_error")]
# predicted[1, Y_mean] belongs to the all-zero (reference) pattern, so rows
# matching it stay unadjusted; mean == 0 rows are skipped (log undefined).
review_sheet_final[Y_mean != predicted[1,Y_mean] & mean != 0, `:=` (log_mean = log_mean - Y_mean, log_se = sqrt(log_se^2 + Y_se^2))]
review_sheet_final[Y_mean != predicted[1,Y_mean] & mean != 0, `:=` (mean = exp(log_mean), standard_error = deltamethod(~exp(x1), log_mean, log_se^2)), by = c("log_mean", "log_se")]
# Adjusted rows drop their raw uncertainty fields and point at their parent seq.
review_sheet_final[Y_mean != predicted[1,Y_mean] & mean != 0, `:=` (cases = NA, lower = NA, upper = NA)]
review_sheet_final[Y_mean != predicted[1,Y_mean] & mean != 0 & is.na(crosswalk_parent_seq), `:=` (crosswalk_parent_seq = seq, seq = NA)]
# Record which covariates were crosswalked on each row ("sex" may already be
# present from the sex split above).
for(c in covariates){
  c <- gsub("d_", "cv_", c)
  review_sheet_final[get(c) == 1, study_covariate := ifelse(is.na(study_covariate) | study_covariate == "ref", gsub("cv_", "", c), paste0(study_covariate, ", ", gsub("cv_", "", c)))]
}
review_sheet_final[, (c("Y_mean", "Y_se", "log_mean", "log_se")) := NULL]
# For upload validation #
# Crosswalked/split rows may not carry raw uncertainty or sample-size fields.
review_sheet_final[study_covariate != "ref", `:=` (lower = NA, upper = NA, cases = NA, sample_size = NA)]
review_sheet_final[is.na(lower), uncertainty_type_value := NA]
# NOTE(review): location_id 95 is hard-coded (United Kingdom in the GBD
# hierarchy, presumably excluded in favour of its subnationals -- confirm).
review_sheet_final <- review_sheet_final[location_id != 95,]
review_sheet_final <- review_sheet_final[group_review == 1, ]
# Write the dated crosswalk sheet that save_crosswalk_version() will upload.
crosswalk_save_folder <- paste0("FILEPATH", acause, "/", bundle_id, "FILEPATH")
dir.create(file.path(crosswalk_save_folder), showWarnings = FALSE)
crosswalk_save_file <- paste0(crosswalk_save_folder, "crosswalk_", Sys.Date(), ".xlsx")
write.xlsx(review_sheet_final, crosswalk_save_file, sheetName = "extraction")
##### Upload crosswalked dataset to database -----------------------------------------------------------------------
save_crosswalk_version(v_id, crosswalk_save_file, description = "Global sex-ratio & MR-BRT crosswalks applied #1")
## Save study-level covariates to a csv for later reporting ##
crosswalk_reporting[, covariate := ""]
# Build a human-readable covariate label per prediction-grid row.
# BUG FIX: the non-empty branch previously wrote paste0(covariate, ", ") and
# dropped the new name c, so combined labels lost every covariate after the
# first (compare the analogous ifelse in the review-sheet labelling above).
for(c in names(crosswalk_reporting)[names(crosswalk_reporting) %like% "cv_"]){
  crosswalk_reporting[get(c) == 1, covariate := ifelse(covariate != "", paste0(covariate, ", ", c), c)]
}
crosswalk_reporting[, covariate := gsub("cv_", "", covariate)]
# Keep only single-covariate rows; covariates hold d_* names here, so strip
# the 2-character prefix before matching.
crosswalk_reporting <- crosswalk_reporting[covariate %in% substring(covariates, 3, nchar(covariates)),]
# Report betas in log space and exponentiated (relative-risk scale).
crosswalk_reporting <- crosswalk_reporting[,.(covariate, beta = Y_mean, beta_low = Y_mean_lo, beta_high = Y_mean_hi,
                                              exp_beta = exp(Y_mean), exp_beta_low = exp(Y_mean_lo), exp_beta_high = exp(Y_mean_hi))]
write.csv(crosswalk_reporting, paste0("FILEPATH", acause, "FILEPATH.csv"),row.names=F)
|
\name{dist.Horseshoe}
\alias{dhs}
\alias{rhs}
\title{Horseshoe Distribution}
\description{
This is the density function and random generation from the horseshoe
distribution.
}
\usage{
dhs(x, lambda, tau, sigma, log=FALSE)
rhs(n, lambda, tau, sigma)
}
\arguments{
\item{n}{This is the number of draws from the distribution.}
\item{x}{This is a location vector at which to evaluate density.}
\item{lambda}{This vector is a positive-only local parameter
\eqn{\lambda}{lambda}.}
\item{tau}{This scalar is a positive-only global parameter
\eqn{\tau}{tau}.}
\item{sigma}{This scalar is a positive-only global parameter
\eqn{\sigma}{sigma}.}
\item{log}{Logical. If \code{log=TRUE}, then the logarithm of the
density is returned.}
}
\details{
\itemize{
\item Application: Discrete Scale Mixture
\item Density: (see below)
\item Inventor: Carvalho et al. (2008)
\item Notation 1: \eqn{\theta \sim \mathcal{HS}(\lambda, \tau,
\sigma)}{theta ~ HS(lambda, tau, sigma)}
\item Notation 2: \eqn{p(\theta) = \mathcal{HS}(\theta | \lambda,
\tau, \sigma)}{p(theta) = HS(theta | lambda, tau, sigma)}
\item Parameter 1: local scale \eqn{\lambda > 0}{lambda > 0}
\item Parameter 2: global scale \eqn{\tau > 0}{tau > 0}
\item Parameter 3: global scale \eqn{\sigma > 0}{sigma > 0}
\item Mean: \eqn{E(\theta)}{E(theta)}
\item Variance: \eqn{var(\theta)}{var(theta)}
\item Mode: \eqn{mode(\theta)}{mode(theta)}
}
The horseshoe distribution (Carvalho et al., 2008) is a heavy-tailed
discrete mixture distribution that can be considered a variance mixture,
and it is in the family of multivariate scale mixtures of normals.
The horseshoe distribution was proposed as a prior distribution, and
recommended as a default choice for shrinkage priors in the presence of
sparsity. Horseshoe priors are most appropriate in large-p models where
dimension reduction is necessary to avoid overly complex models that
predict poorly, and also perform well in estimating a sparse covariance
matrix via Cholesky decomposition (Carvalho et al., 2009).
When the number of parameters in variable selection is assumed to be
sparse, meaning that most elements are zero or nearly zero, a horseshoe
prior is a desirable alternative to the Laplace-distributed parameters
in the LASSO, or the parameterization in ridge regression. When the true
value is far from zero, the horseshoe prior leaves the parameter
unshrunk. Yet, the horseshoe prior is accurate in shrinking parameters
that are truly zero or near-zero. Parameters near zero are shrunk more
than parameters far from zero. Therefore, parameters far from zero
experience less shrinkage and are closer to their true values. The
horseshoe prior is valuable in discriminating signal from noise.
The horseshoe distribution is the following discrete mixture:
\deqn{p(\theta | \lambda) \sim \mathcal{N}(0, \lambda^2)}{p(theta |
lambda) ~ N(0, lambda^2)}
\deqn{p(\lambda | \tau) \sim \mathcal{HC}(\tau)}{p(lambda | tau) ~
HC(tau)}
\deqn{p(\tau) \sim \mathcal{HC}(\sigma)}{p(tau) ~ HC(sigma)}
where lambda is a vector of local shrinkage parameters, and tau and
sigma are global shrinkage parameters.
By replacing the Laplace-distributed parameters in LASSO with
horseshoe-distributed parameters, the result is called horseshoe
regression.
}
\value{
\code{dhs} gives the density and
\code{rhs} generates random deviates.
}
\references{
Carvalho, C.M., Polson, N.G., and Scott, J.G. (2008). "The Horseshoe
Estimator for Sparse Signals". \emph{Discussion Paper 2008-31}. Duke
University Department of Statistical Science.
Carvalho, C.M., Polson, N.G., and Scott, J.G. (2009). "Handling
Sparsity via the Horseshoe". \emph{Journal of Machine Learning
Research}, 5, p. 73--80.
}
\seealso{
\code{\link{dlaplace}}
}
\examples{
library(LaplacesDemon)
x <- rnorm(100)
lambda <- rhalfcauchy(100, 5)
tau <- 5
sigma <- 10
x <- dhs(x, lambda, tau, sigma, log=TRUE)
x <- rhs(100, lambda=lambda, tau=tau, sigma=sigma)
x <- rhs(100, tau=tau, sigma=sigma)
x <- rhs(100, sigma=sigma)
plot(density(x))
}
\keyword{Distribution}
| /man/dist.Horseshoe.Rd | permissive | benmarwick/LaplacesDemon | R | false | false | 4,234 | rd | \name{dist.Horseshoe}
\alias{dhs}
\alias{rhs}
\title{Horseshoe Distribution}
\description{
This is the density function and random generation from the horseshoe
distribution.
}
\usage{
dhs(x, lambda, tau, sigma, log=FALSE)
rhs(n, lambda, tau, sigma)
}
\arguments{
\item{n}{This is the number of draws from the distribution.}
\item{x}{This is a location vector at which to evaluate density.}
\item{lambda}{This vector is a positive-only local parameter
\eqn{\lambda}{lambda}.}
\item{tau}{This scalar is a positive-only global parameter
\eqn{\tau}{tau}.}
\item{sigma}{This scalar is a positive-only global parameter
\eqn{\sigma}{sigma}.}
\item{log}{Logical. If \code{log=TRUE}, then the logarithm of the
density is returned.}
}
\details{
\itemize{
\item Application: Discrete Scale Mixture
\item Density: (see below)
\item Inventor: Carvalho et al. (2008)
\item Notation 1: \eqn{\theta \sim \mathcal{HS}(\lambda, \tau,
\sigma)}{theta ~ HS(lambda, tau, sigma)}
\item Notation 2: \eqn{p(\theta) = \mathcal{HS}(\theta | \lambda,
\tau, \sigma)}{p(theta) = HS(theta | lambda, tau, sigma)}
\item Parameter 1: local scale \eqn{\lambda > 0}{lambda > 0}
\item Parameter 2: global scale \eqn{\tau > 0}{tau > 0}
\item Parameter 3: global scale \eqn{\sigma > 0}{sigma > 0}
\item Mean: \eqn{E(\theta)}{E(theta)}
\item Variance: \eqn{var(\theta)}{var(theta)}
\item Mode: \eqn{mode(\theta)}{mode(theta)}
}
The horseshoe distribution (Carvalho et al., 2008) is a heavy-tailed
discrete mixture distribution that can be considered a variance mixture,
and it is in the family of multivariate scale mixtures of normals.
The horseshoe distribution was proposed as a prior distribution, and
recommended as a default choice for shrinkage priors in the presence of
sparsity. Horseshoe priors are most appropriate in large-p models where
dimension reduction is necessary to avoid overly complex models that
predict poorly, and also perform well in estimating a sparse covariance
matrix via Cholesky decomposition (Carvalho et al., 2009).
When the number of parameters in variable selection is assumed to be
sparse, meaning that most elements are zero or nearly zero, a horseshoe
prior is a desirable alternative to the Laplace-distributed parameters
in the LASSO, or the parameterization in ridge regression. When the true
value is far from zero, the horseshoe prior leaves the parameter
unshrunk. Yet, the horseshoe prior is accurate in shrinking parameters
that are truly zero or near-zero. Parameters near zero are shrunk more
than parameters far from zero. Therefore, parameters far from zero
experience less shrinkage and are closer to their true values. The
horseshoe prior is valuable in discriminating signal from noise.
The horseshoe distribution is the following discrete mixture:
\deqn{p(\theta | \lambda) \sim \mathcal{N}(0, \lambda^2)}{p(theta |
lambda) ~ N(0, lambda^2)}
\deqn{p(\lambda | \tau) \sim \mathcal{HC}(\tau)}{p(lambda | tau) ~
HC(tau)}
\deqn{p(\tau) \sim \mathcal{HC}(\sigma)}{p(tau) ~ HC(sigma)}
where lambda is a vector of local shrinkage parameters, and tau and
sigma are global shrinkage parameters.
By replacing the Laplace-distributed parameters in LASSO with
horseshoe-distributed parameters, the result is called horseshoe
regression.
}
\value{
\code{dhs} gives the density and
\code{rhs} generates random deviates.
}
\references{
Carvalho, C.M., Polson, N.G., and Scott, J.G. (2008). "The Horseshoe
Estimator for Sparse Signals". \emph{Discussion Paper 2008-31}. Duke
University Department of Statistical Science.
Carvalho, C.M., Polson, N.G., and Scott, J.G. (2009). "Handling
Sparsity via the Horseshoe". \emph{Journal of Machine Learning
Research}, 5, p. 73--80.
}
\seealso{
\code{\link{dlaplace}}
}
\examples{
library(LaplacesDemon)
x <- rnorm(100)
lambda <- rhalfcauchy(100, 5)
tau <- 5
sigma <- 10
x <- dhs(x, lambda, tau, sigma, log=TRUE)
x <- rhs(100, lambda=lambda, tau=tau, sigma=sigma)
x <- rhs(100, tau=tau, sigma=sigma)
x <- rhs(100, sigma=sigma)
plot(density(x))
}
\keyword{Distribution}
|
# Run the full mjcstats test suite with testthat.
# NOTE(review): test_package() is the legacy testthat entry point for
# R CMD check -- confirm against the package's current testthat version.
library(testthat)
library(mjcstats)
test_package("mjcstats")
| /tests/test-all.R | no_license | drmjc/mjcstats | R | false | false | 62 | r | library(testthat)
library(mjcstats)
test_package("mjcstats")
|
#'eucli_dist
#'
#'Compute an adapted Euclidean distance between two differential methylation
#'profiles. This function is called by the dmDistance and
#'dmDistance_translocate functions.
#'
#'@param m1 a dmProfile to be compared with another one.
#'@param m2 the other one dmProfile.
#'
#'@example examples/example-dmRandomDataset.R
#'@example examples/example-dmTable.R
#'@example examples/example-getalldmProfile.R
#'@example examples/example-eucli_dist.R
#'
#'@export
eucli_dist <- function(m1, m2) {
  # Variance-scaled squared difference, probe by probe.
  probe_dist <- (m1$y - m2$y)^2 / (m1$var + m2$var)
  # Moderate by the product of both ponderation vectors and average.
  weights <- m1$pond * m2$pond
  sum(probe_dist * weights, na.rm = TRUE) / sum(weights, na.rm = TRUE)
}
| /R/eucli_dist.R | no_license | pterzian/dmprocr | R | false | false | 776 | r | #'eucli_dist
#'
#'A function to compute adapted eucliddean distance on differential methylation profile. This function is called by dmDistance and dmDistance_translocate functions.
#'
#'@param m1 a dmProfile to be compared with another one.
#'@param m2 the other one dmProfile.
#'
#'@example examples/example-dmRandomDataset.R
#'@example examples/example-dmTable.R
#'@example examples/example-getalldmProfile.R
#'@example examples/example-eucli_dist.R
#'
#'@export
eucli_dist <- function(m1, m2){ #where m1 and m2 are methylation profile
##here we go, first the distance
D.probe <- (m1$y - m2$y)^2 / (m1$var + m2$var)
##then the moderation
D.gene <- sum(D.probe * m1$pond * m2$pond, na.rm = TRUE) / sum(m1$pond * m2$pond, na.rm = TRUE)
return(D.gene)
}
|
\name{pcovsum}
\alias{pcovsum}
\title{Print covariate summary Latex}
\usage{
pcovsum(data, covs, maincov = NULL, numobs = NULL,
TeX = F)
}
\arguments{
\item{data}{dataframe containing data}
\item{covs}{character vector with the names of columns to
include in table}
\item{maincov}{covariate to stratify table by}
\item{numobs}{named list overriding the number of people
you expect to have the covariate}
\item{TeX}{boolean indicating if you want to be able to
view extra long tables in the LaTeX pdf. If TeX is T then
the table will not convert properly to docx}
}
\description{
Returns a dataframe corresponding to a descriptive table
}
\keyword{print}
| /man/pcovsum.Rd | no_license | cran/reportRx | R | false | false | 685 | rd | \name{pcovsum}
\alias{pcovsum}
\title{Print covariate summary Latex}
\usage{
pcovsum(data, covs, maincov = NULL, numobs = NULL,
TeX = F)
}
\arguments{
\item{data}{dataframe containing data}
\item{covs}{character vector with the names of columns to
include in table}
\item{maincov}{covariate to stratify table by}
\item{numobs}{named list overriding the number of people
you expect to have the covariate}
\item{TeX}{boolean indicating if you want to be able to
view extra long tables in the LaTeX pdf. If TeX is T then
the table will not convert properly to docx}
}
\description{
Returns a dataframe corresponding to a descriptive table
}
\keyword{print}
|
# RUnit test: dbGetInfo() on a fresh in-memory connection reports the six
# expected fields with their default values.
test_dbGetInfo_connection <- function()
{
    db <- dbConnect(SQLite(), dbname = ":memory:")
    on.exit(dbDisconnect(db))
    info <- dbGetInfo(db)
    checkEquals(6L, length(info))
    checkEquals(":memory:", info[["dbname"]])
    # NOTE(review): pinned to the bundled SQLite version at the time --
    # this assertion breaks on every SQLite upgrade; confirm it is intended.
    checkEquals("3.7.9", info[["serverVersion"]])
    # No result sets open yet.
    checkEquals(integer(0), info[["rsId"]])
    checkEquals("on", info[["loadableExtensions"]])
    # Default open mode is read/write/create.
    checkEquals(SQLITE_RWC, info[["flags"]])
    checkEquals("", info[["vfs"]])
}
# RUnit test: a custom VFS name passed to dbConnect() is echoed by
# dbGetInfo().  Skipped on Windows, where SQLite's unix-* VFS layers
# do not exist.
test_dbGetInfo_connection_vfs <- function()
{
    if (.Platform[["OS.type"]] == "windows") {
        cat("Skipping test: vfs customization not available on Windows\n")
        return(TRUE)
    }
    db <- dbConnect(SQLite(), dbname = "", vfs = "unix-none")
    on.exit(dbDisconnect(db))
    info <- dbGetInfo(db)
    # dbname = "" opens a private temporary on-disk database.
    checkEquals("", info[["dbname"]])
    checkEquals("unix-none", info[["vfs"]])
}
## RUnit test: opening a connection with loadable.extensions = FALSE must be
## reported as "off" by dbGetInfo().
test_dbGetInfo_extensions <- function()
{
    con <- dbConnect(SQLite(), dbname = "", loadable.extensions = FALSE)
    on.exit(dbDisconnect(con))
    checkEquals("off", dbGetInfo(con)[["loadableExtensions"]])
}
| /R-library/RSQLite/UnitTests/dbGetInfo_test.R | permissive | jmswenson/src-from-orona | R | false | false | 1,082 | r | test_dbGetInfo_connection <- function()
{
db <- dbConnect(SQLite(), dbname = ":memory:")
on.exit(dbDisconnect(db))
info <- dbGetInfo(db)
checkEquals(6L, length(info))
checkEquals(":memory:", info[["dbname"]])
checkEquals("3.7.9", info[["serverVersion"]])
checkEquals(integer(0), info[["rsId"]])
checkEquals("on", info[["loadableExtensions"]])
checkEquals(SQLITE_RWC, info[["flags"]])
checkEquals("", info[["vfs"]])
}
test_dbGetInfo_connection_vfs <- function()
{
if (.Platform[["OS.type"]] == "windows") {
cat("Skipping test: vfs customization not available on Windows\n")
return(TRUE)
}
db <- dbConnect(SQLite(), dbname = "", vfs = "unix-none")
on.exit(dbDisconnect(db))
info <- dbGetInfo(db)
checkEquals("", info[["dbname"]])
checkEquals("unix-none", info[["vfs"]])
}
test_dbGetInfo_extensions <- function()
{
db <- dbConnect(SQLite(), dbname = "", loadable.extensions = FALSE)
on.exit(dbDisconnect(db))
info <- dbGetInfo(db)
checkEquals("off", info[["loadableExtensions"]])
}
|
# test fou_tmb.cpp compiled on-the-fly
# Compile the TMB template against realPSD's bundled headers and load the
# resulting shared library.
# NOTE(review): require() returns FALSE instead of erroring when a package
# is missing -- testthat and TMB are assumed present here.
require(testthat)
require(TMB)
compile("fou_tmb.cpp", PKG_CXXFLAGS = paste0("-I", system.file("include", package = "realPSD")))
dyn.load(dynlib("fou_tmb"))
# DLL name used by every MakeADFun() call below.
model <- "fou_tmb"
# helper function: calculate the psd in R
# Reference implementation of the fractional OU power spectral density:
#   S(f) = |f|^(1 - 2*H) / (f^2 + gamma^2),  with phi = (H, gamma).
# Used to cross-check the TMB UFun output in the tests below.
fou_psd_r <- function(f, phi) {
  # Return the PSD directly; the original assigned it to a temporary, which
  # only made the value return invisibly for no benefit.
  abs(f)^(1-2*phi[1]) / (f^2 + phi[2]^2)
}
context("Test: fractional OU model compiled on-the-fly")
# Compare the TMB-side PSD (method = "UFun") against the pure-R reference on
# random frequency grids and random parameter draws.
test_that("The UFun returned by TMB (C++) is the same as that given by R", {
  ntests <- 20
  for(ii in 1:ntests) {
    N <- sample(10:20, 1)
    f <- matrix(runif(N, 0, 2*N))
    phi <- matrix(c(runif(1), rexp(1))) # phi = c(H, gamma) where 0 < H < 1, gamma > 0
    # The parameters slot only seeds the AD tape; the value under test is the
    # phi passed to obj$simulate() below.
    obj <- MakeADFun(data = list(f = f, method = "UFun"),
                     parameters = list(phi = matrix(c(runif(1), rexp(1)))),
                     DLL = model, silent = TRUE)
    psd_tmb <- obj$simulate(c(phi))$U
    psd_r <- fou_psd_r(f, phi)
    expect_equal(psd_tmb, psd_r)
  }
})
# Helper function: closed-form MLE of the scale parameter tau -- the mean of
# the periodogram ordinates Y divided by the fs-scaled normalized PSD.
mle_tau_r <- function(f, Y, phi, ufun, fs) {
  scaled_psd <- fs * ufun(f, phi)
  mean(Y / scaled_psd)
}
# One chi^2(2) draw per ordinate -- presumably standing in for exponential
# periodogram ordinates under the whitened model; TODO confirm.
sim_Y <- function(n) rchisq(n, df = 2)
# Normalized fractional-OU PSD with phi = c(H, gamma):
#   U(f) = |f|^(1 - 2H) / (f^2 + gamma^2)
fou_ufun <- function(f, phi) {
  H <- phi[1]
  gamma_sq <- phi[2]^2
  abs(f)^(1 - 2 * H) / (f^2 + gamma_sq)
}
# simulate the sampling freq: one integer drawn uniformly from 10..1000
sim_fs <- function() sample(10:1000, size = 1)
# Compare the TMB-side tau objective (method = "MLE_tau") against the pure-R
# helper on random data.
test_that("sigma^2 returned by TMB (C++) is the same as that given by R", {
  ntests <- 20
  for(ii in 1:ntests) {
    N <- sample(50:100, 1)
    f <- matrix(runif(N, 0, 2*N))
    fs <- sim_fs()
    phi <- matrix(c(runif(1), rexp(1)))
    Y <- matrix(sim_Y(N))
    # TMB obj
    obj <- MakeADFun(
      data = list(f = f, Y = Y, fs = fs, method = "MLE_tau"),
      parameters = list(phi = matrix(0.5, 2)),
      DLL = model, silent = TRUE
    )
    # define mle_tau from TMB
    mle_tau_tmb <- function(phi) obj$fn(phi)
    # comparison
    tau_tmb <- mle_tau_tmb(phi)
    tau_r <- mle_tau_r(f, Y, phi, ufun = fou_ufun, fs)
    expect_equal(tau_tmb, tau_r)
  }
})
| /tests/dontrun/old/test-fou.R | no_license | mlysy/realPSD | R | false | false | 1,945 | r | # test fou_tmb.cpp compiled on-the-fly
require(testthat)
require(TMB)
compile("fou_tmb.cpp", PKG_CXXFLAGS = paste0("-I", system.file("include", package = "realPSD")))
dyn.load(dynlib("fou_tmb"))
model <- "fou_tmb"
# helper function: calculate the psd in R
fou_psd_r <- function(f, phi) {
psd <- abs(f)^(1-2*phi[1]) / (f^2 + phi[2]^2)
}
context("Test: fractional OU model compiled on-the-fly")
test_that("The UFun returned by TMB (C++) is the same as that given by R", {
ntests <- 20
for(ii in 1:ntests) {
N <- sample(10:20, 1)
f <- matrix(runif(N, 0, 2*N))
phi <- matrix(c(runif(1), rexp(1))) # phi = c(H, gamma) where 0 < H < 1, gamma > 0
obj <- MakeADFun(data = list(f = f, method = "UFun"),
parameters = list(phi = matrix(c(runif(1), rexp(1)))),
DLL = model, silent = TRUE)
psd_tmb <- obj$simulate(c(phi))$U
psd_r <- fou_psd_r(f, phi)
expect_equal(psd_tmb, psd_r)
}
})
# helpfer function
mle_tau_r <- function(f, Y, phi, ufun, fs) {
U <- fs * ufun(f, phi)
mean(Y/U)
}
sim_Y <- function(n) rchisq(n, df = 2)
# normalized PSD
# phi = c(H, gamma)
fou_ufun <- function(f, phi) {
abs(f)^(1-2*phi[1]) / (f^2 + phi[2]^2)
}
# simulate the sampling freq
sim_fs <- function() sample(10:1000, size = 1)
test_that("sigma^2 returned by TMB (C++) is the same as that given by R", {
ntests <- 20
for(ii in 1:ntests) {
N <- sample(50:100, 1)
f <- matrix(runif(N, 0, 2*N))
fs <- sim_fs()
phi <- matrix(c(runif(1), rexp(1)))
Y <- matrix(sim_Y(N))
# TMB obj
obj <- MakeADFun(
data = list(f = f, Y = Y, fs = fs, method = "MLE_tau"),
parameters = list(phi = matrix(0.5, 2)),
DLL = model, silent = TRUE
)
# define mle_tau from TMB
mle_tau_tmb <- function(phi) obj$fn(phi)
# comparison
tau_tmb <- mle_tau_tmb(phi)
tau_r <- mle_tau_r(f, Y, phi, ufun = fou_ufun, fs)
expect_equal(tau_tmb, tau_r)
}
})
|
# Ensure datetime is a factor before modelling.
if(!is.factor(train$datetime))
  train$datetime <- factor(train$datetime)
# Per-covariate goodness-of-fit trackers, filled in by the loop below.
r2 <- rep(0, length(columns))
fstatistic <- rep(0, length(columns))
# Random 50/50 split of the rows into training and validation indexes.
# seq_along() replaces the 1:length() shortcut (same values, no empty-vector
# footgun), and <- replaces = for assignment.
indexes <- sample(seq_along(train$count), size = length(train$count)/2)
indexes.v <- sample(setdiff(seq_along(train$count), indexes))
# Predictors and log-transformed response for each half.
x <- train[columns]
y <- log(train$count)
x.v <- x[indexes.v,]
x <- x[indexes,]
y.v <- y[indexes.v]
y <- y[indexes]
# Fit one simple linear model per candidate column; record its R^2 and
# overall F-statistic (seq_along replaces the 1:length() shortcut).
for(i in seq_along(columns)) {
  print(columns[i])
  myCol <- x[columns[i]]
  train.lm <- lm(y ~ ., data = myCol)
  r2[i] <- summary(train.lm)$r.squared
  fstatistic[i] <- summary(train.lm)$fstatistic[1]
  print(paste(columns[i], "has R-squared:", r2[i], " and F-statistic: ", fstatistic[i]))
}
# Pick the covariate with the largest F-statistic.  which.max() returns the
# index of the first maximum, exactly what the old explicit scan computed.
best <- which.max(fstatistic)
print(paste(columns[best], "has best F-statistic:", fstatistic[best]))
print(paste(columns[best], "will be used to calculate the linear model w/ 1 variable"))
# Fit the single-best-covariate model, then let the forward-stepwise script
# grow `already_present` (and presumably refit train.lm -- confirm against
# linear_model_forward_steps.R).
train.lm = lm(y ~ ., data = x[columns[best]])
already_present = c(columns[best])
source("scripts/linear_model_forward_steps.R")
print(already_present)
# Evaluate on the held-out half; lift.roc is defined elsewhere in the project.
pLinear = predict(train.lm,x.v)
aLinear = lift.roc(pLinear, y.v, type="crude")
#clean
# Drop the intermediates; the fitted model (train.lm) and the validation
# result (aLinear) are intentionally kept in the workspace.
rm(already_present)
rm(best)
rm(fstatistic)
rm(i)
rm(indexes)
rm(indexes.v)
rm(myCol)
rm(pLinear)
rm(r2)
rm(x)
rm(x.v)
rm(y)
rm(y.v)
| /scripts/linearModel.R | no_license | snate/bike_sharing_demand | R | false | false | 1,297 | r | if(!is.factor(train$datetime))
train$datetime = factor(train$datetime)
r2 = rep(0,length(columns)); fstatistic = rep(0,length(columns))
indexes = sample(1:length(train$count), size=length(train$count)/2)
indexes.v = sample(setdiff(1:length(train$count), indexes))
x = train[columns]
y = log(train$count)
x.v = x[indexes.v,]
x = x[indexes,]
y.v = y[indexes.v]
y = y[indexes]
for(i in 1:length(columns)) {
print(columns[i])
myCol = x[columns[i]]
train.lm = lm(y ~ ., data = myCol)
r2[i] = summary(train.lm)$r.squared
fstatistic[i] = summary(train.lm)$fstatistic[1]
print(paste(columns[i], "has R-squared:", r2[i], " and F-statistic: ", fstatistic[i]))
}
best = 1
for(i in 2:length(columns)) {
if(fstatistic[i] > fstatistic[best])
best = i
}
print(paste(columns[best], "has best F-statistic:", fstatistic[best]))
print(paste(columns[best], "will be used to calculate the linear model w/ 1 variable"))
train.lm = lm(y ~ ., data = x[columns[best]])
already_present = c(columns[best])
source("scripts/linear_model_forward_steps.R")
print(already_present)
pLinear = predict(train.lm,x.v)
aLinear = lift.roc(pLinear, y.v, type="crude")
#clean
rm(already_present)
rm(best)
rm(fstatistic)
rm(i)
rm(indexes)
rm(indexes.v)
rm(myCol)
rm(pLinear)
rm(r2)
rm(x)
rm(x.v)
rm(y)
rm(y.v)
|
# Reproducible simulation; ABMASE-function.R provides ABMA.BIN() used below
# (and presumably loads MASS for mvrnorm -- confirm).
set.seed(2)
source("ABMASE-function.R")
# Inverse-logit link, written as 1/(1+exp(-x)) so large positive x saturates
# to 1 instead of overflowing to NaN via exp(x)/(1+exp(x)).  Equivalent to
# stats::plogis.
logistic <- function(x){ 1/(1+exp(-x)) }
# settings
p <- 30 # maximum number of covariates
N <- 10000 # population size
n <- 400 # sample size
# covariates: AR(1)-type correlation rho^|j-k|, marginal variance 2
rho <- 0.2
# Direct construction replaces the double for loop; identical matrix.
C <- rho^abs(outer(1:p, 1:p, "-"))
D <- diag( rep(sqrt(2), p) )
X <- mvrnorm(N, rep(1,p), D%*%C%*%D)
tX <- X # population covariate vector
# true coefficients and variance
Beta <- rep(0, p)
Beta[c(1, 4, 7, 10)] <- c(1, -0.5, 1, -0.5)
sig <- 2 # NOTE(review): unused in this binary-outcome script -- confirm
# response: Bernoulli draws with logistic mean at linear predictor -1 + X*Beta
pp <- logistic( as.vector(-1+X%*%Beta) )
Y <- rbinom(N, 1, pp)
# true mean
Mu <- mean(Y)
###----------------------------------------------------------------###
###              Scenario 1 (simple random sampling)               ###
###----------------------------------------------------------------###
# Equal inclusion probabilities n/N; draw an SRS without replacement and
# flag the sampled units.
Pi <- rep(n/N, N)
Sel <- sort( sample(1:N, n) ) # simple random sampling
Ind <- rep(0, N)
Ind[Sel] <- 1
sY <- Y[Ind==1]
sX <- X[Ind==1,]
sPi <- Pi[Ind==1]
# HT estimator and its SRSWOR variance.
# Delta holds the 1 - pi_i*pi_j/pi_ij design terms: pi_i = n/N on the
# diagonal, pi_ij = n(n-1)/(N(N-1)) off it.  Built directly instead of
# filling row-by-row (pp2 was loop-invariant anyway); same matrix.
pp2 <- n*(n-1)/(N*(N-1))
Delta <- matrix(1-(n/N)^2/pp2, n, n)
diag(Delta) <- 1-n/N
HT <- sum(sY/sPi)/N
HTV <- as.vector( t(sY/sPi)%*%Delta%*%(sY/sPi)/N^2 )
# Proposed methods
# Adaptive Bayesian model averaging under three shrinkage priors.
fit1 <- ABMA.BIN(sY, sX, sPi, tX, N, samp="SRS", prior="default", mc=3000, burn=1000)
fit2 <- ABMA.BIN(sY, sX, sPi, tX, N, samp="SRS", prior="Laplace", mc=3000, burn=1000)
fit3 <- ABMA.BIN(sY, sX, sPi, tX, N, samp="SRS", prior="HS", mc=3000, burn=1000)
## point estimates ##
HT
mean(fit1$Mu)
mean(fit2$Mu)
mean(fit3$Mu)
Mu # true value
## 95% confidence (credible) intervals ##
zz <- qnorm(0.975)
HT+c(-1,1)*zz*sqrt(HTV)
quantile(fit1$Mu, prob=c(0.025, 0.975))
quantile(fit2$Mu, prob=c(0.025, 0.975))
quantile(fit3$Mu, prob=c(0.025, 0.975))
###----------------------------------------------------------------###
### Scenario 2 (probability-proportional-to-size sampling) ###
###----------------------------------------------------------------###
zz <- log(1+abs(0.5*Y+rexp(N,3))) # size variable
zz[ zz<0.5 ] <- 0.5
Pi <- n*zz/sum(zz)
Sel <- sort(sample(1:N, n, prob=zz/sum(zz))) # PPS sampling
Ind <- rep(0,N)
Ind[Sel] <- 1
sY <- Y[Ind==1]
sX <- X[Ind==1,]
sPi <- Pi[Ind==1]
# HT estimator
HT <- sum(sY/sPi)/N
HTV <- n*var(sY/sPi)/N^2
# Proposed methods
fit1 <- ABMA.BIN(sY, sX, sPi, tX, N, samp="PPS", prior="default", mc=3000, burn=1000)
fit2 <- ABMA.BIN(sY, sX, sPi, tX, N, samp="PPS", prior="Laplace", mc=3000, burn=1000)
fit3 <- ABMA.BIN(sY, sX, sPi, tX, N, samp="PPS", prior="HS", mc=3000, burn=1000)
## point estimates ##
HT
mean(fit1$Mu)
mean(fit2$Mu)
mean(fit3$Mu)
Mu # true value
## 95% confidence (credible) intervals ##
zz <- qnorm(0.975)
HT+c(-1,1)*zz*sqrt(HTV)
quantile(fit1$Mu, prob=c(0.025, 0.975))
quantile(fit2$Mu, prob=c(0.025, 0.975))
quantile(fit3$Mu, prob=c(0.025, 0.975))
| /sim-logistic.R | no_license | sshonosuke/ABMASE | R | false | false | 2,867 | r | set.seed(2)
source("ABMASE-function.R")
logistic <- function(x){ exp(x)/(1+exp(x)) }
# settings
p <- 30 # maximum number of covariates
N <- 10000 # population size
n <- 400 # sample size
# covariates
rho <- 0.2
C <- matrix(NA,p,p)
for(j in 1:p){
for(k in 1:p){ C[k,j] <- rho^(abs(j-k)) }
}
D <- diag( rep(sqrt(2), p) )
X <- mvrnorm(N, rep(1,p), D%*%C%*%D)
tX <- X # population covariate vector
# true coeffieicnts and varaince
Beta <- rep(0, p)
Beta[c(1, 4, 7, 10)] <- c(1, -0.5, 1, -0.5)
sig <- 2
# response
pp <- logistic( as.vector(-1+X%*%Beta) )
Y <- rbinom(N, 1, pp)
# true mean
Mu <- mean(Y)
###----------------------------------------------------------------###
### Scenario 1 (simple random sampling) ###
###----------------------------------------------------------------###
Pi <- rep(n/N, N)
Sel <- sort( sample(1:N, n) ) # simple random sampling
Ind <- rep(0, N)
Ind[Sel] <- 1
sY <- Y[Ind==1]
sX <- X[Ind==1,]
sPi <- Pi[Ind==1]
# HT estimator
Delta <- matrix(NA,n,n)
for(i in 1:n){
Delta[i,i] <- 1-n/N
pp2 <- n*(n-1)/(N*(N-1))
Delta[i,-i] <- 1-(n/N)^2/pp2
}
HT <- sum(sY/sPi)/N
HTV <- as.vector( t(sY/sPi)%*%Delta%*%(sY/sPi)/N^2 )
# Proposed methods
fit1 <- ABMA.BIN(sY, sX, sPi, tX, N, samp="SRS", prior="default", mc=3000, burn=1000)
fit2 <- ABMA.BIN(sY, sX, sPi, tX, N, samp="SRS", prior="Laplace", mc=3000, burn=1000)
fit3 <- ABMA.BIN(sY, sX, sPi, tX, N, samp="SRS", prior="HS", mc=3000, burn=1000)
## point estimates ##
HT
mean(fit1$Mu)
mean(fit2$Mu)
mean(fit3$Mu)
Mu # true value
## 95% confidence (credible) intervals ##
zz <- qnorm(0.975)
HT+c(-1,1)*zz*sqrt(HTV)
quantile(fit1$Mu, prob=c(0.025, 0.975))
quantile(fit2$Mu, prob=c(0.025, 0.975))
quantile(fit3$Mu, prob=c(0.025, 0.975))
###----------------------------------------------------------------###
### Scenario 2 (probability-proportional-to-size sampling) ###
###----------------------------------------------------------------###
zz <- log(1+abs(0.5*Y+rexp(N,3))) # size variable
zz[ zz<0.5 ] <- 0.5
Pi <- n*zz/sum(zz)
Sel <- sort(sample(1:N, n, prob=zz/sum(zz))) # PPS sampling
Ind <- rep(0,N)
Ind[Sel] <- 1
sY <- Y[Ind==1]
sX <- X[Ind==1,]
sPi <- Pi[Ind==1]
# HT estimator
HT <- sum(sY/sPi)/N
HTV <- n*var(sY/sPi)/N^2
# Proposed methods
fit1 <- ABMA.BIN(sY, sX, sPi, tX, N, samp="PPS", prior="default", mc=3000, burn=1000)
fit2 <- ABMA.BIN(sY, sX, sPi, tX, N, samp="PPS", prior="Laplace", mc=3000, burn=1000)
fit3 <- ABMA.BIN(sY, sX, sPi, tX, N, samp="PPS", prior="HS", mc=3000, burn=1000)
## point estimates ##
HT
mean(fit1$Mu)
mean(fit2$Mu)
mean(fit3$Mu)
Mu # true value
## 95% confidence (credible) intervals ##
zz <- qnorm(0.975)
HT+c(-1,1)*zz*sqrt(HTV)
quantile(fit1$Mu, prob=c(0.025, 0.975))
quantile(fit2$Mu, prob=c(0.025, 0.975))
quantile(fit3$Mu, prob=c(0.025, 0.975))
|
# May 2 2017
## Goal: Power check for multiple regression (NOT MV)
# selected x1 SCSADFI outcome, as germane demand in the revision
# NOTE(review): rm(list=ls()) wipes the interactive workspace -- this script
# is meant to run standalone, not be sourced into a live session.
rm(list=ls()) # clear workspace; # ls() # list objects in the workspace
cat("\014") # same as ctrl-L (clear the RStudio console)
options(max.print=3000) # raise the print limit for the long loop output below
options(warn=1) # default = 0; print warnings as they occur to locate loop warnings
# Load -----------------------------------------------------
# Expects the merged, imputed chemical/semen dataset (lifemerg) in this .Rdata.
load("results/mv_EWAS_lm_semen_chems_in.Rdata")
## \\ name-change ----
## These 133 name are not R fault-proof (e.g.4_HB_f). Order of (papername) content is matched with (name(papername))
papername_m <- c("pcb101amt", "pcb105amt", "pcb110amt", "pcb114amt", "pcb118amt", "pcb128amt", "pcb138amt", "pcb146amt", "pcb149amt", "pcb151amt", "pcb153amt", "pcb156amt",
"pcb157amt", "pcb167amt", "pcb170amt", "pcb172amt", "pcb177amt", "pcb178amt", "pcb180amt", "pcb183amt", "pcb187amt", "pcb189amt", "pcb194amt", "pcb195amt",
"pcb196amt", "pcb201amt", "pcb206amt", "pcb209amt", "pcb028amt", "pcb044amt", "pcb049amt", "pcb052amt", "pcb066amt", "pcb074amt", "pcb087amt", "pcb099amt",
"pfcepahamt", "pfcmpahamt", "pfcpfdeamt", "pfcpfnaamt", "pfcpfsaamt", "pfcpfosamt", "pfcpfoaamt", "metbcdamt", "metbpbamt", "metthgamt", "popbb1amt", "popbhcamt",
"popghcamt", "pophcbamt", "popmiramt", "popodtamt", "popoxyamt", "poppdeamt", "poppdtamt", "poptnaamt", "pbdebr1amt", "pbdebr2amt", "pbdebr3amt", "pbdebr4amt",
"pbdebr5amt", "pbdebr6amt", "pbdebr7amt", "pbdebr8amt", "pbdebr9amt", "pbdebr66amt",
"DAZAMOUNT", "DMAAMOUNT", "EQUAMOUNT", "ETDAMOUNT", "ETLAMOUNT", "GNSAMOUNT", "CREAMOUNT", "fcholamt", "cholamt", "trigamt", "phosamt", "cotamt", "Selenium",
"Arsenic", "Manganese", "Chromium", "Beryllium", "Cobalt", "Molybdenum", "Cadmium_Corrected", "Tin", "Antimony", "Tellurium", "Caesium", "Barium", "Nickel",
"Copper", "Zinc", "Tungsten", "Platinum", "Thallium", "Lead", "Uranium", "mMethylPhthalate", "mEthylPhthalate", "mCarboxyPropylPhthalate", "mButylPhthalate",
"mIsobutylPhthalate", "mCarboxyEthylPentylPhthalate", "mCarboxyMethylHexylPhthalate", "mEthylHydroxyHexylPhthalate", "mEthylOxoHexylPhthalate", "mCycloHexylPhthalate",
"mBenzylPhthalate", "mEthylHexylPhthalate", "mOctylPhthalate", "mIsononylPhthalate", "BPA", "HydroxyMethoxyBenzoPhenone", "HydroxyBenzoPhenone", "DiHydroxyBenzoPhenone",
"DiHydroxyMethoxyBenzoPhenone", "TetraHydroxyBenzoPhenone", "MeP", "EtP", "PrP", "BuP", "BzP", "HeP", "X_4_HB", "X_3_4_DHB", "OH_MeP", "OH_EtP", "TCS", "TCC", "PAP", "APAP")
names(papername_m) <- c("PCB_101_m", "PCB_105_m", "PCB_110_m", "PCB_118_m", "PCB_114_m", "PCB_128_m", "PCB_138_m", "PCB_146_m", "PCB_149_m", "PCB_151_m", "PCB_153_m", "PCB_156_m", "PCB_157_m", "PCB_167_m", "PCB_170_m", "PCB_172_m", "PCB_177_m", "PCB_178_m", "PCB_180_m", "PCB_183_m", "PCB_187_m", "PCB_189_m", "PCB_194_m", "PCB_195_m", "PCB_196_m", "PCB_201_m", "PCB_206_m", "PCB_209_m", "PCB_28_m", "PCB_44_m", "PCB_49_m", "PCB_52_m", "PCB_66_m", "PCB_74_m", "PCB_87_m", "PCB_99_m", "Et_PFOSA_AcOH_m", "Me_PFOSA_AcOH_m", "PFDeA_m", "PFNA_m", "PFOSA_m", "PFOS_m", "PFOA_m", "blood_Cd_m", "blood_Pb_m", "blood_Hg_m", "BB_153_m", "b_HCB_m", "g_HCB_m", "HCB_m", "mirex_m", "op_DDT_m", "oxychlordane_m", "pp_DDE_m", "pp_DDT_m", "tr_nonachlor_m", "BDE_17_m", "BDE_28_m", "BDE_47_m", "BDE_85_m", "BDE_99_m", "BDE_100_m", "BDE_153_m", "BDE_154_m", "BDE_183_m", "BDE_66_m", "daidzein_m", "O_DMA_m", "equol_m", "enterodiol_m", "enterolactone_m", "genistein_m", "CREAMOUNT_m", "fcholamt_m", "cholamt_m", "trigamt_m", "phosamt_m", "cotinine_m", "selenium_m", "arsenic_m", "manganese_m", "chromium_m", "beryllium_m", "cobalt_m", "molybdenum_m", "cadmium_m", "tin_m", "antimony_m", "tellurium_m", "caesium_m", "barium_m", "nickel_m", "copper_m", "zinc_m", "tungsten_m", "platinum_m", "thallium_m", "lead_m", "uranium_m", "mMP_m", "mEP_m", "mCPP_m", "mBP_m", "miBP_m", "mECPP_m", "mCMHP_m", "mEHHP_m", "mEOHP_m", "mCHP_m", "mBzP_m", "mEHP_m", "mOP_m", "mNP_m", "BPA_m", "2_OH_4MeO_BP_m", "4_OH_BP_m", "24_OH_BP_m", "22_OH_4MeO_BP_m", "2244_OH_BP_m", "MP_m", "EP_m", "PP_m", "BP_m", "BzP_m", "HP_m", "4_HB_m", "34_DHB_m", "OH_Me_P_m", "OH_Et_P_m", "TCS_m", "TCC_m", "paracetamol_m", "4_aminophenol_m")
# remove _m
no_m <- gsub("_m$", "", colnames(lifemerg), ignore.case = TRUE)
# match and get the index order by female_no_f
# For each column present in the lookup, find its position in papername_m so
# the data columns can be renamed to the curated names(papername_m) labels.
# seq_along()/TRUE replace the old 1:length()/T shortcuts; logic unchanged.
index_m <- vector(mode = "numeric")
for(i in seq_along(no_m)) {
  mtchM <- which(papername_m %in% no_m[i])
  print(mtchM) # error check: integer(0) printed here means an unmatched column
  index_m <- append(index_m, mtchM)
}
#
# Assumes the which() hits line up one-to-one with the matched columns, in
# order -- TODO confirm if column order ever changes upstream.
colnames(lifemerg)[which(no_m %in% papername_m)] <- names(papername_m[index_m])
# prevent loop error because of the name
colnames(lifemerg) <- make.names(colnames(lifemerg))
## \\ mice ----
library(mice)
# Rebuild a mice::mids object from the stacked imputation data frame
# (.imp = 1 and .id = 2 name the imputation-number and row-id columns by
# position -- confirm against the lifemerg layout).
lifemice <- as.mids(lifemerg, .imp=1, .id = 2)
# ******** -----
# A. Y-full -----------------------------------------------------
# \\ models ----
## Male
# Lipid-adjusted model: regress log DNA fragmentation (SCSADFI) on the
# standardized log10 chemical plus male covariates, across the imputed
# datasets in `dat`.  `indvar` is the chemical column name spliced into
# the formula.
mvlm_ml <- function(indvar, dat) {
  fml <- sprintf("log(SCSADFI + 1e-10) ~ I(scale(log10(%s+1))) + lipids_m + Age_m + catbmi_m + LMSMKNOW + LMEXERCS + parity_m", indvar)
  fit <- with(data = dat, lm(as.formula(fml)))
}
# Creatinine-adjusted model (presumably for urinary analytes -- confirm):
# same covariates as mvlm_ml but CREAMOUNT_m replaces the lipid adjustment.
mvlm_mc <- function(indvar, dat) {
  fml <- sprintf("log(SCSADFI + 1e-10) ~ I(scale(log10(%s+1))) + CREAMOUNT_m + Age_m + catbmi_m + LMSMKNOW + LMEXERCS + parity_m", indvar)
  fit <- with(data = dat, lm(as.formula(fml)))
}
# Unadjusted-for-matrix model: demographic male covariates only.
mvlm_mn <- function(indvar, dat) {
  fml <- sprintf("log(SCSADFI + 1e-10) ~ I(scale(log10(%s+1))) + Age_m + catbmi_m + LMSMKNOW + LMEXERCS + parity_m", indvar)
  fit <- with(data = dat, lm(as.formula(fml)))
}
# \\ chem-list ----
# 3.1. Create list for matching in the loop to choose model
# Chemical-class lookup table: class name -> female ("_f") analyte column
# names. The male list cl_m is derived below by swapping the suffix to "_m".
# 13 classes / 128 analytes in total — 128 is the Bonferroni denominator
# (sig.level = 0.05/128) used in the power calculations further down.
cl_f <- list(
  # PCBs
  PCBs = c(
    "PCB_28_f", "PCB_44_f", "PCB_49_f", "PCB_52_f", "PCB_66_f", "PCB_74_f",
    "PCB_87_f", "PCB_99_f", "PCB_101_f", "PCB_105_f", "PCB_110_f",
    "PCB_118_f", "PCB_114_f", "PCB_128_f", "PCB_138_f", "PCB_146_f",
    "PCB_149_f", "PCB_151_f", "PCB_153_f", "PCB_156_f", "PCB_157_f",
    "PCB_167_f", "PCB_170_f", "PCB_172_f", "PCB_177_f", "PCB_178_f",
    "PCB_180_f", "PCB_183_f", "PCB_187_f", "PCB_189_f", "PCB_194_f",
    "PCB_195_f", "PCB_196_f", "PCB_201_f", "PCB_206_f", "PCB_209_f"
  ),
  # organochlorine pesticides
  OCPs = c(
    "HCB_f", "b_HCB_f", "g_HCB_f", "op_DDT_f", "pp_DDE_f", "pp_DDT_f",
    "oxychlordane_f", "tr_nonachlor_f", "mirex_f"
  ),
  # polybrominated compounds
  Polybrominated_cpds = c(
    "BB_153_f", "BDE_17_f", "BDE_28_f", "BDE_47_f", "BDE_66_f", "BDE_85_f",
    "BDE_99_f", "BDE_100_f", "BDE_153_f", "BDE_154_f", "BDE_183_f"
  ),
  # PFASs
  PFASs = c(
    "Et_PFOSA_AcOH_f", "Me_PFOSA_AcOH_f", "PFDeA_f", "PFNA_f", "PFOSA_f",
    "PFOS_f", "PFOA_f"
  ),
  # blood metals
  Blood_metals = c("blood_Cd_f", "blood_Pb_f", "blood_Hg_f"),
  # cotinine
  Cotinine = c("cotinine_f"),
  # phytoestrogens
  Phytoestrogens = c(
    "genistein_f", "daidzein_f", "O_DMA_f", "equol_f", "enterodiol_f",
    "enterolactone_f"
  ),
  # phthalates
  Phthalates = c(
    "mMP_f", "mEP_f", "mCPP_f", "mBP_f", "miBP_f", "mECPP_f", "mCMHP_f",
    "mEHHP_f", "mEOHP_f", "mCHP_f", "mBzP_f", "mEHP_f", "mOP_f", "mNP_f"
  ),
  # phenols
  Phenols = c(
    "BPA_f",
    # fixed: this entry read "X2_OH_4MeO_BP_m" (male-suffix typo). It was
    # harmless downstream because gsub("_f$", "_m") below left it alone,
    # but it was inconsistent with every other female entry.
    "X2_OH_4MeO_BP_f",
    "X4_OH_BP_f", "X24_OH_BP_f", "X22_OH_4MeO_BP_f", "X2244_OH_BP_f"
  ),
  # anti-microbial compounds
  Anti_microbial_cpds = c(
    "MP_f", "EP_f", "PP_f", "BP_f", "BzP_f", "HP_f", "X4_HB_f", "X34_DHB_f",
    "OH_Me_P_f", "OH_Et_P_f", "TCS_f", "TCC_f"
  ),
  # paracetamol and metabolite
  Paracetamols = c("paracetamol_f", "X4_aminophenol_f"),
  # urine metals
  Urine_metals = c(
    "manganese_f", "chromium_f", "beryllium_f", "cobalt_f", "molybdenum_f",
    "cadmium_f", "tin_f", "caesium_f", "barium_f", "nickel_f", "copper_f",
    "zinc_f", "tungsten_f", "platinum_f", "thallium_f", "lead_f", "uranium_f"
  ),
  # urine metalloids
  Urine_metalloids = c("selenium_f", "arsenic_f", "antimony_f", "tellurium_f")
)
cl_m <- cl_f
# Build the male chemical list by swapping the trailing "_f" suffix for "_m"
# in every class (list names are preserved by the [[i]] assignment).
for (i in seq_along(cl_m)) {
  cl_m[[i]] <- gsub("_f$", "_m", cl_m[[i]], ignore.case = TRUE)
}
# Operators
# not 15 groups in germaine paper OK
list_m_oper <- list(
M_creat = c("Phytoestrogens", "Phthalates", "Phenols", "Urine_metalloids", "Urine_metals", "Anti_microbial_cpds", "Paracetamols"),
M_lipid = c("PCBs", "OCPs", "Polybrominated_cpds"),
M_null = c("PFASs", "Cotinine", "Blood_metals"))
# \\ loop ----
ret_list <- list()
# Fit one regression per male analyte. The adjustment set (creatinine,
# lipids, or none) is chosen from the analyte's chemical class via
# list_m_oper. unlist(cl_m) is hoisted out of the loop (invariant).
chems <- unlist(cl_m)
for (i in seq_along(chems)) {
  tmpm_var <- chems[i]
  tmpm_oper <- names(cl_m[sapply(cl_m, "%in%", x = tmpm_var)])
  final_oper <- names(list_m_oper[sapply(list_m_oper, "%in%", x = tmpm_oper)])
  if (final_oper == "M_null") {
    frm <- mvlm_mn(tmpm_var, lifemice)
  } else if (final_oper == "M_creat") {
    frm <- mvlm_mc(tmpm_var, lifemice)
  } else {
    frm <- mvlm_ml(tmpm_var, lifemice)
  }
  print(tmpm_var)  # progress marker (appears after any fit warnings)
  ret_list[[tmpm_var]] <- frm
}
## \\ pool ----
# run
library(miceadds)
# Pool each analyte's per-imputation fits; record pooled R^2 and the p-value
# of the EDC term (row 2, column 5 of the pooled summary table). Rows are
# collected in a preallocated list (avoids the rbind-in-loop copy pattern).
chems <- unlist(cl_m)
rows <- vector("list", length(chems))
for (i in seq_along(chems)) {
  tmpm_var <- chems[i]
  input_m <- ret_list[[tmpm_var]]
  frm <- data.frame(r2est = pool.r.squared(input_m)[1],
                    pvaledc = summary(pool(input_m))[2, 5])
  frm$indvar <- tmpm_var
  print(tmpm_var)
  rows[[i]] <- frm
}
ret_df_full_r2 <- if (length(rows) > 0) do.call(rbind, rows) else data.frame()
# ******** -----
# A.1. For-simulate -----------------------------------------------------
# 060118 FDR simulation
ret_list_full <- list()
# Keep only the vector of pooled coefficient estimates per analyte
# (column 1 of the pooled summary) as input to the FDR simulation.
chems <- unlist(cl_m)
for (i in seq_along(chems)) {
  tmpm_var <- chems[i]
  ret_list_full[[tmpm_var]] <- summary(pool(ret_list[[tmpm_var]]))[, 1]
}
ret_df_full_r2_adj <- data.frame()
# Pooled R^2 (raw and adjusted), EDC-term p-value, and mean residual
# standard error (sigma) across imputations — inputs to the power simulation.
chems <- unlist(cl_m)
for (i in seq_along(chems)) {
  tmpm_var <- chems[i]
  input_m <- ret_list[[tmpm_var]]
  frm <- data.frame(r2est = pool.r.squared(input_m)[1],
                    ad_r2est = pool.r.squared(input_m, adjusted = TRUE)[1],
                    pvaledc = summary(pool(input_m))[2, 5])
  # residual SD of each per-imputation lm fit, then averaged;
  # vapply (not sapply) guarantees a numeric vector
  ysigma <- vapply(input_m$analyses, function(x) summary(x)$sigma, numeric(1))
  frm$ysigma_sd <- mean(ysigma)
  frm$indvar <- tmpm_var
  print(tmpm_var)
  ret_df_full_r2_adj <- rbind(ret_df_full_r2_adj, frm)
}
# save(ret_list_full, file = "results/sim_fragment_ind_SCSADFI.RData", compress = FALSE)
# save(ret_df_full_r2_adj, file = "results/sim_r2_fragment_ind_SCSADFI.RData", compress = FALSE)
# ******** -----
# B. Y-redu -----------------------------------------------------
# \\ models ----
## Male
mvlm_ml <- function(indvar, dat) {
  # Reduced (covariates-only) lipid model for the R^2 baseline: no chemical
  # term. `indvar` is kept for call-compatibility with the full-model
  # version but is intentionally unused (the original passed it to sprintf
  # against a format string with no %s placeholder).
  # Note: this redefines the full-model mvlm_ml from section A.
  setform <- "log(SCSADFI + 1e-10) ~ lipids_m + Age_m + catbmi_m + LMSMKNOW + LMEXERCS + parity_m"
  with(data = dat, lm(as.formula(setform)))
}
mvlm_mc <- function(indvar, dat) {
  # Reduced (covariates-only) creatinine model for the R^2 baseline: no
  # chemical term. `indvar` is kept for call-compatibility but intentionally
  # unused (the original passed it to sprintf against a format string with
  # no %s placeholder). Note: redefines the full-model mvlm_mc.
  setform <- "log(SCSADFI + 1e-10) ~ CREAMOUNT_m + Age_m + catbmi_m + LMSMKNOW + LMEXERCS + parity_m"
  with(data = dat, lm(as.formula(setform)))
}
mvlm_mn <- function(indvar, dat) {
  # Reduced (covariates-only) unadjusted model for the R^2 baseline: no
  # chemical term. `indvar` is kept for call-compatibility but intentionally
  # unused (the original passed it to sprintf against a format string with
  # no %s placeholder). Note: redefines the full-model mvlm_mn.
  setform <- "log(SCSADFI + 1e-10) ~ Age_m + catbmi_m + LMSMKNOW + LMEXERCS + parity_m"
  with(data = dat, lm(as.formula(setform)))
}
# \\ chem-list ----
# 3.1. Create list for matching in the loop to choose model
cl_f = list(
#PCBs
PCBs = c(
"PCB_28_f",
"PCB_44_f",
"PCB_49_f",
"PCB_52_f",
"PCB_66_f",
"PCB_74_f",
"PCB_87_f",
"PCB_99_f",
"PCB_101_f",
"PCB_105_f",
"PCB_110_f",
"PCB_118_f",
"PCB_114_f",
"PCB_128_f",
"PCB_138_f",
"PCB_146_f",
"PCB_149_f",
"PCB_151_f",
"PCB_153_f",
"PCB_156_f",
"PCB_157_f",
"PCB_167_f",
"PCB_170_f",
"PCB_172_f",
"PCB_177_f",
"PCB_178_f",
"PCB_180_f",
"PCB_183_f",
"PCB_187_f",
"PCB_189_f",
"PCB_194_f",
"PCB_195_f",
"PCB_196_f",
"PCB_201_f",
"PCB_206_f",
"PCB_209_f"
),
#OCPs
OCPs = c(
"HCB_f",
"b_HCB_f",
"g_HCB_f",
"op_DDT_f",
"pp_DDE_f",
"pp_DDT_f",
"oxychlordane_f",
"tr_nonachlor_f",
"mirex_f"),
#PBC
Polybrominated_cpds = c(
"BB_153_f",
"BDE_17_f",
"BDE_28_f",
"BDE_47_f",
"BDE_66_f",
"BDE_85_f",
"BDE_99_f",
"BDE_100_f",
"BDE_153_f",
"BDE_154_f",
"BDE_183_f"),
#PFASs
PFASs = c(
"Et_PFOSA_AcOH_f",
"Me_PFOSA_AcOH_f",
"PFDeA_f",
"PFNA_f",
"PFOSA_f",
"PFOS_f",
"PFOA_f"),
#blood metals
Blood_metals = c(
"blood_Cd_f",
"blood_Pb_f",
"blood_Hg_f"),
#cotinine
Cotinine = c(
"cotinine_f"),
#phytoestrogens
Phytoestrogens = c(
"genistein_f",
"daidzein_f",
"O_DMA_f",
"equol_f",
"enterodiol_f",
"enterolactone_f"),
#phthalates
Phthalates = c(
"mMP_f",
"mEP_f",
"mCPP_f",
"mBP_f",
"miBP_f",
"mECPP_f",
"mCMHP_f",
"mEHHP_f",
"mEOHP_f",
"mCHP_f",
"mBzP_f",
"mEHP_f",
"mOP_f",
"mNP_f"),
#phenols
Phenols = c(
"BPA_f",
"X2_OH_4MeO_BP_m",
"X4_OH_BP_f",
"X24_OH_BP_f",
"X22_OH_4MeO_BP_f",
"X2244_OH_BP_f"),
#anti microbial
Anti_microbial_cpds = c(
"MP_f",
"EP_f",
"PP_f",
"BP_f",
"BzP_f",
"HP_f",
"X4_HB_f",
"X34_DHB_f",
"OH_Me_P_f",
"OH_Et_P_f",
"TCS_f",
"TCC_f"),
#paracetamol
Paracetamols = c(
"paracetamol_f",
"X4_aminophenol_f"),
#urine metals
Urine_metals = c(
"manganese_f",
"chromium_f",
"beryllium_f",
"cobalt_f",
"molybdenum_f",
"cadmium_f",
"tin_f",
"caesium_f",
"barium_f",
"nickel_f",
"copper_f",
"zinc_f",
"tungsten_f",
"platinum_f",
"thallium_f",
"lead_f",
"uranium_f"),
#urine metalloids
Urine_metalloids = c(
"selenium_f",
"arsenic_f",
"antimony_f",
"tellurium_f")
)
cl_m <- cl_f
# replace the _f in the cl_m to _m
for(i in 1:length(cl_m)){
cl_m[[i]] <- gsub("_f$", "_m", cl_m[[i]], ignore.case = T)
# print(cl_m[[i]])
}
# Operators
list_m_oper <- list(
M_creat = c("Phytoestrogens", "Phthalates", "Phenols", "Urine_metalloids", "Urine_metals", "Anti_microbial_cpds", "Paracetamols"),
M_lipid = c("PCBs", "OCPs", "Polybrominated_cpds"),
M_null = c("PFASs", "Cotinine", "Blood_metals"))
# \\ loop ----
ret_list <- list()
for(i in 1:length(unlist(cl_m))) {
tmpm_var <- unlist(cl_m)[i]
tmpm_oper <- names(cl_m[sapply(cl_m, "%in%", x = tmpm_var)])
final_oper <- names(list_m_oper[sapply(list_m_oper, "%in%", x = tmpm_oper)])
if (final_oper == "M_null"){
frm <- mvlm_mn(tmpm_var, lifemice)
} else if (final_oper == "M_creat"){
frm <- mvlm_mc(tmpm_var, lifemice)
} else {
frm <- mvlm_ml(tmpm_var, lifemice)
}
print(tmpm_var) # after the warnings
ret_list[[tmpm_var]] <- frm
}
## \\ pool ----
# run
library(miceadds)
ret_df_redu_r2 <- data.frame()
for(i in 1:length(unlist(cl_m))) {
tmpm_var <- unlist(cl_m)[i]
input_m <- ret_list[[tmpm_var]]
frm <- as.data.frame(pool.r.squared(input_m)[1])
# frm <- as.data.frame(pool.r.squared(input_m, adjusted = TRUE)[1])
colnames(frm) <- "r2est"
frm$indvar <- tmpm_var
print(tmpm_var)
ret_df_redu_r2 <- rbind(ret_df_redu_r2, frm)
}
# ******** -----
## X. Saving Rdata ----
## merging
es <- cbind(ret_df_full_r2, ret_df_redu_r2)[, c(1, 4, 3, 2)]
colnames(es) <- c("r2full", "r2redu", "indvar", "pvaledc")
es$f2 <- (es$r2full-es$r2redu)/(1-es$r2full)
# remove -ve f2
es <- es[(es$f2 > 0),]
# dir
outdirectory <- "results"
outfilename <- "mv_EWAS_power_semen_chems_SCSADFI.Rdata"
# outfilename <- sprintf("%s_reg_7.Rdata", depVariable)
save(file=file.path(outdirectory,outfilename),
lifemerg, impute_f, impute_m, life, es)
# load("results/mv_EWAS_power_semen_chems_SCSADFI.Rdata")
# ******** -----
# C. Effect-size -----------------------------------------------------
library(pwr)
# \\ n_80%
# Denominator degrees of freedom needed for 80% power at the Bonferroni
# alpha (0.05/128), per analyte; sample size n = v + 7 predictors + 1.
v_list <- vector("list", nrow(es))   # preallocate; avoids growth-in-loop
for (i in seq_len(nrow(es))) {       # seq_len: safe when nrow(es) == 0
  input_1 <- es$f2[i]
  if (input_1 > 0) {
    v_list[[i]] <- pwr.f2.test(u = 1, f2 = input_1,
                               sig.level = 0.05 / 128, power = 0.8)$v
  } else {
    v_list[[i]] <- 0  # defensive; rows with f2 <= 0 were dropped above
  }
}
es$n_80 <- ceiling(unlist(v_list)) + 7 + 1
# \\ power
# Achieved power at the study's realized size (n = 473, full model with
# 7 predictors, Bonferroni alpha 0.05/128), per analyte.
p_list <- vector("list", nrow(es))   # preallocate; avoids growth-in-loop
for (i in seq_len(nrow(es))) {
  input_1 <- es$f2[i]
  if (input_1 > 0) {
    p_list[[i]] <- pwr.f2.test(u = 1, v = 473 - 7 - 1, f2 = input_1,
                               sig.level = 0.05 / 128)$p
  } else {
    p_list[[i]] <- 0  # defensive; rows with f2 <= 0 were dropped above
  }
}
es$power <- round(unlist(p_list), 2)
es$power2 <- round(unlist(p_list), 4)
# sample size for 95% f2
quantile(es$f2, 0.95)
pwr.f2.test(u = 1, f2 = quantile(es$f2, 0.95), sig.level = 0.05/128, power = 0.8)
# write
# write.csv(es, file = "results/power_bon_adj_p_morphology.csv")
# write.csv(es, file = "results/power_nil_adj_p_morphology.csv")
# \\ cp-power-plot----
# figure in the paper
# power vs sample size
# Power curve per analyte over a grid of sample sizes (full-model
# denominator df = n - 7 predictors - 1), for the power-vs-n figure.
cp_p_list <- vector("list", nrow(es))  # preallocate; avoids growth-in-loop
cpseq <- seq(300, 700, by = 40)        # sample-size grid
for (i in seq_len(nrow(es))) {
  input_1 <- es$f2[i]
  power_life <- vapply(cpseq, function(x) {
    pwr.f2.test(u = 1, v = x - 7 - 1, f2 = input_1, sig.level = 0.05 / 128)$p
  }, numeric(1))
  cp_p_list[[i]] <- data.frame(rep(es$indvar[i], length(cpseq)),
                               rep(input_1, length(cpseq)), cpseq, power_life)
}
# create df
powerplot_df <- do.call("rbind", cp_p_list)
colnames(powerplot_df) <- c("EDC", "effect_size", "sample_size", "power")
cl_f = list(
#PCBs
PCBs = c(
"PCB_28_f",
"PCB_44_f",
"PCB_49_f",
"PCB_52_f",
"PCB_66_f",
"PCB_74_f",
"PCB_87_f",
"PCB_99_f",
"PCB_101_f",
"PCB_105_f",
"PCB_110_f",
"PCB_118_f",
"PCB_114_f",
"PCB_128_f",
"PCB_138_f",
"PCB_146_f",
"PCB_149_f",
"PCB_151_f",
"PCB_153_f",
"PCB_156_f",
"PCB_157_f",
"PCB_167_f",
"PCB_170_f",
"PCB_172_f",
"PCB_177_f",
"PCB_178_f",
"PCB_180_f",
"PCB_183_f",
"PCB_187_f",
"PCB_189_f",
"PCB_194_f",
"PCB_195_f",
"PCB_196_f",
"PCB_201_f",
"PCB_206_f",
"PCB_209_f"
),
#OCPs
OCPs = c(
"HCB_f",
"b_HCB_f",
"g_HCB_f",
"op_DDT_f",
"pp_DDE_f",
"pp_DDT_f",
"oxychlordane_f",
"tr_nonachlor_f",
"mirex_f"),
#PBC
PBBs = c(
"BB_153_f"),
PBDEs = c(
"BDE_17_f",
"BDE_28_f",
"BDE_47_f",
"BDE_66_f",
"BDE_85_f",
"BDE_99_f",
"BDE_100_f",
"BDE_153_f",
"BDE_154_f",
"BDE_183_f"),
#PFASs
PFASs = c(
"Et_PFOSA_AcOH_f",
"Me_PFOSA_AcOH_f",
"PFDeA_f",
"PFNA_f",
"PFOSA_f",
"PFOS_f",
"PFOA_f"),
#blood metals
Blood_metals = c(
"blood_Cd_f",
"blood_Pb_f",
"blood_Hg_f"),
#cotinine
Cotinine = c(
"cotinine_f"),
#phytoestrogens
Phytoestrogens = c(
"genistein_f",
"daidzein_f",
"O_DMA_f",
"equol_f",
"enterodiol_f",
"enterolactone_f"),
#phthalates
Phthalates = c(
"mMP_f",
"mEP_f",
"mCPP_f",
"mBP_f",
"miBP_f",
"mECPP_f",
"mCMHP_f",
"mEHHP_f",
"mEOHP_f",
"mCHP_f",
"mBzP_f",
"mEHP_f",
"mOP_f",
"mNP_f"),
#phenols
Bisphenol_A = c(
"BPA_f"),
Benzophenones = c(
"X2_OH_4MeO_BP_m",
"X4_OH_BP_f",
"X24_OH_BP_f",
"X22_OH_4MeO_BP_f",
"X2244_OH_BP_f"),
#anti microbial
Anti_microbial_cpds = c(
"MP_f",
"EP_f",
"PP_f",
"BP_f",
"BzP_f",
"HP_f",
"X4_HB_f",
"X34_DHB_f",
"OH_Me_P_f",
"OH_Et_P_f",
"TCS_f",
"TCC_f"),
#paracetamol
Paracetamols = c(
"paracetamol_f",
"X4_aminophenol_f"),
#urine metals
Urine_metals = c(
"manganese_f",
"chromium_f",
"beryllium_f",
"cobalt_f",
"molybdenum_f",
"cadmium_f",
"tin_f",
"caesium_f",
"barium_f",
"nickel_f",
"copper_f",
"zinc_f",
"tungsten_f",
"platinum_f",
"thallium_f",
"lead_f",
"uranium_f"),
#urine metalloids
Urine_metalloids = c(
"selenium_f",
"arsenic_f",
"antimony_f",
"tellurium_f")
)
cl_m <- cl_f
# replace the _f in the cl_m to _m
for(i in 1:length(cl_m)){
cl_m[[i]] <- gsub("_f$", "_m", cl_m[[i]], ignore.case = T)
# print(cl_m[[i]])
}
# create category
# Map each analyte row to its chemical class. The default tag "n____" marks
# any analyte matching no class. Preallocating to nrow(powerplot_df) fixes
# the original length-1 start, which left unmatched rows as NA and could
# leave the vector shorter than the data frame.
EDC_class <- rep("n____", nrow(powerplot_df))
for (i in seq_along(cl_m)) {
  mtchC <- which(powerplot_df$EDC %in% cl_m[[i]])
  EDC_class[mtchC] <- names(cl_m)[i]
}
powerplot_df <- cbind(powerplot_df, EDC_class)
# create var for ordering x axis (thru factor lv)
powerplot_df$EDC <- factor(powerplot_df$EDC, levels = unlist(cl_m), ordered = TRUE) # manhat X in my order
powerplot_df$EDC_class <- factor(powerplot_df$EDC_class, levels = names(cl_m), ordered = TRUE) # manhat legend in my order
# color code
colorCodes <- c("#ea7b00",
"#0195fb",
"#3aae24",
"#c821a7",
"#01df98",
"#da0085",
"#e5c440",
"#f18cff",
"#535800",
"#972064",
"#00b2b6",
"#964400",
"#5e4882",
"#ff9288",
"#b67696") # 15 color
names(colorCodes) <- names(cl_m)
# names(colorCodes) <- levels(factor(get(tempname)$categ)) # by factor name to match the plot
# ggplot
library(ggplot2); library(ggrepel)
colorCodes_gg <- scale_colour_manual(name = "ECD_class",values = colorCodes)
# txt df
tmp1 <- subset(powerplot_df, sample_size == 700)
tmp1 <- tmp1[order(tmp1[,4]),]
tmp1 <- tail(tmp1, 5)
# dput(as.character(tmp1$EDC))
tmp1$EDC <- c("lead", "mOP", "BPA", "uranium", "4-aminophenol") # manual
# plot
p <- ggplot(powerplot_df, aes(sample_size, power, group = EDC))
p <- p + geom_line(aes(colour = factor(EDC_class))) + xlim(300, 750)
p <- p + colorCodes_gg
p <- p + ggtitle("Power vs sample size for the endpoint DNA fragmentation") +
xlab("Sample size") +
ylab("Statistical power")
p <- p + geom_text_repel(data = tmp1, aes(label = EDC),
size = 3,
box.padding = 0.2, # Add extra padding around each text label.
point.padding = 0.3, # Add extra padding around each data point.
segment.color = '#000000', # Color of the line segments.
segment.size = 0.5, # Width of the line segments.
arrow = arrow(length = unit(0.01, 'npc')), # Draw an arrow from the label to the data point.
force = 5, # Strength of the repulsion force.
nudge_x = 30,
nudge_y = 0.005)
p
ggsave("results/power_n_SCSADFI.png", scale=1, dpi=400)
# ggsave("results/power_n_SCSADFI.svg", scale=1, dpi=400)
# library(plotly)
# ggplotly(p)
# quantile
library(dplyr); library(magrittr)
powerquan <- powerplot_df %>% filter(sample_size == 500)
quantile(powerquan$power)
# 0% 25% 50% 75% 100%
# 0.0003948346 0.0008584900 0.0021847938 0.0116988981 0.4028237221
| /src/2 power_f2_SCSADFI.R | permissive | jakemkc/ewas_sample_size | R | false | false | 25,563 | r | # May 2 2017
## Goal: Power check for multiple regression (NOT MV)
# selected x1 SCSADFI outcome, as germaine demand in the revision
rm(list=ls()) # clear workspace; # ls() # list objects in the workspace
cat("\014") # same as ctrl-L
options(max.print=3000) # default 1000, and rstudio set at 5000
options(warn=1) # default = 0. To check loop warnings
# Load -----------------------------------------------------
load("results/mv_EWAS_lm_semen_chems_in.Rdata")
## \\ name-change ----
## These 133 name are not R fault-proof (e.g.4_HB_f). Order of (papername) content is matched with (name(papername))
papername_m <- c("pcb101amt", "pcb105amt", "pcb110amt", "pcb114amt", "pcb118amt", "pcb128amt", "pcb138amt", "pcb146amt", "pcb149amt", "pcb151amt", "pcb153amt", "pcb156amt",
"pcb157amt", "pcb167amt", "pcb170amt", "pcb172amt", "pcb177amt", "pcb178amt", "pcb180amt", "pcb183amt", "pcb187amt", "pcb189amt", "pcb194amt", "pcb195amt",
"pcb196amt", "pcb201amt", "pcb206amt", "pcb209amt", "pcb028amt", "pcb044amt", "pcb049amt", "pcb052amt", "pcb066amt", "pcb074amt", "pcb087amt", "pcb099amt",
"pfcepahamt", "pfcmpahamt", "pfcpfdeamt", "pfcpfnaamt", "pfcpfsaamt", "pfcpfosamt", "pfcpfoaamt", "metbcdamt", "metbpbamt", "metthgamt", "popbb1amt", "popbhcamt",
"popghcamt", "pophcbamt", "popmiramt", "popodtamt", "popoxyamt", "poppdeamt", "poppdtamt", "poptnaamt", "pbdebr1amt", "pbdebr2amt", "pbdebr3amt", "pbdebr4amt",
"pbdebr5amt", "pbdebr6amt", "pbdebr7amt", "pbdebr8amt", "pbdebr9amt", "pbdebr66amt",
"DAZAMOUNT", "DMAAMOUNT", "EQUAMOUNT", "ETDAMOUNT", "ETLAMOUNT", "GNSAMOUNT", "CREAMOUNT", "fcholamt", "cholamt", "trigamt", "phosamt", "cotamt", "Selenium",
"Arsenic", "Manganese", "Chromium", "Beryllium", "Cobalt", "Molybdenum", "Cadmium_Corrected", "Tin", "Antimony", "Tellurium", "Caesium", "Barium", "Nickel",
"Copper", "Zinc", "Tungsten", "Platinum", "Thallium", "Lead", "Uranium", "mMethylPhthalate", "mEthylPhthalate", "mCarboxyPropylPhthalate", "mButylPhthalate",
"mIsobutylPhthalate", "mCarboxyEthylPentylPhthalate", "mCarboxyMethylHexylPhthalate", "mEthylHydroxyHexylPhthalate", "mEthylOxoHexylPhthalate", "mCycloHexylPhthalate",
"mBenzylPhthalate", "mEthylHexylPhthalate", "mOctylPhthalate", "mIsononylPhthalate", "BPA", "HydroxyMethoxyBenzoPhenone", "HydroxyBenzoPhenone", "DiHydroxyBenzoPhenone",
"DiHydroxyMethoxyBenzoPhenone", "TetraHydroxyBenzoPhenone", "MeP", "EtP", "PrP", "BuP", "BzP", "HeP", "X_4_HB", "X_3_4_DHB", "OH_MeP", "OH_EtP", "TCS", "TCC", "PAP", "APAP")
names(papername_m) <- c("PCB_101_m", "PCB_105_m", "PCB_110_m", "PCB_118_m", "PCB_114_m", "PCB_128_m", "PCB_138_m", "PCB_146_m", "PCB_149_m", "PCB_151_m", "PCB_153_m", "PCB_156_m", "PCB_157_m", "PCB_167_m", "PCB_170_m", "PCB_172_m", "PCB_177_m", "PCB_178_m", "PCB_180_m", "PCB_183_m", "PCB_187_m", "PCB_189_m", "PCB_194_m", "PCB_195_m", "PCB_196_m", "PCB_201_m", "PCB_206_m", "PCB_209_m", "PCB_28_m", "PCB_44_m", "PCB_49_m", "PCB_52_m", "PCB_66_m", "PCB_74_m", "PCB_87_m", "PCB_99_m", "Et_PFOSA_AcOH_m", "Me_PFOSA_AcOH_m", "PFDeA_m", "PFNA_m", "PFOSA_m", "PFOS_m", "PFOA_m", "blood_Cd_m", "blood_Pb_m", "blood_Hg_m", "BB_153_m", "b_HCB_m", "g_HCB_m", "HCB_m", "mirex_m", "op_DDT_m", "oxychlordane_m", "pp_DDE_m", "pp_DDT_m", "tr_nonachlor_m", "BDE_17_m", "BDE_28_m", "BDE_47_m", "BDE_85_m", "BDE_99_m", "BDE_100_m", "BDE_153_m", "BDE_154_m", "BDE_183_m", "BDE_66_m", "daidzein_m", "O_DMA_m", "equol_m", "enterodiol_m", "enterolactone_m", "genistein_m", "CREAMOUNT_m", "fcholamt_m", "cholamt_m", "trigamt_m", "phosamt_m", "cotinine_m", "selenium_m", "arsenic_m", "manganese_m", "chromium_m", "beryllium_m", "cobalt_m", "molybdenum_m", "cadmium_m", "tin_m", "antimony_m", "tellurium_m", "caesium_m", "barium_m", "nickel_m", "copper_m", "zinc_m", "tungsten_m", "platinum_m", "thallium_m", "lead_m", "uranium_m", "mMP_m", "mEP_m", "mCPP_m", "mBP_m", "miBP_m", "mECPP_m", "mCMHP_m", "mEHHP_m", "mEOHP_m", "mCHP_m", "mBzP_m", "mEHP_m", "mOP_m", "mNP_m", "BPA_m", "2_OH_4MeO_BP_m", "4_OH_BP_m", "24_OH_BP_m", "22_OH_4MeO_BP_m", "2244_OH_BP_m", "MP_m", "EP_m", "PP_m", "BP_m", "BzP_m", "HP_m", "4_HB_m", "34_DHB_m", "OH_Me_P_m", "OH_Et_P_m", "TCS_m", "TCC_m", "paracetamol_m", "4_aminophenol_m")
# remove _m
no_m <- gsub("_m$", "", colnames(lifemerg), ignore.case = T)
# match and get the index order by female_no_f
index_m <- vector(mode = "numeric")
for(i in 1:length(no_m)) {
mtchM <- which(papername_m %in% no_m[i])
print(mtchM) # error check
index_m <- append(index_m, mtchM)
}
#
colnames(lifemerg)[which(no_m %in% papername_m)] <- names(papername_m[index_m])
# prevent loop error because of the name
colnames(lifemerg) <- make.names(colnames(lifemerg))
## \\ mice ----
library(mice)
lifemice <- as.mids(lifemerg, .imp=1, .id = 2)
# ******** -----
# A. Y-full -----------------------------------------------------
# \\ models ----
## Male
mvlm_ml <- function(indvar, dat) {
  # Full model, lipid-adjusted: regress log(DNA fragmentation index) on the
  # standardized log10-transformed chemical plus serum lipids and covariates.
  #
  # indvar: character scalar, column name of the male chemical exposure.
  # dat:    multiply-imputed data (mice::mids; with() dispatches to
  #         with.mids) or a plain data.frame.
  # Returns the fit object (mira for mids input), ready for pooling.
  setform <- sprintf("log(SCSADFI + 1e-10) ~ I(scale(log10(%s+1))) + lipids_m + Age_m + catbmi_m + LMSMKNOW + LMEXERCS + parity_m", indvar)
  # return the fit directly (the original's `mod <-` assignment was dead code)
  with(data = dat, lm(as.formula(setform)))
}
mvlm_mc <- function(indvar, dat) {
  # Full model, creatinine-adjusted (urinary analytes): regress log(DFI)
  # on the standardized log10 chemical plus urinary creatinine and covariates.
  #
  # indvar: character scalar, column name of the male chemical exposure.
  # dat:    mice::mids object (or a plain data.frame).
  # Returns the fit object (mira for mids input), ready for pooling.
  setform <- sprintf("log(SCSADFI + 1e-10) ~ I(scale(log10(%s+1))) + CREAMOUNT_m + Age_m + catbmi_m + LMSMKNOW + LMEXERCS + parity_m", indvar)
  # return the fit directly (the original's `mod <-` assignment was dead code)
  with(data = dat, lm(as.formula(setform)))
}
mvlm_mn <- function(indvar, dat) {
  # Full model, no dilution adjustment (blood-based analytes, PFASs,
  # cotinine): standardized log10 chemical plus core covariates only.
  #
  # indvar: character scalar, column name of the male chemical exposure.
  # dat:    mice::mids object (or a plain data.frame).
  # Returns the fit object (mira for mids input), ready for pooling.
  setform <- sprintf("log(SCSADFI + 1e-10) ~ I(scale(log10(%s+1))) + Age_m + catbmi_m + LMSMKNOW + LMEXERCS + parity_m", indvar)
  # return the fit directly (the original's `mod <-` assignment was dead code)
  with(data = dat, lm(as.formula(setform)))
}
# \\ chem-list ----
# 3.1. Create list for matching in the loop to choose model
cl_f = list(
#PCBs
PCBs = c(
"PCB_28_f",
"PCB_44_f",
"PCB_49_f",
"PCB_52_f",
"PCB_66_f",
"PCB_74_f",
"PCB_87_f",
"PCB_99_f",
"PCB_101_f",
"PCB_105_f",
"PCB_110_f",
"PCB_118_f",
"PCB_114_f",
"PCB_128_f",
"PCB_138_f",
"PCB_146_f",
"PCB_149_f",
"PCB_151_f",
"PCB_153_f",
"PCB_156_f",
"PCB_157_f",
"PCB_167_f",
"PCB_170_f",
"PCB_172_f",
"PCB_177_f",
"PCB_178_f",
"PCB_180_f",
"PCB_183_f",
"PCB_187_f",
"PCB_189_f",
"PCB_194_f",
"PCB_195_f",
"PCB_196_f",
"PCB_201_f",
"PCB_206_f",
"PCB_209_f"
),
#OCPs
OCPs = c(
"HCB_f",
"b_HCB_f",
"g_HCB_f",
"op_DDT_f",
"pp_DDE_f",
"pp_DDT_f",
"oxychlordane_f",
"tr_nonachlor_f",
"mirex_f"),
#PBC
Polybrominated_cpds = c(
"BB_153_f",
"BDE_17_f",
"BDE_28_f",
"BDE_47_f",
"BDE_66_f",
"BDE_85_f",
"BDE_99_f",
"BDE_100_f",
"BDE_153_f",
"BDE_154_f",
"BDE_183_f"),
#PFASs
PFASs = c(
"Et_PFOSA_AcOH_f",
"Me_PFOSA_AcOH_f",
"PFDeA_f",
"PFNA_f",
"PFOSA_f",
"PFOS_f",
"PFOA_f"),
#blood metals
Blood_metals = c(
"blood_Cd_f",
"blood_Pb_f",
"blood_Hg_f"),
#cotinine
Cotinine = c(
"cotinine_f"),
#phytoestrogens
Phytoestrogens = c(
"genistein_f",
"daidzein_f",
"O_DMA_f",
"equol_f",
"enterodiol_f",
"enterolactone_f"),
#phthalates
Phthalates = c(
"mMP_f",
"mEP_f",
"mCPP_f",
"mBP_f",
"miBP_f",
"mECPP_f",
"mCMHP_f",
"mEHHP_f",
"mEOHP_f",
"mCHP_f",
"mBzP_f",
"mEHP_f",
"mOP_f",
"mNP_f"),
#phenols
Phenols = c(
"BPA_f",
"X2_OH_4MeO_BP_m",
"X4_OH_BP_f",
"X24_OH_BP_f",
"X22_OH_4MeO_BP_f",
"X2244_OH_BP_f"),
#anti microbial
Anti_microbial_cpds = c(
"MP_f",
"EP_f",
"PP_f",
"BP_f",
"BzP_f",
"HP_f",
"X4_HB_f",
"X34_DHB_f",
"OH_Me_P_f",
"OH_Et_P_f",
"TCS_f",
"TCC_f"),
#paracetamol
Paracetamols = c(
"paracetamol_f",
"X4_aminophenol_f"),
#urine metals
Urine_metals = c(
"manganese_f",
"chromium_f",
"beryllium_f",
"cobalt_f",
"molybdenum_f",
"cadmium_f",
"tin_f",
"caesium_f",
"barium_f",
"nickel_f",
"copper_f",
"zinc_f",
"tungsten_f",
"platinum_f",
"thallium_f",
"lead_f",
"uranium_f"),
#urine metalloids
Urine_metalloids = c(
"selenium_f",
"arsenic_f",
"antimony_f",
"tellurium_f")
)
cl_m <- cl_f
# replace the _f in the cl_m to _m
for(i in 1:length(cl_m)){
cl_m[[i]] <- gsub("_f$", "_m", cl_m[[i]], ignore.case = T)
# print(cl_m[[i]])
}
# Operators
# not 15 groups in germaine paper OK
list_m_oper <- list(
M_creat = c("Phytoestrogens", "Phthalates", "Phenols", "Urine_metalloids", "Urine_metals", "Anti_microbial_cpds", "Paracetamols"),
M_lipid = c("PCBs", "OCPs", "Polybrominated_cpds"),
M_null = c("PFASs", "Cotinine", "Blood_metals"))
# \\ loop ----
ret_list <- list()
for(i in 1:length(unlist(cl_m))) {
tmpm_var <- unlist(cl_m)[i]
tmpm_oper <- names(cl_m[sapply(cl_m, "%in%", x = tmpm_var)])
final_oper <- names(list_m_oper[sapply(list_m_oper, "%in%", x = tmpm_oper)])
if (final_oper == "M_null"){
frm <- mvlm_mn(tmpm_var, lifemice)
} else if (final_oper == "M_creat"){
frm <- mvlm_mc(tmpm_var, lifemice)
} else {
frm <- mvlm_ml(tmpm_var, lifemice)
}
print(tmpm_var) # after the warnings
ret_list[[tmpm_var]] <- frm
}
## \\ pool ----
# run
library(miceadds)
ret_df_full_r2 <- data.frame()
for(i in 1:length(unlist(cl_m))) {
tmpm_var <- unlist(cl_m)[i]
input_m <- ret_list[[tmpm_var]]
frm <- data.frame(r2est = pool.r.squared(input_m)[1], pvaledc = summary(pool(input_m))[2,5])
frm$indvar <- tmpm_var
print(tmpm_var)
ret_df_full_r2 <- rbind(ret_df_full_r2, frm)
}
# ******** -----
# A.1. For-simulate -----------------------------------------------------
# 060118 FDR simulation
ret_list_full <- list()
for(i in 1:length(unlist(cl_m))) {
tmpm_var <- unlist(cl_m)[i]
input_m <- ret_list[[tmpm_var]]
out1 <- summary(pool(input_m))[, 1]
ret_list_full[[tmpm_var]] <- out1
}
ret_df_full_r2_adj <- data.frame()
for(i in 1:length(unlist(cl_m))) {
tmpm_var <- unlist(cl_m)[i]
input_m <- ret_list[[tmpm_var]]
frm <- data.frame(r2est = pool.r.squared(input_m)[1], ad_r2est = pool.r.squared(input_m, adjusted = TRUE)[1], pvaledc = summary(pool(input_m))[2,5])
# get y SD (sigma)
ysigma <- sapply(input_m$analyses, function(x){
tmp1 <- summary(x)
tmp1$sigma
})
frm$ysigma_sd <- mean(ysigma)
frm$indvar <- tmpm_var
print(tmpm_var)
ret_df_full_r2_adj <- rbind(ret_df_full_r2_adj, frm)
}
# save(ret_list_full, file = "results/sim_fragment_ind_SCSADFI.RData", compress = FALSE)
# save(ret_df_full_r2_adj, file = "results/sim_r2_fragment_ind_SCSADFI.RData", compress = FALSE)
# ******** -----
# B. Y-redu -----------------------------------------------------
# \\ models ----
## Male
mvlm_ml <- function(indvar, dat) {
setform <- sprintf("log(SCSADFI + 1e-10) ~ lipids_m + Age_m + catbmi_m + LMSMKNOW + LMEXERCS + parity_m", indvar)
mod <- with(data = dat, lm(as.formula(setform)))
}
mvlm_mc <- function(indvar, dat) {
setform <- sprintf("log(SCSADFI + 1e-10) ~ CREAMOUNT_m + Age_m + catbmi_m + LMSMKNOW + LMEXERCS + parity_m", indvar)
mod <- with(data = dat, lm(as.formula(setform)))
}
mvlm_mn <- function(indvar, dat) {
setform <- sprintf("log(SCSADFI + 1e-10) ~ Age_m + catbmi_m + LMSMKNOW + LMEXERCS + parity_m", indvar)
mod <- with(data = dat, lm(as.formula(setform)))
}
# \\ chem-list ----
# 3.1. Create list for matching in the loop to choose model
cl_f = list(
#PCBs
PCBs = c(
"PCB_28_f",
"PCB_44_f",
"PCB_49_f",
"PCB_52_f",
"PCB_66_f",
"PCB_74_f",
"PCB_87_f",
"PCB_99_f",
"PCB_101_f",
"PCB_105_f",
"PCB_110_f",
"PCB_118_f",
"PCB_114_f",
"PCB_128_f",
"PCB_138_f",
"PCB_146_f",
"PCB_149_f",
"PCB_151_f",
"PCB_153_f",
"PCB_156_f",
"PCB_157_f",
"PCB_167_f",
"PCB_170_f",
"PCB_172_f",
"PCB_177_f",
"PCB_178_f",
"PCB_180_f",
"PCB_183_f",
"PCB_187_f",
"PCB_189_f",
"PCB_194_f",
"PCB_195_f",
"PCB_196_f",
"PCB_201_f",
"PCB_206_f",
"PCB_209_f"
),
#OCPs
OCPs = c(
"HCB_f",
"b_HCB_f",
"g_HCB_f",
"op_DDT_f",
"pp_DDE_f",
"pp_DDT_f",
"oxychlordane_f",
"tr_nonachlor_f",
"mirex_f"),
#PBC
Polybrominated_cpds = c(
"BB_153_f",
"BDE_17_f",
"BDE_28_f",
"BDE_47_f",
"BDE_66_f",
"BDE_85_f",
"BDE_99_f",
"BDE_100_f",
"BDE_153_f",
"BDE_154_f",
"BDE_183_f"),
#PFASs
PFASs = c(
"Et_PFOSA_AcOH_f",
"Me_PFOSA_AcOH_f",
"PFDeA_f",
"PFNA_f",
"PFOSA_f",
"PFOS_f",
"PFOA_f"),
#blood metals
Blood_metals = c(
"blood_Cd_f",
"blood_Pb_f",
"blood_Hg_f"),
#cotinine
Cotinine = c(
"cotinine_f"),
#phytoestrogens
Phytoestrogens = c(
"genistein_f",
"daidzein_f",
"O_DMA_f",
"equol_f",
"enterodiol_f",
"enterolactone_f"),
#phthalates
Phthalates = c(
"mMP_f",
"mEP_f",
"mCPP_f",
"mBP_f",
"miBP_f",
"mECPP_f",
"mCMHP_f",
"mEHHP_f",
"mEOHP_f",
"mCHP_f",
"mBzP_f",
"mEHP_f",
"mOP_f",
"mNP_f"),
#phenols
Phenols = c(
"BPA_f",
"X2_OH_4MeO_BP_m",
"X4_OH_BP_f",
"X24_OH_BP_f",
"X22_OH_4MeO_BP_f",
"X2244_OH_BP_f"),
#anti microbial
Anti_microbial_cpds = c(
"MP_f",
"EP_f",
"PP_f",
"BP_f",
"BzP_f",
"HP_f",
"X4_HB_f",
"X34_DHB_f",
"OH_Me_P_f",
"OH_Et_P_f",
"TCS_f",
"TCC_f"),
#paracetamol
Paracetamols = c(
"paracetamol_f",
"X4_aminophenol_f"),
#urine metals
Urine_metals = c(
"manganese_f",
"chromium_f",
"beryllium_f",
"cobalt_f",
"molybdenum_f",
"cadmium_f",
"tin_f",
"caesium_f",
"barium_f",
"nickel_f",
"copper_f",
"zinc_f",
"tungsten_f",
"platinum_f",
"thallium_f",
"lead_f",
"uranium_f"),
#urine metalloids
Urine_metalloids = c(
"selenium_f",
"arsenic_f",
"antimony_f",
"tellurium_f")
)
cl_m <- cl_f
# replace the _f in the cl_m to _m
for(i in 1:length(cl_m)){
cl_m[[i]] <- gsub("_f$", "_m", cl_m[[i]], ignore.case = T)
# print(cl_m[[i]])
}
# Operators
list_m_oper <- list(
M_creat = c("Phytoestrogens", "Phthalates", "Phenols", "Urine_metalloids", "Urine_metals", "Anti_microbial_cpds", "Paracetamols"),
M_lipid = c("PCBs", "OCPs", "Polybrominated_cpds"),
M_null = c("PFASs", "Cotinine", "Blood_metals"))
# \\ loop ----
ret_list <- list()
for(i in 1:length(unlist(cl_m))) {
tmpm_var <- unlist(cl_m)[i]
tmpm_oper <- names(cl_m[sapply(cl_m, "%in%", x = tmpm_var)])
final_oper <- names(list_m_oper[sapply(list_m_oper, "%in%", x = tmpm_oper)])
if (final_oper == "M_null"){
frm <- mvlm_mn(tmpm_var, lifemice)
} else if (final_oper == "M_creat"){
frm <- mvlm_mc(tmpm_var, lifemice)
} else {
frm <- mvlm_ml(tmpm_var, lifemice)
}
print(tmpm_var) # after the warnings
ret_list[[tmpm_var]] <- frm
}
## \\ pool ----
# run
library(miceadds)
ret_df_redu_r2 <- data.frame()
for(i in 1:length(unlist(cl_m))) {
tmpm_var <- unlist(cl_m)[i]
input_m <- ret_list[[tmpm_var]]
frm <- as.data.frame(pool.r.squared(input_m)[1])
# frm <- as.data.frame(pool.r.squared(input_m, adjusted = TRUE)[1])
colnames(frm) <- "r2est"
frm$indvar <- tmpm_var
print(tmpm_var)
ret_df_redu_r2 <- rbind(ret_df_redu_r2, frm)
}
# ******** -----
## X. Saving Rdata ----
## merging
es <- cbind(ret_df_full_r2, ret_df_redu_r2)[, c(1, 4, 3, 2)]
colnames(es) <- c("r2full", "r2redu", "indvar", "pvaledc")
es$f2 <- (es$r2full-es$r2redu)/(1-es$r2full)
# remove -ve f2
es <- es[(es$f2 > 0),]
# dir
outdirectory <- "results"
outfilename <- "mv_EWAS_power_semen_chems_SCSADFI.Rdata"
# outfilename <- sprintf("%s_reg_7.Rdata", depVariable)
save(file=file.path(outdirectory,outfilename),
lifemerg, impute_f, impute_m, life, es)
# load("results/mv_EWAS_power_semen_chems_SCSADFI.Rdata")
# ******** -----
# C. Effect-size -----------------------------------------------------
library(pwr)
# \\ n_80%
v_list <- list()
for(i in 1:nrow(es)) {
input_1 <- es$f2[i]
if (input_1 > 0){
v <- pwr.f2.test(u = 1, f2 = input_1, sig.level = 0.05/128, power = 0.8)$v # *Bonf p* value
v_list[[i]] <- v
} else {
v_list[[i]] <- 0 # optional, removed f2 <0
}
}
es$n_80 <- ceiling(unlist(v_list)) + 7 + 1
# \\ power
p_list <- list()
for(i in 1:nrow(es)) { # 126 rows
input_1 <- es$f2[i]
if (input_1 > 0){
p <- pwr.f2.test(u = 1, v = 473 - 7 - 1, f2 = input_1, sig.level = 0.05/128)$p # *Bonf p* value; # full model residual/error df
p_list[[i]] <- p
} else {
p_list[[i]] <- 0 # optional, removed f2 <0
}
}
es$power <- round(unlist(p_list),2)
es$power2 <- round(unlist(p_list),4)
# sample size for 95% f2
quantile(es$f2, 0.95)
pwr.f2.test(u = 1, f2 = quantile(es$f2, 0.95), sig.level = 0.05/128, power = 0.8)
# write
# write.csv(es, file = "results/power_bon_adj_p_morphology.csv")
# write.csv(es, file = "results/power_nil_adj_p_morphology.csv")
# \\ cp-power-plot----
# figure in the paper
# power vs sample size
cp_p_list <- list()
cpseq <- seq(300, 700, by = 40) # step in sample size
for(i in 1:nrow(es)) {
input_1 <- es$f2[i]
power_life <- sapply(cpseq, function(x){
pwr.f2.test(u = 1, v = x - 7 - 1, f2 = input_1, sig.level = 0.05/128)$p
})
cp_p_list[[i]] <- data.frame(rep(es$indvar[i], length(cpseq)), rep(input_1, length(cpseq)), cpseq, power_life)
}
# create df
powerplot_df <- do.call("rbind", cp_p_list)
colnames(powerplot_df) <- c("EDC", "effect_size", "sample_size", "power")
cl_f = list(
#PCBs
PCBs = c(
"PCB_28_f",
"PCB_44_f",
"PCB_49_f",
"PCB_52_f",
"PCB_66_f",
"PCB_74_f",
"PCB_87_f",
"PCB_99_f",
"PCB_101_f",
"PCB_105_f",
"PCB_110_f",
"PCB_118_f",
"PCB_114_f",
"PCB_128_f",
"PCB_138_f",
"PCB_146_f",
"PCB_149_f",
"PCB_151_f",
"PCB_153_f",
"PCB_156_f",
"PCB_157_f",
"PCB_167_f",
"PCB_170_f",
"PCB_172_f",
"PCB_177_f",
"PCB_178_f",
"PCB_180_f",
"PCB_183_f",
"PCB_187_f",
"PCB_189_f",
"PCB_194_f",
"PCB_195_f",
"PCB_196_f",
"PCB_201_f",
"PCB_206_f",
"PCB_209_f"
),
#OCPs
OCPs = c(
"HCB_f",
"b_HCB_f",
"g_HCB_f",
"op_DDT_f",
"pp_DDE_f",
"pp_DDT_f",
"oxychlordane_f",
"tr_nonachlor_f",
"mirex_f"),
#PBC
PBBs = c(
"BB_153_f"),
PBDEs = c(
"BDE_17_f",
"BDE_28_f",
"BDE_47_f",
"BDE_66_f",
"BDE_85_f",
"BDE_99_f",
"BDE_100_f",
"BDE_153_f",
"BDE_154_f",
"BDE_183_f"),
#PFASs
PFASs = c(
"Et_PFOSA_AcOH_f",
"Me_PFOSA_AcOH_f",
"PFDeA_f",
"PFNA_f",
"PFOSA_f",
"PFOS_f",
"PFOA_f"),
#blood metals
Blood_metals = c(
"blood_Cd_f",
"blood_Pb_f",
"blood_Hg_f"),
#cotinine
Cotinine = c(
"cotinine_f"),
#phytoestrogens
Phytoestrogens = c(
"genistein_f",
"daidzein_f",
"O_DMA_f",
"equol_f",
"enterodiol_f",
"enterolactone_f"),
#phthalates
Phthalates = c(
"mMP_f",
"mEP_f",
"mCPP_f",
"mBP_f",
"miBP_f",
"mECPP_f",
"mCMHP_f",
"mEHHP_f",
"mEOHP_f",
"mCHP_f",
"mBzP_f",
"mEHP_f",
"mOP_f",
"mNP_f"),
#phenols
Bisphenol_A = c(
"BPA_f"),
Benzophenones = c(
"X2_OH_4MeO_BP_m",
"X4_OH_BP_f",
"X24_OH_BP_f",
"X22_OH_4MeO_BP_f",
"X2244_OH_BP_f"),
#anti microbial
Anti_microbial_cpds = c(
"MP_f",
"EP_f",
"PP_f",
"BP_f",
"BzP_f",
"HP_f",
"X4_HB_f",
"X34_DHB_f",
"OH_Me_P_f",
"OH_Et_P_f",
"TCS_f",
"TCC_f"),
#paracetamol
Paracetamols = c(
"paracetamol_f",
"X4_aminophenol_f"),
#urine metals
Urine_metals = c(
"manganese_f",
"chromium_f",
"beryllium_f",
"cobalt_f",
"molybdenum_f",
"cadmium_f",
"tin_f",
"caesium_f",
"barium_f",
"nickel_f",
"copper_f",
"zinc_f",
"tungsten_f",
"platinum_f",
"thallium_f",
"lead_f",
"uranium_f"),
#urine metalloids
Urine_metalloids = c(
"selenium_f",
"arsenic_f",
"antimony_f",
"tellurium_f")
)
cl_m <- cl_f
# replace the _f in the cl_m to _m
for(i in 1:length(cl_m)){
cl_m[[i]] <- gsub("_f$", "_m", cl_m[[i]], ignore.case = T)
# print(cl_m[[i]])
}
# create category
EDC_class <- "n____"
for(i in 1:length(cl_m)) {
mtchC <- which(powerplot_df$EDC %in% cl_m[[i]])
EDC_class[mtchC] <- names(cl_m)[i]
}
powerplot_df <- cbind(powerplot_df, EDC_class)
# create var for ordering x axis (thru factor lv)
powerplot_df$EDC <- factor(powerplot_df$EDC, levels = unlist(cl_m), ordered = TRUE) # manhat X in my order
powerplot_df$EDC_class <- factor(powerplot_df$EDC_class, levels = names(cl_m), ordered = TRUE) # manhat legend in my order
# color code
colorCodes <- c("#ea7b00",
"#0195fb",
"#3aae24",
"#c821a7",
"#01df98",
"#da0085",
"#e5c440",
"#f18cff",
"#535800",
"#972064",
"#00b2b6",
"#964400",
"#5e4882",
"#ff9288",
"#b67696") # 15 color
names(colorCodes) <- names(cl_m)
# names(colorCodes) <- levels(factor(get(tempname)$categ)) # by factor name to match the plot
# ggplot
library(ggplot2); library(ggrepel)
colorCodes_gg <- scale_colour_manual(name = "ECD_class",values = colorCodes)
# txt df
tmp1 <- subset(powerplot_df, sample_size == 700)
tmp1 <- tmp1[order(tmp1[,4]),]
tmp1 <- tail(tmp1, 5)
# dput(as.character(tmp1$EDC))
tmp1$EDC <- c("lead", "mOP", "BPA", "uranium", "4-aminophenol") # manual
# plot
p <- ggplot(powerplot_df, aes(sample_size, power, group = EDC))
p <- p + geom_line(aes(colour = factor(EDC_class))) + xlim(300, 750)
p <- p + colorCodes_gg
p <- p + ggtitle("Power vs sample size for the endpoint DNA fragmentation") +
xlab("Sample size") +
ylab("Statistical power")
p <- p + geom_text_repel(data = tmp1, aes(label = EDC),
size = 3,
box.padding = 0.2, # Add extra padding around each text label.
point.padding = 0.3, # Add extra padding around each data point.
segment.color = '#000000', # Color of the line segments.
segment.size = 0.5, # Width of the line segments.
arrow = arrow(length = unit(0.01, 'npc')), # Draw an arrow from the label to the data point.
force = 5, # Strength of the repulsion force.
nudge_x = 30,
nudge_y = 0.005)
p
ggsave("results/power_n_SCSADFI.png", scale=1, dpi=400)
# ggsave("results/power_n_SCSADFI.svg", scale=1, dpi=400)
# library(plotly)
# ggplotly(p)
# quantile
library(dplyr); library(magrittr)
powerquan <- powerplot_df %>% filter(sample_size == 500)
quantile(powerquan$power)
# 0% 25% 50% 75% 100%
# 0.0003948346 0.0008584900 0.0021847938 0.0116988981 0.4028237221
|
## Reuse code to get the data for the plots
source("GetData.R")
powsub <- getdata()
## Plot 1 - Global Active Power histogram
png(filename="plot1.png", width=480, height=480)
hist(powsub$Global_active_power, breaks=12, col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)", ylab="Frequency")
dev.off()
## Plot 2
png(filename="plot2.png", width=480, height=480)
plot(powsub$DateTime,powsub$Global_active_power, type="l", xlab="Day", ylab="Global Active Power (kilowatts)")
dev.off()
## Plot 3
png(filename="plot3.png", width=480, height=480)
plot(powsub$DateTime, powsub$Sub_metering_1, type="l", xlab="Day", ylab="Energy Sub Metering")
points(powsub$DateTime, powsub$Sub_metering_2, type="l", col="red")
points(powsub$DateTime, powsub$Sub_metering_3, type="l", col="blue")
legend("topright", col=c("black","red","blue"), lwd=1, legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
dev.off()
## Plot 4
png(filename="plot4.png", width=480, height=480)
par(mfrow=c(2,2))
plot(powsub$DateTime,powsub$Global_active_power, type="l", xlab="Day", ylab="Global Active Power (kilowatts)")
plot(powsub$DateTime,powsub$Voltage, type="l", xlab="Day", ylab="Voltage")
plot(powsub$DateTime, powsub$Sub_metering_1, type="l", xlab="Day", ylab="Energy Sub Metering")
points(powsub$DateTime, powsub$Sub_metering_2, type="l", col="red")
points(powsub$DateTime, powsub$Sub_metering_3, type="l", col="blue")
legend("topright", col=c("black","red","blue"), lwd=1, legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
plot(powsub$DateTime,powsub$Global_reactive_power, type="l", xlab="Day", ylab="Global Reactive Power (kilowatts)")
dev.off()
| /plotAll.R | no_license | mauricesingleton/ExData_Plotting1 | R | false | false | 1,714 | r |
## Reuse code to get the data for the plots
source("GetData.R")
powsub <- getdata()
## Plot 1 - Global Active Power histogram
png(filename="plot1.png", width=480, height=480)
hist(powsub$Global_active_power, breaks=12, col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)", ylab="Frequency")
dev.off()
## Plot 2
png(filename="plot2.png", width=480, height=480)
plot(powsub$DateTime,powsub$Global_active_power, type="l", xlab="Day", ylab="Global Active Power (kilowatts)")
dev.off()
## Plot 3
png(filename="plot3.png", width=480, height=480)
plot(powsub$DateTime, powsub$Sub_metering_1, type="l", xlab="Day", ylab="Energy Sub Metering")
points(powsub$DateTime, powsub$Sub_metering_2, type="l", col="red")
points(powsub$DateTime, powsub$Sub_metering_3, type="l", col="blue")
legend("topright", col=c("black","red","blue"), lwd=1, legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
dev.off()
## Plot 4
png(filename="plot4.png", width=480, height=480)
par(mfrow=c(2,2))
plot(powsub$DateTime,powsub$Global_active_power, type="l", xlab="Day", ylab="Global Active Power (kilowatts)")
plot(powsub$DateTime,powsub$Voltage, type="l", xlab="Day", ylab="Voltage")
plot(powsub$DateTime, powsub$Sub_metering_1, type="l", xlab="Day", ylab="Energy Sub Metering")
points(powsub$DateTime, powsub$Sub_metering_2, type="l", col="red")
points(powsub$DateTime, powsub$Sub_metering_3, type="l", col="blue")
legend("topright", col=c("black","red","blue"), lwd=1, legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
plot(powsub$DateTime,powsub$Global_reactive_power, type="l", xlab="Day", ylab="Global Reactive Power (kilowatts)")
dev.off()
|
setwd("D:/workspace/R")
score <- read.csv("score.csv", header = TRUE)
# test1 test2 final
# 1 3 2 1
# 2 5 4 5
# 3 1 2 5
# 4 2 9 10
# 5 10 10 10
# 6 9 8 9
head(score)
str(score)
index_v <- rep(FALSE, 11)
index_v[2:5] <- TRUE
# FALSE TRUE TRUE TRUE TRUE FALSE FALSE FALSE FALSE FALSE FALSE
index_which <- which(index_v)
# 2 3 4 5
score[index_which, 1]
# 5 1 2 10
# 3 보다 작은 값들의 위치
index_v <- score$test1<3
score[index_v, 1]
# 1 2 1
index_which <- which(score$test1 < 3)
# 3 4 9
score[index_which,1]
# 1 2 1
index <- rep(FALSE, 11 * 3)
index[13:14] <- TRUE
index_matrix <- matrix(index, ncol=3)
# [,1] [,2] [,3]
# [1,] FALSE FALSE FALSE
# [2,] FALSE TRUE FALSE
# [3,] FALSE TRUE FALSE
# [4,] FALSE FALSE FALSE
# [5,] FALSE FALSE FALSE
# [6,] FALSE FALSE FALSE
# [7,] FALSE FALSE FALSE
# [8,] FALSE FALSE FALSE
# [9,] FALSE FALSE FALSE
# [10,] FALSE FALSE FALSE
# [11,] FALSE FALSE FALSE
score[index_matrix]
# 4 2
which(index_matrix)
# 13 14
| /5.which.R | no_license | dgjung0220/R_Study | R | false | false | 1,049 | r | setwd("D:/workspace/R")
score <- read.csv("score.csv", header = TRUE)
# test1 test2 final
# 1 3 2 1
# 2 5 4 5
# 3 1 2 5
# 4 2 9 10
# 5 10 10 10
# 6 9 8 9
head(score)
str(score)
index_v <- rep(FALSE, 11)
index_v[2:5] <- TRUE
# FALSE TRUE TRUE TRUE TRUE FALSE FALSE FALSE FALSE FALSE FALSE
index_which <- which(index_v)
# 2 3 4 5
score[index_which, 1]
# 5 1 2 10
# 3 보다 작은 값들의 위치
index_v <- score$test1<3
score[index_v, 1]
# 1 2 1
index_which <- which(score$test1 < 3)
# 3 4 9
score[index_which,1]
# 1 2 1
index <- rep(FALSE, 11 * 3)
index[13:14] <- TRUE
index_matrix <- matrix(index, ncol=3)
# [,1] [,2] [,3]
# [1,] FALSE FALSE FALSE
# [2,] FALSE TRUE FALSE
# [3,] FALSE TRUE FALSE
# [4,] FALSE FALSE FALSE
# [5,] FALSE FALSE FALSE
# [6,] FALSE FALSE FALSE
# [7,] FALSE FALSE FALSE
# [8,] FALSE FALSE FALSE
# [9,] FALSE FALSE FALSE
# [10,] FALSE FALSE FALSE
# [11,] FALSE FALSE FALSE
score[index_matrix]
# 4 2
which(index_matrix)
# 13 14
|
test_that("dssSetOption and dssGetOption work", {
x <- dssSetOption(list(stringsAsFactors = FALSE), datasources = opals)
expect_true(x[[1]])
expect_false(dssGetOption('stringsAsFactors', datasources = opals)[[1]][[1]])
})
| /tests/testthat/test-dssSetOption.R | no_license | IulianD/dsSwissKnifeClient | R | false | false | 229 | r | test_that("dssSetOption and dssGetOption work", {
x <- dssSetOption(list(stringsAsFactors = FALSE), datasources = opals)
expect_true(x[[1]])
expect_false(dssGetOption('stringsAsFactors', datasources = opals)[[1]][[1]])
})
|
rm(list = ls())
library(forecast)
library(readxl)
library(MASS)
library(randomForest)
library(dplyr)
library(DMwR)
library(gdata)
pacman::p_load("caret","partykit","ROCR","lift","rpart","e1071","glmnet","MASS","xgboost")
credit_data<- read_excel(file.choose())
application_data <-read_excel(file.choose())
#Feature Engineering
credit_data$SEX <- as.factor(credit_data$SEX)
credit_data$EDUCATION <- as.factor(credit_data$EDUCATION)
credit_data$MARRIAGE <- as.factor(credit_data$MARRIAGE)
application_data$SEX <- as.factor(application_data$SEX)
application_data$EDUCATION <- as.factor(application_data$EDUCATION)
application_data$MARRIAGE <- as.factor(application_data$MARRIAGE)
#Bill Amount Ratio
credit_data$bill_amt_ratio1 <- credit_data$BILL_AMT1 * credit_data$PAY_1
credit_data$bill_amt_ratio2 <- credit_data$BILL_AMT2 * credit_data$PAY_2
credit_data$bill_amt_ratio3 <- credit_data$BILL_AMT3 * credit_data$PAY_3
credit_data$bill_amt_ratio4 <- credit_data$BILL_AMT4 * credit_data$PAY_4
credit_data$bill_amt_ratio5 <- credit_data$BILL_AMT5 * credit_data$PAY_5
credit_data$bill_amt_ratio6 <- credit_data$BILL_AMT6 * credit_data$PAY_6
application_data$bill_amt_ratio1 <- application_data$BILL_AMT1 * application_data$PAY_1
application_data$bill_amt_ratio2 <- application_data$BILL_AMT2 * application_data$PAY_2
application_data$bill_amt_ratio3 <- application_data$BILL_AMT3 * application_data$PAY_3
application_data$bill_amt_ratio4 <- application_data$BILL_AMT4 * application_data$PAY_4
application_data$bill_amt_ratio5 <- application_data$BILL_AMT5 * application_data$PAY_5
application_data$bill_amt_ratio6 <- application_data$BILL_AMT6 * application_data$PAY_6
#Pay Delay Factor
for(i in c("PAY_1","PAY_2","PAY_3","PAY_4","PAY_5","PAY_6")){
col_name <- paste( i , "Delay", sep = "_", collapse = NULL)
credit_data[[col_name]] <- ifelse(credit_data[[i]] %in% c(-2,-1,0), 0, credit_data[[i]])
application_data[[col_name]] <- ifelse(application_data[[i]] %in% c(-2,-1,0), 0, application_data[[i]])
}
#Total Bill
credit_data$total_bill <- credit_data$BILL_AMT1 + credit_data$BILL_AMT2 + credit_data$BILL_AMT3 + credit_data$BILL_AMT4 + credit_data$BILL_AMT5 + credit_data$BILL_AMT6
application_data$total_bill <- application_data$BILL_AMT1 + application_data$BILL_AMT2 + application_data$BILL_AMT3 + application_data$BILL_AMT4 + application_data$BILL_AMT5 + application_data$BILL_AMT6
#High Educated Single
credit_data$high_educated_single <- ifelse(as.numeric(credit_data$EDUCATION)<=2 & credit_data$MARRIAGE==2,1,0)
application_data$high_educated_single <- ifelse(as.numeric(application_data$EDUCATION)<=2 & application_data$MARRIAGE==2,1,0)
#Grouping for education and marriage factor
credit_data$EDUCATION <- ifelse(credit_data$EDUCATION %in% c(4,5,6), 0, credit_data$EDUCATION)
credit_data$MARRIAGE <- ifelse(credit_data$MARRIAGE %in% c(3), 0, credit_data$MARRIAGE)
application_data$EDUCATION <- ifelse(application_data$EDUCATION %in% c(4,5,6), 0, application_data$EDUCATION)
application_data$MARRIAGE <- ifelse(application_data$MARRIAGE %in% c(3), 0, application_data$MARRIAGE)
#split train and test data (steps)
set.seed(77850) #set a random number generation seed to ensure that the split is the same everytime
inTrain <- createDataPartition(y = credit_data$default_0,
p = 0.8, list = FALSE)
training <- credit_data[ inTrain,]
testing <- credit_data[ -inTrain,]
####XGBoost (steps)
credit_data_matrix <- model.matrix(default_0~ ., data = credit_data)[,-1]
x_train <- credit_data_matrix[inTrain,]
x_test <- credit_data_matrix[-inTrain,]
y_train <-training$default_0
y_test <-testing$default_0
model_XGboost<-xgboost(data = data.matrix(x_train),
label = as.numeric(as.character(y_train)),
eta = 0.01, # hyperparameter: learning rate
max_depth = 3, # hyperparameter: size of a tree in each boosting
nround=450, # hyperparameter: number of boosting iterations
objective = "binary:logistic"
)
XGboost_prediction<-predict(model_XGboost,newdata=x_test, type="response") #Predict classification (for confusion matrix)
xgb_confMatrix <- confusionMatrix(as.factor(ifelse(XGboost_prediction>0.2211,1,0)),as.factor(y_test),positive="1") #Display confusion matrix
####ROC Curve
XGboost_ROC_prediction <- prediction(XGboost_prediction, y_test) #Calculate errors
XGboost_ROC_testing <- performance(XGboost_ROC_prediction,"tpr","fpr") #Create ROC curve data
plot(XGboost_ROC_testing) #Plot ROC curve
####AUC
auc.tmp <- performance(XGboost_ROC_prediction,"auc") #Create AUC data
XGboost_auc_testing <- as.numeric(auc.tmp@y.values) #Calculate AUC
XGboost_auc_testing #Display AUC value: 90+% - excellent, 80-90% - very good, 70-80% - good, 60-70% - so so, below 60% - not much value
####
#### Calculating possible Gain & Loss ###
####
xgb_loss <-xgb_confMatrix$table[3] * (-5000)
xgb_loss
xgb_gain <-xgb_confMatrix$table[1] * (1500)
xgb_gain
xgb_profit <- xgb_loss + xgb_gain
xgb_profit/4800
###### Prediction
testing <- application_data
training <- credit_data
testing$default_0 <- 0 #add dummy column to testing dataset
x_train <- model.matrix(default_0~.-ID, data = training)[,-1]
x_test <- model.matrix(default_0~.-ID, data = testing)[,-1]
y_train <-training$default_0
model_XGboost<-xgboost(data = data.matrix(x_train),
label = as.numeric(as.character(y_train)),
eta = 0.01, # hyperparameter: learning rate
max_depth = 3, # hyperparameter: size of a tree in each boosting
nround=450, # hyperparameter: number of boosting iterations
objective = "binary:logistic"
)
credit_prediction<-predict(model_XGboost,newdata=x_test, type="response") #Predict classification (for confusion matrix)
prediction_output <- as.factor(ifelse(credit_prediction>0.2211,1,0))
| /Credit Prediction.R | no_license | liansiulokman/Credit-Prediction | R | false | false | 6,170 | r |
rm(list = ls())
library(forecast)
library(readxl)
library(MASS)
library(randomForest)
library(dplyr)
library(DMwR)
library(gdata)
pacman::p_load("caret","partykit","ROCR","lift","rpart","e1071","glmnet","MASS","xgboost")
credit_data<- read_excel(file.choose())
application_data <-read_excel(file.choose())
#Feature Engineering
credit_data$SEX <- as.factor(credit_data$SEX)
credit_data$EDUCATION <- as.factor(credit_data$EDUCATION)
credit_data$MARRIAGE <- as.factor(credit_data$MARRIAGE)
application_data$SEX <- as.factor(application_data$SEX)
application_data$EDUCATION <- as.factor(application_data$EDUCATION)
application_data$MARRIAGE <- as.factor(application_data$MARRIAGE)
#Bill Amount Ratio
credit_data$bill_amt_ratio1 <- credit_data$BILL_AMT1 * credit_data$PAY_1
credit_data$bill_amt_ratio2 <- credit_data$BILL_AMT2 * credit_data$PAY_2
credit_data$bill_amt_ratio3 <- credit_data$BILL_AMT3 * credit_data$PAY_3
credit_data$bill_amt_ratio4 <- credit_data$BILL_AMT4 * credit_data$PAY_4
credit_data$bill_amt_ratio5 <- credit_data$BILL_AMT5 * credit_data$PAY_5
credit_data$bill_amt_ratio6 <- credit_data$BILL_AMT6 * credit_data$PAY_6
application_data$bill_amt_ratio1 <- application_data$BILL_AMT1 * application_data$PAY_1
application_data$bill_amt_ratio2 <- application_data$BILL_AMT2 * application_data$PAY_2
application_data$bill_amt_ratio3 <- application_data$BILL_AMT3 * application_data$PAY_3
application_data$bill_amt_ratio4 <- application_data$BILL_AMT4 * application_data$PAY_4
application_data$bill_amt_ratio5 <- application_data$BILL_AMT5 * application_data$PAY_5
application_data$bill_amt_ratio6 <- application_data$BILL_AMT6 * application_data$PAY_6
#Pay Delay Factor
for(i in c("PAY_1","PAY_2","PAY_3","PAY_4","PAY_5","PAY_6")){
col_name <- paste( i , "Delay", sep = "_", collapse = NULL)
credit_data[[col_name]] <- ifelse(credit_data[[i]] %in% c(-2,-1,0), 0, credit_data[[i]])
application_data[[col_name]] <- ifelse(application_data[[i]] %in% c(-2,-1,0), 0, application_data[[i]])
}
#Total Bill
credit_data$total_bill <- credit_data$BILL_AMT1 + credit_data$BILL_AMT2 + credit_data$BILL_AMT3 + credit_data$BILL_AMT4 + credit_data$BILL_AMT5 + credit_data$BILL_AMT6
application_data$total_bill <- application_data$BILL_AMT1 + application_data$BILL_AMT2 + application_data$BILL_AMT3 + application_data$BILL_AMT4 + application_data$BILL_AMT5 + application_data$BILL_AMT6
#High Educated Single
credit_data$high_educated_single <- ifelse(as.numeric(credit_data$EDUCATION)<=2 & credit_data$MARRIAGE==2,1,0)
application_data$high_educated_single <- ifelse(as.numeric(application_data$EDUCATION)<=2 & application_data$MARRIAGE==2,1,0)
#Grouping for education and marriage factor
credit_data$EDUCATION <- ifelse(credit_data$EDUCATION %in% c(4,5,6), 0, credit_data$EDUCATION)
credit_data$MARRIAGE <- ifelse(credit_data$MARRIAGE %in% c(3), 0, credit_data$MARRIAGE)
application_data$EDUCATION <- ifelse(application_data$EDUCATION %in% c(4,5,6), 0, application_data$EDUCATION)
application_data$MARRIAGE <- ifelse(application_data$MARRIAGE %in% c(3), 0, application_data$MARRIAGE)
#split train and test data (steps)
set.seed(77850) #set a random number generation seed to ensure that the split is the same everytime
inTrain <- createDataPartition(y = credit_data$default_0,
p = 0.8, list = FALSE)
training <- credit_data[ inTrain,]
testing <- credit_data[ -inTrain,]
####XGBoost (steps)
credit_data_matrix <- model.matrix(default_0~ ., data = credit_data)[,-1]
x_train <- credit_data_matrix[inTrain,]
x_test <- credit_data_matrix[-inTrain,]
y_train <-training$default_0
y_test <-testing$default_0
model_XGboost<-xgboost(data = data.matrix(x_train),
label = as.numeric(as.character(y_train)),
eta = 0.01, # hyperparameter: learning rate
max_depth = 3, # hyperparameter: size of a tree in each boosting
nround=450, # hyperparameter: number of boosting iterations
objective = "binary:logistic"
)
XGboost_prediction<-predict(model_XGboost,newdata=x_test, type="response") #Predict classification (for confusion matrix)
xgb_confMatrix <- confusionMatrix(as.factor(ifelse(XGboost_prediction>0.2211,1,0)),as.factor(y_test),positive="1") #Display confusion matrix
####ROC Curve
XGboost_ROC_prediction <- prediction(XGboost_prediction, y_test) #Calculate errors
XGboost_ROC_testing <- performance(XGboost_ROC_prediction,"tpr","fpr") #Create ROC curve data
plot(XGboost_ROC_testing) #Plot ROC curve
####AUC
auc.tmp <- performance(XGboost_ROC_prediction,"auc") #Create AUC data
XGboost_auc_testing <- as.numeric(auc.tmp@y.values) #Calculate AUC
XGboost_auc_testing #Display AUC value: 90+% - excellent, 80-90% - very good, 70-80% - good, 60-70% - so so, below 60% - not much value
####
#### Calculating possible Gain & Loss ###
####
xgb_loss <-xgb_confMatrix$table[3] * (-5000)
xgb_loss
xgb_gain <-xgb_confMatrix$table[1] * (1500)
xgb_gain
xgb_profit <- xgb_loss + xgb_gain
xgb_profit/4800
###### Prediction
testing <- application_data
training <- credit_data
testing$default_0 <- 0 #add dummy column to testing dataset
x_train <- model.matrix(default_0~.-ID, data = training)[,-1]
x_test <- model.matrix(default_0~.-ID, data = testing)[,-1]
y_train <-training$default_0
model_XGboost<-xgboost(data = data.matrix(x_train),
label = as.numeric(as.character(y_train)),
eta = 0.01, # hyperparameter: learning rate
max_depth = 3, # hyperparameter: size of a tree in each boosting
nround=450, # hyperparameter: number of boosting iterations
objective = "binary:logistic"
)
credit_prediction<-predict(model_XGboost,newdata=x_test, type="response") #Predict classification (for confusion matrix)
prediction_output <- as.factor(ifelse(credit_prediction>0.2211,1,0))
|
#===============================================================================
# 2021-06-16 -- MPIDR dataviz
# Dataviz challenge -- Altmetric top-100 2019
# Ilya Kashnitsky, ilya.kashnitsky@gmail.com
#===============================================================================
# Altmetric is a company that tracks the media attention of the academic output. Each year they publish the list of top-100 noticed papers.
# https://www.altmetric.com/top100/home/
# You challenge is to find an interesting way to represent (part of) the dataset.
# Hint: try to find a story in the dataset.
# Citation of the dataset:
# Engineering, Altmetric (2019): 2019 Altmetric Top 100 - dataset. Altmetric. Dataset. https://doi.org/10.6084/m9.figshare.11371860.v3
# SUBMIT your result via Google form before 2020-07-16 23:59 CET (end day 4)
# Please, name your plot with your surname -- eg "KASHNITSKY.png"
# Note: you need a Google account to upload files in the form
# https://bit.ly/dv-mpidr-alt
library(tidyverse)
data_url <- "https://altmetric.figshare.com/ndownloader/files/20282124"
download.file(data_url, destfile = "data/altmetric-top-100.xlsx")
alt <- readxl::read_excel("data/altmetric-top-100.xlsx") %>%
janitor::clean_names() # column names to lowercase
# quick one -- to start with something
library(lubridate)
alt %>%
mutate(month = publication_date %>% month) %>%
ggplot(aes(altmetric_attention_score, twitter_mentions))+
geom_point(aes(color = month))+
scale_color_fermenter(palette = "YlOrRd")+
scale_x_log10()+
scale_y_log10()
| /day3/dataviz-challenge-altmetric.R | permissive | rashey/dataviz-mpidr | R | false | false | 1,574 | r | #===============================================================================
# 2021-06-16 -- MPIDR dataviz
# Dataviz challenge -- Altmetric top-100 2019
# Ilya Kashnitsky, ilya.kashnitsky@gmail.com
#===============================================================================
# Altmetric is a company that tracks the media attention of the academic output. Each year they publish the list of top-100 noticed papers.
# https://www.altmetric.com/top100/home/
# You challenge is to find an interesting way to represent (part of) the dataset.
# Hint: try to find a story in the dataset.
# Citation of the dataset:
# Engineering, Altmetric (2019): 2019 Altmetric Top 100 - dataset. Altmetric. Dataset. https://doi.org/10.6084/m9.figshare.11371860.v3
# SUBMIT your result via Google form before 2020-07-16 23:59 CET (end day 4)
# Please, name your plot with your surname -- eg "KASHNITSKY.png"
# Note: you need a Google account to upload files in the form
# https://bit.ly/dv-mpidr-alt
library(tidyverse)
data_url <- "https://altmetric.figshare.com/ndownloader/files/20282124"
download.file(data_url, destfile = "data/altmetric-top-100.xlsx")
alt <- readxl::read_excel("data/altmetric-top-100.xlsx") %>%
janitor::clean_names() # column names to lowercase
# quick one -- to start with something
library(lubridate)
alt %>%
mutate(month = publication_date %>% month) %>%
ggplot(aes(altmetric_attention_score, twitter_mentions))+
geom_point(aes(color = month))+
scale_color_fermenter(palette = "YlOrRd")+
scale_x_log10()+
scale_y_log10()
|
### fe
msim <- m_dist(fe_mlsn)
gsim <- g_dist(d$FeM3)
m_values <- msim[[1]]
max_mlsn <- msim[[2]]
g_values <- gsim[[1]]
max_gss <- gsim[[2]]
# choose max and a pretty max that divides for scale
real.max <- max(c(max_mlsn, max_gss))
pretty.max <- max(pretty(c(0, max(c(max_mlsn, max_gss)))))
# plot with sim data
p <- ggplot(data = m_values, aes(x = x.values, y = y.values))
fe_plot_sim <- p + theme_cowplot() +
background_grid(major = 'x') +
geom_area(fill = "#d95f02", colour = '#d95f02', alpha = 0.2,
linetype = 'dashed') +
geom_area(data = g_values, aes(x = x.values, y = y.values),
colour = "#3f7300", fill = '#3f7300', alpha = 0.3) +
scale_x_continuous(limits = c(0, pretty.max),
breaks = seq(0, pretty.max, pretty.max/5)) +
scale_y_continuous(expand = c(0, 0)) +
theme(axis.line.y = element_blank(),
axis.title.y = element_blank(),
axis.text.y = element_blank(),
axis.ticks.y = element_blank()) +
labs(x = expression(paste('mg Fe ', kg^{-1})))
# labs(x = expression(paste('soil organic matter, g ', kg^{-1})))
fe_plot_sim
# plot with sim vs gss actual
p <- ggplot(data = d, aes(x = FeM3))
fe_plot_survey <- p + theme_cowplot() +
background_grid(major = 'x') +
geom_density(fill = '#7570b3', colour = '#7570b3', alpha = 0.2,
linetype = 'dashed') +
geom_area(data = g_values, aes(x = x.values, y = y.values),
colour = "#3f7300", fill = '#3f7300', alpha = 0.3) +
scale_x_continuous(limits = c(0, pretty.max),
breaks = seq(0, pretty.max, pretty.max/5)) +
scale_y_continuous(expand = c(0, 0)) +
theme(axis.line.y = element_blank(),
axis.title.y = element_blank(),
axis.text.y = element_blank(),
axis.ticks.y = element_blank()) +
labs(x = expression(paste('mg Fe ', kg^{-1})))
# labs(x = expression(paste('Soil organic matter, g ', kg^{-1})))
fe_plot_survey
| /r/plots/exact_fe_plot.R | no_license | micahwoods/gss | R | false | false | 1,952 | r | ### fe
msim <- m_dist(fe_mlsn)
gsim <- g_dist(d$FeM3)
m_values <- msim[[1]]
max_mlsn <- msim[[2]]
g_values <- gsim[[1]]
max_gss <- gsim[[2]]
# choose max and a pretty max that divides for scale
real.max <- max(c(max_mlsn, max_gss))
pretty.max <- max(pretty(c(0, max(c(max_mlsn, max_gss)))))
# plot with sim data
p <- ggplot(data = m_values, aes(x = x.values, y = y.values))
fe_plot_sim <- p + theme_cowplot() +
background_grid(major = 'x') +
geom_area(fill = "#d95f02", colour = '#d95f02', alpha = 0.2,
linetype = 'dashed') +
geom_area(data = g_values, aes(x = x.values, y = y.values),
colour = "#3f7300", fill = '#3f7300', alpha = 0.3) +
scale_x_continuous(limits = c(0, pretty.max),
breaks = seq(0, pretty.max, pretty.max/5)) +
scale_y_continuous(expand = c(0, 0)) +
theme(axis.line.y = element_blank(),
axis.title.y = element_blank(),
axis.text.y = element_blank(),
axis.ticks.y = element_blank()) +
labs(x = expression(paste('mg Fe ', kg^{-1})))
# labs(x = expression(paste('soil organic matter, g ', kg^{-1})))
fe_plot_sim
# plot with sim vs gss actual
p <- ggplot(data = d, aes(x = FeM3))
fe_plot_survey <- p + theme_cowplot() +
background_grid(major = 'x') +
geom_density(fill = '#7570b3', colour = '#7570b3', alpha = 0.2,
linetype = 'dashed') +
geom_area(data = g_values, aes(x = x.values, y = y.values),
colour = "#3f7300", fill = '#3f7300', alpha = 0.3) +
scale_x_continuous(limits = c(0, pretty.max),
breaks = seq(0, pretty.max, pretty.max/5)) +
scale_y_continuous(expand = c(0, 0)) +
theme(axis.line.y = element_blank(),
axis.title.y = element_blank(),
axis.text.y = element_blank(),
axis.ticks.y = element_blank()) +
labs(x = expression(paste('mg Fe ', kg^{-1})))
# labs(x = expression(paste('Soil organic matter, g ', kg^{-1})))
fe_plot_survey
|
setwd("~/R-Code")
projectData = read.table("Sea Ice and Temperature Data.txt", sep = ",", header = T) #reads our data into the global environment
attach(projectData)
boxplot(Temperature.Anomalies.based.on.20th.century.averages...C., names = c("Temperature.Anomalies.based.on.20th.century.averages...C."))
boxplot(Minimum.Arctic.Sea.Ice.Extent..Millions.of.squared.km., names = c("Minimum.Arctic.Sea.Ice.Extent..Millions.of.squared.km."))
| /Boxplot.R | no_license | JohnLi002/ProbabilityAndStatisticsProject | R | false | false | 441 | r | setwd("~/R-Code")
projectData = read.table("Sea Ice and Temperature Data.txt", sep = ",", header = T) #reads our data into the global environment
attach(projectData)
boxplot(Temperature.Anomalies.based.on.20th.century.averages...C., names = c("Temperature.Anomalies.based.on.20th.century.averages...C."))
boxplot(Minimum.Arctic.Sea.Ice.Extent..Millions.of.squared.km., names = c("Minimum.Arctic.Sea.Ice.Extent..Millions.of.squared.km."))
|
# slajd 1
curve(dnorm,-5,5,main="Density of N(0,1) distribution",
ylab="Probability",lwd=2,col="blue")
lines(c(0,0),c(-1,1),lty=2)
curve(dexp,0.0001,5,main="Density of Exp(1) distribution",
ylab="Probability",lwd=2,col="red")
f <- function(x){
0.5*dnorm(x)+0.5*dnorm(x,4,1)
}
curve(f,-5,9,
main="Mixture of N(0,1) and N(4,1) distributions",
ylab="Probability",lwd=2, col="black")
lines(c(0,0),c(-1,1),lty=2)
lines(c(4,4),c(-1,1),lty=2)
curve(dnorm,-3,6,main="Densities of different distributions",
ylab="Probability",lwd=2, col="blue",ylim=c(-0.1,0.9))
curve(dexp,0.0001,6,lwd=2,col="red",add=TRUE)
curve(f,-5,9,lwd=2, col="black",add=TRUE)
lines(c(0,0),c(-1,1),lty=2)
legend("topright",c("N(0,1)","Exp(1)","N(0,1)+N(4,1)"),
col=c("red","blue","black"),lty=1,cex=0.75)
# slajd 2
n <- rnorm(200)
e <- rexp(200)
m <- c(rnorm(200,0,1),rnorm(200,4,1))
curve(dnorm,-5,5,main="Density of N(0,1) distribution",
lwd=2,col="blue",ylim=c(-0.1,0.6), ylab="", xlab="",
axes=FALSE)
lines(c(0,0),c(-1,1),lty=2)
axis(1)
cistrip(c(mean(n),-max(-min(n),max(n)),max(-min(n),max(n))),
at=0.5, horiz=T,lwd=2,col="blue")
curve(dexp,0.0001,5,main="Density of Exp(1) distribution",
lwd=2,col="red", ylab="", xlab="", ylim=c(-0.1,1.5),
axes=FALSE)
lines(c(0,0),c(-1,2),lty=2)
axis(1)
cistrip(c(mean(e),min(e),max(e)), at=1.3, horiz=T,lwd=2,col="red")
curve(f,-5,9,
main="Mixture of N(0,1) and N(4,1) distributions",
ylim=c(-0.1,0.4),lwd=2, col="black", ylab="", xlab="",
axes=FALSE)
lines(c(0,0),c(-1,1),lty=2)
lines(c(4,4),c(-1,1),lty=2)
axis(1)
cistrip(c(mean(m),min(m),max(m)), at=0.3, horiz=T,,lwd=2)
lines(c(2,2),c(-1,1),lty=2)
plot.new()
plot.window(c(-5,6),c(0,4))
axis(side=1)
cistrip(c(mean(n),-max(-min(n),max(n)),max(-min(n),max(n))),
at=0.5, horiz=T,col="blue",lwd=2)
cistrip(c(mean(e),min(e),max(e)), at=1.5, horiz=T,col="red",lwd=2)
cistrip(c(mean(m),min(m),max(m)), at=3, horiz=T,lwd=2)
title(main="Densities of different distributions")
# slajd 3
curve(dnorm,-5,5,main="Density of N(0,1) distribution",
lwd=2,col="blue",ylim=c(-0.1,1.1), ylab="", xlab="",
axes=FALSE)
lines(c(0,0),c(-1,2),lty=2)
axis(1)
boxplot(n,horizontal=TRUE,at=0.8,add=TRUE,col="blue",outline=FALSE,
cex=1,frame=FALSE)
curve(dexp,0.0001,5,main="Density of Exp(1) distribution",
lwd=2,col="red", ylab="", xlab="", ylim=c(-0.1,1.5),
axes=FALSE)
lines(c(0,0),c(-1,2),lty=2)
axis(1)
boxplot(e,horizontal=TRUE,add=TRUE, at=1.3,col="red",
frame=FALSE,outline=FALSE)
curve(f,-5,9,
main="Mixture of N(0,1) and N(4,1) distributions",
ylim=c(-0.1,0.8),lwd=2, col="black", ylab="", xlab="",
axes=FALSE)
lines(c(0,0),c(-1,1),lty=2)
lines(c(4,4),c(-1,1),lty=2)
axis(1)
boxplot(m,horizontal=TRUE,add=TRUE,at=0.6,col="gray",
frame=FALSE,outline=FALSE)
lines(c(2,2),c(-1,1),lty=2)
plot.new()
plot.window(c(-5,7),c(0,4))
axis(side=1)
boxplot(n,horizontal=TRUE,at=0.5,add=TRUE,col="blue",frame=FALSE,outline=FALSE)
boxplot(e,horizontal=TRUE,add=TRUE, at=2,col="red",frame=FALSE,outline=FALSE)
boxplot(m,horizontal=TRUE,add=TRUE,at=3.5,col="gray",frame=FALSE,outline=FALSE)
title(main="Densities of different distributions")
# slajd 4
curve(dnorm,-5,5,main="Density of N(0,1) distribution",
lwd=2,col="blue",ylim=c(-0.1,1.1), ylab="", xlab="",
axes=FALSE)
lines(c(0,0),c(-1,1),lty=2)
axis(1)
bpstrip(n,at=0.8,width=0.5,col="blue")
curve(dexp,0.0001,5,main="Density of Exp(1) distribution",
lwd=2,col="red", ylab="", xlab="", ylim=c(-0.1,1.5),
axes=FALSE)
lines(c(0,0),c(-1,2),lty=2)
axis(1)
bpstrip(e,at=1.3,width=0.5,col="red")
curve(f,-5,9,
main="Mixture of N(0,1) and N(4,1) distributions",
ylim=c(-0.1,0.7),lwd=2, col="black", ylab="", xlab="",
axes=FALSE)
lines(c(0,0),c(-1,1),lty=2)
lines(c(4,4),c(-1,1),lty=2)
axis(1)
bpstrip(m,at=0.5,width=0.35)
lines(c(2,2),c(-1,1),lty=2)
plot.new()
plot.window(c(-5,6),c(0,4))
axis(side=1)
bpstrip(n,at=0.5,width=0.5, col="blue")
bpstrip(e,at=1.5,width=0.5,col="red")
bpstrip(m,at=3,width=0.5)
title(main="Densities of different distributions")
# slajd 5
curve(dnorm,-5,5,main="Density of N(0,1) distribution",
lwd=2,col="blue",ylim=c(-0.1,1.1), ylab="", xlab="",
axes=FALSE)
lines(c(0,0),c(-1,1),lty=2)
axis(1)
vwstrip(n,at=0.8,width=0.3,col="blue")
curve(dexp,0.0001,5,main="Density of Exp(1) distribution",
lwd=2,col="red", ylab="", xlab="", ylim=c(-0.5,1.5),
axes=FALSE)
lines(c(0,0),c(-1,2),lty=2)
axis(1)
vwstrip(e,at=1.2,width=0.3,col="red")
curve(f,-5,9,
main="Mixture of N(0,1) and N(4,1) distributions",
ylim=c(-0.1,0.7),lwd=2, col="black", ylab="", xlab="",
axes=FALSE)
lines(c(0,0),c(-1,1),lty=2)
lines(c(4,4),c(-1,1),lty=2)
axis(1)
vwstrip(m,at=0.5,width=0.3)
lines(c(2,2),c(-1,1),lty=2)
plot.new()
plot.window(c(-5,6),c(0,4))
axis(side=1)
vwstrip(n,at=0.5,width=0.5,col="blue")
vwstrip(e,at=1.5,width=0.5,col="red")
vwstrip(m,at=3,width=0.5)
title(main="Densities of different distributions")
# slajd 6
curve(dnorm,-5,5,main="Density of N(0,1) distribution",
lwd=2,col="blue",ylim=c(-0.1,1.1), ylab="", xlab="",
axes=FALSE)
lines(c(0,0),c(-1,1),lty=2)
axis(1)
sectioned.density(n,at=0.5,colmax="blue")
curve(dexp,0.0001,5,main="Density of Exp(1) distribution",
lwd=2,col="red", ylab="", xlab="", ylim=c(-0.5,1.7),
axes=FALSE)
lines(c(0,0),c(-1,2),lty=2)
axis(1)
sectioned.density(e,at=1.2,colmax="red")
curve(f,-5,9,
main="Mixture of N(0,1) and N(4,1) distributions",
ylim=c(-0.1,0.7),lwd=2, col="black", ylab="", xlab="",
axes=FALSE)
lines(c(0,0),c(-1,1),lty=2)
lines(c(4,4),c(-1,1),lty=2)
axis(1)
sectioned.density(m,at=0.5)
lines(c(2,2),c(-1,1),lty=2)
plot.new()
plot.window(c(-5,6),c(0,4))
axis(side=1)
sectioned.density(n,at=0.5,colmax="blue")
sectioned.density(e,at=1.5,colmax="red")
sectioned.density(m,at=3)
title(main="Densities of different distributions")
# slajd 7
curve(dnorm,-5,5,main="Density of N(0,1) distribution",
lwd=2,col="blue",ylim=c(-0.1,0.6), ylab="", xlab="",
axes=FALSE)
lines(c(0,0),c(-1,1),lty=2)
axis(1)
denstrip(n,at=0.5,ticks=mean(n),colmax="blue")
curve(dexp,0.0001,5,main="Density of Exp(1) distribution",
lwd=2,col="red", ylab="", xlab="", ylim=c(-0.1,1.5),
axes=FALSE)
lines(c(0,0),c(-1,2),lty=2)
axis(1)
denstrip(e,at=1.3,ticks=mean(e),colmax="red")
curve(f,-5,9,
main="Mixture of N(0,1) and N(4,1) distributions",
ylim=c(-0.1,0.4),lwd=2, col="black", ylab="", xlab="",
axes=FALSE)
lines(c(0,0),c(-1,1),lty=2)
lines(c(4,4),c(-1,1),lty=2)
axis(1)
denstrip(m,at=0.3,ticks=mean(m))
lines(c(2,2),c(-1,1),lty=2)
plot.new()
plot.window(c(-5,6),c(0,4))
axis(side=1)
denstrip(n,at=0.5,ticks=mean(n),colmax="blue")
denstrip(e,at=1.5,ticks=mean(e),colmax="red")
denstrip(m,at=3,ticks=mean(m))
title(main="Densities of different distributions")
denstrip.legend(-4,2,width=0.3,len=1.5)
# slajd 8
curve(dnorm,-5,5,main="Density of N(0,1) distribution",
lwd=2,col="blue",ylim=c(-0.1,5), ylab="", xlab="",
axes=FALSE)
axis(1)
cistrip(c(mean(n),-max(-min(n),max(n)),max(-min(n),max(n))),
at=0.7, horiz=T,col="blue",lwd=2)
boxplot(n,horizontal=TRUE,add=TRUE, at=1.1,col="blue",
frame=FALSE,outline=FALSE)
vwstrip(n,at=1.7,width=0.3,col="blue")
bpstrip(n,at=2.2,width=0.5,col="blue")
sectioned.density(n,at=2.7,colmax="blue")
denstrip(n,at=4,ticks=mean(n),colmax="blue")
lines(c(0,0),c(0,5),lty=3)
curve(dexp,0.0001,5,main="Density of Exp(1) distribution",
lwd=2,col="red", ylab="", xlab="", ylim=c(-0.1,6),
axes=FALSE)
axis(1)
cistrip(c(mean(e),min(e),max(e)), at=1.4, horiz=T,lwd=2,col="red")
boxplot(e,horizontal=TRUE,add=TRUE, at=2,col="red",
frame=FALSE,outline=FALSE)
vwstrip(e,at=2.6,width=0.3,col="red")
# Fixed: this panel shows the Exp(1) sample, but the three strips below were
# drawn from the normal sample `n` (the cistrip/boxplot/vwstrip above all use
# `e`). Switched them to `e`, including the tick at mean(e).
bpstrip(e,at=3.3,width=0.5,col="red")
sectioned.density(e,at=3.9,colmax="red")
denstrip(e,at=5.8,ticks=mean(e),colmax="red")
lines(c(0,0),c(0,10),lty=3)
curve(f,-5,9,
main="Mixture of N(0,1) and N(4,1) distributions",
ylim=c(-0.1,6),lwd=2, col="black", ylab="", xlab="",
axes=FALSE)
cistrip(c(mean(m),min(m),max(m)), at=1, horiz=T,lwd=2)
boxplot(m,horizontal=TRUE,add=TRUE, at=2,col="gray",
frame=FALSE,outline=FALSE)
vwstrip(m,at=2.6,width=0.3)
bpstrip(m,at=3.3,width=0.5)
sectioned.density(m,at=3.9)
denstrip(m,at=5.8,ticks=mean(m))
lines(c(0,0),c(-1,10),lty=2)
lines(c(4,4),c(-1,10),lty=2)
axis(1)
lines(c(2,2),c(-1,10),lty=2)
| /magisterskie/5_rok/1_semestr/Seminarium/slajdy.R | no_license | sommermarta/studia | R | false | false | 8,551 | r | # slajd 1
curve(dnorm,-5,5,main="Density of N(0,1) distribution",
ylab="Probability",lwd=2,col="blue")
lines(c(0,0),c(-1,1),lty=2)
curve(dexp,0.0001,5,main="Density of Exp(1) distribution",
ylab="Probability",lwd=2,col="red")
f <- function(x){
0.5*dnorm(x)+0.5*dnorm(x,4,1)
}
curve(f,-5,9,
main="Mixture of N(0,1) and N(4,1) distributions",
ylab="Probability",lwd=2, col="black")
lines(c(0,0),c(-1,1),lty=2)
lines(c(4,4),c(-1,1),lty=2)
curve(dnorm,-3,6,main="Densities of different distributions",
ylab="Probability",lwd=2, col="blue",ylim=c(-0.1,0.9))
curve(dexp,0.0001,6,lwd=2,col="red",add=TRUE)
curve(f,-5,9,lwd=2, col="black",add=TRUE)
lines(c(0,0),c(-1,1),lty=2)
legend("topright",c("N(0,1)","Exp(1)","N(0,1)+N(4,1)"),
col=c("red","blue","black"),lty=1,cex=0.75)
# slajd 2
n <- rnorm(200)
e <- rexp(200)
m <- c(rnorm(200,0,1),rnorm(200,4,1))
curve(dnorm,-5,5,main="Density of N(0,1) distribution",
lwd=2,col="blue",ylim=c(-0.1,0.6), ylab="", xlab="",
axes=FALSE)
lines(c(0,0),c(-1,1),lty=2)
axis(1)
cistrip(c(mean(n),-max(-min(n),max(n)),max(-min(n),max(n))),
at=0.5, horiz=T,lwd=2,col="blue")
curve(dexp,0.0001,5,main="Density of Exp(1) distribution",
lwd=2,col="red", ylab="", xlab="", ylim=c(-0.1,1.5),
axes=FALSE)
lines(c(0,0),c(-1,2),lty=2)
axis(1)
cistrip(c(mean(e),min(e),max(e)), at=1.3, horiz=T,lwd=2,col="red")
curve(f,-5,9,
main="Mixture of N(0,1) and N(4,1) distributions",
ylim=c(-0.1,0.4),lwd=2, col="black", ylab="", xlab="",
axes=FALSE)
lines(c(0,0),c(-1,1),lty=2)
lines(c(4,4),c(-1,1),lty=2)
axis(1)
# Fixed: removed the stray empty argument ("horiz=T,,lwd=2"), which passed a
# missing positional argument into cistrip().
cistrip(c(mean(m),min(m),max(m)), at=0.3, horiz=T, lwd=2)
lines(c(2,2),c(-1,1),lty=2)
plot.new()
plot.window(c(-5,6),c(0,4))
axis(side=1)
cistrip(c(mean(n),-max(-min(n),max(n)),max(-min(n),max(n))),
at=0.5, horiz=T,col="blue",lwd=2)
cistrip(c(mean(e),min(e),max(e)), at=1.5, horiz=T,col="red",lwd=2)
cistrip(c(mean(m),min(m),max(m)), at=3, horiz=T,lwd=2)
title(main="Densities of different distributions")
# slajd 3
curve(dnorm,-5,5,main="Density of N(0,1) distribution",
lwd=2,col="blue",ylim=c(-0.1,1.1), ylab="", xlab="",
axes=FALSE)
lines(c(0,0),c(-1,2),lty=2)
axis(1)
boxplot(n,horizontal=TRUE,at=0.8,add=TRUE,col="blue",outline=FALSE,
cex=1,frame=FALSE)
curve(dexp,0.0001,5,main="Density of Exp(1) distribution",
lwd=2,col="red", ylab="", xlab="", ylim=c(-0.1,1.5),
axes=FALSE)
lines(c(0,0),c(-1,2),lty=2)
axis(1)
boxplot(e,horizontal=TRUE,add=TRUE, at=1.3,col="red",
frame=FALSE,outline=FALSE)
curve(f,-5,9,
main="Mixture of N(0,1) and N(4,1) distributions",
ylim=c(-0.1,0.8),lwd=2, col="black", ylab="", xlab="",
axes=FALSE)
lines(c(0,0),c(-1,1),lty=2)
lines(c(4,4),c(-1,1),lty=2)
axis(1)
boxplot(m,horizontal=TRUE,add=TRUE,at=0.6,col="gray",
frame=FALSE,outline=FALSE)
lines(c(2,2),c(-1,1),lty=2)
plot.new()
plot.window(c(-5,7),c(0,4))
axis(side=1)
boxplot(n,horizontal=TRUE,at=0.5,add=TRUE,col="blue",frame=FALSE,outline=FALSE)
boxplot(e,horizontal=TRUE,add=TRUE, at=2,col="red",frame=FALSE,outline=FALSE)
boxplot(m,horizontal=TRUE,add=TRUE,at=3.5,col="gray",frame=FALSE,outline=FALSE)
title(main="Densities of different distributions")
# slajd 4
curve(dnorm,-5,5,main="Density of N(0,1) distribution",
lwd=2,col="blue",ylim=c(-0.1,1.1), ylab="", xlab="",
axes=FALSE)
lines(c(0,0),c(-1,1),lty=2)
axis(1)
bpstrip(n,at=0.8,width=0.5,col="blue")
curve(dexp,0.0001,5,main="Density of Exp(1) distribution",
lwd=2,col="red", ylab="", xlab="", ylim=c(-0.1,1.5),
axes=FALSE)
lines(c(0,0),c(-1,2),lty=2)
axis(1)
bpstrip(e,at=1.3,width=0.5,col="red")
curve(f,-5,9,
main="Mixture of N(0,1) and N(4,1) distributions",
ylim=c(-0.1,0.7),lwd=2, col="black", ylab="", xlab="",
axes=FALSE)
lines(c(0,0),c(-1,1),lty=2)
lines(c(4,4),c(-1,1),lty=2)
axis(1)
bpstrip(m,at=0.5,width=0.35)
lines(c(2,2),c(-1,1),lty=2)
plot.new()
plot.window(c(-5,6),c(0,4))
axis(side=1)
bpstrip(n,at=0.5,width=0.5, col="blue")
bpstrip(e,at=1.5,width=0.5,col="red")
bpstrip(m,at=3,width=0.5)
title(main="Densities of different distributions")
# slajd 5
curve(dnorm,-5,5,main="Density of N(0,1) distribution",
lwd=2,col="blue",ylim=c(-0.1,1.1), ylab="", xlab="",
axes=FALSE)
lines(c(0,0),c(-1,1),lty=2)
axis(1)
vwstrip(n,at=0.8,width=0.3,col="blue")
curve(dexp,0.0001,5,main="Density of Exp(1) distribution",
lwd=2,col="red", ylab="", xlab="", ylim=c(-0.5,1.5),
axes=FALSE)
lines(c(0,0),c(-1,2),lty=2)
axis(1)
vwstrip(e,at=1.2,width=0.3,col="red")
curve(f,-5,9,
main="Mixture of N(0,1) and N(4,1) distributions",
ylim=c(-0.1,0.7),lwd=2, col="black", ylab="", xlab="",
axes=FALSE)
lines(c(0,0),c(-1,1),lty=2)
lines(c(4,4),c(-1,1),lty=2)
axis(1)
vwstrip(m,at=0.5,width=0.3)
lines(c(2,2),c(-1,1),lty=2)
plot.new()
plot.window(c(-5,6),c(0,4))
axis(side=1)
vwstrip(n,at=0.5,width=0.5,col="blue")
vwstrip(e,at=1.5,width=0.5,col="red")
vwstrip(m,at=3,width=0.5)
title(main="Densities of different distributions")
# slajd 6
curve(dnorm,-5,5,main="Density of N(0,1) distribution",
lwd=2,col="blue",ylim=c(-0.1,1.1), ylab="", xlab="",
axes=FALSE)
lines(c(0,0),c(-1,1),lty=2)
axis(1)
sectioned.density(n,at=0.5,colmax="blue")
curve(dexp,0.0001,5,main="Density of Exp(1) distribution",
lwd=2,col="red", ylab="", xlab="", ylim=c(-0.5,1.7),
axes=FALSE)
lines(c(0,0),c(-1,2),lty=2)
axis(1)
sectioned.density(e,at=1.2,colmax="red")
curve(f,-5,9,
main="Mixture of N(0,1) and N(4,1) distributions",
ylim=c(-0.1,0.7),lwd=2, col="black", ylab="", xlab="",
axes=FALSE)
lines(c(0,0),c(-1,1),lty=2)
lines(c(4,4),c(-1,1),lty=2)
axis(1)
sectioned.density(m,at=0.5)
lines(c(2,2),c(-1,1),lty=2)
plot.new()
plot.window(c(-5,6),c(0,4))
axis(side=1)
sectioned.density(n,at=0.5,colmax="blue")
sectioned.density(e,at=1.5,colmax="red")
sectioned.density(m,at=3)
title(main="Densities of different distributions")
# slajd 7
curve(dnorm,-5,5,main="Density of N(0,1) distribution",
lwd=2,col="blue",ylim=c(-0.1,0.6), ylab="", xlab="",
axes=FALSE)
lines(c(0,0),c(-1,1),lty=2)
axis(1)
denstrip(n,at=0.5,ticks=mean(n),colmax="blue")
curve(dexp,0.0001,5,main="Density of Exp(1) distribution",
lwd=2,col="red", ylab="", xlab="", ylim=c(-0.1,1.5),
axes=FALSE)
lines(c(0,0),c(-1,2),lty=2)
axis(1)
denstrip(e,at=1.3,ticks=mean(e),colmax="red")
curve(f,-5,9,
main="Mixture of N(0,1) and N(4,1) distributions",
ylim=c(-0.1,0.4),lwd=2, col="black", ylab="", xlab="",
axes=FALSE)
lines(c(0,0),c(-1,1),lty=2)
lines(c(4,4),c(-1,1),lty=2)
axis(1)
denstrip(m,at=0.3,ticks=mean(m))
lines(c(2,2),c(-1,1),lty=2)
plot.new()
plot.window(c(-5,6),c(0,4))
axis(side=1)
denstrip(n,at=0.5,ticks=mean(n),colmax="blue")
denstrip(e,at=1.5,ticks=mean(e),colmax="red")
denstrip(m,at=3,ticks=mean(m))
title(main="Densities of different distributions")
denstrip.legend(-4,2,width=0.3,len=1.5)
# slajd 8
curve(dnorm,-5,5,main="Density of N(0,1) distribution",
lwd=2,col="blue",ylim=c(-0.1,5), ylab="", xlab="",
axes=FALSE)
axis(1)
cistrip(c(mean(n),-max(-min(n),max(n)),max(-min(n),max(n))),
at=0.7, horiz=T,col="blue",lwd=2)
boxplot(n,horizontal=TRUE,add=TRUE, at=1.1,col="blue",
frame=FALSE,outline=FALSE)
vwstrip(n,at=1.7,width=0.3,col="blue")
bpstrip(n,at=2.2,width=0.5,col="blue")
sectioned.density(n,at=2.7,colmax="blue")
denstrip(n,at=4,ticks=mean(n),colmax="blue")
lines(c(0,0),c(0,5),lty=3)
curve(dexp,0.0001,5,main="Density of Exp(1) distribution",
lwd=2,col="red", ylab="", xlab="", ylim=c(-0.1,6),
axes=FALSE)
axis(1)
cistrip(c(mean(e),min(e),max(e)), at=1.4, horiz=T,lwd=2,col="red")
boxplot(e,horizontal=TRUE,add=TRUE, at=2,col="red",
frame=FALSE,outline=FALSE)
vwstrip(e,at=2.6,width=0.3,col="red")
# Fixed: this panel shows the Exp(1) sample, but the three strips below were
# drawn from the normal sample `n` (the cistrip/boxplot/vwstrip above all use
# `e`). Switched them to `e`, including the tick at mean(e).
bpstrip(e,at=3.3,width=0.5,col="red")
sectioned.density(e,at=3.9,colmax="red")
denstrip(e,at=5.8,ticks=mean(e),colmax="red")
lines(c(0,0),c(0,10),lty=3)
curve(f,-5,9,
main="Mixture of N(0,1) and N(4,1) distributions",
ylim=c(-0.1,6),lwd=2, col="black", ylab="", xlab="",
axes=FALSE)
cistrip(c(mean(m),min(m),max(m)), at=1, horiz=T,lwd=2)
boxplot(m,horizontal=TRUE,add=TRUE, at=2,col="gray",
frame=FALSE,outline=FALSE)
vwstrip(m,at=2.6,width=0.3)
bpstrip(m,at=3.3,width=0.5)
sectioned.density(m,at=3.9)
denstrip(m,at=5.8,ticks=mean(m))
lines(c(0,0),c(-1,10),lty=2)
lines(c(4,4),c(-1,10),lty=2)
axis(1)
lines(c(2,2),c(-1,10),lty=2)
|
#***************************************************************8
#FROM HERE
#HAS TO HAVE THE MARKDF from before.
install.packages("tm")
library(jsonlite)
library(wordcloud)
library(tm)
MARKDF <-fromJSON("test.JSON", flatten = TRUE)
myCorpus <- Corpus(VectorSource(MARKDF$text))
myCorpus <- tm_map(myCorpus, content_transformer(tolower))
removeURL <- function(x) gsub("http[^[:space:]]*", "", x)
myCorpus <- tm_map(myCorpus, content_transformer(removeURL))
removeNumPunct <- function(x) gsub("[^[:alpha:][:space:]]*","", x)
myCorpus <- tm_map(myCorpus, content_transformer(removeNumPunct))
myStopwords <- c(setdiff(stopwords('english'), c("r", "big")), "use", "see", "used" , "via", "amp")
myCorpus <- tm_map(myCorpus, removeWords, myStopwords)
myCorpus <- tm_map(myCorpus, stripWhitespace)
#SAVE
stemCompletion2 <- function(x, dictionary) {
  # Re-complete stemmed tokens against a dictionary corpus, then rebuild a
  # single plain-text document from the completed words.
  tokens <- unlist(strsplit(as.character(x), " "))
  tokens <- tokens[tokens != ""]
  completed <- stemCompletion(tokens, dictionary = dictionary)
  PlainTextDocument(stripWhitespace(paste(completed, collapse = " ")))
}
myCorpusCopy <- myCorpus
myCorpus <- lapply(myCorpus, stemCompletion2, dictionary=myCorpusCopy)
myCorpus <- Corpus(VectorSource(myCorpus))
tdm <- TermDocumentMatrix(myCorpus,control = list(wordLengths = c(3, Inf)))
freq.terms <- findFreqTerms(tdm, lowfreq = .05*tdm$ncol)
term.freq <- rowSums(as.matrix(tdm))
df <- data.frame(term = names(term.freq), freq = term.freq)
m <- as.matrix(tdm)
# calculate the frequency of words and sort it by frequency
word.freq <- sort(rowSums(m), decreasing = T)
# colors
cloud <- wordcloud(words = names(word.freq), freq = word.freq, min.freq = 3,
random.order = F, random.color = T, col = c("red", "royalblue1", " dark green", "grey28"))
write(cloud, stdout())
#TO HERE
########################################### | /WordCloudMaker.R | no_license | calebdre/culture-fit | R | false | false | 1,885 | r | #***************************************************************8
#FROM HERE
#HAS TO HAVE THE MARKDF from before.
install.packages("tm")
library(jsonlite)
library(wordcloud)
library(tm)
MARKDF <-fromJSON("test.JSON", flatten = TRUE)
myCorpus <- Corpus(VectorSource(MARKDF$text))
myCorpus <- tm_map(myCorpus, content_transformer(tolower))
removeURL <- function(x) gsub("http[^[:space:]]*", "", x)
myCorpus <- tm_map(myCorpus, content_transformer(removeURL))
removeNumPunct <- function(x) gsub("[^[:alpha:][:space:]]*","", x)
myCorpus <- tm_map(myCorpus, content_transformer(removeNumPunct))
myStopwords <- c(setdiff(stopwords('english'), c("r", "big")), "use", "see", "used" , "via", "amp")
myCorpus <- tm_map(myCorpus, removeWords, myStopwords)
myCorpus <- tm_map(myCorpus, stripWhitespace)
#SAVE
stemCompletion2 <- function(x, dictionary) {
x <- unlist(strsplit(as.character(x), " "))
x <- x[x != ""]
x <- stemCompletion(x, dictionary=dictionary)
x <- paste(x, sep="", collapse=" ")
PlainTextDocument(stripWhitespace(x))
}
myCorpusCopy <- myCorpus
myCorpus <- lapply(myCorpus, stemCompletion2, dictionary=myCorpusCopy)
myCorpus <- Corpus(VectorSource(myCorpus))
tdm <- TermDocumentMatrix(myCorpus,control = list(wordLengths = c(3, Inf)))
freq.terms <- findFreqTerms(tdm, lowfreq = .05*tdm$ncol)
term.freq <- rowSums(as.matrix(tdm))
df <- data.frame(term = names(term.freq), freq = term.freq)
m <- as.matrix(tdm)
# calculate the frequency of words and sort it by frequency
word.freq <- sort(rowSums(m), decreasing = T)
# colors
cloud <- wordcloud(words = names(word.freq), freq = word.freq, min.freq = 3,
random.order = F, random.color = T, col = c("red", "royalblue1", " dark green", "grey28"))
write(cloud, stdout())
#TO HERE
########################################### |
# JAGS script Scape_Fraction.R
# Copyright (C) 2015 Rafael S. de Souza
#
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License version 3 as published by
#the Free Software Foundation.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
#
# Required libraries
require(R2jags)
library(ggmcmc)
library(ggplot2)
library(ggthemes)
#Read the dataset
data.1= read.table(file="..//data/FiBY_escape_data_all.dat",header=FALSE)
colnames(data.1)<-c("redshift","fEsc","Mvir","Mstar","Mgas","QHI","sfr_gas",
"sfr_stars","ssfr_gas","ssfr_stars","baryon_fraction",
"spin","age_star_mean","age_star_max","age_star_min","NH_10")
data.2<-data.1[data.1$redshift<=10,]
N<-nrow(data.2)
data.2$Y[data.2$fEsc>=0.1]<-1
data.2$Y[data.2$fEsc<0.1]<-0
# Prepare data for JAGS
x.scale<-as.data.frame(scale(data.2[,c("Mstar","Mgas","Mvir","sfr_gas","baryon_fraction","ssfr_gas","age_star_mean",
"spin","NH_10","QHI")]))
X<-model.matrix(~baryon_fraction+QHI,data=x.scale)
# Scale
K<-ncol(X)
jags.data <- list(Y= data.2$Y,
N = nrow(data.2),
X=X,
b0 = rep(0,K),
B0=diag(1e-4,K),
Npred = K
)
LOGIT<-"model{
#1. Priors
beta~dmnorm(b0[],B0[,]) # Normal Priors
#2. Likelihood
for(i in 1:N){
Y[i] ~ dbern(pi[i])
logit(pi[i]) <- eta[i]
eta[i] <- inprod(beta[], X[i,])
#3. Prediction
NewPred[i]~dbern(pi[i])
}
}"
# Parameters to monitor: regression coefficients, fitted probabilities, and
# posterior-predictive draws of the binary response.
params <- c("beta","pi","NewPred")
# Initial values for each MCMC chain: small random coefficients.
# `K` (number of predictors) is defined earlier in this script.
inits <- function () {
  list(beta = rnorm(K, 0, 0.1))}
# Fit the Bayesian logistic regression with JAGS: 3 chains, 500 iterations
# each, first 250 discarded as burn-in, no thinning.
jags.logit <- jags(
          data = jags.data,
          inits = inits,
          parameters = params,
          model = textConnection(LOGIT),
          n.chains = 3,
          n.iter = 500,
          n.thin = 1,
          n.burnin = 250)
# Posterior summary table (means, credible intervals, Rhat, n.eff).
print(jags.logit,justify = "left", digits=2)
| /scripts/old/Scape_Fraction_Logistic.R | no_license | RafaelSdeSouza/Beta_regression | R | false | false | 2,296 | r | # JAGS script Scape_Fraction.R
# Copyright (C) 2015 Rafael S. de Souza
#
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License version 3 as published by
#the Free Software Foundation.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
#
# Required libraries
require(R2jags)
library(ggmcmc)
library(ggplot2)
library(ggthemes)
#Read the dataset
data.1= read.table(file="..//data/FiBY_escape_data_all.dat",header=FALSE)
colnames(data.1)<-c("redshift","fEsc","Mvir","Mstar","Mgas","QHI","sfr_gas",
"sfr_stars","ssfr_gas","ssfr_stars","baryon_fraction",
"spin","age_star_mean","age_star_max","age_star_min","NH_10")
data.2<-data.1[data.1$redshift<=10,]
N<-nrow(data.2)
data.2$Y[data.2$fEsc>=0.1]<-1
data.2$Y[data.2$fEsc<0.1]<-0
# Prepare data for JAGS
x.scale<-as.data.frame(scale(data.2[,c("Mstar","Mgas","Mvir","sfr_gas","baryon_fraction","ssfr_gas","age_star_mean",
"spin","NH_10","QHI")]))
X<-model.matrix(~baryon_fraction+QHI,data=x.scale)
# Scale
K<-ncol(X)
jags.data <- list(Y= data.2$Y,
N = nrow(data.2),
X=X,
b0 = rep(0,K),
B0=diag(1e-4,K),
Npred = K
)
LOGIT<-"model{
#1. Priors
beta~dmnorm(b0[],B0[,]) # Normal Priors
#2. Likelihood
for(i in 1:N){
Y[i] ~ dbern(pi[i])
logit(pi[i]) <- eta[i]
eta[i] <- inprod(beta[], X[i,])
#3. Prediction
NewPred[i]~dbern(pi[i])
}
}"
params <- c("beta","pi","NewPred")
inits <- function () {
list(beta = rnorm(K, 0, 0.1))}
jags.logit <- jags(
data = jags.data,
inits = inits,
parameters = params,
model = textConnection(LOGIT),
n.chains = 3,
n.iter = 500,
n.thin = 1,
n.burnin = 250)
print(jags.logit,justify = "left", digits=2)
|
## caching the inverse of a matrix
## Matrix inversion may be some benefit to caching the inverse of a matrix
## rather than compute it repeatedly
## Create a special "matrix" object that caches its inverse.
## Returns a list of four accessors: set/get the underlying matrix and
## set/get the cached inverse (NULL until computed by cacheSolve).
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL                      # cached inverse; NULL means "not computed yet"
  set <- function(y) {
    x <<- y
    inv <<- NULL                   # fixed typo 'NULl'; invalidate cache when the matrix changes
  }
  get <- function() x              # fixed: was 'X' (undefined object), must return closure's 'x'
  setinverse <- function(inverse) inv <<- inverse
  getinverse <- function() inv
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
## If the inverse has already been calculated (and the matrix has not changed)
## then the cachesolve should retrieve the inverse from the cache.
## Computing the inverse of a square matrix can be done with the solve function in R
## Compute (or retrieve) the inverse of the special "matrix" produced by
## makeCacheMatrix. If the inverse is already cached it is returned directly;
## otherwise it is computed with solve(), stored in the cache, and returned.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
| /cachematrix.R | no_license | YurouWang/ProgrammingAssignment2 | R | false | false | 1,195 | r | ## caching the inverse of a matrix
## Matrix inversion may be some benefit to caching the inverse of a matrix
## rather than compute it repeatedly
## Create a special "matrix" object that caches its inverse.
## Returns a list of four accessors: set/get the underlying matrix and
## set/get the cached inverse (NULL until computed by cacheSolve).
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL                      # cached inverse; NULL means "not computed yet"
  set <- function(y) {
    x <<- y
    inv <<- NULL                   # fixed typo 'NULl'; invalidate cache when the matrix changes
  }
  get <- function() x              # fixed: was 'X' (undefined object), must return closure's 'x'
  setinverse <- function(inverse) inv <<- inverse
  getinverse <- function() inv
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
## If the inverse has already been calculated (and the matrix has not changed)
## then the cachesolve should retrieve the inverse from the cache.
## Computing the inverse of a square matrix can be done with the solve function in R
## Compute (or retrieve) the inverse of the special "matrix" produced by
## makeCacheMatrix. If the inverse is already cached it is returned directly;
## otherwise it is computed with solve(), stored in the cache, and returned.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
library(TxDb.Hsapiens.UCSC.hg19.knownGene)
library(SummarizedExperiment)
library(BSgenome.Hsapiens.UCSC.hg19)
library(fitCons.UCSC.hg19)
library(phastCons100way.UCSC.hg19)
library(caret)
library(ROCR)
library(pROC)
library(m6ALogisticModel)
source("/home/kunqi/m7G/method/class1.R")
source("/home/kunqi/m7G/method/class2.R")
source("/home/kunqi/m7G/method/class3.R")
source("/home/kunqi/m7G/method/class4.R")
source("/home/kunqi/m7G/method/class5.R")
source("/home/kunqi/m7G/method/class6.R")
#
GenoFgenreation <- function(data){
  ## Genome-derived feature generation for candidate sites.
  ## The query sites are pooled with the stored positive/negative training
  ## sites so that predictors_annot() annotates everything consistently;
  ## only the rows corresponding to `data` are returned.
  ## NOTE(review): the function name keeps its original spelling because
  ## external callers (e.g. SitePredictionCollection) depend on it.
  Notmethylated_sample <- readRDS("/home/kunqi/m6A reader/eIF3a/Exon/negative1.rds")
  mature_sample <- readRDS("/home/kunqi/m6A reader/eIF3a/Exon/postive.rds")
  analysis_data <- data
  mcols(mature_sample) <- NULL
  # Shift query/negative sites by -2 nt before pooling -- presumably to align
  # on the modified base; TODO confirm against how the training set was built.
  analysis_data <- c(analysis_data-2, mature_sample, Notmethylated_sample-2)
  matureSE <- SummarizedExperiment()
  rowRanges(matureSE) <- analysis_data
  # Fixed: use `<-` instead of `=` for assignment (idiomatic R).
  Additional_features_hg19 <- list(
    HNRNPC_eCLIP = eCLIP_HNRNPC_gr,
    miR_targeted_genes = miR_targeted_genes_grl,
    TargetScan = TargetScan_hg19_gr,
    Verified_miRtargets = verified_targets_gr,
    METTL3_TREW = METTL3_TREW,
    METTL14_TREW = METTL14_TREW,
    WTAP_TREW = WTAP_TREW,
    METTL16_CLIP = METTL16_CLIP,
    ALKBH5_PARCLIP = ALKBH5_PARCLIP,
    FTO_CLIP = FTO_CLIP,
    FTO_eCLIP = FTO_eCLIP
  )
  # Annotate all pooled sites with genome-derived predictors (m6ALogisticModel).
  mature_FE <- predictors_annot(se = matureSE,
                                txdb = TxDb.Hsapiens.UCSC.hg19.knownGene,
                                bsgnm = Hsapiens,
                                fc = fitCons.UCSC.hg19,
                                motif = c(),
                                pc = phastCons100way.UCSC.hg19,
                                struct_hybridize = Struc_hg19,
                                feature_lst = Additional_features_hg19,
                                hk_genes_list = HK_hg19_eids,
                                genes_ambiguity_method = "average")
  matureVSnon <- mcols(mature_FE)
  # Keep only the feature columns selected during model training.
  top_selected_final <- readRDS("/home/yuxuan.wu/m6A reader/top_selected_final/feature_eIF3a_Exon.rds")
  matureVSnon <- matureVSnon[, top_selected_final]
  # Return only the rows for the query sites (they were placed first in the
  # pooled ranges). seq_along() is safe for zero-length input (1:length is not).
  return(matureVSnon[seq_along(data), ])
}
SeqFgeneration <- function(data) {
  # Sequence-derived features: extend each site by 20 nt on both sides,
  # extract the hg19 genomic sequence, and encode it as chemical-property
  # features via ChemicalProperty().
  seqs <- as.character(DNAStringSet(Views(Hsapiens, data + 20)))
  ChemicalProperty(seqs)
}
SitePredictionCollection <- function(data) {
  ## Score candidate sites with the pre-trained eIF3a exon model.
  ## Combines genome-derived and sequence-derived features column-wise and
  ## returns the class-membership probabilities from predict(type = "prob").
  genome_features <- GenoFgenreation(data)
  sequence_features <- SeqFgeneration(data)
  combined <- cbind(genome_features, sequence_features)
  model <- readRDS("/home/yuxuan.wu/m6A reader/model/eIF3a_Exon.rds")
  # Removed the redundant `results <- results` self-assignment.
  predict(model, combined, type = "prob")
}
| /web_script/eif3a_exon_prob.R | no_license | yuxuanwu17/m6a_ML | R | false | false | 2,647 | r | library(TxDb.Hsapiens.UCSC.hg19.knownGene)
library(SummarizedExperiment)
library(BSgenome.Hsapiens.UCSC.hg19)
library(fitCons.UCSC.hg19)
library(phastCons100way.UCSC.hg19)
library(caret)
library(ROCR)
library(pROC)
library(m6ALogisticModel)
source("/home/kunqi/m7G/method/class1.R")
source("/home/kunqi/m7G/method/class2.R")
source("/home/kunqi/m7G/method/class3.R")
source("/home/kunqi/m7G/method/class4.R")
source("/home/kunqi/m7G/method/class5.R")
source("/home/kunqi/m7G/method/class6.R")
#
GenoFgenreation <- function(data){
Notmethylated_sample <- readRDS("/home/kunqi/m6A reader/eIF3a/Exon/negative1.rds")
mature_sample <- readRDS("/home/kunqi/m6A reader/eIF3a/Exon/postive.rds")
analysis_data <- data
mcols(mature_sample) <- NULL
analysis_data <- c(analysis_data-2,mature_sample,Notmethylated_sample-2)
matureSE <- SummarizedExperiment()
rowRanges(matureSE) <- analysis_data
Additional_features_hg19 = list(
HNRNPC_eCLIP = eCLIP_HNRNPC_gr,
miR_targeted_genes = miR_targeted_genes_grl,
TargetScan = TargetScan_hg19_gr,
Verified_miRtargets = verified_targets_gr,
METTL3_TREW = METTL3_TREW,
METTL14_TREW = METTL14_TREW,
WTAP_TREW = WTAP_TREW,
METTL16_CLIP = METTL16_CLIP,
ALKBH5_PARCLIP = ALKBH5_PARCLIP,
FTO_CLIP = FTO_CLIP,
FTO_eCLIP = FTO_eCLIP
)
mature_FE <- predictors_annot(se = matureSE,
txdb = TxDb.Hsapiens.UCSC.hg19.knownGene,
bsgnm = Hsapiens,
fc = fitCons.UCSC.hg19,
motif = c(),
pc = phastCons100way.UCSC.hg19,
struct_hybridize = Struc_hg19,
feature_lst = Additional_features_hg19,
hk_genes_list = HK_hg19_eids,
genes_ambiguity_method = "average")
matureVSnon <- mcols(mature_FE)
top_selected_final <- readRDS("/home/yuxuan.wu/m6A reader/top_selected_final/feature_eIF3a_Exon.rds")
matureVSnon <- matureVSnon[,top_selected_final]
return(matureVSnon[1:length(data),])
}
SeqFgeneration <- function(data){
analysisData <- data
analysisData<-as.character(DNAStringSet(Views(Hsapiens,analysisData+20)))
CP <- ChemicalProperty(analysisData)
return(CP)
}
SitePredictionCollection <- function(data){
GF <- GenoFgenreation(data)
SeqFR <- SeqFgeneration(data)
BothFR <- cbind(GF,SeqFR)
reModel2 <- readRDS("/home/yuxuan.wu/m6A reader/model/eIF3a_Exon.rds")
results <- predict(reModel2, BothFR ,type="prob")
results <- results
return(results)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/init_gdrive.R
\name{init_gdrive}
\alias{init_gdrive}
\title{Create folder structure on Google Drive}
\usage{
init_gdrive(gdrive_path = NULL,
folders = c("literature_search/",
"screening/",
"screening_consensus/",
"fulltext/",
"extraction/",
"extraction_consensus/"))
}
\arguments{
\item{gdrive_path}{a full (new) gdrive path <chr>, preferably with / at the end}
\item{folders}{a vector <chr> of folder names to create in path}
}
\value{
no output, this function exerts a side-effect
}
\description{
Create a folder structure (default or custom) on google drive for a meta-analysis project
}
\examples{
\dontrun{
# Creating default folder structure
init_gdrive("research/meta-analysis/")
# Creating custom folder structure
init_gdrive("research/meta-analysis/", c("screening", "extract"))
}
}
| /man/init_gdrive.Rd | permissive | nthun/metamanager | R | false | true | 1,045 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/init_gdrive.R
\name{init_gdrive}
\alias{init_gdrive}
\title{Create folder structure on Google Drive}
\usage{
init_gdrive(gdrive_path = NULL,
folders = c("literature_search/",
"screening/",
"screening_consensus/",
"fulltext/",
"extraction/",
"extraction_consensus/"))
}
\arguments{
\item{gdrive_path}{a full (new) gdrive path <chr>, preferably with / at the end}
\item{folders}{a vector <chr> of folder names to create in path}
}
\value{
no output, this function exerts a side-effect
}
\description{
Create a folder structure (default or custom) on google drive for a meta-analysis project
}
\examples{
\dontrun{
# Creating default folder structure
init_gdrive("research/meta-analysis/")
# Creating custom folder structure
init_gdrive("research/meta-analysis/", c("screening", "extract"))
}
}
|
# Load all your packages before calling make().
# workflow
library(drake)
library(rmarkdown)
# data processing
library(tidyverse)
library(lubridate)
library(stackr)
# bayes
library(rstan)
library(rstanarm)
library(tidybayes) | /R/packages.R | no_license | odaniel1/bayes-binary-timeseries | R | false | false | 228 | r | # Load all your packages before calling make().
# workflow
library(drake)
library(rmarkdown)
# data processing
library(tidyverse)
library(lubridate)
library(stackr)
# bayes
library(rstan)
library(rstanarm)
library(tidybayes) |
library(tidyverse)
library(glue)
# Load the week-11 (post-waivers) simulation results.
sim <- readRDS("./data/simulation_v5_week11_posWaivers.rds")
# Ridgeline chart: simulated weekly-score distribution per team (10/50/90
# percentile lines), with each team's actual score overlaid as a red triangle.
# NOTE(review): stat_density_ridges()/theme_ridges() come from the ggridges
# package, which is not attached above -- confirm it is loaded elsewhere.
# NOTE(review): `.week` is expected to exist in the calling environment.
sim$teams %>%
  select(teamId, name, imageUrl, week.stats) %>%
  unnest(week.stats) %>%
  filter(week==.week) %>%
  inner_join(sim$teams_sim, by="teamId") %>%
  select(-simulation) %>%
  rename(sim=simulation.org) %>%
  unnest(sim) %>%
  # Order teams by simulated score; hide zero actual scores (treated as NA).
  mutate(name=fct_reorder(name, sim),
         pts=if_else(pts==0, as.numeric(NA), pts)) %>%
  ggplot(aes(x=sim, y=name, fill=name)) +
  stat_density_ridges(quantile_lines = TRUE, quantiles = c(0.1,0.5,0.9), scale=1, alpha=.6) +
  geom_point(aes(x=pts, y=name), color="black", fill="red", shape=24) +
  theme_ridges() +
  theme( legend.position = "none" ) +
  scale_y_discrete(expand=c(0.,0)) +
  scale_x_continuous(expand = c(0.01,0)) +
  labs(x="points",y="")
sim$players_sim %>%
select(teamId, teamName, playerId, rosterSlotId, pts.proj, weekPts, simulation.org ) %>%
inner_join(select(sim$players_stats,playerId, name, position), by="playerId") %>%
relocate(teamId, teamName, playerId, name, position, everything()) %>%
filter(teamId%in%c(1,2)) %>%
mutate(name=as.factor(glue("{name} [{position}]"))) %>%
arrange(rosterSlotId) %>%
unnest(simulation.org) %>%
mutate(name=fct_reorder(name, -rosterSlotId)) %>%
ggplot(aes(x=simulation.org, y=name, fill=position)) +
geom_density_ridges(quantile_lines = TRUE, quantiles = c(0.1,0.5,0.9), scale=1, alpha=.6) +
geom_point(aes(x=weekPts, y=name), color="black", fill="red", shape=24) +
theme_ridges() +
theme( legend.position = "none" ) +
scale_y_discrete(expand=c(0.,0)) +
scale_x_continuous(expand = c(0.01,0)) +
facet_wrap(rosterSlotId~., ncol = 1, scales = "free") +
labs(x="points",y="")
sim$players_sim %>%
select(teamId, teamName, playerId, rosterSlotId, pts.proj, weekPts, simulation.org ) %>%
inner_join(select(sim$players_stats,playerId, name, position), by="playerId") %>%
relocate(teamId, teamName, playerId, name, position, everything()) %>%
filter(teamId%in%c(1,2)) %>%
mutate(name=as.factor(glue("{name} [{position}]"))) %>%
arrange(rosterSlotId) %>%
unnest(simulation.org) %>%
mutate(name=fct_reorder(name, -rosterSlotId)) %>%
ggplot(aes(x=simulation.org, y=name, fill=teamName)) +
geom_boxplot() +
# geom_density_ridges(quantile_lines = TRUE, quantiles = c(0.1,0.5,0.9), scale=1, alpha=.6) +
geom_point(aes(x=weekPts, y=name), color="black", fill="red", shape=24) +
# theme_ridges() +
#theme( legend.position = "none" ) +
# scale_y_discrete(expand=c(0.,0)) +
# scale_x_continuous(expand = c(0.01,0)) +
facet_wrap(rosterSlotId~., ncol = 1, scales = "free") +
labs(x="points",y="") +
theme_minimal()
| /R/_draft/new_roster_comparation_chart.R | no_license | GiulSposito/DudesFantasyFootball | R | false | false | 2,699 | r | library(tidyverse)
library(glue)
sim <- readRDS("./data/simulation_v5_week11_posWaivers.rds")
sim$teams %>%
select(teamId, name, imageUrl, week.stats) %>%
unnest(week.stats) %>%
filter(week==.week) %>%
inner_join(sim$teams_sim, by="teamId") %>%
select(-simulation) %>%
rename(sim=simulation.org) %>%
unnest(sim) %>%
mutate(name=fct_reorder(name, sim),
pts=if_else(pts==0, as.numeric(NA), pts)) %>%
ggplot(aes(x=sim, y=name, fill=name)) +
stat_density_ridges(quantile_lines = TRUE, quantiles = c(0.1,0.5,0.9), scale=1, alpha=.6) +
geom_point(aes(x=pts, y=name), color="black", fill="red", shape=24) +
theme_ridges() +
theme( legend.position = "none" ) +
scale_y_discrete(expand=c(0.,0)) +
scale_x_continuous(expand = c(0.01,0)) +
labs(x="points",y="")
sim$players_sim %>%
select(teamId, teamName, playerId, rosterSlotId, pts.proj, weekPts, simulation.org ) %>%
inner_join(select(sim$players_stats,playerId, name, position), by="playerId") %>%
relocate(teamId, teamName, playerId, name, position, everything()) %>%
filter(teamId%in%c(1,2)) %>%
mutate(name=as.factor(glue("{name} [{position}]"))) %>%
arrange(rosterSlotId) %>%
unnest(simulation.org) %>%
mutate(name=fct_reorder(name, -rosterSlotId)) %>%
ggplot(aes(x=simulation.org, y=name, fill=position)) +
geom_density_ridges(quantile_lines = TRUE, quantiles = c(0.1,0.5,0.9), scale=1, alpha=.6) +
geom_point(aes(x=weekPts, y=name), color="black", fill="red", shape=24) +
theme_ridges() +
theme( legend.position = "none" ) +
scale_y_discrete(expand=c(0.,0)) +
scale_x_continuous(expand = c(0.01,0)) +
facet_wrap(rosterSlotId~., ncol = 1, scales = "free") +
labs(x="points",y="")
sim$players_sim %>%
select(teamId, teamName, playerId, rosterSlotId, pts.proj, weekPts, simulation.org ) %>%
inner_join(select(sim$players_stats,playerId, name, position), by="playerId") %>%
relocate(teamId, teamName, playerId, name, position, everything()) %>%
filter(teamId%in%c(1,2)) %>%
mutate(name=as.factor(glue("{name} [{position}]"))) %>%
arrange(rosterSlotId) %>%
unnest(simulation.org) %>%
mutate(name=fct_reorder(name, -rosterSlotId)) %>%
ggplot(aes(x=simulation.org, y=name, fill=teamName)) +
geom_boxplot() +
# geom_density_ridges(quantile_lines = TRUE, quantiles = c(0.1,0.5,0.9), scale=1, alpha=.6) +
geom_point(aes(x=weekPts, y=name), color="black", fill="red", shape=24) +
# theme_ridges() +
#theme( legend.position = "none" ) +
# scale_y_discrete(expand=c(0.,0)) +
# scale_x_continuous(expand = c(0.01,0)) +
facet_wrap(rosterSlotId~., ncol = 1, scales = "free") +
labs(x="points",y="") +
theme_minimal()
|
library(roxygen2)
roxygen2::roxygenize("R/")
| /Rsrc/roxygenize.R | no_license | LarryJacobson/gmacs | R | false | false | 45 | r | library(roxygen2)
roxygen2::roxygenize("R/")
|
#' Jackknife_Variance
#' @description This function takes a dataset with stacked multiple imputation and a model fit and applies jackknife to estimate the covariance matrix accounting for imputation uncertainty.
#'
#' @param fit object with corresponding vcov method (e.g. glm, coxph, survreg, etc.) from fitting to the (weighted) stacked dataset
#' @param stack data frame containing stacked dataset across multiple imputations. Could have 1 or M rows for each subject with complete data. Should have M rows for each subject with imputed data. Must contain the following named columns: (1) stack$.id, which correspond to a unique identifier for each subject. This column can be easily output from MICE. (2) stack$wt, which corresponds to weights assigned to each row. Standard analysis of stacked multiple imputations should set these weights to 1 over the number of times the subject appears in the stack. (3) stack$.imp, which indicates the multiply imputed dataset (from 1 to M). This column can be easily output from MICE.
#' @param M number of multiple imputations
#'
#' @return Variance, estimated covariance matrix accounting for within and between imputation variation
#' @details This function implements the jackknife-based estimation method for stacked multiple imputations proposed by Beesley and Taylor (2021).
#'
#' @examples
#' data(stackExample)
#'
#' fit = stackExample$fit
#' stack = stackExample$stack
#'
#' jackcovar = Jackknife_Variance(fit, stack, M = 5)
#' VARIANCE_jack = diag(jackcovar)
#'
#' @export
Jackknife_Variance = function(fit, stack, M){
if('glm' %in% class(fit)){
if(substr(fit$family$family, 1, 17) %in% c("poisson", "binomial", "Negative Binomial")) {
dispersion = 1
}else{
dispersion = StackImpute::glm.weighted.dispersion(fit)
}
covariance_weighted = summary(fit)$cov.unscaled*dispersion
}else{
covariance_weighted = vcov(fit)
}
results <- apply(cbind(c(1:M)), 1,FUN=StackImpute::func.jack, stack)
#Nobs = length(stack[,1])
#results_corrected = matrix(rep(as.vector(coef(fit)),M), ncol = M, byrow=F) - ((Nobs-M)/Nobs)*results
theta_var = var(t(results))*(M-1)*((M-1)/M)
Variance =covariance_weighted + (1+M)*theta_var
return(Variance)
}
#' func.jack
#'
#' @description This function is internal to Jackknife_Variance. This estimates model parameters using a subset of the stacked data.
#'
#' @param leaveout indexes the multiple imputation being excluded from estimation
#' @param stack data frame containing stacked dataset across multiple imputations. Could have 1 or M rows for each subject with complete data. Should have M rows for each subject with imputed data. Must contain the following named columns: (1) stack$.id, which correspond to a unique identifier for each subject. This column can be easily output from MICE. (2) stack$wt, which corresponds to weights assigned to each row. Standard analysis of stacked multiple imputations should set these weights to 1 over the number of times the subject appears in the stack. (3) stack$.imp, which indicates the multiply imputed dataset (from 1 to M). This column can be easily output from MICE.
#'
#' @export
func.jack <- function(leaveout, stack){
stack_temp = stack[stack$.imp != leaveout, ]
stack_temp <- stack_temp %>% dplyr::group_by(.id) %>% dplyr::mutate(wt = wt / sum(wt))
stack_temp <- as.data.frame(stack_temp)
fit_jack <- StackImpute::my_update(fit, . ~ ., data = stack_temp, weights = stack_temp$wt)
param = coef(fit_jack)
return(param)
}
| /R/Jackknife_Variance.R | no_license | lbeesleyBIOSTAT/StackImpute | R | false | false | 3,522 | r |
#' Jackknife_Variance
#' @description This function takes a dataset with stacked multiple imputation and a model fit and applies jackknife to estimate the covariance matrix accounting for imputation uncertainty.
#'
#' @param fit object with corresponding vcov method (e.g. glm, coxph, survreg, etc.) from fitting to the (weighted) stacked dataset
#' @param stack data frame containing stacked dataset across multiple imputations. Could have 1 or M rows for each subject with complete data. Should have M rows for each subject with imputed data. Must contain the following named columns: (1) stack$.id, which correspond to a unique identifier for each subject. This column can be easily output from MICE. (2) stack$wt, which corresponds to weights assigned to each row. Standard analysis of stacked multiple imputations should set these weights to 1 over the number of times the subject appears in the stack. (3) stack$.imp, which indicates the multiply imputed dataset (from 1 to M). This column can be easily output from MICE.
#' @param M number of multiple imputations
#'
#' @return Variance, estimated covariance matrix accounting for within and between imputation variation
#' @details This function implements the jackknife-based estimation method for stacked multiple imputations proposed by Beesley and Taylor (2021).
#'
#' @examples
#' data(stackExample)
#'
#' fit = stackExample$fit
#' stack = stackExample$stack
#'
#' jackcovar = Jackknife_Variance(fit, stack, M = 5)
#' VARIANCE_jack = diag(jackcovar)
#'
#' @export
Jackknife_Variance = function(fit, stack, M){
if('glm' %in% class(fit)){
if(substr(fit$family$family, 1, 17) %in% c("poisson", "binomial", "Negative Binomial")) {
dispersion = 1
}else{
dispersion = StackImpute::glm.weighted.dispersion(fit)
}
covariance_weighted = summary(fit)$cov.unscaled*dispersion
}else{
covariance_weighted = vcov(fit)
}
results <- apply(cbind(c(1:M)), 1,FUN=StackImpute::func.jack, stack)
#Nobs = length(stack[,1])
#results_corrected = matrix(rep(as.vector(coef(fit)),M), ncol = M, byrow=F) - ((Nobs-M)/Nobs)*results
theta_var = var(t(results))*(M-1)*((M-1)/M)
Variance =covariance_weighted + (1+M)*theta_var
return(Variance)
}
#' func.jack
#'
#' @description This function is internal to Jackknife_Variance. This estimates model parameters using a subset of the stacked data.
#'
#' @param leaveout indexes the multiple imputation being excluded from estimation
#' @param stack data frame containing stacked dataset across multiple imputations. Could have 1 or M rows for each subject with complete data. Should have M rows for each subject with imputed data. Must contain the following named columns: (1) stack$.id, which correspond to a unique identifier for each subject. This column can be easily output from MICE. (2) stack$wt, which corresponds to weights assigned to each row. Standard analysis of stacked multiple imputations should set these weights to 1 over the number of times the subject appears in the stack. (3) stack$.imp, which indicates the multiply imputed dataset (from 1 to M). This column can be easily output from MICE.
#'
#' @export
func.jack <- function(leaveout, stack){
stack_temp = stack[stack$.imp != leaveout, ]
stack_temp <- stack_temp %>% dplyr::group_by(.id) %>% dplyr::mutate(wt = wt / sum(wt))
stack_temp <- as.data.frame(stack_temp)
fit_jack <- StackImpute::my_update(fit, . ~ ., data = stack_temp, weights = stack_temp$wt)
param = coef(fit_jack)
return(param)
}
|
#!/usr/bin/env Rscript
## A class that contains bam file information
## Copyright 2014, Sahil Seth
## licence: MIT
## sahil.seth@me.com
## A few functions to supplement those already in this package.
## URL: github.com/sahilseth/rfun
## URL: docs.flowr.space
#### -----------------------
## -------- incase of issues switch on verbose to see what is going on !
verbose=FALSE
nm = "rfun"
get_params <- function(paramPairs){
func <- as.character(paramPairs[1])
if(length(func) == 0) return(help())
args <- formals(func)
paramPairs <- paramPairs[grep("=", paramPairs)] ## get those with =
if(verbose) message("We have ",length(paramPairs)," parameters\n")
for(param in paramPairs){
temp <- unlist(strsplit(param, "="));
nm = temp[1]
value=temp[2]
value <- strsplit(value,",")[[1]] #handling those with , in value.. for multiple R values
## ---------- if function supports ... need to pass ALL arguments
if(sum(names(args) %in% "...") & !nm %in% names(args)){
message("Adding ", nm, ":", value)
l = list(nm = value);names(l) = nm
args <- c(args, l)
}
if(class(args[[nm]]) == "numeric" ){
args[[nm]] = as.numeric(value)
}else if(class(args[[nm]]) %in% c("character", "name" )){
args[[nm]] = as.character(value)
}else if(class(args[[nm]]) %in% c("logical")){
args[[nm]] = as.logical(value)
}else if(class(args[[nm]]) %in% c("list")){
args[[nm]] = as.list(value)
}
}
if(verbose) print(args)#print(do.call(rbind, as.list(args)))
return(as.list(args))
}
flow_help <- function(){
cmds <- matrix(c(
'status', 'Detailed status of a flow',
'kill_flow', 'Kill the flow, upon providing working directory'
), byrow=T, ncol=2)
cat("\nThis interface allows shell access to all functions in package flowr (and beyond).")
cat(sprintf("\n %-15s %s\n", cmds[,1], cmds[,2]), sep="")
}
generic_help <- function(){
cat(sprintf("\nUsage: %s function [arguments]\n", nm))
cat("\nFunctions where the arguments are simple objects like numeric/character/logical can be called.",
"\nLists become a little more complicated. say x=a,b is converted to a list with elements a,b\n",
"\nSome examples:")
if(nm == "flowr"){
flow_help()
}else{
cat("\n##sample some numbers:\nrfun rnorm n=100",
"\n##fetch files from pacakges:",
"\nrmd=$(rfun system.file package=knitr ...=examples/knitr-minimal.Rmd)",
"\necho $rmd",
"\n## knit this awesome example !",
"\nrfun knitr::knit input=$rmd\n")
}
cat(sprintf("\nPlease use '%s function -h' to obtain further information about the usage.\n", nm))
}
args <- commandArgs(trailingOnly = TRUE)
##-------- if default function is not in the space, load flowr library
if(nm == "flowr")
require(flowr, quietly=!verbose, warn.conflicts=verbose)
## -------------- Load the required package
if(grepl("::", args[1])){
pkg <- gsub("(.?)::.*", "\\1", args[1])
cat("loading pkg:", pkg, "\n");
library(pkg, character.only = TRUE)
args[1] = gsub(".*::(.*)", "\\1", args[1])
}
if(is.na(args[1])) {
generic_help()
}else if(args[1] == "-h"){
flow_help()
}else if(is.na(args[2])){
help(args[1])
}else if(args[2] == "-h"){
help(args[1])
}else{
params <- get_params(args)
if(verbose){
cat("\nStarting",args[1],"with params\n",
paste(names(params), unlist(params),sep=": ",
collapse="\n"),"\n")
#print(args)
if(verbose) print(str(params))
}
cat(out <- do.call(as.character(args[1]), args = params))
}
| /rfun | no_license | yzharold/rfun | R | false | false | 3,618 | #!/usr/bin/env Rscript
## A class that contains bam file information
## Copyright 2014, Sahil Seth
## licence: MIT
## sahil.seth@me.com
## A few functions to supplement those already in this package.
## URL: github.com/sahilseth/rfun
## URL: docs.flowr.space
#### -----------------------
## -------- incase of issues switch on verbose to see what is going on !
verbose=FALSE
nm = "rfun"
get_params <- function(paramPairs){
func <- as.character(paramPairs[1])
if(length(func) == 0) return(help())
args <- formals(func)
paramPairs <- paramPairs[grep("=", paramPairs)] ## get those with =
if(verbose) message("We have ",length(paramPairs)," parameters\n")
for(param in paramPairs){
temp <- unlist(strsplit(param, "="));
nm = temp[1]
value=temp[2]
value <- strsplit(value,",")[[1]] #handling those with , in value.. for multiple R values
## ---------- if function supports ... need to pass ALL arguments
if(sum(names(args) %in% "...") & !nm %in% names(args)){
message("Adding ", nm, ":", value)
l = list(nm = value);names(l) = nm
args <- c(args, l)
}
if(class(args[[nm]]) == "numeric" ){
args[[nm]] = as.numeric(value)
}else if(class(args[[nm]]) %in% c("character", "name" )){
args[[nm]] = as.character(value)
}else if(class(args[[nm]]) %in% c("logical")){
args[[nm]] = as.logical(value)
}else if(class(args[[nm]]) %in% c("list")){
args[[nm]] = as.list(value)
}
}
if(verbose) print(args)#print(do.call(rbind, as.list(args)))
return(as.list(args))
}
flow_help <- function(){
cmds <- matrix(c(
'status', 'Detailed status of a flow',
'kill_flow', 'Kill the flow, upon providing working directory'
), byrow=T, ncol=2)
cat("\nThis interface allows shell access to all functions in package flowr (and beyond).")
cat(sprintf("\n %-15s %s\n", cmds[,1], cmds[,2]), sep="")
}
generic_help <- function(){
cat(sprintf("\nUsage: %s function [arguments]\n", nm))
cat("\nFunctions where the arguments are simple objects like numeric/character/logical can be called.",
"\nLists become a little more complicated. say x=a,b is converted to a list with elements a,b\n",
"\nSome examples:")
if(nm == "flowr"){
flow_help()
}else{
cat("\n##sample some numbers:\nrfun rnorm n=100",
"\n##fetch files from pacakges:",
"\nrmd=$(rfun system.file package=knitr ...=examples/knitr-minimal.Rmd)",
"\necho $rmd",
"\n## knit this awesome example !",
"\nrfun knitr::knit input=$rmd\n")
}
cat(sprintf("\nPlease use '%s function -h' to obtain further information about the usage.\n", nm))
}
args <- commandArgs(trailingOnly = TRUE)
##-------- if default function is not in the space, load flowr library
if(nm == "flowr")
require(flowr, quietly=!verbose, warn.conflicts=verbose)
## -------------- Load the required package
if(grepl("::", args[1])){
pkg <- gsub("(.?)::.*", "\\1", args[1])
cat("loading pkg:", pkg, "\n");
library(pkg, character.only = TRUE)
args[1] = gsub(".*::(.*)", "\\1", args[1])
}
if(is.na(args[1])) {
generic_help()
}else if(args[1] == "-h"){
flow_help()
}else if(is.na(args[2])){
help(args[1])
}else if(args[2] == "-h"){
help(args[1])
}else{
params <- get_params(args)
if(verbose){
cat("\nStarting",args[1],"with params\n",
paste(names(params), unlist(params),sep=": ",
collapse="\n"),"\n")
#print(args)
if(verbose) print(str(params))
}
cat(out <- do.call(as.character(args[1]), args = params))
}
| |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{poly_md_dset}
\alias{poly_md_dset}
\title{Uncorrelated Multidimensional Polytomous Data Set}
\format{
An object of class \code{data.frame} with 301 rows and 18 columns.
}
\usage{
data(poly_md_dset)
}
\description{
Data set with polytomous responses (five categories) containing
three subsets which represent different uncorrelated dimensions.
}
\keyword{datasets}
| /man/poly_md_dset.Rd | no_license | fwijayanto/autoRasch | R | false | true | 470 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{poly_md_dset}
\alias{poly_md_dset}
\title{Uncorrelated Multidimensional Polytomous Data Set}
\format{
An object of class \code{data.frame} with 301 rows and 18 columns.
}
\usage{
data(poly_md_dset)
}
\description{
Data set with polytomous responses (five categories) containing
three subsets which represent different uncorrelated dimensions.
}
\keyword{datasets}
|
library(shiny)
shinyServer(function(input, output){
output$selected_var <- renderText({
paste("Izbrana država je", input$var, "!")
})
output$drzava <- renderUI(
selectInput("var", label = "Izberite državo",
choices = c("Slovenia",
"Latvia",
"Bulgaria",
"Belgium",
"France",
"Finland",
"Luxembourg",
"Greece",
"Germany",
"Denmark",
"Italy")))
output$potrosnjaPlot <- renderPlot({
d <- nova3 %>% filter(Leto == input$year)
ggplot(d, aes(x=Potrosnja, y=Vrednost, color = Drzava)) + geom_point(size=7) +
geom_smooth(method=lm, fullrange=TRUE, color="black") +
geom_point(data = d, shape = 21, fill = NA, color = "black", alpha = 0.25) +
labs(title="Število obolelih v odvisnosti od potrošnje za šport in zdravje",
x="Potrošnja v univerzalni valuti", y="Delež bolnih (v %)", color = "Država")
})
output$drzavaPlot <- renderPlot({
t <- ociscenapotrosnjakupnamoc %>% filter(Drzava == input$var) %>% filter(Podrocje != "Skupaj")
ggplot(t, aes(x=Podrocje, y=Potrosnja)) +
geom_bar(stat="identity", position="dodge", fill="skyblue") +
theme(axis.text.x = element_text(angle = 60, vjust = 1, hjust = 1)) +
labs(title="Prikaz potrošnje izbrane države po področjih", x="Področje", y="Potrošnja v univerzalni valuti")
})
output$drsnik <- renderText({
paste("Izbrali ste leto", input$year, "!")
})
output$gumbi <- renderText ({
paste("Izbran spol je :", input$radio, "!")
})
output$aktivnostPlot <- renderPlot({
h <- nova4 %>% filter(Spol==input$radio)
ggplot(h, aes(x=Starost, y=Stevilo)) + geom_jitter(size = 5) +
geom_point() +
geom_smooth(method=lm, fullrange=TRUE, color="black") +
geom_point(data = h, shape = 21, fill = NA, color = "black", alpha = 0.25) +
labs(title="Prikaz povezave med povprečno dočakano starostjo in številom neaktivnih",
x="Povprečna življenjska doba", y="Delež neaktivnih državljanov (v %)", color="Država")
})
})
| /shiny/server.R | permissive | nezahabjan/APPR-2017-18 | R | false | false | 2,232 | r | library(shiny)
shinyServer(function(input, output){
output$selected_var <- renderText({
paste("Izbrana država je", input$var, "!")
})
output$drzava <- renderUI(
selectInput("var", label = "Izberite državo",
choices = c("Slovenia",
"Latvia",
"Bulgaria",
"Belgium",
"France",
"Finland",
"Luxembourg",
"Greece",
"Germany",
"Denmark",
"Italy")))
output$potrosnjaPlot <- renderPlot({
d <- nova3 %>% filter(Leto == input$year)
ggplot(d, aes(x=Potrosnja, y=Vrednost, color = Drzava)) + geom_point(size=7) +
geom_smooth(method=lm, fullrange=TRUE, color="black") +
geom_point(data = d, shape = 21, fill = NA, color = "black", alpha = 0.25) +
labs(title="Število obolelih v odvisnosti od potrošnje za šport in zdravje",
x="Potrošnja v univerzalni valuti", y="Delež bolnih (v %)", color = "Država")
})
output$drzavaPlot <- renderPlot({
t <- ociscenapotrosnjakupnamoc %>% filter(Drzava == input$var) %>% filter(Podrocje != "Skupaj")
ggplot(t, aes(x=Podrocje, y=Potrosnja)) +
geom_bar(stat="identity", position="dodge", fill="skyblue") +
theme(axis.text.x = element_text(angle = 60, vjust = 1, hjust = 1)) +
labs(title="Prikaz potrošnje izbrane države po področjih", x="Področje", y="Potrošnja v univerzalni valuti")
})
output$drsnik <- renderText({
paste("Izbrali ste leto", input$year, "!")
})
output$gumbi <- renderText ({
paste("Izbran spol je :", input$radio, "!")
})
output$aktivnostPlot <- renderPlot({
h <- nova4 %>% filter(Spol==input$radio)
ggplot(h, aes(x=Starost, y=Stevilo)) + geom_jitter(size = 5) +
geom_point() +
geom_smooth(method=lm, fullrange=TRUE, color="black") +
geom_point(data = h, shape = 21, fill = NA, color = "black", alpha = 0.25) +
labs(title="Prikaz povezave med povprečno dočakano starostjo in številom neaktivnih",
x="Povprečna življenjska doba", y="Delež neaktivnih državljanov (v %)", color="Država")
})
})
|
rm(list=ls())
library(ggplot2)
library(stringr) # for str_to_title()
library(gridExtra)
setwd("C:/Users/Michel/Documents/eukaryotes/rscripts")
source('euk_functions.R')
f <- create.df.species()
df.soilData <- getSoilData(f,colnames = MYCOLNAMES)
my.color = 'deepskyblue4'
createBoxplot <- function(df,colname,header,ylabel) {
df[which(df$Location=="Mawson Escarpment"),"Location"] <- "Mawson"
d <- data.frame(x = as.numeric(df[[colname]]),
y = df$Location)
d <- d[complete.cases(d),]
p <- ggplot(data=d,aes(y,x)) +
geom_boxplot() + ggtitle(header) +
ylab(ylabel) + xlab('') +
theme(text = element_text(size=10))
return(p)
}
headers <-
colnames <- setdiff(MYCOLNAMES,c("Sample", "AMMN","NITR","PHOS","CARB"))
headers <- c('Potassium','Sulphur','Conductivity','PH','RLU',c(str_to_title(colnames[6:length(colnames)])))
ylabels <- c('Colwell mg/Kg','mg/Kg','dS/M','PH','Relative Light Units',rep('Proportion',9),'?')
# must get unique samples
i = 1
all.plots <- list()
for (i in 1:length(colnames)) {
p <- createBoxplot(df=f[!duplicated(f$Sample),],
colname=colnames[i],
header=headers[i],
ylabel=ylabels[i])
p
all.plots[[i]] <- p
ggsave(p, file=paste0("../images/boxplots_soildata/",headers[i],'.pdf'),
device="pdf",dpi=800,
width=100,height=70,units='mm')
}
# first plot
# page1 <- grid.arrange(all.plots[[1]],
# all.plots[[2]],
# all.plots[[3]],
# all.plots[[4]],
# ncol=3)
# ggsave(page1, file=paste0("../images/boxplots_page1"),
# device="bmp",dpi=800,
# width=100,height=70,units='mm')
#
# # all.plots[[5]],
# # all.plots[[15]],
#
#
# # second plot
# page2 <- grid.arrange(all.plots[[6]],
# all.plots[[7]],
# all.plots[[8]],
# all.plots[[9]],
# # all.plots[[10]],
# # all.plots[[11]],
# # all.plots[[12]],
# # all.plots[[13]],
# # all.plots[[14]],
# ncol=2)
# ggsave(page2, file=paste0("../images/boxplots_page2"),
# device="bmp",dpi=800,
# width=200,height=100,units='mm')
#
# # second plot
# page3 <- grid.arrange(all.plots[[10]],
# all.plots[[11]],
# all.plots[[12]],
# all.plots[[13]],
# #all.plots[[14]],
# ncol=2)
# ggsave(page3, file=paste0("../images/boxplots_page3"),
# device="bmp",dpi=800,
# width=200,height=100,units='mm')
| /rscripts/soildata_boxplots.R | no_license | OldMortality/eukaryotes | R | false | false | 2,752 | r | rm(list=ls())
library(ggplot2)
library(stringr) # for str_to_title()
library(gridExtra)
setwd("C:/Users/Michel/Documents/eukaryotes/rscripts")
source('euk_functions.R')
f <- create.df.species()
df.soilData <- getSoilData(f,colnames = MYCOLNAMES)
my.color = 'deepskyblue4'
createBoxplot <- function(df,colname,header,ylabel) {
df[which(df$Location=="Mawson Escarpment"),"Location"] <- "Mawson"
d <- data.frame(x = as.numeric(df[[colname]]),
y = df$Location)
d <- d[complete.cases(d),]
p <- ggplot(data=d,aes(y,x)) +
geom_boxplot() + ggtitle(header) +
ylab(ylabel) + xlab('') +
theme(text = element_text(size=10))
return(p)
}
headers <-
colnames <- setdiff(MYCOLNAMES,c("Sample", "AMMN","NITR","PHOS","CARB"))
headers <- c('Potassium','Sulphur','Conductivity','PH','RLU',c(str_to_title(colnames[6:length(colnames)])))
ylabels <- c('Colwell mg/Kg','mg/Kg','dS/M','PH','Relative Light Units',rep('Proportion',9),'?')
# must get unique samples
i = 1
all.plots <- list()
for (i in 1:length(colnames)) {
p <- createBoxplot(df=f[!duplicated(f$Sample),],
colname=colnames[i],
header=headers[i],
ylabel=ylabels[i])
p
all.plots[[i]] <- p
ggsave(p, file=paste0("../images/boxplots_soildata/",headers[i],'.pdf'),
device="pdf",dpi=800,
width=100,height=70,units='mm')
}
# first plot
# page1 <- grid.arrange(all.plots[[1]],
# all.plots[[2]],
# all.plots[[3]],
# all.plots[[4]],
# ncol=3)
# ggsave(page1, file=paste0("../images/boxplots_page1"),
# device="bmp",dpi=800,
# width=100,height=70,units='mm')
#
# # all.plots[[5]],
# # all.plots[[15]],
#
#
# # second plot
# page2 <- grid.arrange(all.plots[[6]],
# all.plots[[7]],
# all.plots[[8]],
# all.plots[[9]],
# # all.plots[[10]],
# # all.plots[[11]],
# # all.plots[[12]],
# # all.plots[[13]],
# # all.plots[[14]],
# ncol=2)
# ggsave(page2, file=paste0("../images/boxplots_page2"),
# device="bmp",dpi=800,
# width=200,height=100,units='mm')
#
# # second plot
# page3 <- grid.arrange(all.plots[[10]],
# all.plots[[11]],
# all.plots[[12]],
# all.plots[[13]],
# #all.plots[[14]],
# ncol=2)
# ggsave(page3, file=paste0("../images/boxplots_page3"),
# device="bmp",dpi=800,
# width=200,height=100,units='mm')
|
\name{cuEventQuery}
\alias{cuEventQuery}
\title{Queries an event's status}
\description{ Query the status of all device work preceding the most recent
call to \code{cuEventRecord}() (in the appropriate compute streams,
as specified by the arguments to \code{cuEventRecord}()).}
\usage{cuEventQuery(hEvent)}
\arguments{
\item{hEvent}{Event to query}
}
\seealso{\code{\link{cuEventCreate}}
\code{\link{cuEventRecord}}
\code{\link{cuEventSynchronize}}
\code{\link{cuEventDestroy}}
\code{\link{cuEventElapsedTime}}}
\references{\url{http://docs.nvidia.com/cuda/cuda-driver-api/index.html}}
\keyword{programming}
\concept{GPU}
| /man/cuEventQuery.Rd | no_license | chen0031/RCUDA | R | false | false | 627 | rd | \name{cuEventQuery}
\alias{cuEventQuery}
\title{Queries an event's status}
\description{ Query the status of all device work preceding the most recent
call to \code{cuEventRecord}() (in the appropriate compute streams,
as specified by the arguments to \code{cuEventRecord}()).}
\usage{cuEventQuery(hEvent)}
\arguments{
\item{hEvent}{Event to query}
}
\seealso{\code{\link{cuEventCreate}}
\code{\link{cuEventRecord}}
\code{\link{cuEventSynchronize}}
\code{\link{cuEventDestroy}}
\code{\link{cuEventElapsedTime}}}
\references{\url{http://docs.nvidia.com/cuda/cuda-driver-api/index.html}}
\keyword{programming}
\concept{GPU}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stan_biglm.R, R/stan_biglm.fit.R
\name{stan_biglm}
\alias{stan_biglm}
\alias{stan_biglm.fit}
\title{Bayesian regularized linear but big models via Stan}
\usage{
stan_biglm(biglm, xbar, ybar, s_y, ...,
prior = R2(stop("'location' must be specified")), prior_intercept = NULL,
prior_PD = FALSE, algorithm = c("sampling", "meanfield", "fullrank"),
adapt_delta = NULL)
stan_biglm.fit(b, R, SSR, N, xbar, ybar, s_y, has_intercept = TRUE, ...,
prior = R2(stop("'location' must be specified")), prior_intercept = NULL,
prior_PD = FALSE, algorithm = c("sampling", "meanfield", "fullrank"),
adapt_delta = NULL)
}
\arguments{
\item{biglm}{The list output by \code{\link[biglm]{biglm}} in the \pkg{biglm}
package.}
\item{xbar}{A numeric vector of column means in the implicit design matrix
excluding the intercept for the observations included in the model.}
\item{ybar}{A numeric scalar indicating the mean of the outcome for the
observations included in the model.}
\item{s_y}{A numeric scalar indicating the unbiased sample standard deviation
of the outcome for the observations included in the model.}
\item{...}{Further arguments passed to the function in the \pkg{rstan}
package (\code{\link[rstan]{sampling}}, \code{\link[rstan]{vb}}, or
\code{\link[rstan]{optimizing}}), corresponding to the estimation method
named by \code{algorithm}. For example, if \code{algorithm} is
\code{"sampling"} it is possibly to specify \code{iter}, \code{chains},
\code{cores}, \code{refresh}, etc.}
\item{prior}{Must be a call to \code{\link{R2}} with its \code{location}
argument specified or \code{NULL}, which would indicate a standard uniform
prior for the \eqn{R^2}.}
\item{prior_intercept}{Either \code{NULL} (the default) or a call to
\code{\link{normal}}. If a \code{\link{normal}} prior is specified without
a \code{scale}, then the standard deviation is taken to be the marginal
standard deviation of the outcome divided by the square root of the sample
size, which is legitimate because the marginal standard deviation of the
outcome is a primitive parameter being estimated.}
\item{prior_PD}{A logical scalar (defaulting to \code{FALSE}) indicating
whether to draw from the prior predictive distribution instead of
conditioning on the outcome.}
\item{algorithm}{A string (possibly abbreviated) indicating the
estimation approach to use. Can be \code{"sampling"} for MCMC (the
default), \code{"optimizing"} for optimization, \code{"meanfield"} for
variational inference with independent normal distributions, or
\code{"fullrank"} for variational inference with a multivariate normal
distribution. See \code{\link{rstanarm-package}} for more details on the
estimation algorithms. NOTE: not all fitting functions support all four
algorithms.}
\item{adapt_delta}{Only relevant if \code{algorithm="sampling"}. See
\code{\link{adapt_delta}} for details.}
\item{b}{A numeric vector of OLS coefficients, excluding the intercept}
\item{R}{A square upper-triangular matrix from the QR decomposition of the
design matrix, excluding the intercept}
\item{SSR}{A numeric scalar indicating the sum-of-squared residuals for OLS}
\item{N}{A integer scalar indicating the number of included observations}
\item{has_intercept}{A logical scalar indicating whether to add an intercept
to the model when estimating it.}
}
\value{
The output of both \code{stan_biglm} and \code{stan_biglm.fit} is an
object of \code{\link[rstan]{stanfit-class}} rather than
\code{\link{stanreg-objects}}, which is more limited and less convenient
but necessitated by the fact that \code{stan_biglm} does not bring the full
design matrix into memory. Without the full design matrix,some of the
elements of a \code{\link{stanreg-objects}} object cannot be calculated,
such as residuals. Thus, the functions in the \pkg{rstanarm} package that
input \code{\link{stanreg-objects}}, such as
\code{\link{posterior_predict}} cannot be used.
}
\description{
This is the same model as with \code{\link{stan_lm}} but it utilizes the
output from \code{\link[biglm]{biglm}} in the \pkg{biglm} package in order to
proceed when the data is too large to fit in memory.
}
\details{
The \code{stan_biglm} function is intended to be used in the same
circumstances as the \code{\link[biglm]{biglm}} function in the \pkg{biglm}
package but with an informative prior on the \eqn{R^2} of the regression.
Like \code{\link[biglm]{biglm}}, the memory required to estimate the model
depends largely on the number of predictors rather than the number of
observations. However, \code{stan_biglm} and \code{stan_biglm.fit} have
additional required arguments that are not necessary in
\code{\link[biglm]{biglm}}, namely \code{xbar}, \code{ybar}, and \code{s_y}.
If any observations have any missing values on any of the predictors or the
outcome, such observations do not contribute to these statistics.
}
\examples{
# create inputs
ols <- lm(mpg ~ wt + qsec + am, data = mtcars, # all row are complete so ...
na.action = na.exclude) # not necessary in this case
b <- coef(ols)[-1]
R <- qr.R(ols$qr)[-1,-1]
SSR <- crossprod(ols$residuals)[1]
not_NA <- !is.na(fitted(ols))
N <- sum(not_NA)
xbar <- colMeans(mtcars[not_NA,c("wt", "qsec", "am")])
y <- mtcars$mpg[not_NA]
ybar <- mean(y)
s_y <- sd(y)
post <- stan_biglm.fit(b, R, SSR, N, xbar, ybar, s_y, prior = R2(.75),
# the next line is only to make the example go fast
chains = 1, iter = 500, seed = 12345)
cbind(lm = b, stan_lm = rstan::get_posterior_mean(post)[13:15,]) # shrunk
}
| /man/stan_biglm.Rd | no_license | fartist/rstanarm | R | false | true | 5,680 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stan_biglm.R, R/stan_biglm.fit.R
\name{stan_biglm}
\alias{stan_biglm}
\alias{stan_biglm.fit}
\title{Bayesian regularized linear but big models via Stan}
\usage{
stan_biglm(biglm, xbar, ybar, s_y, ...,
prior = R2(stop("'location' must be specified")), prior_intercept = NULL,
prior_PD = FALSE, algorithm = c("sampling", "meanfield", "fullrank"),
adapt_delta = NULL)
stan_biglm.fit(b, R, SSR, N, xbar, ybar, s_y, has_intercept = TRUE, ...,
prior = R2(stop("'location' must be specified")), prior_intercept = NULL,
prior_PD = FALSE, algorithm = c("sampling", "meanfield", "fullrank"),
adapt_delta = NULL)
}
\arguments{
\item{biglm}{The list output by \code{\link[biglm]{biglm}} in the \pkg{biglm}
package.}
\item{xbar}{A numeric vector of column means in the implicit design matrix
excluding the intercept for the observations included in the model.}
\item{ybar}{A numeric scalar indicating the mean of the outcome for the
observations included in the model.}
\item{s_y}{A numeric scalar indicating the unbiased sample standard deviation
of the outcome for the observations included in the model.}
\item{...}{Further arguments passed to the function in the \pkg{rstan}
package (\code{\link[rstan]{sampling}}, \code{\link[rstan]{vb}}, or
\code{\link[rstan]{optimizing}}), corresponding to the estimation method
named by \code{algorithm}. For example, if \code{algorithm} is
\code{"sampling"} it is possible to specify \code{iter}, \code{chains},
\code{cores}, \code{refresh}, etc.}
\item{prior}{Must be a call to \code{\link{R2}} with its \code{location}
argument specified or \code{NULL}, which would indicate a standard uniform
prior for the \eqn{R^2}.}
\item{prior_intercept}{Either \code{NULL} (the default) or a call to
\code{\link{normal}}. If a \code{\link{normal}} prior is specified without
a \code{scale}, then the standard deviation is taken to be the marginal
standard deviation of the outcome divided by the square root of the sample
size, which is legitimate because the marginal standard deviation of the
outcome is a primitive parameter being estimated.}
\item{prior_PD}{A logical scalar (defaulting to \code{FALSE}) indicating
whether to draw from the prior predictive distribution instead of
conditioning on the outcome.}
\item{algorithm}{A string (possibly abbreviated) indicating the
estimation approach to use. Can be \code{"sampling"} for MCMC (the
default), \code{"optimizing"} for optimization, \code{"meanfield"} for
variational inference with independent normal distributions, or
\code{"fullrank"} for variational inference with a multivariate normal
distribution. See \code{\link{rstanarm-package}} for more details on the
estimation algorithms. NOTE: not all fitting functions support all four
algorithms.}
\item{adapt_delta}{Only relevant if \code{algorithm="sampling"}. See
\code{\link{adapt_delta}} for details.}
\item{b}{A numeric vector of OLS coefficients, excluding the intercept}
\item{R}{A square upper-triangular matrix from the QR decomposition of the
design matrix, excluding the intercept}
\item{SSR}{A numeric scalar indicating the sum-of-squared residuals for OLS}
\item{N}{An integer scalar indicating the number of included observations}
\item{has_intercept}{A logical scalar indicating whether to add an intercept
to the model when estimating it.}
}
\value{
The output of both \code{stan_biglm} and \code{stan_biglm.fit} is an
object of \code{\link[rstan]{stanfit-class}} rather than
\code{\link{stanreg-objects}}, which is more limited and less convenient
but necessitated by the fact that \code{stan_biglm} does not bring the full
design matrix into memory. Without the full design matrix, some of the
elements of a \code{\link{stanreg-objects}} object cannot be calculated,
such as residuals. Thus, the functions in the \pkg{rstanarm} package that
input \code{\link{stanreg-objects}}, such as
\code{\link{posterior_predict}} cannot be used.
}
\description{
This is the same model as with \code{\link{stan_lm}} but it utilizes the
output from \code{\link[biglm]{biglm}} in the \pkg{biglm} package in order to
proceed when the data is too large to fit in memory.
}
\details{
The \code{stan_biglm} function is intended to be used in the same
circumstances as the \code{\link[biglm]{biglm}} function in the \pkg{biglm}
package but with an informative prior on the \eqn{R^2} of the regression.
Like \code{\link[biglm]{biglm}}, the memory required to estimate the model
depends largely on the number of predictors rather than the number of
observations. However, \code{stan_biglm} and \code{stan_biglm.fit} have
additional required arguments that are not necessary in
\code{\link[biglm]{biglm}}, namely \code{xbar}, \code{ybar}, and \code{s_y}.
If any observations have any missing values on any of the predictors or the
outcome, such observations do not contribute to these statistics.
}
\examples{
# create inputs
ols <- lm(mpg ~ wt + qsec + am, data = mtcars, # all row are complete so ...
na.action = na.exclude) # not necessary in this case
b <- coef(ols)[-1]
R <- qr.R(ols$qr)[-1,-1]
SSR <- crossprod(ols$residuals)[1]
not_NA <- !is.na(fitted(ols))
N <- sum(not_NA)
xbar <- colMeans(mtcars[not_NA,c("wt", "qsec", "am")])
y <- mtcars$mpg[not_NA]
ybar <- mean(y)
s_y <- sd(y)
post <- stan_biglm.fit(b, R, SSR, N, xbar, ybar, s_y, prior = R2(.75),
# the next line is only to make the example go fast
chains = 1, iter = 500, seed = 12345)
cbind(lm = b, stan_lm = rstan::get_posterior_mean(post)[13:15,]) # shrunk
}
|
#' @title Current Probability of Failure for LV UGB
#' @description This function calculates the current
#' annual probability of failure for LV UGB
#' (low voltage underground distribution board) assets.
#' The function is a cubic curve that is based on
#' the first three terms of the Taylor series for an
#' exponential function. For more information about the
#' probability of failure function see section 6
#' on page 30 in CNAIM (2017).
#' @param lv_asset_category String.
#' A string that refers to the specific asset category.
#' See page 15, table 1 in CNAIM (2017).
#' @param placement String. Specify if the asset is located outdoor or indoor.
#' @param altitude_m Numeric. Specify the altitude location for
#' the asset measured in meters from sea level. \code{altitude_m}
#' is used to derive the altitude factor. See page 107,
#' table 23 in CNAIM (2017). A setting of \code{"Default"}
#' will set the altitude factor to 1 independent of \code{asset_type}.
#' @param distance_from_coast_km Numeric. Specify the distance from the
#' coast measured in kilometers. \code{distance_from_coast_km} is used
#' to derive the distance from coast factor. See page 106,
#' table 22 in CNAIM (2017). A setting of \code{"Default"} will set the
#' distance from coast factor to 1 independent of \code{asset_type}.
#' @param corrosion_category_index Integer.
#' Specify the corrosion index category, 1-5.
#' @param age Numeric. The current age in years of the asset.
#' @param measured_condition_inputs Named list of measured condition
#' inputs; see the example below for the expected structure.
#' @param observed_condition_inputs Named list of observed condition
#' inputs; see the example below for the expected structure.
#' See page 146-147, table 192 and 194 in CNAIM (2017).
#' @inheritParams current_health
#' @return Numeric. Current probability of failure per annum.
#' @source DNO Common Network Asset Indices Methodology (CNAIM),
#' Health & Criticality - Version 1.1, 2017:
#' \url{https://www.ofgem.gov.uk/system/files/docs/2017/05/dno_common_network_asset_indices_methodology_v1.1.pdf}
#' @export
#' @examples
#' # Current annual probability of failure for LV UGB
#'pof_lv_ugb(
#'lv_asset_category = "LV UGB",
#'placement = "Default",
#'altitude_m = "Default",
#'distance_from_coast_km = "Default",
#'corrosion_category_index = "Default",
#'age = 10,
#'observed_condition_inputs =
#'list("steel_cover_and_pit_condition" =
#'list("Condition Criteria: Observed Condition" = "Default"),
#'"water_moisture" = list("Condition Criteria: Observed Condition" = "Default"),
#'"bell_cond" = list("Condition Criteria: Observed Condition" = "Default"),
#'"insulation_cond" = list("Condition Criteria: Observed Condition" = "Default"),
#'"signs_heating" = list("Condition Criteria: Observed Condition" = "Default"),
#'"phase_barriers" = list("Condition Criteria: Observed Condition" = "Default")),
#'measured_condition_inputs =
#'list("opsal_adequacy" =
#'list("Condition Criteria: Operational Adequacy" = "Default")),
#'reliability_factor = "Default")
pof_lv_ugb <-
  function(lv_asset_category = "LV UGB",
           placement = "Default",
           altitude_m = "Default",
           distance_from_coast_km = "Default",
           corrosion_category_index = "Default",
           age,
           measured_condition_inputs,
           observed_condition_inputs,
           reliability_factor = "Default") {

    # Bind column names used in dplyr NSE pipelines below to NULL to
    # silence "no visible binding" notes in R CMD check.
    `Asset Register Category` = `Health Index Asset Category` =
      `Generic Term...1` = `Generic Term...2` = `Functional Failure Category` =
      `K-Value (%)` = `C-Value` = `Asset Register Category` = NULL

    # Look up the health index asset category for this register category.
    asset_category <- gb_ref$categorisation_of_assets %>%
      dplyr::filter(`Asset Register Category` ==
                      lv_asset_category) %>%
      dplyr::select(`Health Index Asset Category`) %>% dplyr::pull()

    # Generic terms for the asset category. NOTE(review): these two values
    # are looked up but not referenced later in this function; presumably
    # kept for parity with the other pof_* functions in the package.
    generic_term_1 <- gb_ref$generic_terms_for_assets %>%
      dplyr::filter(`Health Index Asset Category` == asset_category) %>%
      dplyr::select(`Generic Term...1`) %>% dplyr::pull()

    generic_term_2 <- gb_ref$generic_terms_for_assets %>%
      dplyr::filter(`Health Index Asset Category` == asset_category) %>%
      dplyr::select(`Generic Term...2`) %>% dplyr::pull()

    # Normal expected life (years) for the asset category ---------------
    normal_expected_life_cond <- gb_ref$normal_expected_life %>%
      dplyr::filter(`Asset Register Category` ==
                      lv_asset_category) %>%
      dplyr::pull()

    # Constants C and K for PoF function --------------------------------------
    # K is tabulated as a percentage, hence the division by 100.
    k <- gb_ref$pof_curve_parameters %>%
      dplyr::filter(`Functional Failure Category` %in% lv_asset_category) %>%
      dplyr::select(`K-Value (%)`) %>%
      dplyr::pull()/100

    # NOTE: `c` shadows base::c inside this function body.
    c <- gb_ref$pof_curve_parameters %>%
      dplyr::filter(`Functional Failure Category` %in% lv_asset_category) %>%
      dplyr::select(`C-Value`) %>%
      dplyr::pull()

    # Duty factor: fixed at 1 for this asset category -------------------------
    duty_factor_cond <- 1

    # Location factor ----------------------------------------------------
    location_factor_cond <- location_factor(placement,
                                            altitude_m,
                                            distance_from_coast_km,
                                            corrosion_category_index,
                                            asset_type = lv_asset_category)

    # Expected life, adjusted for duty and location ------------------------
    expected_life_years <- expected_life(normal_expected_life_cond,
                                         duty_factor_cond,
                                         location_factor_cond)

    # b1 (Initial Ageing Rate) ------------------------------------------------
    b1 <- beta_1(expected_life_years)

    # Initial health score ----------------------------------------------------
    initial_health_score <- initial_health(b1, age)

    # Measured conditions ------------------------------------------------------
    # Map measured-condition input names to CNAIM lookup table names.
    mci_table_names <- list("opsal_adequacy" = "mci_lv_ugb_opsal_adequacy")

    measured_condition_modifier <-
      get_measured_conditions_modifier_lv_switchgear(lv_asset_category,
                                                     mci_table_names,
                                                     measured_condition_inputs)

    # Observed conditions -----------------------------------------------------
    # Map observed-condition input names to CNAIM lookup table names.
    oci_table_names <- list(
      "steel_cover_and_pit_condition" = "oci_lv_ugb_steel_covr_pit_cond",
      "water_moisture" = "oci_lv_ugb_water_moisture",
      "bell_cond" = "oci_lv_ugb_bell_cond",
      "insulation_cond" = "oci_lv_ugb_insulation_cond",
      "signs_heating" = "oci_lv_ugb_signs_heating",
      "phase_barriers" = "oci_lv_ugb_phase_barriers"
    )

    observed_condition_modifier <-
      get_observed_conditions_modifier_lv_switchgear(lv_asset_category,
                                                     oci_table_names,
                                                     observed_condition_inputs)

    # Health score factor ---------------------------------------------------
    health_score_factor <-
      health_score_excl_ehv_132kv_tf(observed_condition_modifier$condition_factor,
                                     measured_condition_modifier$condition_factor)

    # Health score cap: the most restrictive (smallest) of the two caps -------
    health_score_cap <- min(observed_condition_modifier$condition_cap,
                            measured_condition_modifier$condition_cap)

    # Health score collar: the most restrictive (largest) of the collars ------
    health_score_collar <- max(observed_condition_modifier$condition_collar,
                               measured_condition_modifier$condition_collar)

    # Health score modifier ---------------------------------------------------
    health_score_modifier <- data.frame(health_score_factor,
                                        health_score_cap,
                                        health_score_collar)

    # Current health score ----------------------------------------------------
    current_health_score <-
      current_health(initial_health_score,
                     health_score_modifier$health_score_factor,
                     health_score_modifier$health_score_cap,
                     health_score_modifier$health_score_collar,
                     reliability_factor = reliability_factor)

    # Probability of failure ---------------------------------------------------
    # Cubic truncation of the exponential PoF curve: the first four terms
    # of the Taylor series of K * exp(C * H) (CNAIM 2017, section 6).
    probability_of_failure <- k *
      (1 + (c * current_health_score) +
         (((c * current_health_score)^2) / factorial(2)) +
         (((c * current_health_score)^3) / factorial(3)))

    return(probability_of_failure)
  }
| /R/pof_lv_ugb.R | permissive | scoultersdcoe/CNAIM | R | false | false | 8,737 | r | #' @title Current Probability of Failure for LV UGB
#' @description This function calculates the current
#' annual probability of failure for LV UGB
#' The function is a cubic curve that is based on
#' the first three terms of the Taylor series for an
#' exponential function. For more information about the
#' probability of failure function see section 6
#' on page 30 in CNAIM (2017).
#' @param lv_asset_category String.
#' A sting that refers to the specific asset category.
#' See See page 15, table 1 in CNAIM (2017).
#' @param lv_asset_category String The type of LV asset category
#' @param placement String. Specify if the asset is located outdoor or indoor.
#' @param altitude_m Numeric. Specify the altitude location for
#' the asset measured in meters from sea level.\code{altitude_m}
#' is used to derive the altitude factor. See page 107,
#' table 23 in CNAIM (2017). A setting of \code{"Default"}
#' will set the altitude factor to 1 independent of \code{asset_type}.
#' @param distance_from_coast_km Numeric. Specify the distance from the
#' coast measured in kilometers. \code{distance_from_coast_km} is used
#' to derive the distance from coast factor See page 106,
#' table 22 in CNAIM (2017). A setting of \code{"Default"} will set the
#' distance from coast factor to 1 independent of \code{asset_type}.
#' @param corrosion_category_index Integer.
#' Specify the corrosion index category, 1-5.
#' @param age Numeric. The current age in years of the conductor.
#' @param measured_condition_inputs Named list observed_conditions_input
#' @param observed_condition_inputs Named list observed_conditions_input
#' \code{conductor_samp = c("Low","Medium/Normal","High","Default")}.
#' See page 146-147, table 192 and 194 in CNAIM (2017).
#' @inheritParams current_health
#' @return Numeric. Current probability of failure
#' per annum per kilometer.
#' @source DNO Common Network Asset Indices Methodology (CNAIM),
#' Health & Criticality - Version 1.1, 2017:
#' \url{https://www.ofgem.gov.uk/system/files/docs/2017/05/dno_common_network_asset_indices_methodology_v1.1.pdf}
#' @export
#' @examples
#' # Current annual probability of failure for 10kV OHL (Tower Line) Conductor
#'pof_lv_ugb(
#'lv_asset_category = "LV UGB",
#'placement = "Default",
#'altitude_m = "Default",
#'distance_from_coast_km = "Default",
#'corrosion_category_index = "Default",
#'age = 10,
#'observed_condition_inputs =
#'list("steel_cover_and_pit_condition" =
#'list("Condition Criteria: Observed Condition" = "Default"),
#'"water_moisture" = list("Condition Criteria: Observed Condition" = "Default"),
#'"bell_cond" = list("Condition Criteria: Observed Condition" = "Default"),
#'"insulation_cond" = list("Condition Criteria: Observed Condition" = "Default"),
#'"signs_heating" = list("Condition Criteria: Observed Condition" = "Default"),
#'"phase_barriers" = list("Condition Criteria: Observed Condition" = "Default")),
#'measured_condition_inputs =
#'list("opsal_adequacy" =
#'list("Condition Criteria: Operational Adequacy" = "Default")),
#'reliability_factor = "Default")
pof_lv_ugb <-
  function(lv_asset_category = "LV UGB",
           placement = "Default",
           altitude_m = "Default",
           distance_from_coast_km = "Default",
           corrosion_category_index = "Default",
           age,
           measured_condition_inputs,
           observed_condition_inputs,
           reliability_factor = "Default") {

    # Bind column names used in dplyr NSE pipelines below to NULL to
    # silence "no visible binding" notes in R CMD check.
    `Asset Register Category` = `Health Index Asset Category` =
      `Generic Term...1` = `Generic Term...2` = `Functional Failure Category` =
      `K-Value (%)` = `C-Value` = `Asset Register Category` = NULL

    # Look up the health index asset category for this register category.
    asset_category <- gb_ref$categorisation_of_assets %>%
      dplyr::filter(`Asset Register Category` ==
                      lv_asset_category) %>%
      dplyr::select(`Health Index Asset Category`) %>% dplyr::pull()

    # Generic terms for the asset category. NOTE(review): these two values
    # are looked up but not referenced later in this function; presumably
    # kept for parity with the other pof_* functions in the package.
    generic_term_1 <- gb_ref$generic_terms_for_assets %>%
      dplyr::filter(`Health Index Asset Category` == asset_category) %>%
      dplyr::select(`Generic Term...1`) %>% dplyr::pull()

    generic_term_2 <- gb_ref$generic_terms_for_assets %>%
      dplyr::filter(`Health Index Asset Category` == asset_category) %>%
      dplyr::select(`Generic Term...2`) %>% dplyr::pull()

    # Normal expected life (years) for the asset category ---------------
    normal_expected_life_cond <- gb_ref$normal_expected_life %>%
      dplyr::filter(`Asset Register Category` ==
                      lv_asset_category) %>%
      dplyr::pull()

    # Constants C and K for PoF function --------------------------------------
    # K is tabulated as a percentage, hence the division by 100.
    k <- gb_ref$pof_curve_parameters %>%
      dplyr::filter(`Functional Failure Category` %in% lv_asset_category) %>%
      dplyr::select(`K-Value (%)`) %>%
      dplyr::pull()/100

    # NOTE: `c` shadows base::c inside this function body.
    c <- gb_ref$pof_curve_parameters %>%
      dplyr::filter(`Functional Failure Category` %in% lv_asset_category) %>%
      dplyr::select(`C-Value`) %>%
      dplyr::pull()

    # Duty factor: fixed at 1 for this asset category -------------------------
    duty_factor_cond <- 1

    # Location factor ----------------------------------------------------
    location_factor_cond <- location_factor(placement,
                                            altitude_m,
                                            distance_from_coast_km,
                                            corrosion_category_index,
                                            asset_type = lv_asset_category)

    # Expected life, adjusted for duty and location ------------------------
    expected_life_years <- expected_life(normal_expected_life_cond,
                                         duty_factor_cond,
                                         location_factor_cond)

    # b1 (Initial Ageing Rate) ------------------------------------------------
    b1 <- beta_1(expected_life_years)

    # Initial health score ----------------------------------------------------
    initial_health_score <- initial_health(b1, age)

    # Measured conditions ------------------------------------------------------
    # Map measured-condition input names to CNAIM lookup table names.
    mci_table_names <- list("opsal_adequacy" = "mci_lv_ugb_opsal_adequacy")

    measured_condition_modifier <-
      get_measured_conditions_modifier_lv_switchgear(lv_asset_category,
                                                     mci_table_names,
                                                     measured_condition_inputs)

    # Observed conditions -----------------------------------------------------
    # Map observed-condition input names to CNAIM lookup table names.
    oci_table_names <- list(
      "steel_cover_and_pit_condition" = "oci_lv_ugb_steel_covr_pit_cond",
      "water_moisture" = "oci_lv_ugb_water_moisture",
      "bell_cond" = "oci_lv_ugb_bell_cond",
      "insulation_cond" = "oci_lv_ugb_insulation_cond",
      "signs_heating" = "oci_lv_ugb_signs_heating",
      "phase_barriers" = "oci_lv_ugb_phase_barriers"
    )

    observed_condition_modifier <-
      get_observed_conditions_modifier_lv_switchgear(lv_asset_category,
                                                     oci_table_names,
                                                     observed_condition_inputs)

    # Health score factor ---------------------------------------------------
    health_score_factor <-
      health_score_excl_ehv_132kv_tf(observed_condition_modifier$condition_factor,
                                     measured_condition_modifier$condition_factor)

    # Health score cap: the most restrictive (smallest) of the two caps -------
    health_score_cap <- min(observed_condition_modifier$condition_cap,
                            measured_condition_modifier$condition_cap)

    # Health score collar: the most restrictive (largest) of the collars ------
    health_score_collar <- max(observed_condition_modifier$condition_collar,
                               measured_condition_modifier$condition_collar)

    # Health score modifier ---------------------------------------------------
    health_score_modifier <- data.frame(health_score_factor,
                                        health_score_cap,
                                        health_score_collar)

    # Current health score ----------------------------------------------------
    current_health_score <-
      current_health(initial_health_score,
                     health_score_modifier$health_score_factor,
                     health_score_modifier$health_score_cap,
                     health_score_modifier$health_score_collar,
                     reliability_factor = reliability_factor)

    # Probability of failure ---------------------------------------------------
    # Cubic truncation of the exponential PoF curve: the first four terms
    # of the Taylor series of K * exp(C * H) (CNAIM 2017, section 6).
    probability_of_failure <- k *
      (1 + (c * current_health_score) +
         (((c * current_health_score)^2) / factorial(2)) +
         (((c * current_health_score)^3) / factorial(3)))

    return(probability_of_failure)
  }
|
## Data Science - Johns Hopkins University (Coursera)
## Course 3 - R Programming, Week 3
# Demonstrates factor behavior: the `levels` argument controls the
# underlying integer coding, independent of the order values appear.

cat("\014")  # form feed: clears the console in RStudio

# Draw 10 values at random from "yes"/"no". No seed is set, so the
# sampled values differ between runs.
yesno <- sample(c("yes", "no"), size = 10, replace = TRUE)
yesno

# Explicit levels pin the coding: "yes" -> 1 and "no" -> 2, regardless
# of which value happens to be sampled first.
yesnofac <- factor(yesno, levels = c("yes", "no"))
as.numeric(yesnofac)
yesnofac
| /Johns Hopkins Data Science/course 3/c3-w3-factor.R | no_license | tmPolla/R | R | false | false | 267 | r | ##Data Science - Johns Hopkins University
## Data Science - Johns Hopkins University (Coursera)
## Course 3 - R Programming, Week 3
# Demonstrates factor behavior: the `levels` argument controls the
# underlying integer coding, independent of the order values appear.

cat("\014")  # form feed: clears the console in RStudio

# Draw 10 values at random from "yes"/"no". No seed is set, so the
# sampled values differ between runs.
yesno <- sample(c("yes", "no"), size = 10, replace = TRUE)
yesno

# Explicit levels pin the coding: "yes" -> 1 and "no" -> 2, regardless
# of which value happens to be sampled first.
yesnofac <- factor(yesno, levels = c("yes", "no"))
as.numeric(yesnofac)
yesnofac
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/i.R
\name{i_point}
\alias{i_point}
\title{Identify Points}
\usage{
i_point(spdata)
}
\description{
Identify Points
}
\keyword{internal}
| /man/i_point.Rd | no_license | wintercmin/quickmapr | R | false | false | 223 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/i.R
\name{i_point}
\alias{i_point}
\title{Identify Points}
\usage{
i_point(spdata)
}
\description{
Identify Points
}
\keyword{internal}
|
#' @title Compute the integration constant for distance density functions
#'
#' @description Using numerical integration, this function computes
#' the area under a distance function between two limits (\code{w.lo}
#' and \code{w.hi}).
#'
#' @param dist Vector of detection distance values.
#'
#' @param density A likelihood function for which the
#' integration constant is sought. This function
#' must be capable of evaluating values between \code{w.lo}
#' and \code{w.hi} and have the following parameters:
#' \itemize{
#' \item \samp{a} = Parameter vector.
#' \item \samp{dist} = Vector of distances.
#' \item \samp{covars} = If the density allows covariates,
#' the covariate matrix.
#' \item \samp{w.lo} = Lower limit or left truncation value.
#' \item \samp{w.hi} = Upper limit or right truncation value.
#' \item \samp{series} = Form of the series expansions, if any.
#' \item \samp{expansions} = Number of expansion terms.
#' \item \samp{scale} = Whether to scale function to integrate to 1.
#' }
#'
#' @param w.lo The lower limit of integration, or the left truncation
#' value for perpendicular distances.
#'
#' @param w.hi The upper limit of integration, or the right truncation
#' value for perpendicular distances.
#'
#' @param covars Matrix of covariate values.
#'
#' @param a Vector of parameters to pass to \code{density}.
#'
#' @param series The series to use for expansions.
#' If \code{expansions} > 0, this string
#' specifies the type of expansion. Valid values at
#' present are 'simple', 'hermite', and 'cosine'.
#'
#' @param expansions Number of expansions in \code{density}.
#'
#' @param pointSurvey Boolean. TRUE if point transect data,
#' FALSE if line transect data.
#'
#'
#' @details The trapezoid rule is used to numerically integrate
#' \code{density} from \code{w.lo} to \code{w.hi}. Two-hundred
#' (200) equal-sized trapezoids are used in the integration. The number
#' of trapezoids to use is fixed and cannot be changed without
#' re-writing this routine.
#'
#' @return A scalar (or vector of scalars if covariates are present)
#' that is the area under \code{density} between \code{w.lo} and \code{w.hi}.
#' This scalar can be used as a divisor to scale density such that
#' it integrates to 1.0. If x = density(\ldots), then
#' x / \code{integration.constant(density, \ldots)} will integrate to 1.0.
#'
#' @seealso \code{\link{dfuncEstim}}, \code{\link{halfnorm.like}}
#'
#' @examples
#' # Can put any number for first argument (1 used here)
#' scl <- integration.constant(dist=units::set_units(1,"m")
#' , density=logistic.like
#' , covars = NULL
#' , pointSurvey = FALSE
#' , w.lo = units::set_units(0,"m")
#' , w.hi = units::set_units(100,"m")
#' , expansions = 0
#' , a=c(75,25))
#' print(scl) # Should be 75.1
#'
#' x <- units::set_units(seq(0,100,length=200), "m")
#' y <- logistic.like( c(75,25), x, scale=FALSE ) / scl
#' int.y <- (x[2]-x[1]) * sum(y[-length(y)]+y[-1]) / 2 # the trapezoid rule, should be 1.0
#' print(int.y) # Should be 1
#'
#' @keywords models
#' @importFrom stats integrate
#' @export
integration.constant <- function(dist,
                                 density,
                                 a,
                                 covars,
                                 w.lo,
                                 w.hi,
                                 series,
                                 expansions,
                                 pointSurvey){

  # Resolve the likelihood by name or function object.
  density = match.fun(density)

  # w.lo, w.hi, and dist must carry the same units because units are
  # occasionally dropped in the integral calculations below. A case where
  # units(w.lo) != units(dist) seems unlikely, but just in case...
  if( units(w.lo) != units(dist)){
    w.lo <- units::set_units(w.lo, units(dist), mode = "standard")
  }
  if( units(w.hi) != units(dist)){
    w.hi <- units::set_units(w.hi, units(dist), mode = "standard")
  }

  # Now we can safely compute the sequence of x values for numerical
  # integration. This is done below, in each case where it's needed.

  nTrapazoids <- 200 # number of evaluation points in numerical integration

  if(!is.null(covars)){
    # De-duplicate covariate rows and compute the scaling constant once per
    # unique combination, then merge back. This is much faster when many
    # rows share covariate values (common with factors) but inefficient
    # with continuous covariates. The non-de-dupped alternative would be
    # something like x <- covars %*% matrix(a, ncol = 1), which might be
    # just as fast.
    Pkey <- tapply(1:nrow(covars), as.data.frame(covars)) # groups of duplicates
    covars <- data.frame(covars, zzzPkey=Pkey)
    dupCovars <- duplicated(covars$zzzPkey)
    unique.covars <- covars[!dupCovars,]
    PkeyCol <- ncol(unique.covars)
    # unique.covars now has an extra column, zzzPkey. Exclude this column
    # in calculations below (or set its coefficient to 0 via c(a, 0)).
    # covars and unique.covars both carry zzzPkey for the merge later.

    seqy <- list()
    temp.scaler <- vector(length = nrow(unique.covars))
    scaler <- vector(length = nrow(covars), "numeric")

    if(pointSurvey){
      # This case is POINTS, COVARS, all likelihoods: numerical integration
      # of x * g(x) per unique covariate combination.
      seqx = seq(w.lo, w.hi, length=nTrapazoids)
      for(i in 1:nrow(unique.covars)){
        # Covariates held constant across the integration grid for row i.
        temp.covars <- matrix(as.numeric(unique.covars[i,-PkeyCol])
                              , nrow = length(seqx)
                              , ncol = ncol(unique.covars)-1
                              , byrow=TRUE)
        seqy[[i]] <- units::drop_units(seqx) * density(a = a
                                                       , dist = seqx
                                                       , covars = temp.covars
                                                       , scale = FALSE
                                                       , w.lo = w.lo
                                                       , w.hi = w.hi
                                                       , expansions = expansions
                                                       , series=series
        )
        # Trapezoid rule.
        temp.scaler[i] <- units::drop_units(seqx[2] - seqx[1]) * sum(seqy[[i]][-length(seqy[[i]])] + seqy[[i]][-1]) / 2
      }
    } else if(identical(density, halfnorm.like) & expansions == 0){
      # This case is LINES, COVARS, HALFNORM, NO EXPANSIONS.
      # Special-cased because the halfnorm integral is known in closed form,
      # which is faster and more accurate than numerical integration.
      # c(a, 0) gives the zzzPkey column a zero coefficient so it drops out.
      s <- as.matrix(unique.covars) %*% matrix(c(a,0),ncol=1)
      sigma <- exp(s) # link function here
      # temp.scaler is the integral under the distance function. Integrals
      # are by definition unit-less, but pnorm returns units, so drop them
      # a priori. Evaluate the normal with mean w.lo and sd sigma from
      # -Inf to w.hi, then subtract 0.5 for the area left of the mean (w.lo).
      temp.scaler <- (pnorm(units::drop_units(w.hi)
                           , units::drop_units(w.lo)
                           , sigma) - 0.5) *
        sqrt(2*pi) * sigma
    } else if(identical(density, hazrate.like) & expansions == 0){
      # This case is LINES, HAZRATE, COVARS, NO EXPANSIONS.
      #
      # The integral of hazrate involves incomplete gamma functions (see
      # Wolfram). Incomplete gammas are implemented in packages such as
      # expint, which could be used to convert this to an exact integral.
      # For now, integrate numerically.
      seqx = seq(w.lo, w.hi, length=nTrapazoids)
      beta <- a[-length(a)]
      K <- a[length(a)]
      s <- as.matrix(unique.covars[,-PkeyCol]) %*% matrix(beta,ncol=1)
      sigma <- exp(s) # link function here
      # Trapezoid rule applied per covariate combination.
      temp.scaler <- sapply(sigma
                            , FUN = function(s, Seqx, KK){
                              seqy <- 1 - exp(-(Seqx/s)^(-KK))
                              scaler <- (Seqx[2] - Seqx[1])*sum(seqy[-length(seqy)] + seqy[-1]) / 2
                              scaler }
                            , Seqx = units::drop_units(seqx)
                            , KK = K)
    } else if(identical(density, negexp.like) & expansions == 0){
      # This case is LINES, NEGEXP, COVARS, NO EXPANSIONS.
      # Closed-form integral of the negative exponential; c(a, 0) again
      # zeroes out the zzzPkey column.
      s <- as.matrix(unique.covars) %*% matrix(c(a,0),ncol=1)
      beta <- exp(s)
      temp.scaler <- unname((exp(-beta * units::drop_units(w.lo)) -
                               exp(-beta * units::drop_units(w.hi)))/beta)
    } else {
      # This case is LINES, COVARS, LIKE in {Logistic, User, Gamma}
      # and ALL LIKELIHOODS with expansions > 0.
      #
      # All likelihoods could be handled this way (numerical integration),
      # but the special cases above are faster and in some cases more
      # accurate because the theoretical integral is known (normal,
      # exponential).
      seqx = seq(w.lo, w.hi, length=nTrapazoids)

      # Applies the density to each row of covariates and integrates by
      # the trapezoid rule.
      likeApply <- function(covs
                            , Seqx
                            , W.lo
                            , W.hi
                            , A
                            , Expansions
                            , Series
      ){
        # Matrix of constant covariates for this case
        temp.covars <- matrix(covs
                              , nrow = length(Seqx)
                              , ncol = length(covs)
                              , byrow = TRUE
        )
        seqy <- density(dist = Seqx
                        , covars = temp.covars
                        , scale = FALSE
                        , w.lo = W.lo
                        , w.hi = W.hi
                        , a = A
                        , expansions = Expansions
                        , series = Series
        )
        # Trapezoid rule.
        scaler <- units::drop_units(Seqx[2] - Seqx[1])*sum(seqy[-length(seqy)] + seqy[-1]) / 2
        scaler
      }

      temp.scaler <- apply(X = unique.covars[, -PkeyCol, drop = FALSE]
                           , MARGIN = 1
                           , FUN = likeApply
                           , Seqx = seqx
                           , W.lo = w.lo
                           , W.hi = w.hi
                           , A = a
                           , Expansions = expansions
                           , Series = series
      )
    }

    # Merge per-unique-combination constants back onto the full covariate
    # rows using the zzzPkey key.
    df <- data.frame(unique.covars, temp.scaler)
    z <- merge(covars, df, by.x="zzzPkey", by.y="zzzPkey", sort=F)
    scaler <- z$temp.scaler

    if(pointSurvey){
      scaler <- scaler/units::drop_units(dist)
    }

  } else if( pointSurvey ){
    # This case is POINTS - NO covariates.
    seqx = seq(w.lo, w.hi, length=nTrapazoids)
    seqy <- units::drop_units(seqx) * density( dist = seqx, scale = FALSE,
                                               w.lo = w.lo, w.hi = w.hi, a = a,
                                               expansions = expansions, series=series)
    # trapezoid rule
    scaler <- units::drop_units(seqx[2]-seqx[1]) * sum(seqy[-length(seqy)]+seqy[-1]) / (2*units::drop_units(dist))
  } else {
    # This case is LINES - NO covariates.
    # Density should return unit-less numbers (height of density function).
    seqx = seq(w.lo, w.hi, length=nTrapazoids)
    seqy <- density( dist = seqx
                     , scale = FALSE
                     , w.lo = w.lo
                     , w.hi = w.hi
                     , a = a
                     , expansions = expansions
                     , series=series
    )
    # trapezoid rule
    scaler <- units::drop_units(seqx[2]-seqx[1]) * sum(seqy[-length(seqy)]+seqy[-1]) / 2
  }

  # Guard against degenerate parameter guesses where the integration
  # constant underflows to 0 (e.g., pnorm(100, 0, 2e30)). Returning 0
  # would put Inf in the likelihood denominator, which is uninformative
  # (nlminb sometimes guesses NaN after that). Because a negative is
  # applied in the nLL function, return the largest representable values
  # such that summing them does not overflow to Inf.
  if( any(indZeros <- is.na(scaler) |
          is.infinite(scaler) |
          is.nan(scaler) |
          (scaler <= .Machine$double.xmin)) ){
    scaler[ indZeros ] <- .Machine$double.xmax / sum(indZeros)
  }

  # cat(paste("\tscaler = \n\t", paste(scaler, collapse = ", "), "\n"))

  scaler
} | /R/integration.constant.R | no_license | cran/Rdistance | R | false | false | 13,178 | r | #' @title Compute the integration constant for distance density functions
#'
#' @description Using numerical integration, this function computes
#' the area under a distance function between two limits (\code{w.lo}
#' and \code{w.hi}).
#'
#' @param dist Vector of detection distance values.
#'
#' @param density A likelihood function for which the
#' integration constant is sought. This function
#' must be capable of evaluating values between \code{w.lo}
#' and \code{w.hi} and have the following parameters:
#' \itemize{
#' \item \samp{a} = Parameter vector.
#' \item \samp{dist} = Vector of distances.
#' \item \samp{covars} = If the density allows covariates,
#' the covariate matrix.
#' \item \samp{w.lo} = Lower limit or left truncation value.
#' \item \samp{w.hi} = Upper limit or right truncation value.
#' \item \samp{series} = Form of the series expansions, if any.
#' \item \samp{expansions} = Number of expansion terms.
#' \item \samp{scale} = Whether to scale function to integrate to 1.
#' }
#'
#' @param w.lo The lower limit of integration, or the left truncation
#' value for perpendicular distances.
#'
#' @param w.hi The upper limit of integration, or the right truncation
#' value for perpendicular distances.
#'
#' @param covars Matrix of covariate values.
#'
#' @param a Vector of parameters to pass to \code{density}.
#'
#' @param series The series to use for expansions.
#' If \code{expansions} > 0, this string
#' specifies the type of expansion. Valid values at
#' present are 'simple', 'hermite', and 'cosine'.
#'
#' @param expansions Number of expansions in \code{density}.
#'
#' @param pointSurvey Boolean. TRUE if point transect data,
#' FALSE if line transect data.
#'
#'
#' @details The trapezoid rule is used to numerically integrate
#' \code{density} from \code{w.lo} to \code{w.hi}. Two-hundred
#' (200) equal-sized trapezoids are used in the integration. The number
#' of trapezoids to use is fixed and cannot be changed without
#' re-writing this routine.
#'
#' @return A scalar (or vector of scalars if covariates are present)
#' that is the area under \code{density} between \code{w.lo} and \code{w.hi}.
#' This scalar can be used as a divisor to scale density such that
#' it integrates to 1.0. If x = density(\ldots), then
#' x / \code{integration.constant(density, \ldots)} will integrate to 1.0.
#'
#' @seealso \code{\link{dfuncEstim}}, \code{\link{halfnorm.like}}
#'
#' @examples
#' # Can put any number for first argument (1 used here)
#' scl <- integration.constant(dist=units::set_units(1,"m")
#' , density=logistic.like
#' , covars = NULL
#' , pointSurvey = FALSE
#' , w.lo = units::set_units(0,"m")
#' , w.hi = units::set_units(100,"m")
#' , expansions = 0
#' , a=c(75,25))
#' print(scl) # Should be 75.1
#'
#' x <- units::set_units(seq(0,100,length=200), "m")
#' y <- logistic.like( c(75,25), x, scale=FALSE ) / scl
#' int.y <- (x[2]-x[1]) * sum(y[-length(y)]+y[-1]) / 2 # the trapezoid rule, should be 1.0
#' print(int.y) # Should be 1
#'
#' @keywords models
#' @importFrom stats integrate
#' @export
integration.constant <- function(dist,
                                 density,
                                 a,
                                 covars,
                                 w.lo,
                                 w.hi,
                                 series,
                                 expansions,
                                 pointSurvey){
  # Resolve `density` to a likelihood function whether given as name or closure.
  density = match.fun(density)
  # We need w.lo, w.hi, and dist to have same units.
  # This is important because we occasionally drop units in integral calculations below.
  # I cannot think of a case where units(w.lo) != units(dist),
  # but just in case...
  if( units(w.lo) != units(dist)){
    w.lo <- units::set_units(w.lo, units(dist), mode = "standard")
  }
  if( units(w.hi) != units(dist)){
    w.hi <- units::set_units(w.hi, units(dist), mode = "standard")
  }
  # Now, we can safely compute sequence of x values for numerical integration.
  # This is done below, in each case where it's needed.
  nTrapazoids <- 200 # number of evaluation points in numerical integration
  if(!is.null(covars)){
    # Not sure following is best to do.
    # It is much faster to de-dup and compute values on just
    # unique combinations of covariates IF there are large number
    # of duplicated covariates. This is common when using
    # factors. But, if using continuous covariates, this is
    # inefficient. Regardless, this code only computes values
    # of the scaling constant for unique combinations of X,
    # then merges them into the covar array. The non-de-dupped
    # way to do these calculations is something like
    # x <- covars %*% matrix(a,ncol=1), which might be just as fast.
    Pkey <- tapply(1:nrow(covars), as.data.frame(covars)) # groups of duplicates
    covars <- data.frame(covars, zzzPkey=Pkey)
    dupCovars <- duplicated(covars$zzzPkey)
    unique.covars <- covars[!dupCovars,]
    PkeyCol <- ncol(unique.covars)
    # Remember that unique.covars now has extra column, zzzPkey
    # don't include this column in calculations below (or set a[last]=0)
    # covars and unique.covars now have Pkey, which we will use to
    # merge later
    seqy <- list()
    temp.scaler <- vector(length = nrow(unique.covars))
    scaler <- vector(length = nrow(covars), "numeric")
    if(pointSurvey){
      # This case is POINTS, COVARS, all Likelihoods
      # Point transects integrate x*g(x); trapezoid rule over nTrapazoids points.
      seqx = seq(w.lo, w.hi, length=nTrapazoids)
      for(i in 1:nrow(unique.covars)){
        # Constant covariate row replicated down the integration grid.
        temp.covars <- matrix(as.numeric(unique.covars[i,-PkeyCol])
                              , nrow = length(seqx)
                              , ncol = ncol(unique.covars)-1
                              , byrow=TRUE)
        seqy[[i]] <- units::drop_units(seqx) * density(a = a
                                , dist = seqx
                                , covars = temp.covars
                                , scale = FALSE
                                , w.lo = w.lo
                                , w.hi = w.hi
                                , expansions = expansions
                                , series=series
                                )
        # Trapezoid rule: h/2 * sum of adjacent ordinate pairs.
        temp.scaler[i] <- units::drop_units(seqx[2] - seqx[1]) * sum(seqy[[i]][-length(seqy[[i]])] + seqy[[i]][-1]) / 2
      }
    } else if(identical(density, halfnorm.like) & expansions == 0){
      # this case is LINES, COVARS, HALFNORM, NO EXPANSIONS
      # Made this a case because I think it's faster, because we know integral
      s <- as.matrix(unique.covars) %*% matrix(c(a,0),ncol=1)
      sigma <- exp(s) # link function here
      # temp.scaler should be integral under distance function
      # We happen to know it for halfnorm (and some others below)
      # Integrals are by defn unit-less; but, pnorm returns units. Drop apriori.
      # We evaluate normal with mean w.lo, sd = sigma, from -Inf to w.hi, then
      # subtract 0.5 from result for the area to left of mean (w.lo)
      temp.scaler <- (pnorm(units::drop_units(w.hi)
                            , units::drop_units(w.lo)
                            , sigma) - 0.5) *
        sqrt(2*pi) * sigma
    } else if(identical(density, hazrate.like) & expansions == 0){
      # This case is LINES, HAZRATE, COVARS, NO EXPANSIONS
      #
      # Integral of hazrate involves incomplete gamma functions.
      # See wolfram. Incomplete gammas are implemented in some packages, e.g.,
      # expint. You could convert to an exact integral using one of these
      # packages. But, for now, numerically integrate.
      seqx = seq(w.lo, w.hi, length=nTrapazoids)
      beta <- a[-length(a)]          # covariate coefficients
      K <- a[length(a)]              # hazard-rate shape parameter
      s <- as.matrix(unique.covars[,-PkeyCol]) %*% matrix(beta,ncol=1)
      sigma <- exp(s) # link function here
      temp.scaler <- sapply(sigma
                            , FUN = function(s, Seqx, KK){
                              seqy <- 1 - exp(-(Seqx/s)^(-KK))
                              scaler <- (Seqx[2] - Seqx[1])*sum(seqy[-length(seqy)] + seqy[-1]) / 2
                              scaler }
                            , Seqx = units::drop_units(seqx)
                            , KK = K)
    } else if(identical(density, negexp.like) & expansions == 0){
      # This case is LINES, NEGEXP, COVARS, NO EXPANSIONS
      # Closed form: integral of exp(-beta*x) from w.lo to w.hi.
      s <- as.matrix(unique.covars) %*% matrix(c(a,0),ncol=1)
      beta <- exp(s)
      temp.scaler <- unname((exp(-beta * units::drop_units(w.lo)) -
                               exp(-beta * units::drop_units(w.hi)))/beta)
    } else {
      # This case is for LINES, COVARS, LIKE in {Logistic, User, Gamma}
      # and ALL LIKELIHOODS with expansions > 0
      #
      # We could do all likelihoods this way (i.e., numerical integration); but,
      # the above special cases are faster (I think) and more accurate in some cases because
      # we know the theoretical integral (i.e., for normal and exponential)
      seqx = seq(w.lo, w.hi, length=nTrapazoids)
      # function to apply density to each row of covariates
      likeApply <- function(covs
                            , Seqx
                            , W.lo
                            , W.hi
                            , A
                            , Expansions
                            , Series
      ){
        # Matrix of constant covariates for this case
        temp.covars <- matrix(covs
                              , nrow = length(Seqx)
                              , ncol = length(covs)
                              , byrow = TRUE
        )
        seqy <- density(dist = Seqx
                        , covars = temp.covars
                        , scale = FALSE
                        , w.lo = W.lo
                        , w.hi = W.hi
                        , a = A
                        , expansions = Expansions
                        , series = Series
        )
        # Trapezoid rule over the grid for this covariate combination.
        scaler <- units::drop_units(Seqx[2] - Seqx[1])*sum(seqy[-length(seqy)] + seqy[-1]) / 2
        scaler
      }
      temp.scaler <- apply(X = unique.covars[, -PkeyCol, drop = FALSE]
                           , MARGIN = 1
                           , FUN = likeApply
                           , Seqx = seqx
                           , W.lo = w.lo
                           , W.hi = w.hi
                           , A = a
                           , Expansions = expansions
                           , Series = series
      )
    }
    # Expand per-unique-combination constants back out to every original row.
    df <- data.frame(unique.covars, temp.scaler)
    z <- merge(covars, df, by.x="zzzPkey", by.y="zzzPkey", sort=F)
    scaler <- z$temp.scaler
    if(pointSurvey){
      scaler <- scaler/units::drop_units(dist)
    }
  } else if( pointSurvey ){
    # This case is POINTS - NO Covariates
    seqx = seq(w.lo, w.hi, length=nTrapazoids)
    seqy <- units::drop_units(seqx) * density( dist = seqx, scale = FALSE,
                  w.lo = w.lo, w.hi = w.hi, a = a,
                  expansions = expansions, series=series)
    # trapezoid rule
    scaler <- units::drop_units(seqx[2]-seqx[1]) * sum(seqy[-length(seqy)]+seqy[-1]) / (2*units::drop_units(dist))
  } else {
    # This case is LINES - NO Covariates
    # Density should return unit-less numbers (height of density function)
    seqx = seq(w.lo, w.hi, length=nTrapazoids)
    seqy <- density( dist = seqx
                     , scale = FALSE
                     , w.lo = w.lo
                     , w.hi = w.hi
                     , a = a
                     , expansions = expansions
                     , series=series
    )
    # trapezoid rule
    scaler <- units::drop_units(seqx[2]-seqx[1]) * sum(seqy[-length(seqy)]+seqy[-1]) / 2
  }
  # there are cases where the guess at parameters is so bad, that the integration
  # constant is 0 (consider pnorm(100,0,2e30)). But, we don't want to return 0
  # because it goes in denominator of likelihood and results in Inf, which is
  # not informative. nlminb guesses NaN after that sometimes. We want to return
  # the smallest possible number that does not result in log(x) = -Inf.
  # Because of the negative applied in nLL function we actually want to return
  # the largest possible numbers such that when we sum them and others we don't get Inf
  if( any(indZeros <- is.na(scaler) |
          is.infinite(scaler) |
          is.nan(scaler) |
          (scaler <= .Machine$double.xmin)) ){
    scaler[ indZeros ] <- .Machine$double.xmax / sum(indZeros)
  }
  # cat(paste("\tscaler = \n\t", paste(scaler, collapse = ", "), "\n"))
  scaler
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/knit_print.R
\name{knit_print.fmt_uni_regression}
\alias{knit_print.fmt_uni_regression}
\title{Print \code{fmt_uni_regression} objects in Rmarkdown}
\usage{
\method{knit_print}{fmt_uni_regression}(x, options, ...)
}
\arguments{
\item{x}{object of class \code{fmt_uni_regression} object from
\code{\link{fmt_uni_regression}} function}
\item{options}{Copied from the printr package; kept for compatibility and a candidate for removal.}
\item{...}{further arguments passed to \code{knitr::kable()}.}
}
\description{
Print \code{fmt_uni_regression} objects in Rmarkdown
}
\examples{
\donttest{
fmt_uni_regression(
trial,
method = "glm",
y = "response",
method.args = list(family = binomial),
exponentiate = TRUE
) \%>\%
print()
}
}
| /man/knit_print.fmt_uni_regression.Rd | permissive | shijianasdf/clintable | R | false | true | 788 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/knit_print.R
\name{knit_print.fmt_uni_regression}
\alias{knit_print.fmt_uni_regression}
\title{Print \code{fmt_uni_regression} objects in Rmarkdown}
\usage{
\method{knit_print}{fmt_uni_regression}(x, options, ...)
}
\arguments{
\item{x}{object of class \code{fmt_uni_regression} object from
\code{\link{fmt_uni_regression}} function}
\item{options}{Copied from the printr package; kept for compatibility and a candidate for removal.}
\item{...}{further arguments passed to \code{knitr::kable()}.}
}
\description{
Print \code{fmt_uni_regression} objects in Rmarkdown
}
\examples{
\donttest{
fmt_uni_regression(
trial,
method = "glm",
y = "response",
method.args = list(family = binomial),
exponentiate = TRUE
) \%>\%
print()
}
}
|
library(shiny)

# Simple Shiny app: histogram of `num` random normal draws with a
# user-supplied title.
ui <- fluidPage(
  sliderInput(inputId = "num",
              label = "Choose a number",
              value = 25, min = 1, max = 100),
  textInput(inputId = "title",
            label = "Write a title",
            value = "Histogram of Random Normal Values"),
  # textInput(inputId = "xtitle",
  #           label = "Write a title",
  #           value = "X-axis title"),
  # Fix: the trailing comma after plotOutput("hist") left an empty argument
  # in fluidPage(), which errors at run time ("argument is empty").
  plotOutput("hist")
  # verbatimTextOutput("stats")   # (typo fixed; pair with renderPrint below)
)

server <- function(input, output){
  # Re-draws whenever the slider value or the title text changes.
  output$hist <- renderPlot({
    hist(rnorm(input$num), main = input$title)
  })
  # Commented-out variant sharing one reactive data source:
  # data <- reactive({ rnorm(input$num) })
  # output$hist  <- renderPlot({ hist(data(), main = input$title) })
  # output$stats <- renderPrint({ summary(data()) })
}

shinyApp(ui = ui, server = server)
library(shiny)
library(shiny.users)

# Demo credentials: passwords are stored as SHA-256 hashes, never plain text.
demo_users <- list(
  list(
    username = "demo-appsilon",
    password_sha256 = "A7574A42198B7D7EEE2C037703A0B95558F195457908D6975E681E2055FD5EB9",
    roles = list("basic", "admin")
  ),
  list(
    username = "john",
    password_sha256 = "C2F77349B4D0CDE5A1E865195A9E395E1DF8829BE9D31707BD12F44CEB384A60",
    roles = list("basic")
  )
)

ui <- shinyUI(fluidPage(
  div(class = "container", style = "padding: 4em",
      login_screen_ui('login_screen'),
      uiOutput("authorized_content")
  )
))

server <- shinyServer(function(input, output) {
  users <- initialize_users(demo_users)
  callModule(login_screen, 'login_screen', users)

  # Protected content is rendered only after a successful login.
  output$authorized_content <- renderUI({
    if (!is.null(users$user())) {  # fix: closing parenthesis was missing (parse error)
      ... # application content
    }
  })
})
shinyApp(ui, server) | /app.R | no_license | kchauhan295/app1 | R | false | false | 1,731 | r | library(shiny)
# Simple Shiny app: histogram of `num` random normal draws with a
# user-supplied title.
ui <- fluidPage(
  sliderInput(inputId = "num",
              label = "Choose a number",
              value = 25, min = 1, max = 100),
  textInput(inputId = "title",
            label = "Write a title",
            value = "Histogram of Random Normal Values"),
  # textInput(inputId = "xtitle",
  #           label = "Write a title",
  #           value = "X-axis title"),
  # Fix: the trailing comma after plotOutput("hist") left an empty argument
  # in fluidPage(), which errors at run time ("argument is empty").
  plotOutput("hist")
  # verbatimTextOutput("stats")   # (typo fixed; pair with renderPrint below)
)

server <- function(input, output){
  # Re-draws whenever the slider value or the title text changes.
  output$hist <- renderPlot({
    hist(rnorm(input$num), main = input$title)
  })
  # Commented-out variant sharing one reactive data source:
  # data <- reactive({ rnorm(input$num) })
  # output$hist  <- renderPlot({ hist(data(), main = input$title) })
  # output$stats <- renderPrint({ summary(data()) })
}

shinyApp(ui = ui, server = server)
library(shiny)
library(shiny.users)

# Demo credentials: passwords are stored as SHA-256 hashes, never plain text.
demo_users <- list(
  list(
    username = "demo-appsilon",
    password_sha256 = "A7574A42198B7D7EEE2C037703A0B95558F195457908D6975E681E2055FD5EB9",
    roles = list("basic", "admin")
  ),
  list(
    username = "john",
    password_sha256 = "C2F77349B4D0CDE5A1E865195A9E395E1DF8829BE9D31707BD12F44CEB384A60",
    roles = list("basic")
  )
)

ui <- shinyUI(fluidPage(
  div(class = "container", style = "padding: 4em",
      login_screen_ui('login_screen'),
      uiOutput("authorized_content")
  )
))

server <- shinyServer(function(input, output) {
  users <- initialize_users(demo_users)
  callModule(login_screen, 'login_screen', users)

  # Protected content is rendered only after a successful login.
  output$authorized_content <- renderUI({
    if (!is.null(users$user())) {  # fix: closing parenthesis was missing (parse error)
      ... # application content
    }
  })
})

shinyApp(ui, server)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BSDA-package.R
\docType{data}
\name{Eggs}
\alias{Eggs}
\title{Number of eggs versus amounts of feed supplement}
\format{
A data frame/tibble with 12 observations on two variables
\describe{
\item{feed}{amount of feed supplement}
\item{eggs}{number of eggs per day for 100 chickens}
}
}
\usage{
Eggs
}
\description{
Data for Exercise 9.22
}
\examples{
plot(eggs ~ feed, data = Eggs)
model <- lm(eggs ~ feed, data = Eggs)
abline(model, col = "red")
summary(model)
rm(model)
}
\references{
Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
}
\keyword{datasets}
| /man/Eggs.Rd | no_license | alanarnholt/BSDA | R | false | true | 721 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BSDA-package.R
\docType{data}
\name{Eggs}
\alias{Eggs}
\title{Number of eggs versus amounts of feed supplement}
\format{
A data frame/tibble with 12 observations on two variables
\describe{
\item{feed}{amount of feed supplement}
\item{eggs}{number of eggs per day for 100 chickens}
}
}
\usage{
Eggs
}
\description{
Data for Exercise 9.22
}
\examples{
plot(eggs ~ feed, data = Eggs)
model <- lm(eggs ~ feed, data = Eggs)
abline(model, col = "red")
summary(model)
rm(model)
}
\references{
Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
}
\keyword{datasets}
|
# project euler question 4 palyndromic number
rm(list=ls())
# Palindrome test: returns 1 when the decimal digits of `n` read the same
# forwards and backwards, 0 otherwise.
is_pal <- function(n){
  digits <- strsplit(as.character(n), "")[[1]]
  reversed <- as.numeric(paste(rev(digits), collapse = ""))
  if (n == reversed) 1 else 0
}
# Project Euler 4: largest palindrome that is a product of two 3-digit numbers.
# Instead of testing every integer up to 999*999 for palindromicity (growing a
# vector with append() in a loop) and then factoring each candidate, walk the
# products of 3-digit factor pairs directly and keep the best palindrome seen.
max <- 0
for (i in 999:100) {
  # Once even i*999 cannot beat the current best, no smaller i can either.
  if (i * 999 <= max) break
  for (j in 999:i) {               # j >= i: each unordered pair checked once
    p <- i * j
    if (p > max && is_pal(p) == 1) {
      max <- p
    }
  }
}
max
| /project euler 4.R | no_license | mpommer/Project-euler | R | false | false | 553 | r | # project euler question 4 palyndromic number
rm(list=ls())
# Palindrome test: returns 1 if n's decimal digits read the same forwards
# and backwards, 0 otherwise.
is_pal <- function(n){
  a <- n
  # Split into characters, reverse, rejoin, and convert back to a number.
  b <- as.numeric(paste(rev(strsplit(as.character(a),"")[[1]]),collapse=""))
  if(a==b){
    return(1)
  } else{
    return(0)
  }
}
# Collect every palindrome in [101, 999*999].
# NOTE(review): growing v1 with append() inside a loop is O(n^2); a
# preallocated vector (or Filter over a sequence) would be much faster.
v1 <- c()
for( i in 101:(999*999)){
  if(is_pal(i)==1){
    v1 <- append(v1,i)
  }
}
# Scan palindromes from largest to smallest; stop at the first one that
# factors as j * (v1[i]/j) with j a 3-digit number and quotient < 1000.
# NOTE(review): the quotient is not explicitly checked to be >= 100, but for
# palindromes this large the quotient of any 3-digit divisor appears to
# exceed 100 anyway -- confirm before reusing this bound elsewhere.
max <- 0
for ( i in seq(length(v1),1, by= -1)){
  for(j in 100:999){
    if(v1[i]%%j==0){
      if(v1[i]/j<1000){
        max<-v1[i]
      }
    }
  }
  if(max!=0){
    break
  }
}
max
|
# Consolidates usage data (`uso`) with the accesses table (ACCESSES) and the
# tariff-plan table (SF_Final), then extracts lines with no usage in the last
# three months into the global `SinUsos`. Expects `uso`, `ACCESSES`, `SF_Final`
# and `nombre` to exist in the calling environment; publishes `SinUsos`,
# `ASDAS` and `Consolidado` globally via `<<-`.
{
  # Drop columns that would collide with (and distort) the final column
  # names produced by the merge below.
  ACCESSES2<-ACCESSES
  ACCESSES2[["Acceso"]]<-NULL
  ACCESSES2[["Proveedor"]]<-NULL
  ACCESSES2[["Tipo"]]<-NULL
  ACCESSES2[["Estado"]]<-NULL
  # Left-join usage onto ACCESSES by "Acceso fix".
  Consolidado <- merge(uso,
                       ACCESSES2,
                       by.x = "Acceso fix",
                       by.y = "Acceso fix",
                       all.x = TRUE)
  # Build the plan table, dropping unneeded columns.
  PLAN2 <- SF_Final
  # Keep only tariff-plan products for the analysis.
  PLAN2 <- subset(PLAN2,
                  PLAN2[["Tipo de producto"]] == "Plano tarifario")
  PLAN2<-subset(PLAN2,select = c("Acceso","Producto","Importe de las opciones descontadas"))
  PLAN2[["Acceso"]]<-as.character(PLAN2[["Acceso"]])
  Consolidado[["Acceso"]]<-as.character(Consolidado[["Acceso"]])
  Consolidado<- merge(Consolidado,PLAN2,by.x ="Acceso",by.y = "Acceso", all.x = TRUE)
  SinUsos<-Consolidado
  SinUsos<-subset(SinUsos,SinUsos[["Tipo"]]!="Centro de facturación")
  SinUsos<<-SinUsos          # NOTE(review): `<<-` publishes into the global env
  ASDAS<<-names(SinUsos)
  # Total usage across voice, data and SMS/MMS; zero means "no usage at all".
  SinUsos[,'usocant']<-SinUsos[,'Voz (sec)']+SinUsos[,'Datos (KB)']+SinUsos[,'N.° SMS/MMS']
  SinUsos <-
    subset(
      SinUsos,
      select = c(
        "Acceso",
        "Fecha",
        "Total",
        "usocant"
      )
    )
  SinUsos<-subset(SinUsos,SinUsos[["usocant"]]==0)
  # Parse month and year out of "Fecha" -- assumes a yyyy-mm... layout
  # (chars 1-4 = year, 6-7 = month); TODO confirm against the source data.
  month1 <- sapply(SinUsos[,'Fecha'], substr, 6, 7)
  month <- as.numeric(month1)
  rm(month1)
  year1<-sapply(SinUsos[,'Fecha'],substr,1,4)
  year<-as.numeric(year1)
  rm(year1)
  AAA<-data.frame(SinUsos[,'Fecha'])
  AAA[,'month']<-month
  AAA[,'year']<-year
  BBB<-subset(AAA,
              AAA["year"]==max(year))
  rm(month,year)
  # Most recent month (fin1) and year (fin2) present in the data.
  fin1<-max(BBB[["month"]])
  fin2<-max(BBB[["year"]])
  rm(BBB)
  # Months elapsed between each record and the most recent month (1 = current).
  # NOTE(review): prefer seq_along() over 1:length() -- breaks on empty data.
  for (i in 1:length(SinUsos[["Acceso"]])){
    SinUsos[["Meses"]][i]<-(fin2-AAA[["year"]][i])*12+fin1-AAA[["month"]][i]+1
  }
  rm(AAA)
  # Keep only lines with no usage during the last 3 months.
  SinUsos<-subset(SinUsos,SinUsos[["Meses"]]<=3)
  SinUsos<<-subset(SinUsos,SinUsos[["Meses"]]<=3)
  ######## Exceptions ############
  # Client-specific fix-up: for "Aguas Andinas", rows with a missing billing
  # center ('-') take their billing center from "Proveedor Nivel 3".
  if(!is.null(nombre)){
    if(nombre == "Aguas Andinas"){
      probar<<-subset(Consolidado,Consolidado[["Centro de facturacion"]] == '-')
      if(length(probar[["Acceso"]])>0){
        UAADP_usos2<-subset(Consolidado,Consolidado[["Centro de facturacion"]]!='-')
        probar[["Centro de facturacion"]]<-NULL
        probar[,'Centro de facturacion']<-probar[,'Proveedor Nivel 3']
        Consolidado<-rbind(probar,UAADP_usos2)
        rm(probar,UAADP_usos2)
      }
    }
  }
  Consolidado<<-Consolidado
}
| /pj_igm.R | no_license | neobiscorp/NewCargaBI | R | false | false | 2,708 | r | {
#Se borran las columnas innecesarias que causaran distorcion sobre los nombres finales de las columnas
ACCESSES2<-ACCESSES
ACCESSES2[["Acceso"]]<-NULL
ACCESSES2[["Proveedor"]]<-NULL
ACCESSES2[["Tipo"]]<-NULL
ACCESSES2[["Estado"]]<-NULL
# se une uso con ACCESSES
Consolidado <- merge(uso,
ACCESSES2,
by.x = "Acceso fix",
by.y = "Acceso fix",
all.x = TRUE)
#se modifica plan eliminando las columnas innecesarias
PLAN2 <- SF_Final
#Se dejan solo los que son plano tarifario para el analisis
PLAN2 <- subset(PLAN2,
PLAN2[["Tipo de producto"]] == "Plano tarifario")
PLAN2<-subset(PLAN2,select = c("Acceso","Producto","Importe de las opciones descontadas"))
PLAN2[["Acceso"]]<-as.character(PLAN2[["Acceso"]])
Consolidado[["Acceso"]]<-as.character(Consolidado[["Acceso"]])
Consolidado<- merge(Consolidado,PLAN2,by.x ="Acceso",by.y = "Acceso", all.x = TRUE)
SinUsos<-Consolidado
SinUsos<-subset(SinUsos,SinUsos[["Tipo"]]!="Centro de facturación")
SinUsos<<-SinUsos
ASDAS<<-names(SinUsos)
SinUsos[,'usocant']<-SinUsos[,'Voz (sec)']+SinUsos[,'Datos (KB)']+SinUsos[,'N.° SMS/MMS']
SinUsos <-
subset(
SinUsos,
select = c(
"Acceso",
"Fecha",
"Total",
"usocant"
)
)
SinUsos<-subset(SinUsos,SinUsos[["usocant"]]==0)
month1 <- sapply(SinUsos[,'Fecha'], substr, 6, 7)
month <- as.numeric(month1)
rm(month1)
year1<-sapply(SinUsos[,'Fecha'],substr,1,4)
year<-as.numeric(year1)
rm(year1)
AAA<-data.frame(SinUsos[,'Fecha'])
AAA[,'month']<-month
AAA[,'year']<-year
BBB<-subset(AAA,
AAA["year"]==max(year))
rm(month,year)
fin1<-max(BBB[["month"]])
fin2<-max(BBB[["year"]])
rm(BBB)
for (i in 1:length(SinUsos[["Acceso"]])){
SinUsos[["Meses"]][i]<-(fin2-AAA[["year"]][i])*12+fin1-AAA[["month"]][i]+1
}
rm(AAA)
SinUsos<-subset(SinUsos,SinUsos[["Meses"]]<=3)
SinUsos<<-subset(SinUsos,SinUsos[["Meses"]]<=3)
########Excepciones############
if(!is.null(nombre)){
if(nombre == "Aguas Andinas"){
probar<<-subset(Consolidado,Consolidado[["Centro de facturacion"]] == '-')
if(length(probar[["Acceso"]])>0){
UAADP_usos2<-subset(Consolidado,Consolidado[["Centro de facturacion"]]!='-')
probar[["Centro de facturacion"]]<-NULL
probar[,'Centro de facturacion']<-probar[,'Proveedor Nivel 3']
Consolidado<-rbind(probar,UAADP_usos2)
rm(probar,UAADP_usos2)
}
}
}
Consolidado<<-Consolidado
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Evaluation.r
\name{mcEvaluate}
\alias{mcEvaluate}
\title{Evaluates the number of occurrences of predicted next clicks}
\usage{
mcEvaluate(mc, startPattern, testCLS)
}
\arguments{
\item{mc}{a markovchain object (this should have been built from a set of training data)}
\item{startPattern}{the starting pattern we want to predict next click on, and evaluate observed occurrences in test data.}
\item{testCLS}{clickstream object with test data}
}
\description{
Evaluates the number of occurrences of predicted next clicks vs. total number of starting pattern occurrences
in a given clickstream. The predicted next click can be a markov chain of any order.
}
\examples{
training <- c("User1,h,c,c,p,c,h,c,p,p,c,p,p,o",
"User2,i,c,i,c,c,c,d",
"User3,h,i,c,i,c,p,c,c,p,c,c,i,d",
"User4,c,c,p,c,d")
test <- c("User1,h,h,h,h,c,c,p,p,h,c,p,p,c,p,p,o",
"User2,i,c,i,c,c,c,d",
"User4,c,c,c,c,d,c,c,c,c")
csf <- tempfile()
writeLines(training, csf)
trainingCLS <- readClickstreams(csf, header = TRUE)
unlink(csf)
csf <- tempfile()
writeLines(test, csf)
testCLS <- readClickstreams(csf, header = TRUE)
unlink(csf)
mc <- fitMarkovChain(trainingCLS, order = 1)
startPattern <- new("Pattern", sequence = c("c","c"))
res <- mcEvaluate(mc, startPattern, testCLS)
res
}
\author{
Theo van Kraay \email{theo.vankraay@hotmail.com}
}
| /man/mcEvaluate.Rd | no_license | cran/clickstream | R | false | true | 1,459 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Evaluation.r
\name{mcEvaluate}
\alias{mcEvaluate}
\title{Evaluates the number of occurrences of predicted next clicks}
\usage{
mcEvaluate(mc, startPattern, testCLS)
}
\arguments{
\item{mc}{a markovchain object (this should have been built from a set of training data)}
\item{startPattern}{the starting pattern we want to predict next click on, and evaluate observed occurrences in test data.}
\item{testCLS}{clickstream object with test data}
}
\description{
Evaluates the number of occurrences of predicted next clicks vs. total number of starting pattern occurrences
in a given clickstream. The predicted next click can be a markov chain of any order.
}
\examples{
training <- c("User1,h,c,c,p,c,h,c,p,p,c,p,p,o",
"User2,i,c,i,c,c,c,d",
"User3,h,i,c,i,c,p,c,c,p,c,c,i,d",
"User4,c,c,p,c,d")
test <- c("User1,h,h,h,h,c,c,p,p,h,c,p,p,c,p,p,o",
"User2,i,c,i,c,c,c,d",
"User4,c,c,c,c,d,c,c,c,c")
csf <- tempfile()
writeLines(training, csf)
trainingCLS <- readClickstreams(csf, header = TRUE)
unlink(csf)
csf <- tempfile()
writeLines(test, csf)
testCLS <- readClickstreams(csf, header = TRUE)
unlink(csf)
mc <- fitMarkovChain(trainingCLS, order = 1)
startPattern <- new("Pattern", sequence = c("c","c"))
res <- mcEvaluate(mc, startPattern, testCLS)
res
}
\author{
Theo van Kraay \email{theo.vankraay@hotmail.com}
}
|
# This is the user-interface definition of a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
# presumably defines `attribs` (the choices for the axis selectors) and
# Mongo access helpers -- TODO confirm what each sourced script provides.
source("mainScript.R")
source("mongo.R")
library(shiny)
library(shinyBS)
library("scatterplot3d")
library("threejs")
# Navbar layout: three axis selectors (x, y, z) rendered as navbar tabs, a
# "More" menu with two action buttons, and a fixed-top main panel holding the
# outputs. NOTE(review): each selectInput is passed as tabPanel's first
# (title) argument -- unusual, but it places the control in the navbar.
shinyUI(navbarPage("",
           tabPanel(
             selectInput("selectX", label = h3("coord x"), choices = attribs, selected = 1)
           ),
           tabPanel(
             selectInput("selectY", label = h3("coord y"), choices = attribs, selected = 1)
           ),
           tabPanel(
             selectInput("selectZ", label = h3("coord z"), choices = attribs, selected = 1)
           ),
           navbarMenu("More",
               tabPanel(actionButton("showInterpretationButton", label = "Show Semantic Interpretation")),
               tabPanel(actionButton("createQueryButton", label = "Query Input"))
           ),
           mainPanel(
               # Hides Shiny's red error messages in rendered outputs.
               tags$style(type="text/css",
                    ".shiny-output-error { visibility: hidden; }",
                    ".shiny-output-error:before { visibility: hidden; }"
               ),
               br(),
               br(),
               br(),
               br(),
               br(),
               hr(),
               column(12,
                    textOutput("selectX"),
                    textOutput("selectY"),
                    textOutput("selectZ")
               ),
               column(12,
                    bsAlert("showInterpretationAlert_anchorId")
               ),
               column(12,
                    plotOutput("plotGraphics")
               ),
               column(12,
                    scatterplotThreeOutput("scatterplot")
               )
           )
,position="fixed-top"))
| /ui.R | no_license | jbjares/sfmrshiny | R | false | false | 1,693 | r |
# This is the user-interface definition of a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
source("mainScript.R")
source("mongo.R")
library(shiny)
library(shinyBS)
library("scatterplot3d")
library("threejs")
shinyUI(navbarPage("",
tabPanel(
selectInput("selectX", label = h3("coord x"), choices = attribs, selected = 1)
),
tabPanel(
selectInput("selectY", label = h3("coord y"), choices = attribs, selected = 1)
),
tabPanel(
selectInput("selectZ", label = h3("coord z"), choices = attribs, selected = 1)
),
navbarMenu("More",
tabPanel(actionButton("showInterpretationButton", label = "Show Semantic Interpretation")),
tabPanel(actionButton("createQueryButton", label = "Query Input"))
),
mainPanel(
tags$style(type="text/css",
".shiny-output-error { visibility: hidden; }",
".shiny-output-error:before { visibility: hidden; }"
),
br(),
br(),
br(),
br(),
br(),
hr(),
column(12,
textOutput("selectX"),
textOutput("selectY"),
textOutput("selectZ")
),
column(12,
bsAlert("showInterpretationAlert_anchorId")
),
column(12,
plotOutput("plotGraphics")
),
column(12,
scatterplotThreeOutput("scatterplot")
)
)
,position="fixed-top"))
|
# Project: Data Visualization -- #AlevelResults retweet/reply network.
# Accounts that respond to others most often are highlighted.
# Ref: http://pablobarbera.com/big-data-upf/html/02a-networks-intro-visualization.html
# Federico Ferrero, 10/25/2020

# Clear environment
rm(list = ls())

library(igraph)
library(rtweet)
library(ggraph)
require("hrbrthemes")
library(tidyverse)

# Read the edge list. Each read overwrites `g`; keep only the line for the
# data set you want (BASE2 contains 14-17 Aug).
g <- read.delim("/Users/feder/Desktop/BASE.txt")
g <- read.delim("/Users/feder/Desktop/BASE2.txt")
g <- read.delim("/Users/feder/Desktop/BASE14.txt")

# Build a directed graph and drop self-loops (multi-edges are kept).
g <- graph.data.frame(g, directed = TRUE)
g <- simplify(g, remove.multiple = FALSE, remove.loops = TRUE)

# Quick size check (edge and vertex counts).
ecount(g)
vcount(g)

# Node attributes: size proportional to degree; label only accounts
# with degree >= 15.
V(g)$node_size  <- degree(g)
V(g)$node_label <- ifelse(degree(g) >= 15, V(g)$name, NA)

# Base-graphics overview of the complete network.
# Fixes: the original read V(g)$degree and V(g)$label, neither of which was
# ever set (only node_size / node_label exist), and passed the non-existent
# plot.igraph argument `vertex.label.size`.
par(mfrow = c(1, 2))
plot(g,
     vertex.color = "lightblue",
     vertex.size = V(g)$node_size * 0.4,
     edge.arrow.size = 0.01,
     vertex.label = V(g)$node_label,
     vertex.label.family = "Helvetica",
     layout = layout.graphopt,
     main = "#AlevelResults complete network")

# For the ggraph renderings, highlight only accounts with degree > 20.
V(g)$node_label <- unname(ifelse(degree(g)[V(g)] > 20, names(V(g)), ""))
V(g)$node_size  <- unname(ifelse(degree(g)[V(g)] > 20, degree(g), 0))

# One ggraph rendering of the retweet network for a given layout.
# Replaces the original's ten copy-pasted, near-identical plot blocks.
plot_rt_network <- function(g, layout, circular = FALSE, edge_width = 0.1) {
  base <- if (circular) {
    ggraph(g, layout = layout, circular = TRUE)
  } else {
    ggraph(g, layout = layout)
  }
  base +
    geom_edge_arc(edge_width = edge_width, aes(alpha = ..index..)) +
    geom_node_label(aes(label = node_label, size = node_size),
                    label.size = 0, fill = "#ffffff66",
                    segment.colour = "springgreen",
                    color = "blue", repel = TRUE, family = "Apple Garamond") +
    coord_fixed() +
    scale_size_area(trans = "sqrt") +
    labs(title = "#AlevelResults Retweet Relationships") +
    theme_graph(base_family = "Apple Garamond") +
    theme(legend.position = "none")
}

# Render the same network under several layouts.
for (ly in c("kk", "stress", "fr", "lgl", "graphopt")) {
  print(plot_rt_network(g, ly))
}
# Circular (linear + circular = TRUE) variant, slightly wider edges as before.
print(plot_rt_network(g, "linear", circular = TRUE, edge_width = 0.125))
| /social_network_analysis.R | no_license | federico-jf/Data-Visualization-UTD-EPPS-6356 | R | false | false | 8,013 | r | # People who more responds to others is highlighted / layout: 'stress'
#Project Data Visualization
# http://pablobarbera.com/big-data-upf/html/02a-networks-intro-visualization.html
#Federico Ferrero
#10/25/2020
# Clear environment
rm(list=ls())
#call library
library(igraph)
# reading in the data
g <- read.delim("/Users/feder/Desktop/BASE.txt")
g <- read.delim("/Users/feder/Desktop/BASE2.txt")# this base contains from 14 to 17 Aug
g <- read.delim("/Users/feder/Desktop/BASE14.txt")
# create data graph
g <- graph.data.frame(g, directed= T)
# remove loops in the igraph
g <- simplify(g, remove.multiple = F, remove.loops = T)
# check number of edges and nodes
E(g)# edges= 2312
V(g)# vertex= 49111
# defining labels and degrees
V(g)$node_size <- degree(g)
V(g)$node_label <- ifelse( degree(g)>=15, V(g)$name, NA )
#plot in 2 columns
par(mfrow=c(1,2))
##plot the complete network
plot(g,
vertex.color= "lightblue",
vertex.size= V(g)$degree*0.4,
edge.arrow.size= 0.01,
vertex.label.family="Helvetica",
vertex.label.size= V(g)$label,
layout=layout.graphopt,
main= "#AlevelResults complete network")
###
library(rtweet)
library(igraph)
library(ggraph)
require("hrbrthemes")
require("ggraph")
library(tidyverse)
V(g)$node_label <- unname(ifelse(degree(g)[V(g)] > 20, names(V(g)), ""))
V(g)$node_size <- unname(ifelse(degree(g)[V(g)] > 20, degree(g), 0))
# People who more responds to others is highlighted / layout: kk
ggraph(g, layout = "kk") +
geom_edge_arc(edge_width=0.1, aes(alpha=..index..)) +
geom_node_label(aes(label=node_label, size=node_size),
label.size=0, fill="#ffffff66", segment.colour="springgreen",
color="blue", repel=TRUE, family="Apple Garamond") +
coord_fixed() +
scale_size_area(trans="sqrt") +
labs(title="#AlevelResults Retweet Relationships") +
theme_graph(base_family="Apple Garamond") +
theme(legend.position="none")
# People who more responds to others is highlighted / layout: 'stress'
ggraph(g, layout = "stress") +
geom_edge_arc(edge_width=0.1, aes(alpha=..index..)) +
geom_node_label(aes(label=node_label, size=node_size),
label.size=0, fill="#ffffff66", segment.colour="springgreen",
color="blue", repel=TRUE, family="Apple Garamond") +
coord_fixed() +
scale_size_area(trans="sqrt") +
labs(title="#AlevelResults Retweet Relationships") +
theme_graph(base_family="Apple Garamond") +
theme(legend.position="none")
# People who more responds to others is highlighted / layout:'fr'
ggraph(g, layout = "fr") +
geom_edge_arc(edge_width=0.1, aes(alpha=..index..)) +
geom_node_label(aes(label=node_label, size=node_size),
label.size=0, fill="#ffffff66", segment.colour="springgreen",
color="blue", repel=TRUE, family="Apple Garamond") +
coord_fixed() +
scale_size_area(trans="sqrt") +
labs(title="#AlevelResults Retweet Relationships") +
theme_graph(base_family="Apple Garamond") +
theme(legend.position="none")
# People who more responds to others is highlighted / layout: 'lgl'
ggraph(g, layout = "lgl") +
geom_edge_arc(edge_width=0.1, aes(alpha=..index..)) +
geom_node_label(aes(label=node_label, size=node_size),
label.size=0, fill="#ffffff66", segment.colour="springgreen",
color="blue", repel=TRUE, family="Apple Garamond") +
coord_fixed() +
scale_size_area(trans="sqrt") +
labs(title="#AlevelResults Retweet Relationships") +
theme_graph(base_family="Apple Garamond") +
theme(legend.position="none")
# People who more responds to others is highlighted / layout:'graphopt'
ggraph(g, layout = "graphopt") +
geom_edge_arc(edge_width=0.1, aes(alpha=..index..)) +
geom_node_label(aes(label=node_label, size=node_size),
label.size=0, fill="#ffffff66", segment.colour="springgreen",
color="blue", repel=TRUE, family="Apple Garamond") +
coord_fixed() +
scale_size_area(trans="sqrt") +
labs(title="#AlevelResults Retweet Relationships") +
theme_graph(base_family="Apple Garamond") +
theme(legend.position="none")
# People who more responds to others is highlighted / circular layout
ggraph(g, layout = 'linear', circular = TRUE) +
geom_edge_arc(edge_width=0.125, aes(alpha=..index..)) +
geom_node_label(aes(label=node_label, size=node_size),
label.size=0, fill="#ffffff66", segment.colour="springgreen",
color="blue", repel=TRUE, family="Apple Garamond") +
coord_fixed() +
scale_size_area(trans="sqrt") +
labs(title="#AlevelResults Retweet Relationships") +
theme_graph(base_family="Apple Garamond") +
theme(legend.position="none")
ggraph(g, layout = "stress") +
geom_edge_arc(edge_width=0.1, aes(alpha=..index..)) +
geom_node_label(aes(label=node_label, size=node_size),
label.size=0, fill="#ffffff66", segment.colour="springgreen",
color="blue", repel=TRUE, family="Apple Garamond") +
coord_fixed() +
scale_size_area(trans="sqrt") +
labs(title="#AlevelResults Retweet Relationships") +
theme_graph(base_family="Apple Garamond") +
theme(legend.position="none")
# People who more responds to others is highlighted / layout:'fr'
ggraph(g, layout = "fr") +
geom_edge_arc(edge_width=0.1, aes(alpha=..index..)) +
geom_node_label(aes(label=node_label, size=node_size),
label.size=0, fill="#ffffff66", segment.colour="springgreen",
color="blue", repel=TRUE, family="Apple Garamond") +
coord_fixed() +
scale_size_area(trans="sqrt") +
labs(title="#AlevelResults Retweet Relationships") +
theme_graph(base_family="Apple Garamond") +
theme(legend.position="none")
# People who more responds to others is highlighted / layout: 'lgl'
ggraph(g, layout = "lgl") +
geom_edge_arc(edge_width=0.1, aes(alpha=..index..)) +
geom_node_label(aes(label=node_label, size=node_size),
label.size=0, fill="#ffffff66", segment.colour="springgreen",
color="blue", repel=TRUE, family="Apple Garamond") +
coord_fixed() +
scale_size_area(trans="sqrt") +
labs(title="#AlevelResults Retweet Relationships") +
theme_graph(base_family="Apple Garamond") +
theme(legend.position="none")
# People who more responds to others is highlighted / layout:'graphopt'
ggraph(g, layout = "graphopt") +
geom_edge_arc(edge_width=0.1, aes(alpha=..index..)) +
geom_node_label(aes(label=node_label, size=node_size),
label.size=0, fill="#ffffff66", segment.colour="springgreen",
color="blue", repel=TRUE, family="Apple Garamond") +
coord_fixed() +
scale_size_area(trans="sqrt") +
labs(title="#AlevelResults Retweet Relationships") +
theme_graph(base_family="Apple Garamond") +
theme(legend.position="none")
# People who more responds to others is highlighted / circular layout
ggraph(g, layout = 'linear', circular = TRUE) +
geom_edge_arc(edge_width=0.125, aes(alpha=..index..)) +
geom_node_label(aes(label=node_label, size=node_size),
label.size=0, fill="#ffffff66", segment.colour="springgreen",
color="blue", repel=TRUE, family="Apple Garamond") +
coord_fixed() +
scale_size_area(trans="sqrt") +
labs(title="#AlevelResults Retweet Relationships") +
theme_graph(base_family="Apple Garamond") +
theme(legend.position="none")
|
#!/usr/bin/env Rscript
library(plyr)
root.path <- "output1of3"
score.matrix.path <- paste(root.path, "matrix.csv", sep="/")
train.questions.path <- paste(root.path, "itemID_train.txt", sep="/")
test.questions.path <- paste(root.path, "itemID_test.txt", sep="/")
train.users.path <- paste(root.path, "userID_train.txt", sep="/")
test.users.path <- paste(root.path, "userID_test.txt", sep="/")
score <- read.csv(file = score.matrix.path)
train.questions <- read.table(file = train.questions.path)[,1]
test.questions <- read.table(file = test.questions.path)[,1]
train.users <- read.table(file = train.users.path)[,1]
test.users <- read.table(file = test.users.path)[,1]
score.train <- score[score$question_id %in% train.questions, ]
score.test <- score[score$question_id %in% test.questions, ]
score.CF.test <- score.test[score.test$user_id %in% test.users, ]
train.num.qs <- tapply(score.train$question_id, score.train$user_id, length)
score.train.sub <- score.train[score.train$user_id %in% names(train.num.qs)[train.num.qs >= 4],]
score.test.sub <- score.test[score.test$user_id %in% names(train.num.qs)[train.num.qs >= 4],]
score.CF.test.sub <- score.CF.test[score.CF.test$user_id %in% names(train.num.qs)[train.num.qs >= 4],]
cat( paste0( "Magic number for CF recall/precision calculations: ",
length(unique(score.CF.test.sub$question_id)),
"\n" ))
cat( paste0( "Magic number for BM25 recall/precision calculations: ",
length(unique(score.test.sub$question_id)),
"\n" ))
| /src/calculate_question_count.R | no_license | arturosaco/StackOverflowRecommendations | R | false | false | 1,570 | r | #!/usr/bin/env Rscript
library(plyr)
root.path <- "output1of3"
score.matrix.path <- paste(root.path, "matrix.csv", sep="/")
train.questions.path <- paste(root.path, "itemID_train.txt", sep="/")
test.questions.path <- paste(root.path, "itemID_test.txt", sep="/")
train.users.path <- paste(root.path, "userID_train.txt", sep="/")
test.users.path <- paste(root.path, "userID_test.txt", sep="/")
score <- read.csv(file = score.matrix.path)
train.questions <- read.table(file = train.questions.path)[,1]
test.questions <- read.table(file = test.questions.path)[,1]
train.users <- read.table(file = train.users.path)[,1]
test.users <- read.table(file = test.users.path)[,1]
score.train <- score[score$question_id %in% train.questions, ]
score.test <- score[score$question_id %in% test.questions, ]
score.CF.test <- score.test[score.test$user_id %in% test.users, ]
train.num.qs <- tapply(score.train$question_id, score.train$user_id, length)
score.train.sub <- score.train[score.train$user_id %in% names(train.num.qs)[train.num.qs >= 4],]
score.test.sub <- score.test[score.test$user_id %in% names(train.num.qs)[train.num.qs >= 4],]
score.CF.test.sub <- score.CF.test[score.CF.test$user_id %in% names(train.num.qs)[train.num.qs >= 4],]
cat( paste0( "Magic number for CF recall/precision calculations: ",
length(unique(score.CF.test.sub$question_id)),
"\n" ))
cat( paste0( "Magic number for BM25 recall/precision calculations: ",
length(unique(score.test.sub$question_id)),
"\n" ))
|
#' Get Air Resistance
#'
#' \code{get_air_resistance} estimates air resitance in Newtons
#'
#' @param velocity Instantaneous running velocity in meters per second (m/s)
#' @param bodymass In kilograms (kg)
#' @param bodyheight In meters (m)
#' @param barometric_pressure In Torrs
#' @param air_temperature In Celzius (C)
#' @param wind_velocity In meters per second (m/s). Use negative number as head
#' wind, and positive number as back wind
#' @return Air resistance in Newtons (N)
#' @export
#' @examples
#' get_air_resistance(
#' velocity = 5,
#' bodymass = 80,
#' bodyheight = 1.90,
#' barometric_pressure = 760,
#' air_temperature = 16,
#' wind_velocity = -0.5
#' )
#' @references
#' Arsac LM, Locatelli E. 2002. Modeling the energetics of 100-m running by using speed curves of
#' world champions. Journal of Applied Physiology 92:1781–1788.
#' DOI: 10.1152/japplphysiol.00754.2001.
#'
#' Samozino P, Rabita G, Dorel S, Slawinski J, Peyrot N, Saez de Villarreal E, Morin J-B. 2016.
#' A simple method for measuring power, force, velocity properties, and mechanical
#' effectiveness in sprint running: Simple method to compute sprint mechanics.
#' Scandinavian Journal of Medicine & Science in Sports 26:648–658. DOI: 10.1111/sms.12490.
#'
#' van Ingen Schenau GJ, Jacobs R, de Koning JJ. 1991. Can cycle power predict sprint running
#' performance? European Journal of Applied Physiology and Occupational Physiology 63:255–260.
#' DOI: 10.1007/BF00233857.
get_air_resistance <- function(velocity,
bodymass = 75,
bodyheight = 1.75,
barometric_pressure = 760,
air_temperature = 25,
wind_velocity = 0) {
air_density <- 1.293 * (barometric_pressure/760) * (273/(273 + air_temperature))
frontal_area <- (0.2025 * (bodyheight^0.725) * (bodymass^0.425)) * 0.266
drag_coefficient <- 0.9
k <- 0.5 * air_density * frontal_area * drag_coefficient
# Return air resistance
k * (velocity - wind_velocity)^2
}
| /R/get_air_resistance.R | permissive | Arielnasc/shorts | R | false | false | 2,162 | r | #' Get Air Resistance
#'
#' \code{get_air_resistance} estimates air resitance in Newtons
#'
#' @param velocity Instantaneous running velocity in meters per second (m/s)
#' @param bodymass In kilograms (kg)
#' @param bodyheight In meters (m)
#' @param barometric_pressure In Torrs
#' @param air_temperature In Celzius (C)
#' @param wind_velocity In meters per second (m/s). Use negative number as head
#' wind, and positive number as back wind
#' @return Air resistance in Newtons (N)
#' @export
#' @examples
#' get_air_resistance(
#' velocity = 5,
#' bodymass = 80,
#' bodyheight = 1.90,
#' barometric_pressure = 760,
#' air_temperature = 16,
#' wind_velocity = -0.5
#' )
#' @references
#' Arsac LM, Locatelli E. 2002. Modeling the energetics of 100-m running by using speed curves of
#' world champions. Journal of Applied Physiology 92:1781–1788.
#' DOI: 10.1152/japplphysiol.00754.2001.
#'
#' Samozino P, Rabita G, Dorel S, Slawinski J, Peyrot N, Saez de Villarreal E, Morin J-B. 2016.
#' A simple method for measuring power, force, velocity properties, and mechanical
#' effectiveness in sprint running: Simple method to compute sprint mechanics.
#' Scandinavian Journal of Medicine & Science in Sports 26:648–658. DOI: 10.1111/sms.12490.
#'
#' van Ingen Schenau GJ, Jacobs R, de Koning JJ. 1991. Can cycle power predict sprint running
#' performance? European Journal of Applied Physiology and Occupational Physiology 63:255–260.
#' DOI: 10.1007/BF00233857.
get_air_resistance <- function(velocity,
bodymass = 75,
bodyheight = 1.75,
barometric_pressure = 760,
air_temperature = 25,
wind_velocity = 0) {
air_density <- 1.293 * (barometric_pressure/760) * (273/(273 + air_temperature))
frontal_area <- (0.2025 * (bodyheight^0.725) * (bodymass^0.425)) * 0.266
drag_coefficient <- 0.9
k <- 0.5 * air_density * frontal_area * drag_coefficient
# Return air resistance
k * (velocity - wind_velocity)^2
}
|
\alias{gFileAttributeMatcherMatchesOnly}
\name{gFileAttributeMatcherMatchesOnly}
\title{gFileAttributeMatcherMatchesOnly}
\description{Checks if a attribute matcher only matches a given attribute. Always
returns \code{FALSE} if "*" was used when creating the matcher.}
\usage{gFileAttributeMatcherMatchesOnly(object, attribute)}
\arguments{
\item{\verb{object}}{a \code{\link{GFileAttributeMatcher}}.}
\item{\verb{attribute}}{a file attribute key.}
}
\value{[logical] \code{TRUE} if the matcher only matches \code{attribute}. \code{FALSE} otherwise.}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
| /RGtk2/man/gFileAttributeMatcherMatchesOnly.Rd | no_license | lawremi/RGtk2 | R | false | false | 622 | rd | \alias{gFileAttributeMatcherMatchesOnly}
\name{gFileAttributeMatcherMatchesOnly}
\title{gFileAttributeMatcherMatchesOnly}
\description{Checks if a attribute matcher only matches a given attribute. Always
returns \code{FALSE} if "*" was used when creating the matcher.}
\usage{gFileAttributeMatcherMatchesOnly(object, attribute)}
\arguments{
\item{\verb{object}}{a \code{\link{GFileAttributeMatcher}}.}
\item{\verb{attribute}}{a file attribute key.}
}
\value{[logical] \code{TRUE} if the matcher only matches \code{attribute}. \code{FALSE} otherwise.}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
################################################################################
# Ita Coin Analysis
################################################################################
# load packages
library(readxl)
library(ggplot2)
library(dplyr)
################################################################################
temp <- tempfile(fileext = ".xlsx")
link <- "https://www.bancaditalia.it/statistiche/tematiche/indicatori/indicatore-ciclico-coincidente/Itacoin_it.xlsx"
download.file(link, temp, mode = "wb")
Ita_Coin <- read_excel(temp, skip = 1) %>%
mutate(Date = as.Date(as.character(Date)))
################################################################################
# Ita Coin is a "nowcasted" GDP proxy, which might be useful to analyze Italy's
# short term economic evolution. Here wer just download the data and plot some
# of it. A deeper explenation is available on the Bank of Italy's website.
# plot Ita Coin Data
ggplot(Ita_Coin %>%
filter(Date > "2015-01-01"),
aes(x = Date, y = `Ita-coin`)) +
ggtitle("Entwicklung Ita Coin") +
geom_point(color = "orangered") +
geom_line(color = "steelblue") +
theme_minimal() +
geom_hline(yintercept = 0, linetype = "dashed", color = "black")
################################################################################
| /Programmatic_Download_Bancit_ECB/Ita_Coin.R | no_license | tom-finance/R_Web-Scraping | R | false | false | 1,327 | r | ################################################################################
# Ita Coin Analysis
################################################################################
# load packages
library(readxl)
library(ggplot2)
library(dplyr)
################################################################################
temp <- tempfile(fileext = ".xlsx")
link <- "https://www.bancaditalia.it/statistiche/tematiche/indicatori/indicatore-ciclico-coincidente/Itacoin_it.xlsx"
download.file(link, temp, mode = "wb")
Ita_Coin <- read_excel(temp, skip = 1) %>%
mutate(Date = as.Date(as.character(Date)))
################################################################################
# Ita Coin is a "nowcasted" GDP proxy, which might be useful to analyze Italy's
# short term economic evolution. Here wer just download the data and plot some
# of it. A deeper explenation is available on the Bank of Italy's website.
# plot Ita Coin Data
ggplot(Ita_Coin %>%
filter(Date > "2015-01-01"),
aes(x = Date, y = `Ita-coin`)) +
ggtitle("Entwicklung Ita Coin") +
geom_point(color = "orangered") +
geom_line(color = "steelblue") +
theme_minimal() +
geom_hline(yintercept = 0, linetype = "dashed", color = "black")
################################################################################
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mapleaflet.R
\name{map_leaf}
\alias{map_leaf}
\title{Make an interactive map locally}
\usage{
map_leaf(input, lat = NULL, lon = NULL, basemap = "Stamen.Toner",
...)
}
\arguments{
\item{input}{Input object}
\item{lat}{Name of latitude variable}
\item{lon}{Name of longitude variable}
\item{basemap}{Basemap to use. See \code{\link[leaflet]{addProviderTiles}}.
Default: \code{Stamen.Toner}}
\item{...}{Further arguments passed on to \code{\link[leaflet]{addPolygons}},
\code{\link[leaflet]{addMarkers}}, \code{\link[leaflet]{addGeoJSON}}, or
\code{\link[leaflet]{addPolylines}}}
}
\description{
Make an interactive map locally
}
\examples{
\dontrun{
# We'll need leaflet below
library("leaflet")
# From file
file <- "myfile.geojson"
geojson_write(us_cities[1:20, ], lat='lat', lon='long', file = file)
map_leaf(as.location(file))
# From SpatialPoints class
library("sp")
x <- c(1,2,3,4,20)
y <- c(3,2,5,3,4)
s <- SpatialPoints(cbind(x,y))
map_leaf(s)
# from SpatialPointsDataFrame class
x <- c(1,2,3,4,5)
y <- c(3,2,5,1,4)
s <- SpatialPointsDataFrame(cbind(x,y), mtcars[1:5,])
map_leaf(s)
# from SpatialPolygons class
poly1 <- Polygons(list(Polygon(cbind(c(-100,-90,-85,-100),
c(40,50,45,40)))), "1")
poly2 <- Polygons(list(Polygon(cbind(c(-90,-80,-75,-90),
c(30,40,35,30)))), "2")
sp_poly <- SpatialPolygons(list(poly1, poly2), 1:2)
map_leaf(sp_poly)
# From SpatialPolygonsDataFrame class
sp_polydf <- as(sp_poly, "SpatialPolygonsDataFrame")
map_leaf(sp_poly)
# From SpatialLines class
c1 <- cbind(c(1,2,3), c(3,2,2))
c2 <- cbind(c1[,1]+.05,c1[,2]+.05)
c3 <- cbind(c(1,2,3),c(1,1.5,1))
L1 <- Line(c1)
L2 <- Line(c2)
L3 <- Line(c3)
Ls1 <- Lines(list(L1), ID = "a")
Ls2 <- Lines(list(L2, L3), ID = "b")
sl1 <- SpatialLines(list(Ls1))
sl12 <- SpatialLines(list(Ls1, Ls2))
map_leaf(sl1)
map_leaf(sl12)
# From SpatialLinesDataFrame class
dat <- data.frame(X = c("Blue", "Green"),
Y = c("Train", "Plane"),
Z = c("Road", "River"), row.names = c("a", "b"))
sldf <- SpatialLinesDataFrame(sl12, dat)
map_leaf(sldf)
# From SpatialGrid
x <- GridTopology(c(0,0), c(1,1), c(5,5))
y <- SpatialGrid(x)
map_leaf(y)
# From SpatialGridDataFrame
sgdim <- c(3,4)
sg <- SpatialGrid(GridTopology(rep(0,2), rep(10,2), sgdim))
sgdf <- SpatialGridDataFrame(sg, data.frame(val = 1:12))
map_leaf(sgdf)
# from data.frame
map_leaf(us_cities)
## another example
head(states)
map_leaf(states[1:351, ])
## From a named list
mylist <- list(list(lat=30, long=120, marker="red"),
list(lat=30, long=130, marker="blue"))
map_leaf(mylist, lat="lat", lon="long")
## From an unnamed list
poly <- list(c(-114.345703125,39.436192999314095),
c(-114.345703125,43.45291889355468),
c(-106.61132812499999,43.45291889355468),
c(-106.61132812499999,39.436192999314095),
c(-114.345703125,39.436192999314095))
map_leaf(poly)
## NOTE: Polygons from lists aren't supported yet
# From a json object
map_leaf(geojson_json(c(-99.74, 32.45)))
map_leaf(geojson_json(c(-119, 45)))
map_leaf(geojson_json(c(-99.74, 32.45)))
## another example
map_leaf(geojson_json(us_cities[1:10,], lat='lat', lon='long'))
# From a geo_list object
(res <- geojson_list(us_cities[1:2,], lat='lat', lon='long'))
map_leaf(res)
# From SpatialPixels
pixels <- suppressWarnings(SpatialPixels(SpatialPoints(us_cities[c("long", "lat")])))
summary(pixels)
map_leaf(pixels)
# From SpatialPixelsDataFrame
pixelsdf <- suppressWarnings(
SpatialPixelsDataFrame(points = canada_cities[c("long", "lat")], data = canada_cities)
)
map_leaf(pixelsdf)
# From SpatialRings
library("rgeos")
r1 <- Ring(cbind(x=c(1,1,2,2,1), y=c(1,2,2,1,1)), ID="1")
r2 <- Ring(cbind(x=c(1,1,2,2,1), y=c(1,2,2,1,1)), ID="2")
r1r2 <- SpatialRings(list(r1, r2))
map_leaf(r1r2)
# From SpatialRingsDataFrame
dat <- data.frame(id = c(1,2), value = 3:4)
r1r2df <- SpatialRingsDataFrame(r1r2, data = dat)
map_leaf(r1r2df)
# basemap toggling ------------------------
map_leaf(us_cities, basemap = "Acetate.terrain")
map_leaf(us_cities, basemap = "CartoDB.Positron")
map_leaf(us_cities, basemap = "OpenTopoMap")
# leaflet options ------------------------
map_leaf(us_cities) \%>\%
addPopups(-122.327298, 47.597131, "foo bar", options = popupOptions(closeButton = FALSE))
####### not working yet
# From a numeric vector
## of length 2 to a point
## vec <- c(-99.74,32.45)
## map_leaf(vec)
}
}
| /man/map_leaf.Rd | permissive | Zwens/geojsonio | R | false | true | 4,481 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mapleaflet.R
\name{map_leaf}
\alias{map_leaf}
\title{Make an interactive map locally}
\usage{
map_leaf(input, lat = NULL, lon = NULL, basemap = "Stamen.Toner",
...)
}
\arguments{
\item{input}{Input object}
\item{lat}{Name of latitude variable}
\item{lon}{Name of longitude variable}
\item{basemap}{Basemap to use. See \code{\link[leaflet]{addProviderTiles}}.
Default: \code{Stamen.Toner}}
\item{...}{Further arguments passed on to \code{\link[leaflet]{addPolygons}},
\code{\link[leaflet]{addMarkers}}, \code{\link[leaflet]{addGeoJSON}}, or
\code{\link[leaflet]{addPolylines}}}
}
\description{
Make an interactive map locally
}
\examples{
\dontrun{
# We'll need leaflet below
library("leaflet")
# From file
file <- "myfile.geojson"
geojson_write(us_cities[1:20, ], lat='lat', lon='long', file = file)
map_leaf(as.location(file))
# From SpatialPoints class
library("sp")
x <- c(1,2,3,4,20)
y <- c(3,2,5,3,4)
s <- SpatialPoints(cbind(x,y))
map_leaf(s)
# from SpatialPointsDataFrame class
x <- c(1,2,3,4,5)
y <- c(3,2,5,1,4)
s <- SpatialPointsDataFrame(cbind(x,y), mtcars[1:5,])
map_leaf(s)
# from SpatialPolygons class
poly1 <- Polygons(list(Polygon(cbind(c(-100,-90,-85,-100),
c(40,50,45,40)))), "1")
poly2 <- Polygons(list(Polygon(cbind(c(-90,-80,-75,-90),
c(30,40,35,30)))), "2")
sp_poly <- SpatialPolygons(list(poly1, poly2), 1:2)
map_leaf(sp_poly)
# From SpatialPolygonsDataFrame class
sp_polydf <- as(sp_poly, "SpatialPolygonsDataFrame")
map_leaf(sp_poly)
# From SpatialLines class
c1 <- cbind(c(1,2,3), c(3,2,2))
c2 <- cbind(c1[,1]+.05,c1[,2]+.05)
c3 <- cbind(c(1,2,3),c(1,1.5,1))
L1 <- Line(c1)
L2 <- Line(c2)
L3 <- Line(c3)
Ls1 <- Lines(list(L1), ID = "a")
Ls2 <- Lines(list(L2, L3), ID = "b")
sl1 <- SpatialLines(list(Ls1))
sl12 <- SpatialLines(list(Ls1, Ls2))
map_leaf(sl1)
map_leaf(sl12)
# From SpatialLinesDataFrame class
dat <- data.frame(X = c("Blue", "Green"),
Y = c("Train", "Plane"),
Z = c("Road", "River"), row.names = c("a", "b"))
sldf <- SpatialLinesDataFrame(sl12, dat)
map_leaf(sldf)
# From SpatialGrid
x <- GridTopology(c(0,0), c(1,1), c(5,5))
y <- SpatialGrid(x)
map_leaf(y)
# From SpatialGridDataFrame
sgdim <- c(3,4)
sg <- SpatialGrid(GridTopology(rep(0,2), rep(10,2), sgdim))
sgdf <- SpatialGridDataFrame(sg, data.frame(val = 1:12))
map_leaf(sgdf)
# from data.frame
map_leaf(us_cities)
## another example
head(states)
map_leaf(states[1:351, ])
## From a named list
mylist <- list(list(lat=30, long=120, marker="red"),
list(lat=30, long=130, marker="blue"))
map_leaf(mylist, lat="lat", lon="long")
## From an unnamed list
poly <- list(c(-114.345703125,39.436192999314095),
c(-114.345703125,43.45291889355468),
c(-106.61132812499999,43.45291889355468),
c(-106.61132812499999,39.436192999314095),
c(-114.345703125,39.436192999314095))
map_leaf(poly)
## NOTE: Polygons from lists aren't supported yet
# From a json object
map_leaf(geojson_json(c(-99.74, 32.45)))
map_leaf(geojson_json(c(-119, 45)))
map_leaf(geojson_json(c(-99.74, 32.45)))
## another example
map_leaf(geojson_json(us_cities[1:10,], lat='lat', lon='long'))
# From a geo_list object
(res <- geojson_list(us_cities[1:2,], lat='lat', lon='long'))
map_leaf(res)
# From SpatialPixels
pixels <- suppressWarnings(SpatialPixels(SpatialPoints(us_cities[c("long", "lat")])))
summary(pixels)
map_leaf(pixels)
# From SpatialPixelsDataFrame
pixelsdf <- suppressWarnings(
SpatialPixelsDataFrame(points = canada_cities[c("long", "lat")], data = canada_cities)
)
map_leaf(pixelsdf)
# From SpatialRings
library("rgeos")
r1 <- Ring(cbind(x=c(1,1,2,2,1), y=c(1,2,2,1,1)), ID="1")
r2 <- Ring(cbind(x=c(1,1,2,2,1), y=c(1,2,2,1,1)), ID="2")
r1r2 <- SpatialRings(list(r1, r2))
map_leaf(r1r2)
# From SpatialRingsDataFrame
dat <- data.frame(id = c(1,2), value = 3:4)
r1r2df <- SpatialRingsDataFrame(r1r2, data = dat)
map_leaf(r1r2df)
# basemap toggling ------------------------
map_leaf(us_cities, basemap = "Acetate.terrain")
map_leaf(us_cities, basemap = "CartoDB.Positron")
map_leaf(us_cities, basemap = "OpenTopoMap")
# leaflet options ------------------------
map_leaf(us_cities) \%>\%
addPopups(-122.327298, 47.597131, "foo bar", options = popupOptions(closeButton = FALSE))
####### not working yet
# From a numeric vector
## of length 2 to a point
## vec <- c(-99.74,32.45)
## map_leaf(vec)
}
}
|
\name{hpcc.data.layout}
\alias{hpcc.data.layout}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
title
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
hpcc.data.layout(logicalfilename)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{logicalfilename}{
%% ~~Describe \code{logicalfilename} here~~
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (logicalfilename)
{
out.struct <- ""
body <- paste("<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <soap:Envelope xmlns:soap=\"http://schemas.xmlsoap.org/soap/envelope/\"\n xmlns:SOAP-ENC=\"http://schemas.xmlsoap.org/soap/encoding/\"\n xmlns=\"urn:hpccsystems:ws:wsdfu\">\n <soap:Body>\n <DFUInfoRequest>\n <Name>",
logicalfilename, "</Name>\n </DFUInfoRequest>\n </soap:Body>\n </soap:Envelope>")
headerFields = c(Accept = "text/xml", Accept = "multipart/*",
`Content-Type` = "text/xml; charset=utf-8", SOAPAction = "urn:hpccsystems:ws:wsdfu")
reader = basicTextGatherer()
handle = getCurlHandle()
url <- .uUrlHpcc
curlPerform(url = url, httpheader = headerFields, ssl.verifypeer = FALSE,
postfields = body, writefunction = reader$update, curl = handle)
status = getCurlInfo(handle)$response.code
if (status >= 200 && status <= 300) {
sResponse <- reader$value()
responseXml <- xmlParse(sResponse)
layout <- getNodeSet(responseXml, "//*[local-name()='Ecl']/text()",
namespaces = xmlNamespaceDefinitions(responseXml,
simplify = TRUE))
if (length(layout) > 0) {
colLayout <- layout[[1]]
out.struct <- xmlToList(colLayout, addAttributes = TRUE)
}
}
return(out.struct)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
| /man/hpcc.data.layout.Rd | no_license | Saulus/rHpcc | R | false | false | 2,736 | rd | \name{hpcc.data.layout}
\alias{hpcc.data.layout}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
title
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
hpcc.data.layout(logicalfilename)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{logicalfilename}{
%% ~~Describe \code{logicalfilename} here~~
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (logicalfilename)
{
out.struct <- ""
body <- paste("<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <soap:Envelope xmlns:soap=\"http://schemas.xmlsoap.org/soap/envelope/\"\n xmlns:SOAP-ENC=\"http://schemas.xmlsoap.org/soap/encoding/\"\n xmlns=\"urn:hpccsystems:ws:wsdfu\">\n <soap:Body>\n <DFUInfoRequest>\n <Name>",
logicalfilename, "</Name>\n </DFUInfoRequest>\n </soap:Body>\n </soap:Envelope>")
headerFields = c(Accept = "text/xml", Accept = "multipart/*",
`Content-Type` = "text/xml; charset=utf-8", SOAPAction = "urn:hpccsystems:ws:wsdfu")
reader = basicTextGatherer()
handle = getCurlHandle()
url <- .uUrlHpcc
curlPerform(url = url, httpheader = headerFields, ssl.verifypeer = FALSE,
postfields = body, writefunction = reader$update, curl = handle)
status = getCurlInfo(handle)$response.code
if (status >= 200 && status <= 300) {
sResponse <- reader$value()
responseXml <- xmlParse(sResponse)
layout <- getNodeSet(responseXml, "//*[local-name()='Ecl']/text()",
namespaces = xmlNamespaceDefinitions(responseXml,
simplify = TRUE))
if (length(layout) > 0) {
colLayout <- layout[[1]]
out.struct <- xmlToList(colLayout, addAttributes = TRUE)
}
}
return(out.struct)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
## ----setup, cache=TRUE, echo=FALSE, message=FALSE, warning=FALSE---------
# setwd("C:/Users/Jon/Dropbox/immig_exp")
# rm(list = ls())
# setwd("C:/Users/Jon/Documents/immigrant_labor_competition/pre_analysis_plan")
# Point LaTeX (invoked via knitr) at the working directory so local
# .tex/.bib/.bst files are found during document compilation.
Sys.setenv(TEXINPUTS=getwd(),
           BIBINPUTS=getwd(),
           BSTINPUTS=getwd())
# MASS: polr() ordered models; haven: read_spss()/read_dta(); knitr: kable().
library(MASS)
library(haven)
library(knitr)
labelDataset <- function(data) {
correctLabel <- function(x) {
if(!is.null(attributes(x)$labels)) {
class(attributes(x)$labels) <- typeof(x)
}
return(x)
}
for(i in colnames(data)) {
data[, i] <- correctLabel(data[, i])
}
return(data)
}
### Function adjusted from ocME function in the erer package. The original function did not support interaction terms
### Computes marginal effects, evaluated at the means of the regressors, for
### an ordered logit/probit model fitted with MASS::polr. Returns an
### "ocME"-classed list whose $out element holds one effect/SE/t/p matrix per
### outcome level plus an "all" matrix of effects.
### NOTE(review): listn() is supplied by the erer package, so olMFX() must
### not be called before library(erer) has run -- confirm load order.
olMFX <- function(w) {
  # Always apply the discrete-change correction for 0/1 dummy regressors.
  rev.dum <- T
  digits <- 3
  lev <- w$lev
  # J = number of outcome categories of the ordered response.
  J <- length(lev)
  x.name <- attr(x = w$terms, which = "term.labels")
  # x2 <- w$model[, x.name]
  # Drop the response column; keep all regressors.
  x2 <- w$model[, -1]
  # Rebuild the design matrix from the term labels so interaction columns
  # are expanded, then drop the intercept column.
  ww <- paste("~ 1", paste("+", x.name, collapse = " "), collapse = " ")
  x <- model.matrix(as.formula(ww), data = x2)[, -1]
  x.bar <- as.matrix(colMeans(x))
  b.est <- as.matrix(coef(w))
  K <- nrow(b.est)
  # Linear predictor at the regressor means.
  xb <- t(x.bar) %*% b.est
  # Cutpoints padded with +/- 10^6 standing in for +/- Inf.
  z <- c(-10^6, w$zeta, 10^6)
  pfun <- switch(w$method, probit = pnorm, logistic = plogis)
  dfun <- switch(w$method, probit = dnorm, logistic = dlogis)
  # Extend vcov with zero rows/cols for the two padded cutpoints and reorder
  # into (coefficients, all cutpoints) order.
  V2 <- vcov(w)
  V3 <- rbind(cbind(V2, 0, 0), 0, 0)
  ind <- c(1:K, nrow(V3) - 1, (K + 1):(K + J - 1), nrow(V3))
  V4 <- V3[ind, ]
  V5 <- V4[, ind]
  # Marginal effects: beta * (f(z_j - xb) - f(z_{j+1} - xb)) per level.
  f.xb <- dfun(z[1:J] - xb) - dfun(z[2:(J + 1)] - xb)
  me <- b.est %*% matrix(data = f.xb, nrow = 1)
  colnames(me) <- paste("effect", lev, sep = ".")
  # Delta-method standard errors, one column per outcome level.
  se <- matrix(0, nrow = K, ncol = J)
  for (j in 1:J) {
    u1 <- c(z[j] - xb)
    u2 <- c(z[j + 1] - xb)
    if (w$method == "probit") {
      s1 <- -u1
      s2 <- -u2
    }
    else {
      s1 <- 1 - 2 * pfun(u1)
      s2 <- 1 - 2 * pfun(u2)
    }
    # Gradient of the marginal effect w.r.t. (beta, z_j, z_{j+1}).
    d1 <- dfun(u1) * (diag(1, K, K) - s1 * (b.est %*% t(x.bar)))
    d2 <- -1 * dfun(u2) * (diag(1, K, K) - s2 * (b.est %*%
      t(x.bar)))
    q1 <- dfun(u1) * s1 * b.est
    q2 <- -1 * dfun(u2) * s2 * b.est
    dr <- cbind(d1 + d2, q1, q2)
    V <- V5[c(1:K, K + j, K + j + 1), c(1:K, K + j, K + j +
      1)]
    cova <- dr %*% V %*% t(dr)
    se[, j] <- sqrt(diag(cova))
  }
  colnames(se) <- paste("SE", lev, sep = ".")
  rownames(se) <- colnames(x)
  # For 0/1 dummy regressors, replace the derivative-based effect with the
  # discrete change in predicted probability from 0 to 1 (other regressors
  # held at their means), with matching delta-method SEs.
  if (rev.dum) {
    for (k in 1:K) {
      if (identical(sort(unique(x[, k])), c(0, 1))) {
        for (j in 1:J) {
          x.d1 <- x.bar
          x.d1[k, 1] <- 1
          x.d0 <- x.bar
          x.d0[k, 1] <- 0
          ua1 <- z[j] - t(x.d1) %*% b.est
          ub1 <- z[j + 1] - t(x.d1) %*% b.est
          ua0 <- z[j] - t(x.d0) %*% b.est
          ub0 <- z[j + 1] - t(x.d0) %*% b.est
          me[k, j] <- pfun(ub1) - pfun(ua1) - (pfun(ub0) -
            pfun(ua0))
          d1 <- (dfun(ua1) - dfun(ub1)) %*% t(x.d1) -
            (dfun(ua0) - dfun(ub0)) %*% t(x.d0)
          q1 <- -dfun(ua1) + dfun(ua0)
          q2 <- dfun(ub1) - dfun(ub0)
          dr <- cbind(d1, q1, q2)
          V <- V5[c(1:K, K + j, K + j + 1), c(1:K, K +
            j, K + j + 1)]
          se[k, j] <- sqrt(c(dr %*% V %*% t(dr)))
        }
      }
    }
  }
  t.value <- me/se
  p.value <- 2 * (1 - pt(abs(t.value), w$df.residual))
  # Assemble per-level result tables rounded to `digits`.
  out <- list()
  for (j in 1:J) {
    out[[j]] <- round(cbind(effect = me[, j], error = se[,j], t.value = t.value[, j], p.value = p.value[, j]),
      digits)
  }
  out[[J + 1]] <- round(me, digits)
  names(out) <- paste("ME", c(lev, "all"), sep = ".")
  # listn() (from erer) bundles the fitted model and the effect tables.
  result <- listn(w, out)
  class(result) <- "ocME"
  return(result)
}
# --- Data assembly ---
# Survey wave containing the embedded experiment (haven keeps value labels).
immig <- read_spss("BES2016_wave7.sav")
# Earlier BES panel variables, merged onto the experiment by respondent id.
bes.data <- read_dta("bes_immig.dta")
bes.data <- labelDataset(bes.data)
# Randomized job pairs shown to each respondent.
jobs <- read.csv("job_assignments_final.csv",
                 stringsAsFactors = FALSE)
# jobs[match(immig$id, jobs$id), c("job.1", "job.2")]==immig[, c("Job1", "Job2")]
# Coded occupation strings; exptext holds each respondent's own job.
orig.occs <- read.csv("occsmall2_coded.csv", stringsAsFactors = FALSE)
immig$own.job <- orig.occs[match(immig$id, orig.occs$id), "exptext"]
# Outcome: agreement that more immigrants should be allowed; "Don't know"
# responses are set to missing for the main DV.
immig$dv <- as_factor(immig$immigExpDV)
# immig$dv.dk <- as_factor(immig$immigExpDV)
immig$dv[immig$dv=="Don't know"] <- NA
# Keep only respondents with a coded own occupation.
immig <- immig[!is.na(immig$own.job), ]
immig <- merge(immig, bes.data, by = "id", all.x = TRUE)
immig <- merge(immig, jobs, by= "id")
# Treatment flag: the second job shown matches the respondent's own job.
immig$treatment <- immig$own.job==immig$job.2
# 997 is a non-substantive code, recoded to missing on both manipulation
# checks -- presumably "don't know"; confirm against the codebook.
immig$immigManipCheck[immig$immigManipCheck==997] <- NA
immig$immigManipCheck2[immig$immigManipCheck2==997] <- NA
rm(bes.data)
# Column percentages of the manipulation check by treatment status.
manip.check.table <- table(immig$immigManipCheck, immig$treatment)
colnames(manip.check.table) <- c("Control groups", "Own-job Treatment")
manip.check.table <- round(prop.table(manip.check.table, 2) * 100, 1)
library(Hmisc, quietly = TRUE)
# chisq.test(y = immig$immigManipCheck, x = immig$treatment)
low.skill.jobs <- c("waiters", "drivers", "receptionists",
"shop assistants", "carers", "cleaners")
high.skill.jobs <- c("senior managers", "doctors",
"lawyers", "lecturers", "engineers",
"programmers")
immig$skill.1 <- NA
immig$skill.1[immig$job.1 %in% low.skill.jobs] <- "Low"
immig$skill.1[immig$job.1 %in% high.skill.jobs] <- "High"
immig$skill.2 <- NA
immig$skill.2[immig$job.2 %in% low.skill.jobs & !immig$treatment] <- "Low"
immig$skill.2[immig$job.2 %in% high.skill.jobs & !immig$treatment] <- "High"
immig$skill.2[immig$treatment] <- "treatment"
immig$group <- NA
immig$group[immig$skill.1=="Low" & immig$skill.2=="Low"] <- "Low"
immig$group[immig$skill.1=="High" & immig$skill.2=="High"] <- "High"
immig$group[(immig$skill.1=="High" & immig$skill.2=="Low") |
(immig$skill.1=="Low" & immig$skill.2=="High")] <- "Mixed"
immig$group[immig$skill.1=="Low" & immig$treatment] <- "low-treat"
immig$group[immig$skill.1=="High" & immig$treatment] <- "high-treat"
immig$edlevel1 <- NA
immig$edlevel1[immig$edlevel==0] <- 1
immig$edlevel1[immig$edlevel==1] <- 2
immig$edlevel1[immig$edlevel==2] <- 2
immig$edlevel1[immig$edlevel==3] <- 3
immig$edlevel1[immig$edlevel==4] <- 4
immig$edlevel1[immig$edlevel==5] <- 4
immig$edlevel1 <- factor(immig$edlevel1, labels = c("None", "GCSE", "A-Level", "Degree"))
immig$edlevel2 <- NA
immig$edlevel2[immig$edlevel1 %in% c("None", "GCSE")] <- "Low Ed"
immig$edlevel2[immig$edlevel1 %in% c("A-Level")] <- "Med Ed"
immig$edlevel2[immig$edlevel1 %in% c("Degree")] <- "High Ed"
library(ggplot2)
library(reshape)
immig$dv <- factor(immig$dv)
immig$edlevel2 <- as.factor(immig$edlevel2)
immig$dv <- factor(immig$dv)
immig$Treatment <- as.numeric(immig$treatment)
immig.replicate <- immig[immig$group %in% c("High", "Mixed", "Low"), ]
immig.replicate$dv.dk <- immig.replicate$dv
nice.names <- matrix(c("edlevel2Low Ed" , "Low education",
"edlevel2Med Ed", "Medium education",
"skill.1Low", "Job 1: low skill",
"skill.2Low", "Job 2: Low skill",
"skill.2treatment", "Job 2: own job",
"edlevel2Low Ed:skill.2Low", "Low ed. * low skill job",
"edlevel2Med Ed:skill.2Low", "Medium ed. * low skill job",
"edlevel2Low Ed:skill.2treatment", "Low ed. * own job",
"edlevel2Med Ed:skill.2treatment", "Medium ed. * own job",
"groupMixed", "Treatment: low/high",
"groupHigh", "Treatment: both high",
"edlevel2Low Ed:groupMixed", "Low ed. * low/high",
"groupMixed:edlevel2Low Ed", "Low ed. * low/high",
"edlevel2Med Ed:groupMixed", "Medium ed. * low/high",
"edlevel2Low Ed:groupHigh", "Low ed. * both high",
"edlevel2Med Ed:groupHigh", "Medium ed. * both high",
"groupLow:edlevel2Low Ed" , "Low ed. * both low",
"groupLow:edlevel2Med Ed", "Medium ed. * both low",
"groupMixed:edlevel2Med Ed", "Medium ed. * low/high",
"edlevel2Low Ed:groupLow", "Low ed. * both low",
"edlevel2Med Ed:groupLow", "Medium ed. * both low",
"groupLow", "Treatment: both low",
"grouphigh-treat", "Treatment: high/ own job",
"grouplow-treat", "Treatment: low/ own job") ,
byrow = TRUE, ncol = 2)
colnames(nice.names) <- c("model", "nice")
nice.names <- data.frame(nice.names, stringsAsFactors = FALSE)
nice.order <- c("Treatment: high/ own job",
"Treatment: low/ own job",
"Treatment: both high",
"Treatment: low/high",
"Treatment: both low",
"Job 2: high skill",
"Job 2: own job",
"Job 2: Low skill",
"Job 1: high skill",
"Job 1: low skill",
"High education",
"Medium education",
"Low education",
"Medium ed. * both high",
"Low ed. * both high",
"Medium ed. * low/high",
"Low ed. * low/high",
"Medium ed. * both low",
"Low ed. * both low",
"Medium ed. * own job",
"Low ed. * own job",
"Medium ed. * low skill job",
"Low ed. * low skill job")
# Translate raw model coefficient names into human-readable labels via the
# nice.names lookup table (model -> nice). Unmatched names come back as NA.
makeNice <- function(x) {
  idx <- match(x, nice.names$model)
  nice.names$nice[idx]
}
# Turn label strings into a factor ordered by the reversed nice.order vector
# (so coefficient plots read top-to-bottom); the second factor() call drops
# levels that do not occur in x.
makeNiceFactor <- function(x) {
  ordered.levels <- rev(nice.order)
  factor(factor(x, levels = ordered.levels))
}
immig.replicate$group <- factor(immig.replicate$group,
levels = c("High", "Mixed", "Low"))
## ----assignments, cache=TRUE, eval=TRUE, dpi=100, echo=FALSE, results='asis', message=FALSE, warning=FALSE----
all.assign <- table(immig$group)
assign.table <- data.frame(matrix(c("Low skilled/own occupation", "25", all.assign["low-treat"],
"High skilled/own occupation", "25", all.assign["high-treat"],
"Both high skilled", "12.5", all.assign["High"],
"Both low skilled", "12.5", all.assign["Low"],
"Low skilled and high skilled", "25", all.assign["Mixed"]), byrow = TRUE, ncol = 3), stringsAsFactors = FALSE)
colnames(assign.table) <- c("Assignment", "% in limit", "Cases assigned")
assign.table$`% assigned` <- round(prop.table(as.numeric(assign.table$`Cases assigned`)) * 100, 1)
library(xtable)
xtable(assign.table, caption = "Experimental assignments", label = "table:assignment")
## ----mainLabMarketDisplay, cache=TRUE, eval=TRUE, dpi=100, echo=FALSE, results='asis', message=FALSE, warning=FALSE----
library(pander)
library(texreg)
library(erer)
library(MASS)
immig.main.reg <- polr(data = immig, formula = dv~ edlevel2 +
edlevel2:skill.2 + skill.1 + skill.2)
texreg(immig.main.reg, caption = "Ordered logistic regression predicting acceptance of immigrants. (Reference categories: high education, Job 1: high, skill, and Job 2: high skill)",
label = "table:mainLabMarket",
custom.coef.names = makeNice(names(immig.main.reg$coefficients)))
## ----mainLabMarketMFX, cache=TRUE, eval=TRUE, dpi=100, echo=FALSE, results='asis', message=FALSE, warning=FALSE, fig.cap = "Effects of labor market competition treatment on probability of `strongly disagreeing' that more immigrants should be allowed to come to Britain"----
mfx.main <- olMFX(immig.main.reg)
mfx.df <- data.frame(mfx= mfx.main$out$`ME.Strongly disagree`[, "effect"],
se = mfx.main$out$`ME.Strongly disagree`[, "error"])
mfx.df$u.ci <- mfx.df$mfx + (1.96 * mfx.df$se)
mfx.df$l.ci <- mfx.df$mfx - (1.96 * mfx.df$se)
mfx.df$var <- makeNice(rownames(mfx.df))
mfx.df <- rbind(mfx.df, NA, NA, NA)
mfx.df$var[is.na(mfx.df$var)] <- c("Job 1: high skill", "High education",
"Job 2: high skill")
mfx.df$var <- makeNiceFactor(mfx.df$var)
mfx.df$mfx[is.na(mfx.df$mfx)] <- 0
ggplot(data = mfx.df, aes(x = var, y = mfx)) + geom_point() +
geom_errorbar(aes(ymax = u.ci, ymin = l.ci), width = 0.25) +
geom_hline(aes(yintercept=0)) + theme_bw() +
xlab("") + ylab("Marginal effect of strongly disagreeing with more immigrants") +
coord_flip()
## ----manipCheckSimple, cache=TRUE, eval=TRUE, dpi=100, echo=FALSE, results='asis', message=FALSE, warning=FALSE----
library(pander)
library(texreg)
immig.manip.compete <- polr(data = immig,
formula = factor(immigManipCheck)~ Treatment)
immig.manip.culture <- polr(data = immig,
formula = factor(immigManipCheck2)~ Treatment)
texreg(list(immig.manip.compete, immig.manip.culture),
caption = "Manipulation checks. Ordered logistic regression predicting worry about job prospects.",
custom.model.names = c("Job prospects", "Cultural threat"),
label = "table:manipulation_checks")
## ----mainReplication, cache=TRUE, eval=TRUE, dpi=100, echo=FALSE, results='asis', message=FALSE, warning=FALSE, fig.cap="Effects of immigrant skill levels on probability of `strongly disagreeing' that more immigrants should be allowed to come to Britain (excludes `own job' treatment group)"----
reg.replicate <- polr(data = immig.replicate, dv ~ edlevel2 + group + group:edlevel2)
replicate.to.plot <- olMFX(reg.replicate)
mfx.df.rep <- data.frame(mfx= replicate.to.plot$out$`ME.Strongly disagree`[, "effect"],
se = replicate.to.plot$out$`ME.Strongly disagree`[, "error"])
mfx.df.rep$u.ci <- mfx.df.rep$mfx + (1.96 * mfx.df.rep$se)
mfx.df.rep$l.ci <- mfx.df.rep$mfx - (1.96 * mfx.df.rep$se)
mfx.df.rep$var <- makeNice(rownames(mfx.df.rep))
mfx.df.rep <- rbind(mfx.df.rep, NA, NA)
mfx.df.rep$var[is.na(mfx.df.rep$var)] <- c("High education", "Treatment: both high")
mfx.df.rep$mfx[is.na(mfx.df.rep$mfx)] <- 0
mfx.df.rep$var <- makeNiceFactor(mfx.df.rep$var)
ggplot(data = mfx.df.rep, aes(x = var, y = mfx)) + geom_point() +
geom_errorbar(aes(ymax = u.ci, ymin = l.ci), width = 0.25) +
geom_hline(aes(yintercept=0)) + theme_bw() +
xlab("") +
ylab("Marginal effect on 'strongly disagreeing' with more immigrants") +
coord_flip()
## ----manipCheckCompete, cache=TRUE, eval=TRUE, dpi=100, echo=FALSE, results='asis', message=FALSE, warning=FALSE, fig.cap="Effects of education and immigrant skill level on probability of respondent being `not at all worried' about their own job prospects (excludes `own job' treatment group)"----
# prop.table(table(immig.replicate$immigManipCheck, immig.replicate$edlevel2), 1)
# immig.replicate$group
large.labmarket.rep <- polr(data = immig.replicate,
formula = factor(immigManipCheck) ~ group + edlevel2 + group:edlevel2)
manip.compet.mfx <- olMFX(large.labmarket.rep)
manip.compet.mfx <- data.frame(manip.compet.mfx$out$ME.1)
manip.compet.mfx$u.ci <- manip.compet.mfx[, "effect"] + (manip.compet.mfx[, "error"] * 1.96)
manip.compet.mfx$l.ci <- manip.compet.mfx[, "effect"] - (manip.compet.mfx[, "error"] * 1.96)
manip.compet.mfx$var <- makeNice(rownames(manip.compet.mfx))
manip.compet.mfx <- rbind(manip.compet.mfx, NA, NA)
manip.compet.mfx$var[is.na(manip.compet.mfx$var)] <-
c("High education", "Treatment: both high")
manip.compet.mfx$var <- makeNiceFactor(manip.compet.mfx$var)
manip.compet.mfx$effect[is.na(manip.compet.mfx$effect)] <- 0
ggplot(data = manip.compet.mfx, aes(x = var, y = effect)) + geom_point() +
geom_errorbar(aes(ymax = u.ci, ymin = l.ci), width = 0.25) +
geom_hline(aes(yintercept=0)) + theme_bw() +
xlab("") + ylab("Marginal effect on probability of respondent feeling 'not at all worried' about their job prospects") + coord_flip()
## ----replicateRegTable, cache=TRUE, eval=TRUE, dpi=100, echo=FALSE, results='asis', message=FALSE, warning=FALSE----
texreg(reg.replicate, caption = "Ordered logistic regression predicting acceptance of immigrants in control groups", label = "table:mainreplication",
custom.coef.names = makeNice(names(reg.replicate$coefficients)))
## ----manipCheckControlGroup, cache=TRUE, eval=TRUE, dpi=100, echo=FALSE, results='asis', message=FALSE, warning=FALSE----
texreg(large.labmarket.rep,
label = "table:manipcheckreplicate",
caption = "Ordered logistic regression predictors of economic threat",
custom.coef.names = makeNice(names(large.labmarket.rep$coefficients)))
## ----balanceCheck, cache=TRUE, eval=TRUE, dpi=100, echo=FALSE, results='asis', message=FALSE, warning=FALSE----
immig$immigSelf[immig$immigSelf==111] <- 0
immig$immigSelf[immig$immigSelf==99] <-NA
immig.att.balance <- polr(data = immig, factor(immigSelf)~group)
ed.balance <- chisq.test(x = immig$edlevel2, y = immig$group)
texreg(immig.att.balance,
label = "table:balanceCheckImmig",
caption = "Ordered logistic regression predictors of immigration attitudes using random groups")
## ----hhFig1, cache=TRUE, eval=TRUE, dpi=100, echo=FALSE, results='asis', message = FALSE, fig.height=5, fig.cap="Bivariate relationship between education and anti-immigrant responses in the experiment (excludes own job experimental group)"----
immig.ed <- prop.table(table(immig.replicate$dv, immig.replicate$edlevel1), 2)
immig.ed<- melt(immig.ed)
colnames(immig.ed) <- c("More immigrants", "Education", "Fraction")
immig.ed$`More immigrants` <- factor(immig.ed$`More immigrants`, levels = levels(immig.replicate$dv))
immig.ed$Education <- factor(immig.ed$Education, levels = levels(immig.replicate$edlevel1))
ggplot(data = immig.ed, aes(x = Education, y = Fraction, group = `More immigrants`,
fill = `More immigrants`)) +
geom_bar(position = "dodge", colour = "black",
stat = "identity") +
theme_bw() +
scale_fill_grey(start = 0, end = 1, na.value = "blue") +
ylab("Fraction") + xlab("Education")
## ----hhFig2, cache=TRUE, eval=TRUE, dpi=100, echo=FALSE, results='asis', message = FALSE, fig.height=5, fig.cap = "Control group support for more immigrants by skill level of immigrants (excludes own job experimental group)"----
### H&H figure 2 replication
counts.replicate <- table(immig.replicate$dv, immig.replicate$group)
counts.replicate <- prop.table(counts.replicate, 2)
counts.replicate <- melt(counts.replicate)
colnames(counts.replicate) <- c("More immigrants", "Treatment", "Count")
counts.replicate$Treatment <- factor(counts.replicate$Treatment, levels = c("Low", "Mixed", "High"))
counts.replicate$`More immigrants` <- factor(counts.replicate$`More immigrants`, levels = levels(immig.replicate$dv))
ggplot(counts.replicate, aes(x = Treatment, group = `More immigrants`, y = Count, fill = `More immigrants`)) + geom_bar(stat = "identity", position = "dodge", colour = "black") +
theme_bw() +
scale_fill_grey(start = 0, end = 1, na.value = "red") +
ylab("Fraction") + xlab("Skill level (within control group)")
kable(manip.check.table, format = "latex", caption = "Level of concern about their own job for respondents in own job group and other groups")
## ----dk.check, cache=TRUE, eval=TRUE, dpi=100, echo=FALSE, results='asis', message=FALSE, warning=FALSE----
# Indicator for answering "don't know" (code 99) on the immigration DV.
immig$dk.binary <- immig$immigExpDV==99
# BUG FIX: the table caption reports a *logistic* regression, but glm()
# defaults to family = gaussian (a linear probability model). Specify the
# binomial family so the fitted model matches the reported caption.
dk.pred <- glm(data = immig, dk.binary ~ group, family = binomial)
texreg(dk.pred,
       label = "table:dk.pred",
       caption = "Logistic regression of reporting don't know to support for immigration across experimental groups", custom.coef.names = makeNice(names(dk.pred$coefficients)))
| /pre_analysis_plan/immig_writeup.R | no_license | jon-mellon/immigrant_labor_competition | R | false | false | 19,901 | r | ## ----setup, cache=TRUE, echo=FALSE, message=FALSE, warning=FALSE---------
# setwd("C:/Users/Jon/Dropbox/immig_exp")
# rm(list = ls())
# setwd("C:/Users/Jon/Documents/immigrant_labor_competition/pre_analysis_plan")
Sys.setenv(TEXINPUTS=getwd(),
BIBINPUTS=getwd(),
BSTINPUTS=getwd())
library(MASS)
library(haven)
library(knitr)
# Function that fixes a common read problem with the Haven package
labelDataset <- function(data) {
correctLabel <- function(x) {
if(!is.null(attributes(x)$labels)) {
class(attributes(x)$labels) <- typeof(x)
}
return(x)
}
for(i in colnames(data)) {
data[, i] <- correctLabel(data[, i])
}
return(data)
}
### Function adjusted from ocME function in the erer package. The original function did not support interaction terms
### Computes marginal effects, evaluated at the means of the regressors, for
### an ordered logit/probit model fitted with MASS::polr. Returns an
### "ocME"-classed list whose $out element holds one effect/SE/t/p matrix per
### outcome level plus an "all" matrix of effects.
### NOTE(review): listn() is supplied by the erer package, so olMFX() must
### not be called before library(erer) has run -- confirm load order.
olMFX <- function(w) {
  # Always apply the discrete-change correction for 0/1 dummy regressors.
  rev.dum <- T
  digits <- 3
  lev <- w$lev
  # J = number of outcome categories of the ordered response.
  J <- length(lev)
  x.name <- attr(x = w$terms, which = "term.labels")
  # x2 <- w$model[, x.name]
  # Drop the response column; keep all regressors.
  x2 <- w$model[, -1]
  # Rebuild the design matrix from the term labels so interaction columns
  # are expanded, then drop the intercept column.
  ww <- paste("~ 1", paste("+", x.name, collapse = " "), collapse = " ")
  x <- model.matrix(as.formula(ww), data = x2)[, -1]
  x.bar <- as.matrix(colMeans(x))
  b.est <- as.matrix(coef(w))
  K <- nrow(b.est)
  # Linear predictor at the regressor means.
  xb <- t(x.bar) %*% b.est
  # Cutpoints padded with +/- 10^6 standing in for +/- Inf.
  z <- c(-10^6, w$zeta, 10^6)
  pfun <- switch(w$method, probit = pnorm, logistic = plogis)
  dfun <- switch(w$method, probit = dnorm, logistic = dlogis)
  # Extend vcov with zero rows/cols for the two padded cutpoints and reorder
  # into (coefficients, all cutpoints) order.
  V2 <- vcov(w)
  V3 <- rbind(cbind(V2, 0, 0), 0, 0)
  ind <- c(1:K, nrow(V3) - 1, (K + 1):(K + J - 1), nrow(V3))
  V4 <- V3[ind, ]
  V5 <- V4[, ind]
  # Marginal effects: beta * (f(z_j - xb) - f(z_{j+1} - xb)) per level.
  f.xb <- dfun(z[1:J] - xb) - dfun(z[2:(J + 1)] - xb)
  me <- b.est %*% matrix(data = f.xb, nrow = 1)
  colnames(me) <- paste("effect", lev, sep = ".")
  # Delta-method standard errors, one column per outcome level.
  se <- matrix(0, nrow = K, ncol = J)
  for (j in 1:J) {
    u1 <- c(z[j] - xb)
    u2 <- c(z[j + 1] - xb)
    if (w$method == "probit") {
      s1 <- -u1
      s2 <- -u2
    }
    else {
      s1 <- 1 - 2 * pfun(u1)
      s2 <- 1 - 2 * pfun(u2)
    }
    # Gradient of the marginal effect w.r.t. (beta, z_j, z_{j+1}).
    d1 <- dfun(u1) * (diag(1, K, K) - s1 * (b.est %*% t(x.bar)))
    d2 <- -1 * dfun(u2) * (diag(1, K, K) - s2 * (b.est %*%
      t(x.bar)))
    q1 <- dfun(u1) * s1 * b.est
    q2 <- -1 * dfun(u2) * s2 * b.est
    dr <- cbind(d1 + d2, q1, q2)
    V <- V5[c(1:K, K + j, K + j + 1), c(1:K, K + j, K + j +
      1)]
    cova <- dr %*% V %*% t(dr)
    se[, j] <- sqrt(diag(cova))
  }
  colnames(se) <- paste("SE", lev, sep = ".")
  rownames(se) <- colnames(x)
  # For 0/1 dummy regressors, replace the derivative-based effect with the
  # discrete change in predicted probability from 0 to 1 (other regressors
  # held at their means), with matching delta-method SEs.
  if (rev.dum) {
    for (k in 1:K) {
      if (identical(sort(unique(x[, k])), c(0, 1))) {
        for (j in 1:J) {
          x.d1 <- x.bar
          x.d1[k, 1] <- 1
          x.d0 <- x.bar
          x.d0[k, 1] <- 0
          ua1 <- z[j] - t(x.d1) %*% b.est
          ub1 <- z[j + 1] - t(x.d1) %*% b.est
          ua0 <- z[j] - t(x.d0) %*% b.est
          ub0 <- z[j + 1] - t(x.d0) %*% b.est
          me[k, j] <- pfun(ub1) - pfun(ua1) - (pfun(ub0) -
            pfun(ua0))
          d1 <- (dfun(ua1) - dfun(ub1)) %*% t(x.d1) -
            (dfun(ua0) - dfun(ub0)) %*% t(x.d0)
          q1 <- -dfun(ua1) + dfun(ua0)
          q2 <- dfun(ub1) - dfun(ub0)
          dr <- cbind(d1, q1, q2)
          V <- V5[c(1:K, K + j, K + j + 1), c(1:K, K +
            j, K + j + 1)]
          se[k, j] <- sqrt(c(dr %*% V %*% t(dr)))
        }
      }
    }
  }
  t.value <- me/se
  p.value <- 2 * (1 - pt(abs(t.value), w$df.residual))
  # Assemble per-level result tables rounded to `digits`.
  out <- list()
  for (j in 1:J) {
    out[[j]] <- round(cbind(effect = me[, j], error = se[,j], t.value = t.value[, j], p.value = p.value[, j]),
      digits)
  }
  out[[J + 1]] <- round(me, digits)
  names(out) <- paste("ME", c(lev, "all"), sep = ".")
  # listn() (from erer) bundles the fitted model and the effect tables.
  result <- listn(w, out)
  class(result) <- "ocME"
  return(result)
}
immig <- read_spss("BES2016_wave7.sav")
bes.data <- read_dta("bes_immig.dta")
bes.data <- labelDataset(bes.data)
jobs <- read.csv("job_assignments_final.csv",
stringsAsFactors = FALSE)
# jobs[match(immig$id, jobs$id), c("job.1", "job.2")]==immig[, c("Job1", "Job2")]
orig.occs <- read.csv("occsmall2_coded.csv", stringsAsFactors = FALSE)
immig$own.job <- orig.occs[match(immig$id, orig.occs$id), "exptext"]
immig$dv <- as_factor(immig$immigExpDV)
# immig$dv.dk <- as_factor(immig$immigExpDV)
immig$dv[immig$dv=="Don't know"] <- NA
immig <- immig[!is.na(immig$own.job), ]
immig <- merge(immig, bes.data, by = "id", all.x = TRUE)
immig <- merge(immig, jobs, by= "id")
immig$treatment <- immig$own.job==immig$job.2
immig$immigManipCheck[immig$immigManipCheck==997] <- NA
immig$immigManipCheck2[immig$immigManipCheck2==997] <- NA
rm(bes.data)
manip.check.table <- table(immig$immigManipCheck, immig$treatment)
colnames(manip.check.table) <- c("Control groups", "Own-job Treatment")
manip.check.table <- round(prop.table(manip.check.table, 2) * 100, 1)
library(Hmisc, quietly = TRUE)
# chisq.test(y = immig$immigManipCheck, x = immig$treatment)
low.skill.jobs <- c("waiters", "drivers", "receptionists",
"shop assistants", "carers", "cleaners")
high.skill.jobs <- c("senior managers", "doctors",
"lawyers", "lecturers", "engineers",
"programmers")
immig$skill.1 <- NA
immig$skill.1[immig$job.1 %in% low.skill.jobs] <- "Low"
immig$skill.1[immig$job.1 %in% high.skill.jobs] <- "High"
immig$skill.2 <- NA
immig$skill.2[immig$job.2 %in% low.skill.jobs & !immig$treatment] <- "Low"
immig$skill.2[immig$job.2 %in% high.skill.jobs & !immig$treatment] <- "High"
immig$skill.2[immig$treatment] <- "treatment"
immig$group <- NA
immig$group[immig$skill.1=="Low" & immig$skill.2=="Low"] <- "Low"
immig$group[immig$skill.1=="High" & immig$skill.2=="High"] <- "High"
immig$group[(immig$skill.1=="High" & immig$skill.2=="Low") |
(immig$skill.1=="Low" & immig$skill.2=="High")] <- "Mixed"
immig$group[immig$skill.1=="Low" & immig$treatment] <- "low-treat"
immig$group[immig$skill.1=="High" & immig$treatment] <- "high-treat"
immig$edlevel1 <- NA
immig$edlevel1[immig$edlevel==0] <- 1
immig$edlevel1[immig$edlevel==1] <- 2
immig$edlevel1[immig$edlevel==2] <- 2
immig$edlevel1[immig$edlevel==3] <- 3
immig$edlevel1[immig$edlevel==4] <- 4
immig$edlevel1[immig$edlevel==5] <- 4
immig$edlevel1 <- factor(immig$edlevel1, labels = c("None", "GCSE", "A-Level", "Degree"))
immig$edlevel2 <- NA
immig$edlevel2[immig$edlevel1 %in% c("None", "GCSE")] <- "Low Ed"
immig$edlevel2[immig$edlevel1 %in% c("A-Level")] <- "Med Ed"
immig$edlevel2[immig$edlevel1 %in% c("Degree")] <- "High Ed"
library(ggplot2)
library(reshape)
immig$dv <- factor(immig$dv)
immig$edlevel2 <- as.factor(immig$edlevel2)
immig$dv <- factor(immig$dv)
immig$Treatment <- as.numeric(immig$treatment)
immig.replicate <- immig[immig$group %in% c("High", "Mixed", "Low"), ]
immig.replicate$dv.dk <- immig.replicate$dv
nice.names <- matrix(c("edlevel2Low Ed" , "Low education",
"edlevel2Med Ed", "Medium education",
"skill.1Low", "Job 1: low skill",
"skill.2Low", "Job 2: Low skill",
"skill.2treatment", "Job 2: own job",
"edlevel2Low Ed:skill.2Low", "Low ed. * low skill job",
"edlevel2Med Ed:skill.2Low", "Medium ed. * low skill job",
"edlevel2Low Ed:skill.2treatment", "Low ed. * own job",
"edlevel2Med Ed:skill.2treatment", "Medium ed. * own job",
"groupMixed", "Treatment: low/high",
"groupHigh", "Treatment: both high",
"edlevel2Low Ed:groupMixed", "Low ed. * low/high",
"groupMixed:edlevel2Low Ed", "Low ed. * low/high",
"edlevel2Med Ed:groupMixed", "Medium ed. * low/high",
"edlevel2Low Ed:groupHigh", "Low ed. * both high",
"edlevel2Med Ed:groupHigh", "Medium ed. * both high",
"groupLow:edlevel2Low Ed" , "Low ed. * both low",
"groupLow:edlevel2Med Ed", "Medium ed. * both low",
"groupMixed:edlevel2Med Ed", "Medium ed. * low/high",
"edlevel2Low Ed:groupLow", "Low ed. * both low",
"edlevel2Med Ed:groupLow", "Medium ed. * both low",
"groupLow", "Treatment: both low",
"grouphigh-treat", "Treatment: high/ own job",
"grouplow-treat", "Treatment: low/ own job") ,
byrow = TRUE, ncol = 2)
colnames(nice.names) <- c("model", "nice")
nice.names <- data.frame(nice.names, stringsAsFactors = FALSE)
nice.order <- c("Treatment: high/ own job",
"Treatment: low/ own job",
"Treatment: both high",
"Treatment: low/high",
"Treatment: both low",
"Job 2: high skill",
"Job 2: own job",
"Job 2: Low skill",
"Job 1: high skill",
"Job 1: low skill",
"High education",
"Medium education",
"Low education",
"Medium ed. * both high",
"Low ed. * both high",
"Medium ed. * low/high",
"Low ed. * low/high",
"Medium ed. * both low",
"Low ed. * both low",
"Medium ed. * own job",
"Low ed. * own job",
"Medium ed. * low skill job",
"Low ed. * low skill job")
# Map raw model coefficient names onto display labels using the nice.names
# lookup table; names without an entry yield NA.
makeNice <- function(x) {
  nice.names$nice[match(x, nice.names$model)]
}
# Build a factor whose level order follows the reversed nice.order vector
# (top-to-bottom plotting order); refactoring drops unused levels.
makeNiceFactor <- function(x) {
  out <- factor(x, levels = rev(nice.order))
  factor(out)
}
immig.replicate$group <- factor(immig.replicate$group,
levels = c("High", "Mixed", "Low"))
## ----assignments, cache=TRUE, eval=TRUE, dpi=100, echo=FALSE, results='asis', message=FALSE, warning=FALSE----
all.assign <- table(immig$group)
assign.table <- data.frame(matrix(c("Low skilled/own occupation", "25", all.assign["low-treat"],
"High skilled/own occupation", "25", all.assign["high-treat"],
"Both high skilled", "12.5", all.assign["High"],
"Both low skilled", "12.5", all.assign["Low"],
"Low skilled and high skilled", "25", all.assign["Mixed"]), byrow = TRUE, ncol = 3), stringsAsFactors = FALSE)
colnames(assign.table) <- c("Assignment", "% in limit", "Cases assigned")
assign.table$`% assigned` <- round(prop.table(as.numeric(assign.table$`Cases assigned`)) * 100, 1)
library(xtable)
xtable(assign.table, caption = "Experimental assignments", label = "table:assignment")
## ----mainLabMarketDisplay, cache=TRUE, eval=TRUE, dpi=100, echo=FALSE, results='asis', message=FALSE, warning=FALSE----
library(pander)
library(texreg)
library(erer)
library(MASS)
immig.main.reg <- polr(data = immig, formula = dv~ edlevel2 +
edlevel2:skill.2 + skill.1 + skill.2)
texreg(immig.main.reg, caption = "Ordered logistic regression predicting acceptance of immigrants. (Reference categories: high education, Job 1: high, skill, and Job 2: high skill)",
label = "table:mainLabMarket",
custom.coef.names = makeNice(names(immig.main.reg$coefficients)))
## ----mainLabMarketMFX, cache=TRUE, eval=TRUE, dpi=100, echo=FALSE, results='asis', message=FALSE, warning=FALSE, fig.cap = "Effects of labor market competition treatment on probability of `strongly disagreeing' that more immigrants should be allowed to come to Britain"----
mfx.main <- olMFX(immig.main.reg)
mfx.df <- data.frame(mfx= mfx.main$out$`ME.Strongly disagree`[, "effect"],
se = mfx.main$out$`ME.Strongly disagree`[, "error"])
mfx.df$u.ci <- mfx.df$mfx + (1.96 * mfx.df$se)
mfx.df$l.ci <- mfx.df$mfx - (1.96 * mfx.df$se)
mfx.df$var <- makeNice(rownames(mfx.df))
mfx.df <- rbind(mfx.df, NA, NA, NA)
mfx.df$var[is.na(mfx.df$var)] <- c("Job 1: high skill", "High education",
"Job 2: high skill")
mfx.df$var <- makeNiceFactor(mfx.df$var)
mfx.df$mfx[is.na(mfx.df$mfx)] <- 0
ggplot(data = mfx.df, aes(x = var, y = mfx)) + geom_point() +
geom_errorbar(aes(ymax = u.ci, ymin = l.ci), width = 0.25) +
geom_hline(aes(yintercept=0)) + theme_bw() +
xlab("") + ylab("Marginal effect of strongly disagreeing with more immigrants") +
coord_flip()
## ----manipCheckSimple, cache=TRUE, eval=TRUE, dpi=100, echo=FALSE, results='asis', message=FALSE, warning=FALSE----
library(pander)
library(texreg)
immig.manip.compete <- polr(data = immig,
formula = factor(immigManipCheck)~ Treatment)
immig.manip.culture <- polr(data = immig,
formula = factor(immigManipCheck2)~ Treatment)
texreg(list(immig.manip.compete, immig.manip.culture),
caption = "Manipulation checks. Ordered logistic regression predicting worry about job prospects.",
custom.model.names = c("Job prospects", "Cultural threat"),
label = "table:manipulation_checks")
## ----mainReplication, cache=TRUE, eval=TRUE, dpi=100, echo=FALSE, results='asis', message=FALSE, warning=FALSE, fig.cap="Effects of immigrant skill levels on probability of `strongly disagreeing' that more immigrants should be allowed to come to Britain (excludes `own job' treatment group)"----
reg.replicate <- polr(data = immig.replicate, dv ~ edlevel2 + group + group:edlevel2)
replicate.to.plot <- olMFX(reg.replicate)
mfx.df.rep <- data.frame(mfx= replicate.to.plot$out$`ME.Strongly disagree`[, "effect"],
se = replicate.to.plot$out$`ME.Strongly disagree`[, "error"])
mfx.df.rep$u.ci <- mfx.df.rep$mfx + (1.96 * mfx.df.rep$se)
mfx.df.rep$l.ci <- mfx.df.rep$mfx - (1.96 * mfx.df.rep$se)
mfx.df.rep$var <- makeNice(rownames(mfx.df.rep))
mfx.df.rep <- rbind(mfx.df.rep, NA, NA)
mfx.df.rep$var[is.na(mfx.df.rep$var)] <- c("High education", "Treatment: both high")
mfx.df.rep$mfx[is.na(mfx.df.rep$mfx)] <- 0
mfx.df.rep$var <- makeNiceFactor(mfx.df.rep$var)
ggplot(data = mfx.df.rep, aes(x = var, y = mfx)) + geom_point() +
geom_errorbar(aes(ymax = u.ci, ymin = l.ci), width = 0.25) +
geom_hline(aes(yintercept=0)) + theme_bw() +
xlab("") +
ylab("Marginal effect on 'strongly disagreeing' with more immigrants") +
coord_flip()
## ----manipCheckCompete, cache=TRUE, eval=TRUE, dpi=100, echo=FALSE, results='asis', message=FALSE, warning=FALSE, fig.cap="Effects of education and immigrant skill level on probability of respondent being `not at all worried' about their own job prospects (excludes `own job' treatment group)"----
# prop.table(table(immig.replicate$immigManipCheck, immig.replicate$edlevel2), 1)
# immig.replicate$group
# Manipulation check: ordered logit of worry about one's own job on treatment,
# education, and their interaction (control groups only).
large.labmarket.rep <- polr(data = immig.replicate,
formula = factor(immigManipCheck) ~ group + edlevel2 + group:edlevel2)
# Marginal effects on outcome category 1 ('not at all worried') with 95% CIs.
manip.compet.mfx <- olMFX(large.labmarket.rep)
manip.compet.mfx <- data.frame(manip.compet.mfx$out$ME.1)
manip.compet.mfx$u.ci <- manip.compet.mfx[, "effect"] + (manip.compet.mfx[, "error"] * 1.96)
manip.compet.mfx$l.ci <- manip.compet.mfx[, "effect"] - (manip.compet.mfx[, "error"] * 1.96)
manip.compet.mfx$var <- makeNice(rownames(manip.compet.mfx))
# Baseline categories added as zero-effect reference rows (same device as the
# mainReplication chunk).
manip.compet.mfx <- rbind(manip.compet.mfx, NA, NA)
manip.compet.mfx$var[is.na(manip.compet.mfx$var)] <-
c("High education", "Treatment: both high")
manip.compet.mfx$var <- makeNiceFactor(manip.compet.mfx$var)
manip.compet.mfx$effect[is.na(manip.compet.mfx$effect)] <- 0
ggplot(data = manip.compet.mfx, aes(x = var, y = effect)) + geom_point() +
geom_errorbar(aes(ymax = u.ci, ymin = l.ci), width = 0.25) +
geom_hline(aes(yintercept=0)) + theme_bw() +
xlab("") + ylab("Marginal effect on probability of respondent feeling 'not at all worried' about their job prospects") + coord_flip()
## ----replicateRegTable, cache=TRUE, eval=TRUE, dpi=100, echo=FALSE, results='asis', message=FALSE, warning=FALSE----
# LaTeX table for the main replication model fitted above.
texreg(reg.replicate, caption = "Ordered logistic regression predicting acceptance of immigrants in control groups", label = "table:mainreplication",
custom.coef.names = makeNice(names(reg.replicate$coefficients)))
## ----manipCheckControlGroup, cache=TRUE, eval=TRUE, dpi=100, echo=FALSE, results='asis', message=FALSE, warning=FALSE----
# LaTeX table for the economic-threat manipulation-check model.
texreg(large.labmarket.rep,
label = "table:manipcheckreplicate",
caption = "Ordered logistic regression predictors of economic threat",
custom.coef.names = makeNice(names(large.labmarket.rep$coefficients)))
## ----balanceCheck, cache=TRUE, eval=TRUE, dpi=100, echo=FALSE, results='asis', message=FALSE, warning=FALSE----
# Randomization balance: prior immigration attitudes should not predict group.
# NOTE(review): recodes assume 111 is a "none/zero" code and 99 is
# "don't know" -- confirm against the survey codebook.
immig$immigSelf[immig$immigSelf==111] <- 0
immig$immigSelf[immig$immigSelf==99] <-NA
immig.att.balance <- polr(data = immig, factor(immigSelf)~group)
# Chi-squared balance test of education across groups (result computed but
# only stored, not printed here).
ed.balance <- chisq.test(x = immig$edlevel2, y = immig$group)
texreg(immig.att.balance,
label = "table:balanceCheckImmig",
caption = "Ordered logistic regression predictors of immigration attitudes using random groups")
## ----hhFig1, cache=TRUE, eval=TRUE, dpi=100, echo=FALSE, results='asis', message = FALSE, fig.height=5, fig.cap="Bivariate relationship between education and anti-immigrant responses in the experiment (excludes own job experimental group)"----
# Column-proportions of DV responses within each education level, reshaped
# long (reshape2::melt) for a dodged bar chart.
immig.ed <- prop.table(table(immig.replicate$dv, immig.replicate$edlevel1), 2)
immig.ed<- melt(immig.ed)
colnames(immig.ed) <- c("More immigrants", "Education", "Fraction")
# Restore the original factor orderings lost by table()/melt().
immig.ed$`More immigrants` <- factor(immig.ed$`More immigrants`, levels = levels(immig.replicate$dv))
immig.ed$Education <- factor(immig.ed$Education, levels = levels(immig.replicate$edlevel1))
ggplot(data = immig.ed, aes(x = Education, y = Fraction, group = `More immigrants`,
fill = `More immigrants`)) +
geom_bar(position = "dodge", colour = "black",
stat = "identity") +
theme_bw() +
scale_fill_grey(start = 0, end = 1, na.value = "blue") +
ylab("Fraction") + xlab("Education")
## ----hhFig2, cache=TRUE, eval=TRUE, dpi=100, echo=FALSE, results='asis', message = FALSE, fig.height=5, fig.cap = "Control group support for more immigrants by skill level of immigrants (excludes own job experimental group)"----
### H&H figure 2 replication
# Column-proportions of DV responses within each treatment (skill-level) arm.
counts.replicate <- table(immig.replicate$dv, immig.replicate$group)
counts.replicate <- prop.table(counts.replicate, 2)
counts.replicate <- melt(counts.replicate)
colnames(counts.replicate) <- c("More immigrants", "Treatment", "Count")
# Order treatments Low -> Mixed -> High and restore DV category order.
counts.replicate$Treatment <- factor(counts.replicate$Treatment, levels = c("Low", "Mixed", "High"))
counts.replicate$`More immigrants` <- factor(counts.replicate$`More immigrants`, levels = levels(immig.replicate$dv))
ggplot(counts.replicate, aes(x = Treatment, group = `More immigrants`, y = Count, fill = `More immigrants`)) + geom_bar(stat = "identity", position = "dodge", colour = "black") +
theme_bw() +
scale_fill_grey(start = 0, end = 1, na.value = "red") +
ylab("Fraction") + xlab("Skill level (within control group)")
# Manipulation-check cross-tab (manip.check.table is built elsewhere in the file).
kable(manip.check.table, format = "latex", caption = "Level of concern about their own job for respondents in own job group and other groups")
## ----dk.check, cache=TRUE, eval=TRUE, dpi=100, echo=FALSE, results='asis', message=FALSE, warning=FALSE----
# Attrition/balance check: does reporting "don't know" (coded 99) on the
# experimental DV differ across treatment groups?
immig$dk.binary <- immig$immigExpDV==99
# FIX(review): the table caption announces a logistic regression, but glm()
# without an explicit family defaults to gaussian (a linear probability
# model). Use family = binomial so the fitted model matches the caption.
dk.pred <- glm(data = immig, dk.binary ~ group, family = binomial)
texreg(dk.pred,
label = "table:dk.pred",
caption = "Logistic regression of reporting don't know to support for immigration across experimental groups", custom.coef.names = makeNice(names(dk.pred$coefficients)))
|
#Open the data
# NOTE(review): absolute, machine-specific path -- this breaks for anyone
# else; consider a relative path or a project-root helper.
setwd("/Users/aditya/Dropbox/CovariateTesting/NeuralData/FDRreg-master/data")
synchrony_smithkohn2008 = read.csv('synchrony_smithkohn2008.csv', header=TRUE)
sys = synchrony_smithkohn2008
#Only the last three columns seem relevant to us. It is not clear though as to how the Dist variable is calculated.
# z: synchrony test statistic for each neuron pair.
z = sys$z
#Histogram of the z scores
#The total number of data points seems to 7004 which is smaller than 128*127/2. So not all pairs of neurons are recorded?
#Histogram of the z values
hist(z, 250)
# Working data frame: inter-neuron distance, tuning-curve correlation, and z.
ddfull = cbind(synchrony_smithkohn2008$Dist, synchrony_smithkohn2008$TuningCor, synchrony_smithkohn2008$z)
ddfull = as.data.frame(ddfull)
names(ddfull) = c('Dist', 'TuningCor', 'z')
#Load the FDR Regression package:
# NOTE(review): require() only warns if the package is missing; library()
# would fail loudly, which is preferable in a script.
require(FDRreg)
# the null distribution is not modeled as N(0,1) but rather as N(mu,sig)
# where mu and sig are computed using Efron's ML estimate in a two-groups model
efron_res = FDRreg::efron(ddfull$z, nulltype='empirical', df=10)
mu0hat = efron_res$mu0; sig0hat = efron_res$sig0;
# using Scott et al's Empirical Bayes FDR Regression
X = ddfull[,1:2]
# Set up spline basis functions (df=3)
# NOTE(review): this df = 3 shadows stats::df and is separate from the df=10
# passed to efron() above.
df = 3
# bs() is from the splines package; relies on FDRreg (or the session)
# attaching it -- TODO confirm.
b1 = bs(synchrony_smithkohn2008$Dist, df=df)
b2 = bs(synchrony_smithkohn2008$TuningCor, df=df)
Xs = model.matrix( ~ b1 + b2 - 1)
# produces a matrix with 6 column, first three are splines corresponding to Dist, next three corr. to TuningCor
# FDRreg analysis uses a scaled and centered Xs
# lets use the same
Xs = scale(Xs)
# for our methods, need to rescale z so that nulls are N(0,1)
zscaled = (ddfull$z - mu0hat)/sig0hat
#Let us just work with these rescaled values throughout.
#Histogram of these values
hist(zscaled, breaks = 500, freq = F, main = "Histogram of the test statistics (z)", xlab = "Test Statistics (z)")
#Superimpose the standard normal density
points(sort(zscaled), dnorm(sort(zscaled)), type = "l", col = "red")
#points(sort(zscaled), dnorm(sort(zscaled), mu0, sigma0), type = "l", col = "red")
legend("topright", c("N(0, 1) density"), lty = c(1), lwd = c(1), col = c("red"))
#Run two methods (Benjamini-Hochberg and local FDR with predictive recursion) which do not use covariates.
#Benjamini-Hochberg
# NOTE(review): help() opens a help page mid-script; harmless but should
# probably be removed from a batch run.
help(BenjaminiHochberg)
# BenjaminiHochberg() (from FDRreg) returns a 0/1 rejection indicator at
# nominal FDR level 0.1.
BH1 = BenjaminiHochberg(zscaled, 0.1)
#Basically here the z-values are converted to p-values by Prob that |Z| is at least |z| where Z is standard normal
#and then one applies the usual BH procedure to the p-values.
sum(BH1 == 1 & z > 0)
#There seem to be 329 rejections.
min(zscaled[BH1 == 1])
#This minimum is 2.82922. So every z-value larger than 2.82922 is rejected.
summary(zscaled[BH1 == 1])
#Looks like all the rejected p-values are positive. There is no significant p-value (according to BH) that is negative.
# Histogram with the BH rejection cutoff marked in blue (the local-fdr cutoff
# is added to the same plot in the next section).
hist(zscaled, 500, main = "Rejection Regions (both methods use level = 0.1)", xlab = "Test Statistics (z)")
min(zscaled[BH1 == 1 & z > 0]) #This is 2.836328
abline(v = min(zscaled[BH1 == 1 & z > 0]), col = "blue")
sum(BH1 == 1 & z > 0) #299 rejections
#Efron's local FDR with (1) the empirical null estimated according to Efron's method, (2) alternative density estimated via predictive recursion, and (3) pi_0 given also by the predictive recursion algorithm. Can the Patra and Sen method be used for this?
#e1 = efron(zscaled, nulltype = 'empirical', df = 10)
#There seems to be no help file for this efron function. The code for this function can be got by typing efron in R.
#names(e1)
#[1] "mids" "breaks" "zcounts" "zdens" "z" "fz" "mu0"
# [8] "sig0" "p0" "fdr"
#This procedure for estimating the empirical null comes from Section 4 in the paper "Size, Power and False Discovery Rates"
#by Bradley Efron (published in the Annals of Statistics).
#Is the empirical null Gaussian (with mean mu0 and standard deviation sig0)? Here e1$mu0 = 0.6081196 and e1$sig0 = 0.8140629.
#With this f0, the next step seems to be to estimate the alternative density f1. For this, they are using predictive recursion.
#mu0 = e1$mu0
#sigma0 = e1$sig0
# Predictive-recursion two-groups fit on the rescaled z, so the null is N(0,1).
# NOTE(review): prfdr() appears to be stochastic across runs (see comment
# below) -- set a seed if reproducible counts are needed.
fhat = prfdr(zscaled, mu0 = 0, sig0 = 1)
#The most important output of fhat is postprob which gives the probability of being a signal for each observation i.e., the probability of the null hypothesis being false.
postprob = fhat$postprob
BFDR = getFDR(fhat$postprob)
#This getFDR function outputs two quantities: localfdr and FDR. localfdr is simply 1 - postprob. FDR(z) is defined as the average of lfdr conditional on Z \leq z. It is essentially computed by ordering the postprob, taking cumulative sums and then reordering them. Scott et al seem to put cutoffs on FDR this as their ultimate procedure to reject p-values (instead of putting cutoffs on local fdr).
sum(BFDR$FDR <= 0.1 & z > 0) #This gives the number of rejections by the local FDR method.
#Every time I run this, I seem to be getting different answers. The predictive recursion algorithm seems to give different answers for each run. We can check this easily.
#f1 = prfdr(z, mu0 = mu0, sig0 = sigma0, control = list(npasses = 20, gridsize = 500))
#f2 = prfdr(z, mu0 = mu0, sig0 = sigma0, control = list(npasses = 20, gridsize = 500))
#head(cbind(f1$postprob, f2$postprob))
min(zscaled[BFDR$FDR <= 0.1 & z > 0]) #2.406
# Add the local-fdr cutoff (red) to the histogram drawn in the BH section.
abline(v = min(zscaled[BFDR$FDR <= 0.1 & z > 0]), col = "red")
sum(BFDR$FDR <= 0.1 & z > 0) #497 rejections.
legend("topright", c("B-H (299 rejections, z > 2.836)", "Local FDR (497 rejections, z > 2.406)"), lty = c(1, 1), lwd = c(1, 1), col = c("blue", "red"))
#Plotting the test statistics against the covariates
# Scatter of z against inter-neuron distance with an OLS trend line.
plot(X[,1], zscaled, main = "z vs Distance", xlab = "Inter-neuron distances (X1)", ylab = "Test Statistic for Synchrony", cex = 0.2)
abline(lm(zscaled ~ X[,1]), col = "blue")
# Scatter of z against tuning-curve correlation with a quadratic fit overlaid.
plot(X[,2], zscaled, main = "z vs Tuning correlations", xlab = "Tuning Curve Correlations (X2)", ylab = "Test Statistic for Synchrony", cex = 0.2)
m1 = lm(zscaled ~ X[,2] + I(X[,2]^2))
summary(m1)
points(X[order(X[,2]), 2], m1$fitted.values[order(X[,2])], type = "l", col = "red")
#Run Scott's method:
# Empirical-Bayes FDR regression (Scott et al. 2015) of z on the spline
# design Xs, with the empirical null estimated internally.
scott_res = FDRreg(ddfull$z, Xs, nulltype='empirical', control=list(lambda = 1))
names(scott_res)
# Empty canvas covering both density curves on the original z scale.
plot(c(scott_res$x_grid, scott_res$x_grid), c(scott_res$f1_grid, scott_res$f0_grid), type = "n")
#This does seem like a density:
# Riemann-sum sanity checks that f0 and f1 each integrate to ~1.
sum(diff(scott_res$x_grid)*scott_res$f1_grid[-1])
sum(diff(scott_res$x_grid)*scott_res$f0_grid[-1])
points(scott_res$x_grid, scott_res$f0_grid, type = "l", col = "blue")
points(scott_res$x_grid, scott_res$f1_grid, type = "l", col = "red")
legend("topright", c("Null density (f0)", "Alternative density (f1)"), lty = c(1, 1), lwd = c(1, 1), col = c("blue", "red"))
#Note that we do not work with the original data z. But we rather work with zscaled.
#Let us re-plot f0 and f1 after rescaling.
# Change of variables z -> (z - mu0)/sig0: grid shifts/scales and densities
# pick up the Jacobian factor sig0hat.
scal_grid = (scott_res$x_grid - mu0hat)/sig0hat
scott_f1_scal = scott_res$f1_grid*sig0hat
scott_f0_scal = scott_res$f0_grid*sig0hat
plot(c(scal_grid, scal_grid), c(scott_f0_scal, scott_f1_scal), type = "n")
points(scal_grid, scott_f0_scal, type = "l", col = "blue")
points(scal_grid, scott_f1_scal, type = "l", col = "red")
legend("topright", c("Null density (f0)", "Alternative density (f1)"), lty = c(1, 1), lwd = c(1, 1), col = c("blue", "red"))
#It is easy to check that f0 now corresponds to the standard normal density:
#points(scal_grid, dnorm(scal_grid), col = "green")
#The results for our methods are all saved and can be retrieved via load('save_for_now.Rdata').
# NOTE(review): another absolute machine-specific path; the load() below
# injects gridy_res, m2_res, am1_res, am2_res into the global environment.
setwd("/Users/aditya/Dropbox/CovariateTesting/NeuralData/FDRreg-master/Our Code")
load('save_for_now.Rdata')
#The results for our methods are saved in four objects: gridy_res, am1_res, m2_res, am2_res:
#1) Marginal Method One: gridy_res
#2) Marginal Method Two: m2_res (this apparently does not work well).
#3) Full MLE with Marginal One as Initialization: am1_res
#4) Full MLE with Marginal Two as Initialization: am2_res
names(gridy_res)
names(m2_res)
names(am1_res)
names(am2_res)
#Plotting the estimates for f1 from all the methods:
# FIX(review): the original called plot() with f1_m1 / f1_fullm1 / f1_fullm2
# before those vectors were computed, so a fresh top-to-bottom run fails with
# "object not found". Compute all mixture densities first, then plot.
# Helper: normal-mixture alternative density f1(z) = sum_j probs_j * phi(z - atoms_j),
# evaluated at each point of `grid`.
mixture_density <- function(grid, atoms, probs) {
  sapply(grid, function(z) sum(probs * dnorm(z - atoms)))
}
#2) gridy_res (Marginal Method One)
f1_m1 = mixture_density(scal_grid, gridy_res$atoms, gridy_res$probs)
#3) m2_res (Marginal Method Two; computed but not plotted below)
f1_m2 = mixture_density(scal_grid, m2_res$atoms, m2_res$probs)
#3) am1_res (Full MLE initialized from Marginal One)
f1_fullm1 = mixture_density(scal_grid, am1_res$atoms, am1_res$probs)
#4) am2_res (Full MLE initialized from Marginal Two)
f1_fullm2 = mixture_density(scal_grid, am2_res$atoms, am2_res$probs)
# Empty canvas sized to cover all four curves, then overlay each estimate.
plot(c(scal_grid, scal_grid, scal_grid, scal_grid), c(scott_f1_scal, f1_m1, f1_fullm1, f1_fullm2), type = "n", xlab = "z", ylab = "Alternative Density (f1)", main = "Estimates of f1")
#1) Scott's method
points(scal_grid, scott_f1_scal, type = "l")
#2) Marginal Method One
points(scal_grid, f1_m1, type = "l", col = "blue")
#points(scal_grid, f1_m2, type = "l", col = "purple")
#3) Full MLE - M1
points(scal_grid, f1_fullm1, type = "l", col = "green")
#4) Full MLE - M2
points(scal_grid, f1_fullm2, type = "l", col = "red")
legend("topright", c("Scott's Method", "Marginal One", "Full MLE - M1", "Full MLE - M2"), lty = c(1, 1, 1, 1), lwd = c(1, 1, 1, 1), col = c("black", "blue", "green", "red"))
#Plotting the estimates of the estimated function pi from all the methods:
#Because this is an additive model, I will plot pi in two functions (as a function of the first variable and as a function of the second variable)
# FIX(review): the original used `or1` in plot() before defining it (and then
# re-derived it before every points() call). Define the ordering once, up front.
or1 = order(X[,1])
# Each curve is the X1 spline contribution to the additive logit of pi
# (intercept and X2 terms omitted), plotted against the raw X1 values.
plot(c(X[or1,1], X[or1,1], X[or1,1], X[or1, 1]), c(scott_res$model$coef[2]*Xs[or1,1] + scott_res$model$coef[3]*Xs[or1,2] + scott_res$model$coef[4]*Xs[or1,3], gridy_res$b[2]*Xs[or1,1] + gridy_res$b[3]*Xs[or1,2] + gridy_res$b[4]*Xs[or1,3], am1_res$b[2]*Xs[or1,1] + am1_res$b[3]*Xs[or1,2] + am1_res$b[4]*Xs[or1,3], am2_res$b[2]*Xs[or1,1] + am2_res$b[3]*Xs[or1,2] + am2_res$b[4]*Xs[or1,3]), type = "n", xlab = "Inter-neuron Distance (X1)", ylab = "The (logit of) Pi function (as a function of X1)", main = "Plot of the (logit of) Pi Function in terms of X1")
#For Scott's method:
points(X[or1,1], scott_res$model$coef[2]*Xs[or1,1] + scott_res$model$coef[3]*Xs[or1,2] + scott_res$model$coef[4]*Xs[or1,3], type = "o", cex = 0.5)
#For gridy_res
points(X[or1,1], gridy_res$b[2]*Xs[or1,1] + gridy_res$b[3]*Xs[or1,2] + gridy_res$b[4]*Xs[or1,3], type = "o", cex = 0.5, col = "blue")
#For m2_res (suppressed)
#points(X[or1,1], m2_res$b[2]*Xs[or1,1] + m2_res$b[3]*Xs[or1,2] + m2_res$b[4]*Xs[or1,3], type = "o", cex = 0.5, col = "purple")
#For am1_res
points(X[or1,1], am1_res$b[2]*Xs[or1,1] + am1_res$b[3]*Xs[or1,2] + am1_res$b[4]*Xs[or1,3], type = "o", cex = 0.5, col = "green")
#For am2_res
points(X[or1,1], am2_res$b[2]*Xs[or1,1] + am2_res$b[3]*Xs[or1,2] + am2_res$b[4]*Xs[or1,3], type = "o", cex = 0.5, col = "red")
legend("topright", c("Scott's Method", "Marginal One", "Full MLE - M1", "Full MLE - M2"), lty = c(1, 1, 1, 1), lwd = c(1, 1, 1, 1), col = c("black", "blue", "green", "red"))
#Plotting pi as a function of the tuning curve correlation
# FIXES(review): (1) `or2` was used in plot() before being defined;
# (2) Scott's curve indexed its third spline column with `or1` (the X1
# ordering) instead of `or2`, scrambling that term. Both fixed below.
or2 = order(X[,2])
# Each curve is the X2 spline contribution to the additive logit of pi
# (intercept and X1 terms omitted), plotted against the raw X2 values.
plot(c(X[or2,2], X[or2,2], X[or2,2], X[or2, 2]), c(scott_res$model$coef[5]*Xs[or2,4] + scott_res$model$coef[6]*Xs[or2,5] + scott_res$model$coef[7]*Xs[or2,6], gridy_res$b[5]*Xs[or2,4] + gridy_res$b[6]*Xs[or2,5] + gridy_res$b[7]*Xs[or2,6], am1_res$b[5]*Xs[or2,4] + am1_res$b[6]*Xs[or2,5] + am1_res$b[7]*Xs[or2,6], am2_res$b[5]*Xs[or2,4] + am2_res$b[6]*Xs[or2,5] + am2_res$b[7]*Xs[or2,6]), type = "n", xlab = "Tuning Curve Correlation (X2)", ylab = "The (logit of) Pi function (as a function of X2)", main = "Plot of the (logit of) Pi function in terms of X2")
#For Scott's method:
points(X[or2,2], scott_res$model$coef[5]*Xs[or2,4] + scott_res$model$coef[6]*Xs[or2,5] + scott_res$model$coef[7]*Xs[or2,6], type = "o", cex = 0.5)
#For gridy_res
points(X[or2,2], gridy_res$b[5]*Xs[or2,4] + gridy_res$b[6]*Xs[or2,5] + gridy_res$b[7]*Xs[or2,6], type = "o", cex = 0.5, col = "blue")
#For m2_res (suppressed)
#points(X[or2,2], m2_res$b[5]*Xs[or2,4] + m2_res$b[6]*Xs[or2,5] + m2_res$b[7]*Xs[or2,6], type = "o", cex = 0.5, col = "purple")
#For am1_res
points(X[or2,2], am1_res$b[5]*Xs[or2,4] + am1_res$b[6]*Xs[or2,5] + am1_res$b[7]*Xs[or2,6], type = "o", cex = 0.5, col = "green")
#For am2_res
points(X[or2,2], am2_res$b[5]*Xs[or2,4] + am2_res$b[6]*Xs[or2,5] + am2_res$b[7]*Xs[or2,6], type = "o", cex = 0.5, col = "red")
legend("top", c("Scott's Method", "Marginal One", "Full MLE - M1", "Full MLE - M2"), lty = c(1, 1, 1, 1), lwd = c(1, 1, 1, 1), col = c("black", "blue", "green", "red"))
# Estimated prior probability pi(x) from each method, plotted against the
# linear predictor x'beta, with beta taken from the Full MLE - M1 fit.
Xs1 = cbind(1, Xs)                  # prepend an intercept column
xaxs = Xs1 %*% am1_res$b            # linear predictor for every observation
or = order(xaxs)
# Blank canvas wide/tall enough for all four pi curves at once.
plot(rep(xaxs[or], 4),
c(scott_res$priorprob[or], gridy_res$p[or], am1_res$p[or], am2_res$p[or]),
type = "n", xlab = "X * beta", ylab = "Prior Probabilities",
main = "Plot of Estimates of the Pi function")
points(xaxs[or], scott_res$priorprob[or], type = "l")             # Scott's method (black)
points(xaxs[or], gridy_res$p[or], type = "l", col = "blue")       # Marginal One
#points(xaxs[or], m2_res$p[or], type = "l", col = "purple")       # Marginal Two (suppressed)
points(xaxs[or], am1_res$p[or], type = "l", col = "green")        # Full MLE - M1
points(xaxs[or], am2_res$p[or], type = "l", col = "red")          # Full MLE - M2
legend("topleft", c("Scott's Method", "Marginal One", "Full MLE - M1", "Full MLE - M2"),
lty = c(1, 1, 1, 1), lwd = c(1, 1, 1, 1), col = c("black", "blue", "green", "red"))
#Understanding the rejections:
#Finding the rejection sets for each of the methods (nominal level is 0.1)
#Scott's method:
# Reject positive-z hypotheses whose cumulative-average lfdr (getFDR) is <= 0.1.
scott_rejects = which(getFDR(scott_res$postprob)$FDR <= 0.1 & ddfull$z > 0)
length(scott_rejects)
#Sujayam's function
# Reject hypotheses by thresholding local-fdr values so that the average lfdr
# over the rejected set (an estimate of the FDR) stays below `fdr_nominal`.
# Returns the integer indices of rejected hypotheses; integer(0) when nothing
# can be rejected at the nominal level.
lfdr_reject = function(lfdr, fdr_nominal = 0.1){
  sl = sort(lfdr)
  # Largest k such that the running mean of the k smallest lfdrs <= fdr_nominal.
  k = sum(cumsum(sl)/seq_along(sl) <= fdr_nominal)
  # FIX(review): the original returned via the value of an if/assignment
  # (invisibly) and mixed numeric(0)/integer returns; return the index set
  # explicitly and consistently typed (which() yields integers).
  if (k > 0) which(lfdr <= sl[k]) else integer(0)
}
# REFACTOR(review): the same two-groups local-fdr computation was repeated
# verbatim for all four fits; factor it into one helper to avoid copy-paste
# drift.  For each z_i:
#   lfdr_i = (1 - pi_i) f0(z_i) / [ (1 - pi_i) f0(z_i) + pi_i f1(z_i) ],
# with f0 = N(0,1) and f1 the fitted normal-mixture alternative
# (atoms/probs), pi the fitted prior probabilities (fit$p).
two_groups_lfdr = function(z, fit){
  f1_z = sapply(fit$atoms, function(ai) dnorm(z - ai)) %*% fit$probs
  null_part = (1 - fit$p) * dnorm(z)
  null_part/(null_part + fit$p * f1_z)
}
#gridy_res (Marginal Method One)
gridy_lfdr = two_groups_lfdr(zscaled, gridy_res)
gridy_rejects = lfdr_reject(gridy_lfdr)
length(gridy_rejects)
#Slightly fewer rejections compared to Scott's method (makes sense from the plot of the pi's)
#m2_res (Marginal Method Two)
m2_lfdr = two_groups_lfdr(zscaled, m2_res)
m2_rejects = lfdr_reject(m2_lfdr)
length(m2_rejects)
#This does not work at all.
#am1_res (Full MLE - M1)
am1_lfdr = two_groups_lfdr(zscaled, am1_res)
am1_rejects = lfdr_reject(am1_lfdr)
length(am1_rejects)
#This (and the next method) unsurprisingly give the most rejections.
#am2_res (Full MLE - M2)
am2_lfdr = two_groups_lfdr(zscaled, am2_res)
am2_rejects = lfdr_reject(am2_lfdr)
length(am2_rejects)
#This is identical to am1.
# Rejection counts for all five procedures, in one vector.
c(length(scott_rejects), length(gridy_rejects), length(m2_rejects), length(am1_rejects), length(am2_rejects))
#Additional Rejects (of Full MLE compared to Local FDR without Covariates)
# Baseline (covariate-free) rejection set from the predictive-recursion fit.
noco_rejects = which(BFDR$FDR <= 0.1 & z > 0)
par(mfrow = c(2, 1))
# Pairs rejected by Full MLE - M1 but not by the no-covariate method.
sid = setdiff(am1_rejects, noco_rejects)
plot(X[,1], X[,2], xlab = "Inter-neuron Distances (X1)", ylab = "Tuning Curve Correlation (X2)", main = "Extra rejections compared to No Covariate method (575)", type = "n" )
points(X[sid, 1], X[sid, 2])
length(sid)
# Pairs rejected by the no-covariate method but not by Full MLE - M1.
sid = setdiff(noco_rejects, am1_rejects)
plot(X[,1], X[,2], xlab = "Inter-neuron Distances (X1)", ylab = "Tuning Curve Correlation (X2)", main = "Rejected by the No-Covariate method but not by our method (52)", type = "n" )
points(X[sid, 1], X[sid, 2])
length(sid)
#Histogram
par(mfrow = c(1, 1))
sid = setdiff(am1_rejects, noco_rejects)
hist(zscaled[sid], breaks = 150)
plot(X[,1], X[,2], xlab = "Inter-neuron Distances (X1)", ylab = "Tuning Curve Correlation (X2)", main = "Extra rejections compared to No Covariate method", type = "n" )
points(X[sid, 1], X[sid, 2])
#Additional rejects (of full MLE compared to Scott's method)
par(mfrow = c(2, 1))
# Pairs rejected by Full MLE - M1 but not by Scott's method.
sid = setdiff(am1_rejects, scott_rejects)
plot(X[,1], X[,2], xlab = "Inter-neuron Distances (X1)", ylab = "Tuning Curve Correlation (X2)", main = "Extra rejections compared to Scott's method (276)", type = "n" )
points(X[sid, 1], X[sid, 2])
length(sid)
# Pairs rejected by Scott's method but not by Full MLE - M1.
sid = setdiff(scott_rejects, am1_rejects)
plot(X[,1], X[,2], xlab = "Inter-neuron Distances (X1)", ylab = "Tuning Curve Correlation (X2)", main = "Rejected by Scott's method but not by our method (8)", type = "n" )
points(X[sid, 1], X[sid, 2])
length(sid)
#Rejection Regions of the four methods (Scott et al, no covariate lfdr, marginal one and am1)
# 2x2 panel: each method's rejected pairs in covariate space.
# NOTE(review): the counts in the titles (497/752/722/1020) are hard-coded
# from one run; prfdr() is stochastic, so they may not match a rerun.
par(mfrow = c(2, 2))
si = noco_rejects
length(si)
plot(X[,1], X[,2], xlab = "Inter-neuron Distances (X1)", ylab = "Tuning Curve Correlation (X2)", main = "Rejections of the No-Covariate method (497)", type = "n")
points(X[si, 1], X[si, 2])
si = scott_rejects
length(si)
plot(X[,1], X[,2], xlab = "Inter-neuron Distances (X1)", ylab = "Tuning Curve Correlation (X2)", main = "Rejections of the Scott et al. (2015) method (752)", type = "n")
points(X[si, 1], X[si, 2])
si = gridy_rejects
length(si)
plot(X[,1], X[,2], xlab = "Inter-neuron Distances (X1)", ylab = "Tuning Curve Correlation (X2)", main = "Rejections of the First Marginal Method (722)", type = "n")
points(X[si, 1], X[si, 2])
si = am1_rejects
length(si)
plot(X[,1], X[,2], xlab = "Inter-neuron Distances (X1)", ylab = "Tuning Curve Correlation (X2)", main = "Rejections of the Full MLE initialized by M1 (1020)", type = "n")
points(X[si, 1], X[si, 2])
par(mfrow = c(1, 1))
#Box Plot of the estimated pi values:
boxplot(cbind(scott_res$priorprob, gridy_res$p, am1_res$p, am2_res$p), names = c("Scott", "Marg 1", "Full MLE - 1", "Full MLE - 2"), main = "Box Plots of the Estimated Pi values")
abline(h = 1 - fhat$pi0, col = "red") #This is the value of the pi for the no-covariate local FDR method.
| /NeuralData/FDRreg-master/Our Code/RCodeforKnightLabTalkAG13March2017.R | no_license | NabarunD/NPMLEmix | R | false | false | 18,212 | r | #Open the data
setwd("/Users/aditya/Dropbox/CovariateTesting/NeuralData/FDRreg-master/data")
synchrony_smithkohn2008 = read.csv('synchrony_smithkohn2008.csv', header=TRUE)
sys = synchrony_smithkohn2008
#Only the last three columns seem relevant to us. It is not clear though as to how the Dist variable is calculated.
z = sys$z
#Histogram of the z scores
#The total number of data points seems to 7004 which is smaller than 128*127/2. So not all pairs of neurons are recorded?
#Histogram of the z values
hist(z, 250)
ddfull = cbind(synchrony_smithkohn2008$Dist, synchrony_smithkohn2008$TuningCor, synchrony_smithkohn2008$z)
ddfull = as.data.frame(ddfull)
names(ddfull) = c('Dist', 'TuningCor', 'z')
#Load the FDR Regression package:
require(FDRreg)
# the null distribution is not modeled as N(0,1) but rather as N(mu,sig)
# where mu and sig are computed using Efron's ML estimate in a two-groups model
efron_res = FDRreg::efron(ddfull$z, nulltype='empirical', df=10)
mu0hat = efron_res$mu0; sig0hat = efron_res$sig0;
# using Scott et al's Empirical Bayes FDR Regression
X = ddfull[,1:2]
# Set up spline basis functions (df=3)
df = 3
b1 = bs(synchrony_smithkohn2008$Dist, df=df)
b2 = bs(synchrony_smithkohn2008$TuningCor, df=df)
Xs = model.matrix( ~ b1 + b2 - 1)
# produces a matrix with 6 column, first three are splines corresponding to Dist, next three corr. to TuningCor
# FDRreg analysis uses a scaled and centered Xs
# lets use the same
Xs = scale(Xs)
# for our methods, need to rescale z so that nulls are N(0,1)
zscaled = (ddfull$z - mu0hat)/sig0hat
#Let us just work with these rescaled values throughout.
#Histogram of these values
hist(zscaled, breaks = 500, freq = F, main = "Histogram of the test statistics (z)", xlab = "Test Statistics (z)")
#Superimpose the standard normal density
points(sort(zscaled), dnorm(sort(zscaled)), type = "l", col = "red")
#points(sort(zscaled), dnorm(sort(zscaled), mu0, sigma0), type = "l", col = "red")
legend("topright", c("N(0, 1) density"), lty = c(1), lwd = c(1), col = c("red"))
#Run two methods (Benjamini-Hochberg and local FDR with predictive recursion) which do not use covariates.
#Benjamini-Hochberg
help(BenjaminiHochberg)
BH1 = BenjaminiHochberg(zscaled, 0.1)
#Basically here the z-values are converted to p-values by Prob that |Z| is at least |z| where Z is standard normal
#and then one applies the usual BH procedure to the p-values.
sum(BH1 == 1 & z > 0)
#There seem to be 329 rejections.
min(zscaled[BH1 == 1])
#This minimum is 2.82922. So every z-value larger than 2.82922 is rejected.
summary(zscaled[BH1 == 1])
#Looks like all the rejected p-values are positive. There is no significant p-value (according to BH) that is negative.
hist(zscaled, 500, main = "Rejection Regions (both methods use level = 0.1)", xlab = "Test Statistics (z)")
min(zscaled[BH1 == 1 & z > 0]) #This is 2.836328
abline(v = min(zscaled[BH1 == 1 & z > 0]), col = "blue")
sum(BH1 == 1 & z > 0) #299 rejections
#Efron's local FDR with (1) the empirical null estimated according to Efron's method, (2) alternative density estimated via predictive recursion, and (3) pi_0 given also by the predictive recursion algorithm. Can the Patra and Sen method be used for this?
#e1 = efron(zscaled, nulltype = 'empirical', df = 10)
#There seems to be no help file for this efron function. The code for this function can be got by typing efron in R.
#names(e1)
#[1] "mids" "breaks" "zcounts" "zdens" "z" "fz" "mu0"
# [8] "sig0" "p0" "fdr"
#This procedure for estimating the empirical null comes from Section 4 in the paper "Size, Power and False Discovery Rates"
#by Bradley Efron (published in the Annals of Statistics).
#Is the empirical null Gaussian (with mean mu0 and standard deviation sig0)? Here e1$mu0 = 0.6081196 and e1$sig0 = 0.8140629.
#With this f0, the next step seems to be to estimate the alternative density f1. For this, they are using predictive recursion.
#mu0 = e1$mu0
#sigma0 = e1$sig0
fhat = prfdr(zscaled, mu0 = 0, sig0 = 1)
#The most important output of fhat is postprob which gives the probability of being a signal for each observation i.e., the probability of the null hypothesis being false.
postprob = fhat$postprob
BFDR = getFDR(fhat$postprob)
#This getFDR function outputs two quantities: localfdr and FDR. localfdr is simply 1 - postprob. FDR(z) is defined as the average of lfdr conditional on Z \leq z. It is essentially computed by ordering the postprob, taking cumulative sums and then reordering them. Scott et al seem to put cutoffs on FDR this as their ultimate procedure to reject p-values (instead of putting cutoffs on local fdr).
sum(BFDR$FDR <= 0.1 & z > 0) #This gives the number of rejections by the local FDR method.
#Every time I run this, I seem to be getting different answers. The predictive recursion algorithm seems to give different answers for each run. We can check this easily.
#f1 = prfdr(z, mu0 = mu0, sig0 = sigma0, control = list(npasses = 20, gridsize = 500))
#f2 = prfdr(z, mu0 = mu0, sig0 = sigma0, control = list(npasses = 20, gridsize = 500))
#head(cbind(f1$postprob, f2$postprob))
min(zscaled[BFDR$FDR <= 0.1 & z > 0]) #2.406
abline(v = min(zscaled[BFDR$FDR <= 0.1 & z > 0]), col = "red")
sum(BFDR$FDR <= 0.1 & z > 0) #497 rejections.
legend("topright", c("B-H (299 rejections, z > 2.836)", "Local FDR (497 rejections, z > 2.406)"), lty = c(1, 1), lwd = c(1, 1), col = c("blue", "red"))
#Plotting the test statistics against the covariates
plot(X[,1], zscaled, main = "z vs Distance", xlab = "Inter-neuron distances (X1)", ylab = "Test Statistic for Synchrony", cex = 0.2)
abline(lm(zscaled ~ X[,1]), col = "blue")
plot(X[,2], zscaled, main = "z vs Tuning correlations", xlab = "Tuning Curve Correlations (X2)", ylab = "Test Statistic for Synchrony", cex = 0.2)
m1 = lm(zscaled ~ X[,2] + I(X[,2]^2))
summary(m1)
points(X[order(X[,2]), 2], m1$fitted.values[order(X[,2])], type = "l", col = "red")
#Run Scott's method:
scott_res = FDRreg(ddfull$z, Xs, nulltype='empirical', control=list(lambda = 1))
names(scott_res)
plot(c(scott_res$x_grid, scott_res$x_grid), c(scott_res$f1_grid, scott_res$f0_grid), type = "n")
#This does seem like a density:
sum(diff(scott_res$x_grid)*scott_res$f1_grid[-1])
sum(diff(scott_res$x_grid)*scott_res$f0_grid[-1])
points(scott_res$x_grid, scott_res$f0_grid, type = "l", col = "blue")
points(scott_res$x_grid, scott_res$f1_grid, type = "l", col = "red")
legend("topright", c("Null density (f0)", "Alternative density (f1)"), lty = c(1, 1), lwd = c(1, 1), col = c("blue", "red"))
#Note that we do not work with the original data z. But we rather work with zscaled.
#Let us re-plot f0 and f1 after rescaling.
scal_grid = (scott_res$x_grid - mu0hat)/sig0hat
scott_f1_scal = scott_res$f1_grid*sig0hat
scott_f0_scal = scott_res$f0_grid*sig0hat
plot(c(scal_grid, scal_grid), c(scott_f0_scal, scott_f1_scal), type = "n")
points(scal_grid, scott_f0_scal, type = "l", col = "blue")
points(scal_grid, scott_f1_scal, type = "l", col = "red")
legend("topright", c("Null density (f0)", "Alternative density (f1)"), lty = c(1, 1), lwd = c(1, 1), col = c("blue", "red"))
#It is easy to check that f0 now corresponds to the standard normal density:
#points(scal_grid, dnorm(scal_grid), col = "green")
#The results for our methods are all saved and can be retrieved via load('save_for_now.Rdata').
setwd("/Users/aditya/Dropbox/CovariateTesting/NeuralData/FDRreg-master/Our Code")
load('save_for_now.Rdata')
#The results for our methods are saved in four objects: gridy_res, am1_res, m2_res, am2_res:
#1) Marginal Method One: gridy_res
#2) Marginal Method Two: m2_res (this apparently does not work well).
#3) Full MLE with Marginal One as Initialization: am1_res
#4) Full MLE with Marginal Two as Initialization: am2_res
names(gridy_res)
names(m2_res)
names(am1_res)
names(am2_res)
#Plotting the estimates for f1 from all the methods:
plot(c(scal_grid, scal_grid, scal_grid, scal_grid), c(scott_f1_scal, f1_m1, f1_fullm1, f1_fullm2), type = "n", xlab = "z", ylab = "Alternative Density (f1)", main = "Estimates of f1")
#1) Scott's method
points(scal_grid, scott_f1_scal, type = "l")
#2) gridy_res
f1_m1 = rep(0, length(scal_grid))
for(i in 1:length(scal_grid))
{
f1_m1[i] = sum(gridy_res$probs*dnorm(scal_grid[i] - gridy_res$atoms))
}
points(scal_grid, f1_m1, type = "l", col = "blue")
#3) m2_res
f1_m2 = rep(0, length(scal_grid))
for(i in 1:length(scal_grid))
{
f1_m2[i] = sum(m2_res$probs*dnorm(scal_grid[i] - m2_res$atoms))
}
#points(scal_grid, f1_m2, type = "l", col = "purple")
#3) am1_res
f1_fullm1 = rep(0, length(scal_grid))
for(i in 1:length(scal_grid))
{
f1_fullm1[i] = sum(am1_res$probs*dnorm(scal_grid[i] - am1_res$atoms))
}
points(scal_grid, f1_fullm1, type = "l", col = "green")
#4) am2_res
f1_fullm2 = rep(0, length(scal_grid))
for(i in 1:length(scal_grid))
{
f1_fullm2[i] = sum(am2_res$probs*dnorm(scal_grid[i] - am2_res$atoms))
}
points(scal_grid, f1_fullm2, type = "l", col = "red")
legend("topright", c("Scott's Method", "Marginal One", "Full MLE - M1", "Full MLE - M2"), lty = c(1, 1, 1, 1), lwd = c(1, 1, 1, 1), col = c("black", "blue", "green", "red"))
#Plotting the estimates of the estimated function pi from all the methods:
#Because this is an additive model, I will plot pi in two functions (as a function of the first variable and as a function of the second variable)
plot(c(X[or1,1], X[or1,1], X[or1,1], X[or1, 1]), c(scott_res$model$coef[2]*Xs[or1,1] + scott_res$model$coef[3]*Xs[or1,2] + scott_res$model$coef[4]*Xs[or1,3], gridy_res$b[2]*Xs[or1,1] + gridy_res$b[3]*Xs[or1,2] + gridy_res$b[4]*Xs[or1,3], am1_res$b[2]*Xs[or1,1] + am1_res$b[3]*Xs[or1,2] + am1_res$b[4]*Xs[or1,3], am2_res$b[2]*Xs[or1,1] + am2_res$b[3]*Xs[or1,2] + am2_res$b[4]*Xs[or1,3]), type = "n", xlab = "Inter-neuron Distance (X1)", ylab = "The (logit of) Pi function (as a function of X1)", main = "Plot of the (logit of) Pi Function in terms of X1")
#For Scott's method:
or1 = order(X[,1])
points(X[or1,1], scott_res$model$coef[2]*Xs[or1,1] + scott_res$model$coef[3]*Xs[or1,2] + scott_res$model$coef[4]*Xs[or1,3], type = "o", cex = 0.5)
#For gridy_res
or1 = order(X[,1])
points(X[or1,1], gridy_res$b[2]*Xs[or1,1] + gridy_res$b[3]*Xs[or1,2] + gridy_res$b[4]*Xs[or1,3], type = "o", cex = 0.5, col = "blue")
#For m2_res
or1 = order(X[,1])
#points(X[or1,1], m2_res$b[2]*Xs[or1,1] + m2_res$b[3]*Xs[or1,2] + m2_res$b[4]*Xs[or1,3], type = "o", cex = 0.5, col = "purple")
#For am1_res
or1 = order(X[,1])
points(X[or1,1], am1_res$b[2]*Xs[or1,1] + am1_res$b[3]*Xs[or1,2] + am1_res$b[4]*Xs[or1,3], type = "o", cex = 0.5, col = "green")
#For am2_res
or1 = order(X[,1])
points(X[or1,1], am2_res$b[2]*Xs[or1,1] + am2_res$b[3]*Xs[or1,2] + am2_res$b[4]*Xs[or1,3], type = "o", cex = 0.5, col = "red")
legend("topright", c("Scott's Method", "Marginal One", "Full MLE - M1", "Full MLE - M2"), lty = c(1, 1, 1, 1), lwd = c(1, 1, 1, 1), col = c("black", "blue", "green", "red"))
#Plotting pi as a function of the tuning curve correlation
plot(c(X[or2,2], X[or2,2], X[or2,2], X[or2, 2]), c(scott_res$model$coef[5]*Xs[or2,4] + scott_res$model$coef[6]*Xs[or2,5] + scott_res$model$coef[7]*Xs[or2,6], gridy_res$b[5]*Xs[or2,4] + gridy_res$b[6]*Xs[or2,5] + gridy_res$b[7]*Xs[or2,6], am1_res$b[5]*Xs[or2,4] + am1_res$b[6]*Xs[or2,5] + am1_res$b[7]*Xs[or2,6], am2_res$b[5]*Xs[or2,4] + am2_res$b[6]*Xs[or2,5] + am2_res$b[7]*Xs[or2,6]), type = "n", xlab = "Tuning Curve Correlation (X2)", ylab = "The (logit of) Pi function (as a function of X2)", main = "Plot of the (logit of) Pi function in terms of X2")
# For Scott's method: partial fit in X2 (tuning-curve correlation), i.e. the
# spline basis columns 4:6 of Xs weighted by the fitted coefficients.
# BUG FIX: the third basis column was indexed with or1 (the X1 ordering)
# instead of or2, scrambling that term of the curve; all terms must use or2.
or2 = order(X[,2])
points(X[or2,2], scott_res$model$coef[5]*Xs[or2,4] + scott_res$model$coef[6]*Xs[or2,5] + scott_res$model$coef[7]*Xs[or2,6], type = "o", cex = 0.5)
#For gridy_res
points(X[or2,2], gridy_res$b[5]*Xs[or2,4] + gridy_res$b[6]*Xs[or2,5] + gridy_res$b[7]*Xs[or2,6], type = "o", cex = 0.5, col = "blue")
#For m2_res
#points(X[or2,2], m2_res$b[5]*Xs[or2,4] + m2_res$b[6]*Xs[or2,5] + m2_res$b[7]*Xs[or2,6], type = "o", cex = 0.5, col = "purple")
#For am1_res
points(X[or2,2], am1_res$b[5]*Xs[or2,4] + am1_res$b[6]*Xs[or2,5] + am1_res$b[7]*Xs[or2,6], type = "o", cex = 0.5, col = "green")
#For am2_res
points(X[or2,2], am2_res$b[5]*Xs[or2,4] + am2_res$b[6]*Xs[or2,5] + am2_res$b[7]*Xs[or2,6], type = "o", cex = 0.5, col = "red")
legend("top", c("Scott's Method", "Marginal One", "Full MLE - M1", "Full MLE - M2"), lty = c(1, 1, 1, 1), lwd = c(1, 1, 1, 1), col = c("black", "blue", "green", "red"))
#Plotting the pi(xi) values as a function of xi^Tbeta for a fixed beta.
Xs1 = cbind(rep(1, length(Xs[,1])), Xs)
xaxs = Xs1%*%am1_res$b
or = order(xaxs)
plot(c(xaxs[or],xaxs[or],xaxs[or],xaxs[or]), c(scott_res$priorprob[or], gridy_res$p[or], am1_res$p[or], am2_res$p[or]), type = "n", xlab = "X * beta", ylab = "Prior Probabilities", main = "Plot of Estimates of the Pi function")
#Scott's method
points(xaxs[or], scott_res$priorprob[or], type = "l")
#gridy_res
points(xaxs[or], gridy_res$p[or], type = "l", col = "blue")
#m2_res
#points(xaxs[or], m2_res$p[or], type = "l", col = "purple")
#am1_res
points(xaxs[or], am1_res$p[or], type = "l", col = "green")
#am2_res
points(xaxs[or], am2_res$p[or], type = "l", col = "red")
legend("topleft", c("Scott's Method", "Marginal One", "Full MLE - M1", "Full MLE - M2"), lty = c(1, 1, 1, 1), lwd = c(1, 1, 1, 1), col = c("black", "blue", "green", "red"))
#Understanding the rejections:
#Finding the rejection sets for each of the methods (nominal level is 0.1)
#Scott's method:
scott_rejects = which(getFDR(scott_res$postprob)$FDR <= 0.1 & ddfull$z > 0)
length(scott_rejects)
#Sujayam's function
# Rejection set from local false discovery rates (step-up rule): sort the
# lfdr values, find the largest k whose running mean is still at or below the
# nominal FDR, and reject every hypothesis whose lfdr is at or below that
# cutoff.
#
# @param lfdr        Numeric vector of local FDR values, one per hypothesis.
# @param fdr_nominal Target FDR level (default 0.1).
# @return Integer vector of indices into `lfdr` to reject; length 0 when no
#         rejection keeps the estimated FDR at or below `fdr_nominal`.
lfdr_reject <- function(lfdr, fdr_nominal = 0.1) {
  sl <- sort(lfdr)
  # Running mean of the k smallest lfdrs estimates the FDR incurred by
  # rejecting exactly those k hypotheses.
  k <- sum(cumsum(sl) / seq_along(sl) <= fdr_nominal)
  # Return the value explicitly: the original ended on an assignment, whose
  # value is only returned invisibly. which() yields integers, so the empty
  # case returns integer(0) for type consistency.
  if (k > 0) which(lfdr <= sl[k]) else integer(0)
}
#gridy_res
gridy_f1_zscaled = sapply(gridy_res$atoms, function(ai) dnorm(zscaled - ai)) %*% gridy_res$probs
gridy_lfdr = (1 - gridy_res$p) * dnorm(zscaled)/((1 - gridy_res$p) * dnorm(zscaled) + gridy_res$p * gridy_f1_zscaled)
gridy_rejects = lfdr_reject(gridy_lfdr)
length(gridy_rejects)
#Slightly fewer rejections compared to Scott's method (makes sense from the plot of the pi's)
#m2_res
m2_f1_zscaled = sapply(m2_res$atoms, function(ai) dnorm(zscaled - ai)) %*% m2_res$probs
m2_lfdr = (1 - m2_res$p) * dnorm(zscaled)/((1 - m2_res$p) * dnorm(zscaled) + m2_res$p * m2_f1_zscaled)
m2_rejects = lfdr_reject(m2_lfdr)
length(m2_rejects)
#This does not work at all.
#am1_res
am1_f1_zscaled = sapply(am1_res$atoms, function(ai) dnorm(zscaled - ai)) %*% am1_res$probs
am1_lfdr = (1 - am1_res$p) * dnorm(zscaled)/((1 - am1_res$p) * dnorm(zscaled) + am1_res$p * am1_f1_zscaled)
am1_rejects = lfdr_reject(am1_lfdr)
length(am1_rejects)
#This (and the next method) unsurprisingly give the most rejections.
#am2_res
am2_f1_zscaled = sapply(am2_res$atoms, function(ai) dnorm(zscaled - ai)) %*% am2_res$probs
am2_lfdr = (1 - am2_res$p) * dnorm(zscaled)/((1 - am2_res$p) * dnorm(zscaled) + am2_res$p * am2_f1_zscaled)
am2_rejects = lfdr_reject(am2_lfdr)
length(am2_rejects)
#This is identical to am1.
c(length(scott_rejects), length(gridy_rejects), length(m2_rejects), length(am1_rejects), length(am2_rejects))
#Additional Rejects (of Full MLE compared to Local FDR without Covariates)
noco_rejects = which(BFDR$FDR <= 0.1 & z > 0)
par(mfrow = c(2, 1))
sid = setdiff(am1_rejects, noco_rejects)
plot(X[,1], X[,2], xlab = "Inter-neuron Distances (X1)", ylab = "Tuning Curve Correlation (X2)", main = "Extra rejections compared to No Covariate method (575)", type = "n" )
points(X[sid, 1], X[sid, 2])
length(sid)
sid = setdiff(noco_rejects, am1_rejects)
plot(X[,1], X[,2], xlab = "Inter-neuron Distances (X1)", ylab = "Tuning Curve Correlation (X2)", main = "Rejected by the No-Covariate method but not by our method (52)", type = "n" )
points(X[sid, 1], X[sid, 2])
length(sid)
#Histogram
par(mfrow = c(1, 1))
sid = setdiff(am1_rejects, noco_rejects)
hist(zscaled[sid], breaks = 150)
plot(X[,1], X[,2], xlab = "Inter-neuron Distances (X1)", ylab = "Tuning Curve Correlation (X2)", main = "Extra rejections compared to No Covariate method", type = "n" )
points(X[sid, 1], X[sid, 2])
#Additional rejects (of full MLE compared to Scott's method)
par(mfrow = c(2, 1))
sid = setdiff(am1_rejects, scott_rejects)
plot(X[,1], X[,2], xlab = "Inter-neuron Distances (X1)", ylab = "Tuning Curve Correlation (X2)", main = "Extra rejections compared to Scott's method (276)", type = "n" )
points(X[sid, 1], X[sid, 2])
length(sid)
sid = setdiff(scott_rejects, am1_rejects)
plot(X[,1], X[,2], xlab = "Inter-neuron Distances (X1)", ylab = "Tuning Curve Correlation (X2)", main = "Rejected by Scott's method but not by our method (8)", type = "n" )
points(X[sid, 1], X[sid, 2])
length(sid)
#Rejection Regions of the four methods (Scott et al, no covariate lfdr, marginal one and am1)
par(mfrow = c(2, 2))
si = noco_rejects
length(si)
plot(X[,1], X[,2], xlab = "Inter-neuron Distances (X1)", ylab = "Tuning Curve Correlation (X2)", main = "Rejections of the No-Covariate method (497)", type = "n")
points(X[si, 1], X[si, 2])
si = scott_rejects
length(si)
plot(X[,1], X[,2], xlab = "Inter-neuron Distances (X1)", ylab = "Tuning Curve Correlation (X2)", main = "Rejections of the Scott et al. (2015) method (752)", type = "n")
points(X[si, 1], X[si, 2])
si = gridy_rejects
length(si)
plot(X[,1], X[,2], xlab = "Inter-neuron Distances (X1)", ylab = "Tuning Curve Correlation (X2)", main = "Rejections of the First Marginal Method (722)", type = "n")
points(X[si, 1], X[si, 2])
si = am1_rejects
length(si)
plot(X[,1], X[,2], xlab = "Inter-neuron Distances (X1)", ylab = "Tuning Curve Correlation (X2)", main = "Rejections of the Full MLE initialized by M1 (1020)", type = "n")
points(X[si, 1], X[si, 2])
par(mfrow = c(1, 1))
#Box Plot of the estimated pi values:
boxplot(cbind(scott_res$priorprob, gridy_res$p, am1_res$p, am2_res$p), names = c("Scott", "Marg 1", "Full MLE - 1", "Full MLE - 2"), main = "Box Plots of the Estimated Pi values")
abline(h = 1 - fhat$pi0, col = "red") #This is the value of the pi for the no-covariate local FDR method.
|
# Read a directory of documents (with OCR fallback) into a tibble and parse a
# dd.mm.yyyy date out of each document's content, falling back to the title
# for rows whose content holds no parsable date.
#
# @param file     Directory path handed to textreadr::read_dir().
# @param location If TRUE, also extract a coarse source tag
#                 (ANKARA/GENEL/Istanbul variants/DIYANET) from the content
#                 into a `location` column placed after `date`.
# @return A tibble with columns date, (optionally location,) title, content,
#         sorted by date descending. Rows whose date cannot be parsed from
#         either content or title are kept with date = NA.
doc_extract <- function(file, location = FALSE) {
  # library() errors loudly when a dependency is missing; require() would
  # only return FALSE and let the function fail later with a confusing error.
  library(textreadr)
  library(tidyverse)

  data_df <-
    read_dir(file, combine = TRUE, ocr = TRUE, doc.col = "title") %>%
    as_tibble()

  # The "." in the regex is deliberately a wildcard so dates using ".", "-"
  # or "/" separators all match; "%." in the readr format skips that single
  # separator character when parsing.
  data_df <-
    data_df %>%
    mutate(date = str_extract(content, "\\d{2}.\\d{2}.\\d{4}") %>%
             parse_date("%d%.%m%.%Y")) %>%
    select(date, title, content)

  # Second pass: for rows whose content yielded no date, try the title.
  date_nas <-
    data_df %>%
    filter(is.na(date)) %>%
    mutate(date = str_extract(title, "\\d{2}.\\d{2}.\\d{4}") %>%
             readr::parse_date("%d%.%m%.%Y"))

  # Recombine: rows with a content-derived date plus the title-rescued rows.
  data_df <-
    data_df %>%
    filter(is.finite(date)) %>%
    bind_rows(date_nas) %>%
    arrange(desc(date))

  if (location) {
    data_df <-
      data_df %>%
      mutate(location = str_extract(content, "ANKARA|GENEL|((?i)[Iİiı]stanbul)|DİYANET|Diyanet"),
             .after = date)
  }
  return(data_df)
}
| /preprocessing/doc_extract.R | no_license | ihsankahveci/pols559_final | R | false | false | 882 | r | doc_extract = function(file, location=FALSE){
require(textreadr)
require(tidyverse)
data_df <-
read_dir(file, combine=TRUE, ocr = TRUE, doc.col = "title") %>%
as_tibble()
data_df <-
data_df %>%
mutate(date = str_extract(content, "\\d{2}.\\d{2}.\\d{4}") %>%
parse_date("%d%.%m%.%Y")) %>%
select(date, title, content)
date_nas <-
data_df %>%
filter(date %>% is.na()) %>%
mutate(date = str_extract(title, "\\d{2}.\\d{2}.\\d{4}") %>%
readr::parse_date("%d%.%m%.%Y"))
data_df <-
data_df %>%
filter(is.finite(date)) %>%
bind_rows(date_nas) %>%
arrange(desc(date))
if(location){
data_df <-
data_df %>%
mutate(location = str_extract(content, "ANKARA|GENEL|((?i)[Iİiı]stanbul)|DİYANET|Diyanet"),
.after = date)
}
return(data_df)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AOO_functions.R
\name{createGrid}
\alias{createGrid}
\title{Create empty Area of Occupancy (AOO) Grid.}
\usage{
createGrid(input.data, grid.size)
}
\arguments{
\item{input.data}{Object of an ecosystem or species distribution. Accepts either
raster or spatial points formats. Please use a CRS with units measured in
metres.}
\item{grid.size}{A number specifying the width of the desired grid square (in
same units as your coordinate reference system)}
}
\value{
A regular grid raster with extent \code{input.data} and grid size
\code{grid.size}. Each grid square has a unique identification number.
}
\description{
\code{createGrid} produces an empty grid that can be used as the basis for
computing AOO.
}
\references{
Bland, L.M., Keith, D.A., Miller, R.M., Murray, N.J. and
Rodriguez, J.P. (eds.) 2016. Guidelines for the application of IUCN Red
List of Ecosystems Categories and Criteria, Version 1.0. Gland,
Switzerland: IUCN. ix + 94pp. Available at the following web site:
\url{https://iucnrle.org/}
}
\seealso{
Other AOO functions: \code{\link{getAOOSilent}},
\code{\link{getAOO}}, \code{\link{makeAOOGrid}}
}
\author{
Nicholas Murray \email{murr.nick@gmail.com}, Calvin Lee
\email{calvinkflee@gmail.com}
}
| /man/createGrid.Rd | no_license | dondealban/redlistr | R | false | true | 1,307 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AOO_functions.R
\name{createGrid}
\alias{createGrid}
\title{Create empty Area of Occupancy (AOO) Grid.}
\usage{
createGrid(input.data, grid.size)
}
\arguments{
\item{input.data}{Object of an ecosystem or species distribution. Accepts either
raster or spatial points formats. Please use a CRS with units measured in
metres.}
\item{grid.size}{A number specifying the width of the desired grid square (in
same units as your coordinate reference system)}
}
\value{
A regular grid raster with extent \code{input.data} and grid size
\code{grid.size}. Each grid square has a unique identification number.
}
\description{
\code{createGrid} produces empty grid which can be used as the basis to help
compute AOO.
}
\references{
Bland, L.M., Keith, D.A., Miller, R.M., Murray, N.J. and
Rodriguez, J.P. (eds.) 2016. Guidelines for the application of IUCN Red
List of Ecosystems Categories and Criteria, Version 1.0. Gland,
Switzerland: IUCN. ix + 94pp. Available at the following web site:
\url{https://iucnrle.org/}
}
\seealso{
Other AOO functions: \code{\link{getAOOSilent}},
\code{\link{getAOO}}, \code{\link{makeAOOGrid}}
}
\author{
Nicholas Murray \email{murr.nick@gmail.com}, Calvin Lee
\email{calvinkflee@gmail.com}
}
|
# Read every PDF listed in `diretorioLISTA` with lerPdf() and row-bind the
# results into a single data frame.
#
# @param diretorioLISTA Character vector of PDF file paths.
# @return One data.frame containing the rows of all PDFs; an empty
#         data.frame when the input vector is empty.
leitorPDF <- function(diretorioLISTA) {
  if (length(diretorioLISTA) == 0) {
    return(data.frame())
  }
  # Collect one data.frame per file, then bind once at the end: calling
  # rbind() inside the loop copies the accumulator on every iteration
  # (quadratic time). lapply() also avoids the unsafe 1:length() sequence.
  partes <- lapply(diretorioLISTA, lerPdf)
  dfTotal <- do.call(rbind.data.frame, partes)
  return(dfTotal)
}
#diretorioLISTA <- c("C:\\Users\\Jordão Alves\\Desktop\\analisarAtestadoGratificacao\\2001-2010.pdf",
# "C:\\Users\\Jordão Alves\\Desktop\\analisarAtestadoGratificacao\\2011-2020.pdf")
#a <-leitorPDF(diretorioLISTA)
#legal <- tabelaContribuicaoTotalCompleta(a)
#tabelaValoresIncidentes(a)
| /leitorPDF.R | no_license | jordaoalves/Analisar-Gratificacoes---IPERN | R | false | false | 572 | r | leitorPDF <- function(diretorioLISTA){
dfTotal <- data.frame()
for (index in 1:length(diretorioLISTA)) {
df<- lerPdf(diretorioLISTA[index])
dfTotal <- rbind.data.frame(dfTotal,df)
} # end for
return(dfTotal)
}
#diretorioLISTA <- c("C:\\Users\\Jordão Alves\\Desktop\\analisarAtestadoGratificacao\\2001-2010.pdf",
# "C:\\Users\\Jordão Alves\\Desktop\\analisarAtestadoGratificacao\\2011-2020.pdf")
#a <-leitorPDF(diretorioLISTA)
#legal <- tabelaContribuicaoTotalCompleta(a)
#tabelaValoresIncidentes(a)
|
#!/usr/bin/env Rscript
args = commandArgs(trailingOnly=TRUE)
# Summarizes sequencing coverage from BAM summary data produced by SummarizeBAM version 1.21
# and AnnotateVariants version 1.13.
# Usage: SummarizeCoverage.r <input summary file> <output summary file> <min coverage>
library(tidyverse)
# Verify that the correct number of arguments are given.
if(length(args)!=3){
  stop("The following arguments must be supplied: input summary file, output variants file,
       and minimum coverage.",
       call.=FALSE)
}
# Positional arguments: annotated per-site variant file, destination for the
# per-chromosome coverage summary, and the threshold used for
# PercentSitesAboveMin.
VariantFile <- args[1]
OutputFile <- args[2]
MinCoverage <- as.numeric(args[3])
# Read in variant file (one row per sample/site/base with annotation columns;
# a site annotated in two overlapping genes appears on multiple rows).
Data <- read.table(VariantFile, header=FALSE, stringsAsFactors = FALSE)
colnames(Data) <- c("Chr","Pos","Base","RefBase","GenomePos","Count","AvgQ","AvgReadPos",
"Gene","Codon","RefAA","AltAA","Syn","FourfoldSyn","RefCodon","AltCodon","CodonPos",
"Sample")
# Calculate sequencing coverage, taking care not to double coverage
# due to annotations of overlapping genes: averaging Count within
# Sample/Chr/GenomePos/Base collapses duplicate gene annotations of the same
# base call, and only then are per-base counts summed into per-site Coverage.
Data <- Data %>%
  group_by(Sample, Chr, GenomePos, Base) %>% summarize(Count=mean(Count)) %>%
  ungroup() %>% group_by(Sample, Chr, GenomePos) %>%
  summarize(Coverage=sum(Count))
# Summarize sequencing coverage at each chromosome: mean, median, IQR,
# min/max, and the fraction of sites with coverage strictly above MinCoverage.
DataSummary <- Data %>%
  ungroup() %>% group_by(Sample, Chr) %>%
  summarize(MeanCov=mean(Coverage), MedCov=median(Coverage),
            IQRCov=quantile(Coverage,0.75)-quantile(Coverage,0.25),
            MinCov=min(Coverage), MaxCov=max(Coverage),
            PercentSitesAboveMin=sum(Coverage>MinCoverage)/n())
# Export coverage summary as tab-separated text without a header row.
write.table(DataSummary,
            file=OutputFile, quote=FALSE, sep='\t',
            row.names=FALSE, col.names=FALSE)
| /scripts/SummarizeCoverage/SummarizeCoverage.r | no_license | ksxue/within-vs-between-hosts-influenza | R | false | false | 1,655 | r | #!/usr/bin/env Rscript
args = commandArgs(trailingOnly=TRUE)
# Summarizes sequencing coverage from BAM summary data produced by SummarizeBAM version 1.21
# and AnnotateVariants version 1.13.
library(tidyverse)
# Verify that the correct number of arguments are given.
if(length(args)!=3){
stop("The following arguments must be supplied: input summary file, output variants file,
and minimum coverage.",
call.=FALSE)
}
VariantFile <- args[1]
OutputFile <- args[2]
MinCoverage <- as.numeric(args[3])
# Read in variant file.
Data <- read.table(VariantFile, header=FALSE, stringsAsFactors = FALSE)
colnames(Data) <- c("Chr","Pos","Base","RefBase","GenomePos","Count","AvgQ","AvgReadPos",
"Gene","Codon","RefAA","AltAA","Syn","FourfoldSyn","RefCodon","AltCodon","CodonPos",
"Sample")
# Calculate sequencing coverage, taking care not to double coverage
# due to annotations of overlapping genes.
Data <- Data %>%
group_by(Sample, Chr, GenomePos, Base) %>% summarize(Count=mean(Count)) %>%
ungroup() %>% group_by(Sample, Chr, GenomePos) %>%
summarize(Coverage=sum(Count))
# Summarize sequencing coverage at each chromosome.
DataSummary <- Data %>%
ungroup() %>% group_by(Sample, Chr) %>%
summarize(MeanCov=mean(Coverage), MedCov=median(Coverage),
IQRCov=quantile(Coverage,0.75)-quantile(Coverage,0.25),
MinCov=min(Coverage), MaxCov=max(Coverage),
PercentSitesAboveMin=sum(Coverage>MinCoverage)/n())
# Export coverage summary.
write.table(DataSummary,
file=OutputFile, quote=FALSE, sep='\t',
row.names=FALSE, col.names=FALSE)
|
# Faktoren
object.size(c("test","test2","test"))
object.size(as.factor(c("test","test1","test")))
object.size(rep(c("test","test2","test"),10000))
object.size(rep(as.factor(c("test","test2","test")),10000))
# Erst installieren
# library(ggplot2)
library(foreign)
# fertige Daten sind oft data frames:
titanic <- read.dta("http://www.stata-press.com/data/kkd/titanic2.dta")
is.data.frame(titanic)
head(titanic)
# Structure
str(titanic)
# Data frame ist ein hierarchisches Objekt welches Vektoren enthaelt
titanic$age2
summary(titanic)
# Alles ist eigentlich ein Vektor
# Die Frage ist die Anzahl der Dimensionen
dim(titanic)
# Namen von Datensaetzen
names(titanic)
# Koennen auch ueberschrieben werden
# names(titanic) <- c("a", "b")
# names(titanic)[3] <- "age"
# Wir wollen kein offenes Fenster, aber es wuerde geben:
# View(titanic)
# man kann sich auch leicht selber einen bauen
obst <- c("Apfel","Apfel","Birne")
gemuese <- c("Tomate","Karotte","Karotte")
id <- 1:3
df <- data.frame(id, obst, gemuese)
df
# $ = gehe in dem Objekt eine Ebene tiefer
# Ansteuern von Zeilen und Spaltenpositionen
df$obst
df[,"obst"]
df[3,"gemuese"]
df[3,3] # Zeilen zuerst, Spalte später
# auch mehrere Spalten moeglich
df[,c("gemuese","obst")]
# Namen
row.names(df)
# which
# which(LETTERS == "R")
# If Argument
x <- -5
x
if(x <= 0) {
y <- 1
} else {
y <- 0
}
y
# Kombinierte Bedingungen
x <- 1
if(x == 0 | x == 1) { #oder
y <- 1
} else {
y <- 0
}
y
# ifelse
ifelse(1 <= 2, 1, 0)
x <- 10
if(x<= -10 & x>= 10) { #und
y <- 1 } else {
y <- 0
}
y
# For Schleife
for (x in 1:10) print(sqrt(x))
AA <- "bar"
switch(AA,
"foo"={
# case 'foo' here...
print('foo')
},
"bar"={
# case 'bar' here...
print('bar')
},
{
print('default')
}
)
# For Schleife
x <- 0
for(i in 1:10) {
x <- x+i
print(x)
}
# x wird immer weiter inkrementiert
for(x in seq(1,100, .2))
{
print(x)
}
# Sequenzen können auch Character sein:
namen <- c("Alfred","Jakob","Peter")
for (name in namen) {
print(paste("Hallo",name))
}
# While Schleife
x <- 0
while(x<13) {
x <- x+1
print(x)
} # solange x<13=TRUE
# Durch Spalten loopen
for (column in 2:6) { # this loop runs through 2 to 6
print(names(swiss)[column])
print(mean(swiss[,column]))
}
#Durch Datensätze loopen
for (dataset in c(data1, data2, data3)) {
# Anweisungen,
# z.B. Datenbereinigung, Appending (rbind), Modellschätzungen, etc.
}
# Viel performanter
apply(swiss[,2:6],2,mean)
mean(titanic$age2[titanic$sex == "man"])
mean(titanic$age2[titanic$sex == "women"])
# 13.2 Übung
# 1
x <- 60
if(x < -50 | x > 50){
y <- 10
} else {
y <- 0
}
# 2
for (x in 1:10){
werte <- rnorm(100)
print(paste("Mittelwert", round(mean(werte), digits = 2)))
print(paste("Standartabweichung", round(sd(werte), digits = 2)))
}
# Draw 100 standard-normal observations and print their mean and standard
# deviation. Called for its printing side effect; returns the printed string.
showMean <- function() {
  observations <- rnorm(100)
  m <- mean(observations)
  s <- sd(observations)
  print(paste("mean: ", m, " std: ", s))
}
# BUG FIX: rep(showMean(), 10) evaluates showMean() only ONCE and then
# repeats its return value ten times; replicate() re-evaluates the call
# ten times, which is what the exercise asks for.
invisible(replicate(10, showMean()))
x = 1
y = 1
while(y < 1000){
x <- x+1
y <- x^3
print(paste("x:", x, " x^3: ", y))
}
x <- matrix(rnorm(1000), ncol = 10)
# E.g., for a matrix 1 indicates rows, 2 indicates columns,
apply(x, 2, mean)
test <- matrix(c(1,2,3,4,5,6,7,8,9), ncol = 3)
apply(test, 2, max)
apply(test, 1, max)
# 14.2
# 1
# Exercise 14.2 (1): shift each element by 2, then square it.
# Vectorized: operates element-wise on numeric vectors.
add2AndSquare <- function(x) {
  shifted <- x + 2
  shifted^2
}
add2AndSquare(1:10)
# 2
# Exercise 14.2 (2): weighted arithmetic mean of x with weights y.
# Gives the same result as stats::weighted.mean(x, y).
weightedMean <- function(x, y) {
  total_weight <- sum(y)
  sum(x * y) / total_weight
}
# Scripts laden
#source("C:\pfad\script.R")
weightedMean(c(1,2,3,4,5), c(2,4,5,6,7))
weighted.mean(c(1,2,3,4,5), c(2,4,5,6,7))
# help.search("mean")
# ??mean
# zwei fiktive Vektoren erstellen
x <- c(10,20,30,40,30,10,20,30,40,30,10,20,10)
y <- c(2,5,3,5,3,5,1,6,3,4,5,1,1)
# Tabelle
table(x)
table(x,y)
# Chi2-Test
chisq.test(table(x,y))
# Tabelle in Prozent
100*prop.table(table(x))
100*prop.table(table(x,y))
round(100*prop.table(table(x,y)), 2) # gerundete Werte
# Mean
mean(x)
# Median
median(x)
sort(x)
# mehrere Statistiken in einem Vektor
c(mean=mean(x), median=median(x), stddev=sd(x), min=min(x), max=max(x))
# gibt es verkürzt über die generische Funktion summary
summary(x)
# Korrelation zwischen Vektoren
cor(x,y)
cor(x,y, method="spearman") #Rangkorrelation
cov(x,y)
# Mittelwertvergleich
t.test(x,y)
| /CAS Datenanalyse/Tooling & Datamanagement/lesson3.R | no_license | huli/mas-data-science | R | false | false | 4,654 | r |
# Faktoren
object.size(c("test","test2","test"))
object.size(as.factor(c("test","test1","test")))
object.size(rep(c("test","test2","test"),10000))
object.size(rep(as.factor(c("test","test2","test")),10000))
# Erst installieren
# library(ggplot2)
library(foreign)
# fertige Daten sind oft data frames:
titanic <- read.dta("http://www.stata-press.com/data/kkd/titanic2.dta")
is.data.frame(titanic)
head(titanic)
# Structure
str(titanic)
# Data frame ist ein hierarchisches Objekt welches Vektoren enthaelt
titanic$age2
summary(titanic)
# Alles ist eigentlich ein Vektor
# Die Frage ist die Anzahl der Dimensionen
dim(titanic)
# Namen von Datensaetzen
names(titanic)
# Koennen auch ueberschrieben werden
# names(titanic) <- c("a", "b")
# names(titanic)[3] <- "age"
# Wir wollen kein offenes Fenster, aber es wuerde geben:
# View(titanic)
# man kann sich auch leicht selber einen bauen
obst <- c("Apfel","Apfel","Birne")
gemuese <- c("Tomate","Karotte","Karotte")
id <- 1:3
df <- data.frame(id, obst, gemuese)
df
# $ = gehe in dem Objekt eine Ebene tiefer
# Ansteuern von Zeilen und Spaltenpositionen
df$obst
df[,"obst"]
df[3,"gemuese"]
df[3,3] # Zeilen zuerst, Spalte später
# auch mehrere Spalten moeglich
df[,c("gemuese","obst")]
# Namen
row.names(df)
# which
# which(LETTERS == "R")
# If Argument
x <- -5
x
if(x <= 0) {
y <- 1
} else {
y <- 0
}
y
# Kombinierte Bedingungen
x <- 1
if(x == 0 | x == 1) { #oder
y <- 1
} else {
y <- 0
}
y
# ifelse
ifelse(1 <= 2, 1, 0)
x <- 10
if(x<= -10 & x>= 10) { #und
y <- 1 } else {
y <- 0
}
y
# For Schleife
for (x in 1:10) print(sqrt(x))
AA <- "bar"
switch(AA,
"foo"={
# case 'foo' here...
print('foo')
},
"bar"={
# case 'bar' here...
print('bar')
},
{
print('default')
}
)
# For Schleife
x <- 0
for(i in 1:10) {
x <- x+i
print(x)
}
# x wird immer weiter inkrementiert
for(x in seq(1,100, .2))
{
print(x)
}
# Sequenzen können auch Character sein:
namen <- c("Alfred","Jakob","Peter")
for (name in namen) {
print(paste("Hallo",name))
}
# While Schleife
x <- 0
while(x<13) {
x <- x+1
print(x)
} # solange x<13=TRUE
# Durch Spalten loopen
for (column in 2:6) { # this loop runs through 2 to 6
print(names(swiss)[column])
print(mean(swiss[,column]))
}
#Durch Datensätze loopen
for (dataset in c(data1, data2, data3)) {
# Anweisungen,
# z.B. Datenbereinigung, Appending (rbind), Modellschätzungen, etc.
}
# Viel performanter
apply(swiss[,2:6],2,mean)
mean(titanic$age2[titanic$sex == "man"])
mean(titanic$age2[titanic$sex == "women"])
# 13.2 Übung
# 1
x <- 60
if(x < -50 | x > 50){
y <- 10
} else {
y <- 0
}
# 2
for (x in 1:10){
werte <- rnorm(100)
print(paste("Mittelwert", round(mean(werte), digits = 2)))
print(paste("Standartabweichung", round(sd(werte), digits = 2)))
}
showMean <- function(){
observations <- rnorm(100)
m <- mean(observations)
s <- sd(observations)
print(paste("mean: ", m, " std: ", s))
}
rep(showMean(), 10)
# 3
x = 1
y = 1
while(y < 1000){
x <- x+1
y <- x^3
print(paste("x:", x, " x^3: ", y))
}
x <- matrix(rnorm(1000), ncol = 10)
# E.g., for a matrix 1 indicates rows, 2 indicates columns,
apply(x, 2, mean)
test <- matrix(c(1,2,3,4,5,6,7,8,9), ncol = 3)
apply(test, 2, max)
apply(test, 1, max)
# 14.2
# 1
add2AndSquare <- function(x){
(x+2)^2
}
add2AndSquare(1:10)
# 2
weightedMean <- function(x, y){
sum(x*y)/ sum(y)
}
# Scripts laden
#source("C:\pfad\script.R")
weightedMean(c(1,2,3,4,5), c(2,4,5,6,7))
weighted.mean(c(1,2,3,4,5), c(2,4,5,6,7))
# help.search("mean")
# ??mean
# zwei fiktive Vektoren erstellen
x <- c(10,20,30,40,30,10,20,30,40,30,10,20,10)
y <- c(2,5,3,5,3,5,1,6,3,4,5,1,1)
# Tabelle
table(x)
table(x,y)
# Chi2-Test
chisq.test(table(x,y))
# Tabelle in Prozent
100*prop.table(table(x))
100*prop.table(table(x,y))
round(100*prop.table(table(x,y)), 2) # gerundete Werte
# Mean
mean(x)
# Median
median(x)
sort(x)
# mehrere Statistiken in einem Vektor
c(mean=mean(x), median=median(x), stddev=sd(x), min=min(x), max=max(x))
# gibt es verkürzt über die generische Funktion summary
summary(x)
# Korrelation zwischen Vektoren
cor(x,y)
cor(x,y, method="spearman") #Rangkorrelation
cov(x,y)
# Mittelwertvergleich
t.test(x,y)
|
#!/usr/bin/env Rscript
### run ./ReadLengthPlot.r
library("RColorBrewer")
#library(ggplot2)
start= 0.4
step= 0.2
end = 0.8
count = (end - start) / step + 1
#dir <- "/cbcb/project-scratch/fdorri/Code/methylFlow/testing/cpg/"
wdir <- "/Users/faezeh/Projects/methylFlow/exps/compelet/"
#data[1]= 1
#wdir <- getwd();
print(wdir)
##### reading files ##################
dir_hard <- paste(wdir,"hard-Auto/",sep="");
avg_hard <- read.table(paste(dir_hard,"evalAvg.txt",sep=""), sep="\t", row.names=NULL, header = TRUE)
mcf_hard <- read.table(paste(dir_hard,"mcf.txt",sep=""), sep="\t", row.names=NULL, header = TRUE)
dir_moderate <- paste(wdir,"moderate-Auto/",sep="");
avg_moderate <- read.table(paste(dir_moderate,"evalAvg.txt",sep=""), sep="\t", row.names=NULL, header = TRUE)
mcf_moderate <- read.table(paste(dir_moderate,"mcf.txt",sep=""), sep="\t", row.names=NULL, header = TRUE)
dir_simple <- paste(wdir,"simple-Auto/",sep="");
avg_simple <- read.table(paste(dir_simple,"evalAvg.txt",sep=""), sep="\t", row.names=NULL, header = TRUE)
mcf_simple <- read.table(paste(dir_simple,"mcf.txt",sep=""), sep="\t", row.names=NULL, header = TRUE)
#dir <- paste(wdir, "/allInOneFig/", sep="")
dir <- file.path(wdir, "allInOne")
####### different plots for readLength ##############################################
xrange_hard <- range(avg_hard$methylCallError)
xrange_moderate <- range(avg_moderate$methylCallError)
xrange_simple <- range(avg_simple$methylCallError)
yrange_hard <- range(avg_hard$abdncError)
yrange_moderate <- range(avg_moderate$abdncError)
yrange_simple <- range(avg_simple$abdncError)
#### plot the abundance Error vs methylcall for simple
print("plot abundance Error vs methylCall error vs different thresholds for simple run of readLength")
pdf(paste(dir,"abdVmethylVthrVreadLenght_Simple.pdf",sep=""), width =3, height =2.5, pointsize=8)
par(mar= c(5,5,2,2))
# get the range for the x and y axis
xrange_min <- min(xrange_hard[1], xrange_moderate[1], xrange_simple[1])
xrange_max <- max(xrange_hard[2], xrange_moderate[2], xrange_simple[2])
yrange_min <- min(yrange_hard[1], yrange_moderate[1], yrange_simple[1])
yrange_max <- max(yrange_hard[2], yrange_moderate[2], yrange_simple[2])
#ntrees <- length(unique(readLengthAvg$threshold))
ntrees <- count
# set up the plot
plot(0, 0,
pch = "",
#yaxt='n',
ylim = c(yrange_min - 0.1, yrange_max + 0.1),
xlim = c(xrange_min - 0.05, xrange_max + 0.1),
xlab="Average MethylCall Error",
ylab="Average Abundance Error",
# ylab="Abundance Error",
cex.lab= 1.2,
# cex.axis = 0.5
)
# add lines
#loess_fit <- loess(avg_simple$abdncError[sel] ~ avg_simple$methylCallError[sel], avg_simple)
#lines(avg_simple$methylCallError[sel], predict(loess_fit), col = colors[i])
points(avg_simple$methylCallError,
avg_simple$abdncError,
col = "dark green",
# pch = pchs[i],
cex = 0.5,
type = "b",
lty = 3,
lwd = 1.0
)
lines(avg_moderate$methylCallError,
avg_moderate$abdncError,
col = "blue",
# pch = pchs[i],
cex = 0.5,
type = "b",
lty = 3,
lwd = 1.0
)
lines(avg_hard$methylCallError,
avg_hard$abdncError,
col = "red",
#pch = pchs[i],
cex = 0.5,
type = "b",
lty = 3,
lwd = 1.0
)
# Legend colors must match the plotted series: the "simple" curve above is
# drawn with col = "dark green", so the legend must use the same color
# (plain "green" was a mismatch).
legend("topright", legend = c("simple", "moderate", "hard"),
       col = c("dark green", "blue", "red"),
       cex = 0.5,
       lty = 3,
       pch = 1,
       lwd = 1.0)
dev.off()
| /compelet/mixedCompeletPlot.r | no_license | hcorrada/methylFlow_analyses | R | false | false | 3,474 | r | #!/usr/bin/env Rscript
### run ./ReadLengthPlot.r
library("RColorBrewer")
#library(ggplot2)
start= 0.4
step= 0.2
end = 0.8
count = (end - start) / step + 1
#dir <- "/cbcb/project-scratch/fdorri/Code/methylFlow/testing/cpg/"
wdir <- "/Users/faezeh/Projects/methylFlow/exps/compelet/"
#data[1]= 1
#wdir <- getwd();
print(wdir)
##### reading files ##################
dir_hard <- paste(wdir,"hard-Auto/",sep="");
avg_hard <- read.table(paste(dir_hard,"evalAvg.txt",sep=""), sep="\t", row.names=NULL, header = TRUE)
mcf_hard <- read.table(paste(dir_hard,"mcf.txt",sep=""), sep="\t", row.names=NULL, header = TRUE)
dir_moderate <- paste(wdir,"moderate-Auto/",sep="");
avg_moderate <- read.table(paste(dir_moderate,"evalAvg.txt",sep=""), sep="\t", row.names=NULL, header = TRUE)
mcf_moderate <- read.table(paste(dir_moderate,"mcf.txt",sep=""), sep="\t", row.names=NULL, header = TRUE)
dir_simple <- paste(wdir,"simple-Auto/",sep="");
# Load the averaged evaluation results and the methyl-call frequency table for
# the "simple" run (the matching "hard"/"moderate" tables and dir_simple/wdir/
# count are defined earlier in this script -- TODO confirm).
avg_simple <- read.table(paste(dir_simple,"evalAvg.txt",sep=""), sep="\t", row.names=NULL, header = TRUE)
mcf_simple <- read.table(paste(dir_simple,"mcf.txt",sep=""), sep="\t", row.names=NULL, header = TRUE)
#dir <- paste(wdir, "/allInOneFig/", sep="")
# Output directory for the combined figure; create it if missing.
# NOTE: the name "dir" shadows base::dir() -- kept for compatibility with the
# rest of this script.
dir <- file.path(wdir, "allInOne")
if (!dir.exists(dir)) dir.create(dir, recursive = TRUE)
####### different plots for readLength ##############################################
# Per-difficulty axis ranges so the three series share one pair of axes.
xrange_hard <- range(avg_hard$methylCallError)
xrange_moderate <- range(avg_moderate$methylCallError)
xrange_simple <- range(avg_simple$methylCallError)
yrange_hard <- range(avg_hard$abdncError)
yrange_moderate <- range(avg_moderate$abdncError)
yrange_simple <- range(avg_simple$abdncError)
#### plot the abundance Error vs methylcall for simple
print("plot abundance Error vs methylCall error vs different thresholds for simple run of readLength")
# BUG FIX: paste(dir, file, sep="") produced ".../allInOneabdV..." (missing
# path separator, since "dir" no longer ends in "/"); use file.path() so the
# PDF lands inside the output folder.
pdf(file.path(dir, "abdVmethylVthrVreadLenght_Simple.pdf"), width = 3, height = 2.5, pointsize = 8)
par(mar = c(5, 5, 2, 2))
# get the union of the x and y ranges over the three difficulty levels
xrange_min <- min(xrange_hard[1], xrange_moderate[1], xrange_simple[1])
xrange_max <- max(xrange_hard[2], xrange_moderate[2], xrange_simple[2])
yrange_min <- min(yrange_hard[1], yrange_moderate[1], yrange_simple[1])
yrange_max <- max(yrange_hard[2], yrange_moderate[2], yrange_simple[2])
#ntrees <- length(unique(readLengthAvg$threshold))
ntrees <- count  # number of thresholds/series; "count" comes from earlier in the script -- TODO confirm
# set up an empty plot frame; the three series are added below
plot(0, 0,
     pch = "",
     ylim = c(yrange_min - 0.1, yrange_max + 0.1),
     xlim = c(xrange_min - 0.05, xrange_max + 0.1),
     xlab = "Average MethylCall Error",
     ylab = "Average Abundance Error",
     cex.lab = 1.2
)
# one dotted point-and-line series per difficulty level
points(avg_simple$methylCallError,
       avg_simple$abdncError,
       col = "dark green",
       cex = 0.5,
       type = "b",
       lty = 3,
       lwd = 1.0
)
lines(avg_moderate$methylCallError,
      avg_moderate$abdncError,
      col = "blue",
      cex = 0.5,
      type = "b",
      lty = 3,
      lwd = 1.0
)
lines(avg_hard$methylCallError,
      avg_hard$abdncError,
      col = "red",
      cex = 0.5,
      type = "b",
      lty = 3,
      lwd = 1.0
)
# BUG FIX: the legend previously used "green" while the series is drawn in
# "dark green"; use the same colour so the legend matches the plot.
legend("topright", legend = c("simple", "moderate", "hard"),
       col = c("dark green", "blue", "red"),
       cex = 0.5,
       lty = 3,
       pch = 1,
       lwd = 1.0)
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shield_operations.R
\name{shield_list_protections}
\alias{shield_list_protections}
\title{Retrieves Protection objects for the account}
\usage{
shield_list_protections(
NextToken = NULL,
MaxResults = NULL,
InclusionFilters = NULL
)
}
\arguments{
\item{NextToken}{When you request a list of objects from Shield Advanced, if the response
does not include all of the remaining available objects, Shield Advanced
includes a \code{NextToken} value in the response. You can retrieve the next
batch of objects by requesting the list again and providing the token
that was returned by the prior call in your request.
You can indicate the maximum number of objects that you want Shield
Advanced to return for a single call with the \code{MaxResults} setting.
Shield Advanced will not return more than \code{MaxResults} objects, but may
return fewer, even if more objects are still available.
Whenever more objects remain that Shield Advanced has not yet returned
to you, the response will include a \code{NextToken} value.
On your first call to a list operation, leave this setting empty.}
\item{MaxResults}{The greatest number of objects that you want Shield Advanced to return
to the list request. Shield Advanced might return fewer objects than you
indicate in this setting, even if more objects are available. If there
are more objects remaining, Shield Advanced will always also return a
\code{NextToken} value in the response.
The default setting is 20.}
\item{InclusionFilters}{Narrows the set of protections that the call retrieves. You can retrieve
a single protection by providing its name or the ARN (Amazon Resource
Name) of its protected resource. You can also retrieve all protections
for a specific resource type. You can provide up to one criteria per
filter type. Shield Advanced returns protections that exactly match all
of the filter criteria that you provide.}
}
\description{
Retrieves Protection objects for the account. You can retrieve all protections or you can provide filtering criteria and retrieve just the subset of protections that match the criteria.
See \url{https://www.paws-r-sdk.com/docs/shield_list_protections/} for full documentation.
}
\keyword{internal}
| /cran/paws.security.identity/man/shield_list_protections.Rd | permissive | paws-r/paws | R | false | true | 2,278 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shield_operations.R
\name{shield_list_protections}
\alias{shield_list_protections}
\title{Retrieves Protection objects for the account}
\usage{
shield_list_protections(
NextToken = NULL,
MaxResults = NULL,
InclusionFilters = NULL
)
}
\arguments{
\item{NextToken}{When you request a list of objects from Shield Advanced, if the response
does not include all of the remaining available objects, Shield Advanced
includes a \code{NextToken} value in the response. You can retrieve the next
batch of objects by requesting the list again and providing the token
that was returned by the prior call in your request.
You can indicate the maximum number of objects that you want Shield
Advanced to return for a single call with the \code{MaxResults} setting.
Shield Advanced will not return more than \code{MaxResults} objects, but may
return fewer, even if more objects are still available.
Whenever more objects remain that Shield Advanced has not yet returned
to you, the response will include a \code{NextToken} value.
On your first call to a list operation, leave this setting empty.}
\item{MaxResults}{The greatest number of objects that you want Shield Advanced to return
to the list request. Shield Advanced might return fewer objects than you
indicate in this setting, even if more objects are available. If there
are more objects remaining, Shield Advanced will always also return a
\code{NextToken} value in the response.
The default setting is 20.}
\item{InclusionFilters}{Narrows the set of protections that the call retrieves. You can retrieve
a single protection by providing its name or the ARN (Amazon Resource
Name) of its protected resource. You can also retrieve all protections
for a specific resource type. You can provide up to one criteria per
filter type. Shield Advanced returns protections that exactly match all
of the filter criteria that you provide.}
}
\description{
Retrieves Protection objects for the account. You can retrieve all protections or you can provide filtering criteria and retrieve just the subset of protections that match the criteria.
See \url{https://www.paws-r-sdk.com/docs/shield_list_protections/} for full documentation.
}
\keyword{internal}
|
library("data.table")
# Raw data lives in the course download directory.
setwd("D:/machine learning using R/datasciencejhon/exdata_data_household_power_consumption")
# Read the full household power file; "?" marks missing values.
powerDT <- data.table::fread(input = "household_power_consumption.txt", na.strings = "?")
# Coerce the target column to numeric (prevents scientific-notation text values).
powerDT[, Global_active_power := as.numeric(Global_active_power)]
# Combine Date and Time into one POSIXct timestamp for filtering and plotting.
powerDT[, dateTime := as.POSIXct(paste(Date, Time), format = "%d/%m/%Y %H:%M:%S")]
# Keep only 2007-02-01 and 2007-02-02.
powerDT <- powerDT[dateTime >= "2007-02-01" & dateTime < "2007-02-03"]
# Render plot 3: the three sub-metering series over time.
png("plot3.png", width = 480, height = 480)
plot(powerDT$dateTime, powerDT$Sub_metering_1, type = "l",
     xlab = "", ylab = "Energy sub metering")
lines(powerDT$dateTime, powerDT$Sub_metering_2, col = "red")
lines(powerDT$dateTime, powerDT$Sub_metering_3, col = "blue")
legend("topright",
       c("Sub_metering_1 ", "Sub_metering_2 ", "Sub_metering_3 "),
       col = c("black", "red", "blue"),
       lty = c(1, 1), lwd = c(1, 1))
dev.off() | /exploratory data analysis/plot3.R | no_license | satish605/datasciencejhon | R | false | false | 1,183 | r | library("data.table")
# Working directory containing the raw course dataset.
setwd("D:/machine learning using R/datasciencejhon/exdata_data_household_power_consumption")
# Load the power-consumption table, treating "?" as NA.
powerDT <- data.table::fread(input = "household_power_consumption.txt", na.strings = "?")
# Force numeric storage for the target column.
powerDT[, Global_active_power := as.numeric(Global_active_power)]
# Build a POSIXct timestamp from the separate Date and Time columns.
powerDT[, dateTime := as.POSIXct(paste(Date, Time), format = "%d/%m/%Y %H:%M:%S")]
# Restrict to the two target days (2007-02-01 and 2007-02-02).
powerDT <- powerDT[dateTime >= "2007-02-01" & dateTime < "2007-02-03"]
# Plot 3: sub-metering channels 1-3 against time.
png("plot3.png", width = 480, height = 480)
plot(powerDT$dateTime, powerDT$Sub_metering_1, type = "l",
     xlab = "", ylab = "Energy sub metering")
lines(powerDT$dateTime, powerDT$Sub_metering_2, col = "red")
lines(powerDT$dateTime, powerDT$Sub_metering_3, col = "blue")
legend("topright",
       c("Sub_metering_1 ", "Sub_metering_2 ", "Sub_metering_3 "),
       col = c("black", "red", "blue"),
       lty = c(1, 1), lwd = c(1, 1))
dev.off() |
#' Compiled datasets of HLA class I and class II epitopes associated with autoimmune diseases.
#'
#' Datasets were compiled from source files attached in the package.
#'
#' @docType data
#' @keywords datasets
#' @name Dataset
#' @rdname Dataset
"autoimmunityDT"
# The three summary tables below share the single "Dataset" help page via
# @rdname. Presumably they summarise autoimmunityDT by disease/HLA/peptide --
# TODO confirm their exact contents.
#' @name Dataset
#' @rdname Dataset
"SummaryDF_Disease_HLA"
#' @name Dataset
#' @rdname Dataset
"SummaryDF_Disease_HLAString"
#' @name Dataset
#' @rdname Dataset
"SummaryDF_Disease_Peptide"
| /R/Datasets.R | permissive | masato-ogishi/DPA | R | false | false | 478 | r | #' Compiled datasets of HLA class I and class II epitopes associated with autoimmune diseases.
#'
#' Datasets were compiled from source files attached in the package.
#'
#' @docType data
#' @keywords datasets
#' @name Dataset
#' @rdname Dataset
"autoimmunityDT"
# The three summary tables below share the single "Dataset" help page via
# @rdname. Presumably they summarise autoimmunityDT by disease/HLA/peptide --
# TODO confirm their exact contents.
#' @name Dataset
#' @rdname Dataset
"SummaryDF_Disease_HLA"
#' @name Dataset
#' @rdname Dataset
"SummaryDF_Disease_HLAString"
#' @name Dataset
#' @rdname Dataset
"SummaryDF_Disease_Peptide"
|
library(dplyr)
library(readr)
library(configr)
library(futile.logger)
library(patchwork)
library(ShortRead)
library(Biostrings)
library(dada2)
library(charlier)
library(dadautils)
#' main processing step - tries to operate as a pipeline returning 0 (success) or
#' failure ( > 0)
#'
#' @param CFG list, configuration from a config file
#' @return integer where 0 means success
main <- function(CFG){
  # Exit status: 0 = success, >0 = failure (becomes the process exit code).
  RETURN <- 0
  # "auto" -> use however many cores charlier detects on this node.
  if (CFG$multithread[1] == "auto") CFG$multithread <- charlier::count_cores()
  if (!dir.exists(CFG$input_path)){
    warning("input path not found:", CFG$input_path)
    return(RETURN + 1)
  }
  if (!charlier::make_path(CFG$output_path)){
    warning("output path not created:", CFG$output_path)
    return(RETURN + 1)
  }
  # preliminaries: logging plus an audit trail for reproducibility
  charlier::start_logger(filename = file.path(CFG$output_path, "log"))
  PBS_JOBID <- charlier::get_pbs_jobid(no_pbs_text = "not in PBS queue")
  charlier::audit(file.path(CFG$output_path, "audit.txt"), pbs_jobid = PBS_JOBID)
  # add baseline info into log, just because
  flog.info("starting run: %s", format(Sys.time(), "%Y-%m-%d %H:%M:%S"))
  flog.info("NCPUS: %s", as.character(CFG$multithread))
  flog.info("System PID: %s", Sys.getpid())
  flog.info("PBS_JOBID: %s", PBS_JOBID)
  flog.info("VERSION: %s", CFG$version)
  flog.info("INPUT PATH: %s", CFG$input_path)
  flog.info("OUTPUT PATH: %s", CFG$output_path)

  flog.info("checking for input fastq files")
  fq_files <- dadautils::list_filepairs(CFG$input_path) %>%
    dadautils::verify_filepairs()
  sample.names <- dadautils::extract_sample_names(fq_files, rule="basename")

  # Optional quality-profile plots, gated on the config section being present.
  if ("dada2_plotQualityProfile" %in% names(CFG)){
    ofile <- file.path(CFG$output_path, "quality_profiles.pdf")  # was "=" assignment
    flog.info("plotting quality profiles: %s", ofile)
    dadautils::plot_qualityProfiles(fq_files,
      n = CFG$dada2_plotQualityProfile$nplots,
      ofile = ofile)
  }

  flog.info("filter and trim of input files")
  filtN_path <- file.path(CFG$output_path, CFG$dada2_filterAndTrim_filtN$name)
  if (!charlier::make_path(filtN_path)) {
    flog.error("filtN_path not created: %s", filtN_path)
    return(RETURN + 1)
  }
  filtN_r <- dadautils::filter_and_trim(fq_files,
    output_path = filtN_path,
    maxN = CFG$dada2_filterAndTrim_filtN$maxN,
    multithread = CFG$multithread,
    truncLen = CFG$dada2_filterAndTrim_filtN$truncLen,
    minLen = CFG$dada2_filterAndTrim_filtN$minLen,
    maxEE = CFG$dada2_filterAndTrim_filtN$maxEE,
    truncQ = CFG$dada2_filterAndTrim_filtN$truncQ,
    rm.phix = CFG$dada2_filterAndTrim_filtN$rm.phix,
    compress = CFG$dada2_filterAndTrim_filtN$compress,
    save_results = TRUE)
  filtN_files <- dadautils::list_filepairs(filtN_path)
  if (identical(unname(lengths(filtN_files)), c(0, 0))){
    stop("No filtN files produced")
  }
  if (!identical(lengths(fq_files), lengths(filtN_files))){
    # presumably filter_and_trim dropped some files if we get here...
    # so we need to trim fq_files to match. We assume that the output basenames
    # are the same as the input basenames - so all we need to do is match
    fq_files <- sapply(names(fq_files),
      function(name){
        ix <- basename(fq_files[[name]]) %in% basename(filtN_files[[name]])
        fq_files[[name]][ix]
      }, simplify = FALSE)
    filtN_r <- filtN_r %>%
      dplyr::filter(reads.out > 0)
    sample.names <- dadautils::extract_sample_names(fq_files, rule="basename")
  } # check for dropped inputs

  flog.info("learn errors")
  learnErrors_path <- file.path(CFG$output_path, CFG$dada2_learnErrors$name)
  if (!charlier::make_path(learnErrors_path)) {
    flog.error("learnErrors_path not created: %s", learnErrors_path)
    return(RETURN + 1)
  }
  err <- dadautils::learn_errors(filtN_files,
    output_path = learnErrors_path,
    multithread = CFG$multithread,
    save_output = TRUE,
    save_graphics = TRUE)

  flog.info("run dada")
  dada_r <- dadautils::run_dada(
    filtN_files,
    err,
    multithread = CFG$multithread,
    pool = CFG$dada2_dada$pool)

  # run merge pairs
  flog.info("merge pairs")
  mergers <- dadautils::merge_pairs(filtN_files, dada_r, verbose = TRUE, save_output = TRUE, minOverlap = CFG$dada2_merge_pairs$minOverlap)

  flog.info("make sequence table")
  seqtab <- dada2::makeSequenceTable(mergers)
  tseqtab <- dplyr::as_tibble(t(seqtab)) %>%
    readr::write_csv(file.path(CFG$output_path, "seqtab.csv"))

  flog.info("remove Bimera Denovo")
  seqtab.nochim <- dada2::removeBimeraDenovo(seqtab,
    method = CFG$dada2_removeBimeraDenovo_seqtab$method,
    multithread = CFG$multithread,
    verbose = CFG$dada2_removeBimeraDenovo_seqtab$verbose)
  write.csv(seqtab.nochim, file.path(CFG$output_path, "dada2_seqtab-nochim.csv"))
  # Write the ASV FASTA, then a transposed table keyed by the ASV names.
  fasta <- dadautils::asv_fasta(seqtab.nochim, file = file.path(CFG$output_path,"ASV_sequences.fasta"))
  tseqtab.nochim <- dplyr::as_tibble(t(seqtab.nochim)) %>%
    dplyr::mutate(ASV = names(fasta)) %>%
    dplyr::relocate(ASV, .before = 1) %>%
    readr::write_csv(file.path(CFG$output_path, "seqtab-nochim.csv"))

  # Per-sample read counts through each pipeline stage, plus final pass rate.
  track <- dplyr::tibble(
    name = sample.names,
    input = filtN_r$reads.in,
    filtered = filtN_r$reads.out,
    denoised_forward = sapply(dada_r$forward, dadautils::count_uniques),
    denoised_reverse = sapply(dada_r$reverse, dadautils::count_uniques),
    merged = sapply(mergers, dadautils::count_uniques),
    nonchim = rowSums(seqtab.nochim),
    final_prr = nonchim/input) %>%
    readr::write_csv(file.path(CFG$output_path, "track.csv"))

  flog.info("assign taxonomy")
  taxa <- dadautils::assign_taxonomy(seqtab.nochim,
    refFasta = CFG$dada2_assignTaxonomy_nochim$refFasta,
    taxLevels = CFG$dada2_assignTaxonomy_nochim$taxLevels,
    minBoot = CFG$dada2_assignTaxonomy_nochim$minBoot,
    outputBootstraps = CFG$dada2_assignTaxonomy_nochim$outputBootstraps,
    verbose = CFG$dada2_assignTaxonomy_nochim$verbose,
    multithread = CFG$multithread,
    drop_levels = "NA",
    save_file = TRUE,
    filename = file.path(CFG$output_path, "taxa.csv"))
  ttaxa <- dplyr::as_tibble(taxa) %>%
    dplyr::mutate(ASV = names(fasta)) %>%
    dplyr::relocate(ASV, .before = 1) %>%
    readr::write_csv(file.path(CFG$output_path, "ASV_taxa.csv"))

  if ("dada2_addSpecies" %in% names(CFG)){
    flog.info("add species to taxonomy")
    # With outputBootstraps the result is list(tax, boot); addSpecies needs
    # just the taxonomy matrix.
    if (length(taxa) == 2 && "tax" %in% names(taxa)){
      taxa <- taxa$tax
    }
    taxa <- dada2::addSpecies(taxa, refFasta = CFG$dada2_addSpecies$refFasta)
    readr::write_csv(taxa %>% dplyr::as_tibble(),
      file.path(CFG$output_path, "taxa-species.csv"))
  }

  if ("dada2_taxa_remove" %in% names(CFG)){
    flog.info("remove unwanted values in taxonomy")
    taxa <- taxa %>%
      dadautils::taxa_remove(vars = CFG$dada2_taxa_remove)
    readr::write_csv(taxa %>% dplyr::as_tibble(),
      file.path(CFG$output_path, "taxa-cleaned.csv"))
  }

  # BUG FIX: was fflog.info() (typo) which raised "could not find function"
  # at the very end of an otherwise successful run.
  flog.info("done: %s", CFG$output_path)
  return(RETURN)
} #main
# We only run if executed as a script (Rscript) - not when interactive.
# In batch mode the config file path comes from the command line; in an
# interactive session fall back to "" (read_config's default behaviour).
if (interactive()){
  cfgfile <- ""
} else {
  cfgfile <- commandArgs(trailingOnly = TRUE)[1]
}
# Read the run configuration, auto-populating the standard path fields.
CFG <- charlier::read_config(cfgfile[1],
  autopopulate = TRUE,
  fields = list(
    data_path = "data_path",
    reference_path = "reference_path"),
  rootname = "global")
# Only kick off processing (and exit with a status code) in batch mode.
if (!interactive()){
  ok <- main(CFG)
  quit(save = "no", status = ok)
}
| /dada2_16S.R | no_license | BigelowLab/edna-dada2 | R | false | false | 9,650 | r | library(dplyr)
library(readr)
library(configr)
library(futile.logger)
library(patchwork)
library(ShortRead)
library(Biostrings)
library(dada2)
library(charlier)
library(dadautils)
#' main processing step - tries to operate as a pipeline returning 0 (success) or
#' failure ( > 0)
#'
#' @param CFG list, configuration from a config file
#' @return integer where 0 means success
main <- function(CFG){
  # Exit status: 0 = success, >0 = failure (becomes the process exit code).
  RETURN <- 0
  # "auto" -> use however many cores charlier detects on this node.
  if (CFG$multithread[1] == "auto") CFG$multithread <- charlier::count_cores()
  if (!dir.exists(CFG$input_path)){
    warning("input path not found:", CFG$input_path)
    return(RETURN + 1)
  }
  if (!charlier::make_path(CFG$output_path)){
    warning("output path not created:", CFG$output_path)
    return(RETURN + 1)
  }
  # preliminaries: logging plus an audit trail for reproducibility
  charlier::start_logger(filename = file.path(CFG$output_path, "log"))
  PBS_JOBID <- charlier::get_pbs_jobid(no_pbs_text = "not in PBS queue")
  charlier::audit(file.path(CFG$output_path, "audit.txt"), pbs_jobid = PBS_JOBID)
  # add baseline info into log, just because
  flog.info("starting run: %s", format(Sys.time(), "%Y-%m-%d %H:%M:%S"))
  flog.info("NCPUS: %s", as.character(CFG$multithread))
  flog.info("System PID: %s", Sys.getpid())
  flog.info("PBS_JOBID: %s", PBS_JOBID)
  flog.info("VERSION: %s", CFG$version)
  flog.info("INPUT PATH: %s", CFG$input_path)
  flog.info("OUTPUT PATH: %s", CFG$output_path)

  flog.info("checking for input fastq files")
  fq_files <- dadautils::list_filepairs(CFG$input_path) %>%
    dadautils::verify_filepairs()
  sample.names <- dadautils::extract_sample_names(fq_files, rule="basename")

  # Optional quality-profile plots, gated on the config section being present.
  if ("dada2_plotQualityProfile" %in% names(CFG)){
    ofile <- file.path(CFG$output_path, "quality_profiles.pdf")  # was "=" assignment
    flog.info("plotting quality profiles: %s", ofile)
    dadautils::plot_qualityProfiles(fq_files,
      n = CFG$dada2_plotQualityProfile$nplots,
      ofile = ofile)
  }

  flog.info("filter and trim of input files")
  filtN_path <- file.path(CFG$output_path, CFG$dada2_filterAndTrim_filtN$name)
  if (!charlier::make_path(filtN_path)) {
    flog.error("filtN_path not created: %s", filtN_path)
    return(RETURN + 1)
  }
  filtN_r <- dadautils::filter_and_trim(fq_files,
    output_path = filtN_path,
    maxN = CFG$dada2_filterAndTrim_filtN$maxN,
    multithread = CFG$multithread,
    truncLen = CFG$dada2_filterAndTrim_filtN$truncLen,
    minLen = CFG$dada2_filterAndTrim_filtN$minLen,
    maxEE = CFG$dada2_filterAndTrim_filtN$maxEE,
    truncQ = CFG$dada2_filterAndTrim_filtN$truncQ,
    rm.phix = CFG$dada2_filterAndTrim_filtN$rm.phix,
    compress = CFG$dada2_filterAndTrim_filtN$compress,
    save_results = TRUE)
  filtN_files <- dadautils::list_filepairs(filtN_path)
  if (identical(unname(lengths(filtN_files)), c(0, 0))){
    stop("No filtN files produced")
  }
  if (!identical(lengths(fq_files), lengths(filtN_files))){
    # presumably filter_and_trim dropped some files if we get here...
    # so we need to trim fq_files to match. We assume that the output basenames
    # are the same as the input basenames - so all we need to do is match
    fq_files <- sapply(names(fq_files),
      function(name){
        ix <- basename(fq_files[[name]]) %in% basename(filtN_files[[name]])
        fq_files[[name]][ix]
      }, simplify = FALSE)
    filtN_r <- filtN_r %>%
      dplyr::filter(reads.out > 0)
    sample.names <- dadautils::extract_sample_names(fq_files, rule="basename")
  } # check for dropped inputs

  flog.info("learn errors")
  learnErrors_path <- file.path(CFG$output_path, CFG$dada2_learnErrors$name)
  if (!charlier::make_path(learnErrors_path)) {
    flog.error("learnErrors_path not created: %s", learnErrors_path)
    return(RETURN + 1)
  }
  err <- dadautils::learn_errors(filtN_files,
    output_path = learnErrors_path,
    multithread = CFG$multithread,
    save_output = TRUE,
    save_graphics = TRUE)

  flog.info("run dada")
  dada_r <- dadautils::run_dada(
    filtN_files,
    err,
    multithread = CFG$multithread,
    pool = CFG$dada2_dada$pool)

  # run merge pairs
  flog.info("merge pairs")
  mergers <- dadautils::merge_pairs(filtN_files, dada_r, verbose = TRUE, save_output = TRUE, minOverlap = CFG$dada2_merge_pairs$minOverlap)

  flog.info("make sequence table")
  seqtab <- dada2::makeSequenceTable(mergers)
  tseqtab <- dplyr::as_tibble(t(seqtab)) %>%
    readr::write_csv(file.path(CFG$output_path, "seqtab.csv"))

  flog.info("remove Bimera Denovo")
  seqtab.nochim <- dada2::removeBimeraDenovo(seqtab,
    method = CFG$dada2_removeBimeraDenovo_seqtab$method,
    multithread = CFG$multithread,
    verbose = CFG$dada2_removeBimeraDenovo_seqtab$verbose)
  write.csv(seqtab.nochim, file.path(CFG$output_path, "dada2_seqtab-nochim.csv"))
  # Write the ASV FASTA, then a transposed table keyed by the ASV names.
  fasta <- dadautils::asv_fasta(seqtab.nochim, file = file.path(CFG$output_path,"ASV_sequences.fasta"))
  tseqtab.nochim <- dplyr::as_tibble(t(seqtab.nochim)) %>%
    dplyr::mutate(ASV = names(fasta)) %>%
    dplyr::relocate(ASV, .before = 1) %>%
    readr::write_csv(file.path(CFG$output_path, "seqtab-nochim.csv"))

  # Per-sample read counts through each pipeline stage, plus final pass rate.
  track <- dplyr::tibble(
    name = sample.names,
    input = filtN_r$reads.in,
    filtered = filtN_r$reads.out,
    denoised_forward = sapply(dada_r$forward, dadautils::count_uniques),
    denoised_reverse = sapply(dada_r$reverse, dadautils::count_uniques),
    merged = sapply(mergers, dadautils::count_uniques),
    nonchim = rowSums(seqtab.nochim),
    final_prr = nonchim/input) %>%
    readr::write_csv(file.path(CFG$output_path, "track.csv"))

  flog.info("assign taxonomy")
  taxa <- dadautils::assign_taxonomy(seqtab.nochim,
    refFasta = CFG$dada2_assignTaxonomy_nochim$refFasta,
    taxLevels = CFG$dada2_assignTaxonomy_nochim$taxLevels,
    minBoot = CFG$dada2_assignTaxonomy_nochim$minBoot,
    outputBootstraps = CFG$dada2_assignTaxonomy_nochim$outputBootstraps,
    verbose = CFG$dada2_assignTaxonomy_nochim$verbose,
    multithread = CFG$multithread,
    drop_levels = "NA",
    save_file = TRUE,
    filename = file.path(CFG$output_path, "taxa.csv"))
  ttaxa <- dplyr::as_tibble(taxa) %>%
    dplyr::mutate(ASV = names(fasta)) %>%
    dplyr::relocate(ASV, .before = 1) %>%
    readr::write_csv(file.path(CFG$output_path, "ASV_taxa.csv"))

  if ("dada2_addSpecies" %in% names(CFG)){
    flog.info("add species to taxonomy")
    # With outputBootstraps the result is list(tax, boot); addSpecies needs
    # just the taxonomy matrix.
    if (length(taxa) == 2 && "tax" %in% names(taxa)){
      taxa <- taxa$tax
    }
    taxa <- dada2::addSpecies(taxa, refFasta = CFG$dada2_addSpecies$refFasta)
    readr::write_csv(taxa %>% dplyr::as_tibble(),
      file.path(CFG$output_path, "taxa-species.csv"))
  }

  if ("dada2_taxa_remove" %in% names(CFG)){
    flog.info("remove unwanted values in taxonomy")
    taxa <- taxa %>%
      dadautils::taxa_remove(vars = CFG$dada2_taxa_remove)
    readr::write_csv(taxa %>% dplyr::as_tibble(),
      file.path(CFG$output_path, "taxa-cleaned.csv"))
  }

  # BUG FIX: was fflog.info() (typo) which raised "could not find function"
  # at the very end of an otherwise successful run.
  flog.info("done: %s", CFG$output_path)
  return(RETURN)
} #main
# We only run if executed as a script (Rscript) - not when interactive.
# In batch mode the config file path comes from the command line; in an
# interactive session fall back to "" (read_config's default behaviour).
if (interactive()){
  cfgfile <- ""
} else {
  cfgfile <- commandArgs(trailingOnly = TRUE)[1]
}
# Read the run configuration, auto-populating the standard path fields.
CFG <- charlier::read_config(cfgfile[1],
  autopopulate = TRUE,
  fields = list(
    data_path = "data_path",
    reference_path = "reference_path"),
  rootname = "global")
# Only kick off processing (and exit with a status code) in batch mode.
if (!interactive()){
  ok <- main(CFG)
  quit(save = "no", status = ok)
}
|
# Working directory containing the example data files.
setwd('C:/Users/hnjyzdc/Desktop/从数据到结论')

x <- scan('gs.txt')
# Sign test: sum(x > 100) counts observations above 100; 25 is the sample
# size and 0.5 the null probability. The output is the p-value.
pbinom(sum(x > 100), 25, 0.5)
# Wilcoxon signed-rank test: mu = 100 is the hypothesized location;
# alternative = 'less' makes it one-sided ('greater' for the other side,
# omit for two-sided). Arguments spelled out (were partially matched m=/alt=).
wilcox.test(x, mu = 100, alternative = 'less')

# Install 'tseries' only when missing (avoids re-installing on every run).
if (!requireNamespace('tseries', quietly = TRUE)) install.packages('tseries')
library(tseries)

y <- scan('run1.txt')
# Runs test: checks whether a dichotomous (dummy) variable is randomly ordered.
runs.test(factor(y))

z <- scan('run2.txt')
# Runs test for a general variable: dichotomize around the median first.
runs.test(factor(z > median(z)))

w <- read.table('gdp.txt')
# Two-sample Wilcoxon rank-sum (Mann-Whitney U) test, one-sided.
wilcox.test(w[w[, 2] == 1, 1], w[w[, 2] == 2, 1], paired = FALSE, alternative = 'less')
| /books/从数据到结论/R/非参数检验.R | permissive | yuanqingmei/Statistics_with_R | R | false | false | 787 | r | setwd('C:/Users/hnjyzdc/Desktop/从数据到结论')
x=scan('gs.txt')
pbinom(sum(x>100),25,0.5)
# Sign test: sum() counts values greater than 100, 25 is the sample size,
# 0.5 the null probability; the output is the p-value.
wilcox.test(x,m=100,alt='less')
# Wilcoxon signed-rank test: m (mu) is the hypothesized location; alt selects
# the one-sided alternative, 'less' or 'greater'.
install.packages('tseries')
library(tseries)
y = scan('run1.txt')
runs.test(factor(y))
# Runs test: checks whether a dichotomous (dummy) variable is randomly ordered.
z = scan('run2.txt')
runs.test(factor(z>median(z)))
# Runs test for a general variable: dichotomize around the median first.
w = read.table('gdp.txt')
wilcox.test(w[w[,2]==1,1],w[w[,2]==2,1],paired=F,alt = 'less')
# Two-sample Wilcoxon rank-sum test (Mann-Whitney U test), one-sided.
|
\name{grasp.adm.dump}
\alias{grasp.adm.dump}
\title{ Internal GRASP function }
\description{
This function saves the current version of your GRASP functions in a dump file that can be transported to another R session or to another computer.
}
\usage{
grasp.adm.dump()
}
\author{ Anthony.Lehmann@unige.ch }
\seealso{ grasp \code{\link{grasp}}, grasp.in \code{\link{grasp.in}}}
\keyword{models}
| /man/grasp.adm.dump.Rd | no_license | cran/grasp | R | false | false | 405 | rd | \name{grasp.adm.dump}
\alias{grasp.adm.dump}
\title{ Internal GRASP function }
\description{
This function saves the current version of your GRASP functions in a dump file that can be transported to another R session or to another computer.
}
\usage{
grasp.adm.dump()
}
\author{ Anthony.Lehmann@unige.ch }
\seealso{ grasp \code{\link{grasp}}, grasp.in \code{\link{grasp.in}}}
\keyword{models}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PWMScanHeatmap.R
\docType{methods}
\name{PWMScanHeatmap}
\alias{PWMScanHeatmap}
\alias{PWMScanHeatmap,DNAStringSet,matrix-method}
\title{Generate a Heatmap of PWM Scores in a DNA Sequence}
\usage{
PWMScanHeatmap(seq, pwm, ...)
\S4method{PWMScanHeatmap}{DNAStringSet,matrix}(seq, pwm, coords = NULL,
label = "")
}
\arguments{
\item{seq}{A DNAString of equal length}
\item{pwm}{A PWM}
\item{...}{additional arguments used by methods
This function creates a heatmap where each point is the score of a PWM match
starting from that position, which can visualise regions of enrichment or exclusion
of certain motifs}
\item{coords}{Co-ordinates for the heatmap, defaults to c(0, width(windows))}
\item{label}{Label for the heatmap}
}
\value{
A heatmap
}
\description{
Generate a Heatmap of PWM Scores in a DNA Sequence
}
\section{Methods (by class)}{
\itemize{
\item \code{seq = DNAStringSet,pwm = matrix}: Heatmap of PWM Scores
}}
\examples{
data(HeatmapExamples)
PatternHeatmap(string_set, tata_pwm, coords=c(-100, 100), label="TATA Scan")
}
\seealso{
PatternHeatmap
}
| /man/PWMScanHeatmap.Rd | no_license | mgperry/heatmaps | R | false | true | 1,145 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PWMScanHeatmap.R
\docType{methods}
\name{PWMScanHeatmap}
\alias{PWMScanHeatmap}
\alias{PWMScanHeatmap,DNAStringSet,matrix-method}
\title{Generate a Heatmap of PWM Scores in a DNA Sequence}
\usage{
PWMScanHeatmap(seq, pwm, ...)
\S4method{PWMScanHeatmap}{DNAStringSet,matrix}(seq, pwm, coords = NULL,
label = "")
}
\arguments{
\item{seq}{A DNAString of equal length}
\item{pwm}{A PWM}
\item{...}{additional arguments used by methods
This function creates a heatmap where each point is the score of a PWM match
starting from that position, which can visualise regions of enrichment or exclusion
of certain motifs}
\item{coords}{Co-ordinates for the heatmap, defaults to c(0, width(windows))}
\item{label}{Label for the heatmap}
}
\value{
A heatmap
}
\description{
Generate a Heatmap of PWM Scores in a DNA Sequence
}
\section{Methods (by class)}{
\itemize{
\item \code{seq = DNAStringSet,pwm = matrix}: Heatmap of PWM Scores
}}
\examples{
data(HeatmapExamples)
PatternHeatmap(string_set, tata_pwm, coords=c(-100, 100), label="TATA Scan")
}
\seealso{
PatternHeatmap
}
|
library(readr)
library(dplyr)

# Load the insurance data and work with it as a tibble.
insurance <- read_csv("./BIDS/data/insurance.csv")
df <- as_tibble(insurance)
View(df)
summary(df)

### convert to factor (categories)
for (v in c("sex", "smoker", "region")) {
  df[[v]] <- factor(df[[v]])
}
summary(df)

### create graphs: charges against each predictor, six panels per page
par(mfrow = c(3, 2))
#pdf("insurance_graphs.pdf")
for (v in names(df)[1:6]) {
  plot(df$charges ~ df[[v]],
       main = paste("Charges vs", v), ylab = "Charges", xlab = v)
}
par(mfrow = c(1, 1))
#dev.off()

## Age: colour points by each categorical variable in turn
plot(df$charges ~ df$age, col = df$sex)
plot(df$charges ~ df$age, col = df$smoker)
plot(df$charges ~ df$age, col = df$region)

## Age-Smoker: reference lines marking the apparent charge bands
plot(df$charges ~ df$age, col = df$smoker)
abline(h = 15000, col = "darkgreen")
abline(h = 32000, col = "darkgreen")

## BMI
plot(df$charges ~ df$bmi, col = df$sex)
plot(df$charges ~ df$bmi, col = df$smoker)
plot(df$charges ~ df$bmi, col = df$region)

## BMI-Smokers: charge bands plus the BMI = 30 obesity cutoff
plot(df$charges ~ df$bmi, col = df$smoker)
abline(h = 15000, col = "darkgreen")
abline(h = 32000, col = "darkgreen")
abline(v = 30, col = "darkgreen")
### Analysis with aggregations
# Five-number-style summaries of age, BMI and charges, split by smoker status;
# t() flips the result so variables become rows for easier reading.
df %>%
  group_by(smoker) %>%
  summarise(age_min=min(age,na.rm=TRUE),
            age_25pct=quantile(age,probs = 0.25,na.rm=TRUE),
            age_mean=mean(age,na.rm=TRUE),
            age_median=median(age,na.rm=TRUE),
            age_75pct=quantile(age,probs = 0.75,na.rm=TRUE),
            age_max=max(age,na.rm=TRUE),
            bmi_min=min(bmi,na.rm=TRUE),
            bmi_25pct=quantile(bmi,probs = 0.25,na.rm=TRUE),
            bmi_mean=mean(bmi,na.rm=TRUE),
            bmi_median=median(bmi,na.rm=TRUE),
            bmi_75pct=quantile(bmi,probs = 0.75,na.rm=TRUE),
            bmi_max=max(bmi,na.rm=TRUE),
            charge_min=min(charges,na.rm=TRUE),
            charge_25pct=quantile(charges,probs = 0.25,na.rm=TRUE),
            charge_mean=mean(charges,na.rm=TRUE),
            charge_median=median(charges,na.rm=TRUE),
            charge_75pct=quantile(charges,probs = 0.75,na.rm=TRUE),
            charge_max=max(charges,na.rm=TRUE)
  ) %>%
  t()
# Same summaries, additionally split by obesity (BMI >= 30).
df %>%
  mutate(obese=factor(ifelse(bmi >= 30,1,0),
                      levels=c(0,1),
                      labels=c("Normal","Obese"))) %>%
  group_by(smoker, obese) %>%
  summarise(age_min=min(age,na.rm=TRUE),
            age_25pct=quantile(age,probs = 0.25,na.rm=TRUE),
            age_mean=mean(age,na.rm=TRUE),
            age_median=median(age,na.rm=TRUE),
            age_75pct=quantile(age,probs = 0.75,na.rm=TRUE),
            age_max=max(age,na.rm=TRUE),
            charge_min=min(charges,na.rm=TRUE),
            charge_25pct=quantile(charges,probs = 0.25,na.rm=TRUE),
            charge_mean=mean(charges,na.rm=TRUE),
            charge_median=median(charges,na.rm=TRUE),
            charge_75pct=quantile(charges,probs = 0.75,na.rm=TRUE),
            charge_max=max(charges,na.rm=TRUE)
  ) %>%
  t()
# Per-age charge summaries restricted to non-smokers.
age_charge_nonsmokers <- df %>%
  filter(smoker=="no") %>%
  group_by(age) %>%
  summarise(charge_min=min(charges,na.rm=TRUE),
            charge_25pct=quantile(charges,probs = 0.25,na.rm=TRUE),
            charge_mean=mean(charges,na.rm=TRUE),
            charge_median=median(charges,na.rm=TRUE),
            charge_75pct=quantile(charges,probs = 0.75,na.rm=TRUE),
            charge_max=max(charges,na.rm=TRUE)
  )
### Age by smoke and obese
# smoke_obese encoding: 3 = smoker & obese (BMI >= 30), 2 = smoker & not
# obese, 1 = non-smoker. Used as a plotting colour below.
df$smoke_obese <- ifelse(df$smoker=="yes" & df$bmi >= 30,3,
                         ifelse(df$smoker=="yes" & df$bmi < 30,2,1))
plot(df$charges ~ df$age,col=df$smoke_obese)
### check for the second group:
## by sex
df %>%
  filter(smoke_obese == 2) %>%
  mutate(obese=factor(ifelse(bmi >= 30,1,0),
                      levels=c(0,1),
                      labels=c("Normal","Obese"))) %>%
  group_by(sex) %>%
  summarize(age_min=min(age,na.rm=TRUE),
            age_25pct=quantile(age,probs = 0.25,na.rm=TRUE),
            age_mean=mean(age,na.rm=TRUE),
            age_median=median(age,na.rm=TRUE),
            age_75pct=quantile(age,probs = 0.75,na.rm=TRUE),
            age_max=max(age,na.rm=TRUE),
            bmi_min=min(bmi,na.rm=TRUE),
            bmi_25pct=quantile(bmi,probs = 0.25,na.rm=TRUE),
            bmi_mean=mean(bmi,na.rm=TRUE),
            bmi_median=median(bmi,na.rm=TRUE),
            bmi_75pct=quantile(bmi,probs = 0.75,na.rm=TRUE),
            bmi_max=max(bmi,na.rm=TRUE)) %>%
  t()
## by children
df %>%
  filter(smoke_obese == 2) %>%
  mutate(obese=factor(ifelse(bmi >= 30,1,0),
                      levels=c(0,1),
                      labels=c("Normal","Obese"))) %>%
  group_by(children) %>%
  summarize(age_min=min(age,na.rm=TRUE),
            age_25pct=quantile(age,probs = 0.25,na.rm=TRUE),
            age_mean=mean(age,na.rm=TRUE),
            age_median=median(age,na.rm=TRUE),
            age_75pct=quantile(age,probs = 0.75,na.rm=TRUE),
            age_max=max(age,na.rm=TRUE),
            bmi_min=min(bmi,na.rm=TRUE),
            bmi_25pct=quantile(bmi,probs = 0.25,na.rm=TRUE),
            bmi_mean=mean(bmi,na.rm=TRUE),
            bmi_median=median(bmi,na.rm=TRUE),
            bmi_75pct=quantile(bmi,probs = 0.75,na.rm=TRUE),
            bmi_max=max(bmi,na.rm=TRUE),
            cnt=n()) %>%
  t()
## BMI - Smokers and obese
plot(df$charges ~ df$bmi,col=df$smoke_obese)
abline(h=15000,col="darkgreen")
abline(h=32000,col="darkgreen")
abline(v=30,col="darkgreen")
## Age - Smokers and obese
plot(df$charges ~ df$age,col=df$smoke_obese)
abline(h=15000,col="darkgreen")
abline(h=32000,col="darkgreen")
## the middle group is less homogeneous. This does not depend on
## the smoking, age or bmi. There must be other factors influencing
## the value of charges
# Band charges into 4 groups (middle band split by BMI) to colour the final
# scatterplots. NOTE: the variable name "col" coincides with the graphics
# argument name; it is passed explicitly so there is no ambiguity.
col <- ifelse(df$charges < 15000, 1,
              ifelse(df$charges >= 15000 & df$bmi < 30, 2,
                     ifelse(df$charges < 32000,3, 4 )))
plot(df$charges ~ df$bmi,col=col)
plot(df$charges ~ df$age,col=col)
| /R/analysis_example.R | no_license | alxdubov/DataScience | R | false | false | 6,000 | r | library(readr)
library(dplyr)
insurance <- read_csv("./BIDS/data/insurance.csv")
df <- as_tibble(insurance)
View(df)
summary(df)
### convert to factor (categories)
df$sex <- factor(df$sex)
df$smoker <- factor(df$smoker)
df$region <- factor(df$region)
summary(df)
### create graphs
par(mfrow=c(3,2))
#pdf("insurance_graphs.pdf")
for (i in names(df)[1:6]) {
plot(df$charges ~ df[[i]],
main=paste("Charges vs",i), ylab="Charges",xlab=i)
}
par(mfrow=c(1,1))
#dev.off()
## Age
plot(df$charges ~ df$age,col=df$sex)
plot(df$charges ~ df$age,col=df$smoker)
plot(df$charges ~ df$age,col=df$region)
## Age-Smoker
plot(df$charges ~ df$age,col=df$smoker)
abline(h=15000,col="darkgreen")
abline(h=32000,col="darkgreen")
## BMI
plot(df$charges ~ df$bmi,col=df$sex)
plot(df$charges ~ df$bmi,col=df$smoker)
plot(df$charges ~ df$bmi,col=df$region)
## BMI-Smokers
plot(df$charges ~ df$bmi,col=df$smoker)
abline(h=15000,col="darkgreen")
abline(h=32000,col="darkgreen")
abline(v=30,col="darkgreen")
### Analysis with agregations
df %>%
group_by(smoker) %>%
summarise(age_min=min(age,na.rm=TRUE),
age_25pct=quantile(age,probs = 0.25,na.rm=TRUE),
age_mean=mean(age,na.rm=TRUE),
age_median=median(age,na.rm=TRUE),
age_75pct=quantile(age,probs = 0.75,na.rm=TRUE),
age_max=max(age,na.rm=TRUE),
bmi_min=min(bmi,na.rm=TRUE),
bmi_25pct=quantile(bmi,probs = 0.25,na.rm=TRUE),
bmi_mean=mean(bmi,na.rm=TRUE),
bmi_median=median(bmi,na.rm=TRUE),
bmi_75pct=quantile(bmi,probs = 0.75,na.rm=TRUE),
bmi_max=max(bmi,na.rm=TRUE),
charge_min=min(charges,na.rm=TRUE),
charge_25pct=quantile(charges,probs = 0.25,na.rm=TRUE),
charge_mean=mean(charges,na.rm=TRUE),
charge_median=median(charges,na.rm=TRUE),
charge_75pct=quantile(charges,probs = 0.75,na.rm=TRUE),
charge_max=max(charges,na.rm=TRUE)
) %>%
t()
df %>%
mutate(obese=factor(ifelse(bmi >= 30,1,0),
levels=c(0,1),
labels=c("Normal","Obese"))) %>%
group_by(smoker, obese) %>%
summarise(age_min=min(age,na.rm=TRUE),
age_25pct=quantile(age,probs = 0.25,na.rm=TRUE),
age_mean=mean(age,na.rm=TRUE),
age_median=median(age,na.rm=TRUE),
age_75pct=quantile(age,probs = 0.75,na.rm=TRUE),
age_max=max(age,na.rm=TRUE),
charge_min=min(charges,na.rm=TRUE),
charge_25pct=quantile(charges,probs = 0.25,na.rm=TRUE),
charge_mean=mean(charges,na.rm=TRUE),
charge_median=median(charges,na.rm=TRUE),
charge_75pct=quantile(charges,probs = 0.75,na.rm=TRUE),
charge_max=max(charges,na.rm=TRUE)
) %>%
t()
age_charge_nonsmokers <- df %>%
filter(smoker=="no") %>%
group_by(age) %>%
summarise(charge_min=min(charges,na.rm=TRUE),
charge_25pct=quantile(charges,probs = 0.25,na.rm=TRUE),
charge_mean=mean(charges,na.rm=TRUE),
charge_median=median(charges,na.rm=TRUE),
charge_75pct=quantile(charges,probs = 0.75,na.rm=TRUE),
charge_max=max(charges,na.rm=TRUE)
)
### Age by smoke and obese
df$smoke_obese <- ifelse(df$smoker=="yes" & df$bmi >= 30,3,
ifelse(df$smoker=="yes" & df$bmi < 30,2,1))
plot(df$charges ~ df$age,col=df$smoke_obese)
### check for the second group:
## by sex
df %>%
filter(smoke_obese == 2) %>%
mutate(obese=factor(ifelse(bmi >= 30,1,0),
levels=c(0,1),
labels=c("Normal","Obese"))) %>%
group_by(sex) %>%
summarize(age_min=min(age,na.rm=TRUE),
age_25pct=quantile(age,probs = 0.25,na.rm=TRUE),
age_mean=mean(age,na.rm=TRUE),
age_median=median(age,na.rm=TRUE),
age_75pct=quantile(age,probs = 0.75,na.rm=TRUE),
age_max=max(age,na.rm=TRUE),
bmi_min=min(bmi,na.rm=TRUE),
bmi_25pct=quantile(bmi,probs = 0.25,na.rm=TRUE),
bmi_mean=mean(bmi,na.rm=TRUE),
bmi_median=median(bmi,na.rm=TRUE),
bmi_75pct=quantile(bmi,probs = 0.75,na.rm=TRUE),
bmi_max=max(bmi,na.rm=TRUE)) %>%
t()
## by children
df %>%
filter(smoke_obese == 2) %>%
mutate(obese=factor(ifelse(bmi >= 30,1,0),
levels=c(0,1),
labels=c("Normal","Obese"))) %>%
group_by(children) %>%
summarize(age_min=min(age,na.rm=TRUE),
age_25pct=quantile(age,probs = 0.25,na.rm=TRUE),
age_mean=mean(age,na.rm=TRUE),
age_median=median(age,na.rm=TRUE),
age_75pct=quantile(age,probs = 0.75,na.rm=TRUE),
age_max=max(age,na.rm=TRUE),
bmi_min=min(bmi,na.rm=TRUE),
bmi_25pct=quantile(bmi,probs = 0.25,na.rm=TRUE),
bmi_mean=mean(bmi,na.rm=TRUE),
bmi_median=median(bmi,na.rm=TRUE),
bmi_75pct=quantile(bmi,probs = 0.75,na.rm=TRUE),
bmi_max=max(bmi,na.rm=TRUE),
cnt=n()) %>%
t()
## BMI - Smokers and obese
plot(df$charges ~ df$bmi,col=df$smoke_obese)
abline(h=15000,col="darkgreen")
abline(h=32000,col="darkgreen")
abline(v=30,col="darkgreen")
## Age - Smokers and obese
plot(df$charges ~ df$age,col=df$smoke_obese)
abline(h=15000,col="darkgreen")
abline(h=32000,col="darkgreen")
## the middle group is less homogeneous. This does not depend on
## the smoking, age or bmi. There must be other factors influencing
## the value of charges
col <- ifelse(df$charges < 15000, 1,
ifelse(df$charges >= 15000 & df$bmi < 30, 2,
ifelse(df$charges < 32000,3, 4 )))
plot(df$charges ~ df$bmi,col=col)
plot(df$charges ~ df$age,col=col)
|
#' Merge Kobo trip records with trips reconstructed from GPS points.
#'
#' Point-derived trips without an IMEI cannot be matched and are dropped;
#' the remaining rows are full-joined to the Kobo records on device IMEI
#' and trip date, so trips present in only one source are still kept.
#'
#' @param kobo_trips_2 data.frame of Kobo-reported trips with columns
#'   `trip_imei` and `trip_date`.
#' @param trips_from_points data.frame of GPS-derived trips with columns
#'   `imei` and `trip_end_date_pds`.
#' @return The full join of the two inputs.
merge_trips <- function(kobo_trips_2, trips_from_points){
  # Fully qualified calls replace the former `require(dplyr)`, which only
  # warned when dplyr was missing and then failed later with a less clear
  # error; no attachment side effect is needed any more.
  trips_with_imei <- dplyr::filter(trips_from_points, !is.na(imei))
  dplyr::full_join(kobo_trips_2, trips_with_imei,
                   by = c("trip_imei" = "imei",
                          "trip_date" = "trip_end_date_pds"))
}
| /R/merge_trips.R | permissive | WorldFishCenter/timor-catch-estimation | R | false | false | 319 | r | merge_trips <- function(kobo_trips_2, trips_from_points){
require(dplyr)
trips_with_imei <- trips_from_points %>%
filter(!is.na(imei))
kobo_trips_2 %>%
dplyr::full_join(trips_with_imei, by = c("trip_imei" = "imei",
"trip_date" = "trip_end_date_pds"))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/OneThousandBackground.R
\name{OneThousandBackground}
\alias{OneThousandBackground}
\title{Process module: OneThousandBackground}
\usage{
OneThousandBackground(.data, seed = NULL)
}
\arguments{
\item{.data}{\strong{Internal parameter, do not use in the workflow function}. \code{.data} is a list of a data frame and a raster object returned from occurrence modules and covariate modules respectively. \code{.data} is passed automatically in workflow from the occurrence and covariate modules to the process module(s) and should not be passed by the user.}
\item{seed}{Numeric used with \code{\link[base]{set.seed}}}
}
\description{
Process module to generate up to 1000 background records at random in
cells of the covariate raster and return these along with the occurrence data.
}
\section{Version}{
1.0
}
\section{Date submitted}{
2015-11-13
}
\section{Data type}{
presence-only
}
\seealso{
Other process: \code{\link{AddRandomUniformPredictors}},
\code{\link{BackgroundAndCrossvalid}},
\code{\link{Background}}, \code{\link{Bootstrap}},
\code{\link{CarolinaWrenValidation}},
\code{\link{Clean}}, \code{\link{Crossvalidate}},
\code{\link{JitterOccurrence}}, \code{\link{MESSMask}},
\code{\link{NoProcess}},
\code{\link{OneHundredBackground}},
\code{\link{PartitionDisc}},
\code{\link{StandardiseCov}},
\code{\link{SubsampleOccurrence}},
\code{\link{TargetGroupBackground}},
\code{\link{Transform}}, \code{\link{addInteraction}}
}
\author{
ZOON Developers, \email{zoonproject@gmail.com}
}
| /man/OneThousandBackground.Rd | no_license | smwindecker/modules | R | false | true | 1,604 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/OneThousandBackground.R
\name{OneThousandBackground}
\alias{OneThousandBackground}
\title{Process module: OneThousandBackground}
\usage{
OneThousandBackground(.data, seed = NULL)
}
\arguments{
\item{.data}{\strong{Internal parameter, do not use in the workflow function}. \code{.data} is a list of a data frame and a raster object returned from occurrence modules and covariate modules respectively. \code{.data} is passed automatically in workflow from the occurrence and covariate modules to the process module(s) and should not be passed by the user.}
\item{seed}{Numeric used with \code{\link[base]{set.seed}}}
}
\description{
Process module to generate up to 1000 background records at random in
cells of the covariate raster and return these along with the occurrence data.
}
\section{Version}{
1.0
}
\section{Date submitted}{
2015-11-13
}
\section{Data type}{
presence-only
}
\seealso{
Other process: \code{\link{AddRandomUniformPredictors}},
\code{\link{BackgroundAndCrossvalid}},
\code{\link{Background}}, \code{\link{Bootstrap}},
\code{\link{CarolinaWrenValidation}},
\code{\link{Clean}}, \code{\link{Crossvalidate}},
\code{\link{JitterOccurrence}}, \code{\link{MESSMask}},
\code{\link{NoProcess}},
\code{\link{OneHundredBackground}},
\code{\link{PartitionDisc}},
\code{\link{StandardiseCov}},
\code{\link{SubsampleOccurrence}},
\code{\link{TargetGroupBackground}},
\code{\link{Transform}}, \code{\link{addInteraction}}
}
\author{
ZOON Developers, \email{zoonproject@gmail.com}
}
|
\name{plausibility}
\alias{plausibility}
\title{Compute word (or compound) plausibility}
\encoding{latin1}
\description{Gives measures of semantic transparency (plausibility) for words or compounds}
\details{
The format of \code{x} should be of the kind \code{x <- "word1 word2 word3"} instead of\cr\code{x <- c("word1", "word2", "word3")} if phrases of more than one word are used as input. Simple vector addition of the constituent vectors is then used to compute the phrase vector.\cr
Since \code{x} can also be chosen to be any vector of the active LSA Space, this function can be combined with \code{compose()} to compute semantic transparency measures of complex expressions (see examples). Since semantic transparency methods were developed as measures for composed vectors, applying them makes most sense for those.\cr\cr
The methods are defined as follows:
\itemize{
\item{\code{method = "n_density"} The average cosine between a (word or phrase) vector and its \emph{n} nearest neighbors (see \code{\link[LSAfun2]{neighbors}})}
\item{\code{method = "length"} The length of a vector (as computed by the standard Euclidean norm)}
\item{\code{method = "proximity"} The cosine similarity between a compound vector and its stem word (for example between \emph{mad hatter} and \emph{hatter} or between \emph{objectify} and \emph{object})}
\item{\code{method = "entropy"} The entropy of the \emph{K}-dimensional vector with the vector components \eqn{t_1,...,t_K }, as computed by
\deqn{entropy = \log{K} - \sum{t_i * \log{t_i}} }}
}
}
\usage{plausibility(x,method, n=10,stem,tvectors=tvectors,breakdown=TRUE)}
\arguments{
\item{x}{a character vector of \code{length(x) = 1} or a numeric of \code{length=ncol(tvectors)} vector with same dimensionality as LSA space}
\item{method}{the measure of semantic transparency, can be one of \code{n_density},\code{length}, \code{proximity}, or \code{entropy} (see \emph{Details})}
\item{n}{the number of neighbors for the \code{n_density} method}
\item{stem}{the stem (or word) of comparison for the \code{proximity} method}
\item{tvectors}{the semantic space in which the computation is to be done (a numeric matrix where every row is a word vector)}
\item{breakdown}{if \code{TRUE}, the function \code{\link[LSAfun2]{breakdown}} is applied to the input}
}
\value{The semantic transparency as a numeric}
\author{
Fritz Günther
}
\seealso{
\code{\link[LSAfun2]{Cosine}},
\code{\link[LSAfun2]{neighbors}},
\code{\link[LSAfun2]{compose}}
}
\references{
Lazaridou, A., Vecchi, E., & Baroni, M. (2013). Fish transporters and miracle homes:
How compositional distributional semantics can help NP parsing. In \emph{Proceedings
of EMNLP 2013} (pp. 1908 - 1913). Seattle, WA.
Marelli, M., & Baroni, M. (in press). Affixation in semantic space: Modeling morpheme meanings with compositional distributional semantics. \emph{Psychological Review.}
Vecchi, E. M., Baroni, M., & Zamparelli, R. (2011). (Linear) maps of the impossible:
Capturing semantic anomalies in distributional space. In \emph{Proceedings of the
ACL Workshop on Distributional Semantics and Compositionality} (pp. 1-9).
Portland, OR.
}
\examples{data(wonderland)
plausibility("cheshire cat",method="n_density",n=10,tvectors=wonderland)
plausibility(compose("mad","hatter",method="Multiply",tvectors=wonderland),
method="proximity",stem="hatter",tvectors=wonderland)} | /man/plausibility.Rd | no_license | codymarquart/LSAfun2 | R | false | false | 3,449 | rd | \name{plausibility}
\alias{plausibility}
\title{Compute word (or compound) plausibility}
\encoding{latin1}
\description{Gives measures of semantic transparency (plausibility) for words or compounds}
\details{
The format of \code{x} should be of the kind \code{x <- "word1 word2 word3"} instead of\cr\code{x <- c("word1", "word2", "word3")} if phrases of more than one word are used as input. Simple vector addition of the constituent vectors is then used to compute the phrase vector.\cr
Since \code{x} can also be chosen to be any vector of the active LSA Space, this function can be combined with \code{compose()} to compute semantic transparency measures of complex expressions (see examples). Since semantic transparency methods were developed as measures for composed vectors, applying them makes most sense for those.\cr\cr
The methods are defined as follows:
\itemize{
\item{\code{method = "n_density"} The average cosine between a (word or phrase) vector and its \emph{n} nearest neighbors (see \code{\link[LSAfun2]{neighbors}})}
\item{\code{method = "length"} The length of a vector (as computed by the standard Euclidean norm)}
\item{\code{method = "proximity"} The cosine similarity between a compound vector and its stem word (for example between \emph{mad hatter} and \emph{hatter} or between \emph{objectify} and \emph{object})}
\item{\code{method = "entropy"} The entropy of the \emph{K}-dimensional vector with the vector components \eqn{t_1,...,t_K }, as computed by
\deqn{entropy = \log{K} - \sum{t_i * \log{t_i}} }}
}
}
\usage{plausibility(x,method, n=10,stem,tvectors=tvectors,breakdown=TRUE)}
\arguments{
\item{x}{a character vector of \code{length(x) = 1} or a numeric of \code{length=ncol(tvectors)} vector with same dimensionality as LSA space}
\item{method}{the measure of semantic transparency, can be one of \code{n_density},\code{length}, \code{proximity}, or \code{entropy} (see \emph{Details})}
\item{n}{the number of neighbors for the \code{n_density} method}
\item{stem}{the stem (or word) of comparison for the \code{proximity} method}
\item{tvectors}{the semantic space in which the computation is to be done (a numeric matrix where every row is a word vector)}
\item{breakdown}{if \code{TRUE}, the function \code{\link[LSAfun2]{breakdown}} is applied to the input}
}
\value{The semantic transparency as a numeric}
\author{
Fritz Günther
}
\seealso{
\code{\link[LSAfun2]{Cosine}},
\code{\link[LSAfun2]{neighbors}},
\code{\link[LSAfun2]{compose}}
}
\references{
Lazaridou, A., Vecchi, E., & Baroni, M. (2013). Fish transporters and miracle homes:
How compositional distributional semantics can help NP parsing. In \emph{Proceedings
of EMNLP 2013} (pp. 1908 - 1913). Seattle, WA.
Marelli, M., & Baroni, M. (in press). Affixation in semantic space: Modeling morpheme meanings with compositional distributional semantics. \emph{Psychological Review.}
Vecchi, E. M., Baroni, M., & Zamparelli, R. (2011). (Linear) maps of the impossible:
Capturing semantic anomalies in distributional space. In \emph{Proceedings of the
ACL Workshop on Distributional Semantics and Compositionality} (pp. 1-9).
Portland, OR.
}
\examples{data(wonderland)
plausibility("cheshire cat",method="n_density",n=10,tvectors=wonderland)
plausibility(compose("mad","hatter",method="Multiply",tvectors=wonderland),
method="proximity",stem="hatter",tvectors=wonderland)} |
## Coursera Hopkins Exploratory Data Course assignment 1: Plot 4
## Reads the household power data, keeps the two days required by the
## assignment and draws the four-panel chart into plot4.png.
data <- read.csv2(".//household_power_consumption.txt", stringsAsFactors = FALSE)
data$Date <- as.Date(as.character(data$Date), "%d/%m/%Y")
dataSelected <- subset(data, Date >= "2007-02-01" & Date <= "2007-02-02")
## Clean up any NAs
dataClean <- na.omit(dataSelected)
## The data was loaded as strings; convert every measurement column to
## numeric in one pass.  (The original code converted a non-existent
## lower-case "voltage" column, which left the real "Voltage" column as
## character and broke the second panel.)
num_cols <- c("Global_active_power", "Global_reactive_power", "Voltage",
              "Global_intensity", "Sub_metering_1", "Sub_metering_2",
              "Sub_metering_3")
dataClean[num_cols] <- lapply(dataClean[num_cols], as.numeric)
## Open the PNG device and lay out a 2x2 grid of panels.
png("plot4.png")
par(mfrow = c(2, 2))
## Combine the date and time columns into one time axis.
x <- strptime(paste(dataClean$Date, dataClean$Time), format = "%Y-%m-%d %H:%M:%S")
## Panel 1: global active power.
plot(x, dataClean$Global_active_power, type = "l",
     ylab = "Global Active Power (kilowatts)")
## Panel 2: voltage.
plot(x, dataClean$Voltage, type = "l", ylab = "Voltage", xlab = "datetime")
## Panel 3: the three sub-metering series with a legend.
plot(x, dataClean$Sub_metering_1, type = "l", ylab = "Energy sub metering")
lines(x, dataClean$Sub_metering_2, type = "l", col = "red")
lines(x, dataClean$Sub_metering_3, type = "l", col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = c(1, 1, 1), lwd = c(2, 2, 2),
       col = c("black", "red", "blue"), bty = "n")
## Panel 4: global reactive power.
plot(x, dataClean$Global_reactive_power, type = "l",
     ylab = "Global Reactive Power", xlab = "datetime")
## Close the device so the file is written.
dev.off()
| /work/plot4.R | no_license | mrmra/ExData_Plotting1 | R | false | false | 2,033 | r | ## Coursera Hopkins Exploratory Data Course assignment 1: Plot 4
## NOTE(review): the argument name is "stringsAsFactors"; "stringsAsFactor"
## only works via partial matching -- spell it out.
data <- read.csv2(".//household_power_consumption.txt", stringsAsFactor=FALSE)
data$Date <- as.Date(as.character(data$Date), "%d/%m/%Y")
dataSelected <- subset(data, Date >= "2007-02-01" & Date <= "2007-02-02")
## Clean up any NAs
dataClean <- na.omit(dataSelected)
## The data was loaded as STRINGS -- otherwise will load as FACTORS, either way
## need to convert to numeric. A mapping function to convert to numeric would be much
## better here,just done this way now for expediency in the rough.
dataClean$Global_active_power <- as.numeric(dataClean$Global_active_power)
dataClean$Global_reactive_power <- as.numeric(dataClean$Global_reactive_power)
## NOTE(review): "voltage" is not a column of this data set ("Voltage" is),
## so this line creates a new column and leaves the real Voltage column as
## character -- the second panel below will then fail. Fix the case.
dataClean$voltage <- as.numeric(dataClean$voltage)
dataClean$Global_intensity <- as.numeric(dataClean$Global_intensity)
dataClean$Sub_metering_1 <- as.numeric(dataClean$Sub_metering_1)
dataClean$Sub_metering_2 <- as.numeric(dataClean$Sub_metering_2)
dataClean$Sub_metering_3 <- as.numeric(dataClean$Sub_metering_3)
## Now chart!
## Open file for PNG write
png("plot4.png")
## Set multiple rows/cols for 4 charts
par(mfrow=c(2,2))
## Cleaning up date/time stamps -- credit to forum for help on this! Setting y to keep readable
x <- strptime( paste(dataClean$Date,dataClean$Time), format="%Y-%m-%d %H:%M:%S")
## first chart!
y <- dataClean$Global_active_power
plot(x,y, type="l", ylab="Global Active Power (kilowatts)")
## second chart!
y <- dataClean$Voltage
plot(x,y, type="l", ylab="Voltage", xlab="datetime")
## third chart!
y <- dataClean$Sub_metering_1
plot(x,y, type="l", ylab="Energy sub metering")
lines(x,dataClean$Sub_metering_2, type="l", col="red")
lines(x,dataClean$Sub_metering_3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2","Sub_metering_3"), lty=c(1,1,1),lwd=c(2,2,2), col=c("black", "red", "blue"), bty="n")
## fourth chart!
y <- dataClean$Global_reactive_power
plot(x,y, type="l", ylab="Global Reactive Power", xlab="datetime")
## Close file
dev.off()
|
# Instrumented wrapper around the compiled routine, apparently
# auto-generated for fuzz/valgrind testing: it records every call's
# arguments in the global `data.env` environment before delegating to the
# native entry point via .Call().
function (X, max_number_of_iterations)
{
    # Append this call's arguments to the global capture list so the
    # inputs can be replayed later.
    e <- get("data.env", .GlobalEnv)
    e[["translate_to_binary"]][[length(e[["translate_to_binary"]]) +
        1]] <- list(X = X, max_number_of_iterations = max_number_of_iterations)
    .Call("_xyz_translate_to_binary", X, max_number_of_iterations)
}
| /valgrind_test_dir/translate_to_binary-test.R | no_license | akhikolla/RcppDeepStateTest | R | false | false | 298 | r | function (X, max_number_of_iterations)
{
e <- get("data.env", .GlobalEnv)
e[["translate_to_binary"]][[length(e[["translate_to_binary"]]) +
1]] <- list(X = X, max_number_of_iterations = max_number_of_iterations)
.Call("_xyz_translate_to_binary", X, max_number_of_iterations)
}
|
#------------------------
# Archimedes with an iPod
#------------------------
theta=10*pi*(0:1000)/1000
r=.1+theta/(2*pi)+.5*sin(10*theta)
x=r*cos(theta)
y=r*sin(theta)
par(pin=c(4,4))
plot(x,y,type="l",lty=1) | /Archimedes with an iPod.R | no_license | AndyTian-Devops/RLearning | R | false | false | 209 | r | #------------------------
# Archimedes with an iPod
#------------------------
# Sample five full turns (0..10*pi) of an Archimedean spiral whose radius
# grows linearly with the angle and carries a sinusoidal ripple, then
# convert polar coordinates to Cartesian for plotting.
theta <- seq(0, 10 * pi, length.out = 1001)
r <- 0.1 + theta / (2 * pi) + 0.5 * sin(10 * theta)
x <- r * cos(theta)
y <- r * sin(theta)
# Square 4x4-inch plotting region.
par(pin = c(4, 4))
plot(x,y,type="l",lty=1) |
# Tests for multi_dittoDimPlotVaryCells function
# library(dittoSeq); library(testthat); source("setup.R"); source("test-multi_VaryCells.R")
sce$number <- as.numeric(seq_along(colnames(sce)))
grp <- "age"
cont <- "gene2"
disc <- "groups"
test_that("VaryCells fxn can show continuous or discrete data", {
expect_s3_class(
multi_dittoDimPlotVaryCells(cont, object=sce, grp),
"gtable")
expect_s3_class(
multi_dittoDimPlotVaryCells(disc, object=sce, grp),
"gtable")
})
test_that("VaryCells fxn can output plots as a list", {
expect_type(
multi_dittoDimPlotVaryCells(cont, object=sce, grp,
list.out = TRUE),
"list")
})
test_that("VaryCells fxn can adjust how expression data is obtained", {
expect_s3_class(
multi_dittoDimPlotVaryCells(cont, object=sce, grp,
min = 0, max =2000),
"gtable")
#Manual Check: scales should be different in the next 2
expect_s3_class(
multi_dittoDimPlotVaryCells(cont, object=sce, grp,
slot = "counts"),
"gtable")
expect_s3_class(
multi_dittoDimPlotVaryCells(cont, object=sce, grp),
"gtable")
})
test_that("VaryCells fxn levels subsetting works", {
expect_s3_class(
multi_dittoDimPlotVaryCells(cont, object=sce, grp,
vary.cells.levels = 1:2),
"gtable")
})
test_that("VaryCells 'show.' tweaks all work", {
# Manual Check: Removes allcells & legend & titles
expect_s3_class(
multi_dittoDimPlotVaryCells(cont, object=sce, grp,
show.allcells.plot = FALSE, show.legend.single = FALSE,
show.titles = FALSE),
"gtable")
# Manual Check: Adds legends to all plots
expect_s3_class(
multi_dittoDimPlotVaryCells(cont, object=sce, grp,
show.legend.allcells.plot = TRUE, show.legend.plots = TRUE),
"gtable")
})
test_that("VaryCells allcells title can be changed", {
expect_s3_class(
multi_dittoDimPlotVaryCells(cont, object=sce, grp,
allcells.main = "DIFFERENT"),
"gtable")
})
# A single copy of this test is enough; the file previously declared the
# identical test twice back to back.
test_that("VaryCells color.panel can be adjusted", {
    expect_s3_class(
        multi_dittoDimPlotVaryCells(disc, object=sce, grp,
            color.panel = c("red","blue","yellow","gray50","purple"),
            colors = 5:1),
        "gtable")
})
test_that("VaryCells fxn errors as wanted when given 'cells.use'.", {
expect_error(
multi_dittoDimPlotVaryCells(cont, object=sce, grp,
cells.use = colnames(sce)[1:5]),
"Further subsetting with 'cells.use'", fixed = TRUE)
})
test_that("VaryCells tells that 'main' is ignored.", {
expect_message(
multi_dittoDimPlotVaryCells(cont, object=sce, grp,
main = "HELLO"),
"'main' ignored", fixed = TRUE)
})
test_that("VaryCells swap.rownames works", {
expect_s3_class(
multi_dittoDimPlotVaryCells(
sce, "gene1_symb", grp, swap.rownames = "symbol"),
"gtable")
})
| /tests/testthat/test-multi_VaryCells.R | permissive | dtm2451/dittoSeq | R | false | false | 3,256 | r | # Tests for multi_dittoDimPlotVaryCells function
# library(dittoSeq); library(testthat); source("setup.R"); source("test-multi_VaryCells.R")
sce$number <- as.numeric(seq_along(colnames(sce)))
grp <- "age"
cont <- "gene2"
disc <- "groups"
test_that("VaryCells fxn can show continuous or discrete data", {
expect_s3_class(
multi_dittoDimPlotVaryCells(cont, object=sce, grp),
"gtable")
expect_s3_class(
multi_dittoDimPlotVaryCells(disc, object=sce, grp),
"gtable")
})
test_that("VaryCells fxn can output plots as a list", {
expect_type(
multi_dittoDimPlotVaryCells(cont, object=sce, grp,
list.out = TRUE),
"list")
})
test_that("VaryCells fxn can adjust how expression data is obtained", {
expect_s3_class(
multi_dittoDimPlotVaryCells(cont, object=sce, grp,
min = 0, max =2000),
"gtable")
#Manual Check: scales should be different in the next 2
expect_s3_class(
multi_dittoDimPlotVaryCells(cont, object=sce, grp,
slot = "counts"),
"gtable")
expect_s3_class(
multi_dittoDimPlotVaryCells(cont, object=sce, grp),
"gtable")
})
test_that("VaryCells fxn levels subsetting works", {
expect_s3_class(
multi_dittoDimPlotVaryCells(cont, object=sce, grp,
vary.cells.levels = 1:2),
"gtable")
})
test_that("VaryCells 'show.' tweaks all work", {
# Manual Check: Removes allcells & legend & titles
expect_s3_class(
multi_dittoDimPlotVaryCells(cont, object=sce, grp,
show.allcells.plot = FALSE, show.legend.single = FALSE,
show.titles = FALSE),
"gtable")
# Manual Check: Adds legends to all plots
expect_s3_class(
multi_dittoDimPlotVaryCells(cont, object=sce, grp,
show.legend.allcells.plot = TRUE, show.legend.plots = TRUE),
"gtable")
})
test_that("VaryCells allcells title can be changed", {
expect_s3_class(
multi_dittoDimPlotVaryCells(cont, object=sce, grp,
allcells.main = "DIFFERENT"),
"gtable")
})
# A single copy of this test is enough; the file previously declared the
# identical test twice back to back.
test_that("VaryCells color.panel can be adjusted", {
    expect_s3_class(
        multi_dittoDimPlotVaryCells(disc, object=sce, grp,
            color.panel = c("red","blue","yellow","gray50","purple"),
            colors = 5:1),
        "gtable")
})
test_that("VaryCells fxn errors as wanted when given 'cells.use'.", {
expect_error(
multi_dittoDimPlotVaryCells(cont, object=sce, grp,
cells.use = colnames(sce)[1:5]),
"Further subsetting with 'cells.use'", fixed = TRUE)
})
test_that("VaryCells tells that 'main' is ignored.", {
expect_message(
multi_dittoDimPlotVaryCells(cont, object=sce, grp,
main = "HELLO"),
"'main' ignored", fixed = TRUE)
})
test_that("VaryCells swap.rownames works", {
expect_s3_class(
multi_dittoDimPlotVaryCells(
sce, "gene1_symb", grp, swap.rownames = "symbol"),
"gtable")
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{managers}
\alias{managers}
\title{time-series data}
\source{
TBA
}
\usage{
data("managers")
}
\description{
Hypothetical Alternative Asset Manager and Benchmark Data for Time Series Factor Model Fit
}
| /man/managers.Rd | permissive | kecoli/PCRM | R | false | true | 326 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{managers}
\alias{managers}
\title{time-series data}
\source{
TBA
}
\usage{
data("managers")
}
\description{
Hypothetical Alternative Asset Manager and Benchmark Data for Time Series Factor Model Fit
}
|
################################################################################
################## Portfolio Functions ##################
################################################################################
library('Sim.DiffProc')
library('dplyr')
library('tidyverse')
library('quadprog')
library('ggplot2')
#*******************************************************************************
# I .- Function to estimate returns
# Compute one-period log returns for a table of prices.
#
# Args:
#   x: data.frame whose first column is the date ("Fecha") and whose
#      remaining columns are asset price series.
# Returns:
#   data.frame with the dates (first date dropped, since a return needs
#   two observations) and one log-return column per asset.
getReturns <- function(x) {
  asset_names <- names(x)[-1]
  dates <- x[-1, 1]                         # first observation has no return
  prices <- as.matrix(x[, -1, drop = FALSE])
  # diff() on a matrix works column-wise, so one vectorized call replaces
  # the per-column loop (the original also passed ncol=/byrow= to
  # as.matrix(), which silently ignores them).
  rets <- as.data.frame(diff(log(prices), lag = 1))
  colnames(rets) <- asset_names
  data.frame(Fecha = dates, rets)
}
#**************************************************************************
# II.- Portfolio optimization
# Minimum-variance portfolio via quadratic programming (quadprog::solve.QP).
#
# Minimizes w' Sigma w subject to: sum(w) = 1 (fully invested),
# w_i >= 0 (no short selling) and w_i <= 1 (no single-asset leverage).
#
# Args:
#   r.e:     named vector of expected returns; only its length and names
#            are used here (the objective is variance only).
#   mat.cov: covariance matrix of asset returns, same order as r.e.
# Returns:
#   list with element "pesos": optimal weights rounded to 4 decimals.
getMinPortfolio<-function(r.e,mat.cov){
  Dmat <- 2*mat.cov               # solve.QP minimizes 1/2 w'Dw, so D = 2*Sigma
  dvec <- rep.int(0, length(r.e)) # no linear term in the objective
  Amat <- cbind(rep(1,length(r.e)), # res1: sum w_i = 1 (equality, meq = 1)
                diag(1,length(r.e)), # res2: w_i >= 0
                -diag(1,length(r.e))) # res3: w_i <= 1  <=>  -w_i >= -1
  bvec <- c(1, # res1
            rep(0,length(r.e)), # res2
            -rep(1,length(r.e))) # res3
  resultado <- solve.QP(Dmat=Dmat,dvec=dvec,Amat=Amat,bvec=bvec,meq=1)
  w.min <- round(resultado$solution, 4)
  names(w.min) <- names(r.e)
  port.min <- list("pesos" = w.min)
  return(port.min)
}
#*************************************************************************
# III. Portfolio returns
# Per-period portfolio returns.
#
# Args:
#   returns: data.frame whose first column is the date and whose remaining
#            columns are per-asset returns, one row per period.
#   weights: numeric weight vector, one entry per asset column, in the
#            same column order as `returns[, -1]`.
# Returns:
#   Numeric vector with the weighted portfolio return of each row.
getPortfolioReturns <- function(returns, weights) {
  asset_rets <- as.matrix(returns[, -1, drop = FALSE])
  # One matrix product replaces the original row-by-row loop, which also
  # failed on an empty input because of the 1:nrow() idiom.
  as.numeric(asset_rets %*% weights)
}
#*******************************************************************************
# IV. Selecting the top_n stocks
# Keep the `top` most volatile assets (largest return standard deviation).
#
# Args:
#   returns: data.frame with a Fecha column plus one return column per asset.
#   top:     number of assets to keep (default 10).
# Returns:
#   `returns` restricted to Fecha and the `top` selected asset columns.
#
# NOTE(review): `<<-` publishes the per-asset summary as a global
# `df_stat` as a side effect -- confirm nothing relies on it before making
# this a local assignment.
# NOTE(review): skewness()/kurtosis() are not base R (moments/e1071), and
# the column name "skeness" looks like a typo for "skewness".
# NOTE(review): this definition masks dplyr::top_n().
top_n <- function(returns,top=10){
  # Long format, one summary row per asset, sorted by volatility.
  df_stat <<- returns %>%
    gather(key='asset',value='valor',-Fecha) %>%
    group_by(asset) %>%
    summarise(n = n(),
              mean = mean(valor),
              sd = sd(valor),
              min = min(valor),
              max = max(valor),
              skeness = skewness(valor),
              kurtosis = kurtosis(valor)) %>%
    ungroup()%>%data.frame()%>%arrange(desc(sd))
  # First `top` asset names after the descending-sd sort.
  topn <- df_stat$asset[1:top]
  df <- returns[,c('Fecha',topn)]
  return(df)
}
#*******************************************************************************
# V. Random weights
# Draw a random weight vector of length `n_assets` that sums to 1.
#
# Weights live on a 1/10000 grid: each asset receives a random integer
# number of basis points from whatever budget remains, the last asset
# takes the remainder, and the vector is shuffled so the residual weight
# does not always land on the same asset.
#
# Args:
#   n_assets: number of assets (>= 1).
# Returns:
#   Numeric vector of length `n_assets`, non-negative, summing to 1.
random_weights <- function(n_assets){
  weights <- integer(n_assets)
  for(i in seq_len(n_assets - 1)){
    remaining <- 10000L - sum(weights)
    # The original called sample(0, 1) once the budget was exhausted,
    # which errors; allocate 0 basis points instead.
    weights[i] <- if (remaining > 0L) sample(remaining, 1L) else 0L
  }
  weights[n_assets] <- 10000L - sum(weights)
  # Shuffle by permuting indices: sample(weights, n) would misbehave when
  # `weights` has length 1 (scalar-first-argument rule of sample()).
  weights[sample.int(n_assets)] / 10000
}
#*******************************************************************************
# VI. parameter estimation
# Fit a geometric Brownian motion dX = theta1*X dt + theta2*X dW to one
# series using Sim.DiffProc::fitsde with the Kessler pseudo-maximum-
# likelihood approximation.
#
# Args:
#   X: numeric series (coerced to a ts object).
# Returns:
#   Numeric vector c(theta1, theta2): fitted drift and diffusion
#   coefficients.
param_estimation <- function(X){
  X <- as.ts(X)
  ## drift
  fx <- expression( theta[1]*x)
  ## diffusion
  gx <- expression( theta[2]*x)
  pmle <- "kessler"
  fitmodel <- fitsde(X,drift=fx,diffusion=gx,pmle=pmle,start = list(theta1=1,theta2=1))
  Coef <- data.frame(coef(fitmodel))
  # logLik/AIC/BIC are assembled but never returned; kept only for
  # interactive inspection of the fit.
  Info <- rbind(logLik(fitmodel),
                AIC(fitmodel),
                BIC(fitmodel))
  colnames(Coef) <- c(pmle)
  rownames(Info) <- c("logLik","AIC","BIC")
  return(Coef$kessler)
}
#*******************************************************************************
# VII. Variance and covariance matrix
# Covariance matrix of the asset-return columns of `base_roll`.
#
# Args:
#   base_roll: data.frame whose first column is the date and whose
#              remaining columns are per-period asset returns.
#   type:      "mv" -> plain sample covariance of the returns.
#              anything else -> fit a GBM per asset via param_estimation()
#              (Kessler pseudo-MLE), combine the fitted volatilities with
#              the sample correlations, and scale by the number of
#              periods in the window.
# Returns:
#   Numeric covariance matrix (assets x assets).
mat_cov <- function(base_roll, type = 'mv'){
  if (type == 'mv'){
    return(cov(as.matrix(base_roll[, -1])))
  }
  n_periods <- nrow(base_roll)  # was `t`, which masked base::t()
  asset_cols <- base_roll[, -1, drop = FALSE]
  # One fitted (drift, diffusion) pair per asset column.
  params <- vapply(asset_cols, param_estimation, numeric(2))
  sd.r <- params[2, ]
  # sigma_i*sigma_j*cor_ij off the diagonal and sigma_i^2 on it is exactly
  # outer(sd, sd) * cor (correlations have a unit diagonal), replacing the
  # original double loop over column pairs.
  outer(sd.r, sd.r) * cor(asset_cols) * n_periods
}
#*******************************************************************************
# VII. Function to match weights
# Expand a named weight vector to a one-row data.frame covering every
# asset column of `base` (all columns except the leading date column),
# filling 0 for any asset that `pesos` does not mention.
all_weights <- function(base, pesos){
  asset_names <- colnames(base[, -1])
  expanded <- as.data.frame(matrix(0, nrow = 1, ncol = length(asset_names)))
  names(expanded) <- asset_names
  expanded[, names(pesos)] <- pesos
  return(expanded)
}
#*******************************************************************************
# VIII. Portfolio weights
# Rolling back-test of three portfolios -- minimum variance, equal weight and
# random weights -- rebalanced every `rebalance_period` months.
#   base             : price data.frame; column 1 = Fecha ("%d/%m/%Y"), one
#                      column per asset.
#   year_to_start    : first year (YYYY) of the out-of-sample period.
#   rebalance_period : number of months between rebalances.
#   mod              : covariance estimator forwarded to mat_cov() ('mv' =
#                      sample covariance, otherwise the SDE-based estimate).
#   top.k            : number of assets kept by top_n() at each rebalance.
# Returns a list with the weights chosen at every rebalance date for the three
# portfolios plus the concatenated out-of-sample portfolio returns.
# NOTE(review): top_n() here is the helper defined in this file (it masks
# dplyr::top_n), and gather/spread are superseded tidyr verbs.
getPortfolio <- function(base,year_to_start, rebalance_period=24,mod='mv',top.k=10){
  df_rend <- getReturns(base)
  # filter assets
  assets <- colnames(df_rend[,-1])
  # long -> wide round-trip keeps only the selected assets and adds a
  # "YYYY-MM" index used below to slice the sample into periods
  df_rend2 <- df_rend%>%
    gather(key='asset',value='valor',-Fecha)%>%
    filter(asset %in% assets)%>%
    spread(asset,valor)%>%
    mutate(Fecha = as.Date(Fecha,"%d/%m/%Y"),
           year = format(Fecha, '%Y'),
           month = format(Fecha, '%m'),
           year_month_index=paste(year,month,sep='-'))
  # create sequence of periods: estimation windows start from the beginning
  # of the sample, rebalance dates start at January of `year_to_start`
  initial.periods <- unique(df_rend2$year_month_index)[1:(length(unique(df_rend2$year_month_index))-which(unique(df_rend2$year_month_index)==paste(year_to_start,'01',sep='-'))+1)]
  final.periods <- unique(df_rend2$year_month_index)[which(unique(df_rend2$year_month_index)==paste(year_to_start,'01',sep='-')):length(unique(df_rend2$year_month_index))]
  start.period <- initial.periods[seq(1,length(initial.periods),by=rebalance_period)]
  final.period <- final.periods[seq(1,length(final.periods),by=rebalance_period)]
  # initial values; NOTE(review): df_re_all_years and df_sd_all_years are
  # initialised but never filled or returned below (dead accumulators)
  df_re_all_years <- NULL
  df_sd_all_years <- NULL
  df_min.ret.weights <- NULL
  df_eqw.ret.weights <- NULL
  df_ran.ret.weights <- NULL
  df_cum_return <- NULL
  for (i in 1:length(final.period)){
    date_min <- start.period[i]
    date_max <- final.period[i]
    # filter by max_date: estimation window is [date_min, date_max)
    # (fixed-width "YYYY-MM" strings compare correctly lexicographically)
    base_roll <- df_rend2 %>%
      filter(year_month_index >= date_min & year_month_index < date_max) %>%
      dplyr::select(-year,-month,-year_month_index)
    # selecting the best k stocks (highest return sd, see top_n)
    base_roll_top <- top_n(base_roll,top=top.k)
    # parameters: covariance matrix, per-asset risk and mean returns
    mat.cov_roll2 <- mat_cov(base_roll_top,type=mod)
    sd_roll <- sqrt(diag(mat.cov_roll2))
    r.e_roll <- colMeans(base_roll_top[,-1])
    # get min porfolio (quadratic programme, see getMinPortfolio)
    port.min<-getMinPortfolio(r.e_roll,mat.cov_roll2)
    # weights of the three competing portfolios
    w_min <- port.min$pesos
    w_eqw <- rep(1/length(base_roll_top[,-1]),length(base_roll_top[,-1]))
    names(w_eqw) <- names(w_min)
    w_rand <- random_weights(ncol(base_roll_top[,-1]))
    names(w_rand) <- names(w_min)
    # weights matched: expand to the full asset universe (zeros elsewhere)
    min.ret.weights <- all_weights(base=df_rend, w_min)
    eqw.ret.weights <- all_weights(base=df_rend, w_eqw)
    ran.ret.weights <- all_weights(base=df_rend, w_rand)
    # cummulative weights (one row appended per rebalance date)
    df_min.ret.weights <- rbind(df_min.ret.weights,min.ret.weights)
    df_eqw.ret.weights <- rbind(df_eqw.ret.weights,eqw.ret.weights)
    df_ran.ret.weights <- rbind(df_ran.ret.weights,ran.ret.weights)
    # total portfolio base: out-of-sample window until the next rebalance
    # (or until the end of the data for the last period)
    if(i < length(final.period)){
      base_total_port <- df_rend2 %>%
        filter(year_month_index >= final.period[i] & year_month_index < final.period[i+1]) %>%
        dplyr::select(-year,-month,-year_month_index)
    } else{
      base_total_port <- df_rend2 %>%
        filter(year_month_index >= final.period[i] & Fecha < max(Fecha)) %>%
        dplyr::select(-year,-month,-year_month_index)
    }
    # df of total portfolio: realised returns of the three portfolios
    df_cum <-
      data.frame('date' = base_total_port[,1],
                 'min.ret' = getPortfolioReturns(base_total_port,w_min),
                 'eqw.ret' = getPortfolioReturns(base_total_port,w_eqw),
                 'ran.ret' = getPortfolioReturns(base_total_port,w_rand))
    # cummulative total portfolio
    df_cum_return <- rbind(df_cum_return, df_cum)
    cat(paste('Estimated period : ', date_max,sep=''),"\n")
  }
  # df weights: one row per rebalance date
  df_min_weights <- data.frame('year' = final.period, df_min.ret.weights)
  df_eqw_weights <- data.frame('year' = final.period, df_eqw.ret.weights)
  df_ran_weights <- data.frame('year' = final.period, df_ran.ret.weights)
  all <- list('df_min_weights' = df_min_weights,
              'df_eqw_weights' = df_eqw_weights,
              'df_ran_weights' = df_ran_weights,
              'df.port.ret' = df_cum_return)
  return(all)
}
################################################################################
| /Functions/PortfolioFunctions_v1.R | no_license | JulioCesar-MS/Portfolio-Analysis | R | false | false | 9,133 | r | ################################################################################
##################                Portfolio Functions        ##################
################################################################################
# Dependencies:
#   Sim.DiffProc : fitsde() used by param_estimation()
#   dplyr / tidyverse : data wrangling in top_n() and getPortfolio()
#   quadprog : solve.QP() used by getMinPortfolio()
#   ggplot2 : not used by the functions in this file -- presumably loaded for
#             callers; NOTE(review): library(tidyverse) already attaches
#             dplyr and ggplot2, so those calls are redundant.
library('Sim.DiffProc')
library('dplyr')
library('tidyverse')
library('quadprog')
library('ggplot2')
#*******************************************************************************
# I .- Function to estimate returns
# Log-returns for every asset column of `x`; column 1 holds the dates.
# Returns a data.frame with column `Fecha` (the dates from the second row
# onwards) and one log-return column per asset, named as in the input.
getReturns <- function(x) {
  asset_names <- names(x)[-1]
  fechas <- x[-1, 1]
  precios <- as.matrix(x[, -1])
  # One column of log-returns per asset; do.call(cbind, ...) keeps a 1-row
  # matrix even when only two price observations are available.
  rend_cols <- lapply(seq_len(ncol(precios)),
                      function(j) diff(log(precios[, j]), lag = 1))
  rend <- data.frame(do.call(cbind, rend_cols))
  colnames(rend) <- asset_names
  data.frame(Fecha = fechas, rend)
}
#**************************************************************************
# II.- Portfolio optimization
# Global minimum-variance portfolio via quadprog::solve.QP:
#   minimise w' Sigma w  subject to  sum(w) = 1 (equality, meq = 1)
#   and 0 <= w_i <= 1 for every asset (long-only, no leverage).
# `r.e` is only used for its length and names; `mat.cov` is Sigma.
# Returns list(pesos = w) with weights rounded to 4 decimals.
# NOTE(review): because of the rounding, the weights may not sum to
# exactly 1.
getMinPortfolio<-function(r.e,mat.cov){
  # solve.QP minimises 1/2 w' Dmat w - dvec' w, hence Dmat = 2 * Sigma
  Dmat <- 2*mat.cov
  dvec <- rep.int(0, length(r.e))
  Amat <- cbind(rep(1,length(r.e)), # res1: sum w_i = 1
                diag(1,length(r.e)), # res2: w_i > 0
                -diag(1,length(r.e))) # res3: w_i <= b => - w_i >= b
  bvec <- c(1, # res1
            rep(0,length(r.e)), # res2
            -rep(1,length(r.e))) # res3
  resultado <- solve.QP(Dmat=Dmat,dvec=dvec,Amat=Amat,bvec=bvec,meq=1)
  w.min <- round(resultado$solution, 4)
  names(w.min) <- names(r.e)
  port.min <- list("pesos" = w.min)
  return(port.min)
}
#*************************************************************************
# III. Portfolio returns
# Per-period portfolio return: weighted sum of the asset-return columns of
# `returns` (column 1 is the date column and is dropped). `weights` is a
# numeric vector aligned positionally with the asset columns.
# Vectorised (matrix product) replacement of the original row-by-row loop,
# which grew its result vector one element at a time; a length mismatch
# between `weights` and the asset columns now errors instead of silently
# recycling.
getPortfolioReturns <- function(returns, weights) {
  as.vector(as.matrix(returns[, -1]) %*% weights)
}
#*******************************************************************************
# IV. Selecting the top_n stocks
# Keep the `top` assets with the highest return standard deviation; returns
# the date column plus those asset columns.
# Side effect: the per-asset summary table is assigned to `df_stat` in the
# global environment via `<<-` -- other scripts may rely on it, do not
# remove without checking.
# NOTE(review): this function masks dplyr::top_n, and skewness()/kurtosis()
# are not base R -- presumably provided by an attached package; confirm.
top_n <- function(returns,top=10){
  df_stat <<- returns %>%
    gather(key='asset',value='valor',-Fecha) %>%
    group_by(asset) %>%
    summarise(n = n(),
              mean = mean(valor),
              sd = sd(valor),
              min = min(valor),
              max = max(valor),
              skeness = skewness(valor), # NOTE(review): column-name typo ("skeness") kept for compatibility
              kurtosis = kurtosis(valor)) %>%
    ungroup()%>%data.frame()%>%arrange(desc(sd))
  # assets sorted by decreasing volatility; take the first `top`
  topn <- df_stat$asset[1:top]
  df <- returns[,c('Fecha',topn)]
  return(df)
}
#*******************************************************************************
# V. Random weights
# Draw `n_assets` non-negative random weights (resolution 1/10000) that sum
# exactly to 1: n_assets - 1 distinct cut points split 0..10000 into
# n_assets positive integer parts, which are then shuffled.
# Fixes two defects of the original sequential scheme: it could call
# sample(0, 1) (an error) once the running total reached 10000, and for
# n_assets == 1 it returned a weight that did not sum to 1.
random_weights <- function(n_assets){
  if (n_assets == 1) {
    return(1)
  }
  cuts <- sort(sample.int(10000 - 1, n_assets - 1))
  parts <- diff(c(0, cuts, 10000))
  return(sample(parts, n_assets, replace = FALSE) / 10000)
}
#*******************************************************************************
# VI. parameter estimation
# Fit the SDE dX = theta1 * X dt + theta2 * X dW (geometric-Brownian-motion
# form) to the series X via Sim.DiffProc::fitsde using the Kessler
# pseudo-maximum-likelihood method; returns c(theta1, theta2) =
# c(drift, diffusion).
param_estimation <- function(X){
  X <- as.ts(X)
  ## drift
  fx <- expression( theta[1]*x)
  ## diffusion
  gx <- expression( theta[2]*x)
  pmle <- "kessler"
  fitmodel <- fitsde(X,drift=fx,diffusion=gx,pmle=pmle,start = list(theta1=1,theta2=1))
  Coef <- data.frame(coef(fitmodel))
  # NOTE(review): Info is computed but never returned or used -- candidate
  # for removal.
  Info <- rbind(logLik(fitmodel),
                AIC(fitmodel),
                BIC(fitmodel))
  colnames(Coef) <- c(pmle)
  rownames(Info) <- c("logLik","AIC","BIC")
  return(Coef$kessler)
}
#*******************************************************************************
# VII. Variance and covariance matrix
# Variance-covariance matrix of the asset returns in `base_roll` (column 1 is
# the date column and is ignored).
#   type == 'mv' : plain sample covariance of the observed returns.
#   otherwise    : SDE-based estimate -- the per-asset diffusion parameter
#                  comes from param_estimation() (defined earlier in this
#                  file) and pairwise covariances are sd_i * sd_j * cor_ij,
#                  scaled by the number of observations.
# Returns an (n_assets x n_assets) numeric matrix.
mat_cov <- function(base_roll, type = 'mv') {
  if (type == 'mv') {
    mat.cov <- cov(as.matrix(base_roll[, -1]))
  } else {
    n_obs <- nrow(base_roll)  # replaces the magrittr-only `base_roll %>% nrow()`
    # Diffusion (volatility) parameter per asset; param_estimation() returns
    # c(drift, diffusion).
    sd.r <- vapply(seq_len(ncol(base_roll) - 1) + 1,
                   function(j) param_estimation(base_roll[, j])[2],
                   numeric(1))
    # sd_i * sd_j * cor_ij for every pair; cor() has 1s on the diagonal, so
    # the outer product reproduces the old sd_i^2 diagonal entries. This
    # replaces the original O(n^2) double loop and drops the unused
    # expected-return vector and intermediate covariance matrix.
    mat.cov <- unname(outer(sd.r, sd.r) * cor(base_roll[, -1])) * n_obs
  }
  return(mat.cov)
}
#*******************************************************************************
# VII. Function to match weights
# Expand the optimised weights `pesos` (a named numeric vector covering a
# subset of assets) to a 1-row data.frame with one column per asset column of
# `base` (column 1 of `base` is the date column); assets that were not
# selected get weight 0.
# Fix: use colnames(base)[-1] so a `base` with a single asset column no
# longer collapses to a dimensionless vector (the old `base[,-1]` dropped the
# data.frame dimension and produced a malformed result).
all_weights <- function(base,pesos){
  asset_names <- colnames(base)[-1]
  mat.weights <- data.frame(matrix(0,ncol=length(asset_names),nrow=1))
  colnames(mat.weights) <- asset_names
  mat.weights[,names(pesos)] <- pesos
  return(mat.weights)
}
#*******************************************************************************
# VIII. Portfolio weights
# Rolling back-test of three portfolios -- minimum variance, equal weight and
# random weights -- rebalanced every `rebalance_period` months.
#   base             : price data.frame; column 1 = Fecha ("%d/%m/%Y"), one
#                      column per asset.
#   year_to_start    : first year (YYYY) of the out-of-sample period.
#   rebalance_period : number of months between rebalances.
#   mod              : covariance estimator forwarded to mat_cov() ('mv' =
#                      sample covariance, otherwise the SDE-based estimate).
#   top.k            : number of assets kept by top_n() at each rebalance.
# Returns a list with the weights chosen at every rebalance date for the three
# portfolios plus the concatenated out-of-sample portfolio returns.
# NOTE(review): top_n() here is the helper defined in this file (it masks
# dplyr::top_n), and gather/spread are superseded tidyr verbs.
getPortfolio <- function(base,year_to_start, rebalance_period=24,mod='mv',top.k=10){
  df_rend <- getReturns(base)
  # filter assets
  assets <- colnames(df_rend[,-1])
  # long -> wide round-trip keeps only the selected assets and adds a
  # "YYYY-MM" index used below to slice the sample into periods
  df_rend2 <- df_rend%>%
    gather(key='asset',value='valor',-Fecha)%>%
    filter(asset %in% assets)%>%
    spread(asset,valor)%>%
    mutate(Fecha = as.Date(Fecha,"%d/%m/%Y"),
           year = format(Fecha, '%Y'),
           month = format(Fecha, '%m'),
           year_month_index=paste(year,month,sep='-'))
  # create sequence of periods: estimation windows start from the beginning
  # of the sample, rebalance dates start at January of `year_to_start`
  initial.periods <- unique(df_rend2$year_month_index)[1:(length(unique(df_rend2$year_month_index))-which(unique(df_rend2$year_month_index)==paste(year_to_start,'01',sep='-'))+1)]
  final.periods <- unique(df_rend2$year_month_index)[which(unique(df_rend2$year_month_index)==paste(year_to_start,'01',sep='-')):length(unique(df_rend2$year_month_index))]
  start.period <- initial.periods[seq(1,length(initial.periods),by=rebalance_period)]
  final.period <- final.periods[seq(1,length(final.periods),by=rebalance_period)]
  # initial values; NOTE(review): df_re_all_years and df_sd_all_years are
  # initialised but never filled or returned below (dead accumulators)
  df_re_all_years <- NULL
  df_sd_all_years <- NULL
  df_min.ret.weights <- NULL
  df_eqw.ret.weights <- NULL
  df_ran.ret.weights <- NULL
  df_cum_return <- NULL
  for (i in 1:length(final.period)){
    date_min <- start.period[i]
    date_max <- final.period[i]
    # filter by max_date: estimation window is [date_min, date_max)
    # (fixed-width "YYYY-MM" strings compare correctly lexicographically)
    base_roll <- df_rend2 %>%
      filter(year_month_index >= date_min & year_month_index < date_max) %>%
      dplyr::select(-year,-month,-year_month_index)
    # selecting the best k stocks (highest return sd, see top_n)
    base_roll_top <- top_n(base_roll,top=top.k)
    # parameters: covariance matrix, per-asset risk and mean returns
    mat.cov_roll2 <- mat_cov(base_roll_top,type=mod)
    sd_roll <- sqrt(diag(mat.cov_roll2))
    r.e_roll <- colMeans(base_roll_top[,-1])
    # get min porfolio (quadratic programme, see getMinPortfolio)
    port.min<-getMinPortfolio(r.e_roll,mat.cov_roll2)
    # weights of the three competing portfolios
    w_min <- port.min$pesos
    w_eqw <- rep(1/length(base_roll_top[,-1]),length(base_roll_top[,-1]))
    names(w_eqw) <- names(w_min)
    w_rand <- random_weights(ncol(base_roll_top[,-1]))
    names(w_rand) <- names(w_min)
    # weights matched: expand to the full asset universe (zeros elsewhere)
    min.ret.weights <- all_weights(base=df_rend, w_min)
    eqw.ret.weights <- all_weights(base=df_rend, w_eqw)
    ran.ret.weights <- all_weights(base=df_rend, w_rand)
    # cummulative weights (one row appended per rebalance date)
    df_min.ret.weights <- rbind(df_min.ret.weights,min.ret.weights)
    df_eqw.ret.weights <- rbind(df_eqw.ret.weights,eqw.ret.weights)
    df_ran.ret.weights <- rbind(df_ran.ret.weights,ran.ret.weights)
    # total portfolio base: out-of-sample window until the next rebalance
    # (or until the end of the data for the last period)
    if(i < length(final.period)){
      base_total_port <- df_rend2 %>%
        filter(year_month_index >= final.period[i] & year_month_index < final.period[i+1]) %>%
        dplyr::select(-year,-month,-year_month_index)
    } else{
      base_total_port <- df_rend2 %>%
        filter(year_month_index >= final.period[i] & Fecha < max(Fecha)) %>%
        dplyr::select(-year,-month,-year_month_index)
    }
    # df of total portfolio: realised returns of the three portfolios
    df_cum <-
      data.frame('date' = base_total_port[,1],
                 'min.ret' = getPortfolioReturns(base_total_port,w_min),
                 'eqw.ret' = getPortfolioReturns(base_total_port,w_eqw),
                 'ran.ret' = getPortfolioReturns(base_total_port,w_rand))
    # cummulative total portfolio
    df_cum_return <- rbind(df_cum_return, df_cum)
    cat(paste('Estimated period : ', date_max,sep=''),"\n")
  }
  # df weights: one row per rebalance date
  df_min_weights <- data.frame('year' = final.period, df_min.ret.weights)
  df_eqw_weights <- data.frame('year' = final.period, df_eqw.ret.weights)
  df_ran_weights <- data.frame('year' = final.period, df_ran.ret.weights)
  all <- list('df_min_weights' = df_min_weights,
              'df_eqw_weights' = df_eqw_weights,
              'df_ran_weights' = df_ran_weights,
              'df.port.ret' = df_cum_return)
  return(all)
}
################################################################################
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PrisonersDilemmaStratTourn.R
\name{Memory.Random.Play.PD}
\alias{Memory.Random.Play.PD}
\title{Generate Memory where strategies play against a random strategy}
\usage{
Memory.Random.Play.PD(game.object, algo.par)
}
\arguments{
\item{game.object}{as specified by Get.Game.Object}
\item{algo.par}{as e.g. given by Get.Def.Par.QLearning}
}
\description{
Each strategy within the game.object plays against a random strategy of the given defection probability
}
\details{
Outputs List of lists with the following elements:
\itemize{
\item state - Already encoded game state, if algo.par$mem.type=="game.encoded"
\item action - Which of the actions has been taken?
\item next.state - resulting next, encoded, state
\item reward - What did we get from transitioning to the next state?
\item done - Boolean; is the game over?
}
Public Function which might be called by algorithms.
}
| /man/Memory.Random.Play.PD.Rd | no_license | MartinKies/RLR | R | false | true | 954 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PrisonersDilemmaStratTourn.R
\name{Memory.Random.Play.PD}
\alias{Memory.Random.Play.PD}
\title{Generate Memory where strategies play against a random strategy}
\usage{
Memory.Random.Play.PD(game.object, algo.par)
}
\arguments{
\item{game.object}{as specified by Get.Game.Object}
\item{algo.par}{as e.g. given by Get.Def.Par.QLearning}
}
\description{
Each strategy within the game.object plays against a random strategy of the given defection probability
}
\details{
Outputs List of lists with the following elements:
\itemize{
\item state - Already encoded game state, if algo.par$mem.type=="game.encoded"
\item action - Which of the actions has been taken?
\item next.state - resulting next, encoded, state
\item reward - What did we get from transitioning to the next state?
\item done - Boolean; is the game over?
}
Public Function which might be called by algorithms.
}
|
#' plotARRW
#' @description Plot the observed data points, the true signal and the
#'   changepoint locations of a simulated ARRW data set on a single graph.
#' @param y An object created by the dataARRW function; components used here:
#'   \code{y} (observations), \code{mu} (true signal), \code{changepoints}.
plotARRW <- function(y)
{
  # Common y-axis range so both overlaid plots share the same scale
  ylim <- c(min(y$mu,y$y), max(y$mu,y$y))
  plot(y$y, col = 1, ylim = ylim, pch = '+', ylab = "")# observed data (col = 1, black)
  par(new= TRUE)
  plot(y$mu, col = 2, ylim = ylim, pch = '+', ylab = "") # true signal in red
  abline(v = y$changepoints)
}
#' plotARRWdiff
#' @description Plot the differenced series (y_t - y_{t-1}) together with the
#'   changepoint locations, to identify obvious changepoints visually.
#' @param y An object created by the dataARRW function; components used here:
#'   \code{y} (observations) and \code{changepoints}.
plotARRWdiff <- function(y)
{
  # First differences; the leading 0 keeps z the same length as y$y
  z <- diff(c(0,y$y))
  plot(z,xlim = c(1,length(z)))
  if(length(y$changepoints) > 0)
  {
    # Highlight the differenced values at the changepoints in red
    par(new = TRUE)
    plot(y$changepoints,z[y$changepoints], xlim = c(1,length(z)), col = 2)
    abline(v = y$changepoints)
  }
}
#' plotVarVarEstim
#' @description plot the estimated variances v_k against the true variances for the diff k operator (y_{t+k} - y_t) for k = 1 to nbK
#' @param v a list whose \code{varEst} component holds the estimated
#'   variances of the diff k operator (only \code{v$varEst} is used here)
#' @param sdEta2 the Random Walk variance
#' @param sdNu2 the AR(1) variance
#' @param phi the autocorrelative AR(1) parameter
#' @param nbK number of diff k elements to consider
plotVarVarEstim <- function(v, sdEta2, sdNu2, phi, nbK = 10)
{
  v <- v$varEst
  # Theoretical variance of y_{t+k} - y_t under the RW + AR(1) model:
  # k * sdEta2 + 2 * (1 - phi^k) / (1 - phi^2) * sdNu2
  vari <- rep(0,nbK)
  for(k in 1:nbK)
  {
    vari[k] <- k*sdEta2 + 2*((1-phi^k)/(1-phi^2))*sdNu2
  }
  # Shared y-range so both series are comparable on the same axes
  ylim <- c(min(vari,v), max(vari,v))
  plot(vari, ylim = c(ylim), col = 1) # theoretical values in black
  par(new = TRUE)
  plot(v, ylim = c(ylim), col = 2) # red = estimation
}
| /R/plot.R | no_license | aminaghoul/ARRWestim | R | false | false | 1,572 | r | #' plotARRW
#' @description Plot the observed data points, the true signal and the
#'   changepoint locations of a simulated ARRW data set on a single graph.
#' @param y An object created by the dataARRW function; components used here:
#'   \code{y} (observations), \code{mu} (true signal), \code{changepoints}.
plotARRW <- function(y)
{
  # Common y-axis range so both overlaid plots share the same scale
  ylim <- c(min(y$mu,y$y), max(y$mu,y$y))
  plot(y$y, col = 1, ylim = ylim, pch = '+', ylab = "")# observed data (col = 1, black)
  par(new= TRUE)
  plot(y$mu, col = 2, ylim = ylim, pch = '+', ylab = "") # true signal in red
  abline(v = y$changepoints)
}
#' plotARRWdiff
#' @description Plot the differenced series (y_t - y_{t-1}) together with the
#'   changepoint locations, to identify obvious changepoints visually.
#' @param y An object created by the dataARRW function; components used here:
#'   \code{y} (observations) and \code{changepoints}.
plotARRWdiff <- function(y)
{
  # First differences; the leading 0 keeps z the same length as y$y
  z <- diff(c(0,y$y))
  plot(z,xlim = c(1,length(z)))
  if(length(y$changepoints) > 0)
  {
    # Highlight the differenced values at the changepoints in red
    par(new = TRUE)
    plot(y$changepoints,z[y$changepoints], xlim = c(1,length(z)), col = 2)
    abline(v = y$changepoints)
  }
}
#' plotVarVarEstim
#' @description plot the estimated variances v_k against the true variances for the diff k operator (y_{t+k} - y_t) for k = 1 to nbK
#' @param v a list whose \code{varEst} component holds the estimated
#'   variances of the diff k operator (only \code{v$varEst} is used here)
#' @param sdEta2 the Random Walk variance
#' @param sdNu2 the AR(1) variance
#' @param phi the autocorrelative AR(1) parameter
#' @param nbK number of diff k elements to consider
plotVarVarEstim <- function(v, sdEta2, sdNu2, phi, nbK = 10)
{
  v <- v$varEst
  # Theoretical variance of y_{t+k} - y_t under the RW + AR(1) model:
  # k * sdEta2 + 2 * (1 - phi^k) / (1 - phi^2) * sdNu2
  vari <- rep(0,nbK)
  for(k in 1:nbK)
  {
    vari[k] <- k*sdEta2 + 2*((1-phi^k)/(1-phi^2))*sdNu2
  }
  # Shared y-range so both series are comparable on the same axes
  ylim <- c(min(vari,v), max(vari,v))
  plot(vari, ylim = c(ylim), col = 1) # theoretical values in black
  par(new = TRUE)
  plot(v, ylim = c(ylim), col = 2) # red = estimation
}
|
/Practica_5/Codigo/P5_N.R | no_license | xoce15/Simulacion_Sistemas | R | false | false | 4,316 | r | ||
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cfbd_metrics.R
\name{cfbd_metrics_wp_pregame}
\alias{cfbd_metrics_wp_pregame}
\title{\strong{Get pre-game win probability data from API}}
\usage{
cfbd_metrics_wp_pregame(
year = NULL,
week = NULL,
team = NULL,
season_type = "regular"
)
}
\arguments{
\item{year}{(\emph{Integer} optional): Year, 4 digit format (\emph{YYYY})}
\item{week}{(\emph{Integer} optional): Week - values from 1-15, 1-14 for seasons pre-playoff, i.e. 2013 or earlier}
\item{team}{(\emph{String} optional): D-I Team}
\item{season_type}{(\emph{String} default regular): Select Season Type: regular or postseason}
}
\value{
\code{\link[=cfbd_metrics_wp_pregame]{cfbd_metrics_wp_pregame()}} - A data frame with 9 variables:
\describe{
\item{\code{season}: integer.}{Season of game.}
\item{\code{season_type}: character.}{Season type of game.}
\item{\code{week}: integer.}{Game week of the season.}
\item{\code{game_id}: integer.}{Referencing game id.}
\item{\code{home_team}: character.}{Home team name.}
\item{\code{away_team}: character.}{Away team name.}
\item{\code{spread}: integer.}{Betting line provider spread.}
\item{\code{home_win_prob}: double.}{Home win probability - pre-game prediction.}
\item{\code{away_win_prob}: double.}{Away win probability - pre-game prediction.}
}
}
\description{
\strong{Get pre-game win probability data from API}
}
\examples{
\donttest{
cfbd_metrics_wp_pregame(year = 2019, week = 9, team = "Texas A&M")
}
}
\keyword{Data}
\keyword{Pre-game}
\keyword{Probability}
\keyword{Win}
| /man/cfbd_metrics_wp_pregame.Rd | permissive | Engy-22/cfbfastR | R | false | true | 1,578 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cfbd_metrics.R
\name{cfbd_metrics_wp_pregame}
\alias{cfbd_metrics_wp_pregame}
\title{\strong{Get pre-game win probability data from API}}
\usage{
cfbd_metrics_wp_pregame(
year = NULL,
week = NULL,
team = NULL,
season_type = "regular"
)
}
\arguments{
\item{year}{(\emph{Integer} optional): Year, 4 digit format (\emph{YYYY})}
\item{week}{(\emph{Integer} optional): Week - values from 1-15, 1-14 for seasons pre-playoff, i.e. 2013 or earlier}
\item{team}{(\emph{String} optional): D-I Team}
\item{season_type}{(\emph{String} default regular): Select Season Type: regular or postseason}
}
\value{
\code{\link[=cfbd_metrics_wp_pregame]{cfbd_metrics_wp_pregame()}} - A data frame with 9 variables:
\describe{
\item{\code{season}: integer.}{Season of game.}
\item{\code{season_type}: character.}{Season type of game.}
\item{\code{week}: integer.}{Game week of the season.}
\item{\code{game_id}: integer.}{Referencing game id.}
\item{\code{home_team}: character.}{Home team name.}
\item{\code{away_team}: character.}{Away team name.}
\item{\code{spread}: integer.}{Betting line provider spread.}
\item{\code{home_win_prob}: double.}{Home win probability - pre-game prediction.}
\item{\code{away_win_prob}: double.}{Away win probability - pre-game prediction.}
}
}
\description{
\strong{Get pre-game win probability data from API}
}
\examples{
\donttest{
cfbd_metrics_wp_pregame(year = 2019, week = 9, team = "Texas A&M")
}
}
\keyword{Data}
\keyword{Pre-game}
\keyword{Probability}
\keyword{Win}
|
# Bioreactor yield regression: fit a simple linear model of yield against
# each available predictor in turn, then draw a scatterplot matrix of all
# variables. Data are fetched from openmv.net (network access required).
bio <- read.csv('http://openmv.net/file/bioreactor-yields.csv')
summary(bio)
# Temperature-Yield model
model.temp <- lm(bio$yield ~ bio$temperature)
summary(model.temp)
# Impeller speed-Yield model
model.speed <- lm(bio$yield ~ bio$speed)
summary(model.speed)
# Baffles-Yield model
model.baffles <- lm(bio$yield ~ bio$baffles)
summary(model.baffles)
# Scatterplot matrix
# NOTE(review): bitmap() renders via Ghostscript -- confirm it is installed,
# otherwise use png() directly.
bitmap('bioreactor-scatterplot-matrix.png', type="png256",
       width=10, height=10, res=300)
plot(bio)
dev.off()
| /least-squares/bioreactor-regression-assignment.R | no_license | kgdunn/figures | R | false | false | 493 | r | bio <- read.csv('http://openmv.net/file/bioreactor-yields.csv')
# Summarise the bioreactor data, fit one simple linear model per predictor,
# and draw a scatterplot matrix of all variables.
summary(bio)
# Temperature-Yield model
model.temp <- lm(bio$yield ~ bio$temperature)
summary(model.temp)
# Impeller speed-Yield model
model.speed <- lm(bio$yield ~ bio$speed)
summary(model.speed)
# Baffles-Yield model
model.baffles <- lm(bio$yield ~ bio$baffles)
summary(model.baffles)
# Scatterplot matrix
# NOTE(review): bitmap() renders via Ghostscript -- confirm it is installed,
# otherwise use png() directly.
bitmap('bioreactor-scatterplot-matrix.png', type="png256",
       width=10, height=10, res=300)
plot(bio)
dev.off()
|
# Generate artificial BaP measurements from CMAQ model output: sample station
# locations on the model grid (excluding water cells), draw measurement
# periods, average the modelled concentrations over each period and save the
# resulting table as .rda and .csv.
# NOTE(review): the ncdf package is legacy (superseded by ncdf4) -- confirm
# availability before running.
library(ncdf)
output.addr <- '/home/trn/Desktop/diploma-thesis/data'
save.addr <- '/home/trn/diploma-thesis/R/scripts/measurements/artificial-measurements3' #name of file without end (.csv or .rda)
func.addr <- '/home/trn/diploma-thesis/R/scripts/measurements' #where is create-ncdf & clear measurements files script
water.addr <- '/home/trn/Desktop/diploma-thesis/software/multi-iteration/init/GRIDCRO2D_aa2006test'
number <- 400 #how many measurements generate
num.locations <- 380 #how many locations use
num.locations.redundant <- 20 #different locations in same cells
max.time <- 1 #maximum time for passive measurements
# model grid dimensions (cells); the location helper below mixes the two
# dimensions when decoding indices, which is safe only while row == col
row <- 121
col <- 121
multiply.by <- 1000 #real measurements are 1000 bigger then cmaq outputs
#LOAD DATA
simulation <- 1 #if not used data from iterative simulations, use ''
date.from <- as.Date('2006-01-01')
date.to <- as.Date('2006-01-01')
source(paste(func.addr,"/create-ncdf.r",sep=""))
source(paste(func.addr,"/clear-measurements.r",sep=""))
days.num <- as.numeric(date.to-date.from,units="days")+1 #number of days
days.seq <- seq(from=date.from,to=date.to,by='days') #vector of days, format %Y-%m-%d
days.seq2 <- rep("",length(days.seq)) #vector of days, format %Y%j
for(i in 1:length(days.seq)) {
  days.seq2[i] <- format(days.seq[i], "%Y%j")
}
# Read the daily ACONC CMAQ output files, average each requested variable
# over the last (hourly) dimension and sum the variables together, returning
# an array of daily mean fields with dim c(row, col, days.num).
# Relies on the globals row, col, days.num, days.seq2 and simulation defined
# above; uses the legacy ncdf API (open.ncdf / get.var.ncdf / close.ncdf).
# NOTE(review): assumes get.var.ncdf returns a 3-D array (x, y, hour) so the
# nested apply averages over hours -- confirm for other variables.
traverseAconc <- function(dir, aconc.name, variables) {
  result <- array(0,dim=c(row,col,days.num))
  for(i in 1:length(days.seq2)) {
    # file name differs depending on whether iterative-simulation output is used
    if(simulation=='') {
      file.addr <- paste(dir,'/',aconc.name,'.',days.seq2[i],'.ncf',sep="") #name of file is created
    } else {
      file.addr <- paste(dir,'/',aconc.name,'.',days.seq2[i],'_',simulation,'.ncf',sep="") #name of file is created
    }
    print(paste("Reading:",file.addr))
    file <- open.ncdf(file.addr)
    temp.result <- matrix(0,nrow=row,ncol=col)
    for(j in variables) { #each variable is read
      temp <- get.var.ncdf(file, j)
      temp.variable <- apply(temp, 2, function(data) { #variable is averaged over hours
        apply(data,1,mean)
      })
      temp.result <- temp.result+temp.variable #variables are summed together !!! (ABAPI, ABAPJ, ABAPK)
    }
    close.ncdf(file)
    result[,,i] <- temp.result #save temp result for one day to summary matrix
  }
  return(result)
}
result <- traverseAconc(output.addr,'CCTM_e1a_Linux2_x86_64ifort.benchmark.ACONC',c('BAP'))
#CREATE LIST OF LOCATIONS
# Draw `number` distinct linear cell indices outside water cells and decode
# them into (row, col) grid coordinates.
# NOTE(review): the decoding mixes the two dimensions (x %% col versus
# ceiling(x / row)); it is only consistent because row == col here --
# confirm before using a non-square grid.
generateLocation <- function(col,row,number,water) { #once every location
  loc.all <- 1:(row*col)
  loc <- sample(loc.all[-water],number)
  coord <- sapply(loc, function(x) { #have to be flipped because water (CMAQ output) has col, row dimensions
    loc.col <- x%%col
    loc.row <- ceiling(x/row)
    if(loc.col==0) loc.col <- col
    return(c(loc.row,loc.col))
  })
  return(data.frame(row=coord[1,],col=coord[2,],nameCS.1=paste('Location ',1:number,sep=""))) #keep it flipped
}
# Add `num` stations re-using the cells of already-drawn locations but with
# fresh location names (numbering continues after the existing rows).
generateRedundantLocation <- function(locations,num) {
  loc <- sample(1:nrow(locations),num,replace=TRUE)
  return(data.frame(row=locations$row[loc],col=locations$col[loc],nameCS.1=paste('Location ',(nrow(locations)+1):(nrow(locations)+num),sep="")))
}
# Pad the table with `num` rows sampled (with replacement) from the existing
# locations, keeping their original names.
generateRestOfTheLocations <- function(locations,num) {
  loc <- sample(1:nrow(locations),num,replace=TRUE)
  return(locations[loc,])
}
# Land/water mask from the GRIDCRO2D file: cells where LWMASK == 0 are
# treated as water and excluded from sampling.
water.file <- open.ncdf(water.addr)
water <- get.var.ncdf(water.file, 'LWMASK')
water <- which(water==0) #measurements aren't in water, !!! dimensions of water are col,row
locations <- generateLocation(col,row,num.locations,water) #once every location
#add redundant locations
locations <- rbind(locations,generateRedundantLocation(locations,num.locations.redundant))
#add rest of the locations
locations <- rbind(locations,generateRestOfTheLocations(locations,(number-nrow(locations))))
rownames(locations) <- 1:nrow(locations)
#CREATE LIST OF DATES
# Draw a random measurement period per station: a random start day and an
# end day at most `max.time` days later, clamped to the available date range.
generateTimes <- function(number,max.time,times) {
  from <- sample(1:(length(times)),number,replace=TRUE)
  to <- sapply(from,function(x) sample(x:(x+max.time),1))
  to[to>length(times)] <- length(times)
  return(data.frame(from=times[from],to=times[to]))
}
times <- generateTimes(number,max.time,days.seq)
vystup <- cbind(locations,times)
#COMPUTE VALUES IN LOCATIONS OVER DEFINED TIME
# Average the modelled daily field over each station's period and rescale to
# measurement units (see multiply.by above).
vystup[,c('value')] <- NA
for(i in 1:nrow(vystup)) {
  vystup$value[i] <- (mean(result[vystup$col[i],vystup$row[i],which(vystup$from[i]==days.seq):which(vystup$to[i]==days.seq)]))*multiply.by #dimensions of result are col,row,time
}
save(vystup,file=paste(save.addr,'.rda',sep=""))
write.csv(vystup,paste(save.addr,'.csv',sep=""),row.names=FALSE)
#save cleared measurements
#vystup <- cleanMeasurements(vystup)
#save(vystup,file=paste(save.addr,'.rda',sep=""))
#write.csv(vystup,paste(save.addr,'.csv',sep=""),row.names=FALSE) | /measurements/artificial-measurements.r | no_license | martintomas/iterative_prediction_of_pollutants | R | false | false | 4,701 | r | library(ncdf)
# Generate artificial BaP measurements from CMAQ model output: sample station
# locations on the model grid (excluding water cells), draw measurement
# periods, average the modelled concentrations over each period and save the
# resulting table as .rda and .csv.
output.addr <- '/home/trn/Desktop/diploma-thesis/data'
save.addr <- '/home/trn/diploma-thesis/R/scripts/measurements/artificial-measurements3' #name of file without end (.csv or .rda)
func.addr <- '/home/trn/diploma-thesis/R/scripts/measurements' #where is create-ncdf & clear measurements files script
water.addr <- '/home/trn/Desktop/diploma-thesis/software/multi-iteration/init/GRIDCRO2D_aa2006test'
number <- 400 #how many measurements generate
num.locations <- 380 #how many locations use
num.locations.redundant <- 20 #different locations in same cells
max.time <- 1 #maximum time for passive measurements
# model grid dimensions (cells); the location helper below mixes the two
# dimensions when decoding indices, which is safe only while row == col
row <- 121
col <- 121
multiply.by <- 1000 #real measurements are 1000 bigger then cmaq outputs
#LOAD DATA
simulation <- 1 #if not used data from iterative simulations, use ''
date.from <- as.Date('2006-01-01')
date.to <- as.Date('2006-01-01')
source(paste(func.addr,"/create-ncdf.r",sep=""))
source(paste(func.addr,"/clear-measurements.r",sep=""))
days.num <- as.numeric(date.to-date.from,units="days")+1 #number of days
days.seq <- seq(from=date.from,to=date.to,by='days') #vector of days, format %Y-%m-%d
days.seq2 <- rep("",length(days.seq)) #vector of days, format %Y%j
for(i in 1:length(days.seq)) {
  days.seq2[i] <- format(days.seq[i], "%Y%j")
}
# Read the daily ACONC CMAQ output files, average each requested variable
# over the last (hourly) dimension and sum the variables together, returning
# an array of daily mean fields with dim c(row, col, days.num).
# Relies on the globals row, col, days.num, days.seq2 and simulation defined
# above; uses the legacy ncdf API (open.ncdf / get.var.ncdf / close.ncdf).
# NOTE(review): assumes get.var.ncdf returns a 3-D array (x, y, hour) so the
# nested apply averages over hours -- confirm for other variables.
traverseAconc <- function(dir, aconc.name, variables) {
  result <- array(0,dim=c(row,col,days.num))
  for(i in 1:length(days.seq2)) {
    # file name differs depending on whether iterative-simulation output is used
    if(simulation=='') {
      file.addr <- paste(dir,'/',aconc.name,'.',days.seq2[i],'.ncf',sep="") #name of file is created
    } else {
      file.addr <- paste(dir,'/',aconc.name,'.',days.seq2[i],'_',simulation,'.ncf',sep="") #name of file is created
    }
    print(paste("Reading:",file.addr))
    file <- open.ncdf(file.addr)
    temp.result <- matrix(0,nrow=row,ncol=col)
    for(j in variables) { #each variable is read
      temp <- get.var.ncdf(file, j)
      temp.variable <- apply(temp, 2, function(data) { #variable is averaged over hours
        apply(data,1,mean)
      })
      temp.result <- temp.result+temp.variable #variables are summed together !!! (ABAPI, ABAPJ, ABAPK)
    }
    close.ncdf(file)
    result[,,i] <- temp.result #save temp result for one day to summary matrix
  }
  return(result)
}
result <- traverseAconc(output.addr,'CCTM_e1a_Linux2_x86_64ifort.benchmark.ACONC',c('BAP'))
#CREATE LIST OF LOCATIONS
# Draw `number` distinct linear cell indices outside water cells and decode
# them into (row, col) grid coordinates.
# NOTE(review): the decoding mixes the two dimensions (x %% col versus
# ceiling(x / row)); it is only consistent because row == col here --
# confirm before using a non-square grid.
generateLocation <- function(col,row,number,water) { #once every location
  loc.all <- 1:(row*col)
  loc <- sample(loc.all[-water],number)
  coord <- sapply(loc, function(x) { #have to be flipped because water (CMAQ output) has col, row dimensions
    loc.col <- x%%col
    loc.row <- ceiling(x/row)
    if(loc.col==0) loc.col <- col
    return(c(loc.row,loc.col))
  })
  return(data.frame(row=coord[1,],col=coord[2,],nameCS.1=paste('Location ',1:number,sep=""))) #keep it flipped
}
# Add `num` stations re-using the cells of already-drawn locations but with
# fresh location names (numbering continues after the existing rows).
generateRedundantLocation <- function(locations,num) {
  loc <- sample(1:nrow(locations),num,replace=TRUE)
  return(data.frame(row=locations$row[loc],col=locations$col[loc],nameCS.1=paste('Location ',(nrow(locations)+1):(nrow(locations)+num),sep="")))
}
# Pad the table with `num` rows sampled (with replacement) from the existing
# locations, keeping their original names.
generateRestOfTheLocations <- function(locations,num) {
  loc <- sample(1:nrow(locations),num,replace=TRUE)
  return(locations[loc,])
}
# Land/water mask from the GRIDCRO2D file: cells where LWMASK == 0 are
# treated as water and excluded from sampling.
water.file <- open.ncdf(water.addr)
water <- get.var.ncdf(water.file, 'LWMASK')
water <- which(water==0) #measurements aren't in water, !!! dimensions of water are col,row
locations <- generateLocation(col,row,num.locations,water) #once every location
#add redundant locations
locations <- rbind(locations,generateRedundantLocation(locations,num.locations.redundant))
#add rest of the locations
locations <- rbind(locations,generateRestOfTheLocations(locations,(number-nrow(locations))))
rownames(locations) <- 1:nrow(locations)
#CREATE LIST OF DATES
# Draw a random measurement period per station: a random start day and an
# end day at most `max.time` days later, clamped to the available date range.
generateTimes <- function(number,max.time,times) {
  from <- sample(1:(length(times)),number,replace=TRUE)
  to <- sapply(from,function(x) sample(x:(x+max.time),1))
  to[to>length(times)] <- length(times)
  return(data.frame(from=times[from],to=times[to]))
}
times <- generateTimes(number,max.time,days.seq)
vystup <- cbind(locations,times)
#COMPUTE VALUES IN LOCATIONS OVER DEFINED TIME
# Average the modelled daily field over each station's period and rescale to
# measurement units (see multiply.by above).
vystup[,c('value')] <- NA
for(i in 1:nrow(vystup)) {
  vystup$value[i] <- (mean(result[vystup$col[i],vystup$row[i],which(vystup$from[i]==days.seq):which(vystup$to[i]==days.seq)]))*multiply.by #dimensions of result are col,row,time
}
save(vystup,file=paste(save.addr,'.rda',sep=""))
write.csv(vystup,paste(save.addr,'.csv',sep=""),row.names=FALSE)
#save cleared measurements
#vystup <- cleanMeasurements(vystup)
#save(vystup,file=paste(save.addr,'.rda',sep=""))
#write.csv(vystup,paste(save.addr,'.csv',sep=""),row.names=FALSE) |
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{distance}
\alias{distance}
\title{Collection of distance matrix computation functions.}
\usage{
distance(measure = "JS")
}
\arguments{
\item{method}{character string. Specifies which distance
measure to use. 'JS' and 'KL' are currently supported.}
}
\value{
Returns a bivariate function
x <- matrix(rnorm(100, mean = 100), nrow = 5) dist(x)
library(proxy) dist(x, method = distance()) dist(x, method
= distance(measure = "KL"))
}
\description{
The proxy library allows us to pass an arbitrary bivariate
function for distance matrix computations. This function
will return different bivariate functions based on the
input.
}
| /man/distance.Rd | permissive | tcarnus/LDAvis | R | false | false | 689 | rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\name{distance}
\alias{distance}
\title{Collection of distance matrix computation functions.}
\usage{
distance(measure = "JS")
}
\arguments{
\item{measure}{character string. Specifies which distance
measure to use. 'JS' and 'KL' are currently supported.}
}
\value{
Returns a bivariate distance function suitable for use as the
\code{method} argument of \code{proxy::dist}.
}
\examples{
x <- matrix(rnorm(100, mean = 100), nrow = 5)
dist(x)
library(proxy)
dist(x, method = distance())
dist(x, method = distance(measure = "KL"))
}
\description{
The proxy library allows us to pass an arbitrary bivariate
function for distance matrix computations. This function
will return different bivariate functions based on the
input.
}
|
# Plot 2: Global Active Power (line chart) for 1-2 Feb 2007,
# written to plot2.png (480 x 480 px).
input_file <- "./household_power_consumption.txt"
raw <- read.table(input_file, header = TRUE, sep = ";",
                  stringsAsFactors = FALSE, dec = ".")
# Keep only the two days of interest (dates stored as d/m/Y strings)
two_days <- raw[raw$Date %in% c("1/2/2007", "2/2/2007"), ]
datetime <- strptime(paste(two_days$Date, two_days$Time, sep = " "),
                     "%d/%m/%Y %H:%M:%S")
globalActivePower <- as.numeric(two_days$Global_active_power)
png("plot2.png", width = 480, height = 480)
plot(datetime, globalActivePower, type = "l", xlab = "",
     ylab = "Global Active Power (kilowatts)")
dev.off()
data <- read.table(dataFile, header = TRUE, sep = ";", stringsAsFactors = FALSE, dec = ".")
subSetData <- data[data$Date %in% c("1/2/2007","2/2/2007"),]
datetime <- strptime(paste(subSetData$Date, subSetData$Time, sep = " "), "%d/%m/%Y %H:%M:%S")
globalActivePower <- as.numeric(subSetData$Global_active_power)
png("plot2.png", width = 480, height = 480)
plot(datetime, globalActivePower, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off() |
## When adding new options, be sure to update the VALID_OPTIONS list
## (define your own custom validators by assigning a function)
## and update the initOptions + documentation below
##
## Each entry maps an option name to its validator, in one of two forms
## (both handled by validateOptions below):
##  - a function: called with the proposed value, must return TRUE/FALSE
##  - a list: the set of permitted values (membership checked with %in%)
VALID_OPTIONS <- list(
  auto.snapshot = function(x) x %in% c(TRUE, FALSE),
  vcs.ignore.lib = list(TRUE, FALSE),
  vcs.ignore.src = list(TRUE, FALSE)
)
## Seed a project's packrat options file with the default settings.
## `project` may be NULL, in which case getProjectDir() resolves it.
initOptions <- function(project = NULL) {
  set_opts(
    project = getProjectDir(project),
    auto.snapshot = TRUE,
    vcs.ignore.lib = TRUE,
    vcs.ignore.src = FALSE
  )
}
##' Get/set packrat project options
##'
##' Get and set options for the current packrat-managed project.
##'
##' \itemize{
##' \item \code{auto.snapshot}: Perform automatic, asynchronous snapshots when running interactively?
##' (\code{TRUE} / \code{FALSE})
##' \item \code{vcs.ignore.lib}: Add the packrat private library to your version control system ignore? (\code{TRUE} / \code{FALSE})
##' \item \code{vcs.ignore.src}: Add the packrat private sources to your version control system ignore? (\code{TRUE} / \code{FALSE})
##' }
##' @param options A character vector of valid option names.
##' @param simplify Boolean; \code{unlist} the returned options? Useful for when retrieving
##' a single option.
##' @param project The project directory. When in packrat mode, defaults to the current project;
##' otherwise, defaults to the current working directory.
##' @param ... Entries of the form \code{key = value}, used for setting packrat project options.
##' @rdname packrat-options
##' @name packrat-options
##' @export
get_opts <- function(options = NULL, simplify = TRUE, project = NULL) {
  project <- getProjectDir(project)
  all_opts <- read_opts(project = project)

  # No selection requested: hand back everything as-is.
  if (is.null(options))
    return(all_opts)

  # Subset to the requested option names, optionally flattening the list.
  selected <- all_opts[names(all_opts) %in% options]
  if (simplify)
    unlist(selected)
  else
    selected
}
##' @rdname packrat-options
##' @name packrat-options
##' @export
# Persist one or more `key = value` option pairs to the project's options
# file (DCF format), after validating them against VALID_OPTIONS.
# Returns the full option list invisibly.
set_opts <- function(..., project = NULL) {
  project <- getProjectDir(project)
  optsPath <- packratOptionsFilePath(project)
  # Create the options file (and its parent directory) on first use
  if (!file.exists(optsPath)) {
    dir.create(dirname(optsPath), recursive = TRUE, showWarnings = FALSE)
    file.create(optsPath)
  }
  dots <- list(...)
  # Reject unknown option names / invalid values before touching the file
  validateOptions(dots)
  keys <- names(dots)
  # NOTE(review): unlist() coerces all values to a common type; currently
  # harmless since every valid option value is logical -- confirm if
  # non-logical options are ever added to VALID_OPTIONS.
  values <- unname(unlist(dots))
  # Merge the new settings over whatever is already on disk
  opts <- read_opts(project = project)
  for (i in seq_along(keys)) {
    opts[[keys[[i]]]] <- values[[i]]
  }
  write.dcf(opts, file = optsPath)
  # Re-apply project settings so the change takes effect immediately
  updateSettings(project)
  invisible(opts)
}
# Check each proposed `key = value` pair against VALID_OPTIONS; stops with
# an informative error on the first unknown key or rejected value.
validateOptions <- function(opts) {
  for (idx in seq_along(opts)) {
    key <- names(opts)[[idx]]
    value <- opts[[idx]]

    # Unknown option name
    if (!(key %in% names(VALID_OPTIONS))) {
      stop("'", key, "' is not a valid packrat option", call. = FALSE)
    }

    # A validator is either an allow-list or a predicate function
    validator <- VALID_OPTIONS[[key]]
    rejected <-
      if (is.list(validator)) {
        !(value %in% validator)
      } else if (is.function(validator)) {
        !validator(value)
      } else {
        FALSE
      }

    if (rejected) {
      stop("'", value, "' is not a valid setting for packrat option '",
           key, "'", call. = FALSE)
    }
  }
}
# Read the project's options file (DCF format) back into a named list.
# Returns NULL invisibly when no options file exists yet.
read_opts <- function(project = NULL) {
  project <- getProjectDir(project)
  path <- packratOptionsFilePath(project)
  if (!file.exists(path)) return(invisible(NULL))
  opts <- readDcf(path)
  # Each DCF field holds a deparsed R literal (e.g. "TRUE"); parse and
  # evaluate it in an empty environment to recover the value.
  # NOTE(review): eval(parse(...)) on file contents is trusted here on the
  # assumption that only set_opts() writes this file -- confirm no other
  # writer exists.
  as.list(apply(opts, 2, function(x) {
    eval(parse(text = x), envir = emptyenv())
  }))
}
| /R/options.R | no_license | robertzk/packrat | R | false | false | 3,383 | r | ## When adding new options, be sure to update the VALID_OPTIONS list
## (define your own custom validators by assigning a function)
## and update the initOptions + documentation below
VALID_OPTIONS <- list(
auto.snapshot = function(x) x %in% c(TRUE, FALSE),
vcs.ignore.lib = list(TRUE, FALSE),
vcs.ignore.src = list(TRUE, FALSE)
)
initOptions <- function(project = NULL) {
project <- getProjectDir(project)
set_opts(
project = project,
auto.snapshot = TRUE,
vcs.ignore.lib = TRUE,
vcs.ignore.src = FALSE
)
}
##' Get/set packrat project options
##'
##' Get and set options for the current packrat-managed project.
##'
##' \itemize{
##' \item \code{auto.snapshot}: Perform automatic, asynchronous snapshots when running interactively?
##' (\code{TRUE} / \code{FALSE})
##' \item \code{vcs.ignore.lib}: Add the packrat private library to your version control system ignore? (\code{TRUE} / \code{FALSE})
##' \item \code{vcs.ignore.src}: Add the packrat private sources to your version control system ignore? (\code{TRUE} / \code{FALSE})
##' }
##' @param options A character vector of valid option names.
##' @param simplify Boolean; \code{unlist} the returned options? Useful for when retrieving
##' a single option.
##' @param project The project directory. When in packrat mode, defaults to the current project;
##' otherwise, defaults to the current working directory.
##' @param ... Entries of the form \code{key = value}, used for setting packrat project options.
##' @rdname packrat-options
##' @name packrat-options
##' @export
get_opts <- function(options = NULL, simplify = TRUE, project = NULL) {
project <- getProjectDir(project)
opts <- read_opts(project = project)
if (is.null(options)) {
opts
} else {
result <- opts[names(opts) %in% options]
if (simplify) unlist(result)
else result
}
}
##' @rdname packrat-options
##' @name packrat-options
##' @export
set_opts <- function(..., project = NULL) {
project <- getProjectDir(project)
optsPath <- packratOptionsFilePath(project)
if (!file.exists(optsPath)) {
dir.create(dirname(optsPath), recursive = TRUE, showWarnings = FALSE)
file.create(optsPath)
}
dots <- list(...)
validateOptions(dots)
keys <- names(dots)
values <- unname(unlist(dots))
opts <- read_opts(project = project)
for (i in seq_along(keys)) {
opts[[keys[[i]]]] <- values[[i]]
}
write.dcf(opts, file = optsPath)
updateSettings(project)
invisible(opts)
}
validateOptions <- function(opts) {
for (i in seq_along(opts)) {
key <- names(opts)[[i]]
value <- opts[[i]]
if (!(key %in% names(VALID_OPTIONS))) {
stop("'", key, "' is not a valid packrat option", call. = FALSE)
}
opt <- VALID_OPTIONS[[key]]
if (is.list(opt)) {
if (!(value %in% opt)) {
stop("'", value, "' is not a valid setting for packrat option '", key, "'", call. = FALSE)
}
} else if (is.function(opt)) {
if (!opt(value)) {
stop("'", value, "' is not a valid setting for packrat option '", key, "'", call. = FALSE)
}
}
}
}
read_opts <- function(project = NULL) {
project <- getProjectDir(project)
path <- packratOptionsFilePath(project)
if (!file.exists(path)) return(invisible(NULL))
opts <- readDcf(path)
as.list(apply(opts, 2, function(x) {
eval(parse(text = x), envir = emptyenv())
}))
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.