content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataInfo.R
\docType{data}
\name{ratioSignals}
\alias{ratioSignals}
\title{ratioSignals}
\description{
Example data ratioSignals. Contains one column "ratioSignals" with the names of the different optical metrics used as
signals for different chemical species in freshwater.
}
\author{
Steve Corsi \email{srcorsi@usgs.gov}
}
\keyword{absorption}
| /man/ratioSignals.Rd | permissive | klingerf2/USGSHydroOpt | R | false | true | 423 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataInfo.R
\docType{data}
\name{ratioSignals}
\alias{ratioSignals}
\title{ratioSignals}
\description{
Example data ratioSignals. Contains one column "ratioSignals" with the name of the different optical metrics used as
signals for different chemical species in freshwater.
}
\author{
Steve Corsi \email{srcorsi@usgs.gov}
}
\keyword{absorption}
|
library(ggvis)
# Histogram, fully specified
mtcars %>% ggvis(x = ~wt) %>%
compute_bin(~wt, binwidth = 1) %>%
layer_rects(x = ~xmin_, x2 = ~xmax_, y = ~count_, y2 = 0)
# Or using shorthand layer
mtcars %>% ggvis(x = ~wt) %>% layer_histograms()
mtcars %>% ggvis(x = ~wt) %>% layer_histograms(binwidth = 1)
# Histogram, filled by cyl
mtcars %>% ggvis(x = ~wt, fill = ~factor(cyl)) %>%
group_by(cyl) %>%
layer_histograms(binwidth = 1)
# Bigger dataset
data(diamonds, package = "ggplot2")
diamonds %>% ggvis(x = ~table) %>% layer_histograms()
# Stacked histogram
diamonds %>% ggvis(x = ~table, fill = ~cut) %>%
group_by(cut) %>%
layer_histograms(binwidth = 1)
# Histogram of dates
set.seed(2934)
dat <- data.frame(times = as.POSIXct("2013-07-01", tz = "GMT") + rnorm(200) * 60 * 60 * 24 * 7)
dat %>% ggvis(x = ~times) %>% layer_histograms()
| /demo/histogram.r | no_license | jjallaire/ggvis | R | false | false | 854 | r | library(ggvis)
# Histogram, fully specified
mtcars %>% ggvis(x = ~wt) %>%
compute_bin(~wt, binwidth = 1) %>%
layer_rects(x = ~xmin_, x2 = ~xmax_, y = ~count_, y2 = 0)
# Or using shorthand layer
mtcars %>% ggvis(x = ~wt) %>% layer_histograms()
mtcars %>% ggvis(x = ~wt) %>% layer_histograms(binwidth = 1)
# Histogram, filled by cyl
mtcars %>% ggvis(x = ~wt, fill = ~factor(cyl)) %>%
group_by(cyl) %>%
layer_histograms(binwidth = 1)
# Bigger dataset
data(diamonds, package = "ggplot2")
diamonds %>% ggvis(x = ~table) %>% layer_histograms()
# Stacked histogram
diamonds %>% ggvis(x = ~table, fill = ~cut) %>%
group_by(cut) %>%
layer_histograms(binwidth = 1)
# Histogram of dates
set.seed(2934)
dat <- data.frame(times = as.POSIXct("2013-07-01", tz = "GMT") + rnorm(200) * 60 * 60 * 24 * 7)
dat %>% ggvis(x = ~times) %>% layer_histograms()
|
getHeritability <- function(object, ...) {
if(!object$model$geno$as.random)
stop("Heritability can only be calculated when genotype is random")
if(!is.null(object$model$geno$geno.decomp)) {
geno.decomp <- object$model$geno$geno.decomp
decomp <- unique(object$terms$geno$pop_names)
select <- paste(geno.decomp, decomp, sep = "")
dim <- object$dim[select]
} else {
select <- object$model$geno$genotype
dim <- object$dim[select]
}
#ed.geno <- object$eff.dim[select]/(dim - 1)
ed.geno <- object$eff.dim[select]/object$dim.nom[select]
names(ed.geno) <- select
res <- round(ed.geno, 2)
res
} | /R/getHeritability.R | no_license | cran/SpATS | R | false | false | 609 | r | getHeritability <- function(object, ...) {
if(!object$model$geno$as.random)
stop("Heritability can only be calculated when genotype is random")
if(!is.null(object$model$geno$geno.decomp)) {
geno.decomp <- object$model$geno$geno.decomp
decomp <- unique(object$terms$geno$pop_names)
select <- paste(geno.decomp, decomp, sep = "")
dim <- object$dim[select]
} else {
select <- object$model$geno$genotype
dim <- object$dim[select]
}
#ed.geno <- object$eff.dim[select]/(dim - 1)
ed.geno <- object$eff.dim[select]/object$dim.nom[select]
names(ed.geno) <- select
res <- round(ed.geno, 2)
res
} |
# read data table
fn = "household_power_consumption.txt"
d = read.table(fn, header=TRUE, sep=";", stringsAsFactors=FALSE, na.strings="?")
# select required vectors
period = d$Date == "1/2/2007" | d$Date == "2/2/2007"
d = d[period, ]
d$x = strptime(paste(d$Date,d$Time), "%d/%m/%Y %H:%M:%S")
# draw and save the picture
par(mfrow = c(1, 1), mar = c(4,4,3,1), oma = c(0,0,0,0), ps = 12)
plot (d$x, d[,7], type="l", pch=".", xlab="", ylab="Energy sub metering")
lines(d$x, d[,8], col="red")
lines(d$x, d[,9], col="blue")
legend("topright", lty=1,
col = c("black", "blue", "red"),
legend = c("Sub_metering_1","Sub_metering_2", "Sub_metering_3"))
dev.copy(png, file="plot3.png", width = 480, height = 480)
dev.off() | /plot3.R | no_license | star421/ExData_Plotting1 | R | false | false | 739 | r | # read data table
fn = "household_power_consumption.txt"
d = read.table(fn, header=TRUE, sep=";", stringsAsFactors=FALSE, na.strings="?")
# select required vectors
period = d$Date == "1/2/2007" | d$Date == "2/2/2007"
d = d[period, ]
d$x = strptime(paste(d$Date,d$Time), "%d/%m/%Y %H:%M:%S")
# draw and save the picture
par(mfrow = c(1, 1), mar = c(4,4,3,1), oma = c(0,0,0,0), ps = 12)
plot (d$x, d[,7], type="l", pch=".", xlab="", ylab="Energy sub metering")
lines(d$x, d[,8], col="red")
lines(d$x, d[,9], col="blue")
legend("topright", lty=1,
col = c("black", "blue", "red"),
legend = c("Sub_metering_1","Sub_metering_2", "Sub_metering_3"))
dev.copy(png, file="plot3.png", width = 480, height = 480)
dev.off() |
# Downloading & extracting data
fileurl <- "http://archive.ics.uci.edu/ml/machine-learning-databases/00235/household_power_consumption.zip"
download.file(fileurl,destfile = "data.zip")
unzip("data.zip")
# Reading & inspecting data into R
dt <- read.table("household_power_consumption.txt",header = TRUE, sep= ";", na.strings = "?")
str(dt)
summary(dt)
head(dt)
names(dt)
# Formatting date fields into the required format & extracting date range
dt$DateTime <- paste(dt$Date, dt$Time)
dt$DateTime <- strptime(dt$DateTime, "%d/%m/%Y %H:%M:%S")
head(dt$DateTime)
dtextract <- subset(dt,DateTime >= strptime("2007-02-01 00:00:00", "%Y-%m-%d %H:%M:%S") & DateTime <= strptime("2007-02-02 23:59:59","%Y-%m-%d %H:%M:%S"))
head(dtextract$DateTime)
tail(dtextract$DateTime)
#Plot 4
par(mfcol=c(2,2))
plot(dtextract$DateTime, dtextract$Global_active_power, type='l', ylab="Global Active Power", xlab="")
plot(dtextract$DateTime, dtextract$Sub_metering_1, type='l', xlab="", ylab ="Energy sub metering")
lines(dtextract$DateTime, dtextract$Sub_metering_2, type='l', col='red')
lines(dtextract$DateTime, dtextract$Sub_metering_3, type='l', col="blue")
legend('topright', c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), lty=c(1,1,1),col=c("black","red","blue"))
plot(dtextract$DateTime, dtextract$Voltage, type='l', ylab="Voltage", xlab="datetime")
plot(dtextract$DateTime, dtextract$Global_reactive_power, type='l', ylab="Global_reactive_power", xlab="datetime")
dev.copy(png,"plot4.png", width=480, height=480)
dev.off() | /Plot4.R | no_license | nsunassee/ExData_Plotting1 | R | false | false | 1,525 | r | # Downloading & extracting data
fileurl <- "http://archive.ics.uci.edu/ml/machine-learning-databases/00235/household_power_consumption.zip"
download.file(fileurl,destfile = "data.zip")
unzip("data.zip")
# Reading & inspecting data into R
dt <- read.table("household_power_consumption.txt",header = TRUE, sep= ";", na.strings = "?")
str(dt)
summary(dt)
head(dt)
names(dt)
# Formatting date fields into the required format & extracting date range
dt$DateTime <- paste(dt$Date, dt$Time)
dt$DateTime <- strptime(dt$DateTime, "%d/%m/%Y %H:%M:%S")
head(dt$DateTime)
dtextract <- subset(dt,DateTime >= strptime("2007-02-01 00:00:00", "%Y-%m-%d %H:%M:%S") & DateTime <= strptime("2007-02-02 23:59:59","%Y-%m-%d %H:%M:%S"))
head(dtextract$DateTime)
tail(dtextract$DateTime)
#Plot 4
par(mfcol=c(2,2))
plot(dtextract$DateTime, dtextract$Global_active_power, type='l', ylab="Global Active Power", xlab="")
plot(dtextract$DateTime, dtextract$Sub_metering_1, type='l', xlab="", ylab ="Energy sub metering")
lines(dtextract$DateTime, dtextract$Sub_metering_2, type='l', col='red')
lines(dtextract$DateTime, dtextract$Sub_metering_3, type='l', col="blue")
legend('topright', c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), lty=c(1,1,1),col=c("black","red","blue"))
plot(dtextract$DateTime, dtextract$Voltage, type='l', ylab="Voltage", xlab="datetime")
plot(dtextract$DateTime, dtextract$Global_reactive_power, type='l', ylab="Global_reactive_power", xlab="datetime")
dev.copy(png,"plot4.png", width=480, height=480)
dev.off() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/quick_plots.R
\name{forest_plot}
\alias{forest_plot}
\alias{influence_plot}
\alias{cumulative_plot}
\alias{funnel_plot}
\title{Quickly plot common visualizations for meta-analyses}
\usage{
forest_plot(x, estimate = estimate, study = study, size = weight,
shape = type, col = type, xmin = conf.low, xmax = conf.high,
group = NULL, alpha = 0.75, height = 0, ...)
influence_plot(x, estimate = l1o_estimate, study = study, size = 4,
shape = 15, col = type, xmin = l1o_conf.low, xmax = l1o_conf.high,
group = NULL, alpha = 0.75, height = 0, sum_lines = TRUE, ...)
cumulative_plot(x, estimate = cumul_estimate, study = study, size = 4,
shape = 15, col = type, xmin = cumul_conf.low, xmax = cumul_conf.high,
group = NULL, alpha = 0.75, height = 0, sum_lines = TRUE, ...)
funnel_plot(x, estimate = estimate, std.error = std.error, size = 3,
shape = NULL, col = NULL, alpha = 0.75, reverse_y = TRUE,
log_summary = FALSE, ...)
}
\arguments{
\item{x}{a tidied meta-analysis}
\item{estimate}{variable name of point estimates}
\item{study}{variable name of study labels}
\item{size}{point size; either an aesthetic variable or a specific shape.}
\item{shape}{shape of the points; either an aesthetic variable or a specific
shape.}
\item{col}{color of the points and lines; either an aesthetic variable or a
specific color.}
\item{xmin}{lower confidence interval variable name}
\item{xmax}{upper confidence interval variable name}
\item{group}{a grouping variable}
\item{alpha}{transparency level}
\item{height}{line height for error bars}
\item{...}{additional arguments}
\item{sum_lines}{logical. Should vertical lines demarcating the summary
estimate and confidence intervals be included?}
\item{std.error}{variable name of standard error variable}
\item{reverse_y}{logical. Should the y-axis be reversed?}
\item{log_summary}{logical. Should the estimate and confidence intervals be
log-transformed?}
}
\value{
a \code{ggplot2} object
}
\description{
\code{forest_plot()} presents study and summary estimates. \code{influence_plot()}
shows the forest plot of sensitivity analyses using \code{sensitivity()}.
\code{cumulative_plot()} shows the forest plot for \code{cumulative()}. \code{funnel_plot()}
plots standard errors against the summary estimate to assess publication
bias.
}
\examples{
library(dplyr)
ma <- iud_cxca \%>\%
group_by(group) \%>\%
meta_analysis(yi = lnes, sei = selnes, slab = study_name)
forest_plot(ma)
funnel_plot(ma)
ma \%>\%
sensitivity() \%>\%
influence_plot()
ma \%>\%
cumulative() \%>\%
cumulative_plot()
}
| /man/quickplots.Rd | permissive | ekothe/tidymeta | R | false | true | 2,654 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/quick_plots.R
\name{forest_plot}
\alias{forest_plot}
\alias{influence_plot}
\alias{cumulative_plot}
\alias{funnel_plot}
\title{Quickly plot common visualizations for meta-analyses}
\usage{
forest_plot(x, estimate = estimate, study = study, size = weight,
shape = type, col = type, xmin = conf.low, xmax = conf.high,
group = NULL, alpha = 0.75, height = 0, ...)
influence_plot(x, estimate = l1o_estimate, study = study, size = 4,
shape = 15, col = type, xmin = l1o_conf.low, xmax = l1o_conf.high,
group = NULL, alpha = 0.75, height = 0, sum_lines = TRUE, ...)
cumulative_plot(x, estimate = cumul_estimate, study = study, size = 4,
shape = 15, col = type, xmin = cumul_conf.low, xmax = cumul_conf.high,
group = NULL, alpha = 0.75, height = 0, sum_lines = TRUE, ...)
funnel_plot(x, estimate = estimate, std.error = std.error, size = 3,
shape = NULL, col = NULL, alpha = 0.75, reverse_y = TRUE,
log_summary = FALSE, ...)
}
\arguments{
\item{x}{a tidied meta-analysis}
\item{estimate}{variable name of point estimates}
\item{study}{variable name of study labels}
\item{size}{point size; either an aesthetic variable or a specific shape.}
\item{shape}{shape of the points; either an aesthetic variable or a specific
shape.}
\item{col}{color of the points and lines; either an aesthetic variable or a
specific color.}
\item{xmin}{lower confidence interval variable name}
\item{xmax}{upper confidence interval variable name}
\item{group}{a grouping variable}
\item{alpha}{transparancy level}
\item{height}{line height for error bars}
\item{...}{additional arguments}
\item{sum_lines}{logical. Should vertical lines demarcating the summary
estimate and confidence intervals be included?}
\item{std.error}{variable name of standard error variable}
\item{reverse_y}{logical. Should the y-axis be reversed?}
\item{log_summary}{logical. Should the estimate and confidence intervals be
log-transformed?}
}
\value{
a \code{ggplot2} object
}
\description{
\code{forest_plot()} presents study and summary estimates. \code{influence_plot()}
shows the forest plot of senstivity analyses using \code{senstivity()}.
\code{cumulative_plot()} shows the forest plot for \code{cumulative()}. \code{funnel_plot()}
plots standard errors against the summary esitimate to assess publication
bias.
}
\examples{
library(dplyr)
ma <- iud_cxca \%>\%
group_by(group) \%>\%
meta_analysis(yi = lnes, sei = selnes, slab = study_name)
forest_plot(ma)
funnel_plot(ma)
ma \%>\%
sensitivity() \%>\%
influence_plot()
ma \%>\%
cumulative() \%>\%
cumulative_plot()
}
|
\name{CPA}
\alias{CPA}
\alias{as.CPA}
\alias{is.CPA}
\title{
Representation of a conditional probability table as an array.
}
\description{
A conditional probability table for a node can be represented as a
array with the first \eqn{p} dimensions representing the parent
variables and the last dimension representing the
states of the node. Given a set of values for the parent variables,
the values in the last dimension contain the conditional probabilities
corresponding conditional probabilities. A \code{CPA} is a special
\code{\link[base]{array}} object which represents a conditional
probability table.
}
\usage{
is.CPA(x)
as.CPA(x)
}
\arguments{
\item{x}{
Object to be tested or coerced into a \code{CPA}.
}
}
\details{
One way to store a conditional probability table is as an array in
which the first \eqn{p} dimensions represent the parent variables, and
the \eqn{p+1} dimension represents the child variable. Here is an
example with two parents variables, \eqn{A} and \eqn{B}, and a single
child variable, \eqn{C}:
\code{, , C=c1}
\tabular{rrrr}{
\tab b1 \tab b2 \tab b3 \cr
a1 \tab 0.07 \tab 0.23 \tab 0.30 \cr
a2 \tab 0.12 \tab 0.25 \tab 0.31 \cr
a3 \tab 0.17 \tab 0.27 \tab 0.32 \cr
a4 \tab 0.20 \tab 0.29 \tab 0.33 \cr
}
\code{, , C=c2}
\tabular{rrrr}{
\tab b1 \tab b2 \tab b3 \cr
a1 \tab 0.93 \tab 0.77 \tab 0.70 \cr
a2 \tab 0.88 \tab 0.75 \tab 0.69 \cr
a3 \tab 0.83 \tab 0.73 \tab 0.68 \cr
a4 \tab 0.80 \tab 0.71 \tab 0.67 \cr
}
[Because R stores (and prints) arrays in column-major order, the
last value (in this case tables) is the one that sums to 1.]
The \code{CPA} class is a subclass of the
\code{\link[base]{array}} class (formally, it is class
\code{c("CPA","array")}). The \code{CPA} class interprets the
\code{dimnames} of the array in terms of the conditional probability
table. The first \eqn{p} values of \code{names(dimnames(x))} are the
input names of the edges (see \code{\link[RNetica]{NodeInputNames}()} or the
variable names (or the parent variable, see
\code{\link[RNetica]{NodeParents}()}, if the input names were not specified),
and the last value is the name of the child variable. Each of the
elements of \code{dimnames(x)} should give the state names (see
\code{\link[RNetica]{NodeStates}()}) for the respective value. In particular,
the conversion function \code{\link{as.CPF}()} relies on the existence
of this meta-data, and \code{as.CPA()} will raise a warning if an
array without the appropriate dimnames is supplied.
Although the intended interpretation is that of a conditional
probability table, the normalization constraint is not enforced. Thus
a \code{CPA} object could be used to store likelihoods, probability
potentials, contingency table counts, or other similarly shaped
objects. The function \code{\link{normalize}} scales the values of a
\code{CPA} so that the normalization constraint is enforced.
The method \code{\link[RNetica]{NodeProbs}()} returns a \code{CPA} object.
The function \code{as.CPA()} is designed to convert between
\code{\link{CPF}}s (that is, conditional probability tables stored as
data frames) and \code{CPA}s. It assumes that the factors variables
in the data frame represent the parent variables, and the numeric
values represent the states of the child variable. It also assumes
that the names of the numeric columns are of the form
\code{\var{varname}.\var{state}}, and attempts to derive variable and
state names from that.
If the argument to \code{as.CPA(x)} is an array, then it assumes that
the \code{dimnames(x)} and \code{names(dimnames(x))} are set to the
states of the variables and the names of the variables respectively.
A warning is issued if the names are missing.
}
\value{
The function \code{is.CPA()} returns a logical value indicating
whether or not the \code{is(x,"CPA")} is true.
The function \code{as.CPA} returns an object of class
\code{c("CPA","array")}, which is essentially an array with the
dimnames set to reflect the variable names and states.
}
\author{
Russell Almond
}
\note{
The obvious way to print a \code{CPA} would be to always show the
child variable as the rows in the individual tables, with the parents
corresponding to rows and tables. R, however, internally stores
arrays in column-major order, and hence the rows in the printed tables
always correspond to the second dimension. A new print method for
\code{CPA} would be nice.
This is an S3 object, as it just an array with a special
interpretation.
}
\seealso{
\code{\link[RNetica]{NodeProbs}()}, \code{\link[RNetica]{Extract.NeticaNode}},
\code{\link{CPF}}, \code{\link{normalize}()}
}
\examples{
# Note: in R 4.0, the factor() call is required.
arf <- data.frame(A=factor(rep(c("a1","a2"),each=3)),
B=factor(rep(c("b1","b2","b3"),2)),
C.c1=1:6, C.c2=7:12, C.c3=13:18, C.c4=19:24)
arfa <- as.CPA(arf)
stopifnot(
is.CPA(arfa),
all(dim(arfa)==c(2,3,4))
)
arr1 <- array(1:24,c(4,3,2),
dimnames=list(A=c("a1","a2","a3","a4"),B=c("b1","b2","b3"),
C=c("c1","c2")))
arr1a <- as.CPF(arr1)
stopifnot(
is.CPA(as.CPA(arr1a))
)
\dontrun{
## Requires RNetica
as.CPA(node[])
}
}
\keyword{ array }
\keyword{ classes }
| /man/CPA.Rd | permissive | erge324/CPTtools | R | false | false | 5,396 | rd | \name{CPA}
\alias{CPA}
\alias{as.CPA}
\alias{is.CPA}
\title{
Representation of a conditional probability table as an array.
}
\description{
A conditional probability table for a node can be represented as a
array with the first \eqn{p} dimensions representing the parent
variables and the last dimension representing the
states of the node. Given a set of values for the parent variables,
the values in the last dimension contain the conditional probabilities
corresponding conditional probabilities. A \code{CPA} is a special
\code{\link[base]{array}} object which represents a conditional
probability table.
}
\usage{
is.CPA(x)
as.CPA(x)
}
\arguments{
\item{x}{
Object to be tested or coerced into a \code{CPA}.
}
}
\details{
One way to store a conditional probability table is as an array in
which the first \eqn{p} dimensions represent the parent variables, and
the \eqn{p+1} dimension represents the child variable. Here is an
example with two parents variables, \eqn{A} and \eqn{B}, and a single
child variable, \eqn{C}:
\code{, , C=c1}
\tabular{rrrr}{
\tab b1 \tab b2 \tab b3 \cr
a1 \tab 0.07 \tab 0.23 \tab 0.30 \cr
a2 \tab 0.12 \tab 0.25 \tab 0.31 \cr
a3 \tab 0.17 \tab 0.27 \tab 0.32 \cr
a4 \tab 0.20 \tab 0.29 \tab 0.33 \cr
}
\code{, , C=c2}
\tabular{rrrr}{
\tab b1 \tab b2 \tab b3 \cr
a1 \tab 0.93 \tab 0.77 \tab 0.70 \cr
a2 \tab 0.88 \tab 0.75 \tab 0.69 \cr
a3 \tab 0.83 \tab 0.73 \tab 0.68 \cr
a4 \tab 0.80 \tab 0.71 \tab 0.67 \cr
}
[Because R stores (and prints) arrays in column-major order, the
last value (in this case tables) is the one that sums to 1.]
The \code{CPA} class is a subclass of the
\code{\link[base]{array}} class (formally, it is class
\code{c("CPA","array")}). The \code{CPA} class interprets the
\code{dimnames} of the array in terms of the conditional probability
table. The first \eqn{p} values of \code{names(dimnames(x))} are the
input names of the edges (see \code{\link[RNetica]{NodeInputNames}()} or the
variable names (or the parent variable, see
\code{\link[RNetica]{NodeParents}()}, if the input names were not specified),
and the last value is the name of the child variable. Each of the
elements of \code{dimnames(x)} should give the state names (see
\code{\link[RNetica]{NodeStates}()}) for the respective value. In particular,
the conversion function \code{\link{as.CPF}()} relies on the existence
of this meta-data, and \code{as.CPA()} will raise a warning if an
array without the appropriate dimnames is supplied.
Although the intended interpretation is that of a conditional
probability table, the normalization constraint is not enforced. Thus
a \code{CPA} object could be used to store likelihoods, probability
potentials, contingency table counts, or other similarly shaped
objects. The function \code{\link{normalize}} scales the values of a
\code{CPA} so that the normalization constraint is enforced.
The method \code{\link[RNetica]{NodeProbs}()} returns a \code{CPA} object.
The function \code{as.CPA()} is designed to convert between
\code{\link{CPF}}s (that is, conditional probability tables stored as
data frames) and \code{CPA}s. It assumes that the factors variables
in the data frame represent the parent variables, and the numeric
values represent the states of the child variable. It also assumes
that the names of the numeric columns are of the form
\code{\var{varname}.\var{state}}, and attempts to derive variable and
state names from that.
If the argument to \code{as.CPA(x)} is an array, then it assumes that
the \code{dimnames(x)} and \code{names(dimnames(x))} are set to the
states of the variables and the names of the variables respectively.
A warning is issued if the names are missing.
}
\value{
The function \code{is.CPA()} returns a logical value indicating
whether or not the \code{is(x,"CPA")} is true.
The function \code{as.CPA} returns an object of class
\code{c("CPA","array")}, which is essentially an array with the
dimnames set to reflect the variable names and states.
}
\author{
Russell Almond
}
\note{
The obvious way to print a \code{CPA} would be to always show the
child variable as the rows in the individual tables, with the parents
corresponding to rows and tables. R, however, internally stores
arrays in column-major order, and hence the rows in the printed tables
always correspond to the second dimension. A new print method for
\code{CPA} would be nice.
This is an S3 object, as it just an array with a special
interpretation.
}
\seealso{
\code{\link[RNetica]{NodeProbs}()}, \code{\link[RNetica]{Extract.NeticaNode}},
\code{\link{CPF}}, \code{\link{normalize}()}
}
\examples{
# Note: in R 4.0, the factor() call is required.
arf <- data.frame(A=factor(rep(c("a1","a2"),each=3)),
B=factor(rep(c("b1","b2","b3"),2)),
C.c1=1:6, C.c2=7:12, C.c3=13:18, C.c4=19:24)
arfa <- as.CPA(arf)
stopifnot(
is.CPA(arfa),
all(dim(arfa)==c(2,3,4))
)
arr1 <- array(1:24,c(4,3,2),
dimnames=list(A=c("a1","a2","a3","a4"),B=c("b1","b2","b3"),
C=c("c1","c2")))
arr1a <- as.CPF(arr1)
stopifnot(
is.CPA(as.CPA(arr1a))
)
\dontrun{
## Requires RNetica
as.CPA(node[])
}
}
\keyword{ array }
\keyword{ classes }
|
#-------------------------------------------------------#
# Distance Matrix calcs for YoloAce data
# refactored, M. Johnston
# Fri Feb 25 08:48:44 2022 ------------------------------
library(telemetry)
source("R/utils.R")
#-------------------------------------------------------#
dm_yoloace = read.csv("data/distance_matrices/Distance_Matrix_MJ_corr_mean.csv")
dm_yoloace = dm_yoloace[ , c("Name_corr", "mean_Total_Length")]
colnames(dm_yoloace) = c("Name", "Total_Length_m")
dm_yoloace = tidyr::separate(dm_yoloace, Name, into = c("Rec1", "Rec2"), sep = "-", remove = FALSE)
i = dm_yoloace$Rec1 == dm_yoloace$Rec2
dm_yoloace$Total_Length_m[i] <- 0
dm_yoloace[i, ] # chk
dm_yoloace$Name = gsub("-", " - ", dm_yoloace$Name)
## Load clean detections of interest
yolo_ace = readRDS("data_clean/YoloAce/yoloace_dfa_detects.rds") # created in R/clean_yolo_ace.R
yolo_ace$DetectDate = as.Date(yolo_ace$DateTime_PST)
yolo_ace$Rel_rkm = yolo_ace$Rkm
#-------------------------------------------------------#
# big test: all fish
f1 = split(yolo_ace, yolo_ace$FishID)
f2 = lapply(f1, dpd_allfish, distance_matrix = dm_yoloace)
ans = lapply(f2, hs)
ans5 = data.frame(data.table::rbindlist(ans, idcol = TRUE))
colnames(ans5) <- c("FishID", "date_time", "prop_dist")
write.csv(ans5, "results/YoloAce/yoloace_dpd_refactored.csv") | /R/results_scripts/make_dpd_csvs/YoloAce_dpd.R | no_license | goertler/acoustic-telemetry-synthesis | R | false | false | 1,328 | r | #-------------------------------------------------------#
# Distance Matrix calcs for YoloAce data
# refactored, M. Johnston
# Fri Feb 25 08:48:44 2022 ------------------------------
library(telemetry)
source("R/utils.R")
#-------------------------------------------------------#
dm_yoloace = read.csv("data/distance_matrices/Distance_Matrix_MJ_corr_mean.csv")
dm_yoloace = dm_yoloace[ , c("Name_corr", "mean_Total_Length")]
colnames(dm_yoloace) = c("Name", "Total_Length_m")
dm_yoloace = tidyr::separate(dm_yoloace, Name, into = c("Rec1", "Rec2"), sep = "-", remove = FALSE)
i = dm_yoloace$Rec1 == dm_yoloace$Rec2
dm_yoloace$Total_Length_m[i] <- 0
dm_yoloace[i, ] # chk
dm_yoloace$Name = gsub("-", " - ", dm_yoloace$Name)
## Load clean detections of interest
yolo_ace = readRDS("data_clean/YoloAce/yoloace_dfa_detects.rds") # created in R/clean_yolo_ace.R
yolo_ace$DetectDate = as.Date(yolo_ace$DateTime_PST)
yolo_ace$Rel_rkm = yolo_ace$Rkm
#-------------------------------------------------------#
# big test: all fish
f1 = split(yolo_ace, yolo_ace$FishID)
f2 = lapply(f1, dpd_allfish, distance_matrix = dm_yoloace)
ans = lapply(f2, hs)
ans5 = data.frame(data.table::rbindlist(ans, idcol = TRUE))
colnames(ans5) <- c("FishID", "date_time", "prop_dist")
write.csv(ans5, "results/YoloAce/yoloace_dpd_refactored.csv") |
#' @export
#' @method print mhmm
#' @rdname print
print.mhmm <- function(x, digits = 3, ...){
cat("Coefficients :\n")
print(x$coefficients, digits = digits, ...)
cat("\nInitial probabilities :\n")
print.listof(x$initial_probs, digits = digits, ...)
cat("Transition probabilities :\n")
print.listof(x$transition_matrix, digits = digits, ...)
cat("Emission probabilities :\n")
if (x$n_channels == 1) {
print.listof(x$emission_matrix, digits = digits, ...)
} else {
for(i in 1:length(x$emission_matrix)){
cat(names(x$emission_matrix)[i], ":\n\n")
print.listof(x$emission_matrix[[i]], digits = digits, ...)
}
}
cat("\n")
} | /R/print.mhmm.R | no_license | zencoding/seqHMM | R | false | false | 700 | r | #' @export
#' @method print mhmm
#' @rdname print
print.mhmm <- function(x, digits = 3, ...){
cat("Coefficients :\n")
print(x$coefficients, digits = digits, ...)
cat("\nInitial probabilities :\n")
print.listof(x$initial_probs, digits = digits, ...)
cat("Transition probabilities :\n")
print.listof(x$transition_matrix, digits = digits, ...)
cat("Emission probabilities :\n")
if (x$n_channels == 1) {
print.listof(x$emission_matrix, digits = digits, ...)
} else {
for(i in 1:length(x$emission_matrix)){
cat(names(x$emission_matrix)[i], ":\n\n")
print.listof(x$emission_matrix[[i]], digits = digits, ...)
}
}
cat("\n")
} |
## Read file data into table.
## Separator in file is ";" and NA values in file are "?"
library(data.table)
powerData <- read.table("household_power_consumption.txt", sep=";", header=TRUE, na.strings="?")
## Convert Date column/variable to Date class
powerData$Date <- as.Date(powerData$Date, "%d/%m/%Y")
## Only keep data from the dates 2007-02-01 and 2007-02-02
powerData <- subset(powerData,powerData$Date>="2007-02-01" & powerData$Date<="2007-02-02")
## Convert Time column/variable to Time class
powerData$Time <- strptime(paste(powerData$Date,powerData$Time), "%Y-%m-%d %T")
## Plot multiple lines for Sub metering 1, 2 and 3 data over time
with(powerData,plot(Time,Sub_metering_1, type="l", xlab="", ylab="Energy sub metering"))
with(powerData,lines(Time, Sub_metering_2, col="red"))
with(powerData,lines(Time, Sub_metering_3, col="blue"))
## Create legend
legend("topright", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
lty=1, col=c("black","red","blue"))
## Output plot to a png file
dev.copy(png, file="plot3.png", height=480, width=480, units="px")
dev.off() | /plot3.R | no_license | nbhatt/ExData_Plotting1 | R | false | false | 1,102 | r |
## Read file data into table.
## Separator in file is ";" and NA values in file are "?"
library(data.table)
powerData <- read.table("household_power_consumption.txt", sep=";", header=TRUE, na.strings="?")
## Convert Date column/variable to Date class
powerData$Date <- as.Date(powerData$Date, "%d/%m/%Y")
## Only keep data from the dates 2007-02-01 and 2007-02-02
powerData <- subset(powerData,powerData$Date>="2007-02-01" & powerData$Date<="2007-02-02")
## Convert Time column/variable to Time class
powerData$Time <- strptime(paste(powerData$Date,powerData$Time), "%Y-%m-%d %T")
## Plot multiple lines for Sub metering 1, 2 and 3 data over time
with(powerData,plot(Time,Sub_metering_1, type="l", xlab="", ylab="Energy sub metering"))
with(powerData,lines(Time, Sub_metering_2, col="red"))
with(powerData,lines(Time, Sub_metering_3, col="blue"))
## Create legend
legend("topright", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
lty=1, col=c("black","red","blue"))
## Output plot to a png file
dev.copy(png, file="plot3.png", height=480, width=480, units="px")
dev.off() |
###unexported object derived from usethis###
todo_bullet <- function() crayon::red(clisymbols::symbol$bullet)
done_bullet <- function() crayon::green(clisymbols::symbol$tick)
fail_bullet <- function() crayon::bgRed(clisymbols::symbol$cross)
bulletize <- function(line, bullet) paste0(bullet, " ", line)
todo <- function(..., .envir = parent.frame()) {
out <- stringr::str_glue(..., .envir = .envir)
cli::cat_line(bulletize(out, bullet = todo_bullet()))
}
done <- function(..., .envir = parent.frame()) {
out <- stringr::str_glue(..., .envir = .envir)
cli::cat_line(bulletize(out, bullet = done_bullet()))
}
fail <- function(..., .envir = parent.frame()) {
out <- stringr::str_glue(..., .envir = .envir)
cli::cat_line(bulletize(out, bullet = fail_bullet()))
}
| /R/utils-other.R | no_license | sinnhazime/jobwatcher | R | false | false | 774 | r | ###unexported object derived from usethis###
todo_bullet <- function() crayon::red(clisymbols::symbol$bullet)
done_bullet <- function() crayon::green(clisymbols::symbol$tick)
fail_bullet <- function() crayon::bgRed(clisymbols::symbol$cross)
bulletize <- function(line, bullet) paste0(bullet, " ", line)
todo <- function(..., .envir = parent.frame()) {
out <- stringr::str_glue(..., .envir = .envir)
cli::cat_line(bulletize(out, bullet = todo_bullet()))
}
done <- function(..., .envir = parent.frame()) {
out <- stringr::str_glue(..., .envir = .envir)
cli::cat_line(bulletize(out, bullet = done_bullet()))
}
fail <- function(..., .envir = parent.frame()) {
out <- stringr::str_glue(..., .envir = .envir)
cli::cat_line(bulletize(out, bullet = fail_bullet()))
}
|
### R code from vignette source 'ordinalgmifs.Rnw'
###################################################
### code chunk number 1: ordinalgmifs.Rnw:93-94
###################################################
options(width = 70)
###################################################
### code chunk number 2: ordinalgmifs.Rnw:96-100
###################################################
library("ordinalgmifs")
data(hccframe)
cumulative.logit<-ordinalgmifs(group ~ 1, x = hccframe[,-1],
data = hccframe, epsilon=0.01)
###################################################
### code chunk number 3: ordinalgmifs.Rnw:105-106
###################################################
print(cumulative.logit)
###################################################
### code chunk number 4: ordinalgmifs.Rnw:109-110
###################################################
summary(cumulative.logit)
###################################################
### code chunk number 5: ordinalgmifs.Rnw:118-119
###################################################
plot(cumulative.logit)
###################################################
### code chunk number 6: ordinalgmifs.Rnw:126-127
###################################################
coef(cumulative.logit)
###################################################
### code chunk number 7: ordinalgmifs.Rnw:132-135
###################################################
phat <- predict(cumulative.logit)
table(phat$class, hccframe$group)
head(phat$predicted)
| /inst/doc/ordinalgmifs.R | no_license | cran/ordinalgmifs | R | false | false | 1,479 | r | ### R code from vignette source 'ordinalgmifs.Rnw'
###################################################
### code chunk number 1: ordinalgmifs.Rnw:93-94
###################################################
options(width = 70)
###################################################
### code chunk number 2: ordinalgmifs.Rnw:96-100
###################################################
library("ordinalgmifs")
data(hccframe)
cumulative.logit<-ordinalgmifs(group ~ 1, x = hccframe[,-1],
data = hccframe, epsilon=0.01)
###################################################
### code chunk number 3: ordinalgmifs.Rnw:105-106
###################################################
print(cumulative.logit)
###################################################
### code chunk number 4: ordinalgmifs.Rnw:109-110
###################################################
summary(cumulative.logit)
###################################################
### code chunk number 5: ordinalgmifs.Rnw:118-119
###################################################
plot(cumulative.logit)
###################################################
### code chunk number 6: ordinalgmifs.Rnw:126-127
###################################################
coef(cumulative.logit)
###################################################
### code chunk number 7: ordinalgmifs.Rnw:132-135
###################################################
phat <- predict(cumulative.logit)
table(phat$class, hccframe$group)
head(phat$predicted)
|
# Beer Mixed Effects Analysis
# Script for Running a Mixed Effects Analsyis
library(ggplot2)
library(data.table)
library(lme4)
beer <- fread('~/Desktop/projects/davidjohnbaker/content/post/data/beer_reviews.csv')
# Wrote a post with basic linear models, was about to publish, models were bad.
# Gotta pick the right model
# That model is one that takes into account noise from participants and individual beers, have enough data.
# Need to make sure each kind of beer has well over enough ratings
# Not exactly sure approriateness of Mixed models so going to play it safe ??
# Need to get the "good" data out of this
beer[, beer_name_unique := paste(brewery_name,beer_name, beer_style)]
names(beer)
# Beers that have over 30 reviews
quality.beers <- beer[, .(TimesBeerReviewed = .N), by = beer_name_unique][order(-TimesBeerReviewed)][TimesBeerReviewed >= 2000]
# Beer Types that are heavily represented in beer set (Top 20 Beer Types)
top.beer.styles <- beer[, .(TimesTypeInDataTable = .N), by = beer_style][order(-TimesTypeInDataTable)][1:5]
# Only use reviewers who have made over 100 reviews
top.100.reviewers <- beer[, .(NumberOfReviewsByReviewer = .N),
by = review_profilename][order(-NumberOfReviewsByReviewer)][NumberOfReviewsByReviewer >= 1000]
reviewers <- top.100.reviewers[beer, on = "review_profilename", nomatch=0]
top.beers.good.reviewers <- top.beer.styles[reviewers, on = "beer_style", nomatch=0]
quality.data <- quality.beers[top.beers.good.reviewers, on = "beer_name_unique",nomatch=0]
quality.data
#--------------------------------------------------
# Show
#--------------------------------------------------
# Visually find all the systematic differences you are going to expect
names(quality.data)
# DV is Review Overall
# IVs to inspect are aroma, taste, appearance, palette
# Reviewers Should have Random Intercepts and Random Slopes
# Individal Beers should have random slopes and intercepts
# Think of things that will add noise to your data
# Random Slope-y things: Quirks of Individual Raters, Quirks of individual Items general level of mean variability
# Things you know are going to be different, aka Gender effect on voice
# Then think of things that might add in random slopes (basically continuous variables that are random intercepts)
# Make A Null Model with all the noise
# Gender --> categorical variable we think that exhasutive inclusion will be able to help with remove systematic noise
# Beer style -- > " ... "
# Attitude and Subject are ITEM LEVEL and SUBJECT LEVEL Random Factors
# subjects will have their own level of quirky ness
# 1 + thing that we want to have Random Slope control for
beer.null <- lmer(review_overall ~ beer_style +
(1 + review_overall|review_profilename) + (1 + review_overall|beer_name_unique),
data=quality.data, REML=FALSE)
# review_aroma + review_appearance + review_palate + review_taste
beer.ar <- lmer(review_overall ~ review_aroma + beer_style +
(1 + review_overall|review_profilename) + (1 + review_overall|beer_name_unique),
data=quality.data, REML=FALSE)
beer.ar.ap <- lmer(review_overall ~ review_aroma + review_appearance + beer_style +
(1 + review_overall|review_profilename) + (1 + review_overall|beer_name_unique),
data=quality.data, REML=FALSE)
beer.ar.ap.pa <- lmer(review_overall ~ review_aroma + review_appearance + review_palate + beer_style +
(1 + review_overall|review_profilename) + (1 + review_overall|beer_name_unique),
data=quality.data, REML=FALSE)
beer.ar.ap.pa.ta <- lmer(review_overall ~ review_aroma + review_appearance + review_palate + review_taste +beer_style +
(1 + review_overall|review_profilename) + (1 + review_overall|beer_name_unique),
data=quality.data, REML=FALSE)
summary(beer.ar.ap.pa.ta)
anova(beer.null, beer.ar, beer.ar.ap, beer.ar.ap.pa, beer.ar.ap.pa.ta)
# Then add in IVs you think are goign to help, anvova them, also check for interactions
#======================================================================================================
#
| /retired_academic_theme/old_website/drafts/graveyard/saveme.R | permissive | davidjohnbaker1/davidjohnbaker | R | false | false | 4,232 | r | # Beer Mixed Effects Analysis
# Script for Running a Mixed Effects Analsyis
library(ggplot2)
library(data.table)
library(lme4)
beer <- fread('~/Desktop/projects/davidjohnbaker/content/post/data/beer_reviews.csv')
# Wrote a post with basic linear models, was about to publish, models were bad.
# Gotta pick the right model
# That model is one that takes into account noise from participants and individual beers, have enough data.
# Need to make sure each kind of beer has well over enough ratings
# Not exactly sure approriateness of Mixed models so going to play it safe ??
# Need to get the "good" data out of this
beer[, beer_name_unique := paste(brewery_name,beer_name, beer_style)]
names(beer)
# Beers that have over 30 reviews
quality.beers <- beer[, .(TimesBeerReviewed = .N), by = beer_name_unique][order(-TimesBeerReviewed)][TimesBeerReviewed >= 2000]
# Beer Types that are heavily represented in beer set (Top 20 Beer Types)
top.beer.styles <- beer[, .(TimesTypeInDataTable = .N), by = beer_style][order(-TimesTypeInDataTable)][1:5]
# Only use reviewers who have made over 100 reviews
top.100.reviewers <- beer[, .(NumberOfReviewsByReviewer = .N),
by = review_profilename][order(-NumberOfReviewsByReviewer)][NumberOfReviewsByReviewer >= 1000]
reviewers <- top.100.reviewers[beer, on = "review_profilename", nomatch=0]
top.beers.good.reviewers <- top.beer.styles[reviewers, on = "beer_style", nomatch=0]
quality.data <- quality.beers[top.beers.good.reviewers, on = "beer_name_unique",nomatch=0]
quality.data
#--------------------------------------------------
# Show
#--------------------------------------------------
# Visually find all the systematic differences you are going to expect
names(quality.data)
# DV is Review Overall
# IVs to inspect are aroma, taste, appearance, palette
# Reviewers Should have Random Intercepts and Random Slopes
# Individal Beers should have random slopes and intercepts
# Think of things that will add noise to your data
# Random Slope-y things: Quirks of Individual Raters, Quirks of individual Items general level of mean variability
# Things you know are going to be different, aka Gender effect on voice
# Then think of things that might add in random slopes (basically continuous variables that are random intercepts)
# Make A Null Model with all the noise
# Gender --> categorical variable we think that exhasutive inclusion will be able to help with remove systematic noise
# Beer style -- > " ... "
# Attitude and Subject are ITEM LEVEL and SUBJECT LEVEL Random Factors
# subjects will have their own level of quirky ness
# 1 + thing that we want to have Random Slope control for
beer.null <- lmer(review_overall ~ beer_style +
(1 + review_overall|review_profilename) + (1 + review_overall|beer_name_unique),
data=quality.data, REML=FALSE)
# review_aroma + review_appearance + review_palate + review_taste
beer.ar <- lmer(review_overall ~ review_aroma + beer_style +
(1 + review_overall|review_profilename) + (1 + review_overall|beer_name_unique),
data=quality.data, REML=FALSE)
beer.ar.ap <- lmer(review_overall ~ review_aroma + review_appearance + beer_style +
(1 + review_overall|review_profilename) + (1 + review_overall|beer_name_unique),
data=quality.data, REML=FALSE)
beer.ar.ap.pa <- lmer(review_overall ~ review_aroma + review_appearance + review_palate + beer_style +
(1 + review_overall|review_profilename) + (1 + review_overall|beer_name_unique),
data=quality.data, REML=FALSE)
beer.ar.ap.pa.ta <- lmer(review_overall ~ review_aroma + review_appearance + review_palate + review_taste +beer_style +
(1 + review_overall|review_profilename) + (1 + review_overall|beer_name_unique),
data=quality.data, REML=FALSE)
summary(beer.ar.ap.pa.ta)
anova(beer.null, beer.ar, beer.ar.ap, beer.ar.ap.pa, beer.ar.ap.pa.ta)
# Then add in IVs you think are goign to help, anvova them, also check for interactions
#======================================================================================================
#
|
library(tidyverse)
library(cowplot)
read_sheets <- function(path) {
lapply(
setNames(nm = readxl::excel_sheets(path)),
function(sheet) {
readxl::read_xlsx(path, sheet = sheet)
}
)
}
gene_tables <- list(
de = read_sheets("data/genes/Enrichments/DE_GOST.xlsx"),
de_spec = read_sheets("data/genes/Enrichments/DE_GOST_SPEC.xlsx"),
de_spec_maxSize1000 = lapply(
read_sheets("data/genes/Enrichments/DE_GOST_SPEC.xlsx"),
function(enr) {
filter(enr, term_size <= 1000)
}
),
de_spec_maxSize400 = lapply(
read_sheets("data/genes/Enrichments/DE_GOST_SPEC.xlsx"),
function(enr) {
filter(enr, term_size <= 400)
}
),
lof_de = read_sheets("data/genes/Enrichments/LOF_DE_GOST.xlsx"),
lof_de_spec = read_sheets("data/genes/Enrichments/LOF_DE_SPEC_GOST.xlsx"),
lof_de_spec_maxSize1000 = lapply(
read_sheets("data/genes/Enrichments/LOF_DE_SPEC_GOST.xlsx"),
function(enr) {
filter(enr, term_size <= 1000)
}
),
lof_de_spec_maxSize400 = lapply(
read_sheets("data/genes/Enrichments/LOF_DE_SPEC_GOST.xlsx"),
function(enr) {
filter(enr, term_size <= 400)
}
)
)
isoform_tables <- list(
de = read_sheets("data/isoforms/Enrichments/DE_GOST.xlsx"),
de_spec = read_sheets("data/isoforms/Enrichments/DE_GOST_SPEC.xlsx"),
de_spec_maxSize1000 = lapply(
read_sheets("data/isoforms/Enrichments/DE_GOST_SPEC.xlsx"),
function(enr) {
filter(enr, term_size <= 1000)
}
),
de_spec_maxSize400 = lapply(
read_sheets("data/isoforms/Enrichments/DE_GOST_SPEC.xlsx"),
function(enr) {
filter(enr, term_size <= 400)
}
),
lof_de = read_sheets("data/isoforms/Enrichments/LOF_DE_GOST.xlsx"),
lof_de_spec = read_sheets("data/isoforms/Enrichments/LOF_DE_SPEC_GOST.xlsx"),
lof_de_spec_maxSize1000 = lapply(
read_sheets("data/isoforms/Enrichments/LOF_DE_SPEC_GOST.xlsx"),
function(enr) {
filter(enr, term_size <= 1000)
}
),
lof_de_spec_maxSize400 = lapply(
read_sheets("data/isoforms/Enrichments/LOF_DE_SPEC_GOST.xlsx"),
function(enr) {
filter(enr, term_size <= 400)
}
)
)
plot_gost <- function(enr, n, title = "") {
ggplot(
data = enr %>%
arrange(p_value) %>%
mutate(Order = seq(nrow(.), 1)) %>%
top_n(n, Order),
mapping = aes(
x = -log10(p_value),
y = reorder(str_wrap(term_name, 40), Order),
size = intersection_size, fill = -log10(p_value)
)
) +
geom_point(shape = 21) +
geom_vline(xintercept = -log10(0.05), colour = "red") +
labs(
x = expression("-log"["10"]*"FDR"),
title = title
) +
guides(
fill = guide_colourbar(title = expression("-log"["10"]*"FDR")),
size = guide_legend(title = "Overlap")
) +
scale_fill_gradient(low = "white", high = "red") +
scale_size_continuous(range = c(5, 15)) +
theme_bw() +
theme(
text = element_text(size = 24),
axis.title.y = element_blank()
)
}
# plot_gost(isoform_tables$de$P02P03 %>% top_n(5, -p_value))
gene_tables_plots <- lapply(
names(gene_tables),
function(gt) {
lapply(
names(gene_tables[[gt]]),
function(period) {
plot_gost(
gene_tables[[gt]][[period]],
10,
title = paste0(gt, ", ", period)
)
}
)
}
)
pdf("data/genes/Enrichments/DE_GOST_Figures.pdf", width = 12, height = 12)
invisible(lapply(gene_tables_plots, print))
dev.off()
isoform_tables_plots <- lapply(
names(isoform_tables),
function(it) {
lapply(
names(isoform_tables[[it]]),
function(period) {
plot_gost(
isoform_tables[[it]][[period]],
10,
title = paste0(it, ", ", period)
)
}
)
}
)
pdf("data/isoforms/Enrichments/DE_GOST_Figures.pdf", width = 12, height = 12)
invisible(lapply(isoform_tables_plots, print))
dev.off()
################################################################################
# Plot all tables together #
################################################################################
all_names <- intersect(names(gene_tables), names(isoform_tables))
combined_plots <- lapply(
setNames(nm = all_names),
function(table_name) {
contrasts <- intersect(
names(gene_tables[[table_name]]),
names(isoform_tables[[table_name]])
)
lapply(
setNames(nm = contrasts),
function(contrast) {
enr <- bind_rows(
gene_tables[[table_name]][[contrast]] %>%
mutate(Data = "Gene") %>%
arrange(p_value) %>%
mutate(Order = seq(nrow(.), 1)),
isoform_tables[[table_name]][[contrast]] %>%
mutate(Data = "Isoform") %>%
arrange(p_value) %>%
mutate(Order = seq(nrow(.), 1))
) %>%
group_by(Data) %>%
top_n(5, Order) %>%
ungroup()
ggplot(
data = enr,
mapping = aes(
x = -log10(p_value),
y = reorder(str_wrap(term_name, 20), Order),
size = intersection_size, fill = -log10(p_value)
)
) +
facet_grid(Data ~ ., scales = "free_y") +
geom_point(shape = 21) +
# geom_vline(xintercept = -log10(0.05), colour = "red") +
labs(
x = expression("-log"["10"]*"FDR"),
title = paste(table_name, contrast)
) +
guides(
fill = guide_colourbar(
title = expression("-log"["10"]*"FDR")
),
size = guide_legend(title = "Overlap")
) +
scale_fill_gradient(low = "white", high = "red") +
scale_size_continuous(range = c(5, 15)) +
theme_bw() +
theme(
text = element_text(size = 24),
axis.title.y = element_blank()
)
}
)
}
)
# combined_plots[[1]][[1]]
# pdf("data/figures/DE_GOST_Figures.pdf", width = 16, height = 16)
# invisible(lapply(combined_plots, function(plt) invisible(lapply(plt, print))))
# dev.off()
################################################################################
# Final Plots #
################################################################################
# DE Spec
de_spec_finalPlots <- lapply(
list(
combined_plots$de_spec_maxSize1000$P04P05 +
labs(title = "P04P05") +
scale_x_continuous(
breaks = c(5, 10)
),
combined_plots$de_spec_maxSize1000$P07P08 +
labs(title = "P07P08"),
combined_plots$de_spec_maxSize1000$P08P09 +
labs(title = "P08P09")
),
function(plt) {
plt +
scale_fill_gradient(
low = "white", high = "red", limits = c(0, 25)
) +
scale_size_continuous(
limits = c(0, 200)
) +
labs(
x = expression("-log"["10"]*"(FDR)")
) +
theme(
text = element_text(size = 24),
axis.text.x = element_text(size = 20),
axis.text.y = element_text(size = 20)
)
}
)
final_plot_deSpec <- plot_grid(
plot_grid(
plotlist = lapply(
de_spec_finalPlots,
function(plt) plt + theme(legend.position = "none")
),
nrow = 1,
labels = "AUTO", label_size = 40, label_fontface = "bold"
),
get_legend(de_spec_finalPlots[[1]]),
ncol = 2, rel_widths = c(1, 0.12)
)
ggsave(
filename = "data/figures/DE_GOST_DESpec_Selected.pdf",
final_plot_deSpec,
device = "pdf", width = 16, height = 12,
useDingbats = FALSE
)
# LOF DE SPEC
lof_de_spec_finalPlots <- lapply(
list(
combined_plots$lof_de_spec_maxSize1000$P04P05 +
labs(title = "P04P05"),
combined_plots$lof_de_spec_maxSize1000$P07P08 +
labs(title = "P07P08"),
combined_plots$lof_de_spec_maxSize1000$P08P09 +
labs(title = "P08P09")
),
function(plt) {
plt +
scale_fill_gradient(
low = "white", high = "red", limits = c(0, 5)
) +
scale_size_continuous(
limits = c(0, 15)
) +
labs(
x = expression("-log"["10"]*"(FDR)")
) +
theme(
text = element_text(size = 24),
axis.text.x = element_text(size = 20),
axis.text.y = element_text(size = 10)
)
}
)
final_plot_LofDeSpec <- plot_grid(
plot_grid(
plotlist = lapply(
lof_de_spec_finalPlots,
function(plt) plt + theme(legend.position = "none")
),
nrow = 1,
labels = "AUTO", label_size = 40, label_fontface = "bold"
),
get_legend(lof_de_spec_finalPlots[[1]]),
ncol = 2, rel_widths = c(1, 0.12)
)
ggsave(
filename = "data/figures/LOF_DE_GOST_DESpec_Selected.pdf",
final_plot_LofDeSpec,
device = "pdf", width = 16, height = 8
)
# PREPOST
prepost_finalPlots <- lapply(
list(
combined_plots$de_spec_maxSize1000$PrePost +
labs(title = "PrePost, All DE"),
combined_plots$lof_de_spec_maxSize1000$PrePost +
labs(title = "PrePost, LoF targets")
),
function(plt) {
plt +
scale_fill_gradient(
low = "white", high = "red", limits = c(0, 15)
) +
scale_size_continuous(
limits = c(0, 155)
) +
labs(
x = expression("-log"["10"]*"(FDR)")
) +
theme(
text = element_text(size = 24),
axis.text.x = element_text(size = 20),
axis.text.y = element_text(size = 10)
)
}
)
final_plot_PrePost <- plot_grid(
plot_grid(
plotlist = lapply(
prepost_finalPlots,
function(plt) plt + theme(legend.position = "none")
),
nrow = 1,
labels = "AUTO", label_size = 40, label_fontface = "bold"
),
get_legend(prepost_finalPlots[[1]]),
ncol = 2, rel_widths = c(1, 0.12)
)
final_plot_PrePost
ggsave(
filename = "data/figures/LOF_DE_PREPOST_GOST_Selected.pdf",
final_plot_PrePost,
device = "pdf", width = 16, height = 8
)
| /scripts/DE_GO_Plots.R | no_license | EugeniaRadulescu/Isoform_BrainSpan | R | false | false | 11,423 | r | library(tidyverse)
library(cowplot)
read_sheets <- function(path) {
lapply(
setNames(nm = readxl::excel_sheets(path)),
function(sheet) {
readxl::read_xlsx(path, sheet = sheet)
}
)
}
gene_tables <- list(
de = read_sheets("data/genes/Enrichments/DE_GOST.xlsx"),
de_spec = read_sheets("data/genes/Enrichments/DE_GOST_SPEC.xlsx"),
de_spec_maxSize1000 = lapply(
read_sheets("data/genes/Enrichments/DE_GOST_SPEC.xlsx"),
function(enr) {
filter(enr, term_size <= 1000)
}
),
de_spec_maxSize400 = lapply(
read_sheets("data/genes/Enrichments/DE_GOST_SPEC.xlsx"),
function(enr) {
filter(enr, term_size <= 400)
}
),
lof_de = read_sheets("data/genes/Enrichments/LOF_DE_GOST.xlsx"),
lof_de_spec = read_sheets("data/genes/Enrichments/LOF_DE_SPEC_GOST.xlsx"),
lof_de_spec_maxSize1000 = lapply(
read_sheets("data/genes/Enrichments/LOF_DE_SPEC_GOST.xlsx"),
function(enr) {
filter(enr, term_size <= 1000)
}
),
lof_de_spec_maxSize400 = lapply(
read_sheets("data/genes/Enrichments/LOF_DE_SPEC_GOST.xlsx"),
function(enr) {
filter(enr, term_size <= 400)
}
)
)
isoform_tables <- list(
de = read_sheets("data/isoforms/Enrichments/DE_GOST.xlsx"),
de_spec = read_sheets("data/isoforms/Enrichments/DE_GOST_SPEC.xlsx"),
de_spec_maxSize1000 = lapply(
read_sheets("data/isoforms/Enrichments/DE_GOST_SPEC.xlsx"),
function(enr) {
filter(enr, term_size <= 1000)
}
),
de_spec_maxSize400 = lapply(
read_sheets("data/isoforms/Enrichments/DE_GOST_SPEC.xlsx"),
function(enr) {
filter(enr, term_size <= 400)
}
),
lof_de = read_sheets("data/isoforms/Enrichments/LOF_DE_GOST.xlsx"),
lof_de_spec = read_sheets("data/isoforms/Enrichments/LOF_DE_SPEC_GOST.xlsx"),
lof_de_spec_maxSize1000 = lapply(
read_sheets("data/isoforms/Enrichments/LOF_DE_SPEC_GOST.xlsx"),
function(enr) {
filter(enr, term_size <= 1000)
}
),
lof_de_spec_maxSize400 = lapply(
read_sheets("data/isoforms/Enrichments/LOF_DE_SPEC_GOST.xlsx"),
function(enr) {
filter(enr, term_size <= 400)
}
)
)
plot_gost <- function(enr, n, title = "") {
ggplot(
data = enr %>%
arrange(p_value) %>%
mutate(Order = seq(nrow(.), 1)) %>%
top_n(n, Order),
mapping = aes(
x = -log10(p_value),
y = reorder(str_wrap(term_name, 40), Order),
size = intersection_size, fill = -log10(p_value)
)
) +
geom_point(shape = 21) +
geom_vline(xintercept = -log10(0.05), colour = "red") +
labs(
x = expression("-log"["10"]*"FDR"),
title = title
) +
guides(
fill = guide_colourbar(title = expression("-log"["10"]*"FDR")),
size = guide_legend(title = "Overlap")
) +
scale_fill_gradient(low = "white", high = "red") +
scale_size_continuous(range = c(5, 15)) +
theme_bw() +
theme(
text = element_text(size = 24),
axis.title.y = element_blank()
)
}
# plot_gost(isoform_tables$de$P02P03 %>% top_n(5, -p_value))
gene_tables_plots <- lapply(
names(gene_tables),
function(gt) {
lapply(
names(gene_tables[[gt]]),
function(period) {
plot_gost(
gene_tables[[gt]][[period]],
10,
title = paste0(gt, ", ", period)
)
}
)
}
)
pdf("data/genes/Enrichments/DE_GOST_Figures.pdf", width = 12, height = 12)
invisible(lapply(gene_tables_plots, print))
dev.off()
isoform_tables_plots <- lapply(
names(isoform_tables),
function(it) {
lapply(
names(isoform_tables[[it]]),
function(period) {
plot_gost(
isoform_tables[[it]][[period]],
10,
title = paste0(it, ", ", period)
)
}
)
}
)
pdf("data/isoforms/Enrichments/DE_GOST_Figures.pdf", width = 12, height = 12)
invisible(lapply(isoform_tables_plots, print))
dev.off()
################################################################################
# Plot all tables together #
################################################################################
all_names <- intersect(names(gene_tables), names(isoform_tables))
combined_plots <- lapply(
setNames(nm = all_names),
function(table_name) {
contrasts <- intersect(
names(gene_tables[[table_name]]),
names(isoform_tables[[table_name]])
)
lapply(
setNames(nm = contrasts),
function(contrast) {
enr <- bind_rows(
gene_tables[[table_name]][[contrast]] %>%
mutate(Data = "Gene") %>%
arrange(p_value) %>%
mutate(Order = seq(nrow(.), 1)),
isoform_tables[[table_name]][[contrast]] %>%
mutate(Data = "Isoform") %>%
arrange(p_value) %>%
mutate(Order = seq(nrow(.), 1))
) %>%
group_by(Data) %>%
top_n(5, Order) %>%
ungroup()
ggplot(
data = enr,
mapping = aes(
x = -log10(p_value),
y = reorder(str_wrap(term_name, 20), Order),
size = intersection_size, fill = -log10(p_value)
)
) +
facet_grid(Data ~ ., scales = "free_y") +
geom_point(shape = 21) +
# geom_vline(xintercept = -log10(0.05), colour = "red") +
labs(
x = expression("-log"["10"]*"FDR"),
title = paste(table_name, contrast)
) +
guides(
fill = guide_colourbar(
title = expression("-log"["10"]*"FDR")
),
size = guide_legend(title = "Overlap")
) +
scale_fill_gradient(low = "white", high = "red") +
scale_size_continuous(range = c(5, 15)) +
theme_bw() +
theme(
text = element_text(size = 24),
axis.title.y = element_blank()
)
}
)
}
)
# combined_plots[[1]][[1]]
# pdf("data/figures/DE_GOST_Figures.pdf", width = 16, height = 16)
# invisible(lapply(combined_plots, function(plt) invisible(lapply(plt, print))))
# dev.off()
################################################################################
# Final Plots #
################################################################################
# DE Spec
de_spec_finalPlots <- lapply(
list(
combined_plots$de_spec_maxSize1000$P04P05 +
labs(title = "P04P05") +
scale_x_continuous(
breaks = c(5, 10)
),
combined_plots$de_spec_maxSize1000$P07P08 +
labs(title = "P07P08"),
combined_plots$de_spec_maxSize1000$P08P09 +
labs(title = "P08P09")
),
function(plt) {
plt +
scale_fill_gradient(
low = "white", high = "red", limits = c(0, 25)
) +
scale_size_continuous(
limits = c(0, 200)
) +
labs(
x = expression("-log"["10"]*"(FDR)")
) +
theme(
text = element_text(size = 24),
axis.text.x = element_text(size = 20),
axis.text.y = element_text(size = 20)
)
}
)
final_plot_deSpec <- plot_grid(
plot_grid(
plotlist = lapply(
de_spec_finalPlots,
function(plt) plt + theme(legend.position = "none")
),
nrow = 1,
labels = "AUTO", label_size = 40, label_fontface = "bold"
),
get_legend(de_spec_finalPlots[[1]]),
ncol = 2, rel_widths = c(1, 0.12)
)
ggsave(
filename = "data/figures/DE_GOST_DESpec_Selected.pdf",
final_plot_deSpec,
device = "pdf", width = 16, height = 12,
useDingbats = FALSE
)
# LOF DE SPEC
lof_de_spec_finalPlots <- lapply(
list(
combined_plots$lof_de_spec_maxSize1000$P04P05 +
labs(title = "P04P05"),
combined_plots$lof_de_spec_maxSize1000$P07P08 +
labs(title = "P07P08"),
combined_plots$lof_de_spec_maxSize1000$P08P09 +
labs(title = "P08P09")
),
function(plt) {
plt +
scale_fill_gradient(
low = "white", high = "red", limits = c(0, 5)
) +
scale_size_continuous(
limits = c(0, 15)
) +
labs(
x = expression("-log"["10"]*"(FDR)")
) +
theme(
text = element_text(size = 24),
axis.text.x = element_text(size = 20),
axis.text.y = element_text(size = 10)
)
}
)
final_plot_LofDeSpec <- plot_grid(
plot_grid(
plotlist = lapply(
lof_de_spec_finalPlots,
function(plt) plt + theme(legend.position = "none")
),
nrow = 1,
labels = "AUTO", label_size = 40, label_fontface = "bold"
),
get_legend(lof_de_spec_finalPlots[[1]]),
ncol = 2, rel_widths = c(1, 0.12)
)
ggsave(
filename = "data/figures/LOF_DE_GOST_DESpec_Selected.pdf",
final_plot_LofDeSpec,
device = "pdf", width = 16, height = 8
)
# PREPOST
prepost_finalPlots <- lapply(
list(
combined_plots$de_spec_maxSize1000$PrePost +
labs(title = "PrePost, All DE"),
combined_plots$lof_de_spec_maxSize1000$PrePost +
labs(title = "PrePost, LoF targets")
),
function(plt) {
plt +
scale_fill_gradient(
low = "white", high = "red", limits = c(0, 15)
) +
scale_size_continuous(
limits = c(0, 155)
) +
labs(
x = expression("-log"["10"]*"(FDR)")
) +
theme(
text = element_text(size = 24),
axis.text.x = element_text(size = 20),
axis.text.y = element_text(size = 10)
)
}
)
final_plot_PrePost <- plot_grid(
plot_grid(
plotlist = lapply(
prepost_finalPlots,
function(plt) plt + theme(legend.position = "none")
),
nrow = 1,
labels = "AUTO", label_size = 40, label_fontface = "bold"
),
get_legend(prepost_finalPlots[[1]]),
ncol = 2, rel_widths = c(1, 0.12)
)
final_plot_PrePost
ggsave(
filename = "data/figures/LOF_DE_PREPOST_GOST_Selected.pdf",
final_plot_PrePost,
device = "pdf", width = 16, height = 8
)
|
# R code for function-on-scalar linear model by Scheipl et al. (2015) for electricity data
# This is slow (But, faster than function-on-scalar additive model by Scheipl et al. (2015))
# Load necessary packages
library(readxl)
library(refund)
library(pracma)
# Load necessary source file
source('C:/Downloads/R_functions_for_pre-smoothing.R')
# Get response
electricity=read_xlsx('C:/Downloads/electricity.xlsx') # path of the electricity.xlsx file
Y_raw=t(electricity[,-1]) # make response as a (sample size,number of observed times) matrix
n=nrow(Y_raw)
T=ncol(Y_raw)
# Pre-smoothing step
time_vector=seq(0,1,length=T) # equally-spaced observed times re-scailed on [0,1]
eval_points=277 # number of evaluation time points
eval_vector=seq(0,1,length=eval_points) # vector of evaluation time points
h_add=0.1 # (minimum bandwidth,minimum bandwidth+h_add) will be the range for candidate bandwidths
h_length=101 # number of candidate bandwidths
pre_smoothing_h=c() # its i-th component will be an optimal bandwidth for pre-smoothing the i-th curve
Y=matrix(,n,eval_points) # pre-smoothed response matrix
for(i in 1:n)
{
pre_smoothing_h[i]=optimal_h_loocv(time_vector,Y_raw[i,],h_add,h_length)$h_optimal
for(t in 1:eval_points)
{
Y[i,t]=nw(eval_vector[t],time_vector,Y_raw[i,],pre_smoothing_h[i])
}
}
# Get predictor
X1=read_xlsx('C:/Downloads/temperature.xlsx')[,2] # path of the temperature.xlsx file
X2=read_xlsx('C:/Downloads/cloudiness.xlsx')[,2] # path of the cloudiness.xlsx file
X=cbind(X1,X2)
Y_hat=matrix(,nrow=n,ncol=eval_points)
error=c()
k=100 # number of spline basis
# Get ASPE
# It takes long time
for(i in 1:n)
{
print(i)
data_training=list(Y=Y[-i,],X1=X[-i,1],X2=X[-i,2])
fit=pffr(Y~X1+X2,data=data_training,yind=eval_vector,bs.yindex=list(bs='ps',k=k,m=c(2,1)),bs.int=list(bs='ps',k=k,m=c(2,1)))
Y_hat[i,]=predict(fit,newdata=list(X1=X[i,1],X2=X[i,2]))
error[i]=trapz(eval_vector,(Y[i,]-Y_hat[i,])^2)
}
mean(error)
| /Codes for electricity data/electricity_function-on-scalar_linear_model.R | no_license | kw-lee/Add-Reg-Hilbert-Res | R | false | false | 2,018 | r | # R code for function-on-scalar linear model by Scheipl et al. (2015) for electricity data
# This is slow (But, faster than function-on-scalar additive model by Scheipl et al. (2015))
# Load necessary packages
library(readxl)
library(refund)
library(pracma)
# Load necessary source file
source('C:/Downloads/R_functions_for_pre-smoothing.R')
# Get response
electricity=read_xlsx('C:/Downloads/electricity.xlsx') # path of the electricity.xlsx file
Y_raw=t(electricity[,-1]) # make response as a (sample size,number of observed times) matrix
n=nrow(Y_raw)
T=ncol(Y_raw)
# Pre-smoothing step
time_vector=seq(0,1,length=T) # equally-spaced observed times re-scailed on [0,1]
eval_points=277 # number of evaluation time points
eval_vector=seq(0,1,length=eval_points) # vector of evaluation time points
h_add=0.1 # (minimum bandwidth,minimum bandwidth+h_add) will be the range for candidate bandwidths
h_length=101 # number of candidate bandwidths
pre_smoothing_h=c() # its i-th component will be an optimal bandwidth for pre-smoothing the i-th curve
Y=matrix(,n,eval_points) # pre-smoothed response matrix
for(i in 1:n)
{
pre_smoothing_h[i]=optimal_h_loocv(time_vector,Y_raw[i,],h_add,h_length)$h_optimal
for(t in 1:eval_points)
{
Y[i,t]=nw(eval_vector[t],time_vector,Y_raw[i,],pre_smoothing_h[i])
}
}
# Get predictor
X1=read_xlsx('C:/Downloads/temperature.xlsx')[,2] # path of the temperature.xlsx file
X2=read_xlsx('C:/Downloads/cloudiness.xlsx')[,2] # path of the cloudiness.xlsx file
X=cbind(X1,X2)
Y_hat=matrix(,nrow=n,ncol=eval_points)
error=c()
k=100 # number of spline basis
# Get ASPE
# It takes long time
for(i in 1:n)
{
print(i)
data_training=list(Y=Y[-i,],X1=X[-i,1],X2=X[-i,2])
fit=pffr(Y~X1+X2,data=data_training,yind=eval_vector,bs.yindex=list(bs='ps',k=k,m=c(2,1)),bs.int=list(bs='ps',k=k,m=c(2,1)))
Y_hat[i,]=predict(fit,newdata=list(X1=X[i,1],X2=X[i,2]))
error[i]=trapz(eval_vector,(Y[i,]-Y_hat[i,])^2)
}
mean(error)
|
# Choropleth of per-capita global-warming potential (GWP) from fish consumption
# per Amsterdam "buurt" (neighbourhood): shapefile polygons joined with a food
# consumption table and multiplied by a life-cycle impact factor.
library(rgdal)
library(ggplot2)
library(wesanderson)
library(RColorBrewer)
library(maptools)
library(rosm)
library(prettymapr)
library(rgeos)
library(ggmap)
library(viridis)
library(maps)
buurts = readOGR("buurten.shp") # neighbourhood polygons
buurt_df = data.frame(buurts)
#buurt_transform = spTransform(buurts, CRS("+proj=longlat +init=epsg:4326"))
buurt_codes = fortify(buurts, region = "BUURTCODE") # polygons -> long/lat data frame keyed by BUURTCODE
buurt_codes$id = as.numeric(as.character(buurt_codes$id))
buurt_codes$group = as.numeric(as.character(buurt_codes$group))
consumption_data = read.csv('Foodconsumedperbuurt.csv')
#types = c("01. Potatoes and other tubers","02. Vegetables","03. Legumes")
#types = c(types,"04. Fruits","05. Dairy products and substitutes")
#types = c(types,"06. Cereals and cereal products","07. Meat,meat products and substitutes","08. Fish","09. Eggs and egg products","10. Fats and oils","11. Sugar and confectionery","12. Cakes and sweet biscuits","13. Non-alcoholic beverages","14. Alcoholic beverages","15. Sauces and seasonings","16. Soups and Stocks","17. Miscellaneous","18. Savoury snacks")
# impact factor, presumably kg CO2-eq per unit of fish -- TODO confirm units
lci_fish = mean(c(1.9384,2.56469)) #Marine and demersal fish
lci_value = lci_fish # scalar here; indexed by `count` below to allow one factor per food type
types = c("08. Fish")
count = 1
# accumulate env_impact = sum over food types of (per-capita amount * factor)
for (type in types){
if(count == 1){
df = consumption_data[consumption_data$food == type,]
df$foodpercapita = df$Amount.per.day/df$Population
fin_df = df[c("Name","BUURTCODE")]
fin_df$env_impact = (df$foodpercapita)*lci_value[count]
count = count + 1
}else{
df = consumption_data[consumption_data$food == type,]
df$foodpercapita = df$Amount.per.day/df$Population
fin_df$env_impact = fin_df$env_impact + ((df$foodpercapita)*lci_value[count])
count = count + 1
}
}
df = fin_df
consumption_per_buurt = (merge(buurt_codes,df, by.x = 'id', by.y = 'BUURTCODE',all=FALSE)) # inner join: buurts without data are dropped
consumption_per_buurt = consumption_per_buurt[order(consumption_per_buurt$order),] # restore polygon vertex order after merge
consumption_per_buurt$env_impact = (consumption_per_buurt$env_impact)*(365/1000) # per day -> per year, g -> kg (presumably -- verify)
ggplot() +
geom_polygon(data =consumption_per_buurt, aes(x = long, y = lat, group = group, fill = env_impact), color = "NA", size = 0.15) +
coord_fixed() + scale_fill_viridis() +
labs(title = expression('GWP per capita (in kg CO'[2]*' equivalent) due to fish consumption'),
subtitle = "",
x = "", y = "") + theme_void() +
theme(plot.title=element_text(size=14,face="bold",hjust=0))+
theme(legend.title = element_blank())
#write.csv(clothing_consumption_per_buurt,'inspect.csv',row.names=F)
# NOTE(review): the plot below looks like leftover code from another food type:
# `Total.Consumption` is not a column produced above (only env_impact is), and
# the title says "fruits" in a fish script -- confirm or remove.
ggplot() +
geom_polygon(data =consumption_per_buurt, aes(x = long, y = lat, group = group, fill = Total.Consumption), color = "white", size = 0.25) +
coord_fixed() + scale_fill_distiller(palette="YlOrRd") +
labs(title = "Total fruits Consumption",
subtitle = "",
x = "", y = "") + theme_void() +
theme(plot.title=element_text(size=15,face="bold",hjust=0.75))
display.brewer.all() # palette previews (interactive exploration)
names(wes_palettes)
wes = wes_palette(n=3, name= "GrandBudapest1")
| /DataCleaning/Food/Visualization_empirical/Fish.R | no_license | ruchik1504/ThesisCode | R | false | false | 2,954 | r | library(rgdal)
# Choropleth of per-capita GWP from fish consumption per "buurt"
# (neighbourhood); shapefile polygons joined with a consumption table and
# scaled by a life-cycle impact factor. (library(rgdal) is loaded on the
# preceding line of this file.)
library(ggplot2)
library(wesanderson)
library(RColorBrewer)
library(maptools)
library(rosm)
library(prettymapr)
library(rgeos)
library(ggmap)
library(viridis)
library(maps)
buurts = readOGR("buurten.shp") # neighbourhood polygons
buurt_df = data.frame(buurts)
#buurt_transform = spTransform(buurts, CRS("+proj=longlat +init=epsg:4326"))
buurt_codes = fortify(buurts, region = "BUURTCODE") # polygons -> long/lat data frame keyed by BUURTCODE
buurt_codes$id = as.numeric(as.character(buurt_codes$id))
buurt_codes$group = as.numeric(as.character(buurt_codes$group))
consumption_data = read.csv('Foodconsumedperbuurt.csv')
#types = c("01. Potatoes and other tubers","02. Vegetables","03. Legumes")
#types = c(types,"04. Fruits","05. Dairy products and substitutes")
#types = c(types,"06. Cereals and cereal products","07. Meat,meat products and substitutes","08. Fish","09. Eggs and egg products","10. Fats and oils","11. Sugar and confectionery","12. Cakes and sweet biscuits","13. Non-alcoholic beverages","14. Alcoholic beverages","15. Sauces and seasonings","16. Soups and Stocks","17. Miscellaneous","18. Savoury snacks")
# impact factor, presumably kg CO2-eq per unit of fish -- TODO confirm units
lci_fish = mean(c(1.9384,2.56469)) #Marine and demersal fish
lci_value = lci_fish # scalar here; indexed by `count` below to allow one factor per food type
types = c("08. Fish")
count = 1
# accumulate env_impact = sum over food types of (per-capita amount * factor)
for (type in types){
if(count == 1){
df = consumption_data[consumption_data$food == type,]
df$foodpercapita = df$Amount.per.day/df$Population
fin_df = df[c("Name","BUURTCODE")]
fin_df$env_impact = (df$foodpercapita)*lci_value[count]
count = count + 1
}else{
df = consumption_data[consumption_data$food == type,]
df$foodpercapita = df$Amount.per.day/df$Population
fin_df$env_impact = fin_df$env_impact + ((df$foodpercapita)*lci_value[count])
count = count + 1
}
}
df = fin_df
consumption_per_buurt = (merge(buurt_codes,df, by.x = 'id', by.y = 'BUURTCODE',all=FALSE)) # inner join: buurts without data are dropped
consumption_per_buurt = consumption_per_buurt[order(consumption_per_buurt$order),] # restore polygon vertex order after merge
consumption_per_buurt$env_impact = (consumption_per_buurt$env_impact)*(365/1000) # per day -> per year, g -> kg (presumably -- verify)
ggplot() +
geom_polygon(data =consumption_per_buurt, aes(x = long, y = lat, group = group, fill = env_impact), color = "NA", size = 0.15) +
coord_fixed() + scale_fill_viridis() +
labs(title = expression('GWP per capita (in kg CO'[2]*' equivalent) due to fish consumption'),
subtitle = "",
x = "", y = "") + theme_void() +
theme(plot.title=element_text(size=14,face="bold",hjust=0))+
theme(legend.title = element_blank())
#write.csv(clothing_consumption_per_buurt,'inspect.csv',row.names=F)
# NOTE(review): the plot below looks like leftover code from another food type:
# `Total.Consumption` is not a column produced above, and the title says
# "fruits" in a fish script -- confirm or remove.
ggplot() +
geom_polygon(data =consumption_per_buurt, aes(x = long, y = lat, group = group, fill = Total.Consumption), color = "white", size = 0.25) +
coord_fixed() + scale_fill_distiller(palette="YlOrRd") +
labs(title = "Total fruits Consumption",
subtitle = "",
x = "", y = "") + theme_void() +
theme(plot.title=element_text(size=15,face="bold",hjust=0.75))
display.brewer.all() # palette previews (interactive exploration)
names(wes_palettes)
wes = wes_palette(n=3, name= "GrandBudapest1")
|
##READ AND FORMAT
# Download the UCI household power consumption archive to a temp file and read
# a fixed slice directly out of the zip (unz() avoids extracting it).
temp <- tempfile()
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",temp)
# skip=66636/nrows=2880 selects two days of minute readings (presumably
# 2007-02-01..02 -- TODO confirm). "?" marks missing values in this dataset.
# NOTE(review): header=TRUE consumes the first line after skip= as column names
# (overwritten below), effectively dropping one data row -- confirm the skip
# count accounts for this.
data <- read.table(unz(temp, "household_power_consumption.txt"),sep=";",header=TRUE, na.strings="?",skip=66636,nrows = 2880)
# re-read only the file's first line to recover the real column names
header <- read.table(unz(temp, "household_power_consumption.txt"),nrows = 1, header = FALSE, sep =';', stringsAsFactors = FALSE)
colnames( data ) <- unlist(header)
unlink(temp) # delete the downloaded archive
# combine Date + Time into a single timestamp for the x axis
data$timestamp<-strptime(paste(data$Date, data$Time), format="%d/%m/%Y %H:%M:%S")
## PLOT 2
# Global active power vs time, written to a 480x480 PNG (device closed below)
png(filename = "Plot 2.png", width = 480, height = 480)
with(data
,plot(timestamp,Global_active_power,type="l",xlab="",ylab="Global Active Power (kilowatts)"))
dev.off() | /Plot 2.R | no_license | lkaraseva/ExData_Plotting1 | R | false | false | 729 | r | ##READ AND FORMAT
# Read the UCI household power consumption data straight from the downloaded
# zip and plot Global Active Power over time into "Plot 2.png".
temp <- tempfile()
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",temp)
# skip=66636/nrows=2880 selects two days of minute readings (presumably
# 2007-02-01..02 -- TODO confirm). "?" marks missing values.
# NOTE(review): header=TRUE consumes the first line after skip= as column names
# (overwritten below), effectively dropping one data row -- confirm the skip
# count accounts for this.
data <- read.table(unz(temp, "household_power_consumption.txt"),sep=";",header=TRUE, na.strings="?",skip=66636,nrows = 2880)
# re-read only the file's first line to recover the real column names
header <- read.table(unz(temp, "household_power_consumption.txt"),nrows = 1, header = FALSE, sep =';', stringsAsFactors = FALSE)
colnames( data ) <- unlist(header)
unlink(temp) # delete the downloaded archive
# combine Date + Time into a single timestamp for the x axis
data$timestamp<-strptime(paste(data$Date, data$Time), format="%d/%m/%Y %H:%M:%S")
## PLOT 2
# Global active power vs time, written to a 480x480 PNG (device closed below)
png(filename = "Plot 2.png", width = 480, height = 480)
with(data
,plot(timestamp,Global_active_power,type="l",xlab="",ylab="Global Active Power (kilowatts)"))
dev.off() |
# Assign each row's `ip` to one of four fixed ranges (column `rngNumber`, 1..4).
# findInterval() with cut points 126400/212750/287000 reproduces the original
# nested ifelse(): < 126400 -> 1, < 212750 -> 2, < 287000 -> 3, otherwise 4.
createIPRange <- function(df){
  ip_breaks <- c(126400, 212750, 287000)
  df %>% mutate(rngNumber = findInterval(ip, ip_breaks) + 1)
}
# Derive `day` and `hour` from `click_time` after shifting the clock by +8
# hours (presumably a UTC -> local-time correction -- TODO confirm): build an
# absolute hour counter `tm`, re-split it, and drop the helper column.
addTimeVars <- function(df){
df %>%
mutate(day=as.numeric(format(click_time,"%d")), hour=as.numeric(format(click_time,"%H") )) %>% # calendar day/hour of the raw timestamp
mutate(tm=day*24+hour+8) %>% # absolute hour index, shifted by 8 hours
mutate(day=tm %/% 24, hour=tm %% 24 ) %>% # re-split shifted counter into day and hour-of-day
select(-tm) # drop the helper column
}
# Per-group click counts and running indices:
#   n_day / r_day   : group size and 1-based row rank within
#                     (ip, day, channel, device, os, app)
#   n_hour / r_hour : same within that group further split by hour
# NOTE(review): `add=T` is the pre-dplyr-1.0 spelling of `.add = TRUE`, and
# uses T rather than TRUE -- confirm the dplyr version pinned for this project.
counts <- function(pt){
pt %>%
group_by(ip, day, channel, device, os, app ) %>%
mutate(n_day=n(), r_day=1:n()) %>% # group size and within-group row number
group_by(hour, add=T) %>% # refine grouping by hour, keeping existing groups
mutate(n_hour=n(), r_hour=1:n()) %>%
ungroup()
}
# Attach the attribution-rate lookup tables (tr_* data frames built elsewhere
# in this project), keyed on time/app/channel/os/device combinations.
# All joins are left joins, so rows without a match get NA feature columns.
zero_set <- function(pt){
  pt %>%
    left_join(tr_hour, by = "hour") %>%
    left_join(tr_app, by = "app") %>%
    left_join(tr_channel, by = "channel") %>%
    left_join(tr_os, by = "os") %>%
    left_join(tr_device, by = "device") %>%
    left_join(tr_channel_device_os, by = c("device", "channel", "os")) %>%
    left_join(tr_device_app, by = c("device", "app")) %>%
    left_join(tr_device_os, by = c("device", "os")) %>%
    left_join(tr_hour_channel_device_os, by = c("hour", "channel", "device", "os")) %>%
    left_join(tr_channel_hour, by = c("hour", "channel")) %>%
    left_join(tr_hour_device_app, by = c("hour", "device", "app")) %>%
    left_join(tr_hour_device_os, by = c("hour", "device", "os"))
}
# Join the single-key IP-range lookup tables (tb_* built elsewhere): per-ip
# stats, per-range stats (minus bookkeeping columns), and the four
# (rngNumber, feature) tables. Left joins: unmatched rows get NAs.
first_set <- function(pt){
  pt %>%
    left_join(tb_ip, by = "ip") %>%
    left_join(tb_rng %>% select(-min_ip, -max_ip, -pct_mean), by = "rngNumber") %>%
    left_join(tb_ip_app, by = c("rngNumber", "app")) %>%
    left_join(tb_ip_channel, by = c("rngNumber", "channel")) %>%
    left_join(tb_ip_device, by = c("rngNumber", "device")) %>%
    left_join(tb_ip_os, by = c("rngNumber", "os"))
}
# Join the two-feature IP-range lookup tables (pairwise feature interactions
# within each rngNumber). Left joins: unmatched rows get NAs.
second_set <- function(pt){
  pt %>%
    left_join(tb_ip_app_channel, by = c("rngNumber", "app", "channel")) %>%
    left_join(tb_ip_channel_device, by = c("rngNumber", "channel", "device")) %>%
    left_join(tb_ip_device_os, by = c("rngNumber", "device", "os")) %>%
    left_join(tb_ip_os_channel, by = c("rngNumber", "os", "channel")) %>%
    left_join(tb_ip_device_app, by = c("rngNumber", "app", "device")) %>%
    left_join(tb_ip_os_app, by = c("rngNumber", "app", "os"))
}
# Join the three-feature IP-range lookup tables (triple feature interactions
# within each rngNumber). Left joins: unmatched rows get NAs.
third_set <- function(pt){
  pt %>%
    left_join(tb_ip_channel_device_os, by = c("rngNumber", "channel", "device", "os")) %>%
    left_join(tb_ip_device_os_app, by = c("rngNumber", "app", "device", "os")) %>%
    left_join(tb_ip_channel_device_app, by = c("rngNumber", "channel", "device", "app")) %>%
    left_join(tb_ip_channel_os_app, by = c("rngNumber", "channel", "app", "os"))
}
# Naive-Bayes-style score: pct_NB is the plain sum of every attribution-rate
# column joined in by zero_set/first_set/second_set/third_set. Any NA in a
# component (an unmatched join) makes pct_NB NA for that row.
# NOTE(review): `pct_ip_device_device_os` repeats "device" -- confirm this
# matches the column actually produced by the lookup-table builder.
addNB <- function(pt) {
pt %>% mutate(pct_NB=pct_att_hour+pct_att_app+pct_att_channel+pct_att_os+pct_att_device+pct_att_channel_device_os+
pct_att_device_app+pct_att_device_os+pct_att_hour_channel_device_os+
pct_att_channel_hour+pct_att_hour_device_app+pct_att_hour_device_os+
pct_ip_app+pct_ip_channel+pct_ip_device+pct_ip_os+
pct_ip_app_channel+pct_ip_channel_device+pct_ip_device_device_os+pct_ip_os_channel+pct_ip_device_app+
pct_ip_os_app+pct_ip_channel_device_os+pct_ip_device_os_app+pct_ip_channel_device_app+pct_ip_channel_os_app)
}
# Final type normalisation before modelling: coerce the key columns and the
# count features to plain numeric. The commented-out lines reference a
# diff_mean feature that is disabled in this experiment; kept for reference.
adjust_set <- function(pt){
# pt %<>% select(-tm)
pt %<>% mutate(hour=as.numeric(hour), ip=as.numeric(ip), os=as.numeric(os),
channel=as.numeric(channel), device=as.numeric(device), app=as.numeric(app))
pt %<>% mutate(n_app=as.numeric(n_app), n_channel=as.numeric(n_channel), n_os=as.numeric(n_os),
# diff_mean=as.numeric(diff_mean),
n_device=as.numeric(n_device), n_device_os=as.numeric(n_device_os),
n_channel_device_os=as.numeric(n_channel_device_os))
# pt %<>% mutate(diff_mean=ifelse(is.nan(diff_mean),NA,diff_mean))
pt
} | /experiment_4/utils_BuildDS.R | no_license | picarus/TalkingData | R | false | false | 3,838 | r |
# Assign each row's `ip` to one of four fixed ranges (column `rngNumber`, 1..4).
# findInterval() with cut points 126400/212750/287000 reproduces the original
# nested ifelse(): < 126400 -> 1, < 212750 -> 2, < 287000 -> 3, otherwise 4.
createIPRange <- function(df){
  ip_breaks <- c(126400, 212750, 287000)
  df %>% mutate(rngNumber = findInterval(ip, ip_breaks) + 1)
}
# Derive `day` and `hour` from `click_time` after shifting the clock by +8
# hours (presumably a UTC -> local-time correction -- TODO confirm): build an
# absolute hour counter `tm`, re-split it, and drop the helper column.
addTimeVars <- function(df){
df %>%
mutate(day=as.numeric(format(click_time,"%d")), hour=as.numeric(format(click_time,"%H") )) %>% # calendar day/hour of the raw timestamp
mutate(tm=day*24+hour+8) %>% # absolute hour index, shifted by 8 hours
mutate(day=tm %/% 24, hour=tm %% 24 ) %>% # re-split shifted counter into day and hour-of-day
select(-tm) # drop the helper column
}
# Per-group click counts and running indices:
#   n_day / r_day   : group size and 1-based row rank within
#                     (ip, day, channel, device, os, app)
#   n_hour / r_hour : same within that group further split by hour
# NOTE(review): `add=T` is the pre-dplyr-1.0 spelling of `.add = TRUE`, and
# uses T rather than TRUE -- confirm the dplyr version pinned for this project.
counts <- function(pt){
pt %>%
group_by(ip, day, channel, device, os, app ) %>%
mutate(n_day=n(), r_day=1:n()) %>% # group size and within-group row number
group_by(hour, add=T) %>% # refine grouping by hour, keeping existing groups
mutate(n_hour=n(), r_hour=1:n()) %>%
ungroup()
}
# Attach the attribution-rate lookup tables (tr_* data frames built elsewhere
# in this project), keyed on time/app/channel/os/device combinations.
# All joins are left joins, so rows without a match get NA feature columns.
zero_set <- function(pt){
  pt %>%
    left_join(tr_hour, by = "hour") %>%
    left_join(tr_app, by = "app") %>%
    left_join(tr_channel, by = "channel") %>%
    left_join(tr_os, by = "os") %>%
    left_join(tr_device, by = "device") %>%
    left_join(tr_channel_device_os, by = c("device", "channel", "os")) %>%
    left_join(tr_device_app, by = c("device", "app")) %>%
    left_join(tr_device_os, by = c("device", "os")) %>%
    left_join(tr_hour_channel_device_os, by = c("hour", "channel", "device", "os")) %>%
    left_join(tr_channel_hour, by = c("hour", "channel")) %>%
    left_join(tr_hour_device_app, by = c("hour", "device", "app")) %>%
    left_join(tr_hour_device_os, by = c("hour", "device", "os"))
}
# Join the single-key IP-range lookup tables (tb_* built elsewhere): per-ip
# stats, per-range stats (minus bookkeeping columns), and the four
# (rngNumber, feature) tables. Left joins: unmatched rows get NAs.
first_set <- function(pt){
  pt %>%
    left_join(tb_ip, by = "ip") %>%
    left_join(tb_rng %>% select(-min_ip, -max_ip, -pct_mean), by = "rngNumber") %>%
    left_join(tb_ip_app, by = c("rngNumber", "app")) %>%
    left_join(tb_ip_channel, by = c("rngNumber", "channel")) %>%
    left_join(tb_ip_device, by = c("rngNumber", "device")) %>%
    left_join(tb_ip_os, by = c("rngNumber", "os"))
}
# Join the two-feature IP-range lookup tables (pairwise feature interactions
# within each rngNumber). Left joins: unmatched rows get NAs.
second_set <- function(pt){
  pt %>%
    left_join(tb_ip_app_channel, by = c("rngNumber", "app", "channel")) %>%
    left_join(tb_ip_channel_device, by = c("rngNumber", "channel", "device")) %>%
    left_join(tb_ip_device_os, by = c("rngNumber", "device", "os")) %>%
    left_join(tb_ip_os_channel, by = c("rngNumber", "os", "channel")) %>%
    left_join(tb_ip_device_app, by = c("rngNumber", "app", "device")) %>%
    left_join(tb_ip_os_app, by = c("rngNumber", "app", "os"))
}
# Join the three-feature IP-range lookup tables (triple feature interactions
# within each rngNumber). Left joins: unmatched rows get NAs.
third_set <- function(pt){
  pt %>%
    left_join(tb_ip_channel_device_os, by = c("rngNumber", "channel", "device", "os")) %>%
    left_join(tb_ip_device_os_app, by = c("rngNumber", "app", "device", "os")) %>%
    left_join(tb_ip_channel_device_app, by = c("rngNumber", "channel", "device", "app")) %>%
    left_join(tb_ip_channel_os_app, by = c("rngNumber", "channel", "app", "os"))
}
# Naive-Bayes-style score: pct_NB is the plain sum of every attribution-rate
# column joined in by zero_set/first_set/second_set/third_set. Any NA in a
# component (an unmatched join) makes pct_NB NA for that row.
# NOTE(review): `pct_ip_device_device_os` repeats "device" -- confirm this
# matches the column actually produced by the lookup-table builder.
addNB <- function(pt) {
pt %>% mutate(pct_NB=pct_att_hour+pct_att_app+pct_att_channel+pct_att_os+pct_att_device+pct_att_channel_device_os+
pct_att_device_app+pct_att_device_os+pct_att_hour_channel_device_os+
pct_att_channel_hour+pct_att_hour_device_app+pct_att_hour_device_os+
pct_ip_app+pct_ip_channel+pct_ip_device+pct_ip_os+
pct_ip_app_channel+pct_ip_channel_device+pct_ip_device_device_os+pct_ip_os_channel+pct_ip_device_app+
pct_ip_os_app+pct_ip_channel_device_os+pct_ip_device_os_app+pct_ip_channel_device_app+pct_ip_channel_os_app)
}
# Final type normalisation before modelling: coerce the key columns and the
# count features to plain numeric. The commented-out lines reference a
# diff_mean feature that is disabled in this experiment; kept for reference.
adjust_set <- function(pt){
# pt %<>% select(-tm)
pt %<>% mutate(hour=as.numeric(hour), ip=as.numeric(ip), os=as.numeric(os),
channel=as.numeric(channel), device=as.numeric(device), app=as.numeric(app))
pt %<>% mutate(n_app=as.numeric(n_app), n_channel=as.numeric(n_channel), n_os=as.numeric(n_os),
# diff_mean=as.numeric(diff_mean),
n_device=as.numeric(n_device), n_device_os=as.numeric(n_device_os),
n_channel_device_os=as.numeric(n_channel_device_os))
# pt %<>% mutate(diff_mean=ifelse(is.nan(diff_mean),NA,diff_mean))
pt
} |
## Creates a "cache matrix": a list of four closures around a matrix `x` that
##  a) set the matrix value          (matrix.value)
##  b) get the matrix value          (pull.matrix)
##  c) set the cached inverse value  (set.inverse)
##  d) get the cached inverse value  (pull.inverse)
## BUG FIX: the original assigned `null`, which is an undefined object in R
## (the language constant is `NULL`), so every call errored with
## "object 'null' not found".
makeCacheMatrix <- function(x = matrix()) {
  ## no inverse cached yet
  inverse.value <- NULL
  ## replace the stored matrix and invalidate any cached inverse
  matrix.value <- function(y) {
    x <<- y
    inverse.value <<- NULL
  }
  ## return the stored matrix
  pull.matrix <- function() x
  ## cache a freshly computed inverse
  set.inverse <- function(inv_) inverse.value <<- inv_
  ## return the cached inverse (NULL until set.inverse is called)
  pull.inverse <- function () inverse.value
  ## expose the four accessors by name
  list(matrix.value = matrix.value, pull.matrix = pull.matrix,
       set.inverse = set.inverse, pull.inverse = pull.inverse)
}
## Return the inverse of a "cache matrix" `x` built by makeCacheMatrix.
## A previously computed inverse is reused (with a message); otherwise the
## inverse is computed via solve(), stored back into the cache, and returned.
## Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$pull.inverse()
  if (is.null(cached)) {
    ## cache miss: compute, store, then return the inverse
    mat <- x$pull.matrix()
    cached <- solve(mat, ...)
    x$set.inverse(cached)
    return(cached)
  }
  ## cache hit: announce and return the stored inverse
  message("pulling cached data")
  cached
}
| /cachematrix.R | no_license | beethovensaves/ProgrammingAssignment2 | R | false | false | 1,448 | r | ## Creates a matrix listing a function that
## Builds a "cache matrix": a list of four closures around a matrix `x` that
## a) set the matrix value          (matrix.value)
## b) get the matrix value          (pull.matrix)
## c) set the cached inverse value  (set.inverse)
## d) get the cached inverse value  (pull.inverse)
## BUG FIX: the original assigned `null` (undefined in R; the constant is
## `NULL`), so every call errored with "object 'null' not found".
makeCacheMatrix <- function(x = matrix()) {
  ## no inverse cached yet
  inverse.value <- NULL
  ## replace the stored matrix and invalidate any cached inverse
  matrix.value <- function(y) {
    x <<- y
    inverse.value <<- NULL
  }
  ## return the stored matrix
  pull.matrix <- function() x
  ## cache a freshly computed inverse
  set.inverse <- function(inv_) inverse.value <<- inv_
  ## return the cached inverse (NULL until set.inverse is called)
  pull.inverse <- function () inverse.value
  ## expose the four accessors by name
  list(matrix.value = matrix.value, pull.matrix = pull.matrix,
       set.inverse = set.inverse, pull.inverse = pull.inverse)
}
## Return the inverse of a "cache matrix" `x` built by makeCacheMatrix.
## A previously computed inverse is reused (with a message); otherwise the
## inverse is computed via solve(), stored back into the cache, and returned.
## Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$pull.inverse()
  if (is.null(cached)) {
    ## cache miss: compute, store, then return the inverse
    mat <- x$pull.matrix()
    cached <- solve(mat, ...)
    x$set.inverse(cached)
    return(cached)
  }
  ## cache hit: announce and return the stored inverse
  message("pulling cached data")
  cached
}
|
# Shiny UI for the "Great Barrier Reef Fish" app: a sidebar with a
# species-count slider and column checkboxes, and a main area with
# Plot / Table / Dataset tabs (rendered by the matching server.R outputs).
library(shiny)
reef <- read.table("data/reefData.txt") # dataset; its column names drive the checkbox choices
shinyUI(fluidPage(
titlePanel("Great Barrier Reef Fish"),
sidebarLayout(
sidebarPanel(
# number of species to plot (server reads input$maximum)
sliderInput("maximum", h5("Plot number of Species:"),
min=1, max=50, value=10),
# which columns the table tab shows (server reads input$show_vars)
checkboxGroupInput('show_vars', label=h5('Show table columns:'),
names(reef),
selected = names(reef))
),
mainPanel(
tabsetPanel(
tabPanel("Plot", plotOutput("speciesPlot")),
tabPanel("Table", dataTableOutput("fish")),
tabPanel("Dataset", verbatimTextOutput("summary"))
)
)
)
)
) | /ui.R | no_license | ColinLiiii/Developing-Data-Project | R | false | false | 916 | r | library(shiny)
# Shiny UI (continued; library(shiny) is loaded on the preceding line):
# sidebar with a species-count slider and column checkboxes, main area with
# Plot / Table / Dataset tabs rendered by the matching server.R outputs.
reef <- read.table("data/reefData.txt") # dataset; its column names drive the checkbox choices
shinyUI(fluidPage(
titlePanel("Great Barrier Reef Fish"),
sidebarLayout(
sidebarPanel(
# number of species to plot (server reads input$maximum)
sliderInput("maximum", h5("Plot number of Species:"),
min=1, max=50, value=10),
# which columns the table tab shows (server reads input$show_vars)
checkboxGroupInput('show_vars', label=h5('Show table columns:'),
names(reef),
selected = names(reef))
),
mainPanel(
tabsetPanel(
tabPanel("Plot", plotOutput("speciesPlot")),
tabPanel("Table", dataTableOutput("fish")),
tabPanel("Dataset", verbatimTextOutput("summary"))
)
)
)
)
) |
#################################################
#
# compileDatasets_brca.R
# Date: April 01, 2016
# Author: Amrit Singh
#
#################################################
WhereAmI <- "/Volumes/Data/mac files Dec 05 2016 update/Oz.TRI/TRI/Breast Cancer/NewAnalysis/data/"
## Import datasets
###----------------------------
#
# 1) Clinical Data
#
###----------------------------
# Clinical dataset
clinical <- as.data.frame(t(read.delim(paste0(WhereAmI, "firebrowse/gdac.broadinstitute.org_BRCA.Merge_Clinical.Level_1.2015110100.0.0/BRCA.clin.merged.txt"))))
colnames(clinical) <- as.character(as.matrix(clinical[1,]))
clinDat <- clinical[-1,]
rownames(clinDat) <- gsub("-", ".", toupper(clinDat$patient.patient_id))
length(rownames(clinDat)); length(unique(rownames(clinDat))); dim(clinDat);
###----------------------------
#
# Genes
#
###----------------------------
## gene expression
genExp <- read.delim(paste0(WhereAmI, "firebrowse/gdac.broadinstitute.org_BRCA.Merge_rnaseqv2__illuminahiseq_rnaseqv2__unc_edu__Level_3__RSEM_genes_normalized__data.Level_3.2015110100.0.0/BRCA.rnaseqv2__illuminahiseq_rnaseqv2__unc_edu__Level_3__RSEM_genes_normalized__data.data.txt"))
genExp2 <- genExp[-c(1:30), ]
genNames <- unlist(lapply(strsplit(as.character(genExp2[, 1]), "|", fixed=TRUE), function(i) i[1]))
genNames[which(genNames == "SLC35E2")[2]] <- "SLC35E2.rep"
rownames(genExp2) <- genNames
genEset <- genExp2[, -1]
dim(genEset)
dim(genEset)
table(colnames(genEset))
table(unlist(lapply(strsplit(colnames(genEset), "\\."), function(i) i[4]))) # 1098 01A
genEset2 <- genEset[, grep("01A", colnames(genEset))]
dim(genEset); dim(genEset2);
colnames(genEset2) <- unlist(lapply(strsplit(colnames(genEset2), "\\."), function(i) i[3]))
# number of common subjects between clinical and gene expression
length(intersect(rownames(clinDat), colnames(genEset2))) ## 1080
###----------------------------
#
# Proteins
#
###----------------------------
prot <- read.delim(paste0(WhereAmI, "firebrowse/gdac.broadinstitute.org_BRCA.Merge_protein_exp__mda_rppa_core__mdanderson_org__Level_3__protein_normalization__data.Level_3.2015110100.0.0/BRCA.protein_exp__mda_rppa_core__mdanderson_org__Level_3__protein_normalization__data.data.txt"))[-1,]
rownames(prot) <- prot[, "Sample.REF"]
prot1 <- prot[, -1]
dim(prot1) # 142 x 410
prot2 <- matrix(0, nrow=nrow(prot1), ncol=ncol(prot1))
rownames(prot2) <- rownames(prot1)
colnames(prot2) <- colnames(prot1)
for(i in 1:nrow(prot2)){
prot2[i, ] <- as.numeric(as.matrix(prot1[i, ]))
}
prot3 <- prot2[, grep("01A", colnames(prot2))]
dim(prot3) # 142 proteins x 403 samples
colnames(prot3) <- unlist(lapply(strsplit(colnames(prot3), "\\."), function(x) x[3]))
# number of common subjects between clinical and gene expression
length(Reduce(intersect, list(rownames(clinDat), colnames(genEset2), colnames(prot3)))) ## 400
###----------------------------
#
# microRNA
#
###----------------------------
### miRNA
## HiSeq
mirnaHiseq <- read.delim(paste0(WhereAmI, "firebrowse/gdac.broadinstitute.org_BRCA.Merge_mirnaseq__illuminahiseq_mirnaseq__bcgsc_ca__Level_3__miR_gene_expression__data.Level_3.2015110100.0.0/BRCA.mirnaseq__illuminahiseq_mirnaseq__bcgsc_ca__Level_3__miR_gene_expression__data.data.txt"))
dim(mirnaHiseq)
mirnaHiseq2 <- mirnaHiseq[-1, seq(2,ncol(mirnaHiseq), by=3)]
rownames(mirnaHiseq2) <- mirnaHiseq[-1, 1]
dim(mirnaHiseq2)
length(unique(colnames(mirnaHiseq2)))
## GA
mirnaGA <- read.delim(paste0(WhereAmI, "firebrowse/gdac.broadinstitute.org_BRCA.Merge_mirnaseq__illuminaga_mirnaseq__bcgsc_ca__Level_3__miR_gene_expression__data.Level_3.2015110100.0.0/BRCA.mirnaseq__illuminaga_mirnaseq__bcgsc_ca__Level_3__miR_gene_expression__data.data.txt"))
dim(mirnaGA)
mirnaGA2 <- mirnaGA[-1, seq(2,ncol(mirnaGA), by=3)]
rownames(mirnaGA2) <- mirnaGA[-1, 1]
dim(mirnaGA2)
all(rownames(mirnaHiseq2) == rownames(mirnaGA2))
mirna <- cbind(mirnaHiseq2, mirnaGA2)
dim(mirna)
mirna2 <- apply(mirna, 2, as.numeric)
rownames(mirna2) <- rownames(mirna)
mirna3 <- mirna2[, grep("01A", colnames(mirna2))]
dim(mirna3)
colnames(mirna3) <- unlist(lapply(strsplit(colnames(mirna3), "\\."), function(x) x[3]))
# number of common subjects between clinical and gene, proteins and miRNA
length(Reduce(intersect, list(rownames(clinDat), colnames(genEset2), colnames(prot3), colnames(mirna3)))) ## 388
###----------------------------
#
# CpG dataset
#
###----------------------------
## Illumina 27
meth.27 <- read.delim(paste0(WhereAmI, "firebrowse/gdac.broadinstitute.org_BRCA.Merge_methylation__humanmethylation27__jhu_usc_edu__Level_3__within_bioassay_data_set_function__data.Level_3.2015110100.0.0/BRCA.methylation__humanmethylation27__jhu_usc_edu__Level_3__within_bioassay_data_set_function__data.data.txt"))
meth.27.new <- meth.27[, c(1, 3, 4, 5, grep("Beta_value", as.character(as.matrix(meth.27[1,]))))]
meth.27.new2 <- meth.27.new[-1, ]
colnames(meth.27.new2) <- colnames(meth.27.new)
colnames(meth.27.new2)[1:4] <- c("Composite Element REF", "Gene_Symbol", "Chromosome", "Genomic_Coordinate")
cg.genSym <- meth.27.new2[, 1:2]
other <- apply(meth.27.new2[, -c(1:2)], 2, as.numeric)
cg.genSym.other <- cbind.data.frame(cg.genSym, other)
#saveRDS(cg.genSym.other, paste0(WhereAmI, "firebrowse/meth.27.rds"))
## Illumina 450K
## convert the large methylation datafile in smaller files using the following linux command
## split -l 100000 BRCA.methylation__humanmethylation450__jhu_usc_edu__Level_3__within_bioassay_data_set_function__data.data.txt tcga.meth-
fileNames <- list.files(paste0(WhereAmI, "firebrowse/gdac.broadinstitute.org_BRCA.Merge_methylation__humanmethylation450__jhu_usc_edu__Level_3__within_bioassay_data_set_function__data.Level_3.2015110100.0.0/splitMethDatasets"),
full.names = TRUE)
## import the first file separately since it contains the header
meth.450.aa <- read.delim(fileNames[1])
ind <- c(1, 3, 4, 5, grep("Beta_value", as.character(as.matrix(meth.450.aa[1,]))))
meth.450.new <- meth.450.aa[, ind]
meth.450.new2 <- meth.450.new[-1, ]
colnames(meth.450.new2) <- colnames(meth.450.new)
colnames(meth.450.new2)[1:4] <- c("Composite Element REF", "Gene_Symbol", "Chromosome", "Genomic_Coordinate")
cg.genSym <- meth.450.new2[, 1:2]
other <- apply(meth.450.new2[, -c(1:2)], 2, as.numeric)
cg.genSym.other <- cbind.data.frame(cg.genSym, other)
## import the rest of the files
methList <- list()
for(i in 2 : length(fileNames)){
meth.450 <- read.delim(fileNames[i], header = FALSE)
meth.450 <- meth.450[, ind]
colnames(meth.450) <- colnames(cg.genSym.other)
methList[[i]] <- meth.450
}
methList <- rbind(cg.genSym.other, do.call(rbind, methList))
#saveRDS(methList, paste0(WhereAmI, "firebrowse/meth.450.rds"))
## combine meth.27 and meth.450
meth.27 <- readRDS(paste0(WhereAmI, "firebrowse/meth.27.rds"))
dim(meth.27); # 27578 x 347
dim(methList); # 485577 x 889
length(meth.27$`Composite Element REF`)
length(unique(as.character(meth.27$`Composite Element REF`)))
length(methList$`Composite Element REF`)
length(unique(as.character(methList$`Composite Element REF`)))
rownames(meth.27) <- as.character(meth.27$`Composite Element REF`)
rownames(methList) <- as.character(methList$`Composite Element REF`)
comSubj <- intersect(rownames(meth.27), rownames(methList))
length(comSubj); # 25978
meth <- cbind(meth.27[comSubj, ], methList[comSubj, ])
dim(meth) # 25978 1236
saveRDS(meth, paste0(WhereAmI, "firebrowse/combined.meth.rds"))
colnames(meth)[1:5]
## methylation dataset
meth0 <- readRDS(paste0(WhereAmI, "firebrowse/combined.meth.rds"))
meth <- meth0[, -c(348:351)] ## remove duplicate cpg ids, gene symbols, chromosome, genomic coordinates (4 columns)
dim(meth)
ncol(meth) - 4 # number of samples (removed the first 4 columns)
meth2 <- meth[, grep("01A", colnames(meth))]
dim(meth2)
colnames(meth2) <- unlist(lapply(strsplit(colnames(meth2), "\\."), function(x) x[3]))
# number of common subjects between clinical and gene, proteins and miRNA and cpg
length(Reduce(intersect, list(rownames(clinDat), colnames(genEset2), colnames(prot3), colnames(mirna3), colnames(meth2)))) ## 387
###----------------------------
#
# PAM50 labels
#
###----------------------------
pam50 <- read.delim(paste0(WhereAmI, "BRCA.1182_pam50scores.txt"), row.names = 1)
dim(pam50)
pam50.new <- pam50[grep("01A", rownames(pam50)), ]
dim(pam50.new)
rownames(pam50.new) <- unlist(lapply(strsplit(rownames(pam50.new), "-"), function(x) x[3]))
# number of common subjects between clinical and gene, proteins and miRNA and cpg
length(Reduce(intersect, list(rownames(clinDat), colnames(genEset2), colnames(prot3),
colnames(mirna3), colnames(meth2), rownames(pam50.new)))) ## 387
comSubjects <- Reduce(intersect, list(rownames(clinDat), colnames(genEset2), colnames(prot3),
colnames(mirna3), colnames(meth2), rownames(pam50.new)))
###----------------------------
#
# 1) Training dataset (limiting dataset proteomics)
#
###----------------------------
clinTrain <- clinDat[comSubjects, ]
mrnaTrain <- genEset2[, comSubjects]
mirnaTrain <- mirna3[, comSubjects]
protTrain <- prot3[, comSubjects]
methTrain <- meth2[, comSubjects]
pam50Train <- pam50.new[comSubjects ,]
table(pam50Train$Call)
protAnnotation <- read.delim(paste0(WhereAmI, "firebrowse/gdac.broadinstitute.org_BRCA.RPPA_AnnotateWithGene.Level_3.2015110100.0.0/BRCA.antibody_annotation.txt"))
rownames(protAnnotation) <- protAnnotation[, "Composite.Element.REF"]
methAnnotation <- meth[, c("Composite Element REF", "Gene_Symbol", "Chromosome", "Genomic_Coordinate")]
###----------------------------
#
# 2) Test dataset
#
###----------------------------
comSubjects <- setdiff(Reduce(intersect, list(rownames(clinDat), colnames(genEset2),
colnames(mirna3), colnames(meth2), rownames(pam50.new))), rownames(clinTrain))
length(comSubjects)
clinTest <- clinDat[comSubjects, ]
mrnaTest <- genEset2[, comSubjects]
mirnaTest <- mirna3[, comSubjects]
methTest <- meth2[, comSubjects]
pam50Test <- pam50.new[comSubjects ,]
table(pam50Test$Call)
save(clinTrain=clinTrain, mrnaTrain=mrnaTrain, mirnaTrain=mirnaTrain, protTrain=protTrain, methTrain=methTrain, pam50Train=pam50Train,
clinTest=clinTest, mrnaTest=mrnaTest, mirnaTest=mirnaTest, methTest=methTest, pam50Test=pam50Test,
protAnnotation=protAnnotation, methAnnotation=methAnnotation,
file = paste0(WhereAmI, "trainTestDatasets.RDATA"))
| /brca/0-compileDatasets_brca.R | no_license | singha53-zz/diablo_datasets | R | false | false | 10,476 | r | #################################################
#
# compileDatasets_brca.R
# Date: April 01, 2016
# Author: Amrit Singh
#
#################################################
WhereAmI <- "/Volumes/Data/mac files Dec 05 2016 update/Oz.TRI/TRI/Breast Cancer/NewAnalysis/data/"
## Import datasets
###----------------------------
#
# 1) Clinical Data
#
###----------------------------
# Clinical dataset
clinical <- as.data.frame(t(read.delim(paste0(WhereAmI, "firebrowse/gdac.broadinstitute.org_BRCA.Merge_Clinical.Level_1.2015110100.0.0/BRCA.clin.merged.txt"))))
colnames(clinical) <- as.character(as.matrix(clinical[1,]))
clinDat <- clinical[-1,]
rownames(clinDat) <- gsub("-", ".", toupper(clinDat$patient.patient_id))
length(rownames(clinDat)); length(unique(rownames(clinDat))); dim(clinDat);
###----------------------------
#
# Genes
#
###----------------------------
## gene expression
genExp <- read.delim(paste0(WhereAmI, "firebrowse/gdac.broadinstitute.org_BRCA.Merge_rnaseqv2__illuminahiseq_rnaseqv2__unc_edu__Level_3__RSEM_genes_normalized__data.Level_3.2015110100.0.0/BRCA.rnaseqv2__illuminahiseq_rnaseqv2__unc_edu__Level_3__RSEM_genes_normalized__data.data.txt"))
genExp2 <- genExp[-c(1:30), ]
genNames <- unlist(lapply(strsplit(as.character(genExp2[, 1]), "|", fixed=TRUE), function(i) i[1]))
genNames[which(genNames == "SLC35E2")[2]] <- "SLC35E2.rep"
rownames(genExp2) <- genNames
genEset <- genExp2[, -1]
dim(genEset)
dim(genEset)
table(colnames(genEset))
table(unlist(lapply(strsplit(colnames(genEset), "\\."), function(i) i[4]))) # 1098 01A
genEset2 <- genEset[, grep("01A", colnames(genEset))]
dim(genEset); dim(genEset2);
colnames(genEset2) <- unlist(lapply(strsplit(colnames(genEset2), "\\."), function(i) i[3]))
# number of common subjects between clinical and gene expression
length(intersect(rownames(clinDat), colnames(genEset2))) ## 1080
###----------------------------
#
# Proteins
#
###----------------------------
prot <- read.delim(paste0(WhereAmI, "firebrowse/gdac.broadinstitute.org_BRCA.Merge_protein_exp__mda_rppa_core__mdanderson_org__Level_3__protein_normalization__data.Level_3.2015110100.0.0/BRCA.protein_exp__mda_rppa_core__mdanderson_org__Level_3__protein_normalization__data.data.txt"))[-1,]
rownames(prot) <- prot[, "Sample.REF"]
prot1 <- prot[, -1]
dim(prot1) # 142 x 410
prot2 <- matrix(0, nrow=nrow(prot1), ncol=ncol(prot1))
rownames(prot2) <- rownames(prot1)
colnames(prot2) <- colnames(prot1)
for(i in 1:nrow(prot2)){
prot2[i, ] <- as.numeric(as.matrix(prot1[i, ]))
}
prot3 <- prot2[, grep("01A", colnames(prot2))]
dim(prot3) # 142 proteins x 403 samples
colnames(prot3) <- unlist(lapply(strsplit(colnames(prot3), "\\."), function(x) x[3]))
# number of common subjects between clinical and gene expression
length(Reduce(intersect, list(rownames(clinDat), colnames(genEset2), colnames(prot3)))) ## 400
###----------------------------
#
# microRNA
#
###----------------------------
### miRNA
## HiSeq
mirnaHiseq <- read.delim(paste0(WhereAmI, "firebrowse/gdac.broadinstitute.org_BRCA.Merge_mirnaseq__illuminahiseq_mirnaseq__bcgsc_ca__Level_3__miR_gene_expression__data.Level_3.2015110100.0.0/BRCA.mirnaseq__illuminahiseq_mirnaseq__bcgsc_ca__Level_3__miR_gene_expression__data.data.txt"))
dim(mirnaHiseq)
mirnaHiseq2 <- mirnaHiseq[-1, seq(2,ncol(mirnaHiseq), by=3)]
rownames(mirnaHiseq2) <- mirnaHiseq[-1, 1]
dim(mirnaHiseq2)
length(unique(colnames(mirnaHiseq2)))
## GA
mirnaGA <- read.delim(paste0(WhereAmI, "firebrowse/gdac.broadinstitute.org_BRCA.Merge_mirnaseq__illuminaga_mirnaseq__bcgsc_ca__Level_3__miR_gene_expression__data.Level_3.2015110100.0.0/BRCA.mirnaseq__illuminaga_mirnaseq__bcgsc_ca__Level_3__miR_gene_expression__data.data.txt"))
dim(mirnaGA)
mirnaGA2 <- mirnaGA[-1, seq(2,ncol(mirnaGA), by=3)]
rownames(mirnaGA2) <- mirnaGA[-1, 1]
dim(mirnaGA2)
all(rownames(mirnaHiseq2) == rownames(mirnaGA2))
mirna <- cbind(mirnaHiseq2, mirnaGA2)
dim(mirna)
mirna2 <- apply(mirna, 2, as.numeric)
rownames(mirna2) <- rownames(mirna)
mirna3 <- mirna2[, grep("01A", colnames(mirna2))]
dim(mirna3)
colnames(mirna3) <- unlist(lapply(strsplit(colnames(mirna3), "\\."), function(x) x[3]))
# number of common subjects between clinical and gene, proteins and miRNA
length(Reduce(intersect, list(rownames(clinDat), colnames(genEset2), colnames(prot3), colnames(mirna3)))) ## 388
###----------------------------
#
# CpG dataset
#
###----------------------------
## Illumina 27K array
meth.27 <- read.delim(paste0(WhereAmI, "firebrowse/gdac.broadinstitute.org_BRCA.Merge_methylation__humanmethylation27__jhu_usc_edu__Level_3__within_bioassay_data_set_function__data.Level_3.2015110100.0.0/BRCA.methylation__humanmethylation27__jhu_usc_edu__Level_3__within_bioassay_data_set_function__data.data.txt"))
## Keep the 4 annotation columns (probe id, gene symbol, chromosome, genomic
## coordinate) plus the per-sample Beta_value columns, which are located via
## the sub-header stored in row 1.
meth.27.new <- meth.27[, c(1, 3, 4, 5, grep("Beta_value", as.character(as.matrix(meth.27[1,]))))]
meth.27.new2 <- meth.27.new[-1, ]  # drop the sub-header row
colnames(meth.27.new2) <- colnames(meth.27.new)
colnames(meth.27.new2)[1:4] <- c("Composite Element REF", "Gene_Symbol", "Chromosome", "Genomic_Coordinate")
## NOTE(review): only columns 1:2 are protected below, so the Chromosome and
## Genomic_Coordinate columns are also run through as.numeric(); chromosome
## labels such as "X"/"Y" would become NA there -- verify this is intended.
cg.genSym <- meth.27.new2[, 1:2]
other <- apply(meth.27.new2[, -c(1:2)], 2, as.numeric)
cg.genSym.other <- cbind.data.frame(cg.genSym, other)
#saveRDS(cg.genSym.other, paste0(WhereAmI, "firebrowse/meth.27.rds"))
## Illumina 450K
## convert the large methylation datafile in smaller files using the following linux command
## split -l 100000 BRCA.methylation__humanmethylation450__jhu_usc_edu__Level_3__within_bioassay_data_set_function__data.data.txt tcga.meth-
fileNames <- list.files(paste0(WhereAmI, "firebrowse/gdac.broadinstitute.org_BRCA.Merge_methylation__humanmethylation450__jhu_usc_edu__Level_3__within_bioassay_data_set_function__data.Level_3.2015110100.0.0/splitMethDatasets"),
                        full.names = TRUE)
## import the first file separately since it contains the header;
## `ind` (annotation + Beta_value column positions) is computed once here and
## reused for every subsequent chunk, which assumes all chunks share the
## same column layout (they do, since they come from one split file).
meth.450.aa <- read.delim(fileNames[1])
ind <- c(1, 3, 4, 5, grep("Beta_value", as.character(as.matrix(meth.450.aa[1,]))))
meth.450.new <- meth.450.aa[, ind]
meth.450.new2 <- meth.450.new[-1, ]  # drop the sub-header row
colnames(meth.450.new2) <- colnames(meth.450.new)
colnames(meth.450.new2)[1:4] <- c("Composite Element REF", "Gene_Symbol", "Chromosome", "Genomic_Coordinate")
cg.genSym <- meth.450.new2[, 1:2]
other <- apply(meth.450.new2[, -c(1:2)], 2, as.numeric)
cg.genSym.other <- cbind.data.frame(cg.genSym, other)
## import the rest of the files
## (methList[[1]] stays NULL because the loop starts at 2; do.call(rbind, .)
## silently drops NULL elements, so this is harmless.)
methList <- list()
for(i in 2 : length(fileNames)){
  meth.450 <- read.delim(fileNames[i], header = FALSE)
  meth.450 <- meth.450[, ind]
  colnames(meth.450) <- colnames(cg.genSym.other)
  methList[[i]] <- meth.450
}
## NOTE(review): methList is rebound from a list to a data.frame here -- the
## name is misleading from this point on.
methList <- rbind(cg.genSym.other, do.call(rbind, methList))
#saveRDS(methList, paste0(WhereAmI, "firebrowse/meth.450.rds"))
## combine meth.27 and meth.450
meth.27 <- readRDS(paste0(WhereAmI, "firebrowse/meth.27.rds"))
dim(meth.27); # 27578 x 347
dim(methList); # 485577 x 889
length(meth.27$`Composite Element REF`)
length(unique(as.character(meth.27$`Composite Element REF`)))
length(methList$`Composite Element REF`)
length(unique(as.character(methList$`Composite Element REF`)))
rownames(meth.27) <- as.character(meth.27$`Composite Element REF`)
rownames(methList) <- as.character(methList$`Composite Element REF`)
## NOTE(review): despite the name, comSubj holds the CpG *probes* shared by
## the two platforms (rows), not subjects.
comSubj <- intersect(rownames(meth.27), rownames(methList))
length(comSubj); # 25978
## cbind keeps both platforms' annotation columns; the duplicate set is
## removed downstream (see the combined.meth processing step).
meth <- cbind(meth.27[comSubj, ], methList[comSubj, ])
dim(meth) # 25978 1236
saveRDS(meth, paste0(WhereAmI, "firebrowse/combined.meth.rds"))
colnames(meth)[1:5]
## methylation dataset
meth0 <- readRDS(paste0(WhereAmI, "firebrowse/combined.meth.rds"))
meth <- meth0[, -c(348:351)] ## remove duplicate cpg ids, gene symbols, chromosome, genomic coordinates (4 columns)
dim(meth)
ncol(meth) - 4 # number of samples (removed the first 4 columns)
## Keep primary-tumour ("01A") aliquots only.
meth2 <- meth[, grep("01A", colnames(meth))]
dim(meth2)
## Shorten barcodes to the patient id (3rd dot-separated field).
colnames(meth2) <- unlist(lapply(strsplit(colnames(meth2), "\\."), function(x) x[3]))
# number of common subjects between clinical and gene, proteins and miRNA and cpg
length(Reduce(intersect, list(rownames(clinDat), colnames(genEset2), colnames(prot3), colnames(mirna3), colnames(meth2)))) ## 387
###----------------------------
#
# PAM50 labels
#
###----------------------------
pam50 <- read.delim(paste0(WhereAmI, "BRCA.1182_pam50scores.txt"), row.names = 1)
dim(pam50)
## Keep primary-tumour ("01A") samples and shorten the barcode to the patient
## id (3rd dash-separated field) to match the other data types.
pam50.new <- pam50[grep("01A", rownames(pam50)), ]
dim(pam50.new)
rownames(pam50.new) <- unlist(lapply(strsplit(rownames(pam50.new), "-"), function(x) x[3]))
# number of common subjects between clinical and gene, proteins and miRNA and cpg
length(Reduce(intersect, list(rownames(clinDat), colnames(genEset2), colnames(prot3),
                              colnames(mirna3), colnames(meth2), rownames(pam50.new)))) ## 387
comSubjects <- Reduce(intersect, list(rownames(clinDat), colnames(genEset2), colnames(prot3),
                                      colnames(mirna3), colnames(meth2), rownames(pam50.new)))
###----------------------------
#
# 1) Training dataset (limiting dataset proteomics)
#
###----------------------------
## Training set = subjects present in ALL data types; proteomics is the
## smallest platform, hence "limiting dataset".
clinTrain <- clinDat[comSubjects, ]
mrnaTrain <- genEset2[, comSubjects]
mirnaTrain <- mirna3[, comSubjects]
protTrain <- prot3[, comSubjects]
methTrain <- meth2[, comSubjects]
pam50Train <- pam50.new[comSubjects ,]
table(pam50Train$Call)
## Feature annotation for proteins (antibody -> gene) and CpGs (probe ->
## gene/chromosome/coordinate, taken from the combined methylation table).
protAnnotation <- read.delim(paste0(WhereAmI, "firebrowse/gdac.broadinstitute.org_BRCA.RPPA_AnnotateWithGene.Level_3.2015110100.0.0/BRCA.antibody_annotation.txt"))
rownames(protAnnotation) <- protAnnotation[, "Composite.Element.REF"]
methAnnotation <- meth[, c("Composite Element REF", "Gene_Symbol", "Chromosome", "Genomic_Coordinate")]
###----------------------------
#
# 2) Test dataset
#
###----------------------------
## Test set = subjects with every data type EXCEPT proteomics (prot3 is
## intentionally left out of the intersect), minus the training subjects.
## Note: comSubjects is reused/overwritten here.
comSubjects <- setdiff(Reduce(intersect, list(rownames(clinDat), colnames(genEset2),
                                              colnames(mirna3), colnames(meth2), rownames(pam50.new))), rownames(clinTrain))
length(comSubjects)
clinTest <- clinDat[comSubjects, ]
mrnaTest <- genEset2[, comSubjects]
mirnaTest <- mirna3[, comSubjects]
methTest <- meth2[, comSubjects]
pam50Test <- pam50.new[comSubjects ,]
table(pam50Test$Call)
## NOTE(review): save() with name=value pairs only works here because each
## name matches its value's symbol; the conventional spelling is
## save(clinTrain, mrnaTrain, ..., file = ...) or save(list = c(...), file = ...).
save(clinTrain=clinTrain, mrnaTrain=mrnaTrain, mirnaTrain=mirnaTrain, protTrain=protTrain, methTrain=methTrain, pam50Train=pam50Train,
     clinTest=clinTest, mrnaTest=mrnaTest, mirnaTest=mirnaTest, methTest=methTest, pam50Test=pam50Test,
     protAnnotation=protAnnotation, methAnnotation=methAnnotation,
     file = paste0(WhereAmI, "trainTestDatasets.RDATA"))
|
test_that("can use configuration file via options", {
  # Write a minimal one-field config to a temp file.
  yaml_body <- glue::glue("
    default:
      a_field: a_value
  ")
  path_to_config <- tempfile()
  readr::write_lines(yaml_body, path_to_config)
  # Point get_param() at the config through the r2dii_config option;
  # withr restores the previous option value when the test exits.
  withr::local_options(list(r2dii_config = path_to_config))
  expect_equal(get_param("a_field")(), "a_value")
})
test_that("outputs the expected value", {
  # Passing the config path explicitly bypasses the r2dii_config option.
  tmp <- tempfile()
  contents <- glue::glue("
    default:
      a_field: a_value
  ")
  readr::write_lines(contents, tmp)
  expect_equal(get_param("a_field")(tmp), "a_value")
})
test_that("with inexistent field outputs NULL", {
  # Default behaviour: a field absent from the config comes back as NULL
  # rather than raising an error.
  cfg <- glue::glue("
    default:
      a_field: a_value
  ")
  cfg_path <- tempfile()
  readr::write_lines(cfg, cfg_path)
  expect_equal(get_param("bad_field")(cfg_path), NULL)
})
test_that("with NULL value and if_null = stop, errors gracefully", {
  # `a_field` is present but has no value, i.e. it parses to NULL.
  config_file <- glue::glue("
    default:
      a_field:
  ")
  config_path <- tempfile()
  readr::write_lines(config_file, config_path)
  # With the default if_null, a NULL-valued field is returned silently.
  # (Was "bad_field", which exercises a *missing* field -- already covered by
  # the previous test -- rather than the NULL-valued field this test is
  # named for.)
  expect_error(get_param("a_field")(config_path), NA)
  # With if_null = stop, the same NULL-valued field must raise an error.
  expect_error(
    get_param("a_field", if_null = stop)(config_path),
    "must be not `NULL`"
  )
})
| /tests/testthat/test-get_param.R | permissive | fiona511/PACTA_analysis | R | false | false | 1,194 | r | test_that("can use configuration file via options", {
config_file <- glue::glue("
default:
a_field: a_value
")
config_path <- tempfile()
readr::write_lines(config_file, config_path)
withr::local_options(list(r2dii_config = config_path))
expect_equal(get_param("a_field")(), "a_value")
})
test_that("outputs the expected value", {
config_file <- glue::glue("
default:
a_field: a_value
")
config_path <- tempfile()
readr::write_lines(config_file, config_path)
expect_equal(get_param("a_field")(config_path), "a_value")
})
test_that("with inexistent field outputs NULL", {
config_file <- glue::glue("
default:
a_field: a_value
")
config_path <- tempfile()
readr::write_lines(config_file, config_path)
expect_equal(get_param("bad_field")(config_path), NULL)
})
test_that("with NULL value and if_null = stop, errors gracefully", {
config_file <- glue::glue("
default:
a_field:
")
config_path <- tempfile()
readr::write_lines(config_file, config_path)
expect_error(get_param("bad_field")(config_path), NA)
expect_error(
get_param("bad_field", if_null = stop)(config_path),
"must be not `NULL`"
)
})
|
# Season scores --------
# compute score between any 2 teams during regular season
# Simulates a single regular-season game and returns the final score.
# Depends on globals: `teamsPredicted` (per-team predicted avg points scored /
# allowed, keyed by TeamCode), `home_away_factor` (total home-court edge in
# points, split +/- between the two sides) and `sigma` (score sd per team).
# Args:
#   team_home, team_away: team codes matching teamsPredicted$TeamCode.
# Returns: numeric vector c(home_points, away_points, number_of_overtimes).
# Stochastic: every call draws from rnorm(), so results differ between calls
# unless the caller fixes the RNG seed.
.calculateScore <- function(team_home,team_away){
  # Single game simulation ----------------
  #team_home <- "PHI"
  #team_away <- "BOS"
  # ---------------------------------------
  mean_predicted <- mean(c(teamsPredicted$TEAM_PTS,teamsPredicted$TEAM_PTSAG))
  # teamsPredicted contain predicted avg PTS and avg PTS Against per team for a new season
  teamH <- filter(teamsPredicted, TeamCode == team_home)
  teamA <- filter(teamsPredicted, TeamCode == team_away)
  # Define both Normal distributions. Empirical home-away difference is approx (2*home_away_factor) 6 points (+3, -3)
  # Each side's mean = own offence + opponent's defence, centred on the league
  # mean and shifted by half the home-court factor.
  muH <- teamH$TEAM_PTS + home_away_factor/2 + teamA$TEAM_PTSAG - mean_predicted
  muA <- teamA$TEAM_PTS - home_away_factor/2 + teamH$TEAM_PTSAG - mean_predicted
  pointsH <- round(rnorm(1,muH,sigma),0)
  pointsA <- round(rnorm(1,muA,sigma),0)
  numOT <- 0
  while (abs(pointsH-pointsA)<1){ # overtime tie-breaker
    # OT means scaled by 5/48 (presumably a 5-minute overtime vs a 48-minute
    # game -- confirm) with the sd cut to a third.
    extraH <- round(rnorm(1,muH*5/48,sigma/3),0)
    extraA <- round(rnorm(1,muA*5/48,sigma/3),0)
    pointsH <- pointsH + extraH
    pointsA <- pointsA + extraA
    numOT <- numOT + 1
  }
  #print(paste0(team_home,": ",pointsH," vs. ",team_away,": ",pointsA))
  return(c(pointsH,pointsA,numOT))
}
# Analytical probability that the home team beats the away team.
# Both teams' scores are modelled as Normals with common sd `sigma` (global);
# the means combine own offence, opponent defence and half the home-court
# factor, centred on the league-wide predicted average.
# Returns P(home score - away score > 0); deterministic.
.calculateWinProbability <- function(data = teamsPredicted,team_home,team_away,home_away_f = home_away_factor){
  # League-wide average of predicted points scored/allowed, used for centring.
  league_avg <- mean(c(data$TEAM_PTS, data$TEAM_PTSAG))
  home <- filter(data, TeamCode == team_home)
  away <- filter(data, TeamCode == team_away)
  # Expected points for each side.
  mu_home <- home$TEAM_PTS + home_away_f/2 + away$TEAM_PTSAG - league_avg
  mu_away <- away$TEAM_PTS - home_away_f/2 + home$TEAM_PTSAG - league_avg
  # The score difference of two independent Normals has sd sqrt(2)*sigma;
  # the win probability is the mass above zero.
  # (A simulated equivalent, for cross-checking:
  #  length(which(rnorm(100000, mu_home - mu_away, sqrt(2)*sigma) > 0)) / 100000)
  1 - pnorm(0, mu_home - mu_away, sqrt(2) * sigma)
}
# Simulates scores for every game on the season schedule.
# Args:
#   real: if TRUE, use the global `realSeasonSchedule` (Date and StartTime
#         collapsed into one Date column); otherwise use the global
#         `seasonSchedule`.
# Returns: a data.frame with one row per game and columns
#   V1 = home points, V2 = away points, V3 = number of overtimes.
# Depends on .calculateScore() (stochastic) and the schedule globals.
# Columns 2 and 3 of the schedule are assumed to be the home and away team
# codes -- TODO confirm against the schedule builders.
.computeScores <- function(real=FALSE){
  # Load season schedule
  if (real){
    season <- realSeasonSchedule %>%
      mutate(Date = paste(Date,StartTime)) %>%
      dplyr::select(-StartTime)
  } else {
    season <- seasonSchedule
  }
  # Simulate each game in schedule order (sequential lapply keeps the RNG
  # stream identical to the original loop) and bind the results once, instead
  # of growing the data.frame cell by cell, which copies it on every write
  # (O(n^2)). seq_len() also handles an empty schedule safely.
  games <- lapply(seq_len(nrow(season)), function(i) {
    .calculateScore(season[i, 2], season[i, 3])
  })
  scores <- as.data.frame(do.call(rbind, games))
  return(scores)
}
# Simulates a full regular season and builds cumulative standings per day.
# Args:
#   real: if TRUE, score the real schedule (`realSeasonSchedule`); otherwise
#         generate a synthetic schedule with .seasonSchedule().
# Returns: a 2-element list:
#   [[1]] list with one cumulative standings data.frame per competition day;
#   [[2]] the season data.frame of simulated game scores.
# Depends on globals `realSeasonSchedule`, `conferences` and on the helpers
# .computeScores() / .seasonSchedule(); stochastic.
# NOTE(review): reseeding from Sys.time() below defeats any seed the caller
# may have set, so runs are never reproducible -- consider making it optional.
.standings <- function(real=FALSE) {
  set.seed(as.integer(Sys.time())) # always a different seed
  # compute all scores for regular season
  if (real){
    regSeasonScores <- .computeScores(real=TRUE)
    season <- bind_cols(realSeasonSchedule,regSeasonScores)
    names(season) <- c("day","time","home_team","away_team","home_points","away_points","numOT")
    # Real schedule: "days" are date strings, so the_standings below is
    # filled by name rather than by integer position.
    datesRange <- unique(season$day)
  } else {
    regSeasonScores <- .computeScores()
    # NOTE(review): the scores above were simulated from the *global*
    # seasonSchedule, yet a fresh schedule is generated here and column-bound
    # to them; if .seasonSchedule() is not deterministic the pairing is
    # inconsistent -- confirm.
    seasonSchedule <- .seasonSchedule()
    season <- bind_cols(seasonSchedule,regSeasonScores)
    names(season) <- c("day","home_team","away_team","home_points","away_points","numOT")
    datesRange <- c(1:tail(season,1)$day)
  }
  # compute standings by day for regular season
  the_standings <- list() # standings is a list in which each day of competition is a data.frame
  # One row per team; every counter starts at zero and is updated game by game.
  standings_aux <- data.frame(team = conferences$Team, teamCode = conferences$TeamCode,
                              conference = conferences$Conference, win = 0, lose = 0,
                              win_home = 0, lose_home = 0, win_home_perc = 0,
                              win_conf = 0, lose_conf = 0, win_conf_perc = 0,
                              tot_pts = 0, avg_pts = 0, tot_pts_ag = 0, avg_pts_ag = 0,
                              streak = 0)
  for (i in datesRange){
    thisDay <- filter(season,day == i)
    for (j in 1:nrow(thisDay)){
      # Current cumulative rows for the two teams playing game j; they are
      # updated locally and written back at the end of the iteration.
      HT <- standings_aux[standings_aux$teamCode==thisDay$home_team[j],]
      AT <- standings_aux[standings_aux$teamCode==thisDay$away_team[j],]
      if (thisDay$home_points[j] > thisDay$away_points[j]){ # home team wins
        HT$win <- HT$win + 1
        AT$lose <- AT$lose + 1
        HT$win_home <- HT$win_home + 1
        HT$win_home_perc <- round(HT$win_home/(HT$win_home + HT$lose_home),2)
        # conference wins/losses only counted for intra-conference games
        HT$win_conf <- ifelse(HT$conference==AT$conference,HT$win_conf + 1,HT$win_conf)
        AT$lose_conf <- ifelse(HT$conference==AT$conference,AT$lose_conf + 1,AT$lose_conf)
        HT$win_conf_perc <- round(HT$win_conf/(HT$win_conf + HT$lose_conf),2)
        # streak: positive = consecutive wins, negative = consecutive losses
        HT$streak <- ifelse(HT$streak <= 0,1,HT$streak + 1)
        AT$streak <- ifelse(AT$streak >= 0,-1,AT$streak - 1)
      } else { # away team wins
        AT$win <- AT$win + 1
        HT$lose <- HT$lose + 1
        HT$lose_home <- HT$lose_home + 1
        AT$win_home_perc <- round(AT$win_home/(AT$win_home + AT$lose_home),2)
        AT$win_conf <- ifelse(AT$conference==HT$conference,AT$win_conf + 1,AT$win_conf)
        HT$lose_conf <- ifelse(HT$conference==AT$conference,HT$lose_conf + 1,HT$lose_conf)
        AT$win_conf_perc <- round(AT$win_conf/(AT$win_conf + AT$lose_conf),2)
        AT$streak <- ifelse(AT$streak <= 0,1,AT$streak + 1)
        HT$streak <- ifelse(HT$streak >= 0,-1,HT$streak - 1)
      }
      # points don't depend on outcome of game
      HT$tot_pts <- HT$tot_pts + thisDay$home_points[j]
      HT$tot_pts_ag <- HT$tot_pts_ag + thisDay$away_points[j]
      HT$avg_pts <- round(HT$tot_pts/(HT$win + HT$lose),1)
      HT$avg_pts_ag <- round(HT$tot_pts_ag/(HT$win + HT$lose),1)
      AT$tot_pts <- AT$tot_pts + thisDay$away_points[j]
      AT$tot_pts_ag <- AT$tot_pts_ag + thisDay$home_points[j]
      AT$avg_pts <- round(AT$tot_pts/(AT$win + AT$lose),1)
      AT$avg_pts_ag <- round(AT$tot_pts_ag/(AT$win + AT$lose),1)
      standings_aux[standings_aux$teamCode==thisDay$home_team[j],] <- HT
      standings_aux[standings_aux$teamCode==thisDay$away_team[j],] <- AT
    }
    # snapshot of the cumulative table after this day's games
    the_standings[[i]] <- standings_aux
  }
  return(list(the_standings,season)) # list of standings (list) and reg season scores (data.frame)
}
# Conference standings table for a given day, sorted by winning percentage.
# Reads the day's cumulative standings from the global `regSeasonOutcome`
# (element [[1]] = list of per-day standings), keeps the requested
# conference, renames the columns for display and rounds numeric values
# to one decimal.
.getConferenceStandings <- function(conf,day){
  daily <- regSeasonOutcome[[1]][[day]]
  daily %>%
    select(conference, team, W = win, L = lose,
           `%W Home` = win_home_perc, `%W Conf` = win_conf_perc,
           PTS = avg_pts, PTSA = avg_pts_ag, Strk = streak) %>%
    filter(conference == conf) %>%
    arrange(desc(W/(W + L))) %>%
    select(-conference) %>%
    mutate_if(is.numeric, function(x) round(x,1))
}
# All simulated games for the requested day, formatted as "AWY @ HOM" with
# the away (A) and home (H) scores. `conf` is accepted but not used here
# (kept for interface parity with the other .get* helpers).
# Depends on the global `regSeasonOutcome` (element [[2]] = season scores).
.getGames <- function(conf,this_day){
  all_games <- regSeasonOutcome[[2]]
  all_games %>%
    filter(day == this_day) %>%
    dplyr::select(away_team, home_team, away_points, home_points) %>%
    mutate(game = paste0(away_team, " @ ", home_team)) %>%
    dplyr::select(game, A = away_points, H = home_points)
}
# Games on a given day together with the home team's analytical win
# probability. Depends on globals `regSeasonOutcome` and `teamsPredicted`.
# `conf` is accepted but unused (kept for interface parity with .getGames).
# group_by(game) makes the mutate() run once per game, so
# .calculateWinProbability() -- which expects scalar team codes -- receives a
# single home/away pair at a time. Note: the result is returned still
# grouped by `game`.
.getGameProbability <- function(conf,this_day){
  games <- regSeasonOutcome[[2]]
  #day <- length(standings)
  confPredGamesProbs <- filter(games,day==this_day) %>%
    select(away_team,home_team,away_points,home_points) %>%
    mutate(game = paste0(away_team," @ ",home_team)) %>%
    group_by(game) %>%
    mutate(Prob = .calculateWinProbability(teamsPredicted,home_team,away_team)) %>%
    select(game,A=away_points,H=home_points,Prob)
  return(confPredGamesProbs)
}
# Builds the full head-to-head win-probability table: one row per home team,
# one column per away team (plus the leading Home_Team column), reshaped wide
# with tidyr::spread(). Depends on globals `teamDashboard` (team codes in
# $Tm) and `teamsPredicted`; deterministic.
.winProbability_matrix <- function(){
  teams <- teamDashboard$Tm
  # Every ordered (home, away) pair. expand.grid varies its first argument
  # fastest, reproducing the original nested-loop order (away team inner);
  # building the frame up-front avoids growing a data.frame one cell at a
  # time, which copies it on every write (O(n^2)).
  prob_matrix <- expand.grid(Away_Team = teams, Home_Team = teams,
                             stringsAsFactors = FALSE,
                             KEEP.OUT.ATTRS = FALSE)[, c("Home_Team", "Away_Team")]
  prob_matrix$Win_Prob <- mapply(
    function(h, a) .calculateWinProbability(teamsPredicted, h, a),
    prob_matrix$Home_Team, prob_matrix$Away_Team,
    USE.NAMES = FALSE
  )
  prob_matrix2 <- spread(prob_matrix, Away_Team, Win_Prob)
  return(prob_matrix2)
}
| /helper_functions/regular_season.R | no_license | asRodelgo/NBA | R | false | false | 8,441 | r | # Season scores --------
# compute score between any 2 teams during regular season
.calculateScore <- function(team_home,team_away){
# Single game simulation ----------------
#team_home <- "PHI"
#team_away <- "BOS"
# ---------------------------------------
mean_predicted <- mean(c(teamsPredicted$TEAM_PTS,teamsPredicted$TEAM_PTSAG))
# teamsPredicted contain predicted avg PTS and avg PTS Against per team for a new season
teamH <- filter(teamsPredicted, TeamCode == team_home)
teamA <- filter(teamsPredicted, TeamCode == team_away)
# Define both Normal distributions. Empirical home-away difference is approx (2*home_away_factor) 6 points (+3, -3)
muH <- teamH$TEAM_PTS + home_away_factor/2 + teamA$TEAM_PTSAG - mean_predicted
muA <- teamA$TEAM_PTS - home_away_factor/2 + teamH$TEAM_PTSAG - mean_predicted
pointsH <- round(rnorm(1,muH,sigma),0)
pointsA <- round(rnorm(1,muA,sigma),0)
numOT <- 0
while (abs(pointsH-pointsA)<1){ # overtime tie-breaker
extraH <- round(rnorm(1,muH*5/48,sigma/3),0)
extraA <- round(rnorm(1,muA*5/48,sigma/3),0)
pointsH <- pointsH + extraH
pointsA <- pointsA + extraA
numOT <- numOT + 1
}
#print(paste0(team_home,": ",pointsH," vs. ",team_away,": ",pointsA))
return(c(pointsH,pointsA,numOT))
}
.calculateWinProbability <- function(data = teamsPredicted,team_home,team_away,home_away_f = home_away_factor){
mean_predicted <- mean(c(data$TEAM_PTS,data$TEAM_PTSAG))
# teamsPredicted contain predicted avg PTS and avg PTS Against per team for a new season
teamH <- filter(data, TeamCode == team_home)
teamA <- filter(data, TeamCode == team_away)
# Define both Normal distributions. Empirical home-away difference is approx (2*home_away_factor) 6 points (+3, -3)
#muH <- teamH$TEAM_PTS + home_away_f/2 + teamA$TEAM_PTSAG - global_mean
#muA <- teamA$TEAM_PTS - home_away_f/2 + teamH$TEAM_PTSAG - global_mean
muH <- teamH$TEAM_PTS + home_away_f/2 + teamA$TEAM_PTSAG - mean_predicted
muA <- teamA$TEAM_PTS - home_away_f/2 + teamH$TEAM_PTSAG - mean_predicted
prob_HvsA <- 1-pnorm(0,muH-muA,sqrt(2)*sigma)
# equivalent simulated probability (to double check analytical probability)
# prob_HvsA_sim <- length(which(rnorm(100000,muH-muA,sqrt(2)*sigma)>0))/100000
return(prob_HvsA)
}
# Simulates scores for every scheduled game; returns a data.frame with one
# row per game (V1 = home pts, V2 = away pts, V3 = overtimes).
# NOTE(review): duplicate of the earlier definition; also note the
# data.frame is grown cell by cell in the loop, which copies it on every
# write (O(n^2)) -- see the earlier copy for the preferred form.
.computeScores <- function(real=FALSE){
  # Load season schedule
  if (real){
    season <- realSeasonSchedule %>%
      mutate(Date = paste(Date,StartTime)) %>%
      dplyr::select(-StartTime)
  } else {
    season <- seasonSchedule
  }
  # calculate all scores
  scores <- data.frame()
  for (i in 1:nrow(season)){
    thisGame <- .calculateScore(season[i,2],season[i,3])
    scores[i,1] <- thisGame[1]
    scores[i,2] <- thisGame[2]
    scores[i,3] <- thisGame[3]
  }
  return(scores)
}
# Simulates a full season and builds cumulative standings per day; returns
# list(per-day standings list, season scores data.frame).
# NOTE(review): duplicate of the earlier definition; reseeding from
# Sys.time() makes runs non-reproducible.
.standings <- function(real=FALSE) {
  set.seed(as.integer(Sys.time())) # always a different seed
  # compute all scores for regular season
  if (real){
    regSeasonScores <- .computeScores(real=TRUE)
    season <- bind_cols(realSeasonSchedule,regSeasonScores)
    names(season) <- c("day","time","home_team","away_team","home_points","away_points","numOT")
    datesRange <- unique(season$day)
  } else {
    regSeasonScores <- .computeScores()
    seasonSchedule <- .seasonSchedule()
    season <- bind_cols(seasonSchedule,regSeasonScores)
    names(season) <- c("day","home_team","away_team","home_points","away_points","numOT")
    datesRange <- c(1:tail(season,1)$day)
  }
  # compute standings by day for regular season
  the_standings <- list() # standings is a list in which each day of competition is a data.frame
  standings_aux <- data.frame(team = conferences$Team, teamCode = conferences$TeamCode,
                              conference = conferences$Conference, win = 0, lose = 0,
                              win_home = 0, lose_home = 0, win_home_perc = 0,
                              win_conf = 0, lose_conf = 0, win_conf_perc = 0,
                              tot_pts = 0, avg_pts = 0, tot_pts_ag = 0, avg_pts_ag = 0,
                              streak = 0)
  for (i in datesRange){
    thisDay <- filter(season,day == i)
    for (j in 1:nrow(thisDay)){
      HT <- standings_aux[standings_aux$teamCode==thisDay$home_team[j],]
      AT <- standings_aux[standings_aux$teamCode==thisDay$away_team[j],]
      if (thisDay$home_points[j] > thisDay$away_points[j]){ # home team wins
        HT$win <- HT$win + 1
        AT$lose <- AT$lose + 1
        HT$win_home <- HT$win_home + 1
        HT$win_home_perc <- round(HT$win_home/(HT$win_home + HT$lose_home),2)
        HT$win_conf <- ifelse(HT$conference==AT$conference,HT$win_conf + 1,HT$win_conf)
        AT$lose_conf <- ifelse(HT$conference==AT$conference,AT$lose_conf + 1,AT$lose_conf)
        HT$win_conf_perc <- round(HT$win_conf/(HT$win_conf + HT$lose_conf),2)
        HT$streak <- ifelse(HT$streak <= 0,1,HT$streak + 1)
        AT$streak <- ifelse(AT$streak >= 0,-1,AT$streak - 1)
      } else { # away team wins
        AT$win <- AT$win + 1
        HT$lose <- HT$lose + 1
        HT$lose_home <- HT$lose_home + 1
        AT$win_home_perc <- round(AT$win_home/(AT$win_home + AT$lose_home),2)
        AT$win_conf <- ifelse(AT$conference==HT$conference,AT$win_conf + 1,AT$win_conf)
        HT$lose_conf <- ifelse(HT$conference==AT$conference,HT$lose_conf + 1,HT$lose_conf)
        AT$win_conf_perc <- round(AT$win_conf/(AT$win_conf + AT$lose_conf),2)
        AT$streak <- ifelse(AT$streak <= 0,1,AT$streak + 1)
        HT$streak <- ifelse(HT$streak >= 0,-1,HT$streak - 1)
      }
      # points don't depend on outcome of game
      HT$tot_pts <- HT$tot_pts + thisDay$home_points[j]
      HT$tot_pts_ag <- HT$tot_pts_ag + thisDay$away_points[j]
      HT$avg_pts <- round(HT$tot_pts/(HT$win + HT$lose),1)
      HT$avg_pts_ag <- round(HT$tot_pts_ag/(HT$win + HT$lose),1)
      AT$tot_pts <- AT$tot_pts + thisDay$away_points[j]
      AT$tot_pts_ag <- AT$tot_pts_ag + thisDay$home_points[j]
      AT$avg_pts <- round(AT$tot_pts/(AT$win + AT$lose),1)
      AT$avg_pts_ag <- round(AT$tot_pts_ag/(AT$win + AT$lose),1)
      standings_aux[standings_aux$teamCode==thisDay$home_team[j],] <- HT
      standings_aux[standings_aux$teamCode==thisDay$away_team[j],] <- AT
    }
    the_standings[[i]] <- standings_aux
  }
  return(list(the_standings,season)) # list of standings (list) and reg season scores (data.frame)
}
# Conference standings for a given day, sorted by win percentage.
# NOTE(review): duplicate of the earlier definition.
.getConferenceStandings <- function(conf,day){
  standings <- regSeasonOutcome[[1]]
  #day <- length(standings)
  confPredStandings <- arrange(filter(select(standings[[day]], conference, team,W=win,L=lose,`%W Home`=win_home_perc,`%W Conf`=win_conf_perc,
                                             PTS=avg_pts,PTSA=avg_pts_ag,Strk=streak), conference == conf), desc(W/(W+L)))
  confPredStandings <- select(confPredStandings,-conference) %>%
    mutate_if(is.numeric, function(x) round(x,1))
  return(confPredStandings)
}
# Games for a day as "AWY @ HOM" plus scores; `conf` is unused.
# NOTE(review): duplicate of the earlier definition.
.getGames <- function(conf,this_day){
  games <- regSeasonOutcome[[2]]
  #day <- length(standings)
  confPredGames <- dplyr::select(filter(games,day==this_day), away_team,home_team,
                                 away_points,home_points) %>%
    mutate(game = paste0(away_team," @ ",home_team))
  confPredGames <- dplyr::select(confPredGames,game,A=away_points,H=home_points)
  return(confPredGames)
}
# Games for a day plus the home team's analytical win probability; the
# group_by(game) makes the mutate run once per game.
# NOTE(review): duplicate of the earlier definition.
.getGameProbability <- function(conf,this_day){
  games <- regSeasonOutcome[[2]]
  #day <- length(standings)
  confPredGamesProbs <- filter(games,day==this_day) %>%
    select(away_team,home_team,away_points,home_points) %>%
    mutate(game = paste0(away_team," @ ",home_team)) %>%
    group_by(game) %>%
    mutate(Prob = .calculateWinProbability(teamsPredicted,home_team,away_team)) %>%
    select(game,A=away_points,H=home_points,Prob)
  return(confPredGamesProbs)
}
# Wide head-to-head win-probability table (rows = home, cols = away).
# NOTE(review): duplicate of the earlier definition; it also grows the
# data.frame one cell at a time -- see the earlier copy for the preferred form.
.winProbability_matrix <- function(){
  prob_matrix <- data.frame()
  k <- 1
  for (i in 1:length(teamDashboard$Tm)){
    for (j in 1:length(teamDashboard$Tm)){
      prob_matrix[k,1] <- teamDashboard$Tm[i]
      prob_matrix[k,2] <- teamDashboard$Tm[j]
      prob_matrix[k,3] = .calculateWinProbability(teamsPredicted,teamDashboard$Tm[i],teamDashboard$Tm[j])
      k <- k + 1
    }
  }
  names(prob_matrix) <- c("Home_Team", "Away_Team", "Win_Prob")
  prob_matrix2 <- spread(prob_matrix,Away_Team,Win_Prob)
  return(prob_matrix2)
}
|
#'@title Transform ggplot2 objects into 3D
#'
#'@description Plots a ggplot2 object in 3D by mapping the color or fill aesthetic to elevation.
#'
#'Currently, this function does not transform lines mapped to color into 3D.
#'
#'If there are multiple legends/guides due to multiple aesthetics being mapped (e.g. color and shape),
#'the package author recommends that the user pass the order of the guides manually using the ggplot2 function "guides()`.
#'Otherwise, the order may change when processing the ggplot2 object and result in a mismatch between the 3D mapping
#'and the underlying plot.
#'
#'Using the shape aesthetic with more than three groups is not recommended, unless the user passes in
#'custom, solid shapes. By default in ggplot2, only the first three shapes are solid, which is a requirement to be projected
#'into 3D.
#'
#'@param ggobj ggplot object to projected into 3D.
#'@param width Default `3`. Width of ggplot, in `units`.
#'@param height Default `3`. Height of ggplot, in `units`.
#'@param height_aes Default `NULL`. Whether the `fill` or `color` aesthetic should be used for height values,
#'which the user can specify by passing either `fill` or `color` to this argument.
#'Automatically detected. If both `fill` and `color` aesthetics are present, then `fill` is default.
#'@param invert Default `FALSE`. If `TRUE`, the height mapping is inverted.
#'@param shadow_intensity Default `0.5`. The intensity of the calculated shadows.
#'@param units Default `in`. One of c("in", "cm", "mm").
#'@param scale Default `150`. Multiplier for vertical scaling: a higher number increases the height
#'of the 3D transformation.
#'@param pointcontract Default `0.7`. This multiplies the size of the points and shrinks
#'them around their center in the 3D surface mapping. Decrease this to reduce color bleed on edges, and set to
#'`1` to turn off entirely. Note: If `size` is passed as an aesthetic to the same geom
#'that is being mapped to elevation, this scaling will not be applied. If `alpha` varies on the variable
#'being mapped, you may want to set this to `1`, since the points now have a non-zero width stroke outline (however,
#'mapping `alpha` in the same variable you are projecting to height is probably not a good choice. as the `alpha`
#'variable is ignored when performing the 3D projection).
#'@param offset_edges Default `FALSE`. If `TRUE`, inserts a small amount of space between polygons for "geom_sf", "geom_tile", "geom_hex", and "geom_polygon" layers.
#'If you pass in a number, the space between polygons will be a line of that width. Note: this feature may end up removing thin polygons
#'from the plot entirely--use with care.
#'@param preview Default `FALSE`. If `TRUE`, the raytraced 2D ggplot will be displayed on the current device.
#'@param raytrace Default `FALSE`. Whether to add a raytraced layer.
#'@param sunangle Default `315` (NW). If raytracing, the angle (in degrees) around the matrix from which the light originates.
#'@param anglebreaks Default `seq(30,40,0.1)`. The azimuth angle(s), in degrees, as measured from the horizon from which the light originates.
#'@param lambert Default `TRUE`. If raytracing, changes the intensity of the light at each point based proportional to the
#'dot product of the ray direction and the surface normal at that point. Zeros out all values directed away from
#'the ray.
#'@param reduce_size Default `NULL`. A number between 0 and 1 that specifies how much to reduce the resolution of the plot, for faster plotting.
#'@param multicore Default `FALSE`. If raytracing and `TRUE`, multiple cores will be used to compute the shadow matrix. By default, this uses all cores available, unless the user has
#'set `options("cores")` in which the multicore option will only use that many cores.
#'@param save_height_matrix Default `FALSE`. If `TRUE`, the function will return the height matrix used for the ggplot.
#'@param save_shadow_matrix Default `FALSE`. If `TRUE`, the function will return the shadow matrix for use in future updates via the `shadow_cache` argument passed to `ray_shade`.
#'@param saved_shadow_matrix Default `NULL`. A cached shadow matrix (saved by the a previous invocation of `plot_gg(..., save_shadow_matrix=TRUE)` to use instead of raytracing a shadow map each time.
#'@param ... Additional arguments to be passed to `plot_3d()`.
#'@return Opens a 3D plot in rgl.
#'@import ggplot2
#'@export
#'@examples
#'library(ggplot2)
#'library(viridis)
#'
#'ggdiamonds = ggplot(diamonds, aes(x, depth)) +
#' stat_density_2d(aes(fill = stat(nlevel)), geom = "polygon", n = 100, bins = 10,contour = TRUE) +
#' facet_wrap(clarity~.) +
#' scale_fill_viridis_c(option = "A")
#'\donttest{
#'plot_gg(ggdiamonds,multicore=TRUE,width=5,height=5,scale=250,windowsize=c(1400,866),
#' zoom = 0.55, phi = 30)
#'render_snapshot()
#'}
#'
#'#Change the camera angle and take a snapshot:
#'\donttest{
#'render_camera(zoom=0.5,theta=-30,phi=30)
#'render_snapshot(clear = TRUE)
#'}
#'
#'#Contours and other lines will automatically be ignored. Here is the volcano dataset:
#'
#'ggvolcano = volcano %>%
#' reshape2::melt() %>%
#' ggplot() +
#' geom_tile(aes(x=Var1,y=Var2,fill=value)) +
#' geom_contour(aes(x=Var1,y=Var2,z=value),color="black") +
#' scale_x_continuous("X",expand = c(0,0)) +
#' scale_y_continuous("Y",expand = c(0,0)) +
#' scale_fill_gradientn("Z",colours = terrain.colors(10)) +
#' coord_fixed()
#'ggvolcano
#'
#'\donttest{
#'plot_gg(ggvolcano, multicore = TRUE, raytrace = TRUE, width = 7, height = 4,
#' scale = 300, windowsize = c(1400, 866), zoom = 0.6, phi = 30, theta = 30)
#'render_snapshot(clear = TRUE)
#'}
#'
#'#Here, we will create a 3D plot of the mtcars dataset. This automatically detects
#'#that the user used the `color` aesthetic instead of the `fill`.
#'mtplot = ggplot(mtcars) +
#' geom_point(aes(x=mpg,y=disp,color=cyl)) +
#' scale_color_continuous(limits=c(0,8))
#'
#'#Preview how the plot will look by setting `preview = TRUE`: We also adjust the angle of the light.
#'\donttest{
#'plot_gg(mtplot, width=3.5, sunangle=225, preview = TRUE)
#'}
#'
#'\donttest{
#'plot_gg(mtplot, width=3.5, multicore = TRUE, windowsize = c(1400,866), sunangle=225,
#' zoom = 0.60, phi = 30, theta = 45)
#'render_snapshot(clear = TRUE)
#'}
#'
#'#Now let's plot a density plot in 3D.
#'mtplot_density = ggplot(mtcars) +
#' stat_density_2d(aes(x=mpg,y=disp, fill=..density..), geom = "raster", contour = FALSE) +
#' scale_x_continuous(expand=c(0,0)) +
#' scale_y_continuous(expand=c(0,0)) +
#' scale_fill_gradient(low="pink", high="red")
#'mtplot_density
#'
#'\donttest{
#'plot_gg(mtplot_density, width = 4,zoom = 0.60, theta = -45, phi = 30,
#' windowsize = c(1400,866))
#'render_snapshot(clear = TRUE)
#'}
#'
#'#This also works facetted.
#'mtplot_density_facet = mtplot_density + facet_wrap(~cyl)
#'
#'#Preview this plot in 2D:
#'\donttest{
#'plot_gg(mtplot_density_facet, preview = TRUE)
#'}
#'
#'\donttest{
#'plot_gg(mtplot_density_facet, windowsize=c(1400,866),
#' zoom = 0.55, theta = -10, phi = 25)
#'render_snapshot(clear = TRUE)
#'}
#'
#'#That is a little cramped. Specifying a larger width will improve the readability of this plot.
#'\donttest{
#'plot_gg(mtplot_density_facet, width = 6, preview = TRUE)
#'}
#'
#'#That's better. Let's plot it in 3D, and increase the scale.
#'\donttest{
#'plot_gg(mtplot_density_facet, width = 6, windowsize=c(1400,866),
#' zoom = 0.55, theta = -10, phi = 25, scale=300)
#'render_snapshot(clear = TRUE)
#'}
plot_gg = function(ggobj, width = 3, height = 3,
height_aes = NULL, invert = FALSE, shadow_intensity = 0.5,
units = c("in", "cm", "mm"), scale=150, pointcontract = 0.7, offset_edges = FALSE,
preview = FALSE, raytrace = TRUE, sunangle = 315, anglebreaks = seq(30,40,0.1),
multicore = FALSE, lambert=TRUE, reduce_size = NULL, save_height_matrix = FALSE,
save_shadow_matrix = FALSE, saved_shadow_matrix=NULL, ...) {
heightmaptemp = tempfile()
colormaptemp = tempfile()
if(methods::is(ggobj,"list") && length(ggobj) == 2) {
ggplotobj2 = unserialize(serialize(ggobj[[2]], NULL))
ggsave(paste0(colormaptemp,".png"),ggobj[[1]],width = width,height = height)
} else {
ggplotobj2 = unserialize(serialize(ggobj, NULL))
ggsave(paste0(colormaptemp,".png"),ggplotobj2,width = width,height = height)
}
isfill = FALSE
iscolor = FALSE
if(is.null(height_aes)) {
for(i in seq_len(length(ggplotobj2$layers))) {
if("fill" %in% names(ggplotobj2$layers[[i]]$mapping)) {
isfill = TRUE
}
if(any(c("color","colour") %in% names(ggplotobj2$layers[[i]]$mapping))) {
iscolor = TRUE
}
}
if(!iscolor && !isfill) {
if("fill" %in% names(ggplotobj2$mapping)) {
isfill = TRUE
}
if(any(c("color","colour") %in% names(ggplotobj2$mapping))) {
iscolor = TRUE
}
}
if(isfill && !iscolor) {
height_aes = "fill"
} else if (!isfill && iscolor) {
height_aes = "colour"
} else if (isfill && iscolor) {
height_aes = "fill"
} else {
height_aes = "fill"
}
}
if(height_aes == "color") {
height_aes = "colour"
}
if(is.numeric(offset_edges)) {
polygon_offset_value = offset_edges
offset_edges = TRUE
} else {
polygon_offset_value = 0.5
}
polygon_offset_geoms = c("GeomPolygon","GeomSf", "GeomHex", "GeomTile")
other_height_type = ifelse(height_aes == "colour", "fill", "colour")
colortheme = c("line","rect","text","axis.title", "axis.title.x",
"axis.title.x.top","axis.title.y","axis.title.y.right","axis.text",
"axis.text.x" ,"axis.text.x.top","axis.text.y","axis.text.y.right",
"axis.ticks" ,"axis.ticks.length","axis.line" ,"axis.line.x",
"axis.line.y","legend.background","legend.margin","legend.spacing",
"legend.spacing.x","legend.spacing.y","legend.key" ,"legend.key.size",
"legend.key.height","legend.key.width","legend.text","legend.text.align",
"legend.title","legend.title.align","legend.position","legend.direction",
"legend.justification" ,"legend.box","legend.box.margin","legend.box.background",
"legend.box.spacing","panel.background","panel.border","panel.spacing",
"panel.spacing.x","panel.spacing.y","panel.grid" ,"panel.grid.minor",
"panel.ontop","plot.background","plot.title" ,"plot.subtitle",
"plot.caption","plot.tag","plot.tag.position","plot.margin",
"strip.background","strip.placement","strip.text" ,"strip.text.x",
"strip.text.y","strip.switch.pad.grid","strip.switch.pad.wrap","panel.grid.major",
"title","axis.ticks.length.x","axis.ticks.length.y","axis.ticks.length.x.top",
"axis.ticks.length.x.bottom","axis.ticks.length.y.left","axis.ticks.length.y.right","axis.title.x.bottom",
"axis.text.x.bottom","axis.text.y.left","axis.title.y.left")
key_theme_elements = c("text", "line", "axis.line", "axis.title",
"axis.title.x",
"axis.title.y",
"axis.text",
"axis.text.x", "axis.text.y", "axis.text.x.top", "axis.text.x.bottom",
"axis.text.y.left", "axis.text.y.right",
"axis.ticks", "strip.background", "strip.text", "legend.text", "strip.text.x","strip.text.y",
"legend.title","legend.background", "legend.title", "panel.background")
theme_bool = rep(TRUE,length(key_theme_elements))
names(theme_bool) = key_theme_elements
typetheme = c("line","rect","text","text","text",
"text","text","text","text",
"text","text","text","text",
"line","unit","line","line",
"line","rect","margin","unit",
"unit","unit","rect","unit",
"unit","unit","text","none",
"text","none","none","none",
"none","rect","margin","rect",
"unit","rect","rect","unit",
"unit","unit","line","line",
"none","rect","text","text",
"text","text","none","margin",
"rect","none","text","text",
"text","unit","unit","line",
"text","line","line","line",
"line","line","line","text",
"text","text","text")
black_white_pal = function(x) {
grDevices::colorRampPalette(c("white", "black"))(255)[x * 254 + 1]
}
white_white_pal = function(x) {
grDevices::colorRampPalette(c("white", "white"))(255)[x * 254 + 1]
}
ifelsefxn = function(entry) {
if(!is.null(entry)) {
return(entry)
}
}
#aes_with_guides = c("size","shape","colour","fill","alpha","linetype")
#Shift all continuous palettes of height_aes to black/white, and set all discrete key colors to white.
  # At least one user-supplied scale exists: repaint the height aesthetic's
  # palette with the white->black ramp (the rendered PNG is read back later
  # as the elevation matrix) while preserving the user's guide settings, and
  # force the other colour-like aesthetic onto the all-white palette.
  if(ggplotobj2$scales$n() != 0) {
    anyfound = FALSE
    #Check to see if same guide being used for both color and fill aesthetics
    # If limits, breaks, labels, and the constructing call all match, ggplot
    # would have merged the two legends; suppress the non-height guide so
    # the whitened copy does not show up as a duplicate legend.
    if(ggplotobj2$scales$has_scale("colour") && ggplotobj2$scales$has_scale("fill")) {
      fillscale = ggplotobj2$scales$get_scales("fill")
      colorscale = ggplotobj2$scales$get_scales("colour")
      same_limits = FALSE
      same_breaks = FALSE
      same_labels = FALSE
      same_calls = FALSE
      if((!is.null(fillscale$limits) && !is.null(colorscale$limits))) {
        if(fillscale$limits == colorscale$limits) {
          same_limits = TRUE
        }
      } else if (is.null(fillscale$limits) && is.null(colorscale$limits)) {
        same_limits = TRUE
      }
      if((!is.null(fillscale$breaks) && !is.null(colorscale$breaks))) {
        if(all(fillscale$breaks == colorscale$breaks)) {
          same_breaks = TRUE
        }
      } else if (is.null(fillscale$breaks) && is.null(colorscale$breaks)) {
        same_breaks = TRUE
      }
      # NOTE(review): `class(x) != "waiver"` compares against a possibly
      # multi-element class vector; `inherits(x, "waiver")` would be the
      # robust form -- behavior kept as-is here.
      if((class(fillscale$labels) != "waiver" && class(colorscale$labels) != "waiver")) {
        if(all(fillscale$labels == colorscale$labels)) {
          same_labels = TRUE
        }
      } else if ((class(fillscale$labels) == "waiver" && class(colorscale$labels) == "waiver")) {
        same_labels = TRUE
      }
      if(fillscale$call == colorscale$call) {
        same_calls = TRUE
      }
      if(same_limits && same_breaks && same_labels && same_calls) {
        if(height_aes == "fill") {
          ggplotobj2 = ggplotobj2 + guides(color = "none")
        } else {
          ggplotobj2 = ggplotobj2 + guides(fill = "none")
        }
      }
    }
    #Now check for scales and change to the b/w palette, but preserve guide traits.
    for(i in seq_len(ggplotobj2$scales$n())) {
      if(height_aes %in% ggplotobj2$scales$scales[[i]]$aesthetics) {
        ggplotobj2$scales$scales[[i]]$palette = black_white_pal
        ggplotobj2$scales$scales[[i]]$na.value = "white"
        # has_guide is TRUE when the scale's `guide` slot is *not* already a
        # constructed guide object; a logical guide slot (e.g. guide = FALSE)
        # overrides it directly below. NOTE(review): confirm this inverted
        # check matches the intended "needs a default colourbar" semantics.
        has_guide = !any("guide" %in% class(ggplotobj2$scales$scales[[i]]$guide))
        if(any(c("logical" %in% class(ggplotobj2$scales$scales[[i]]$guide)))) {
          has_guide = ggplotobj2$scales$scales[[i]]$guide
        }
        if(has_guide) {
          if(height_aes == "fill") {
            if(is.null(ggplotobj2$guides$fill)) {
              ggplotobj2 = ggplotobj2 + guides(fill = guide_colourbar(ticks = FALSE,nbin = 1000,order=i))
            } else {
              # Preserve the user's guide settings but strip the frame/ticks
              # and raise nbin so the colourbar renders smoothly.
              if(any(ggplotobj2$guides$fill != "none")) {
                copyguide = ggplotobj2$guides$fill
                copyguide$frame.linewidth = 0
                copyguide$ticks = FALSE
                copyguide$nbin = 1000
                ggplotobj2 = ggplotobj2 +
                  guides(fill = guide_colourbar(ticks = FALSE,nbin = 1000))
                ggplotobj2$guides$fill = copyguide
              }
            }
            # Point-type legend keys need the custom draw function so their
            # key glyphs survive the whitening of the colour aesthetic.
            for(j in seq_len(length(ggplotobj2$layers))) {
              if("colour" %in% names(ggplotobj2$layers[[j]]$mapping)) {
                ggplotobj2$layers[[j]]$geom$draw_key = drawkeyfunction_points
              }
            }
          } else {
            if(is.null(ggplotobj2$guides$colour)) {
              ggplotobj2 = ggplotobj2 + guides(colour = guide_colourbar(ticks = FALSE,nbin = 1000,order=i))
            } else {
              if(any(ggplotobj2$guides$colour != "none")) {
                copyguide = ggplotobj2$guides$colour
                copyguide$frame.linewidth = 0
                copyguide$ticks = FALSE
                copyguide$nbin = 1000
                ggplotobj2 = ggplotobj2 +
                  guides(colour = guide_colourbar(ticks = FALSE,nbin = 1000))
                ggplotobj2$guides$colour = copyguide
              }
            }
          }
        }
        anyfound = TRUE
      } else if(other_height_type %in% ggplotobj2$scales$scales[[i]]$aesthetics) {
        #change guides for other height_aes to be the all white palette
        ggplotobj2$scales$scales[[i]]$palette = white_white_pal
        ggplotobj2$scales$scales[[i]]$na.value = "white"
      }
    }
    #If no scales found, just add one to the ggplot object.
    if(!anyfound) {
      if(height_aes == "colour") {
        ggplotobj2 = ggplotobj2 +
          scale_color_gradientn(colours = grDevices::colorRampPalette(c("white","black"))(256), na.value = "white") +
          guides(colour = guide_colourbar(ticks = FALSE,nbin = 1000))
      }
      if(height_aes == "fill") {
        ggplotobj2 = ggplotobj2 +
          scale_fill_gradientn(colours = grDevices::colorRampPalette(c("white","black"))(256), na.value = "white") +
          guides(fill = guide_colourbar(ticks = FALSE,nbin = 1000))
      }
    }
  } else {
    #If no scales found, just add one to the ggplot object.
    # NOTE(review): this branch only runs when $scales$n() == 0, so the inner
    # `else` (the n() > 0 case) is unreachable; both branches are identical
    # anyway, so behavior is unaffected -- kept as-is.
    if(ggplotobj2$scales$n() == 0) {
      if(height_aes == "fill") {
        ggplotobj2 = ggplotobj2 + scale_fill_gradientn(colours = grDevices::colorRampPalette(c("white","black"))(256), na.value = "white") +
          guides(fill = guide_colourbar(ticks = FALSE,nbin = 1000))
      } else {
        ggplotobj2 = ggplotobj2 + scale_color_gradientn(colours = grDevices::colorRampPalette(c("white","black"))(256), na.value = "white") +
          guides(colour = guide_colourbar(ticks = FALSE,nbin = 1000))
      }
    } else {
      if(height_aes == "fill") {
        ggplotobj2 = ggplotobj2 + scale_fill_gradientn(colours = grDevices::colorRampPalette(c("white","black"))(256), na.value = "white") +
          guides(fill = guide_colourbar(ticks = FALSE,nbin = 1000))
      } else {
        ggplotobj2 = ggplotobj2 + scale_color_gradientn(colours = grDevices::colorRampPalette(c("white","black"))(256), na.value = "white") +
          guides(colour = guide_colourbar(ticks = FALSE,nbin = 1000))
      }
    }
  }
#Set all elements to white if custom theme passed, and color aesthetic geoms to size = 0 if height_aes == "fill"
  # Whiten the plot: blank out the non-height aesthetic in every layer, then
  # force each theme element to white so only the height ramp carries
  # information in the rendered height-map PNG. The first branch handles a
  # user-supplied custom theme (whitening element by element, skipping ones
  # the user configured); the second builds an all-white theme from scratch.
  if(length(ggplotobj2$theme) > 0) {
    if(height_aes == "fill") {
      # NOTE(review): seq_along(1:length(x)) yields 1:2 when x is empty;
      # the parallel loop at the bottom of this function is guarded by a
      # length check -- confirm layers is always non-empty here.
      for(layer in seq_along(1:length(ggplotobj2$layers))) {
        # Layers with a colour mapping (or no mapping at all, i.e. inherited
        # aesthetics) get their outline colour whitened away.
        if("colour" %in% names(ggplotobj2$layers[[layer]]$mapping) ||
           0 == length(names(ggplotobj2$layers[[layer]]$mapping))) {
          ggplotobj2$layers[[layer]]$aes_params$colour = "white"
        }
        if("fill" %in% names(ggplotobj2$layers[[layer]]$mapping)) {
          ggplotobj2$layers[[layer]]$aes_params$size = NA
          # Polygon-like geoms optionally get a white edge stroke to visually
          # separate adjacent polygons in 3D (the offset_edges feature).
          if(any(polygon_offset_geoms %in% class(ggplotobj2$layers[[layer]]$geom)) && offset_edges) {
            ggplotobj2$layers[[layer]]$aes_params$size = polygon_offset_value
            ggplotobj2$layers[[layer]]$aes_params$colour = "white"
          }
        }
        # Only the first three default ggplot2 shapes are solid; hollow
        # shapes cannot be projected to 3D, so warn when more are in play.
        if("shape" %in% names(ggplotobj2$layers[[layer]]$mapping)) {
          shapedata = layer_data(ggplotobj2)
          numbershapes = length(unique(shapedata$shape))
          if(numbershapes > 3) {
            warning("Non-solid shapes will not be projected to 3D.")
          }
          ggplotobj2$layers[[layer]]$geom$draw_key = drawkeyfunction_points
        }
        if("size" %in% names(ggplotobj2$layers[[layer]]$mapping)) {
          ggplotobj2$layers[[layer]]$geom$draw_key = drawkeyfunction_points
        }
        if("alpha" %in% names(ggplotobj2$layers[[layer]]$mapping)) {
          ggplotobj2$layers[[layer]]$geom$draw_key = drawkeyfunction_points
          # Alpha is ignored for the height projection: zero the point stroke
          # and pin the alpha range to 1 so transparency cannot corrupt the
          # grayscale height values.
          for(j in seq_len(length(ggplotobj2$layers))) {
            if("stroke" %in% names(ggplotobj2$layers[[j]]$geom$default_aes)) {
              ggplotobj2$layers[[j]]$geom$default_aes$stroke = 0
            }
          }
          ggplotobj2 = suppressMessages({ggplotobj2 + scale_alpha_continuous(range=c(1,1))})
        }
        if("linetype" %in% names(ggplotobj2$layers[[layer]]$mapping)) {
          ggplotobj2$layers[[layer]]$geom$draw_key = drawkeyfunction_lines
        }
      }
    } else {
      # Mirror of the branch above for height_aes == "colour": here the fill
      # aesthetic is the one blanked to white.
      for(layer in seq_along(1:length(ggplotobj2$layers))) {
        if("fill" %in% names(ggplotobj2$layers[[layer]]$mapping) ||
           0 == length(names(ggplotobj2$layers[[layer]]$mapping))) {
          ggplotobj2$layers[[layer]]$aes_params$fill = "white"
        }
        if("shape" %in% names(ggplotobj2$layers[[layer]]$mapping)) {
          shapedata = layer_data(ggplotobj2)
          numbershapes = length(unique(shapedata$shape))
          if(numbershapes > 3) {
            warning("Non-solid shapes will not be projected to 3D.")
          }
          ggplotobj2$layers[[layer]]$geom$draw_key = drawkeyfunction_points
        }
        if("size" %in% names(ggplotobj2$layers[[layer]]$mapping)) {
          ggplotobj2$layers[[layer]]$geom$draw_key = drawkeyfunction_points
        }
        if("alpha" %in% names(ggplotobj2$layers[[layer]]$mapping)) {
          ggplotobj2$layers[[layer]]$geom$draw_key = drawkeyfunction_points
          for(j in seq_len(length(ggplotobj2$layers))) {
            if("stroke" %in% names(ggplotobj2$layers[[j]]$geom$default_aes)) {
              ggplotobj2$layers[[j]]$geom$default_aes$stroke = 0
            }
          }
          ggplotobj2 = suppressMessages({ggplotobj2 + scale_alpha_continuous(range=c(1,1))})
        }
        if("linetype" %in% names(ggplotobj2$layers[[layer]]$mapping)) {
          ggplotobj2$layers[[layer]]$geom$draw_key = drawkeyfunction_lines
        }
      }
    }
    #switch all elements to white
    # Walk the user's theme: elements the user set are marked FALSE in
    # theme_bool (so the defaults below do not clobber them), then each
    # element is whitened according to its type, looked up positionally in
    # typetheme via its name's position in colortheme.
    for(i in 1:length(ggplotobj2$theme)) {
      tempname = names(ggplotobj2$theme[i])
      if(tempname %in% key_theme_elements) {
        theme_bool[tempname] = FALSE
      } else if ("element_blank" %in% class(ggplotobj2$theme[[i]])) {
        theme_bool[tempname] = FALSE
      }
      whichtype = typetheme[which(tempname == colortheme)]
      if(whichtype %in% c("text","line")) {
        if(!is.null(ggplotobj2$theme[[i]])) {
          ggplotobj2$theme[[i]]$colour = "white"
        }
      } else if(whichtype == "rect") {
        # panel.border and rect keep a transparent fill so the panel area
        # underneath remains visible; everything else is filled white.
        if(!(tempname %in% c("panel.border","rect"))) {
          if(!is.null(ggplotobj2$theme[[i]])) {
            ggplotobj2$theme[[i]]$colour = "white"
            ggplotobj2$theme[[i]]$fill = "white"
          }
        } else {
          ggplotobj2$theme[[i]]$colour = "white"
          ggplotobj2$theme[[i]]$fill = NA
        }
      }
    }
    # Whiten any user-constructed guide objects as well (frame, ticks).
    if(ggplotobj2$scales$n() > 0) {
      for(i in 1:ggplotobj2$scales$n()) {
        if(length(ggplotobj2$scales$scales[[i]]$guide) > 1) {
          ggplotobj2$scales$scales[[i]]$guide$frame.colour = "white"
          ggplotobj2$scales$scales[[i]]$guide$ticks = FALSE
          ggplotobj2$scales$scales[[i]]$guide$nbin = 256
          ggplotobj2$scales$scales[[i]]$guide$draw.llim = FALSE
          ggplotobj2$scales$scales[[i]]$na.value = "white"
        }
      }
    }
    # Apply white defaults only for the elements the user did not configure.
    if(theme_bool["text"]) ggplotobj2 = ggplotobj2 + theme(text = element_text(color="white"))
    if(theme_bool["line"]) ggplotobj2 = ggplotobj2 + theme(line = element_line(color="white"))
    if(theme_bool["axis.line"]) ggplotobj2 = ggplotobj2 + theme(axis.line = element_line(color="white"))
    if(theme_bool["axis.title"]) ggplotobj2 = ggplotobj2 + theme(axis.title = element_text(color="white"))
    if(theme_bool["axis.title.x"]) ggplotobj2 = ggplotobj2 + theme(axis.title.x = element_text(color="white"))
    if(theme_bool["axis.title.y"]) ggplotobj2 = ggplotobj2 + theme(axis.title.y = element_text(color="white"))
    if(theme_bool["axis.text"]) ggplotobj2 = ggplotobj2 + theme(axis.text = element_text(color="white"))
    if(theme_bool["axis.text.x"]) ggplotobj2 = ggplotobj2 + theme(axis.text.x = element_text(color="white"))
    if(theme_bool["axis.text.y"]) ggplotobj2 = ggplotobj2 + theme(axis.text.y = element_text(color="white"))
    if(theme_bool["strip.text.x"]) ggplotobj2 = ggplotobj2 + theme(strip.text.x = element_text(color="white"))
    if(theme_bool["strip.text.y"]) ggplotobj2 = ggplotobj2 + theme(strip.text.y = element_text(color="white"))
    if(theme_bool["axis.ticks"]) ggplotobj2 = ggplotobj2 + theme(axis.ticks = element_line(color="white"))
    if(theme_bool["strip.background"]) ggplotobj2 = ggplotobj2 + theme(strip.background = element_rect(fill = "white", color = "white"))
    if(theme_bool["strip.text"]) ggplotobj2 = ggplotobj2 + theme(strip.text = element_text(color="white"))
    if(theme_bool["legend.text"]) ggplotobj2 = ggplotobj2 + theme(legend.text = element_text(color="white"))
    if(theme_bool["legend.title"]) ggplotobj2 = ggplotobj2 + theme(legend.title = element_text(color="white"))
    if(theme_bool["legend.background"]) ggplotobj2 = ggplotobj2 + theme(legend.background = element_rect(fill = "white", color = "white"))
    # (duplicate of the legend.title line above -- harmless, kept as-is)
    if(theme_bool["legend.title"]) ggplotobj2 = ggplotobj2 + theme(legend.title = element_text(color="white"))
    if(theme_bool["panel.background"]) ggplotobj2 = ggplotobj2 + theme(panel.background = element_rect(fill = "white", color = "white"))
  } else {
    # No custom theme: same per-layer whitening as above, then attach a
    # ready-made all-white theme at the end.
    if(height_aes == "fill") {
      for(layer in seq_along(1:length(ggplotobj2$layers))) {
        if("colour" %in% names(ggplotobj2$layers[[layer]]$mapping) ||
           0 == length(names(ggplotobj2$layers[[layer]]$mapping))) {
          ggplotobj2$layers[[layer]]$aes_params$colour = "white"
        }
        if("fill" %in% names(ggplotobj2$layers[[layer]]$mapping)) {
          ggplotobj2$layers[[layer]]$aes_params$size = NA
          if(any(polygon_offset_geoms %in% class(ggplotobj2$layers[[layer]]$geom)) && offset_edges) {
            ggplotobj2$layers[[layer]]$aes_params$size = polygon_offset_value
            ggplotobj2$layers[[layer]]$aes_params$colour = "white"
          }
        }
        if("shape" %in% names(ggplotobj2$layers[[layer]]$mapping)) {
          shapedata = layer_data(ggplotobj2)
          numbershapes = length(unique(shapedata$shape))
          if(numbershapes > 3) {
            warning("Non-solid shapes will not be projected to 3D.")
          }
          ggplotobj2$layers[[layer]]$geom$draw_key = drawkeyfunction_points
        }
        if("size" %in% names(ggplotobj2$layers[[layer]]$mapping)) {
          ggplotobj2$layers[[layer]]$geom$draw_key = drawkeyfunction_points
        }
        if("alpha" %in% names(ggplotobj2$layers[[layer]]$mapping)) {
          ggplotobj2$layers[[layer]]$geom$draw_key = drawkeyfunction_points
          for(j in seq_len(length(ggplotobj2$layers))) {
            if("stroke" %in% names(ggplotobj2$layers[[j]]$geom$default_aes)) {
              ggplotobj2$layers[[j]]$geom$default_aes$stroke = 0
            }
          }
          ggplotobj2 = suppressMessages({ggplotobj2 + scale_alpha_continuous(range=c(1,1))})
        }
        if("linetype" %in% names(ggplotobj2$layers[[layer]]$mapping)) {
          ggplotobj2$layers[[layer]]$geom$draw_key = drawkeyfunction_lines
        }
      }
    } else {
      for(layer in seq_len(length(ggplotobj2$layers))) {
        if("fill" %in% names(ggplotobj2$layers[[layer]]$mapping) ||
           0 == length(names(ggplotobj2$layers[[layer]]$mapping))) {
          ggplotobj2$layers[[layer]]$aes_params$fill = "white"
        }
        if("shape" %in% names(ggplotobj2$layers[[layer]]$mapping)) {
          shapedata = layer_data(ggplotobj2)
          numbershapes = length(unique(shapedata$shape))
          if(numbershapes > 3) {
            warning("Non-solid shapes will not be projected to 3D.")
          }
          ggplotobj2$layers[[layer]]$geom$draw_key = drawkeyfunction_points
        }
        if("size" %in% names(ggplotobj2$layers[[layer]]$mapping)) {
          ggplotobj2$layers[[layer]]$geom$draw_key = drawkeyfunction_points
        }
        if("alpha" %in% names(ggplotobj2$layers[[layer]]$mapping)) {
          ggplotobj2$layers[[layer]]$geom$draw_key = drawkeyfunction_points
          for(j in seq_len(length(ggplotobj2$layers))) {
            if("stroke" %in% names(ggplotobj2$layers[[j]]$geom$default_aes)) {
              ggplotobj2$layers[[j]]$geom$default_aes$stroke = 0
            }
          }
          ggplotobj2 = suppressMessages({ggplotobj2 + scale_alpha_continuous(range=c(1,1))})
        }
        if("linetype" %in% names(ggplotobj2$layers[[layer]]$mapping)) {
          ggplotobj2$layers[[layer]]$geom$draw_key = drawkeyfunction_lines
        }
      }
    }
    #No custom theme passed, just create one.
    ggplotobj2 = ggplotobj2 +
      theme(text = element_text(color="white"),
            line = element_line(color="white"),
            axis.line = element_line(color="white"),
            axis.title = element_text(color="white"),
            axis.text = element_text(color="white"),
            axis.ticks = element_line(color="white"),
            strip.background = element_rect(fill = "white", color = "white"),
            strip.text = element_text(color="white"),
            legend.key = element_rect(fill = "white", color = "white"),
            legend.text = element_text(color="white"),
            legend.background = element_rect(fill = "white", color = "white"),
            legend.title = element_text(color="white"),
            panel.background = element_rect(fill = "white", color = "white"))
  }
if(height_aes == "fill") {
if(length(ggplotobj2$layers) > 0) {
for(i in seq_along(1:length(ggplotobj2$layers))) {
ggplotobj2$layers[[i]]$aes_params$size = NA
if(any(polygon_offset_geoms %in% class(ggplotobj2$layers[[layer]]$geom)) && offset_edges) {
ggplotobj2$layers[[i]]$aes_params$size = polygon_offset_value
ggplotobj2$layers[[i]]$aes_params$colour = "white"
}
}
}
} else {
if(length(ggplotobj2$layers) > 0) {
for(i in seq_along(1:length(ggplotobj2$layers))) {
ggplotobj2$layers[[i]]$aes_params$fill = "white"
if("GeomContour" %in% class(ggplotobj2$layers[[i]]$geom)) {
ggplotobj2$layers[[i]]$aes_params$alpha = 0
}
}
if(pointcontract != 1) {
for(i in 1:length(ggplotobj2$layers)) {
if(!is.null(ggplotobj2$layers[[i]]$aes_params$size)) {
ggplotobj2$layers[[i]]$aes_params$size = ggplotobj2$layers[[i]]$aes_params$size * pointcontract
} else {
ggplotobj2$layers[[i]]$geom$default_aes$size = ggplotobj2$layers[[i]]$geom$default_aes$size * pointcontract
}
}
}
}
}
tryCatch({
ggsave(paste0(heightmaptemp,".png"),ggplotobj2,width = width,height = height)
}, error = function(e) {
if(any(grepl("Error: Discrete value supplied to continuous scale", as.character(e),fixed = TRUE))) {
stop(paste0("Error: Discrete variable cannot be mapped to 3D. Did you mean to choose `",ifelse(height_aes == "fill","color","fill"), "` as the `height_aes`?"),call.=FALSE)
}
})
if(!is.null(reduce_size)) {
if(!("magick" %in% rownames(utils::installed.packages()))) {
stop("magick package required to use argument reduce_size")
} else {
if(length(reduce_size) == 1 && reduce_size < 1) {
scale = scale * reduce_size
image_info = magick::image_read(paste0(heightmaptemp,".png")) %>%
magick::image_info()
magick::image_read(paste0(heightmaptemp,".png")) %>%
magick::image_resize(paste0(image_info$width * reduce_size,"x",image_info$height * reduce_size)) %>%
magick::image_write(paste0(heightmaptemp,".png"))
magick::image_read(paste0(colormaptemp,".png")) %>%
magick::image_resize(paste0(image_info$width * reduce_size,"x",image_info$height * reduce_size)) %>%
magick::image_write(paste0(colormaptemp,".png"))
}
}
}
  # Read the two rendered PNGs back: `mapcolor` is the user-visible plot,
  # `mapheight` the grayscale elevation image.
  mapcolor = png::readPNG(paste0(colormaptemp,".png"))
  mapheight = png::readPNG(paste0(heightmaptemp,".png"))
  # RGB(A) height image: all channels are equal grayscale, keep the first.
  if(length(dim(mapheight)) == 3) {
    mapheight = mapheight[,,1]
  }
  if(invert) {
    mapheight = 1 - mapheight
  }
  # Height values are inverted (black = high) and transposed to match the
  # row/column convention expected by ray_shade/plot_3d.
  if(raytrace) {
    if(is.null(saved_shadow_matrix)) {
      raylayer = ray_shade(1-t(mapheight),maxsearch = 600,sunangle = sunangle,anglebreaks = anglebreaks,
                           zscale=1/scale,multicore = multicore,lambert = lambert, ...)
      if(!preview) {
        mapcolor %>%
          add_shadow(raylayer,shadow_intensity) %>%
          plot_3d((t(1-mapheight)),zscale=1/scale, ... )
      } else {
        # preview = TRUE: show the shadowed 2D map on the current device.
        mapcolor %>%
          add_shadow(raylayer,shadow_intensity) %>%
          plot_map(keep_user_par = FALSE)
      }
    } else {
      # Reuse a shadow matrix cached by a previous save_shadow_matrix run.
      raylayer = saved_shadow_matrix
      if(!preview) {
        mapcolor %>%
          add_shadow(raylayer,shadow_intensity) %>%
          plot_3d((t(1-mapheight)),zscale=1/scale, ... )
      } else {
        mapcolor %>%
          add_shadow(raylayer,shadow_intensity) %>%
          plot_map(keep_user_par = FALSE)
      }
    }
  } else {
    # No raytracing: plot the color map directly (no `raylayer` is created
    # on this path).
    if(!preview) {
      plot_3d(mapcolor, (t(1-mapheight)), zscale=1/scale, ...)
    } else {
      plot_map(mapcolor, keep_user_par = FALSE)
    }
  }
if(save_shadow_matrix & !save_height_matrix) {
return(raylayer)
}
if(!save_shadow_matrix & save_height_matrix) {
return(1-t(mapheight))
}
if(save_shadow_matrix & save_height_matrix) {
return(list(1-t(mapheight),raylayer))
}
} | /R/plot_gg.R | no_license | Bisaloo/rayshader | R | false | false | 33,630 | r | #'@title Transform ggplot2 objects into 3D
#'
#'@description Plots a ggplot2 object in 3D by mapping the color or fill aesthetic to elevation.
#'
#'Currently, this function does not transform lines mapped to color into 3D.
#'
#'If there are multiple legends/guides due to multiple aesthetics being mapped (e.g. color and shape),
#'the package author recommends that the user pass the order of the guides manually using the ggplot2 function "guides()`.
#'Otherwise, the order may change when processing the ggplot2 object and result in a mismatch between the 3D mapping
#'and the underlying plot.
#'
#'Using the shape aesthetic with more than three groups is not recommended, unless the user passes in
#'custom, solid shapes. By default in ggplot2, only the first three shapes are solid, which is a requirement to be projected
#'into 3D.
#'
#'@param ggobj ggplot object to projected into 3D.
#'@param width Default `3`. Width of ggplot, in `units`.
#'@param height Default `3`. Height of ggplot, in `units`.
#'@param height_aes Default `NULL`. Whether the `fill` or `color` aesthetic should be used for height values,
#'which the user can specify by passing either `fill` or `color` to this argument.
#'Automatically detected. If both `fill` and `color` aesthetics are present, then `fill` is default.
#'@param invert Default `FALSE`. If `TRUE`, the height mapping is inverted.
#'@param shadow_intensity Default `0.5`. The intensity of the calculated shadows.
#'@param units Default `in`. One of c("in", "cm", "mm").
#'@param scale Default `150`. Multiplier for vertical scaling: a higher number increases the height
#'of the 3D transformation.
#'@param pointcontract Default `0.7`. This multiplies the size of the points and shrinks
#'them around their center in the 3D surface mapping. Decrease this to reduce color bleed on edges, and set to
#'`1` to turn off entirely. Note: If `size` is passed as an aesthetic to the same geom
#'that is being mapped to elevation, this scaling will not be applied. If `alpha` varies on the variable
#'being mapped, you may want to set this to `1`, since the points now have a non-zero width stroke outline (however,
#'mapping `alpha` in the same variable you are projecting to height is probably not a good choice. as the `alpha`
#'variable is ignored when performing the 3D projection).
#'@param offset_edges Default `FALSE`. If `TRUE`, inserts a small amount of space between polygons for "geom_sf", "geom_tile", "geom_hex", and "geom_polygon" layers.
#'If you pass in a number, the space between polygons will be a line of that width. Note: this feature may end up removing thin polygons
#'from the plot entirely--use with care.
#'@param preview Default `FALSE`. If `TRUE`, the raytraced 2D ggplot will be displayed on the current device.
#'@param raytrace Default `FALSE`. Whether to add a raytraced layer.
#'@param sunangle Default `315` (NW). If raytracing, the angle (in degrees) around the matrix from which the light originates.
#'@param anglebreaks Default `seq(30,40,0.1)`. The azimuth angle(s), in degrees, as measured from the horizon from which the light originates.
#'@param lambert Default `TRUE`. If raytracing, changes the intensity of the light at each point based proportional to the
#'dot product of the ray direction and the surface normal at that point. Zeros out all values directed away from
#'the ray.
#'@param reduce_size Default `NULL`. A number between 0 and 1 that specifies how much to reduce the resolution of the plot, for faster plotting.
#'@param multicore Default `FALSE`. If raytracing and `TRUE`, multiple cores will be used to compute the shadow matrix. By default, this uses all cores available, unless the user has
#'set `options("cores")` in which the multicore option will only use that many cores.
#'@param save_height_matrix Default `FALSE`. If `TRUE`, the function will return the height matrix used for the ggplot.
#'@param save_shadow_matrix Default `FALSE`. If `TRUE`, the function will return the shadow matrix for use in future updates via the `shadow_cache` argument passed to `ray_shade`.
#'@param saved_shadow_matrix Default `NULL`. A cached shadow matrix (saved by the a previous invocation of `plot_gg(..., save_shadow_matrix=TRUE)` to use instead of raytracing a shadow map each time.
#'@param ... Additional arguments to be passed to `plot_3d()`.
#'@return Opens a 3D plot in rgl.
#'@import ggplot2
#'@export
#'@examples
#'library(ggplot2)
#'library(viridis)
#'
#'ggdiamonds = ggplot(diamonds, aes(x, depth)) +
#' stat_density_2d(aes(fill = stat(nlevel)), geom = "polygon", n = 100, bins = 10,contour = TRUE) +
#' facet_wrap(clarity~.) +
#' scale_fill_viridis_c(option = "A")
#'\donttest{
#'plot_gg(ggdiamonds,multicore=TRUE,width=5,height=5,scale=250,windowsize=c(1400,866),
#' zoom = 0.55, phi = 30)
#'render_snapshot()
#'}
#'
#'#Change the camera angle and take a snapshot:
#'\donttest{
#'render_camera(zoom=0.5,theta=-30,phi=30)
#'render_snapshot(clear = TRUE)
#'}
#'
#'#Contours and other lines will automatically be ignored. Here is the volcano dataset:
#'
#'ggvolcano = volcano %>%
#' reshape2::melt() %>%
#' ggplot() +
#' geom_tile(aes(x=Var1,y=Var2,fill=value)) +
#' geom_contour(aes(x=Var1,y=Var2,z=value),color="black") +
#' scale_x_continuous("X",expand = c(0,0)) +
#' scale_y_continuous("Y",expand = c(0,0)) +
#' scale_fill_gradientn("Z",colours = terrain.colors(10)) +
#' coord_fixed()
#'ggvolcano
#'
#'\donttest{
#'plot_gg(ggvolcano, multicore = TRUE, raytrace = TRUE, width = 7, height = 4,
#' scale = 300, windowsize = c(1400, 866), zoom = 0.6, phi = 30, theta = 30)
#'render_snapshot(clear = TRUE)
#'}
#'
#'#Here, we will create a 3D plot of the mtcars dataset. This automatically detects
#'#that the user used the `color` aesthetic instead of the `fill`.
#'mtplot = ggplot(mtcars) +
#' geom_point(aes(x=mpg,y=disp,color=cyl)) +
#' scale_color_continuous(limits=c(0,8))
#'
#'#Preview how the plot will look by setting `preview = TRUE`: We also adjust the angle of the light.
#'\donttest{
#'plot_gg(mtplot, width=3.5, sunangle=225, preview = TRUE)
#'}
#'
#'\donttest{
#'plot_gg(mtplot, width=3.5, multicore = TRUE, windowsize = c(1400,866), sunangle=225,
#' zoom = 0.60, phi = 30, theta = 45)
#'render_snapshot(clear = TRUE)
#'}
#'
#'#Now let's plot a density plot in 3D.
#'mtplot_density = ggplot(mtcars) +
#' stat_density_2d(aes(x=mpg,y=disp, fill=..density..), geom = "raster", contour = FALSE) +
#' scale_x_continuous(expand=c(0,0)) +
#' scale_y_continuous(expand=c(0,0)) +
#' scale_fill_gradient(low="pink", high="red")
#'mtplot_density
#'
#'\donttest{
#'plot_gg(mtplot_density, width = 4,zoom = 0.60, theta = -45, phi = 30,
#' windowsize = c(1400,866))
#'render_snapshot(clear = TRUE)
#'}
#'
#'#This also works facetted.
#'mtplot_density_facet = mtplot_density + facet_wrap(~cyl)
#'
#'#Preview this plot in 2D:
#'\donttest{
#'plot_gg(mtplot_density_facet, preview = TRUE)
#'}
#'
#'\donttest{
#'plot_gg(mtplot_density_facet, windowsize=c(1400,866),
#' zoom = 0.55, theta = -10, phi = 25)
#'render_snapshot(clear = TRUE)
#'}
#'
#'#That is a little cramped. Specifying a larger width will improve the readability of this plot.
#'\donttest{
#'plot_gg(mtplot_density_facet, width = 6, preview = TRUE)
#'}
#'
#'#That's better. Let's plot it in 3D, and increase the scale.
#'\donttest{
#'plot_gg(mtplot_density_facet, width = 6, windowsize=c(1400,866),
#' zoom = 0.55, theta = -10, phi = 25, scale=300)
#'render_snapshot(clear = TRUE)
#'}
plot_gg = function(ggobj, width = 3, height = 3,
height_aes = NULL, invert = FALSE, shadow_intensity = 0.5,
units = c("in", "cm", "mm"), scale=150, pointcontract = 0.7, offset_edges = FALSE,
preview = FALSE, raytrace = TRUE, sunangle = 315, anglebreaks = seq(30,40,0.1),
multicore = FALSE, lambert=TRUE, reduce_size = NULL, save_height_matrix = FALSE,
save_shadow_matrix = FALSE, saved_shadow_matrix=NULL, ...) {
heightmaptemp = tempfile()
colormaptemp = tempfile()
if(methods::is(ggobj,"list") && length(ggobj) == 2) {
ggplotobj2 = unserialize(serialize(ggobj[[2]], NULL))
ggsave(paste0(colormaptemp,".png"),ggobj[[1]],width = width,height = height)
} else {
ggplotobj2 = unserialize(serialize(ggobj, NULL))
ggsave(paste0(colormaptemp,".png"),ggplotobj2,width = width,height = height)
}
isfill = FALSE
iscolor = FALSE
if(is.null(height_aes)) {
for(i in seq_len(length(ggplotobj2$layers))) {
if("fill" %in% names(ggplotobj2$layers[[i]]$mapping)) {
isfill = TRUE
}
if(any(c("color","colour") %in% names(ggplotobj2$layers[[i]]$mapping))) {
iscolor = TRUE
}
}
if(!iscolor && !isfill) {
if("fill" %in% names(ggplotobj2$mapping)) {
isfill = TRUE
}
if(any(c("color","colour") %in% names(ggplotobj2$mapping))) {
iscolor = TRUE
}
}
if(isfill && !iscolor) {
height_aes = "fill"
} else if (!isfill && iscolor) {
height_aes = "colour"
} else if (isfill && iscolor) {
height_aes = "fill"
} else {
height_aes = "fill"
}
}
if(height_aes == "color") {
height_aes = "colour"
}
if(is.numeric(offset_edges)) {
polygon_offset_value = offset_edges
offset_edges = TRUE
} else {
polygon_offset_value = 0.5
}
polygon_offset_geoms = c("GeomPolygon","GeomSf", "GeomHex", "GeomTile")
other_height_type = ifelse(height_aes == "colour", "fill", "colour")
colortheme = c("line","rect","text","axis.title", "axis.title.x",
"axis.title.x.top","axis.title.y","axis.title.y.right","axis.text",
"axis.text.x" ,"axis.text.x.top","axis.text.y","axis.text.y.right",
"axis.ticks" ,"axis.ticks.length","axis.line" ,"axis.line.x",
"axis.line.y","legend.background","legend.margin","legend.spacing",
"legend.spacing.x","legend.spacing.y","legend.key" ,"legend.key.size",
"legend.key.height","legend.key.width","legend.text","legend.text.align",
"legend.title","legend.title.align","legend.position","legend.direction",
"legend.justification" ,"legend.box","legend.box.margin","legend.box.background",
"legend.box.spacing","panel.background","panel.border","panel.spacing",
"panel.spacing.x","panel.spacing.y","panel.grid" ,"panel.grid.minor",
"panel.ontop","plot.background","plot.title" ,"plot.subtitle",
"plot.caption","plot.tag","plot.tag.position","plot.margin",
"strip.background","strip.placement","strip.text" ,"strip.text.x",
"strip.text.y","strip.switch.pad.grid","strip.switch.pad.wrap","panel.grid.major",
"title","axis.ticks.length.x","axis.ticks.length.y","axis.ticks.length.x.top",
"axis.ticks.length.x.bottom","axis.ticks.length.y.left","axis.ticks.length.y.right","axis.title.x.bottom",
"axis.text.x.bottom","axis.text.y.left","axis.title.y.left")
key_theme_elements = c("text", "line", "axis.line", "axis.title",
"axis.title.x",
"axis.title.y",
"axis.text",
"axis.text.x", "axis.text.y", "axis.text.x.top", "axis.text.x.bottom",
"axis.text.y.left", "axis.text.y.right",
"axis.ticks", "strip.background", "strip.text", "legend.text", "strip.text.x","strip.text.y",
"legend.title","legend.background", "legend.title", "panel.background")
theme_bool = rep(TRUE,length(key_theme_elements))
names(theme_bool) = key_theme_elements
typetheme = c("line","rect","text","text","text",
"text","text","text","text",
"text","text","text","text",
"line","unit","line","line",
"line","rect","margin","unit",
"unit","unit","rect","unit",
"unit","unit","text","none",
"text","none","none","none",
"none","rect","margin","rect",
"unit","rect","rect","unit",
"unit","unit","line","line",
"none","rect","text","text",
"text","text","none","margin",
"rect","none","text","text",
"text","unit","unit","line",
"text","line","line","line",
"line","line","line","text",
"text","text","text")
black_white_pal = function(x) {
grDevices::colorRampPalette(c("white", "black"))(255)[x * 254 + 1]
}
white_white_pal = function(x) {
grDevices::colorRampPalette(c("white", "white"))(255)[x * 254 + 1]
}
ifelsefxn = function(entry) {
if(!is.null(entry)) {
return(entry)
}
}
#aes_with_guides = c("size","shape","colour","fill","alpha","linetype")
#Shift all continuous palettes of height_aes to black/white, and set all discrete key colors to white.
if(ggplotobj2$scales$n() != 0) {
anyfound = FALSE
#Check to see if same guide being used for both color and fill aesthetics
if(ggplotobj2$scales$has_scale("colour") && ggplotobj2$scales$has_scale("fill")) {
fillscale = ggplotobj2$scales$get_scales("fill")
colorscale = ggplotobj2$scales$get_scales("colour")
same_limits = FALSE
same_breaks = FALSE
same_labels = FALSE
same_calls = FALSE
if((!is.null(fillscale$limits) && !is.null(colorscale$limits))) {
if(fillscale$limits == colorscale$limits) {
same_limits = TRUE
}
} else if (is.null(fillscale$limits) && is.null(colorscale$limits)) {
same_limits = TRUE
}
if((!is.null(fillscale$breaks) && !is.null(colorscale$breaks))) {
if(all(fillscale$breaks == colorscale$breaks)) {
same_breaks = TRUE
}
} else if (is.null(fillscale$breaks) && is.null(colorscale$breaks)) {
same_breaks = TRUE
}
if((class(fillscale$labels) != "waiver" && class(colorscale$labels) != "waiver")) {
if(all(fillscale$labels == colorscale$labels)) {
same_labels = TRUE
}
} else if ((class(fillscale$labels) == "waiver" && class(colorscale$labels) == "waiver")) {
same_labels = TRUE
}
if(fillscale$call == colorscale$call) {
same_calls = TRUE
}
if(same_limits && same_breaks && same_labels && same_calls) {
if(height_aes == "fill") {
ggplotobj2 = ggplotobj2 + guides(color = "none")
} else {
ggplotobj2 = ggplotobj2 + guides(fill = "none")
}
}
}
#Now check for scales and change to the b/w palette, but preserve guide traits.
for(i in seq_len(ggplotobj2$scales$n())) {
if(height_aes %in% ggplotobj2$scales$scales[[i]]$aesthetics) {
ggplotobj2$scales$scales[[i]]$palette = black_white_pal
ggplotobj2$scales$scales[[i]]$na.value = "white"
has_guide = !any("guide" %in% class(ggplotobj2$scales$scales[[i]]$guide))
if(any(c("logical" %in% class(ggplotobj2$scales$scales[[i]]$guide)))) {
has_guide = ggplotobj2$scales$scales[[i]]$guide
}
if(has_guide) {
if(height_aes == "fill") {
if(is.null(ggplotobj2$guides$fill)) {
ggplotobj2 = ggplotobj2 + guides(fill = guide_colourbar(ticks = FALSE,nbin = 1000,order=i))
} else {
if(any(ggplotobj2$guides$fill != "none")) {
copyguide = ggplotobj2$guides$fill
copyguide$frame.linewidth = 0
copyguide$ticks = FALSE
copyguide$nbin = 1000
ggplotobj2 = ggplotobj2 +
guides(fill = guide_colourbar(ticks = FALSE,nbin = 1000))
ggplotobj2$guides$fill = copyguide
}
}
for(j in seq_len(length(ggplotobj2$layers))) {
if("colour" %in% names(ggplotobj2$layers[[j]]$mapping)) {
ggplotobj2$layers[[j]]$geom$draw_key = drawkeyfunction_points
}
}
} else {
if(is.null(ggplotobj2$guides$colour)) {
ggplotobj2 = ggplotobj2 + guides(colour = guide_colourbar(ticks = FALSE,nbin = 1000,order=i))
} else {
if(any(ggplotobj2$guides$colour != "none")) {
copyguide = ggplotobj2$guides$colour
copyguide$frame.linewidth = 0
copyguide$ticks = FALSE
copyguide$nbin = 1000
ggplotobj2 = ggplotobj2 +
guides(colour = guide_colourbar(ticks = FALSE,nbin = 1000))
ggplotobj2$guides$colour = copyguide
}
}
}
}
anyfound = TRUE
} else if(other_height_type %in% ggplotobj2$scales$scales[[i]]$aesthetics) {
#change guides for other height_aes to be the all white palette
ggplotobj2$scales$scales[[i]]$palette = white_white_pal
ggplotobj2$scales$scales[[i]]$na.value = "white"
}
}
#If no scales found, just add one to the ggplot object.
if(!anyfound) {
if(height_aes == "colour") {
ggplotobj2 = ggplotobj2 +
scale_color_gradientn(colours = grDevices::colorRampPalette(c("white","black"))(256), na.value = "white") +
guides(colour = guide_colourbar(ticks = FALSE,nbin = 1000))
}
if(height_aes == "fill") {
ggplotobj2 = ggplotobj2 +
scale_fill_gradientn(colours = grDevices::colorRampPalette(c("white","black"))(256), na.value = "white") +
guides(fill = guide_colourbar(ticks = FALSE,nbin = 1000))
}
}
} else {
#If no scales found, just add one to the ggplot object.
if(ggplotobj2$scales$n() == 0) {
if(height_aes == "fill") {
ggplotobj2 = ggplotobj2 + scale_fill_gradientn(colours = grDevices::colorRampPalette(c("white","black"))(256), na.value = "white") +
guides(fill = guide_colourbar(ticks = FALSE,nbin = 1000))
} else {
ggplotobj2 = ggplotobj2 + scale_color_gradientn(colours = grDevices::colorRampPalette(c("white","black"))(256), na.value = "white") +
guides(colour = guide_colourbar(ticks = FALSE,nbin = 1000))
}
} else {
if(height_aes == "fill") {
ggplotobj2 = ggplotobj2 + scale_fill_gradientn(colours = grDevices::colorRampPalette(c("white","black"))(256), na.value = "white") +
guides(fill = guide_colourbar(ticks = FALSE,nbin = 1000))
} else {
ggplotobj2 = ggplotobj2 + scale_color_gradientn(colours = grDevices::colorRampPalette(c("white","black"))(256), na.value = "white") +
guides(colour = guide_colourbar(ticks = FALSE,nbin = 1000))
}
}
}
#Set all elements to white if custom theme passed, and color aesthetic geoms to size = 0 if height_aes == "fill"
if(length(ggplotobj2$theme) > 0) {
if(height_aes == "fill") {
for(layer in seq_along(1:length(ggplotobj2$layers))) {
if("colour" %in% names(ggplotobj2$layers[[layer]]$mapping) ||
0 == length(names(ggplotobj2$layers[[layer]]$mapping))) {
ggplotobj2$layers[[layer]]$aes_params$colour = "white"
}
if("fill" %in% names(ggplotobj2$layers[[layer]]$mapping)) {
ggplotobj2$layers[[layer]]$aes_params$size = NA
if(any(polygon_offset_geoms %in% class(ggplotobj2$layers[[layer]]$geom)) && offset_edges) {
ggplotobj2$layers[[layer]]$aes_params$size = polygon_offset_value
ggplotobj2$layers[[layer]]$aes_params$colour = "white"
}
}
if("shape" %in% names(ggplotobj2$layers[[layer]]$mapping)) {
shapedata = layer_data(ggplotobj2)
numbershapes = length(unique(shapedata$shape))
if(numbershapes > 3) {
warning("Non-solid shapes will not be projected to 3D.")
}
ggplotobj2$layers[[layer]]$geom$draw_key = drawkeyfunction_points
}
if("size" %in% names(ggplotobj2$layers[[layer]]$mapping)) {
ggplotobj2$layers[[layer]]$geom$draw_key = drawkeyfunction_points
}
if("alpha" %in% names(ggplotobj2$layers[[layer]]$mapping)) {
ggplotobj2$layers[[layer]]$geom$draw_key = drawkeyfunction_points
for(j in seq_len(length(ggplotobj2$layers))) {
if("stroke" %in% names(ggplotobj2$layers[[j]]$geom$default_aes)) {
ggplotobj2$layers[[j]]$geom$default_aes$stroke = 0
}
}
ggplotobj2 = suppressMessages({ggplotobj2 + scale_alpha_continuous(range=c(1,1))})
}
if("linetype" %in% names(ggplotobj2$layers[[layer]]$mapping)) {
ggplotobj2$layers[[layer]]$geom$draw_key = drawkeyfunction_lines
}
}
} else {
for(layer in seq_along(1:length(ggplotobj2$layers))) {
if("fill" %in% names(ggplotobj2$layers[[layer]]$mapping) ||
0 == length(names(ggplotobj2$layers[[layer]]$mapping))) {
ggplotobj2$layers[[layer]]$aes_params$fill = "white"
}
if("shape" %in% names(ggplotobj2$layers[[layer]]$mapping)) {
shapedata = layer_data(ggplotobj2)
numbershapes = length(unique(shapedata$shape))
if(numbershapes > 3) {
warning("Non-solid shapes will not be projected to 3D.")
}
ggplotobj2$layers[[layer]]$geom$draw_key = drawkeyfunction_points
}
if("size" %in% names(ggplotobj2$layers[[layer]]$mapping)) {
ggplotobj2$layers[[layer]]$geom$draw_key = drawkeyfunction_points
}
if("alpha" %in% names(ggplotobj2$layers[[layer]]$mapping)) {
ggplotobj2$layers[[layer]]$geom$draw_key = drawkeyfunction_points
for(j in seq_len(length(ggplotobj2$layers))) {
if("stroke" %in% names(ggplotobj2$layers[[j]]$geom$default_aes)) {
ggplotobj2$layers[[j]]$geom$default_aes$stroke = 0
}
}
ggplotobj2 = suppressMessages({ggplotobj2 + scale_alpha_continuous(range=c(1,1))})
}
if("linetype" %in% names(ggplotobj2$layers[[layer]]$mapping)) {
ggplotobj2$layers[[layer]]$geom$draw_key = drawkeyfunction_lines
}
}
}
#switch all elements to white
for(i in 1:length(ggplotobj2$theme)) {
tempname = names(ggplotobj2$theme[i])
if(tempname %in% key_theme_elements) {
theme_bool[tempname] = FALSE
} else if ("element_blank" %in% class(ggplotobj2$theme[[i]])) {
theme_bool[tempname] = FALSE
}
whichtype = typetheme[which(tempname == colortheme)]
if(whichtype %in% c("text","line")) {
if(!is.null(ggplotobj2$theme[[i]])) {
ggplotobj2$theme[[i]]$colour = "white"
}
} else if(whichtype == "rect") {
if(!(tempname %in% c("panel.border","rect"))) {
if(!is.null(ggplotobj2$theme[[i]])) {
ggplotobj2$theme[[i]]$colour = "white"
ggplotobj2$theme[[i]]$fill = "white"
}
} else {
ggplotobj2$theme[[i]]$colour = "white"
ggplotobj2$theme[[i]]$fill = NA
}
}
}
if(ggplotobj2$scales$n() > 0) {
for(i in 1:ggplotobj2$scales$n()) {
if(length(ggplotobj2$scales$scales[[i]]$guide) > 1) {
ggplotobj2$scales$scales[[i]]$guide$frame.colour = "white"
ggplotobj2$scales$scales[[i]]$guide$ticks = FALSE
ggplotobj2$scales$scales[[i]]$guide$nbin = 256
ggplotobj2$scales$scales[[i]]$guide$draw.llim = FALSE
ggplotobj2$scales$scales[[i]]$na.value = "white"
}
}
}
if(theme_bool["text"]) ggplotobj2 = ggplotobj2 + theme(text = element_text(color="white"))
if(theme_bool["line"]) ggplotobj2 = ggplotobj2 + theme(line = element_line(color="white"))
if(theme_bool["axis.line"]) ggplotobj2 = ggplotobj2 + theme(axis.line = element_line(color="white"))
if(theme_bool["axis.title"]) ggplotobj2 = ggplotobj2 + theme(axis.title = element_text(color="white"))
if(theme_bool["axis.title.x"]) ggplotobj2 = ggplotobj2 + theme(axis.title.x = element_text(color="white"))
if(theme_bool["axis.title.y"]) ggplotobj2 = ggplotobj2 + theme(axis.title.y = element_text(color="white"))
if(theme_bool["axis.text"]) ggplotobj2 = ggplotobj2 + theme(axis.text = element_text(color="white"))
if(theme_bool["axis.text.x"]) ggplotobj2 = ggplotobj2 + theme(axis.text.x = element_text(color="white"))
if(theme_bool["axis.text.y"]) ggplotobj2 = ggplotobj2 + theme(axis.text.y = element_text(color="white"))
if(theme_bool["strip.text.x"]) ggplotobj2 = ggplotobj2 + theme(strip.text.x = element_text(color="white"))
if(theme_bool["strip.text.y"]) ggplotobj2 = ggplotobj2 + theme(strip.text.y = element_text(color="white"))
if(theme_bool["axis.ticks"]) ggplotobj2 = ggplotobj2 + theme(axis.ticks = element_line(color="white"))
if(theme_bool["strip.background"]) ggplotobj2 = ggplotobj2 + theme(strip.background = element_rect(fill = "white", color = "white"))
if(theme_bool["strip.text"]) ggplotobj2 = ggplotobj2 + theme(strip.text = element_text(color="white"))
if(theme_bool["legend.text"]) ggplotobj2 = ggplotobj2 + theme(legend.text = element_text(color="white"))
if(theme_bool["legend.title"]) ggplotobj2 = ggplotobj2 + theme(legend.title = element_text(color="white"))
if(theme_bool["legend.background"]) ggplotobj2 = ggplotobj2 + theme(legend.background = element_rect(fill = "white", color = "white"))
if(theme_bool["legend.title"]) ggplotobj2 = ggplotobj2 + theme(legend.title = element_text(color="white"))
if(theme_bool["panel.background"]) ggplotobj2 = ggplotobj2 + theme(panel.background = element_rect(fill = "white", color = "white"))
} else {
if(height_aes == "fill") {
for(layer in seq_along(1:length(ggplotobj2$layers))) {
if("colour" %in% names(ggplotobj2$layers[[layer]]$mapping) ||
0 == length(names(ggplotobj2$layers[[layer]]$mapping))) {
ggplotobj2$layers[[layer]]$aes_params$colour = "white"
}
if("fill" %in% names(ggplotobj2$layers[[layer]]$mapping)) {
ggplotobj2$layers[[layer]]$aes_params$size = NA
if(any(polygon_offset_geoms %in% class(ggplotobj2$layers[[layer]]$geom)) && offset_edges) {
ggplotobj2$layers[[layer]]$aes_params$size = polygon_offset_value
ggplotobj2$layers[[layer]]$aes_params$colour = "white"
}
}
if("shape" %in% names(ggplotobj2$layers[[layer]]$mapping)) {
shapedata = layer_data(ggplotobj2)
numbershapes = length(unique(shapedata$shape))
if(numbershapes > 3) {
warning("Non-solid shapes will not be projected to 3D.")
}
ggplotobj2$layers[[layer]]$geom$draw_key = drawkeyfunction_points
}
if("size" %in% names(ggplotobj2$layers[[layer]]$mapping)) {
ggplotobj2$layers[[layer]]$geom$draw_key = drawkeyfunction_points
}
if("alpha" %in% names(ggplotobj2$layers[[layer]]$mapping)) {
ggplotobj2$layers[[layer]]$geom$draw_key = drawkeyfunction_points
for(j in seq_len(length(ggplotobj2$layers))) {
if("stroke" %in% names(ggplotobj2$layers[[j]]$geom$default_aes)) {
ggplotobj2$layers[[j]]$geom$default_aes$stroke = 0
}
}
ggplotobj2 = suppressMessages({ggplotobj2 + scale_alpha_continuous(range=c(1,1))})
}
if("linetype" %in% names(ggplotobj2$layers[[layer]]$mapping)) {
ggplotobj2$layers[[layer]]$geom$draw_key = drawkeyfunction_lines
}
}
} else {
for(layer in seq_len(length(ggplotobj2$layers))) {
if("fill" %in% names(ggplotobj2$layers[[layer]]$mapping) ||
0 == length(names(ggplotobj2$layers[[layer]]$mapping))) {
ggplotobj2$layers[[layer]]$aes_params$fill = "white"
}
if("shape" %in% names(ggplotobj2$layers[[layer]]$mapping)) {
shapedata = layer_data(ggplotobj2)
numbershapes = length(unique(shapedata$shape))
if(numbershapes > 3) {
warning("Non-solid shapes will not be projected to 3D.")
}
ggplotobj2$layers[[layer]]$geom$draw_key = drawkeyfunction_points
}
if("size" %in% names(ggplotobj2$layers[[layer]]$mapping)) {
ggplotobj2$layers[[layer]]$geom$draw_key = drawkeyfunction_points
}
if("alpha" %in% names(ggplotobj2$layers[[layer]]$mapping)) {
ggplotobj2$layers[[layer]]$geom$draw_key = drawkeyfunction_points
for(j in seq_len(length(ggplotobj2$layers))) {
if("stroke" %in% names(ggplotobj2$layers[[j]]$geom$default_aes)) {
ggplotobj2$layers[[j]]$geom$default_aes$stroke = 0
}
}
ggplotobj2 = suppressMessages({ggplotobj2 + scale_alpha_continuous(range=c(1,1))})
}
if("linetype" %in% names(ggplotobj2$layers[[layer]]$mapping)) {
ggplotobj2$layers[[layer]]$geom$draw_key = drawkeyfunction_lines
}
}
}
#No custom theme passed, just create one.
ggplotobj2 = ggplotobj2 +
theme(text = element_text(color="white"),
line = element_line(color="white"),
axis.line = element_line(color="white"),
axis.title = element_text(color="white"),
axis.text = element_text(color="white"),
axis.ticks = element_line(color="white"),
strip.background = element_rect(fill = "white", color = "white"),
strip.text = element_text(color="white"),
legend.key = element_rect(fill = "white", color = "white"),
legend.text = element_text(color="white"),
legend.background = element_rect(fill = "white", color = "white"),
legend.title = element_text(color="white"),
panel.background = element_rect(fill = "white", color = "white"))
}
if(height_aes == "fill") {
if(length(ggplotobj2$layers) > 0) {
for(i in seq_along(1:length(ggplotobj2$layers))) {
ggplotobj2$layers[[i]]$aes_params$size = NA
if(any(polygon_offset_geoms %in% class(ggplotobj2$layers[[layer]]$geom)) && offset_edges) {
ggplotobj2$layers[[i]]$aes_params$size = polygon_offset_value
ggplotobj2$layers[[i]]$aes_params$colour = "white"
}
}
}
} else {
if(length(ggplotobj2$layers) > 0) {
for(i in seq_along(1:length(ggplotobj2$layers))) {
ggplotobj2$layers[[i]]$aes_params$fill = "white"
if("GeomContour" %in% class(ggplotobj2$layers[[i]]$geom)) {
ggplotobj2$layers[[i]]$aes_params$alpha = 0
}
}
if(pointcontract != 1) {
for(i in 1:length(ggplotobj2$layers)) {
if(!is.null(ggplotobj2$layers[[i]]$aes_params$size)) {
ggplotobj2$layers[[i]]$aes_params$size = ggplotobj2$layers[[i]]$aes_params$size * pointcontract
} else {
ggplotobj2$layers[[i]]$geom$default_aes$size = ggplotobj2$layers[[i]]$geom$default_aes$size * pointcontract
}
}
}
}
}
tryCatch({
ggsave(paste0(heightmaptemp,".png"),ggplotobj2,width = width,height = height)
}, error = function(e) {
if(any(grepl("Error: Discrete value supplied to continuous scale", as.character(e),fixed = TRUE))) {
stop(paste0("Error: Discrete variable cannot be mapped to 3D. Did you mean to choose `",ifelse(height_aes == "fill","color","fill"), "` as the `height_aes`?"),call.=FALSE)
}
})
if(!is.null(reduce_size)) {
if(!("magick" %in% rownames(utils::installed.packages()))) {
stop("magick package required to use argument reduce_size")
} else {
if(length(reduce_size) == 1 && reduce_size < 1) {
scale = scale * reduce_size
image_info = magick::image_read(paste0(heightmaptemp,".png")) %>%
magick::image_info()
magick::image_read(paste0(heightmaptemp,".png")) %>%
magick::image_resize(paste0(image_info$width * reduce_size,"x",image_info$height * reduce_size)) %>%
magick::image_write(paste0(heightmaptemp,".png"))
magick::image_read(paste0(colormaptemp,".png")) %>%
magick::image_resize(paste0(image_info$width * reduce_size,"x",image_info$height * reduce_size)) %>%
magick::image_write(paste0(colormaptemp,".png"))
}
}
}
mapcolor = png::readPNG(paste0(colormaptemp,".png"))
mapheight = png::readPNG(paste0(heightmaptemp,".png"))
if(length(dim(mapheight)) == 3) {
mapheight = mapheight[,,1]
}
if(invert) {
mapheight = 1 - mapheight
}
if(raytrace) {
if(is.null(saved_shadow_matrix)) {
raylayer = ray_shade(1-t(mapheight),maxsearch = 600,sunangle = sunangle,anglebreaks = anglebreaks,
zscale=1/scale,multicore = multicore,lambert = lambert, ...)
if(!preview) {
mapcolor %>%
add_shadow(raylayer,shadow_intensity) %>%
plot_3d((t(1-mapheight)),zscale=1/scale, ... )
} else {
mapcolor %>%
add_shadow(raylayer,shadow_intensity) %>%
plot_map(keep_user_par = FALSE)
}
} else {
raylayer = saved_shadow_matrix
if(!preview) {
mapcolor %>%
add_shadow(raylayer,shadow_intensity) %>%
plot_3d((t(1-mapheight)),zscale=1/scale, ... )
} else {
mapcolor %>%
add_shadow(raylayer,shadow_intensity) %>%
plot_map(keep_user_par = FALSE)
}
}
} else {
if(!preview) {
plot_3d(mapcolor, (t(1-mapheight)), zscale=1/scale, ...)
} else {
plot_map(mapcolor, keep_user_par = FALSE)
}
}
if(save_shadow_matrix & !save_height_matrix) {
return(raylayer)
}
if(!save_shadow_matrix & save_height_matrix) {
return(1-t(mapheight))
}
if(save_shadow_matrix & save_height_matrix) {
return(list(1-t(mapheight),raylayer))
}
} |
# The goal is create book recommendation app with some statistics,
# metrics, information about users, their like history, recommendation
# history,etc.
# LIBRARIES ----
library(tidyverse)
# package below also has package recosystem, which has matrix factorization
# utility
library(recommenderlab)
# for better ggplots
library(ShinyItemAnalysis)
#DATA LOADING ----
# Book-Crossing dump: three ';'-separated CSVs (ratings, book metadata, users),
# all read with a UTF-8 locale. Column headers come from the first row.
book_ratings <- read_delim('BX-Book-Ratings.csv', col_names =TRUE,
delim = ';',locale = locale(encoding = 'UTF-8'))
books <- read_delim('BX-Books.csv', col_names =TRUE,delim = ';',
locale = locale(encoding = 'UTF-8'))
users <- read_delim('BX-Users.csv', col_names =TRUE,delim = ';',
locale = locale(encoding = 'UTF-8'))
#DATA STRUCTURE ----
# Quick structural overview of the three raw tables.
str(book_ratings)
str(books)
str(users)
head(book_ratings)
head(books)
head(users)
# The useful columns are the user id, location, book title/ISBN/author and the
# rating. Only ~10 ratings are NA (likely a recording error), but the files
# also contain literal 'NULL' strings -- count them per column.
# FIX: sapply over the data frame keeps each column's own type; the original
# apply(df, 2, ...) first coerces the whole data frame to a character matrix,
# which is the classic apply-on-data.frame anti-pattern (same counts here, but
# fragile and wasteful).
sapply(books, function(x) sum(x == 'NULL', na.rm = TRUE))
sapply(book_ratings, function(x) sum(x == 'NULL', na.rm = TRUE))
sapply(users, function(x) sum(x == 'NULL', na.rm = TRUE))
# Only the users' Age column contains 'NULL' values; age is not needed for the
# recommendation model itself, so this is acceptable.
# DATA PREPROCESSING ----
summary(books)
# Inspect the malformed rows: wherever `Year-Of-Publication` is NA the CSV was
# written badly -- the title contained ';', so author and year were shifted
# one column to the right.
View(books[is.na(books$`Year-Of-Publication`),])
nas<-books[is.na(books$`Year-Of-Publication`),]
# The real title and author are glued together in `Book-Title`, separated by ';'.
nas_list <- str_split(nas$`Book-Title`,';')
# Entries that split into a single piece carry no recoverable author -- drop them.
idx_del <- which(unlist(lapply(nas_list,function(x) length(x))) == 1)
nas_list <- nas_list[-idx_del]
# Piece 2 is the author (str_sub drops its leading character), piece 1 the title.
authors <- lapply(nas_list,function(x) str_sub(x[2],start = 2))
titles <- lapply(nas_list,function(x) x[1])
# Strip escape backslashes and the trailing character from the recovered titles.
titles <- lapply(titles,function(x) str_sub(gsub('\\\\+','',x),start = 1,end = -2))
# Shift the columns back into place: the year ended up in `Book-Author`.
nas$`Year-Of-Publication` <- nas$`Book-Author`
nas <- nas[-idx_del,]
nas$`Book-Author` <- unlist(authors)
nas$`Book-Title` <- unlist(titles)
# NOTE(review): row 163 is dropped by a magic index -- presumably one row that
# still failed to parse; confirm against the raw data before reusing this step.
nas <- nas[-163,]
# Write the repaired rows back (same -idx_del / -163 subsetting on both sides).
books[is.na(books$`Year-Of-Publication`),][-idx_del,][-163,] <- nas
books$`Year-Of-Publication` <- as.integer(books$`Year-Of-Publication`)
# Duplicate titles are messy: the same title can appear under different
# publishers or author spellings, and sometimes identical titles are genuinely
# different books (e.g. "Ranma 1/2 (Ranma 1/2)" covers several distinct
# volumes). For this project we assume duplicated (title, author) pairs are
# the same book.
View(books %>% filter(`Book-Title` == "Ranma 1/2 (Ranma 1/2)"))
# BUG FIX: grep() must be applied to the title column, not the whole data
# frame -- grep(pattern, df) coerces the data frame to a character vector of
# length ncol(df) (one deparsed column per element), so the row indices the
# original returned were meaningless.
View(books[grep('Ranma', books$`Book-Title`),])
View(books[grep('Harry Potter and', books$`Book-Title`),])
# A duplicated title does not even imply the same author:
View(books %>% filter(`Book-Title` == 'Selected Poems'))
# Some title strings are not valid UTF-8 -- declare the encoding first.
Encoding(books$`Book-Title`) <- 'UTF-8'
# Normalise author initials ("J. K. R" vs "J.K. R"): collapse any non-word
# character followed by whitespace into a single '.'.
books$`Book-Author` <- gsub('\\W\\s','.',books$`Book-Author`)
# Strip parenthesised series/subtitle info, keeping just the book title.
books$`Book-Title` <- gsub('\\(.*\\)','',books$`Book-Title`)
# Upper-case author names; iconv(sub = '') drops untranslatable bytes.
books$`Book-Author`<- toupper(iconv(books$`Book-Author`,'UTF-8',
'UTF-8',sub = ''))
# Trim whitespace from both ends of the cleaned titles.
books$`Book-Title` <-str_trim(iconv(books$`Book-Title`,'UTF-8',
'UTF-8',sub = ''),'both')
# Treat equal (title, author) pairs as the same book and drop duplicates.
books<-books[!duplicated(books[,c('Book-Title','Book-Author')]),]
# Keep only a restricted character set in the text columns.
# NOTE(review): in the Publisher pattern below, `!-@` is a character RANGE
# (ASCII 33-64, which includes digits and other punctuation), not the literal
# set {'!', '-', '@'} as in the two patterns above where '-' is last in the
# class -- confirm this is intended.
books$`Book-Title` <- gsub("[^a-zA-Z .:?!-]", "", books$`Book-Title`)
books$`Book-Author` <- gsub("[^a-zA-Z .:?!-]", "", books$`Book-Author`)
books$`Publisher` <- gsub("[^a-zA-Z .:?!-@]", "", books$`Publisher`)
# Remove NA's: a missing year of publication is not relevant for this
# analysis, but the 10 ratings that are NA should be deleted.
# Next, merge the datasets into one table holding the most important columns.
# merge_data <- book_ratings %>% inner_join(books)
# merge_data <- merge_data %>% dplyr::select(`User-ID`,`Book-Title`,`Book-Rating`)
# summary of all datasets
# here is the interesting thing, if we look no book title, for those data,
# there are moved year of publication and author into bad columns
# nas<-books[is.na(books$`Year-Of-Publication`),]
# nas_list <- str_split(nas$`Book-Title`,';')
# #every list which has only one component i will delete
#
# idx_del <- which(unlist(lapply(nas_list,function(x) length(x))) == 1)
# nas_list <- nas_list[-idx_del]
# authors <- lapply(nas_list,function(x) str_sub(x[2],start = 2))
# titles <- lapply(nas_list,function(x) x[1])
# titles <- lapply(titles,function(x) str_sub(gsub('\\\\+','',x),start = 1,end = -2))
# nas$`Year-Of-Publication` <- nas$`Book-Author`
# nas <- nas[-idx_del,]
# nas$`Book-Author` <- unlist(authors)
# nas$`Book-Title` <- unlist(titles)
# nas <- nas[-163,]
# books[is.na(books$`Year-Of-Publication`),][-idx_del,][-163,] <- nas
# books$`Year-Of-Publication` <- as.integer(books$`Year-Of-Publication`)
# Distribution of publication years, restricted to the plausible 1900-2020
# window (the raw data contains zero/garbage years).
books %>%
filter(between(`Year-Of-Publication`, 1900, 2020)) %>%
ggplot(aes(`Year-Of-Publication`)) +
geom_histogram(binwidth = 3, fill = 'blue', col = 'black') +
theme_app()
# Most books cluster around the year 2000; nothing remarkable beyond that.
summary(users)
# `Age` arrives as a character column because of the literal 'NULL' strings;
# coercing with as.integer turns those into NA (with a coercion warning).
users$Age <- as.integer(users$Age)
summary(users)
# ~110k ages are missing (NA/'NULL'), and some exceed 100 years -- clearly
# bad entries. Age is only used for descriptive statistics, not the model.
users[!is.na(users$Age),] %>% ggplot() + geom_boxplot(aes(y=Age)) +
theme_app()
users[!is.na(users$Age),] %>% ggplot() + geom_histogram(aes(Age),
fill = 'red',
col = 'black') +
theme_app()
# Treat every age >= 100 as unknown (set to NA).
# NOTE(review): as.tbl() is deprecated (as_tibble() is the replacement), and
# transform() mangles non-syntactic names (`User-ID` -> User.ID). The location
# step further down RELIES on the mangled User.ID column, so do not swap
# transform() for mutate() here without also updating that select().
users <- as.tbl(transform(users,
Age = ifelse(Age >= 100,NA,Age)))
users[!is.na(users$Age),] %>% ggplot() + geom_histogram(aes(Age),
fill = 'red',
col = 'black') +
theme_app()
# `Location` has the form "city, region, country" -- split it into columns.
# (User.ID -- with a dot -- exists here because transform() above mangled the
# original `User-ID` name.)
loc_list <- str_split(users$Location,', ')
city <- unlist(lapply(loc_list,function(x) x[1]))
# x[2]/x[3] yield NA when the location has fewer than 2/3 pieces.
region <- unlist(lapply(loc_list,function(x) x[2]))
country <- unlist(lapply(loc_list,function(x) x[3]))
users <- users %>% select(User.ID,Age) %>% mutate(city = city,
region = region,
country = country)
users$`User.ID` <- as.factor(users$`User.ID`)
colnames(users)[1] <- 'User-ID'
# FIX: map the column by name inside aes() instead of users$country -- using
# df$col in aes() bypasses the data argument and is a well-known ggplot2
# anti-pattern (breaks under faceting and produces ugly axis labels).
ggplot(users, aes(country)) + geom_bar()
# Where are the users from? Keep only the 20 most frequent countries.
frequency_states <- users %>% count(country,sort = TRUE)
frequency_states <- frequency_states[1:20,]$country
most_countries <- users %>% filter(country %in% frequency_states)
ggplot(most_countries, aes(country)) + geom_bar() +
theme_app() + theme(axis.text.x = element_text(size = 7, angle = 90))
# Most users are from the USA.
# Ratings: drop the ~10 rows whose rating is NA.
book_ratings <- book_ratings[!is.na(book_ratings$`Book-Rating`),]
# Merge ratings with the cleaned books table (inner join on the shared ISBN
# column). FIX: do the join once and select twice -- the original repeated the
# expensive inner_join to build two projections of the same result.
joined_ratings <- book_ratings %>% inner_join(books)
merge_data <- joined_ratings %>% dplyr::select(`User-ID`,`ISBN`,`Book-Rating`)
merge_data2 <- joined_ratings %>% dplyr::select(`User-ID`,`Book-Title`,`ISBN`,`Book-Rating`)
summary(merge_data)
# No anomalies remain. Of the original 1,149,780 ratings, 863,906 survive the
# join -- still plenty for EDA and modelling.
# Distribution of ratings (0 = implicit feedback dominates).
# FIX: refer to the column by name inside aes() rather than merge_data$...
ggplot(merge_data) + geom_histogram(aes(`Book-Rating`),
fill = 'green',
col = 'black') +
theme_app()
# A huge share of the ratings are 0 (implicit feedback) -- look at the split.
merge_data %>% count(`Book-Rating`)
# Explicit ratings only (1-10).
merge_data %>% filter(`Book-Rating` != 0) %>% ggplot() +
geom_histogram(aes(`Book-Rating`),binwidth = 1,
fill = 'yellow',
col = 'black') +
theme_app()
# Most explicit ratings are above 5; the median rating is 8.
merge_data %>% filter(`Book-Rating` != 0) %>% count(`Book-Rating`)
merge_data %>% filter(`Book-Rating` != 0) %>% summary()
# 20 most rated books (implicit + explicit combined).
# BUG FIX: merge_data only carries `User-ID`/`ISBN`/`Book-Rating`, so counting
# by `Book-Title` errored -- merge_data2 is the projection that keeps titles.
most_rated <- merge_data2 %>% count(`Book-Title`,sort = TRUE)
most_rated <- most_rated[1:20,]
most_rated <- merge_data2 %>% filter(`Book-Title` %in% most_rated$`Book-Title`)
# FIX: map the column by name in aes() (not most_rated$...), and label the
# axes/title for books -- these are books, not movies.
ggplot(most_rated, aes(`Book-Title`)) + geom_bar() + theme_app() +
theme(axis.text.x = element_text(size = 7,angle = 90)) +
xlab('Books') + ylab('Counts') + ggtitle('20 most rated books')
# Wild Animus is the most-rated book overall; whether it is also the
# best-rated requires excluding implicit (0) ratings, since a 0 does not
# signal a positive or negative experience.
# Is Wild Animus also top rated? Use explicit ratings only.
# BUG FIX: the original referenced `ratings_explicit`, which is only defined
# further down the script, and grouped by ISBN while plotting `Book-Title`.
# Build the explicit subset here from merge_data2 (which carries the titles).
ratings_explicit_eda <- merge_data2 %>% filter(`Book-Rating` != 0)
top_rated <- ratings_explicit_eda %>% group_by(`Book-Title`) %>%
summarise(sum_rating = sum(`Book-Rating`))
top_rated <- top_rated %>% arrange(desc(sum_rating))
top_rated <- top_rated[1:20,]
ggplot(top_rated, aes(`Book-Title`, sum_rating, fill = `Book-Title`)) +
geom_col() + theme_app() +
theme(axis.text.x = element_text(size = 7,angle = 90)) +
xlab('Books') + ylab('Total rating') + ggtitle('20 top rated books')
# The most-rated book is not the best-rated one.
# Per-title summary table: total points, average points, average reader age.
# NOTE(review): `User-ID` is numeric here but a factor in `users` -- confirm
# the join-key types match before relying on this table.
summarise_data <- ratings_explicit_eda %>% inner_join(users)
summarise_data %>% group_by(`Book-Title`) %>%
summarise(sum_rating = sum(`Book-Rating`,na.rm = T),
avg_rating = mean(`Book-Rating`,na.rm = T),
avg_age = mean(`Age`,na.rm = T),
n = n()
) %>% arrange(desc(sum_rating))
# Wild Animus gets many ratings but a fairly low average -- one more reason to
# keep explicit and implicit ratings separate, or the statistics get skewed.
# How many ratings exist per book and per user?
frequency_rating <- as.data.frame(table(book_ratings$`ISBN`))
freq_rating_users <- as.data.frame(table(book_ratings$`User-ID`))
# ~58,166 users rated just one book; sparse users are filtered out below.
# Split merge_data into the explicit (rating != 0) and implicit (rating == 0)
# sets; each becomes its own rating matrix for the two model variants.
rows_explicit <- which(merge_data$`Book-Rating` != 0)
ratings_implict <- merge_data[-rows_explicit,]
ratings_implict$`User-ID` <- as.factor(ratings_implict$`User-ID`)
ratings_implict$`ISBN` <- as.factor(ratings_implict$`ISBN`)
ratings_explicit <- merge_data[rows_explicit,]
# Drop users with 30 or fewer explicit ratings to densify the rating matrix.
idx_low_rates<-ratings_explicit %>% count(`User-ID`) %>% filter(n <= 30) %>%
select(`User-ID`)
idx_low_rates <- unique(idx_low_rates$`User-ID`)
ratings_explicit <- ratings_explicit %>% filter(!(`User-ID` %in% idx_low_rates))
#ratings_explicit <- ratings_explicit %>% filter(!(`User-ID` %in% c(11676,98391)))
# Factor levels of these two columns become the matrix dimnames later on.
ratings_explicit$`ISBN` <- as.factor(ratings_explicit$`ISBN`)
ratings_explicit$`User-ID` <- as.factor(ratings_explicit$`User-ID`)
# Top-20 books by total explicit rating, recomputed on the filtered
# ratings_explicit set (users with > 30 ratings).
# BUG FIX: top_rated is grouped by ISBN, so it has no `Book-Title` column --
# the original plot referenced top_rated$`Book-Title` and would error. Plot by
# ISBN instead (titles can be joined back from merge_data2 if needed).
top_rated <- ratings_explicit %>% group_by(`ISBN`) %>%
summarise(sum_rating = sum(`Book-Rating`))
top_rated <- top_rated %>% arrange(desc(sum_rating))
top_rated <- top_rated[1:20,]
ggplot(top_rated, aes(`ISBN`, sum_rating, fill = `ISBN`)) +
geom_col() + theme_app() +
theme(axis.text.x = element_text(size = 7,angle = 90)) +
xlab('Books') + ylab('Total rating') + ggtitle('20 top rated books')
# Per-book summary table: total points, average points, average reader age.
# BUG FIX: ratings_explicit carries no `Book-Title`, so group by ISBN here too.
summarise_data <- ratings_explicit %>% inner_join(users)
summarise_data %>% group_by(`ISBN`) %>%
summarise(sum_rating = sum(`Book-Rating`,na.rm = T),
avg_rating = mean(`Book-Rating`,na.rm = T),
avg_age = mean(`Age`,na.rm = T),
n = n()
) %>% arrange(desc(sum_rating))
# The most-rated book gets a fairly low average rating -- keep explicit and
# implicit ratings separate in the statistics, or the results get skewed.
# Build the two rating matrices (explicit and implicit). First the explicit
# model: map each `User-ID`/`ISBN` factor level to a row/column index.
# FIX: the multi-argument form group_indices(df, col) is deprecated in dplyr;
# both columns are already factors, so the integer level code gives the exact
# same index (groups are ordered by factor level, matching levels() below).
idx_users_exp <- as.integer(ratings_explicit$`User-ID`)
idx_books_exp <- as.integer(ratings_explicit$`ISBN`)
# Persist the index mappings and the filtered ratings for the Shiny app.
write.table(idx_users_exp,'idx_users_exp.txt', row.names = FALSE,
col.names = FALSE)
write.table(idx_books_exp,'idx_books_exp.txt', row.names = FALSE,
col.names = FALSE)
write_csv(ratings_explicit,'ratings_explicit.csv')
# Sparse user x book matrix of explicit ratings; dimnames are factor levels.
sparse_exp <- sparseMatrix(i = idx_users_exp,
j = idx_books_exp,
x = ratings_explicit$`Book-Rating`,
dimnames = list(levels(ratings_explicit$`User-ID`),
levels(ratings_explicit$`ISBN`)))
rating_exp <- as(sparse_exp, 'realRatingMatrix')
# Implicit-rating matrix built the same way.
# FIX: same dplyr group_indices() deprecation as above -- the columns are
# factors, so the integer level code is the group index.
idx_users_imp <- as.integer(ratings_implict$`User-ID`)
idx_books_imp <- as.integer(ratings_implict$`ISBN`)
# Sparse user x book matrix of implicit ratings.
# NOTE(review): every implicit rating is 0, so the matrix values carry no
# information on their own -- binarize(minRating = 0) below is meant to mark
# each observed (user, book) pair as 1; confirm the Matrix build keeps these
# explicit zeros rather than dropping them.
sparse_imp <- sparseMatrix(i = idx_users_imp,
j = idx_books_imp,
x = ratings_implict$`Book-Rating`,
dimnames = list(levels(ratings_implict$`User-ID`),
levels(ratings_implict$`ISBN`)))
rating_imp <- as(sparse_imp, 'realRatingMatrix')
rating_imp <- binarize(rating_imp,minRating = 0)
# MODEL ----
# Evaluation scheme: 80/20 user split; each test user reveals 30 ratings
# ('known'), the rest are held out ('unknown'); ratings >= 5 count as good.
e <- evaluationScheme(rating_exp,
train = 0.8, method = 'split', given = 30, goodRating = 5)
# User-based collaborative filtering with 20 nearest neighbours:
# top-5 recommendation lists plus full predicted ratings.
model_UBCF <- Recommender(getData(e,'train'), method = 'UBCF',
parameter = list(nn = 20))
recom_UBCF <- predict(model_UBCF,getData(e,'known'),5)
recom_UBCF2 <- predict(model_UBCF,getData(e,'known'),type = 'ratings')
# The same UBCF model with a smaller neighbourhood (nn = 10) for comparison.
model_UBCF_smaller <- Recommender(getData(e,'train'), method = 'UBCF',
parameter = list(nn = 10))
recom_UBCF_smaller <- predict(model_UBCF_smaller,getData(e,'known'),5)
recom_UBCF2_smaller <- predict(model_UBCF_smaller,getData(e,'known'),type = 'ratings')
# LIBMF matrix factorisation: 15 latent dimensions, 4 threads, L2 penalties.
model_LIBMF <- Recommender(getData(e,'train'), method = 'LIBMF',
parameter = list(dim = 15, nthread =4,
costp_l2 = 1,costq_l2 = 0.9))
recom_LIBMF <- predict(model_LIBMF,getData(e,'known'),type = 'ratings')
recom_LIBMF2 <- predict(model_LIBMF,getData(e,'known'),5)
# Funk SVD with 6 factors, up to 50 epochs.
model_svd <- Recommender(getData(e,'train'), method = 'SVDF', parameter= list(k = 6, max_epochs = 50, verbose = TRUE))
recom_svd <- predict(model_svd,getData(e,'known'),5)
recom_svdf2 <- predict(model_svd,getData(e,'known'),type = 'ratings')
# Per-user prediction accuracy (RMSE/MSE/MAE) on the held-out ratings,
# collected side by side for comparison.
err_ubcf <- calcPredictionAccuracy(recom_UBCF2,getData(e,'unknown'),byUser = TRUE)
err_ubcf_s <- calcPredictionAccuracy(recom_UBCF2_smaller,getData(e,'unknown'),byUser = TRUE)
err_libmf <- calcPredictionAccuracy(recom_LIBMF,getData(e,'unknown'),byUser = TRUE)
err_svdf <- calcPredictionAccuracy(recom_svdf2,getData(e,'unknown'),byUser = TRUE)
d <- cbind(err_libmf,err_ubcf,err_svdf,err_ubcf_s)
| /script_ds.r | no_license | tomasj12/Recommendation_system | R | false | false | 18,263 | r | # The goal is create book recommendation app with some statistics,
# metrics, information about users, their like history, recommendation
# history,etc.
# LIBRARIES ----
library(tidyverse)
# package below also has package recosystem, which has matrix factorization
# utility
library(recommenderlab)
# for better ggplots
library(ShinyItemAnalysis)
#DATA LOADING ----
book_ratings <- read_delim('BX-Book-Ratings.csv', col_names =TRUE,
delim = ';',locale = locale(encoding = 'UTF-8'))
books <- read_delim('BX-Books.csv', col_names =TRUE,delim = ';',
locale = locale(encoding = 'UTF-8'))
users <- read_delim('BX-Users.csv', col_names =TRUE,delim = ';',
locale = locale(encoding = 'UTF-8'))
#DATA STRUCTURE ----
str(book_ratings)
str(books)
str(users)
head(book_ratings)
head(books)
head(users)
# it seems, that only valuable information we have, are user-id,location,
# book title, isbn, and maybe book autho, and also rating
# also there are age missing values, but it should be estimated with regression
# it should be valuable information on book rating..
# there are not so much NA's values, there is only 10 missing ratings -
# it should be error in measure, or stg like that
# but i find some NULL values in dataset - what does it mean ?
# check number of NULL values in dataset
apply(books,2,function(x) sum(x == 'NULL',na.rm = TRUE))
apply(book_ratings,2,function(x) sum(x == 'NULL',na.rm = TRUE))
apply(users,2,function(x) sum(x == 'NULL',na.rm = TRUE))
# we see that only in age's are NULL values, but it should not be a
# problem, i think in this case, i do not really need age for this
# analysis
# DATA PREPROCESSING ----
summary(books)
View(books[is.na(books$`Year-Of-Publication`),])
# here is the interesting thing, if we look no book title, for those data,
# there are moved year of publication and author into bad columns
nas<-books[is.na(books$`Year-Of-Publication`),]
nas_list <- str_split(nas$`Book-Title`,';')
#every list which has only one component i will delete
idx_del <- which(unlist(lapply(nas_list,function(x) length(x))) == 1)
nas_list <- nas_list[-idx_del]
authors <- lapply(nas_list,function(x) str_sub(x[2],start = 2))
titles <- lapply(nas_list,function(x) x[1])
titles <- lapply(titles,function(x) str_sub(gsub('\\\\+','',x),start = 1,end = -2))
nas$`Year-Of-Publication` <- nas$`Book-Author`
nas <- nas[-idx_del,]
nas$`Book-Author` <- unlist(authors)
nas$`Book-Title` <- unlist(titles)
nas <- nas[-163,]
books[is.na(books$`Year-Of-Publication`),][-idx_del,][-163,] <- nas
books$`Year-Of-Publication` <- as.integer(books$`Year-Of-Publication`)
# the goal is to merge datasets into one
# in the main dataset, i need userid's,their ratings,book titles, ISBNS's
# maybe the location
# as i saw some anomalies in dataset, like lot of duplicates in names of the
# books, just because different name of author, or other publisher, or same name
# but the book has different story like e.g Ranma (Ranma 1/2) - there is like
# 6 rows with same name, but if you saw the picture of the books, there is
# different stories - but we have those books under other name..bad bad dataset
# e.g books %>% filter(`Book-Title` == "Ranma 1/2 (Ranma 1/2)")
# e.g books[grep('Harry Potter and',books),]
View(books %>% filter(`Book-Title` == "Ranma 1/2 (Ranma 1/2)"))
View(books[grep('Ranma',books),])
View(books[grep('Harry Potter and',books),])
# so by the case above, it won't be easy to remove all duplicates, but let's
# assume, that all duplicate values are just same book, but different publisher
# or just some anomaly in author name, or something like that
# again, some other example - also duplicates in book title, does not mean, that
# book is from the same author e.g : books %>% count(`Book-Title`,sort = TRUE)
# View(books %>% filter(`Book-Title` == 'Selected Poems'))
View(books %>% filter(`Book-Title` == 'Selected Poems'))
# some strings are not UTF-8 encoding
Encoding(books$`Book-Title`) <- 'UTF-8'
#some authors has initials with spaces, some not e.g J. K. R..; J.K. R..
books$`Book-Author` <- gsub('\\W\\s','.',books$`Book-Author`)
# keep just book title
books$`Book-Title` <- gsub('\\(.*\\)','',books$`Book-Title`)
# take every author names in upper case
books$`Book-Author`<- toupper(iconv(books$`Book-Author`,'UTF-8',
'UTF-8',sub = ''))
# remove white spaces from both sides of string
books$`Book-Title` <-str_trim(iconv(books$`Book-Title`,'UTF-8',
'UTF-8',sub = ''),'both')
# remove duplicates
books<-books[!duplicated(books[,c('Book-Title','Book-Author')]),]
books$`Book-Title` <- gsub("[^a-zA-Z .:?!-]", "", books$`Book-Title`)
books$`Book-Author` <- gsub("[^a-zA-Z .:?!-]", "", books$`Book-Author`)
books$`Publisher` <- gsub("[^a-zA-Z .:?!-@]", "", books$`Publisher`)
# remove NA's
# i think, if the year of publication is missing, is not relevant in our case
# but we see, that 10 NA's book ratings are here, so let's delete them
# okay right now, we need to merge dataset into one, with most important
# columns
# merge_data <- book_ratings %>% inner_join(books)
# merge_data <- merge_data %>% dplyr::select(`User-ID`,`Book-Title`,`Book-Rating`)
# summary of all datasets
# here is the interesting thing, if we look no book title, for those data,
# there are moved year of publication and author into bad columns
# nas<-books[is.na(books$`Year-Of-Publication`),]
# nas_list <- str_split(nas$`Book-Title`,';')
# #every list which has only one component i will delete
#
# idx_del <- which(unlist(lapply(nas_list,function(x) length(x))) == 1)
# nas_list <- nas_list[-idx_del]
# authors <- lapply(nas_list,function(x) str_sub(x[2],start = 2))
# titles <- lapply(nas_list,function(x) x[1])
# titles <- lapply(titles,function(x) str_sub(gsub('\\\\+','',x),start = 1,end = -2))
# nas$`Year-Of-Publication` <- nas$`Book-Author`
# nas <- nas[-idx_del,]
# nas$`Book-Author` <- unlist(authors)
# nas$`Book-Title` <- unlist(titles)
# nas <- nas[-163,]
# books[is.na(books$`Year-Of-Publication`),][-idx_del,][-163,] <- nas
# books$`Year-Of-Publication` <- as.integer(books$`Year-Of-Publication`)
# distribution of books - except zero
books %>% filter(between(`Year-Of-Publication`,1900,2020)) %>% ggplot() +
geom_histogram(aes(`Year-Of-Publication`),fill = 'blue',
binwidth = 3, col = 'black') +
theme_app()
# looks like lot of books are published near year 2000, but in this
# dataset is nothing really interesting
summary(users)
# age is defined as string column, because of NULL values - maybe
# bad data gathering, or something like that, or just user not
# fill his age
# convert to integers
users$Age <- as.integer(users$Age)
summary(users)
# here are more anomalies - about 110763 users are NA's, or NULL,
# also there are users whose has above 100 years - that's not real
# check distribution of age of users
users[!is.na(users$Age),] %>% ggplot() + geom_boxplot(aes(y=Age)) +
theme_app()
users[!is.na(users$Age),] %>% ggplot() + geom_histogram(aes(Age),
fill = 'red',
col = 'black') +
theme_app()
# we see there is lot of outliers, but in our future analysis, age
# will be not important, just for statistics, and tables, etc. but
# not for our model..lets assume, that every user with age above 100
# years, is old user,and sample age between(80,100)
users <- as.tbl(transform(users,
Age = ifelse(Age >= 100,NA,Age)))
users[!is.na(users$Age),] %>% ggplot() + geom_histogram(aes(Age),
fill = 'red',
col = 'black') +
theme_app()
# also there is some location - it is in format city, region, country
loc_list <- str_split(users$Location,', ')
city <- unlist(lapply(loc_list,function(x) x[1]))
region <- unlist(lapply(loc_list,function(x) x[2]))
country <- unlist(lapply(loc_list,function(x) x[3]))
users <- users %>% select(User.ID,Age) %>% mutate(city = city,
region = region,
country = country)
users$`User.ID` <- as.factor(users$`User.ID`)
colnames(users)[1] <- 'User-ID'
ggplot(users,aes(users$country)) + geom_bar()
#look from which country are our users - stupid statistic
frequency_states <- users %>% count(country,sort = TRUE)
frequency_states <- frequency_states[1:20,]$country
most_countries <- users %>% filter(country %in% frequency_states)
ggplot(most_countries,aes(most_countries$country)) + geom_bar() +
theme_app() + theme(axis.text.x = element_text(size = 7, angle = 90))
# most users are from USA
# let's look on ratings
book_ratings <- book_ratings[!is.na(book_ratings$`Book-Rating`),]
# okay right now, we need to merge dataset into one, with most important
# columns
merge_data <- book_ratings %>% inner_join(books)
merge_data <- merge_data %>% dplyr::select(`User-ID`,`ISBN`,`Book-Rating`)
merge_data2 <- book_ratings %>% inner_join(books)
merge_data2 <- merge_data2 %>% dplyr::select(`User-ID`,`Book-Title`,`ISBN`,`Book-Rating`)
summary(merge_data)
# there is no anomalies
# right now, it should be ok, to merge books with ratings, and consider
# those books - i do not say, that my way of doing this is correct, but
# let's see
#there was 1149780 of ratings, but right now we have 863906 which is
# still quite nice
# let's do some EDA here
# distribution of ratings
ggplot(merge_data) + geom_histogram(aes(merge_data$`Book-Rating`),
fill = 'green',
col = 'black') +
theme_app()
# there is a huge number of implicit ratings, let's divide those rankings
merge_data %>% count(`Book-Rating`)
#explicit ratings
merge_data %>% filter(`Book-Rating` != 0) %>% ggplot() +
geom_histogram(aes(`Book-Rating`),binwidth = 1,
fill = 'yellow',
col = 'black') +
theme_app()
# lot of users gave ratings above 5.0
merge_data %>% filter(`Book-Rating` != 0) %>% count(`Book-Rating`)
merge_data %>% filter(`Book-Rating` != 0) %>% summary()
# median is 8 - most used ratings
# 20 most rated films
most_rated <- merge_data %>% count(`Book-Title`,sort = TRUE)
most_rated <- most_rated[1:20,]
most_rated <- merge_data %>% filter(`Book-Title` %in% most_rated$`Book-Title`)
ggplot(most_rated,aes(most_rated$`Book-Title`)) + geom_bar() + theme_app() +
theme(axis.text.x = element_text(size = 7,angle = 90)) +
xlab('Movies') + ylab('Counts') + ggtitle('20 most rated movies')
# Wild Animus look like most rated movie - also implicit and explicit - it is
# also top rated ? For this, we should exclude implict ratings, because we do not
# know if implict rating mean positive experience with that book
# it is wild animus also top rated ?
#explicit rating
top_rated <- ratings_explicit %>% group_by(`ISBN`) %>%
summarise(sum_rating = sum(`Book-Rating`))
top_rated <- top_rated %>% arrange(desc(sum_rating))
top_rated <- top_rated[1:20,]
ggplot(top_rated,aes(top_rated$`Book-Title`, fill = top_rated$`Book-Title`)) +
geom_bar(aes(x = top_rated$`Book-Title`,y = top_rated$sum_rating),stat= 'identity') + theme_app() +
theme(axis.text.x = element_text(size = 7,angle = 90)) +
xlab('Movies') + ylab('Counts') + ggtitle('20 top rated movies')
# it seems that most rated movie, is not the best movie by users
# table of top 20 rated movies
# table: movie,total_points,average_points,top_country,avg_age
summarise_data <- ratings_explicit %>% inner_join(users)
summarise_data %>% group_by(`Book-Title`) %>%
summarise(sum_rating = sum(`Book-Rating`,na.rm = T),
avg_rating = mean(`Book-Rating`,na.rm = T),
avg_age = mean(`Age`,na.rm = T),
n = n()
) %>% arrange(desc(sum_rating))
# we see that most rated wild animus film get's quite low rating
# also it makes sense to divide dataset at explcit and implicit ratings
# because, if we consider boht ratings in one statitics, results should be
# skewed
#
frequency_rating <- as.data.frame(table(book_ratings$`ISBN`))
freq_rating_users <- as.data.frame(table(book_ratings$`User-ID`))
# there is lot of users, who are rate just one book - aroung 58166,
# that's lot of rows to remove..we will see
# ok ,let's try
# the last step needed is to create rating matrix
# creeate two rating matrix, for implicit model and for explicit model
rows_explicit <- which(merge_data$`Book-Rating` != 0)
ratings_implict <- merge_data[-rows_explicit,]
ratings_implict$`User-ID` <- as.factor(ratings_implict$`User-ID`)
ratings_implict$`ISBN` <- as.factor(ratings_implict$`ISBN`)
ratings_explicit <- merge_data[rows_explicit,]
idx_low_rates<-ratings_explicit %>% count(`User-ID`) %>% filter(n <= 30) %>%
select(`User-ID`)
idx_low_rates <- unique(idx_low_rates$`User-ID`)
ratings_explicit <- ratings_explicit %>% filter(!(`User-ID` %in% idx_low_rates))
#ratings_explicit <- ratings_explicit %>% filter(!(`User-ID` %in% c(11676,98391)))
ratings_explicit$`ISBN` <- as.factor(ratings_explicit$`ISBN`)
ratings_explicit$`User-ID` <- as.factor(ratings_explicit$`User-ID`)
top_rated <- ratings_explicit %>% group_by(`ISBN`) %>%
summarise(sum_rating = sum(`Book-Rating`))
top_rated <- top_rated %>% arrange(desc(sum_rating))
top_rated <- top_rated[1:20,]
ggplot(top_rated,aes(top_rated$`Book-Title`, fill = top_rated$`Book-Title`)) +
geom_bar(aes(x = top_rated$`Book-Title`,y = top_rated$sum_rating),stat= 'identity') + theme_app() +
theme(axis.text.x = element_text(size = 7,angle = 90)) +
xlab('Movies') + ylab('Counts') + ggtitle('20 top rated movies')
# it seems that most rated movie, is not the best movie by users
# table of top 20 rated movies
# table: movie,total_points,average_points,top_country,avg_age
summarise_data <- ratings_explicit %>% inner_join(users)
summarise_data %>% group_by(`Book-Title`) %>%
summarise(sum_rating = sum(`Book-Rating`,na.rm = T),
avg_rating = mean(`Book-Rating`,na.rm = T),
avg_age = mean(`Age`,na.rm = T),
n = n()
) %>% arrange(desc(sum_rating))
# we see that most rated wild animus film get's quite low rating
# also it makes sense to divide dataset at explcit and implicit ratings
# because, if we consider boht ratings in one statitics, results should be
# skewed
# create two matrix for explicit ratings and impicit ratings
# create index for mapping indices to books, users
# explicit rating model
idx_users_exp <- ratings_explicit %>% group_indices(`User-ID`)
idx_books_exp <- ratings_explicit %>% group_indices(`ISBN`)
write.table(idx_users_exp,'idx_users_exp.txt', row.names = FALSE,
col.names = FALSE)
write.table(idx_books_exp,'idx_books_exp.txt', row.names = FALSE,
col.names = FALSE)
write_csv(ratings_explicit,'ratings_explicit.csv')
# create sparse matrix for explicit ratings
sparse_exp <- sparseMatrix(i = idx_users_exp,
j = idx_books_exp,
x = ratings_explicit$`Book-Rating`,
dimnames = list(levels(ratings_explicit$`User-ID`),
levels(ratings_explicit$`ISBN`)))
rating_exp <- as(sparse_exp, 'realRatingMatrix')
# also create the same for implict ranking
idx_users_imp <- ratings_implict %>% group_indices(`User-ID`)
idx_books_imp <- ratings_implict %>% group_indices(`ISBN`)
# create sparse matrix for explicit ratings
sparse_imp <- sparseMatrix(i = idx_users_imp,
j = idx_books_imp,
x = ratings_implict$`Book-Rating`,
dimnames = list(levels(ratings_implict$`User-ID`),
levels(ratings_implict$`ISBN`)))
rating_imp <- as(sparse_imp, 'realRatingMatrix')
rating_imp <- binarize(rating_imp,minRating = 0)
# MODEL ----
#creating evaluation scheme
e <- evaluationScheme(rating_exp,
train = 0.8, method = 'split', given = 30, goodRating = 5)
model_UBCF <- Recommender(getData(e,'train'), method = 'UBCF',
parameter = list(nn = 20))
recom_UBCF <- predict(model_UBCF,getData(e,'known'),5)
recom_UBCF2 <- predict(model_UBCF,getData(e,'known'),type = 'ratings')
model_UBCF_smaller <- Recommender(getData(e,'train'), method = 'UBCF',
parameter = list(nn = 10))
recom_UBCF_smaller <- predict(model_UBCF_smaller,getData(e,'known'),5)
recom_UBCF2_smaller <- predict(model_UBCF_smaller,getData(e,'known'),type = 'ratings')
model_LIBMF <- Recommender(getData(e,'train'), method = 'LIBMF',
parameter = list(dim = 15, nthread =4,
costp_l2 = 1,costq_l2 = 0.9))
recom_LIBMF <- predict(model_LIBMF,getData(e,'known'),type = 'ratings')
recom_LIBMF2 <- predict(model_LIBMF,getData(e,'known'),5)
model_svd <- Recommender(getData(e,'train'), method = 'SVDF', parameter= list(k = 6, max_epochs = 50, verbose = TRUE))
recom_svd <- predict(model_svd,getData(e,'known'),5)
recom_svdf2 <- predict(model_svd,getData(e,'known'),type = 'ratings')
err_ubcf <- calcPredictionAccuracy(recom_UBCF2,getData(e,'unknown'),byUser = TRUE)
err_ubcf_s <- calcPredictionAccuracy(recom_UBCF2_smaller,getData(e,'unknown'),byUser = TRUE)
err_libmf <- calcPredictionAccuracy(recom_LIBMF,getData(e,'unknown'),byUser = TRUE)
err_svdf <- calcPredictionAccuracy(recom_svdf2,getData(e,'unknown'),byUser = TRUE)
d <- cbind(err_libmf,err_ubcf,err_svdf,err_ubcf_s)
|
\name{SyNet-package}
\alias{SyNet-package}
\alias{SyNet}
\docType{package}
\title{
Inference and Analysis of Sympatry Networks
}
\description{
Historical Biogeography focuses basically on sympatry patterns among species.
\bold{SyNet} extracts hypotheses of sympatry from available distributional
evidence and integrates them into weighted and binary sympatry networks. \bold{SyNet}
tests the adequacy of networks to be segregated into groups of species cohesively sympatric
(units of co-occurrence). It identifies units of co-occurrence from the more
inclusive network where they are embedded. NAM algorithm iteratively removes
intermediary species that are linked to those units otherwise disconnected. Here,
you can do among other things: i) to analyze the spatial affinity between sets
of records and ii) to explore dynamically the cleavogram associated to the analysis
of sympatry networks. You can track many new ideas on numerical classification
throughout the examples of functions. Remarkably, geographical data associated
to any network partitioning can be exported to KML files which can be opened via
Google Earth.
}
\details{
\tabular{ll}{
Package: \tab SyNet\cr
Type: \tab Package\cr
Version: \tab 2.0\cr
Date: \tab 2011-11-11\cr
License: \tab GPL\cr
}
}
\author{
Daniel A. Dos Santos <dadossantos@csnat.unt.edu.ar>
}
\references{
Dos Santos D.A., Fernandez H.R., Cuezzo M.G., Dominguez E.
2008.\emph{ Sympatry Inference and Network Analysis in Biogeography.}
Systematic Biology 57:432-448.
Dos Santos D.A., Cuezzo M.G., Reynaga M.C., Dominguez E. 2011. \emph{Sympatry
Inference and Network Analysis in Biogeography}. Systematic Biology (in press)
}
\examples{
data(sciobius2x2)
# Derive a sympatry matrix from distributional table.
# Entries equal to 1 mean co-extensive sympatry.
infer <- gridinfer(dntable = sciobius2x2)
# Run NAM method on the previously inferred sympatry network
outnam <- nam(infer)
# Extract the sympatry network with elements optimally rearranged
# according to the NAM sequence of splitting events.
sm <- outnam$mt
# Plot the original network
forgraph <- seq(0, by = 2*pi/47, length = 47)
xcoord <- cos(forgraph)
ycoord <- sin(forgraph)
plot(xcoord, ycoord, main = "SCIOBIUS Sympatry Network",
xlab = "", ylab = "", pch = 19, col = 2, cex = 1.3, axes = FALSE)
text (xcoord*1.05, ycoord*1.05, outnam$leaves)
for (i in 1:46)
for (j in (i+1):47)
if(sm[i,j] > 0) lines (c(xcoord[i], xcoord[j]), c(ycoord[i], ycoord[j]))
mtext("Subnetwork 0 (Original network)", side = 1, line = 2, col = 4)
# Next, we define a control window to go from initial network to the last sub-network.
# Here, you can visualize the intrinsic dynamic associated to the iterative removal
# of intermediary species.
subnet <- 0
onClick <- function(action) {
if(action == 1) subnet <<- 0
if(action == 2) subnet <<- pmax(0, subnet - 1)
if(action == 3) subnet <<- pmin(outnam$nsub - 1, subnet + 1)
if(action == 4) subnet <<- outnam$nsub - 1
notremoved <- outnam$LastNet >= subnet
plot(xcoord, ycoord, main = "SCIOBIUS Sympatry Network",
xlab = "", ylab = "", type = "n", axes = FALSE)
points(xcoord[notremoved], ycoord[notremoved], pch = 19, col = 2)
text (xcoord[notremoved]*1.05, ycoord[notremoved]*1.05, outnam$leaves[notremoved])
for (i in 1:(outnam$nsp - 1)){
if(!notremoved[i]) next
for (j in (i+1):outnam$nsp) {
if(!notremoved[j]) next
if(sm[i,j] > 0) lines (c(xcoord[i], xcoord[j]), c(ycoord[i], ycoord[j]))
}
}
mtext(paste("Subnetwork", subnet), side = 1, line = 2, col = 4)
}
\dontrun{
tt <- tktoplevel()
tkwm.title(tt, "Evolution of NAM ...")
but1 <- tkbutton(tt, text = "<<", command = function(...) onClick(1), padx = 20)
but2 <- tkbutton(tt, text = "<", command = function(...) onClick(2), padx = 20)
but3 <- tkbutton(tt, text = ">", command = function(...) onClick(3), padx = 20)
but4 <- tkbutton(tt, text = ">>", command = function(...) onClick(4), padx = 20)
tkgrid(tklabel(tt, text = "*** Change Sub-Network ***", font = "Times 16", foreground = "blue"),
columnspan = 4)
tkgrid(but1, but2, but3, but4)
#Finally, type the following command to see the cleavogram called 'outnam':
cleavogram()
}
}
\keyword{ package } | /man/SyNet-package.Rd | no_license | cran/SyNet | R | false | false | 4,546 | rd | \name{SyNet-package}
\alias{SyNet-package}
\alias{SyNet}
\docType{package}
\title{
Inference and Analysis of Sympatry Networks
}
\description{
Historical Biogeography focuses basically on sympatry patterns among species.
\bold{SyNet} extracts hypotheses of sympatry from available distributional
evidence and integrates them into weighted and binary sympatry networks. \bold{SyNet}
tests the adequacy of networks to be segregated into groups of species cohesively sympatric
(units of co-occurrence). It identifies units of co-occurrence from the more
inclusive network where they are embedded. NAM algorithm iteratively removes
intermediary species that are linked to those units otherwise disconnected. Here,
you can do among other things: i) to analyze the spatial affinity between sets
of records and ii) to explore dynamically the cleavogram associated to the analysis
of sympatry networks. You can track many new ideas on numerical classification
throughout the examples of functions. Remarkably, geographical data associated
to any network partitioning can be exported to KML files which can be opened via
Google Earth.
}
\details{
\tabular{ll}{
Package: \tab SyNet\cr
Type: \tab Package\cr
Version: \tab 2.0\cr
Date: \tab 2011-11-11\cr
License: \tab GPL\cr
}
}
\author{
Daniel A. Dos Santos <dadossantos@csnat.unt.edu.ar>
}
\references{
Dos Santos D.A., Fernandez H.R., Cuezzo M.G., Dominguez E.
2008.\emph{ Sympatry Inference and Network Analysis in Biogeography.}
Systematic Biology 57:432-448.
Dos Santos D.A., Cuezzo M.G., Reynaga M.C., Dominguez E. 2011. \emph{Sympatry
Inference and Network Analysis in Biogeography}. Systematic Biology (in press)
}
\examples{
data(sciobius2x2)
# Derive a sympatry matrix from distributional table.
# Entries equal to 1 mean co-extensive sympatry.
infer <- gridinfer(dntable = sciobius2x2)
# Run NAM method on the previously inferred sympatry network
outnam <- nam(infer)
# Extract the sympatry network with elements optimally rearranged
# according to the NAM sequence of splitting events.
sm <- outnam$mt
# Plot the original network
forgraph <- seq(0, by = 2*pi/47, length = 47)
xcoord <- cos(forgraph)
ycoord <- sin(forgraph)
plot(xcoord, ycoord, main = "SCIOBIUS Sympatry Network",
xlab = "", ylab = "", pch = 19, col = 2, cex = 1.3, axes = FALSE)
text (xcoord*1.05, ycoord*1.05, outnam$leaves)
for (i in 1:46)
for (j in (i+1):47)
if(sm[i,j] > 0) lines (c(xcoord[i], xcoord[j]), c(ycoord[i], ycoord[j]))
mtext("Subnetwork 0 (Original network)", side = 1, line = 2, col = 4)
# Next, we define a control window to go from initial network to the last sub-network.
# Here, you can visualize the intrinsic dynamic associated to the iterative removal
# of intermediary species.
subnet <- 0
onClick <- function(action) {
if(action == 1) subnet <<- 0
if(action == 2) subnet <<- pmax(0, subnet - 1)
if(action == 3) subnet <<- pmin(outnam$nsub - 1, subnet + 1)
if(action == 4) subnet <<- outnam$nsub - 1
notremoved <- outnam$LastNet >= subnet
plot(xcoord, ycoord, main = "SCIOBIUS Sympatry Network",
xlab = "", ylab = "", type = "n", axes = FALSE)
points(xcoord[notremoved], ycoord[notremoved], pch = 19, col = 2)
text (xcoord[notremoved]*1.05, ycoord[notremoved]*1.05, outnam$leaves[notremoved])
for (i in 1:(outnam$nsp - 1)){
if(!notremoved[i]) next
for (j in (i+1):outnam$nsp) {
if(!notremoved[j]) next
if(sm[i,j] > 0) lines (c(xcoord[i], xcoord[j]), c(ycoord[i], ycoord[j]))
}
}
mtext(paste("Subnetwork", subnet), side = 1, line = 2, col = 4)
}
\dontrun{
tt <- tktoplevel()
tkwm.title(tt, "Evolution of NAM ...")
but1 <- tkbutton(tt, text = "<<", command = function(...) onClick(1), padx = 20)
but2 <- tkbutton(tt, text = "<", command = function(...) onClick(2), padx = 20)
but3 <- tkbutton(tt, text = ">", command = function(...) onClick(3), padx = 20)
but4 <- tkbutton(tt, text = ">>", command = function(...) onClick(4), padx = 20)
tkgrid(tklabel(tt, text = "*** Change Sub-Network ***", font = "Times 16", foreground = "blue"),
columnspan = 4)
tkgrid(but1, but2, but3, but4)
#Finally, type the following command to see the cleavogram called 'outnam':
cleavogram()
}
}
\keyword{ package } |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simulation.R
\name{bayes}
\alias{bayes}
\title{Bayesian integration where the reliability of the advisor is the probability
the advisor agrees given we were correct.}
\usage{
bayes(initial, advice, weight, compression = 0.05)
}
\arguments{
\item{initial}{vector of initial decisions}
\item{advice}{vector of advisory estimates}
\item{weight}{trust rating for the advisor}
\item{compression}{whether to limit extreme values to c(x,1-x)}
}
\description{
Bayesian integration where the reliability of the advisor is the probability
the advisor agrees given we were correct.
}
\details{
Uses a Bayesian integration formula where
\deqn{c_2 = \frac{c_1 * t}{c_1 * t + (1-c_1)(1-t)}}{c2 = (c1*t)/(c1*t + (1-c1)(1-t))}
\eqn{c_2}{c2} is the final confidence (returned as a vector), and \eqn{c_1}{c1} the initial
confidence.
\eqn{t} is the probability of the advisor's advice given the initial decision
was correct. Where the advisor agrees, this is simply the trust we have in
the advisor (an advisor we trusted 100\% would always be expected to give the
same answer we did). Where the advisor disagrees, this is the opposite (we
consider it very unlikely a highly trusted advisor disagrees with us if we
are right).
}
| /man/bayes.Rd | permissive | oxacclab/adviseR | R | false | true | 1,291 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simulation.R
\name{bayes}
\alias{bayes}
\title{Bayesian integration where the reliability of the advisor is the probability
the advisor agrees given we were correct.}
\usage{
bayes(initial, advice, weight, compression = 0.05)
}
\arguments{
\item{initial}{vector of initial decisions}
\item{advice}{vector of advisory estimates}
\item{weight}{trust rating for the advisor}
\item{compression}{whether to limit extreme values to c(x,1-x)}
}
\description{
Bayesian integration where the reliability of the advisor is the probability
the advisor agrees given we were correct.
}
\details{
Uses a Bayesian integration formula where
\deqn{c_2 = \frac{c_1 * t}{c_1 * t + (1-c_1)(1-t)}}{c2 = (c1*t)/(c1*t + (1-c1)(1-t))}
\eqn{c_2}{c2} is the final confidence (returned as a vector), and \eqn{c_1}{c1} the initial
confidence.
\eqn{t} is the probability of the advisor's advice given the initial decision
was correct. Where the advisor agrees, this is simply the trust we have in
the advisor (an advisor we trusted 100\% would always be expected to give the
same answer we did). Where the advisor disagrees, this is the opposite (we
consider it very unlikely a highly trusted advisor disagrees with us if we
are right).
}
|
\name{ABCcurve}
\alias{ABCcurve}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{calculates ABC Curve
}
\description{
Calculates cumulative percentage of largest data (effort) and cumulative percentages of sum of largest Data (yield)
with spline interpolation (second order, piecewise) of values in-between.
}
\usage{
ABCcurve(Data, p)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{Data}{
vector[1:n] describes an array of data: n cases in rows of one variable
}
\item{p}{
optional, an vector of values specifying where interpolation takes place, created by \code{\link{seq}} of package base}
}
\value{
Output is of type list which parts are described in the following
\item{Curve}{
A list with
\code{Effort}:vector [1:k], cumulative population in percent
\code{Yield}: vector [1:k], cumulative high data in percent
}
\item{CleanedData}{vector [1:m], columnvector containing Data>=0 and zeros for all NA, NaN and negative values in Data(1:n)}
\item{Slope}{
A list with
\code{p}: X-values for spline interpolation, defualt: p = (0:0.01:1)
\code{dABC}: first deviation of the functio ABC(p)=Effort(Yield
}
}
\author{
Michael Thrun
\url{http://www.uni-marburg.de/fb12/datenbionik}
}
\references{
Ultsch. A ., Lotsch J.: Computed ABC Analysis for Rational Selection of Most Informative Variables in Multivariate Data, PloS one, Vol. 10(6), pp. e0129767. doi 10.1371/journal.pone.0129767, 2015.
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ABCcurve}
\keyword{Lorenz curve}
\keyword{Lorenz}% __ONLY ONE__ keyword per line
\keyword{ABC curve}% __ONLY ONE__ keyword per line
| /man/ABCcurve.Rd | no_license | Mthrun/ABCanalysis | R | false | false | 1,721 | rd | \name{ABCcurve}
\alias{ABCcurve}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{calculates ABC Curve
}
\description{
Calculates cumulative percentage of largest data (effort) and cumulative percentages of sum of largest Data (yield)
with spline interpolation (second order, piecewise) of values in-between.
}
\usage{
ABCcurve(Data, p)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{Data}{
vector[1:n] describes an array of data: n cases in rows of one variable
}
\item{p}{
optional, an vector of values specifying where interpolation takes place, created by \code{\link{seq}} of package base}
}
\value{
Output is of type list which parts are described in the following
\item{Curve}{
A list with
\code{Effort}:vector [1:k], cumulative population in percent
\code{Yield}: vector [1:k], cumulative high data in percent
}
\item{CleanedData}{vector [1:m], columnvector containing Data>=0 and zeros for all NA, NaN and negative values in Data(1:n)}
\item{Slope}{
A list with
\code{p}: X-values for spline interpolation, defualt: p = (0:0.01:1)
\code{dABC}: first deviation of the functio ABC(p)=Effort(Yield
}
}
\author{
Michael Thrun
\url{http://www.uni-marburg.de/fb12/datenbionik}
}
\references{
Ultsch. A ., Lotsch J.: Computed ABC Analysis for Rational Selection of Most Informative Variables in Multivariate Data, PloS one, Vol. 10(6), pp. e0129767. doi 10.1371/journal.pone.0129767, 2015.
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ABCcurve}
\keyword{Lorenz curve}
\keyword{Lorenz}% __ONLY ONE__ keyword per line
\keyword{ABC curve}% __ONLY ONE__ keyword per line
|
\name{NISTstatcoulombTOcoulomb}
\alias{NISTstatcoulombTOcoulomb}
\title{Convert statcoulomb to coulomb }
\usage{NISTstatcoulombTOcoulomb(statcoulomb)}
\description{\code{NISTstatcoulombTOcoulomb} converts from statcoulomb to coulomb (C) }
\arguments{
\item{statcoulomb}{statcoulomb }
}
\value{coulomb (C) }
\source{
National Institute of Standards and Technology (NIST), 2014
NIST Guide to SI Units
B.8 Factors for Units Listed Alphabetically
\url{http://physics.nist.gov/Pubs/SP811/appenB8.html}
}
\references{
National Institute of Standards and Technology (NIST), 2014
NIST Guide to SI Units
B.8 Factors for Units Listed Alphabetically
\url{http://physics.nist.gov/Pubs/SP811/appenB8.html}
}
\author{Jose Gama}
\examples{
NISTstatcoulombTOcoulomb(10)
}
\keyword{programming} | /man/NISTstatcoulombTOcoulomb.Rd | no_license | cran/NISTunits | R | false | false | 779 | rd | \name{NISTstatcoulombTOcoulomb}
\alias{NISTstatcoulombTOcoulomb}
\title{Convert statcoulomb to coulomb }
\usage{NISTstatcoulombTOcoulomb(statcoulomb)}
\description{\code{NISTstatcoulombTOcoulomb} converts from statcoulomb to coulomb (C) }
\arguments{
\item{statcoulomb}{statcoulomb }
}
\value{coulomb (C) }
\source{
National Institute of Standards and Technology (NIST), 2014
NIST Guide to SI Units
B.8 Factors for Units Listed Alphabetically
\url{http://physics.nist.gov/Pubs/SP811/appenB8.html}
}
\references{
National Institute of Standards and Technology (NIST), 2014
NIST Guide to SI Units
B.8 Factors for Units Listed Alphabetically
\url{http://physics.nist.gov/Pubs/SP811/appenB8.html}
}
\author{Jose Gama}
\examples{
NISTstatcoulombTOcoulomb(10)
}
\keyword{programming} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ClaytonGumbelCopula.R
\docType{class}
\name{surGumbelCopula-class}
\alias{surGumbelCopula-class}
\alias{dduCopula,matrix,surGumbelCopula-method}
\alias{dduCopula,numeric,surGumbelCopula-method}
\alias{ddvCopula,matrix,surGumbelCopula-method}
\alias{ddvCopula,numeric,surGumbelCopula-method}
\alias{r90GumbelCopula-class}
\alias{dduCopula,matrix,r90GumbelCopula-method}
\alias{dduCopula,numeric,r90GumbelCopula-method}
\alias{ddvCopula,matrix,r90GumbelCopula-method}
\alias{ddvCopula,numeric,r90GumbelCopula-method}
\alias{r270GumbelCopula-class}
\alias{dduCopula,matrix,r270GumbelCopula-method}
\alias{dduCopula,numeric,r270GumbelCopula-method}
\alias{ddvCopula,matrix,r270GumbelCopula-method}
\alias{ddvCopula,numeric,r270GumbelCopula-method}
\title{Classes \code{"surGumbelCopula"}, \code{"r90GumbelCopula"} and
\code{"r270GumbelCopula"}}
\description{
A class representing rotated versions of the Gumbel copula family (survival,
90 and 270 degree rotated).
}
\section{Objects from the Class}{
Objects can be created by calls of the form
\code{new("surGumbelCopula", ...)}, \code{new("r90GumbelCopula", ...)} and
\code{new("r270GumbelCopula", ...)} or by the function
\code{\link{surGumbelCopula}}, \code{\link{r90GumbelCopula}} and
\code{\link{r270GumbelCopula}} respectively.
}
\examples{
library(copula)
persp(surGumbelCopula(1.5),dCopula,zlim=c(0,10))
persp(r90GumbelCopula(-1.5),dCopula,zlim=c(0,10))
persp(r270GumbelCopula(-1.5),dCopula,zlim=c(0,10))
}
\seealso{
\code{\link{VineCopula-package}}
}
\author{
Benedikt Graeler
}
\keyword{classes}
| /man/surGumbelCopula-class.Rd | no_license | tvatter/VineCopula | R | false | true | 1,635 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ClaytonGumbelCopula.R
\docType{class}
\name{surGumbelCopula-class}
\alias{surGumbelCopula-class}
\alias{dduCopula,matrix,surGumbelCopula-method}
\alias{dduCopula,numeric,surGumbelCopula-method}
\alias{ddvCopula,matrix,surGumbelCopula-method}
\alias{ddvCopula,numeric,surGumbelCopula-method}
\alias{r90GumbelCopula-class}
\alias{dduCopula,matrix,r90GumbelCopula-method}
\alias{dduCopula,numeric,r90GumbelCopula-method}
\alias{ddvCopula,matrix,r90GumbelCopula-method}
\alias{ddvCopula,numeric,r90GumbelCopula-method}
\alias{r270GumbelCopula-class}
\alias{dduCopula,matrix,r270GumbelCopula-method}
\alias{dduCopula,numeric,r270GumbelCopula-method}
\alias{ddvCopula,matrix,r270GumbelCopula-method}
\alias{ddvCopula,numeric,r270GumbelCopula-method}
\title{Classes \code{"surGumbelCopula"}, \code{"r90GumbelCopula"} and
\code{"r270GumbelCopula"}}
\description{
A class representing rotated versions of the Gumbel copula family (survival,
90 and 270 degree rotated).
}
\section{Objects from the Class}{
Objects can be created by calls of the form
\code{new("surGumbelCopula", ...)}, \code{new("r90GumbelCopula", ...)} and
\code{new("r270GumbelCopula", ...)} or by the function
\code{\link{surGumbelCopula}}, \code{\link{r90GumbelCopula}} and
\code{\link{r270GumbelCopula}} respectively.
}
\examples{
library(copula)
persp(surGumbelCopula(1.5),dCopula,zlim=c(0,10))
persp(r90GumbelCopula(-1.5),dCopula,zlim=c(0,10))
persp(r270GumbelCopula(-1.5),dCopula,zlim=c(0,10))
}
\seealso{
\code{\link{VineCopula-package}}
}
\author{
Benedikt Graeler
}
\keyword{classes}
|
readCdfUnitNames <- function(filename, units=NULL, verbose=0) {
  # Read the unit names of an Affymetrix CDF file.
  #
  # Args:
  #   filename: Path to the CDF file; must exist.
  #   units: NULL to read all unit names, otherwise a numeric vector of
  #          one-based unit indices to read.
  #   verbose: A single integer; larger values make the native parser
  #            more verbose.
  #
  # Returns:
  #   The unit names as returned by the native 'affxparser' routine.
  #   Throws an error if the file is missing, an argument is invalid,
  #   or the native call returns NULL.

  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Validate arguments
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Argument 'filename':
  filename <- file.path(dirname(filename), basename(filename));
  if (!file.exists(filename))
    stop("File not found: ", filename);

  # Argument 'units':
  if (!is.null(units)) {
    if (is.numeric(units)) {
      units <- as.integer(units);
      if (any(units < 1))
        stop("Argument 'units' contains non-positive indices.");
    } else {
      stop("Argument 'units' must be numeric or NULL: ", class(units)[1]);
    }
  }

  # Argument 'verbose':
  # BUG FIX: these two error messages previously blamed argument 'units'.
  if (length(verbose) != 1)
    stop("Argument 'verbose' must be a single integer.");
  verbose <- as.integer(verbose);
  if (!is.finite(verbose))
    stop("Argument 'verbose' must be an integer: ", verbose);

  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Read the CDF file
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  res <- .Call("R_affx_get_cdf_unit_names", filename, units, verbose,
               PACKAGE="affxparser");

  # Sanity check that the native code did not return NULL.
  if (is.null(res)) {
    stop("Failed to read unit names from CDF file: ", filename);
  }

  res;
} # readCdfUnitNames()
############################################################################
# HISTORY:
# 2011-11-18
# o ROBUSTNESS: Added sanity check that the native code did not return NULL.
# 2006-03-28
# o Unit indices are now one-based. /HB
############################################################################
| /R/readCdfUnitNames.R | no_license | deepstop/affxparser | R | false | false | 1,647 | r | readCdfUnitNames <- function(filename, units=NULL, verbose=0) {
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Validate arguments
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Argument 'filename':
filename <- file.path(dirname(filename), basename(filename));
if (!file.exists(filename))
stop("File not found: ", filename);
# Argument 'units':
if (is.null(units)) {
} else if (is.numeric(units)) {
units <- as.integer(units);
if (any(units < 1))
stop("Argument 'units' contains non-positive indices.");
} else {
stop("Argument 'units' must be numeric or NULL: ", class(units)[1]);
}
# Argument 'verbose':
if (length(verbose) != 1)
stop("Argument 'units' must be a single integer.");
verbose <- as.integer(verbose);
if (!is.finite(verbose))
stop("Argument 'units' must be an integer: ", verbose);
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Read the CDF file
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
res <- .Call("R_affx_get_cdf_unit_names", filename, units, verbose,
PACKAGE="affxparser");
# Sanity check
if (is.null(res)) {
stop("Failed to read unit names from CDF file: ", filename);
}
res;
} # readCdfUnitNames()
############################################################################
# HISTORY:
# 2011-11-18
# o ROBUSTNESS: Added sanity check that the native code did not return NULL.
# 2006-03-28
# o Unit indices are now one-based. /HB
############################################################################
|
% Generated by roxygen2 (4.0.0): do not edit by hand
\name{publishGitbook}
\alias{publishGitbook}
\title{Publish the built gitbook to Github.}
\usage{
publishGitbook(repo, out.dir = paste0(getwd(), "/_book"),
message = "Update built gitbook")
}
\arguments{
\item{repo}{the github repository. Should be of form username/repository}
\item{out.dir}{location of the built gitbook.}
\item{message}{commit message.}
}
\description{
Note that this is a wrapper to system \code{git} call.
}
\details{
This function assumes that the repository already exists on Github.
Thanks to ramnathv for the shell script.
https://github.com/GitbookIO/gitbook/issues/106#issuecomment-40747887
}
| /man/publishGitbook.Rd | no_license | swhgoon/Rgitbook | R | false | false | 684 | rd | % Generated by roxygen2 (4.0.0): do not edit by hand
\name{publishGitbook}
\alias{publishGitbook}
\title{Publish the built gitbook to Github.}
\usage{
publishGitbook(repo, out.dir = paste0(getwd(), "/_book"),
message = "Update built gitbook")
}
\arguments{
\item{repo}{the github repository. Should be of form username/repository}
\item{out.dir}{location of the built gitbook.}
\item{message}{commit message.}
}
\description{
Note that this is a wrapper to system \code{git} call.
}
\details{
This function assumes that the repository already exists on Github.
Thanks to ramnathv for the shell script.
https://github.com/GitbookIO/gitbook/issues/106#issuecomment-40747887
}
|
# plot3.R -- "Energy sub metering" line chart; the column names suggest the
# household power consumption dataset (confirm against the input file).
project1<-read.table("hpc070201070202.txt", sep=";",header=TRUE)
head(project1)
# Rebuild a proper date-time from the separate Date and Time columns.
project1$Date<-as.Date(project1$Date, format = "%d/%m/%Y")
project1$timetemp<-paste(project1$Date, project1$Time)
project1$Time<-strptime(project1$timetemp, format = "%Y-%m-%d %H:%M:%S")
project1$weekdays<-weekdays(as.Date(project1$Date))
# Coerce the metering columns to numeric.
# NOTE(review): if the raw file encodes missing values as e.g. "?", these
# columns are read as character and as.numeric() silently yields NA --
# consider read.table(..., na.strings = "?"); confirm against the data file.
globalactivepower<-as.numeric(project1$Global_active_power)
submetering1<-as.numeric(project1$Sub_metering_1)
submetering2<-as.numeric(project1$Sub_metering_2)
submetering3<-as.numeric(project1$Sub_metering_3)
# Draw the three sub-metering series into a 480x480 PNG and close the device.
png('plot3.png', width=480, height=480)
plot(project1$Time, submetering1, type="l", ylab="Energy Sub metering", xlab="")
lines(project1$Time, submetering2, type="l", col="red")
lines(project1$Time, submetering3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),lty=1, lwd=3, col=c("black", "red", "blue"))
dev.off()
| /plot3.R | no_license | as335/ExData_Plotting1 | R | false | false | 902 | r | project1<-read.table("hpc070201070202.txt", sep=";",header=TRUE)
head(project1)
project1$Date<-as.Date(project1$Date, format = "%d/%m/%Y")
project1$timetemp<-paste(project1$Date, project1$Time)
project1$Time<-strptime(project1$timetemp, format = "%Y-%m-%d %H:%M:%S")
project1$weekdays<-weekdays(as.Date(project1$Date))
globalactivepower<-as.numeric(project1$Global_active_power)
submetering1<-as.numeric(project1$Sub_metering_1)
submetering2<-as.numeric(project1$Sub_metering_2)
submetering3<-as.numeric(project1$Sub_metering_3)
png('plot3.png', width=480, height=480)
plot(project1$Time, submetering1, type="l", ylab="Energy Sub metering", xlab="")
lines(project1$Time, submetering2, type="l", col="red")
lines(project1$Time, submetering3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),lty=1, lwd=3, col=c("black", "red", "blue"))
dev.off()
|
# jd_contest model 4: H2O deep-learning classifier trained on the 6_7_9
# feature set and scored on the 0_1_3 prediction frame.
# NOTE(review): 'predict', 'data' and 'test' shadow base R functions of the
# same names -- legal, but easy to trip over later in the session.

# Per-model indicator column names ("model_<id>"), used as categorical features.
modelId = c(11,217,27,216,17,210,14,211,218,26,28,24,220,21,25,15,221,222,29,23,19,16,223,219,116,224,22,31,125,13,111,18,33,119,333,112,322,311,318,110,319,34,12,124,120,121,325,331,329,334,346,328,32,320,341,348,340,323,337,343,345,336,312,321,335,347,344,342,316,315,36,115,114,113,122,317,212,313,35,314,39,338,225,310,324,332,327,37,38,330)
modelId = paste0("model_", modelId)
# Categorical user/product attribute columns.
factorList = c("age","attr1","attr2","attr3","brand","cate","comment_num","has_bad_comment","sex","user_level")
# Scoring frame: convert categorical columns to factors.
predict = h2o.importFile("/home/wanghl/jd_contest/0519/3/predict_DL_0_1_3.csv")
predict[,factorList] = as.factor(predict[,factorList])
predict[,modelId] = as.factor(predict[,modelId])
# Training data, split 80/20 into train/test frames.
data <- h2o.importFile("/home/wanghl/jd_contest/0519/3/data_DL_6_7_9.csv")
parts <- h2o.splitFrame(data, c(0.8))
train <-parts[[1]]
test <-parts[[2]]
# Class-imbalance ratios (printed when run interactively, as a sanity check).
nrow(data[data$target==1,]) / nrow(data[data$target==0,])
nrow(train[train$target==1,]) / nrow(train[train$target==0,])
nrow(test[test$target==1,]) / nrow(test[test$target==0,])
# Predictors: every column except the target and, further below, the identifiers.
x = setdiff(names(data), "target")
train[,factorList] = as.factor(train[,factorList])
train[,modelId] = as.factor(train[,modelId])
y = c("target")
train[,y] = as.factor(train[,y])
test[,factorList] = as.factor(test[,factorList])
test[,y] = as.factor(test[,y])
test[,modelId] = as.factor(test[,modelId])
x = setdiff(x, "user_id")
x = setdiff(x, "sku_id")
# 5-fold CV deep net with class balancing and early stopping.
# NOTE(review): hidden = c(300:400) expands to 101 hidden layers (sizes
# 300..400) while only two hidden_dropout_ratios are supplied -- confirm
# whether c(300, 400) (two layers) was intended.
m4_6_7_9 <- h2o.deeplearning(x, y, train, nfolds=5,
model_id = "m4_6_7_9", hidden = c(300:400),
activation = "RectifierWithDropout",
l1 = 0.00001,
l2 = 0.0001,
input_dropout_ratio = 0.2,
hidden_dropout_ratios = c(0.1, 0.1),
replicate_training_data = TRUE,
balance_classes = T,
class_sampling_factors=c(5, 1),
shuffle_training_data = T,
classification_stop = -1,
stopping_metric = "misclassification",
stopping_tolerance = 0.001,
stopping_rounds = 8,
epochs = 500
)
# Score the prediction frame and keep the (user, sku) pairs predicted as
# class 0, together with the class-0 probability.
p4 = h2o.predict(m4_6_7_9, predict)
p4bind = h2o.cbind(predict$user_id,predict$sku_id,p4$predict,p4$p0)
p4df = as.data.frame(p4bind)
nrow(p4df[p4df$predict==0,])
result4 = p4df[p4df$predict==0,]
write.csv(result4, file = "/home/wanghl/jd_contest/0519/result/result_6_7_9.csv", row.names=FALSE, quote =FALSE)
| /script/m4_6_7_9.R | no_license | whlgh258/jd-contest | R | false | false | 2,172 | r | modelId = c(11,217,27,216,17,210,14,211,218,26,28,24,220,21,25,15,221,222,29,23,19,16,223,219,116,224,22,31,125,13,111,18,33,119,333,112,322,311,318,110,319,34,12,124,120,121,325,331,329,334,346,328,32,320,341,348,340,323,337,343,345,336,312,321,335,347,344,342,316,315,36,115,114,113,122,317,212,313,35,314,39,338,225,310,324,332,327,37,38,330)
modelId = paste0("model_", modelId)
factorList = c("age","attr1","attr2","attr3","brand","cate","comment_num","has_bad_comment","sex","user_level")
predict = h2o.importFile("/home/wanghl/jd_contest/0519/3/predict_DL_0_1_3.csv")
predict[,factorList] = as.factor(predict[,factorList])
predict[,modelId] = as.factor(predict[,modelId])
data <- h2o.importFile("/home/wanghl/jd_contest/0519/3/data_DL_6_7_9.csv")
parts <- h2o.splitFrame(data, c(0.8))
train <-parts[[1]]
test <-parts[[2]]
nrow(data[data$target==1,]) / nrow(data[data$target==0,])
nrow(train[train$target==1,]) / nrow(train[train$target==0,])
nrow(test[test$target==1,]) / nrow(test[test$target==0,])
x = setdiff(names(data), "target")
train[,factorList] = as.factor(train[,factorList])
train[,modelId] = as.factor(train[,modelId])
y = c("target")
train[,y] = as.factor(train[,y])
test[,factorList] = as.factor(test[,factorList])
test[,y] = as.factor(test[,y])
test[,modelId] = as.factor(test[,modelId])
x = setdiff(x, "user_id")
x = setdiff(x, "sku_id")
m4_6_7_9 <- h2o.deeplearning(x, y, train, nfolds=5,
model_id = "m4_6_7_9", hidden = c(300:400),
activation = "RectifierWithDropout",
l1 = 0.00001,
l2 = 0.0001,
input_dropout_ratio = 0.2,
hidden_dropout_ratios = c(0.1, 0.1),
replicate_training_data = TRUE,
balance_classes = T,
class_sampling_factors=c(5, 1),
shuffle_training_data = T,
classification_stop = -1,
stopping_metric = "misclassification",
stopping_tolerance = 0.001,
stopping_rounds = 8,
epochs = 500
)
p4 = h2o.predict(m4_6_7_9, predict)
p4bind = h2o.cbind(predict$user_id,predict$sku_id,p4$predict,p4$p0)
p4df = as.data.frame(p4bind)
nrow(p4df[p4df$predict==0,])
result4 = p4df[p4df$predict==0,]
write.csv(result4, file = "/home/wanghl/jd_contest/0519/result/result_6_7_9.csv", row.names=FALSE, quote =FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/JSaicu.R
\name{JS.aicu}
\alias{JS.aicu}
\title{General survival statistics for univariable analysis (AIC)}
\usage{
JS.aicu(...)
}
\arguments{
\item{...}{arguments will be passed to coxph}
}
\value{
A dataframe of coxph output including HR(95\% Confidence Interval), P value and AIC
}
\description{
JS.aicu output the table with general survival analysis result with HR(95\% Confidence Interval),P value and Akaike's An Information Criterion. This function only change the format of the output table.
}
\examples{
Model_1 <- JS.aicu (Surv(as.numeric(surdate), scensor) ~ as.factor(isup_m_new) , data = D1)
Model_2 <- JS.aicu (Surv(as.numeric(surdate), scensor) ~ as.factor(FurhmanGrade_new), data = D2)
...
Model_5 <- JS.aicu (Surv(as.numeric(surdate), scensor) ~ as.factor(isup_m_new) + as.factor(Necrosis), data = D1)
output_f <- rbind(Model_1, Model_2, Model_3, Model_4, Model_5)
row.names(output_f) <- c(1:length(row.names(output_f)))
rtf output
rtf<-RTF("Table_survival.doc",width = 8.5, height = 11, font.size = 10, omi = c(1,1,1,1))
addHeader(rtf,title="Table1, Survival Analysis ")
addTable(rtf, output_f, font.size = 10, row.names = F, NA.string="-", col.widths = c(rep(1.5, 4) ) )
done(rtf)
Rmarkdown output
save(out,plot1, file='myfile.Rda')
Then open at markdown file
library(knitr)
output <- load("H:/Projects/myfile.Rda")
kable(output, format = "markdown")
}
| /man/JS.aicu.Rd | no_license | SophiaJia/Jsurvformat | R | false | true | 1,454 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/JSaicu.R
\name{JS.aicu}
\alias{JS.aicu}
\title{General survival statistics for univariable analysis (AIC)}
\usage{
JS.aicu(...)
}
\arguments{
\item{...}{arguments will be passed to coxph}
}
\value{
A dataframe of coxph output including HR(95\% Confidence Interval), P value and AIC
}
\description{
JS.aicu output the table with general survival analysis result with HR(95\% Confidence Interval),P value and Akaike's An Information Criterion. This function only change the format of the output table.
}
\examples{
Model_1 <- JS.aicu (Surv(as.numeric(surdate), scensor) ~ as.factor(isup_m_new) , data = D1)
Model_2 <- JS.aicu (Surv(as.numeric(surdate), scensor) ~ as.factor(FurhmanGrade_new), data = D2)
...
Model_5 <- JS.aicu (Surv(as.numeric(surdate), scensor) ~ as.factor(isup_m_new) + as.factor(Necrosis), data = D1)
output_f <- rbind(Model_1, Model_2, Model_3, Model_4, Model_5)
row.names(output_f) <- c(1:length(row.names(output_f)))
rtf output
rtf<-RTF("Table_survival.doc",width = 8.5, height = 11, font.size = 10, omi = c(1,1,1,1))
addHeader(rtf,title="Table1, Survival Analysis ")
addTable(rtf, output_f, font.size = 10, row.names = F, NA.string="-", col.widths = c(rep(1.5, 4) ) )
done(rtf)
Rmarkdown output
save(out,plot1, file='myfile.Rda')
Then open at markdown file
library(knitr)
output <- load("H:/Projects/myfile.Rda")
kable(output, format = "markdown")
}
|
####
#### STATISTIC MODEL : INFERENCE OF ECOLOGICAL MODEL FROM GENETIC DATA
####
##########################################################################
############## Set your working directory and files to load ##############
##########################################################################
# matrices: indexed by column
# rasters: indexed by row
####################################################
##### BACKWARD MODEL FUNCTIONS AND EXECUTIONS ######
##### SIMULATION OF PREDICTED GENETIC DATA ######
####################################################
degree2km <- function(rasterStack) {
  # Spatial resolution of a rasterStack in kilometres.
  #
  # Computes the great-circle distance between the centre of the raster
  # extent and the point one cell step away (one x-resolution step in
  # longitude, one y-resolution step in latitude) using the spherical law
  # of cosines. Coordinates are assumed to be geographic degrees.
  #
  # Args:
  #   rasterStack: the rasterStack from which to obtain the resolution
  #
  # Returns:
  #   The size of one diagonal cell step in km
  #
  # BUG FIXES vs. original:
  #  - degrees are now converted to radians before sin/cos/acos;
  #  - the law of cosines now uses latitudes in sin/cos and the longitude
  #    difference in the final cosine (they were swapped);
  #  - the acos() result (radians) is converted to degrees of arc before
  #    applying the 111.32 km-per-degree factor.
  deg2rad <- pi / 180
  lon_origin <- (xmin(rasterStack) + xmax(rasterStack)) / 2  # longitude of origin
  lat_origin <- (ymin(rasterStack) + ymax(rasterStack)) / 2  # latitude of origin
  lon_dest <- lon_origin + xres(rasterStack)  # longitude of destination point
  lat_dest <- lat_origin + yres(rasterStack)  # latitude of destination point
  cos_angle <- sin(lat_origin * deg2rad) * sin(lat_dest * deg2rad) +
    cos(lat_origin * deg2rad) * cos(lat_dest * deg2rad) *
    cos((lon_origin - lon_dest) * deg2rad)
  # clamp against floating-point overshoot before acos()
  cos_angle <- min(1, max(-1, cos_angle))
  dist_degree <- acos(cos_angle) / deg2rad
  dist_km <- dist_degree * 111.32
  dist_km
}
Aggregate_and_adjust_raster_to_data <- function(Envir_raster_stack, release, recovery,
                                                extend_band_size, aggregate_index)
{
  # Crop (and optionally aggregate) stacked environmental layers to the
  # geographic range of the sampling data plus an extension band.
  #
  # Args:
  #   Envir_raster_stack: raster stack of environmental layers
  #   release: release points (columns "X" and "Y" as longitude and latitude)
  #   recovery: recovery points (columns "X" and "Y" as longitude and latitude)
  #   extend_band_size: width of the band added around the samples' extent
  #   aggregate_index: aggregation factor; values > 1 coarsen the resolution
  #                    by taking the mean over blocks of cells
  #
  # Returns:
  #   The cropped (and possibly aggregated) raster stack
  sample_pts <- SpatialPoints(rbind(na.omit(release[, c("X", "Y")]),
                                    na.omit(recovery[, c("X", "Y")])))
  cropped <- crop(Envir_raster_stack, extent(sample_pts) + extend_band_size)
  if (aggregate_index > 1) {
    cropped <- aggregate(cropped, fact = aggregate_index, fun = mean,
                         expand = TRUE, na.rm = TRUE)
  }
  cropped
}
Show_Niche <- function(BBox,nb_points,p,shapes=c(BIO1="conquadraticskewed",BIO12="conquadraticskewed"))
{
  # Visualise a two-dimensional niche (reaction norm) as a wireframe surface.
  #
  # Args:
  #   BBox: bounding box of the two variable values; matrix with the
  #         variables as columns and rows named "Min" and "Max"
  #   nb_points: number of grid points per variable (length-2 vector)
  #   p: parameter values of the reaction norm for each variable as column
  #   shapes: shapes of the reaction norms for each variable in a named vector
  #
  # Returns:
  #   A lattice wireframe plot of the niche surface (requires library
  #   'lattice'; ReactNorm() must be available in the enclosing script).
  #
  # BUG FIX: the body referenced a global 'BB' instead of the 'BBox'
  # argument, so it only worked when the caller happened to define BB
  # (as the example below does).
  #
  # Example
  # BB = matrix(c(100,400,200,3200),nrow=2,ncol=2,dimnames=list(c("Min","Max"),c("BIO1","BIO12")))
  # p = matrix(c(100,500,300,0,10,10,300,3000,2500,0,20,20),nrow=6,ncol=2,dimnames=list(c("Xmin","Xmax","Xopt","Yxmin","Yxmax","Yopt"),c("BIO1","BIO12")))
  # Shapes=c(BIO1="conquadraticskewed",BIO12="conquadraticskewed")
  # Show_Niche(BB,nb_points=c(12,18),p,shapes=Shapes)

  # Regular grid of cell indices: nb_points[1] x nb_points[2]
  Var1 <- rep(seq_len(nb_points[1]), each = nb_points[2])
  Var2 <- rep(seq_len(nb_points[2]), times = nb_points[1])
  Data <- as.matrix(data.frame(Var1 = Var1, Var2 = Var2))
  colnames(Data) <- colnames(p)
  # Rescale the indices into variable values spanning [Min, Max] of BBox
  n_row <- dim(Data)[1]
  Data <- BBox[rep("Min", n_row), ] +
    (Data - 1) * (BBox[rep("Max", n_row), ] - BBox[rep("Min", n_row), ]) /
    matrix(nb_points - 1, nrow = n_row, ncol = dim(Data)[2], byrow = TRUE)
  rownames(Data) <- seq_len(n_row)
  Data <- as.data.frame(Data)
  # z ~ var1 * var2 surface of the reaction norm response
  form <- as.formula(paste("z~", paste(names(shapes), collapse = "*"), sep = ""))
  Data[, "z"] <- ReactNorm(Data, p, shapes)[, "Y"]
  wireframe(form, data = Data, scales = list(arrows = FALSE)) # requires library lattice
}
# populationSize: uses K_Function to obtain the populationsize landscape raster
#
#
populationSize <- function(donneesEnvironmentObs, p, shapes)
{
  # Give population size according to a landscape raster: maps the
  # environmental cell values through the reaction norm ReactNorm().
  #
  # Args:
  #   donneesEnvironmentObs: environmental raster (also the geometry
  #                          template for the output)
  #   p: reaction-norm parameters (see ReactNorm)
  #   shapes: reaction-norm shapes per variable (see ReactNorm)
  #
  # Returns:
  #   A raster with the same geometry whose cell values are population sizes
  populationSize <- donneesEnvironmentObs
  # BUG FIX: the original called valules() (typo) instead of values().
  # NOTE(review): other call sites use ReactNorm(...)[, "Y"]; the row
  # subscript [1, ] here looks suspicious -- confirm the intended output.
  values(populationSize) <- ReactNorm(values(donneesEnvironmentObs), p, shapes)[1, ]
  populationSize
}
distanceMatrix <- function(rasterStack) {
  # (optional) Pairwise Euclidean distance between every pair of raster
  # cells, computed from the cell-centre coordinates.
  cell_xy <- xyFromCell(rasterStack, seq_along(values(rasterStack[[1]])), spatial = FALSE)
  as.matrix(dist(cell_xy))
}
#prior
# simulation forward of population sizes across time
forward_simul_landpopsize <- function(N0,p, migration)
{
# NOTE(review): empty stub -- intended to simulate population sizes forward
# in time from initial sizes N0 with parameters p and a migration matrix;
# not implemented yet.
}
# laplaceMatrix returns Laplacian matrix from transition matrix
laplaceMatrix <- function(transitionMatrix) {
  # Graph Laplacian L = D - T from a transition matrix T, with D = I
  # (unit diagonal, as in the original). NA transitions are treated as 0.
  #
  # Args:
  #   transitionMatrix: square matrix of transition weights/probabilities
  # Returns:
  #   The Laplacian matrix, with NA entries replaced by 0
  #
  # BUG FIX: the original passed the full dim() vector (length 2) as the
  # scalar nrow/ncol arguments of matrix(); use nrow() explicitly.
  n <- nrow(transitionMatrix)
  matrixD <- diag(1, n, n)         # D = identity
  laplacianMatrix <- matrixD - transitionMatrix
  laplacianMatrix[is.na(laplacianMatrix)] <- 0  # replace NA by 0
  laplacianMatrix
}
# Calcul of resistance between two points of the graph
# with the Moore-Penrose generalized inverser matrix.
# ref : Bapat et all, A Simple Method for Computing Resistance Distance (2003)
# ref : Courrieu, Fast Computation of Moore-Penrose Inverse Matrices (2005)
resistDist <- function(laplacianMatrix) {
  # Resistance (commute-time) distance between all node pairs of a graph,
  # via the Moore-Penrose generalized inverse G of the Laplacian:
  #   R_ij = G_ii + G_jj - G_ij - G_ji
  # ref : Bapat et al., A Simple Method for Computing Resistance Distance (2003)
  # ref : Courrieu, Fast Computation of Moore-Penrose Inverse Matrices (2005)
  #
  # Args:
  #   laplacianMatrix: square graph Laplacian (see laplaceMatrix)
  # Returns:
  #   Square matrix of pairwise resistance distances (0 on the diagonal)
  inverseMP <- ginv(laplacianMatrix)  # generalized inverse (MASS)
  # BUG FIX: the original passed the full dim() vector (length 2) as the
  # scalar nrow/ncol arguments of matrix(); use nrow() explicitly.
  n <- nrow(inverseMP)
  mii <- matrix(diag(inverseMP), nrow = n, ncol = n)
  mii + t(mii) - inverseMP - t(inverseMP)
}
# Calcul of genetic distance from resistance
geneticDist <- function(commute_time, popSize) {
  # Convert commute-time distances into genetic distances by scaling with
  # 8 x (mean population size over the whole grid).
  # (A per-pair variant, commute_time / (8 * popSize), was considered
  # upstream but is not used.)
  dims <- dim(popSize)
  mean_pop <- sum(popSize) / (dims[1] * dims[2])
  commute_time / (8 * mean_pop)
}
# MAIN: execution of the functions that ultimately produce the predicted genetic data
#rasterCrop = Aggregate_and_adjust_raster_to_data(raster(paste(wd,envdir,envfiles,sep="")),release=read.table(paste(wd,genetfile,sep="")), recovery=read.table(paste(wd,genetfile,sep="")), extend_band_size=1, aggregate_index=aggregate_factor)
#plot(rasterCrop)
###################################################
##### FORWARD MODEL FUNCTIONS AND EXECUTIONS ######
##### SIMULATION OF OBSERVED GENETIC DATA ######
###################################################
#Plot genetic data in environmental data observed
#Genetic data is turn into a Spatial Pixel Data Frame
# TODO: colour the points according to the number of individuals
plotGeneticData = function(geneticData, EnvironmentalDataObserved){
  # Overlay the genetic sampling data on the first observed environmental
  # layer. The first two columns of geneticData are taken as coordinates
  # and the table is turned into a SpatialPixelsDataFrame before plotting.
  colnames(geneticData)[1:2] <- c("x", "y")
  pixels <- SpatialPixelsDataFrame(points = geneticData[, c("x", "y")],
                                   data = geneticData[, ])
  plot(EnvironmentalDataObserved[[1]])
  plot(pixels, add = TRUE)
}
# Function that computes population size distribution moments in a grid from one generation to the other
# N population sizes of the parent genration
# r growth rates
# K carrying capacities
# d death rates
# migration transition matrix between cells of the grid
# ptG : parameters of generaiton time model
gridRepnDispFunction <- function(dynamics,r,K,d=.9,ptG, migration,overlapping=TRUE)
{
# NOTE(review): unfinished draft -- this function cannot run as written:
#  - 'N' is used but never defined (presumably the previous-generation sizes);
#  - 'esperance' is assigned into before being created;
#  - nothing is returned; 'ptG', 'migration' and 'overlapping' are unused;
#  - the logistic term r*N*(K-N/K) is probably meant to be r*N*(K-N)/K.
# values(dynamics)[,dim(values(dynamics))[2]] is value at previous day
# d is mortality
Nt = values(dynamics)[,dim(values(dynamics))[2]]*(1-d) + r*N*(K-N/K)
esperance[K==0] <- 0
}
# Function that combine reproduction, dispersion and mutation for a given genetic data
repnDispMutFunction <- function(geneticData, dimGeneticData, mutationRate, transitionmatrice){
# One backward generation: resample the individuals (reproduction +
# dispersal) according to the backward transition matrix, then apply a
# symmetric stepwise mutation (+/- 2) to every allele.
#
# NOTE(review): this definition is SHADOWED by the second
# repnDispMutFunction defined later in the script, so it is dead code
# at run time.
#
# Args:
#   geneticData: data.frame with a "Cell_numbers" column and "Locus*"
#                allele columns.
#   dimGeneticData: dim(geneticData); [1] is the number of individuals.
#   mutationRate: per-allele mutation probability.
#   transitionmatrice: backward transition probabilities between cells.
# Returns:
#   The resampled and mutated geneticData.
# Calcul for reproduction and dispersion
# random choice of individuals and at the same time of their target cells in the transition matrix
ncell_transition <- dimGeneticData[1]*dim(transitionmatrice)[1]
# one draw per offspring over the (individual, target cell) grid, weighted
# by each individual's row of the transition matrix
transition_celnu <- sample(ncell_transition,dimGeneticData[1],replace=FALSE,transitionmatrice[geneticData[,"Cell_numbers"],])
# decode the flat draw into (column = target cell, line = parent index)
transition_col <- ceiling(transition_celnu/dimGeneticData[1])
transition_line <- transition_celnu%%(dimGeneticData[1]);transition_line[transition_line==0]<-dimGeneticData[1]
cell_numbers_sampled <- geneticData[transition_line,"Cell_numbers"]
geneticData <- geneticData[transition_line,]
geneticData[,"Cell_numbers"] <- transition_col
locusCols = grep("Locus", colnames(geneticData))
step = 2
mu = mutationRate # mutation rate
# one uniform liability per allele copy; mutate down/up with prob mu/2 each
liability = runif(prod(dimGeneticData), 0, 1) # mutation liability
liability = as.data.frame(matrix(liability, ncol = length(locusCols), nrow = dimGeneticData[1]))
geneticData[,locusCols] = geneticData[,locusCols] + ((liability<mu/2)*step - (liability>(1-mu/2))*step)
#print(c("mutrat",(sum(liability<mu/2)+sum(liability>(1-mu/2)))/(length(grep("Locus", colnames(geneticData)))*dimGeneticData[1])))
geneticData
}
# Function that combine reproduction, dispersion and mutation for a given genetic data
repnDispMutFunction <- function(geneticData, dimGeneticData, mutationRate, transitionmatrice){
  # One backward generation: each individual draws the cell its parent came
  # from using the backward transition matrix, inherits the genotype of a
  # parent sampled in that cell, then a symmetric stepwise mutation (+/- 2)
  # is applied to every allele with probability mutationRate.
  #
  # NOTE: this definition overrides the vectorised repnDispMutFunction
  # defined earlier in the script; 'nCell' (number of grid cells) is read
  # from the enclosing environment.
  #
  # Args:
  #   geneticData: data.frame with "Cell_numbers" and "Locus*" allele columns.
  #   dimGeneticData: dim(geneticData); [1] is the number of individuals.
  #   mutationRate: per-allele mutation probability.
  #   transitionmatrice: row-stochastic backward transition matrix between cells.
  # Returns:
  #   geneticData with resampled genotypes and mutations applied.
  locusCols = grep("Locus", colnames(geneticData))
  for (individual in 1:dimGeneticData[1])
  { # draw the cell the parent comes from, weighted by the individual's
    # row of the backward transition matrix
    mothercell = sample(nCell, 1,,transitionmatrice[geneticData[individual,"Cell_numbers"],])
    # choose the parent among the individuals currently in that cell.
    # BUG FIX: sample(which(...), 1) samples from 1:x when which() returns
    # a single value; index into the candidate vector instead.
    # (NOTE(review): still errors if the chosen cell is empty.)
    candidates = which(geneticData[,"Cell_numbers"]==mothercell)
    geneticline = candidates[sample.int(length(candidates), 1)]
    # the individual inherits the genetic data of its parent
    geneticData[individual,locusCols] = geneticData[geneticline,locusCols]
  }
  step = 2
  mu = mutationRate # mutation rate
  # BUG FIX: draw exactly one liability per allele copy; the original drew
  # prod(dimGeneticData) values and relied on matrix() truncation.
  liability = runif(dimGeneticData[1] * length(locusCols), 0, 1) # mutation liability
  liability = as.data.frame(matrix(liability, ncol = length(locusCols), nrow = dimGeneticData[1]))
  geneticData[,locusCols] = geneticData[,locusCols] + ((liability<mu/2)*step - (liability>(1-mu/2))*step)
  geneticData
}
#Function that calculate probability of identity of genes intra individual (at individuals level)
Qwithin_pair <- function(geneticData){
  # Probability of identity of the two genes carried WITHIN an individual,
  # averaged over loci, then symmetrised into a pairwise matrix whose
  # entry [i, j] is (Qw_i + Qw_j) / 2.
  # Allele columns are paired through their ".1" / ".2" name suffixes.
  first_allele  <- geneticData[, grep(".1", colnames(geneticData), fixed = TRUE)]
  second_allele <- geneticData[, grep(".2", colnames(geneticData), fixed = TRUE)]
  per_individual <- rowMeans(second_allele == first_allele)
  n_ind <- dim(geneticData)[1]
  by_column <- matrix(per_individual, nrow = n_ind, ncol = n_ind)
  (by_column + t(by_column)) / 2
}
#Function that calculate probability of identity of genes intra individual (at population level)
Qwithin_pop <- function(geneticData){
  # Population-level probability of identity of the two genes within an
  # individual: the overall fraction of matching ".1"/".2" allele pairs
  # across all individuals and loci.
  first_allele  <- geneticData[, grep(".1", colnames(geneticData), fixed = TRUE)]
  second_allele <- geneticData[, grep(".2", colnames(geneticData), fixed = TRUE)]
  mean((second_allele == first_allele) * 1)
}
# Fonction that calculates probability of identity of genes inter individual (between two individuals)
Qbetween <- function(geneticData, dimGeneticData){
# Probability of identity-in-state of genes BETWEEN individuals (Qb):
# for every pair of individuals the two possible allele pairings per locus
# are compared and their mean identities are averaged.
#
# Args:
#   geneticData: data.frame with "Locus*" allele columns (two columns per
#                locus: allele 1, allele 2).
#   dimGeneticData: dim(geneticData); [1] is the number of individuals.
# Returns:
#   dimGeneticData[1] x dimGeneticData[1] matrix of pairwise Qb values.
Qb = matrix(ncol = dimGeneticData[1], nrow = dimGeneticData[1]) # initialization of Qb (overwritten below)
# A = genetic data with loci only
A=as.matrix(geneticData[,grep("Locus",colnames(geneticData),fixed=T)])
# Build a 3-D array with one "floor" per individual (individuals move to a
# new dimension) so that all pairs can be compared at once.
A3 = aperm(array(A,dim=c(dim(A)[1],dim(A)[2],dim(A)[1])), c(1,3,2)) # permute columns and floors
B3 = aperm(A3, c(2,1,3)) # transpose of A3
# mean identity over loci for the direct allele pairing (n x n matrix)
moy1 = colMeans(aperm(A3 == B3), dims = 1, na.rm = T)
# Swap the allele columns two by two to compute the other possible
# allele-identity pairing.
l= 1:dim(A)[2]
Aprime= A[,c(matrix(c(l[2*floor(1:(length(l)/2))],l[2*floor(1:(length(l)/2))-1]), nrow= 2, ncol = length(l)/2, byrow = T))] # column permutation
# Same stacking for Aprime; B3 is left unchanged so the "complementary"
# alleles get compared.
Aprime3 = aperm(array(Aprime,dim=c(dim(A)[1],dim(A)[2],dim(A)[1])), c(1,3,2))
moy2 = colMeans(aperm(Aprime3 == B3), dims = 1, na.rm = T) # mean identity for the swapped pairing
# Mean of the two pairings: Qbetween
Qb =(moy1 + moy2)/2
Qb
}
# TEST: for a given number of generations, test the stability of the a-value
# Function testing stabilisation of the a-value statistic
test_stabilite_a_value <- function(geneticData, mutationRate, dimGeneticData, nb_generations=5000,transitionmatrice){
  ## ref: Rousset et al. J Evol Biol,13 (2000) 58-62.
  # Iterates forward generations and monitors the a-value statistic; stops
  # when its variance over the most recent 30-generation window exceeds the
  # two preceding windows (heuristic stationarity test).
  # value: list(geneticData, a_value) on stabilisation; NULL otherwise.
  vecteur_a_value <- numeric(nb_generations)  # preallocated (was grown index by index)
  for(i in 1:nb_generations){
    print(i)
    geneticData <- repnDispMutFunction(geneticData, dimGeneticData, mutationRate, transitionmatrice)
    Qw_pair <- Qwithin_pair(geneticData)  # hoisted: was computed twice per generation
    matrixQb <- (1 - Qw_pair + 2*(Qw_pair - Qbetween(geneticData, dimGeneticData)))
    matrixQw <- 2*(1 - Qwithin_pop(geneticData))
    vecteur_a_value[i] <- matrixQb/matrixQw - 1/2
    vecteur_a_value[is.na(vecteur_a_value)] <- 0
    if((i > 90) && (i %% 30 == 0)){
      if(var(vecteur_a_value[(i-30):i]) > var(vecteur_a_value[(i-60):(i-30)])
         && var(vecteur_a_value[(i-30):i]) > var(vecteur_a_value[(i-90):(i-60)])){
        # (an unreachable `break` after this return was removed)
        return(list(geneticData, (matrixQb/matrixQw - 1/2)))
      }
    }
  }
  NULL  # no stabilisation within nb_generations (same as falling off the loop)
}
test_stabilite_a_value <- function(geneticData, mutationRate, dimGeneticData, nb_generations=5000,transitionmatrice){
  ## ref: Rousset et al. J Evol Biol,13 (2000) 58-62.
  # Fst-based variant: monitors the linearised Fst series Fst/(1-Fst) and
  # stops when its variance increases over the last three 30-generation windows.
  # value: list(geneticData, Fst) on stabilisation; NULL otherwise.
  Fstlinear <- numeric(nb_generations)  # BUG FIX: was never initialised (runtime error)
  Fst <- NA
  for(i in 1:nb_generations){
    print(i)
    geneticData <- repnDispMutFunction(geneticData, dimGeneticData, mutationRate, transitionmatrice)
    Genotypes <- geneticData[, grep("Locus", colnames(geneticData), fixed = TRUE)]
    popmbrship <- geneticData[, "Cell_numbers"]
    # NOTE(review): nCell is read from the calling/global environment -- confirm.
    Fst <- Fstat(Genotypes, nCell, popmbrship, ploidy = 2)
    Fstlinear[i] <- Fst/(1 - Fst)
    if((i > 90) && (i %% 30 == 0)){
      # BUG FIX: the variance windows were indexed on the scalar Fst instead
      # of the Fstlinear series.
      if(var(Fstlinear[(i-30):i]) > var(Fstlinear[(i-60):(i-30)])
         && var(Fstlinear[(i-30):i]) > var(Fstlinear[(i-90):(i-60)])){
        return(list(geneticData, Fst))
      }
    }
  }
  NULL
}
fstat = function(geneticData){
# NOTE(review): this function looks like an unfinished draft and cannot run
# as written:
#  - as.formulae is not a base R function (as.formula?) and `form` is unused
#  - MeanTot and dimGeneticData are not defined in this scope
#  - Pops, MeanPop, VarInterPop and VarIntraPop are computed but unused
#  - the returned value is VarTot, a plain copy of VarInd
# Kept unchanged pending clarification of the intended F-statistic formula.
Genotypes = geneticData[,grep("Locus", colnames(geneticData), fixed = T)]
form <- as.formulae
Pops = geneticData[,"Cell_numbers"]
MeanPop = t(matrix((colSums(Genotypes)/dimGeneticData[1]),ncol=dimGeneticData[1],nrow=dimGeneticData[2]))
VarInd = matrix(Genotypes^2 - MeanTot^2,ncol=dimGeneticData[2])
VarInterPop = var(MeanPop)
VarIntraPop = colSums(VarInd)/dimGeneticData[1]
VarTot = VarInd
}
simul_commute <- function(cells=c(1,2),transitionmatrice)
{
  # Simulates a commute time on the Markov chain defined by `transitionmatrice`
  # (row = current cell, columns = destination probabilities): the number of
  # steps for a random walk to go from cells[1] to cells[2] and back.
  # args:
  #   cells: vector of two cell indices (origin, destination)
  #   transitionmatrice: square stochastic matrix of movement probabilities
  # value: total step count (starts at 1, as in the original)
  tmpcell <- cells[1]
  t <- 1
  # outbound trip: walk until cells[2] is hit
  while (tmpcell != cells[2])
  {
    tmpcell <- sample(dim(transitionmatrice)[2], size = 1, prob = c(transitionmatrice[tmpcell, ]))
    t <- t + 1
  }
  # return trip: walk until back at cells[1]
  # (dead local flags `hit` and `commute` from the original were removed)
  while (tmpcell != cells[1])
  {
    tmpcell <- sample(dim(transitionmatrice)[2], size = 1, prob = c(transitionmatrice[tmpcell, ]))
    t <- t + 1
  }
  t
}
simul_coocur <- function(cells=c(1,2),transitionmatrice)
{
  # Co-occurrence time: number of steps until two independent random walks,
  # started in cells[1] and cells[2], first occupy the same cell.
  # Each walker moves according to the row of `transitionmatrice` for its
  # current cell. The counter starts at 1, matching simul_commute().
  n_cells <- dim(transitionmatrice)[2]
  walker_a <- cells[1]
  walker_b <- cells[2]
  step_count <- 1
  while (walker_a != walker_b) {
    walker_a <- sample(n_cells, size = 1, prob = c(transitionmatrice[walker_a, ]))
    walker_b <- sample(n_cells, size = 1, prob = c(transitionmatrice[walker_b, ]))
    step_count <- step_count + 1
  }
  step_count
}
combine.names = function(names1,names2)
{
  # All pairwise combinations "name1.name2", with names1 varying slowest --
  # the same order as the original nested loops.
  # Vectorised with outer() instead of growing a vector with append() inside
  # a double loop. (On empty input this returns character(0) instead of the
  # original NULL; both behave identically in append()/c() call sites.)
  as.vector(t(outer(names1, names2, paste, sep = ".")))
}
react_norm_param <- function(shapes)
{
  # Builds the vector of reaction-norm parameter names for the given shapes,
  # one group of names per environmental layer.
  # NOTE(review): reads `rasterStack` from the enclosing/global environment;
  # consider passing it as an argument.
  # Unknown shapes yield NULL and are silently skipped (as before).
  # (append-in-loop replaced by unlist(lapply(...)); use.names = FALSE keeps
  # the result unnamed like the original even when `shapes` is named)
  unlist(lapply(shapes, function(shape)
    switch(shape,
           constant = paste(names(rasterStack),".Y",sep=""),
           enveloppe = combine.names(names(rasterStack),c("Xmin","Xmax","Yopt")),
           envelin = combine.names(names(rasterStack),c("Yxmin","Yxmax","Xmin","Xmax")),
           envloglin = combine.names(names(rasterStack),c("Yxmin","Yxmax","Xmin","Xmax")),
           linear = combine.names(names(rasterStack),c("X0","slope")),
           linearPositive = combine.names(names(rasterStack),c("X0","slope")),
           conquadratic = combine.names(names(rasterStack),c("Xmin","Xmax","Yopt")),
           conquadraticsq = combine.names(names(rasterStack),c("Xmin","Xmax","Yopt")),
           conquadraticskewed = combine.names(names(rasterStack),c("Xmin","Xmax","Xopt","Yopt")),
           conquadraticskewedsq = combine.names(names(rasterStack),c("Xmin","Xmax","Xopt","Yopt"))
    )), use.names = FALSE)
}
input_reaction_norm_shape_model <- function(demographic_parameter,names_envir_variables)
{
  # Interactively asks for one reaction-norm shape per environmental variable.
  # args:
  #   demographic_parameter: label shown in the prompt (e.g. "K" or "r")
  #   names_envir_variables: names of the environmental layers
  # value: character vector of validated shape names (one per variable)
  allshapes= c("constant", "enveloppe", "envelin", "envloglin", "linear", "linearPositive", "conquadratic", "conquadraticsq", "conquadraticskewed", "conquadraticskewedsq")
  shape=NULL
  for (envir in names_envir_variables)
  {
    ok=FALSE
    while (!ok) {
      cat("Enter reaction norm model for variable",envir,"\n",
          "and demographic parameter", demographic_parameter,"\n",
          "(or 'h' for help) : ")
      answer <- readline("Enter: ") # prompt
      # BUG FIX: validate the current answer only. The original appended first
      # and tested the whole accumulated vector, so `ok` became a vector after
      # the first variable (an error as a while() condition in R >= 4.2).
      ok = (answer %in% allshapes)
      if (!ok) {
        cat("Please chose among: ","\n",
            paste(allshapes,collapse="\n"))
      }
    }
    shape = append(shape, answer)
  }
  shape
}
set_prior_vector_from_keyb <- function(name,n)
{
# sets a vector of priors from keyboard
# args:
# name: name of the parameter the prior is for (shown in the prompt)
# n: length of the vector (number of prior draws)
# value: numeric vector of n draws from the chosen prior distribution
#
# Step 1: ask for the prior family until a supported one is entered
ok = FALSE
while(!ok)
{
prior_dist <- readline(paste("Enter prior distribution for",
name,"(enter h for help): "))
ok = prior_dist%in%c("uniform","log_uniform","normal","log_normal")
if (!ok) cat("\n","models implemented are :",
"\n","'uniform'",
"\n","'log_uniform'",
"\n","'normal'",
"\n","'log_normal'"
)
}
# Step 2: the parameter names each family expects
parameters_names_prior_dist=switch(prior_dist,
uniform = c("min","max"),
log_uniform = c("min","max"),
normal = c("mean","sd"),
log_normal = c("mean","sd"))
params = NULL
# Scale reminders for the log families (log-uniform bounds are entered on the
# variable scale and log-transformed below; log-normal mean/sd are on the log scale)
if (prior_dist=="log_normal") {cat("for log-normal, note that: ",
"\n"," mean and sd are on the log scale")}
if (prior_dist=="log_uniform") {cat("for log-uniform, note that: ",
"\n","min and max are not on the log scale",
"\n","but on the variable scale")}
# Step 3: read each distribution parameter from the keyboard
for (paramname in parameters_names_prior_dist)
{
params = append(params,
readline(
paste("Enter ",paramname,
" for ",prior_dist," distribution: ")
)
)
}
params=as.numeric(params)
names(params) = parameters_names_prior_dist
# Step 4: draw the n prior values
# NOTE(review): log_uniform assumes min > 0 (log() of the entered bounds).
switch(prior_dist,
uniform=runif(n,params["min"],params["max"]),
log_uniform=exp(runif(n,log(params["min"]),log(params["max"]))),
normal=rnorm(n,params["mean"],params["sd"]),
log_normal=rlnorm(n,params["mean"],params["sd"])
)
}
set_ref_table_from_keyb <- function(rasterStack,n)
{
  # Interactively builds a reference table for ABC: asks for the reaction-norm
  # models of K and r, the mutation model and the dispersal model, then fills
  # an n-row data frame with prior draws for every model parameter.
  # args:
  #   rasterStack: environmental layers (their names build the parameter names)
  #   n: number of prior draws (rows of the reference table)
  # value: data frame of prior draws, one column per parameter name
  #
  # reaction norm models
  shapesK = input_reaction_norm_shape_model("K",names(rasterStack))
  pKnames = react_norm_param(shapesK)
  shapesr = input_reaction_norm_shape_model("r",names(rasterStack))
  prnames = react_norm_param(shapesr)
  # mutation model
  ok=FALSE
  while (!ok) {
    mutation_model <- readline("Enter mutation model (or 'h' for help) : ") # prompt
    ok = (mutation_model %in% c("tpm","bigeometric","stepwise"))
    if (!ok) {
      cat("\n","models implemented are :",
          "\n","'stepwise'",
          "\n","'bigeometric'",
          "\n","'tpm': two phase mutation model")
    }
  }
  # BUG FIX: the switch key was misspelled "tmp", so the two-phase model
  # ("tpm") never got its mutation parameter names.
  mut_param_names = switch(mutation_model,
                           bigeometric = "sigma2",
                           tpm = c("p","sigma2"),
                           stepwise = NULL
  )
  # Dispersion
  ok=FALSE
  while (!ok) {
    shapeDisp <- readline("Enter dispersion model (or 'h' for help) : ") # prompt
    ok = (shapeDisp %in% c("fat_tail1","gaussian",
                           "exponential","contiguous",
                           "island", "fat_tail2",
                           "gaussian_island_mix"))
    if (!ok) {
      cat("\n","models implemented are :",
          "\n","'fat_tail1' (Chapman et al)",
          "\n","'gaussian'",
          "\n","'exponential'",
          "\n","'contiguous'",
          "\n","'island'",
          "\n","'fat_tail2' (Moilanen et al)",
          "\n","'gaussian_island_mix'")
    }
  }
  # (a duplicated "gaussian" entry was removed from this switch)
  Dispersion_parameter_names = switch(shapeDisp,
                                      fat_tail1 = c("alpha","beta"),
                                      gaussian = c("sd"),
                                      exponential = c("mean"),
                                      contiguous = c("m"),
                                      island = c("m"),
                                      fat_tail2 = c("alpha","beta"),
                                      gaussian_island_mix = c("sd","m")
  )
  # set priors
  # NOTE(review): this `priors` list is built but never used afterwards --
  # kept as in the original, pending clarification.
  priors = list()
  priors$shapesK = shapesK
  priors$pK = rep(NA,length(pKnames));names(priors$pK)=pKnames
  priors_names <- c(prnames,pKnames,mut_param_names,Dispersion_parameter_names)
  df = as.data.frame(matrix(NA,nrow=n,ncol=length(priors_names)))
  colnames(df)=priors_names
  for (name in priors_names)
  {
    df[,name] <- set_prior_vector_from_keyb(name,n)
  }
  df
}
input_priors <- function()
{
  # Interactively builds prior draws for the mutation model parameters.
  # arg: none (everything is read from the keyboard)
  # value: list with the chosen mutation model, the number of simulations and
  #        the drawn parameter vectors (sigma2; p for the two-phase model)
  nb_simul <- as.numeric(readline("Number of simulations: "))
  ok = FALSE
  # BUG FIX: the answer was stored in `shape_model` but every later test read
  # the undefined `mutation_model`; one consistent name is used throughout.
  while (!ok) {
    mutation_model <- readline("Enter mutation model or 'h' for help : ") # prompt
    ok = (mutation_model %in% c("tpm","bigeometric","stepwise"))
    if (!ok) {
      cat("\n","models implemented are :",
          "\n","'stepwise'",
          "\n","'bigeometric'",
          "\n","'tpm': two phase mutation model")
    }
  }
  sigma2 <- NULL
  p <- NULL
  if (mutation_model=="bigeometric")
  {
    # BUG FIX: `sigmaDist` was a typo for `sigma2Dist`; bounds are now
    # converted with as.numeric() before being passed to runif().
    sigma2Dist <- readline("Enter distribution of variance maximum of geometric distribution (1/p): ")
    if (sigma2Dist=="uniform")
    {
      sigma2Max <- as.numeric(readline("Enter variance maximum of geometric distribution (1/p): "))
      sigma2Min <- as.numeric(readline("Enter variance minimum of geometric distribution (1/p): "))
      sigma2 <- runif(n=nb_simul,min=sigma2Min,max=sigma2Max)
    }
  }
  if (mutation_model=="tpm")
  {
    sigma2Dist <- readline("Enter prior distribution shape for variance of geometric distribution (1/p): ")
    if (sigma2Dist=="uniform")
    {
      sigma2Max <- as.numeric(readline("Enter maximum of variance of geometric distribution: "))
      sigma2Min <- as.numeric(readline("Enter minimum of variance of geometric distribution: "))
      sigma2 <- runif(nb_simul,sigma2Min,sigma2Max)
      # BUG FIX: pMax/pMin stayed character strings before runif()
      pMax <- as.numeric(readline("Enter stepwise maximum proportion: "))
      pMin <- as.numeric(readline("Enter stepwise minimum proportion: "))
      p <- runif(nb_simul,pMin,pMax)
    }
  }
  # BUG FIX: the original returned the value of the last `if` block (often
  # NULL); the drawn priors are now returned explicitly.
  list(mutation_model = mutation_model, nb_simul = nb_simul, sigma2 = sigma2, p = p)
}
set_model <- function(pK, pr, shapesK, shapesr, shapeDisp, pDisp,
                      mutation_rate, initial_genetic_value,
                      mutation_model,stepvalue,
                      mut_param)
{
  # Assembles a genetic and environmental demographic model as a named list,
  # validates it, and returns it.
  # args: model parameters (reaction norms of K and r, dispersal kernel,
  #       mutation settings)
  # value: the validated model list
  model = list(pK=pK, pr=pr,
               shapesK=shapesK, shapesr=shapesr,
               shapeDisp=shapeDisp, pDisp=pDisp,
               mutation_rate=mutation_rate,
               initial_genetic_value=initial_genetic_value,
               mutation_model=mutation_model,stepvalue=stepvalue,
               mut_param=mut_param)
  check_model(model)  # stops with an informative message on invalid settings
  # BUG FIX: return the model itself; the original returned check_model()'s
  # status string, losing the constructed model.
  model
}
check_dispersion_model <- function(shapeDisp,pDisp)
{
  # Checks that the dispersal kernel name and its named parameter vector agree.
  # args:
  #   shapeDisp: kernel family name (e.g. "gaussian", "fat_tail1", ...)
  #   pDisp: named vector of kernel parameters
  # value: "Dispersion model OK"; stops with a usage hint otherwise.
  # NOTE(review): an unknown shapeDisp yields compatible = NULL and passes
  # silently (all(NULL) is TRUE) -- same behavior as the original.
  # (a duplicated "gaussian" entry was removed from this switch)
  compatible = switch(shapeDisp,
                      fat_tail1 = c("alpha","beta")%in%names(pDisp),
                      gaussian = c("sd")%in%names(pDisp),
                      exponential = c("mean")%in%names(pDisp),
                      contiguous = c("m")%in%names(pDisp),
                      island = c("m")%in%names(pDisp),
                      fat_tail2 = c("alpha","beta")%in%names(pDisp),
                      gaussian_island_mix = c("sd","m")%in%names(pDisp)
  )
  if (!all(compatible)) {
    Message = switch(shapeDisp,
                     fat_tail1 = "fat_tail1 : pDisp=c(alpha=..,beta=..)",
                     gaussian = "gaussian : pDisp=c(sd=..)",
                     exponential = "exponential: pDisp=c(mean=..)",
                     contiguous = "contiguous: pDisp=c(m=..)",
                     island = "island: pDisp=c(m=..)",
                     fat_tail2 = "fat_tail2: pDisp=c(alpha=..,beta=..)",
                     gaussian_island_mix = "gaussian_island_mix: pDisp=c(sd=..,m=..)"
    )
    stop (paste("check dispersion model. For",Message))
  }
  "Dispersion model OK"
}
check_reaction_norm <- function(shapes,p)
{
  # Checks that the reaction-norm shape names and the parameter matrix `p`
  # (parameters as rownames, one column per environmental variable) agree.
  # args:
  #   shapes: character vector of shape names, one per environmental variable
  #   p: parameter matrix (rownames = parameter names, columns = variables)
  # value: TRUE; stops with an explanatory message otherwise.
  if (!all(shapes %in% c("enveloppe", "envelin", "envloglin","loG","linear",
                         "conquadratic","conquadraticskewed","conquadraticsq",
                         "conquadraticskewedsq","constant","linearPositive")))
    stop("shape of reaction norm is unknown,
       please chose among 'constant', 'enveloppe', 'linearPositive' 'envelin', 'envloglin','loG','linear',
       'conquadratic','conquadraticskewed','conquadraticsq' or 'conquadraticskewedsq'
       ")
  if (length(shapes)!=dim(p)[2])
    stop(paste("reaction norm shapes and parameters do not use the same number of
       environemental variables."))
  # NOTE(review): conquadratic(sq) requires Xopt here, while react_norm_param()
  # generates Yopt for those shapes -- confirm which convention is intended.
  compatible=NA
  for (i in 1:length(shapes))
  {
    compatible[i] <- switch(shapes[i],
                            constant = all(c("Y")%in%row.names(p)),
                            enveloppe = all(c("Xmin","Xmax","Yopt")%in%row.names(p)),
                            envelin = all(c("Yxmin","Yxmax","Xmin","Xmax")%in%row.names(p)),
                            envloglin = all(c("Yxmin","Yxmax","Xmin","Xmax")%in%row.names(p)),
                            linear = all(c("X0","slope")%in%row.names(p)),
                            linearPositive = all(c("X0","slope")%in%row.names(p)),
                            conquadratic = all(c("Xmin","Xmax","Xopt")%in%row.names(p)),
                            conquadraticsq = all(c("Xmin","Xmax","Xopt")%in%row.names(p)),
                            conquadraticskewed = all(c("Xmin","Xmax","Xopt","Yopt")%in%row.names(p)),
                            conquadraticskewedsq = all(c("Xmin","Xmax","Xopt","Yopt")%in%row.names(p)))
  }
  if (!all(compatible))
  {
    # BUG FIX: messages are now accumulated (the original overwrote Message on
    # every iteration), and the second duplicated "conquadraticskewed" switch
    # key is corrected to "conquadraticskewedsq".
    Message = NULL
    for (i in which(!compatible))
    {
      Message = c(Message, switch(shapes[i],
                                  constant = "rownames of pDisp matrix parameter for constant shape is 'Y'. ",
                                  enveloppe = "rownames of pDisp matrix parameter for enveloppe shape are 'Xmin', 'Xmax' and 'Xopt'. ",
                                  envelin = "rownames of pDisp matrix parameter for envelin shape are 'Yxmin', 'Yxmax', 'Xmin', and 'Xmax'. ",
                                  envloglin = "rownames of pDisp matrix parameter for enveloglin shape are 'Yxmin', 'Yxmax', 'Xmin', and 'Xmax'. ",
                                  linear = "rownames of pDisp matrix parameter for linear shape are 'X0' and 'slope'. ",
                                  linearPositive = "rownames of pDisp matrix parameter for linearPositive shape are 'X0' and 'slope'. ",
                                  conquadratic = "rownames of pDisp matrix parameter for conquadratic shape are 'Xmin', 'Xmax' and Xopt. ",
                                  conquadraticsq = "rownames of pDisp matrix parameter for conquadraticsq shape are 'Xmin', 'Xmax' and Xopt. ",
                                  conquadraticskewed = "rownames of pDisp matrix parameter for conquadraticskewed shape are 'Xmin', 'Xmax', Xopt and Yxopt. ",
                                  conquadraticskewedsq = "rownames of pDisp matrix parameter for conquadraticskewedsq shape are 'Xmin', 'Xmax', Xopt and Yxopt. "
      ))
    }
    stop(paste("Error in reaction norm model settings:",
               "for environmental variable(s) number",
               paste(which(!compatible),collapse=" and "),
               ", parameters corresponding to the shape annouced are not provided as rownames. Note that",
               paste(Message,collapse=" ")))
  }
  # (the original's trailing `if (!all(compatible)) return(...)` was
  # unreachable after the stop() above and has been removed)
  return(TRUE)
}
check_mutation_model<-function(mutation_model,mutation_parameter)
{
  # Checks that the mutation model name and its named parameter vector agree.
  # "stepwise" needs no parameters: the switch returns NULL and all(NULL) is
  # TRUE, so it passes (same behavior as the original).
  # value: "mutation model OK"; stops with a usage hint otherwise.
  compatible =
    switch(mutation_model,
           bigeometric = c("sigma2")%in%names(mutation_parameter),
           # BUG FIX: the key was misspelled "tmp"; the model is "tpm"
           tpm = c("p","sigma2")%in%names(mutation_parameter)
    )
  if (!all(compatible)) {
    # BUG FIX: the message lookup switched on the undefined `shapeDisp`
    Message = switch(mutation_model,
                     bigeometric = "bigeometric : mut_param=c(sigma2=..)",
                     tpm = "tpm : mut_param=c(p=.., sigma2=..)"
    )
    stop(paste("check mutation parameter(s) names for",Message))
  }
  "mutation model OK"
}
check_model <- function(model)
{
  # Validates every component of a model list built by set_model():
  # both reaction norms, then the mutation model, then the dispersal model.
  # Each checker stops with an informative message on failure; the status
  # string of the last check is returned, as before.
  with(model, {
    check_reaction_norm(shapesr, pr)
    check_reaction_norm(shapesK, pK)
    check_mutation_model(mutation_model, mut_param)
    check_dispersion_model(shapeDisp, pDisp)
  })
}
# S4 class for a dispersal model: numeric identifier, kernel family name
# (e.g. "gaussian", "fat_tail1") and its parameter vector.
setClass("DispersionModel", representation(ID="numeric",
shapeDisp="character",
pDisp="vector"
)
)
# S4 class for a reaction norm: shape names (one per environmental variable)
# and the parameter matrix (parameters as rownames, variables as columns).
setClass("ReactionNorm",representation(shapes = "character",
p="matrix"))
# S4 class holding only the mutation model name.
setClass("MutationModel",representation(model="character"
))
# S4 class bundling a full environmental demogenetic model: carrying-capacity
# and growth-rate reaction norms plus the dispersal model.
setClass("EnvDemogenetModel",representation(ID = "numeric",
K = "ReactionNorm",
r = "ReactionNorm",
Dispersion = "DispersionModel"
))
#
# genetics_of_coaltable function
# adds genetic values to a coalescent table containing mutation number per
# branch, knowing the initial genetic value of the ancestor and the mutation model
genetics_of_coaltable <- function(coaltable,initial_genetic_value,mutation_model,stepvalue=2,mut_param=c(p=.5,sigma=2))
{
  # Dispatches on the mutation model name to the matching project function.
  # CONSISTENCY FIX: the rest of the file names the single-step model
  # "stepwise"; the original only accepted "step_wise" (and returned NULL for
  # "stepwise"). Both spellings are now accepted via switch fall-through.
  switch(mutation_model,
         stepwise = ,
         step_wise = stepwise(coaltable,initial_genetic_value,stepvalue),
         tpm = tpm(coaltable,initial_genetic_value,stepvalue,mut_param),
         bigeometric = bigeometric(coaltable,initial_genetic_value,stepvalue,mut_param)
  )
}
# coalescent_2_phylog
# converts a coalescent simulation to a class phylog tree (library ape)
#
coalescent_2_phylog <- function(coalescent)
{
# Serialises the coalescent with coalescent_2_newick() (defined elsewhere in
# this file) and parses the Newick string with ape's read.tree().
read.tree(text=coalescent_2_newick(coalescent))
}
#
# plot_coalescent plots a coalescent simulation
# argument: output of simul_coalescent()
plot_coalescent <- function(coalescent,genetic_table,with_landscape=FALSE,rasK=NULL,legend_right_move=-.2)
{
  # Plots the coalescent tree downward with tips coloured by deme, a deme
  # legend, and optionally the carrying-capacity landscape beside it.
  if (with_landscape) {par(mfrow=c(1,2),oma=c(0,0,0,4),xpd=TRUE)}else{par(mfrow=c(1,1),oma=c(0,0,0,4),xpd=TRUE)}
  # converted once up front (the original called coalescent_2_phylog() twice)
  phylog_format_tree <- coalescent_2_phylog(coalescent)
  # NOTE(review): `geneticData` is read from the global environment -- confirm;
  # consider passing it explicitly (genetic_table is a separate argument).
  tipcells <- geneticData$Cell_numbers[as.numeric(phylog_format_tree$tip.label)]
  tipcols <- rainbow(ncell(rasK))[tipcells]
  # append each tip's genetic value to its label
  phylog_format_tree$tip.label <- paste(phylog_format_tree$tip.label,genetic_table[order(genetic_table$coalescing)[as.numeric(phylog_format_tree$tip.label)],"genetic_value"],sep=":")
  plot(phylog_format_tree,direction="downward",tip.color=tipcols)
  legend("topright", title="demes", cex=0.75, pch=16, col=tipcols[!duplicated(tipcols)], legend=tipcells[!duplicated(tipcols)], ncol=2, inset=c(legend_right_move,0))
  if (with_landscape) {plot(rasK)}
}
# summary_stat calculates summary stats for observed and simulated data and creates a reference table
# geneticDataSimulList : a list of simulations, with sublist geneticData and sublist log_lik_forward
#
set_priors <- function(variables,Min,Max,nb_lines)
{
# NOTE(review): unfinished stub -- the loop body is empty, `df`, `Min`, `Max`
# and `nb_lines` are never used, and the function returns the for-loop's
# invisible NULL. Kept unchanged pending implementation
# (see set_prior_vector_from_keyb / set_ref_table_from_keyb for working priors).
df = data.frame(matrix(NA,nrow=1,ncol=length(variables)))
for (i in variables)
{
}
}
new_reference_table <- function(geneticData,Distance,priors)
{
# creates a new, empty reference table (zero rows) whose columns are the
# significant PCA components of the observed genetic distances plus "loglik"
# !!!!!! details : first line of reference table is NOT rotated observed genetic data
# NOTE(review): the `priors` argument is currently unused.
# NOTE(review): prcomp() is given a "dist" object directly; confirm whether
# the full square matrix (as.matrix(GenetDist)) was intended instead.
GenetDist = switch(Distance,
Goldstein = dist(geneticData[,grep("Locus",colnames(geneticData))])^.5,
pID = pID(geneticData)
)
# components whose loadings are numerically non-zero
Significant_Components <- which(abs(prcomp(GenetDist)$x)>1E-5)
df <- as.data.frame(matrix(NA,nrow=1,ncol=length(Significant_Components)+1,dimnames=list("l1",c(paste("C",c(Significant_Components),sep=""),"loglik"))))[FALSE,]
df
}
add_summary_stat <- function(reference_table,geneticDataSim,rotation,forward_log_lik,Distance="Goldstein")
{
  # Adds the summary statistics of one simulation to the ABC reference table.
  # arguments:
  #   reference_table : the reference table to append to
  #   geneticDataSim  : the simulated genetic data
  #   rotation        : the PCA rotation to apply to the genetic distances
  #   forward_log_lik : forward log likelihood of the simulated genetic data
  #                     (NOTE(review): currently not stored in the row -- confirm intent)
  #   Distance        : "Goldstein" (difference in number of repeats) or
  #                     "pID" (proportion of identity)
  # value: appended reference_table
  #
  # 1) matrix of pairwise genetic distances between the simulated individuals
  #    (DSAI shared-allele proportion, or repeat-number distance, Goldstein 1995)
  # BUG FIX: distances were computed on the global `geneticData` instead of
  # the `geneticDataSim` argument.
  GenetDist = switch(Distance,
                     Goldstein = dist(geneticDataSim[,grep("Locus",colnames(geneticDataSim))])^.5,
                     pID = pID(geneticDataSim)
  )
  # 2) project the simulated distances on the PCA axes; each individual's
  #    coordinates on these major axes are the summary statistics
  summary_stats = as.matrix(GenetDist) %*% rotation
  rbind(reference_table,as.vector(summary_stats))
}
fill_reference_table <- function(geneticData,Distance,rasterStack=rasterStack,
                                 pK=pK, pr=pr,
                                 shapesK=shapesK, shapesr=shapesr,
                                 shapeDisp=shapeDisp, pDisp=pDisp,
                                 mutation_rate=1E-1,
                                 initial_genetic_value=initial_genetic_value,
                                 mutation_model="tpm",stepvalue=2,
                                 mut_param=c(p=.5,sigma2=4))
{
  # Fills a reference table from model parameter values: simulates one
  # coalescent per locus, collects the simulated genotypes and appends their
  # summary statistics.
  # args: model parameters, geneticData, type of distance used
  rotation = PCA_rotation(geneticData)
  ref_table = new_reference_table(geneticData, Distance = Distance)
  # BUG FIX: the pattern was "locus" (lower case) and never matched the
  # "Locus" columns, yielding an empty genotype container.
  locus_columns <- grep("Locus", colnames(geneticData))
  simulated_genetic <- geneticData[, locus_columns]
  forward_log_lik <- NA
  # simulate genetic data for each locus
  for (i in seq_along(locus_columns))
  {
    # BUG FIX: the function's own arguments are now forwarded; the original
    # re-hard-coded mutation_rate, mutation_model, stepvalue and mut_param,
    # silently ignoring what the caller passed.
    new_simulation <- simul_coalescent(geneticData=geneticData,
                                       rasterStack=rasterStack,
                                       pK=pK, pr=pr,
                                       shapesK=shapesK, shapesr=shapesr,
                                       shapeDisp=shapeDisp, pDisp=pDisp,
                                       mutation_rate=mutation_rate,
                                       initial_genetic_value=initial_genetic_value,
                                       mutation_model=mutation_model,stepvalue=stepvalue,
                                       mut_param=mut_param)
    simulated_genetic[,i] <- new_simulation$genetic_values[order( new_simulation$coalescing)[1:dim(geneticData)[1]],"genetic_value"]
    forward_log_lik <- new_simulation$forward_log_prob
  }
  # BUG FIX: forward_log_lik was passed as an empty argument ("forward_log_lik=,")
  ref_table = add_summary_stat(reference_table=ref_table,
                               geneticDataSim=simulated_genetic,
                               rotation=rotation,
                               forward_log_lik=forward_log_lik, Distance=Distance)
  ref_table
}
#
# test_stabilite_a_value (linearised-Fst variant)
# iterates forward simulation generations and stops when the linearised Fst
# series Fst/(1-Fst) shows increasing variance over the last three
# 30-generation windows (heuristic stationarity test).
#
test_stabilite_a_value <- function(geneticData, mutationRate, dimGeneticData, nb_generations=5000,transitionmatrice){
  ## ref: Rousset et al. J Evol Biol,13 (2000) 58-62.
  # value: list(geneticData, GenDist) on stabilisation; NULL otherwise.
  GenDist=list()
  for(i in 1: nb_generations){
    print(i)
    geneticData <- repnDispMutFunction(geneticData, dimGeneticData, mutationRate, transitionmatrice)
    Genotypes = geneticData[,grep("Locus", colnames(geneticData), fixed = T)]
    popmbrship=geneticData[,"Cell_numbers"]
    # NOTE(review): nCell is read from the calling/global environment -- confirm.
    Fst = Fstat(Genotypes,nCell,popmbrship,ploidy=2)
    Fis = Fst[[1]];Fst=Fst[[2]]
    GenDist[[i]] <- Fst/(1-Fst)
    if((i>90) && (i%%30 == 0)){
      # BUG FIX: `[[` with a vector index performs recursive extraction on a
      # list (an error here); use single-bracket slicing plus unlist().
      if(var(unlist(GenDist[(i-30):i])) > var(unlist(GenDist[(i-60):(i-30)]))
         && var(unlist(GenDist[(i-30):i])) > var(unlist(GenDist[(i-90):(i-60)]))){
        return(list(geneticData, GenDist))
      }
    }
  }
  NULL
}
# to get a matrix of a-values between all cells of a landscape from
# simulations of evolution where not all cells necessarily contain individuals
#
#
a_value_matrix_from_forward_simulation <- function(geneticFinal,rasterStack)
{
# Computes per-individual pairwise a-values, averages them within source cells
# (rows) then within target cells (columns), and embeds the result in a full
# ncell x ncell matrix (NA where a cell holds no individuals).
matrixQb = (1-Qwithin_pair(geneticFinal)+2*(Qwithin_pair(geneticFinal)-Qbetween(geneticFinal,dim(geneticFinal))))
matrixQw = 2*(1-Qwithin_pop(geneticFinal))
vecteur_a_value = matrixQb/matrixQw-1/2
vecteur_a_value[is.na(vecteur_a_value)] <-0
cell_numbers_list <- levels(as.factor(geneticFinal$Cell_numbers))
# average over the individuals of each source cell (rows)...
agg <- aggregate(vecteur_a_value,by=list(geneticFinal$Cell_numbers),FUN="mean")[,-1]
# ...then over the individuals of each target cell (columns)
agg2 <- t(aggregate(t(agg),by=list(geneticFinal$Cell_numbers),FUN="mean"))[-1,]
row.names(agg2) <- as.numeric(cell_numbers_list)
colnames(agg2) <- as.numeric(cell_numbers_list)
# embed the occupied-cell block in a landscape-sized matrix
agg3 <- matrix(NA, nrow=ncell(rasterStack),ncol=ncell(rasterStack))
agg3[as.numeric(row.names(agg2)),as.numeric(colnames(agg2))] <- agg2
agg3
}
# Function that determines the number of dispersal parameters for a given
# dispersal kernel (shapeDisp). Useful for nlm estimation.
nbpDisp <- function(shapeDisp){
  (switch(shapeDisp,
          fat_tail1 = 2,
          gaussian = 1,
          exponential = 1,
          contiguous = 1,
          island = 1,
          fat_tail2 = 2,
          # CONSISTENCY FIX: kernel accepted elsewhere in this file (sd + m)
          # but missing here, which made nbpDisp() return NULL for it.
          gaussian_island_mix = 2))
}
# Function that simulates genetic data with the parameters given.
# It returns a list of 2 elements: the final genetic data observed and the
# a-value statistic observed at stabilisation.
simulationGenet <- function(donneesEnvironmentObs, pK, pR, shapesK, shapesR, mutationRate, nbLocus, initial_locus_value, shapeDisp, pDisp, nb_generations,ind_per_cell=30){
# Evaluate the reaction norms on the environmental layers
K <- subset(ReactNorm(values(donneesEnvironmentObs), pK , shapesK),select="Y") # carrying capacity
r <- subset(ReactNorm(values(donneesEnvironmentObs), pR , shapesR),select="Y") # growth rate
# Mirror K and r back onto rasters with the same geometry
Rast_K <- donneesEnvironmentObs ; values(Rast_K) <- K
Rast_r <- donneesEnvironmentObs ; values(Rast_r) <- r
# Initial population: one genotype array filled to carrying capacity
# NOTE(review): ind_per_cell is currently unused in this body -- confirm.
geneticData = CreateGenetArray(Rast_K, nbLocus,initial_locus_value,Option="full_pop")
geneticData[,"Cell_number_init"] <- geneticData[,"Cell_numbers"]
dimGeneticData = dim(geneticData)
# Migration function used to calculate descendant from parents (draw with replacement)
migrationM = migrationMatrix(donneesEnvironmentObs,shapeDisp,pDisp)
transitionmatrice = transitionMatrixBackward(Npop = K, migration= migrationM)
# Iterate forward until the a-value stabilises (or nb_generations is reached)
geneticDataFinal = test_stabilite_a_value(geneticData, mutationRate, dimGeneticData, nb_generations,transitionmatrice)
a_value_obs = geneticDataFinal[[2]]
geneticDataFinal = geneticDataFinal[[1]]
return(list(geneticDataFinal, a_value_obs))
}
# plots matrixes of forward mutation for validation
#
#
matrixes_forward <- function(donneesEnvironmentObs, pK, pR, shapesK, shapesR, shapeDisp, pDisp, a_value_obs, a_value_att, file=NULL, mutationRate,nbLocus, initial_locus_value,indpercell)
{
  # Runs a forward genetic simulation and returns the objects needed for the
  # validation plots: the carrying-capacity landscape, the forward transition
  # matrix, the simulated a-values and the expected a-values.
  nblayers = dim(donneesEnvironmentObs)[3]
  nCell = ncell(donneesEnvironmentObs)
  Cell_numbers <- 1:nCell
  # BUG FIX: the last argument was the typo `indpercel` (an undefined object)
  geneticObs = simulationGenet(donneesEnvironmentObs,pK,pR,shapesK,shapesR,mutationRate,nbLocus,initial_locus_value,shapeDisp,pDisp,nb_generations=5000,indpercell)
  finalGenetData = geneticObs[[1]]
  a_value_simul = geneticObs[[2]]
  K <- reactNorm(donneesEnvironmentObs, pK, shapesK) # carrying capacity
  r <- reactNorm(donneesEnvironmentObs, pR, shapesR) # growth rate (comment fixed: was "carrying capacity")
  # Migration function used to calculate descendants from parents (draw with replacement)
  migrationM = migrationMatrix(donneesEnvironmentObs,shapeDisp,pDisp)
  transitionmatrice = transitionMatrixForward(r, K, migration= migrationM)
  # BUG FIX: a second assignment `land_size <- raster(matrix(K[1,]))` (without
  # dimensions) immediately overwrote this properly shaped raster; removed.
  land_size <- raster(matrix(K[1,],nrow=dim(donneesEnvironmentObs)[1],ncol=dim(donneesEnvironmentObs)[2])) # be careful: transposed
  extent(land_size) <- extent(donneesEnvironmentObs)
  list(land_size,transitionmatrice,a_value_simul,a_value_att)
}
# MAIN: driver for the functions simulating observed genetic data (kept commented out)
#mutationRate = 10^(-4); shapeDisp = "fat_tail1";
#nbLocus=10; initial_locus_value=200
#pDisp = c(alphaDisp = 20, betaDisp = 1.5); nbpDisp =nbpDisp(shapeDisp);
#aggregate_factor=16
#donneesEnvironmentObs = Aggregate_and_adjust_raster_to_data(raster(paste(wd,envdir,envfiles,sep="")),release=read.table(paste(wd,genetfile,sep="")), recovery=read.table(paste(wd,genetfile,sep="")), extend_band_size=1, aggregate_index=aggregate_factor)
# donneesEnvironmentObs = raster(matrix(NA,nrow=20,ncol=20))
#values(donneesEnvironmentObs) <-log(ceiling(runif(ncell(donneesEnvironmentObs),0,1)*3))
#plot(donneesEnvironmentObs)
#nblayers =dim(donneesEnvironmentObs)[3]
#nCell = ncell(donneesEnvironmentObs)
#geneticObs = simulationGenet(donneesEnvironmentObs,alpha=0, beta =1,mutationRate,nbLocus, initial_locus_value,shapeDisp,pDisp,nb_generations=5000)
#finalGenetData = geneticObs[[1]]
#a_value_obs = geneticObs[[2]]
#####################################
##### ESTIMATION DES PARAMETRES #####
#####################################
expect_a_value <- function(donneesEnvironmentObs,pK,pR,shapesK,shapesR,pDisp,Cell_numbers,nbpDisp,nblayers,shapeDisp)
{
  # Expected a-values between the sampled cells under the graph model:
  # reaction norms -> backward transition matrix -> Laplacian ->
  # resistance (commute) distances -> expected genetic distances.
  K = ReactNorm(rasterStack =donneesEnvironmentObs, pK, shapesK)
  r = ReactNorm(rasterStack =donneesEnvironmentObs, pR, shapesR)
  # BUG FIX: the dispersal parameters pDisp were never passed; shapeDisp was
  # mistakenly supplied twice.
  matrice_migration = migrationMatrix(donneesEnvironmentObs,shapeDisp, pDisp)
  matrice_transition = transitionMatrixBackward(K,r, matrice_migration)
  matrice_laplace = laplaceMatrix(matrice_transition)
  commute_time = resistDist(matrice_laplace)
  # NOTE(review): popSize is read from the calling/global environment -- confirm.
  genetic_dist_att = geneticDist(commute_time, popSize)
  a_value_att = genetic_dist_att[Cell_numbers,Cell_numbers]
  a_value_att
}
ssr <- function(p){
# Sum-of-squares objective between observed and expected a-values.
# NOTE(review): meant as an nlm() objective, but everything it uses
# (donneesEnvironmentObs, pK, pR, shapes*, pDisp, finalGenetData,
# a_value_obs, ...) is read from the global environment, and the parameter
# vector `p` is never used in the body -- confirm intent. A second `ssr`
# definition appears later in the file and overrides this one.
a_value_att = expect_a_value(donneesEnvironmentObs,pK,pR,shapesK,shapesR,pDisp,Cell_numbers=finalGenetData$Cell_Number,nbpDisp,nblayers,shapeDisp)
return(mean((a_value_obs - a_value_att)^2))
}
#initial = c(0,1,20,1.5)
#fct_erreur_calc = nlm(f = ssr , p = initial, hessian = FALSE)
#p=initial
#a_value_graph_model = expect_a_value(donneesEnvironmentObs,initial,finalGenetData$Cell_Number,nbpDisp=2,nblayers=1,shapeDisp="fat_tail1")
#ssr(initial)
#parametres_infere = fct_erreur_calc$estimate
#parametres_reels = c(alpha = 0, beta = 2,beta = 1, alphaDisp = 20, betaDisp = 1.5)
validation <- function(donneesEnvironmentObs,pK = matrix(c(100,400,300,10,0.5,1,300,2000,1500,10,.5,1),nrow=6,ncol=2,dimnames=list(c("Xmin","Xmax","Xopt","Yopt","Yxmin","Yxmax"),c("BIO1","BIO12"))), pR = matrix(c(100,400,300,10,0.5,1,300,2000,1500,10,.5,1),nrow=6,ncol=2,dimnames=list(c("Xmin","Xmax","Xopt","Yopt","Yxmin","Yxmax"),c("BIO1","BIO12"))),shapesK=c(BIO1="conquadraticskewed",BIO12="conquadraticskewed"),shapesR=c(BIO1="conquadraticskewed",BIO12="conquadraticskewed"), shapeDisp="fat_tail1", pDisp = c(mean=0.32,shape=1.6), file=NULL,mutationRate,nbLocus, initial_locus_value,nb_generations=5000,indpercell=30)
{
  # Runs a forward simulation and compares the simulated a-values against
  # three theoretical expectations (stepping stone, island, graph model),
  # plotting all matrices side by side (to `file` as jpeg if given).
  # value: list of the plotted matrices plus the final genetic data.
  nblayers =dim(donneesEnvironmentObs)[3]
  nCell = ncell(donneesEnvironmentObs)
  Cell_numbers <- 1:nCell
  K <- ReactNorm(values(donneesEnvironmentObs), pK , shapesK)$Y # carrying capacity
  r <- ReactNorm(values(donneesEnvironmentObs), pR , shapesR)$Y # growth rate
  # Migration function used to calculate descendants from parents (draw with replacement)
  migrationM = migrationMatrix(donneesEnvironmentObs,shapeDisp,pDisp)
  transitionmatrice = transitionMatrixBackward(Npop = K, migration= migrationM)
  # BUG FIX: simulationGenet was called with the undefined `alpha`/`beta` and
  # a signature that no longer exists; the reaction-norm arguments are now
  # forwarded, and the nb_generations argument replaces the hard-coded 5000.
  geneticObs = simulationGenet(donneesEnvironmentObs,pK,pR,shapesK,shapesR,mutationRate,nbLocus,initial_locus_value,shapeDisp,pDisp,nb_generations,indpercell)
  finalGenetData = geneticObs[[1]]
  # NOTE(review): K[1,] assumes K is a matrix; ReactNorm(...)$Y may be a plain
  # vector -- confirm.
  land_size <- raster(matrix(K[1,],nrow=dim(donneesEnvironmentObs)[1],ncol=dim(donneesEnvironmentObs)[2]))
  a_value_simul = a_value_matrix_from_forward_simulation(geneticObs[[1]],land_size)
  # stepping-stone expectation (0.05 appears to be an assumed migration rate -- confirm)
  a_value_theory_stepping_stone_model = distanceMatrix(donneesEnvironmentObs)/(4*0.05)
  a_value_theory_island_model <- matrix(pDisp[1],nrow=nCell,ncol=nCell)-pDisp[1]*diag(nCell)
  a_value_theory_graph_model <- expect_a_value(donneesEnvironmentObs,pK,pR,shapesK,shapesR,pDisp,Cell_numbers,nbpDisp=nbpDisp,nblayers=nblayers,shapeDisp)
  if (!is.null(file)){jpeg(file)}
  par(mfrow=c(2,3))
  plot(land_size,main="Population sizes")
  #plot(raster(migrationM))
  plot(raster(transitionmatrice),main="Transition matrix")
  plot(raster(a_value_simul),main="Simul genet differ")
  plot(raster(a_value_theory_stepping_stone_model),main="Expected stepping stone")
  plot(raster(a_value_theory_island_model),main="Expected island")
  plot(raster(a_value_theory_graph_model),main="Expected graph model")
  if (!is.null(file)){dev.off()}
  list(land_size,transitionmatrice,a_value_simul,a_value_theory_stepping_stone_model,a_value_theory_island_model,a_value_theory_graph_model,finalGenetData)
}
#test nlm
ssr <- function(p){
# nlm() objective: rebuilds the expected a-value matrix from the parameter
# vector p -- p[1:(nblayers+1)] carrying-capacity parameters,
# p[(nblayers+2):(nbpDisp+nblayers+1)] dispersal parameters, and the last
# element the population size passed to geneticDist() -- then returns the
# mean squared deviation from the observed a-values.
# NOTE(review): donneesEnvironmentObs, shapeDisp, nblayers, nbpDisp,
# finalGenetData and a_value_obs are all read from the global environment.
popSize = K_Function(rasterStack = donneesEnvironmentObs, p[1], p[2:(nblayers+1)])
matrice_migration = migrationMatrix(donneesEnvironmentObs,shapeDisp, p[(nblayers+2):(nbpDisp+nblayers+1)])
matrice_transition = transitionMatrixBackward(popSize, matrice_migration)
matrice_laplace = laplaceMatrix(matrice_transition)
commute_time = resistDist(matrice_laplace)
genetic_dist_att = geneticDist(commute_time, p[(nbpDisp+nblayers+2)])
a_value_att = genetic_dist_att[finalGenetData$Cell_Number,finalGenetData$Cell_Number]
return(mean((a_value_obs - a_value_att)^2))
}
| /graphPOP_0.114.R | no_license | stdupas/EnvironmentalDemogeneticsABC | R | false | false | 48,512 | r | ####
#### STATISTIC MODEL : INFERENCE OF ECOLOGICAL MODEL FROM GENETIC DATA
####
##########################################################################
############## Set your working directory and files to load ##############
##########################################################################
# convention reminder: matrix objects -> column-wise; raster objects -> row-wise
####################################################
##### BACKWARD MODEL FUNCTIONS AND EXECUTIONS ######
##### SIMULATION OF PREDICTED GENETIC DATA ######
####################################################
degree2km = function(rasterStack){
# Function to get spatial resolution in km from a rasterStack
#
# Args:
# rasterStack: the rasterStack from which to obtain the resolution
#
# Returns:
# The spatial resolution in km from the rasterStack
x_origin = ((xmin(rasterStack)+xmax(rasterStack))/2) #longitude origin
y_origin = ((ymin(rasterStack)+ymax(rasterStack))/2) #latitude origin
x_destination = (x_origin + xres(rasterStack)) #longitude of destination point
y_destination = (y_origin + yres(rasterStack)) #latitude of destination point
dist_degree <- acos(sin(x_origin)*sin(x_destination)+cos(x_origin)*cos(x_destination)*cos(y_origin-y_destination))
dist_km = dist_degree * 111.32
dist_km
}
Aggregate_and_adjust_raster_to_data <- function(Envir_raster_stack,release,recovery,extend_band_size,aggregate_index)
{
# Change resolution and extent of environmental stacked layers according to data geographic range and extension zone outside geographic range of data
#
# Args:
# Envir_raster_stack: raster file
# release: release points file (columns "X" and "Y" as longitude nd latitude)
# recovery: recovery points file (columns "X" and "Y" as longitude nd latitude)
#
# Returns:
# The transformed rasterStack
samples <- SpatialPoints(rbind(na.omit(release[,c("X","Y")]),na.omit(recovery[,c("X","Y")])))
if (aggregate_index > 1) {Envir_raster_stack <- aggregate(crop(Envir_raster_stack,extent(samples)+extend_band_size), fact=aggregate_index, fun=mean, expand=TRUE, na.rm=TRUE)} else {
Envir_raster_stack <- crop(Envir_raster_stack,extent(samples)+extend_band_size)
}
Envir_raster_stack
}
Show_Niche <- function(BBox,nb_points,p,shapes=c(BIO1="conquadraticskewed",BIO12="conquadraticskewed")) # non terminé
{
# Allow to visualize two dimensional niche function
#
# Args:
# BBox: pounding box of variable values (two variables) data.frame columns as variable names, lines as c("Min","Max")
# nb_points: number of points to draw between min and max for each variable
# p: parameter values of the reaction norm for each variable as column
# shapes: shapes of the reaction norms for each variable in a vector
#
# Returns:
#
# Example
# BB = matrix(c(100,400,200,3200),nrow=2,ncol=2,dimnames=list(c("Min","Max"),c("BIO1","BIO12")))
# p = matrix(c(100,500,300,0,10,10,300,3000,2500,0,20,20),nrow=6,ncol=2,dimnames=list(c("Xmin","Xmax","Xopt","Yxmin","Yxmax","Yopt"),c("BIO1","BIO12")))
# Shapes=c(BIO1="conquadraticskewed",BIO12="conquadraticskewed")
# Show_Niche(BB,nb_points=c(12,18),p,shapes=Shapes)
Data = as.data.frame(matrix(NA,nrow=1,ncol=length(shapes)));colnames(Data)=colnames(p);Data=Data[-1,]
n=rep(1,length(shapes))
Var1=NULL
for(i in 1:nb_points[1])
{
Var1=append(Var1,rep(i,nb_points[2]))
}
Var2 = rep(1:nb_points[2],nb_points[1])
Data = as.matrix(data.frame(Var1=Var1,Var2=Var2));colnames(Data)=colnames(p)
Data = BB[rep("Min",dim(Data)[1]),]+(Data-1)*(BB[rep("Max",dim(Data)[1]),]-BB[rep("Min",dim(Data)[1]),])/matrix(nb_points-1,nrow=dim(Data)[1],ncol=dim(Data)[2],byrow=TRUE)
rownames(Data)=1:dim(Data)[1];Data=as.data.frame(Data)
form = as.formula(paste("z~",paste(names(shapes),collapse="*"),sep=""))
Data[,"z"]=ReactNorm(Data,p,shapes)[,"Y"]
wireframe(form,data=Data,scales=list(arrows=FALSE)) # requires library lattice
}
# populationSize: uses K_Function to obtain the populationsize landscape raster
#
#
populationSize <- function(donneesEnvironmentObs, p, shapes)
{
# Give population size according to a landscape raster.
#
# Args:
# donneesEnvironmentObs:
# p:
# shapes:
#
# Returns:
# Population size
populationSize <- donneesEnvironmentObs
values(populationSize) <- ReactNorm(valules(donneesEnvironmentObs), p, shapes)[1,]
populationSize
}
distanceMatrix <- function(rasterStack){
# (optional) distanceMatrix return distance between all cells of raster
# get x and y coordinates for each cell of raster object put in parameters
coords = xyFromCell(rasterStack, 1:length(values(rasterStack[[1]])), spatial=FALSE)
distance = as.matrix(dist(coords)) # distance matrix of coordinates
return(distance)
}
#prior
# simulation forward of population sizes across time
forward_simul_landpopsize <- function(N0,p, migration)
{
}
# laplaceMatrix returns Laplacian matrix from transition matrix
laplaceMatrix <- function(transitionMatrix){
matrixD = matrix(0,nrow = dim(transitionMatrix), ncol = dim(transitionMatrix))
diag(matrixD) = 1 # diagonal equals to 1
laplacianMatrix = matrixD - transitionMatrix
laplacianMatrix[is.na(laplacianMatrix)]<-0 # replace NA by 0
laplacianMatrix
}
# Calcul of resistance between two points of the graph
# with the Moore-Penrose generalized inverser matrix.
# ref : Bapat et all, A Simple Method for Computing Resistance Distance (2003)
# ref : Courrieu, Fast Computation of Moore-Penrose Inverse Matrices (2005)
resistDist <- function(laplacianMatrix){
inverseMP = ginv(laplacianMatrix) # generalized inverse matrix (Moore Penrose)
diag = diag(inverseMP) # get diagonal of the inverse matrix
mii = matrix(diag, nrow =dim(inverseMP), ncol = dim(inverseMP))
mjj = t(mii)
mij = inverseMP
mji = t(mij)
commute_time = mii + mjj - mij - mji
commute_time
}
# Calcul of genetic distance from resistance
geneticDist <- function(commute_time, popSize){
#genetic_dist = commute_time / (8* popSize)
genetic_dist = commute_time / (8* (sum(popSize)/(dim(popSize)[1]*dim(popSize)[2])))
genetic_dist
}
# MAIN EXECUTIONS DES FONCTIONS PERMETTANT D'OBTENIR IN FINE LES DONNEES GENETIQUES PREDITES
#rasterCrop = Aggregate_and_adjust_raster_to_data(raster(paste(wd,envdir,envfiles,sep="")),release=read.table(paste(wd,genetfile,sep="")), recovery=read.table(paste(wd,genetfile,sep="")), extend_band_size=1, aggregate_index=aggregate_factor)
#plot(rasterCrop)
###################################################
##### FORWARD MODEL FUNCTIONS AND EXECUTIONS ######
##### SIMULATION OF OBSERVED GENETIC DATA ######
###################################################
#Plot genetic data in environmental data observed
#Genetic data is turn into a Spatial Pixel Data Frame
#Mettre des couleurs en fonction du nombre d'individu
plotGeneticData = function(geneticData, EnvironmentalDataObserved){
colnames(geneticData)[1:2] <- c("x","y")
geneticData = SpatialPixelsDataFrame(points = geneticData[,c("x","y")], data = geneticData[,])
plot(EnvironmentalDataObserved[[1]])
plot(geneticData, add = T)
}
# Function that computes population size distribution moments in a grid from one generation to the other
# N population sizes of the parent genration
# r growth rates
# K carrying capacities
# d death rates
# migration transition matrix between cells of the grid
# ptG : parameters of generaiton time model
gridRepnDispFunction <- function(dynamics,r,K,d=.9,ptG, migration,overlapping=TRUE)
{
# values(dynamics)[,dim(values(dynamics))[2]] is value at previoud day
# d is mortality
Nt = values(dynamics)[,dim(values(dynamics))[2]]*(1-d) + r*N*(K-N/K)
esperance[K==0] <- 0
}
# Function that combine reproduction, dispersion and mutation for a given genetic data
repnDispMutFunction <- function(geneticData, dimGeneticData, mutationRate, transitionmatrice){
# Calcul for reproduction and dispersion
# random choice of individuals and at the same time of their target cells in the transition matrix
ncell_transition <- dimGeneticData[1]*dim(transitionmatrice)[1]
transition_celnu <- sample(ncell_transition,dimGeneticData[1],replace=FALSE,transitionmatrice[geneticData[,"Cell_numbers"],])
transition_col <- ceiling(transition_celnu/dimGeneticData[1])
transition_line <- transition_celnu%%(dimGeneticData[1]);transition_line[transition_line==0]<-dimGeneticData[1]
cell_numbers_sampled <- geneticData[transition_line,"Cell_numbers"]
geneticData <- geneticData[transition_line,]
geneticData[,"Cell_numbers"] <- transition_col
locusCols = grep("Locus", colnames(geneticData))
step = 2
mu = mutationRate # mutation rate
liability = runif(prod(dimGeneticData), 0, 1) # mutation liability
liability = as.data.frame(matrix(liability, ncol = length(locusCols), nrow = dimGeneticData[1]))
geneticData[,locusCols] = geneticData[,locusCols] + ((liability<mu/2)*step - (liability>(1-mu/2))*step)
#print(c("mutrat",(sum(liability<mu/2)+sum(liability>(1-mu/2)))/(length(grep("Locus", colnames(geneticData)))*dimGeneticData[1])))
geneticData
}
# Function that combine reproduction, dispersion and mutation for a given genetic data
repnDispMutFunction <- function(geneticData, dimGeneticData, mutationRate, transitionmatrice){
# Calcul for reproduction and dispersion
# loop for individuals
locusCols = grep("Locus", colnames(geneticData))
for (individual in 1:dimGeneticData[1])
{ # we choose where the parent come from in the individual cell line probabilities of the backward transition matrice
mothercell = sample(nCell, 1,,transitionmatrice[geneticData[individual,"Cell_numbers"],])
# we chose the parent among the individuals in this cell
geneticline = sample(which(geneticData[,"Cell_numbers"]==mothercell),1)
# we atribute the individual the genetic data of its mother
geneticData[individual,locusCols] = geneticData[geneticline,locusCols]
}
step = 2
mu = mutationRate # mutation rate
liability = runif(prod(dimGeneticData), 0, 1) # mutation liability
liability = as.data.frame(matrix(liability, ncol = length(locusCols), nrow = dimGeneticData[1]))
geneticData[,locusCols] = geneticData[,locusCols] + ((liability<mu/2)*step - (liability>(1-mu/2))*step)
geneticData
}
#Function that calculate probability of identity of genes intra individual (at individuals level)
Qwithin_pair <- function(geneticData){
matrix_pair = geneticData[,grep(".2", colnames(geneticData), fixed = T)]
matrix_impair = geneticData[,grep(".1", colnames(geneticData), fixed = T)]
Qw <- (matrix_pair == matrix_impair)
Qw = rowMeans(Qw) # vector of probability of Qw for each individual
Qw = matrix(Qw, ncol = dim(geneticData)[1], nrow = dim(geneticData)[1])
Qw = (Qw+t(Qw))/2
Qw
}
#Function that calculate probability of identity of genes intra individual (at population level)
Qwithin_pop <- function(geneticData){
matrix_pair = geneticData[,grep(".2", colnames(geneticData), fixed = T)]
matrix_impair = geneticData[,grep(".1", colnames(geneticData), fixed = T)]
Qw <- (matrix_pair == matrix_impair)
Qw = mean(Qw*1)
Qw
}
# Fonction that calculates probability of identity of genes inter individual (between two individuals)
Qbetween <- function(geneticData, dimGeneticData){
Qb = matrix(ncol = dimGeneticData[1], nrow = dimGeneticData[1]) #initialization of Qb as a matrix
# A = genetic data with loci only
A=as.matrix(geneticData[,grep("Locus",colnames(geneticData),fixed=T)])
# On construit un tableau à plusieurs étages: il y a autant d'étage qu'il y a de locus ( les alleles passent en étage)
A3 = aperm(array(A,dim=c(dim(A)[1],dim(A)[2],dim(A)[1])), c(1,3,2)) # permutation des colonnes et des etages
B3 = aperm(A3, c(2,1,3)) # transposee de A3
moy1 = colMeans(aperm(A3 == B3), dims = 1, na.rm = T)
#Permutation des colonnes deux à deux des alleles de A pour calculer l'autre cas possible d'identite des alleles
l= 1:dim(A)[2]
Aprime= A[,c(matrix(c(l[2*floor(1:(length(l)/2))],l[2*floor(1:(length(l)/2))-1]), nrow= 2, ncol = length(l)/2, byrow = T))] # Permutation des colonnes
# permutation et creation des etages pour Aprime ( on ne change qu'une seule des matrice (A3 / B3) et l'autre est inchangée pour comparer les alleles"complementaires"
Aprime3 = aperm(array(Aprime,dim=c(dim(A)[1],dim(A)[2],dim(A)[1])), c(1,3,2))
moy2 = colMeans(aperm(Aprime3 == B3), dims = 1, na.rm = T) # calcul moy dist pour les individus avec loci permutés Aprime3 et la transposée B3
#Mean of distance between individuals: Qbetween
Qb =(moy1 + moy2)/2
Qb
}
# TEST: pour un nombre de generation donné, on teste la stabilité du a value
#Function test of stabilisation for a value
test_stabilite_a_value <- function(geneticData, mutationRate, dimGeneticData, nb_generations=5000,transitionmatrice){
## ref: Rousset et al. J Evol Biol,13 (2000) 58-62.
vecteur_a_value <-c(0)
for(i in 1: nb_generations){
print(i)
geneticData <- repnDispMutFunction(geneticData, dimGeneticData, mutationRate, transitionmatrice)
matrixQb = (1-Qwithin_pair(geneticData)+2*(Qwithin_pair(geneticData)-Qbetween(geneticData,dimGeneticData)))
matrixQw = 2*(1-Qwithin_pop(geneticData))
vecteur_a_value[i] = matrixQb/matrixQw-1/2
vecteur_a_value[is.na(vecteur_a_value)] <-0
if((i>90) && (i%%30 == 0)){
if(var(vecteur_a_value[(i-30):i])> var(vecteur_a_value[(i-60):(i-30)])
&& var(vecteur_a_value[(i-30):i])> var(vecteur_a_value[(i-90):(i-60)])){
return(list(geneticData, (matrixQb/matrixQw-1/2)))
break
}
}
}
}
test_stabilite_a_value <- function(geneticData, mutationRate, dimGeneticData, nb_generations=5000,transitionmatrice){
## ref: Rousset et al. J Evol Biol,13 (2000) 58-62.
vecteur_a_value <-c(0)
for(i in 1: nb_generations){
print(i)
geneticData <- repnDispMutFunction(geneticData, dimGeneticData, mutationRate, transitionmatrice)
Genotypes = geneticData[,grep("Locus", colnames(geneticData), fixed = T)]
popmbrship=geneticData[,"Cell_numbers"]
Fst = Fstat(Genotypes,nCell,popmbrship,ploidy=2)
Fstlinear[i] <- Fst/(1-Fst)
if((i>90) && (i%%30 == 0)){
if(var(Fst[(i-30):i])> var(Fst[(i-60):(i-30)])
&& var(Fst[(i-30):i])> var(Fst[(i-90):(i-60)])){
return(list(geneticData, Fst))
break
}
}
}
}
fstat = function(geneticData){
Genotypes = geneticData[,grep("Locus", colnames(geneticData), fixed = T)]
form <- as.formulae
Pops = geneticData[,"Cell_numbers"]
MeanPop = t(matrix((colSums(Genotypes)/dimGeneticData[1]),ncol=dimGeneticData[1],nrow=dimGeneticData[2]))
VarInd = matrix(Genotypes^2 - MeanTot^2,ncol=dimGeneticData[2])
VarInterPop = var(MeanPop)
VarIntraPop = colSums(VarInd)/dimGeneticData[1]
VarTot = VarInd
}
simul_commute <- function(cells=c(1,2),transitionmatrice)
{
tmpcell <- cells[1];t=1
while (tmpcell != cells[2])
{
tmpcell = sample(dim(transitionmatrice)[2],size=1,prob=c(transitionmatrice[tmpcell,]))
t=t+1
}
hit=TRUE
while (tmpcell != cells[1])
{
tmpcell = sample(dim(transitionmatrice)[2],size=1,prob=c(transitionmatrice[tmpcell,]))
t=t+1
}
commute=TRUE
t
}
simul_coocur <- function(cells=c(1,2),transitionmatrice)
{
tmpcell1 <- cells[1];tmpcell2 <- cells[2];t=1
while (tmpcell1 != tmpcell2)
{
tmpcell1 = sample(dim(transitionmatrice)[2],size=1,prob=c(transitionmatrice[tmpcell1,]))
tmpcell2 = sample(dim(transitionmatrice)[2],size=1,prob=c(transitionmatrice[tmpcell2,]))
t=t+1
}
t
}
combine.names = function(names1,names2)
{
combination=NULL
for (name1 in names1)
{
for (name2 in names2)
{
combination=append(combination,paste(name1,name2,sep="."))
}
}
combination
}
react_norm_param <- function(shapes)
{
params = NULL
for (shape in shapes)
{
params = append(params, switch(shape,
constant = paste(names(rasterStack),".Y",sep=""),
enveloppe = combine.names(names(rasterStack),c("Xmin","Xmax","Yopt")),
envelin = combine.names(names(rasterStack),c("Yxmin","Yxmax","Xmin","Xmax")),
envloglin = combine.names(names(rasterStack),c("Yxmin","Yxmax","Xmin","Xmax")),
linear = combine.names(names(rasterStack),c("X0","slope")),
linearPositive = combine.names(names(rasterStack),c("X0","slope")),
conquadratic = combine.names(names(rasterStack),c("Xmin","Xmax","Yopt")),
conquadraticsq = combine.names(names(rasterStack),c("Xmin","Xmax","Yopt")),
conquadraticskewed = combine.names(names(rasterStack),c("Xmin","Xmax","Xopt","Yopt")),
conquadraticskewedsq = combine.names(names(rasterStack),c("Xmin","Xmax","Xopt","Yopt"))
)
)
}
params
}
input_reaction_norm_shape_model <- function(demographic_parameter,names_envir_variables)
{
allshapes= c("constant", "enveloppe", "envelin", "envloglin", "linear", "linearPositive", "conquadratic", "conquadraticsq", "conquadraticskewed", "conquadraticskewedsq")
shape=NULL
for (envir in names_envir_variables)
{
ok=FALSE
while (!ok) {
cat("Enter reaction norm model for variable",envir,"\n",
"and demographic parameter", demographic_parameter,"\n",
"(or 'h' for help) : ")
shape <- append(shape,readline("Enter: ")) # prompt
ok = (shape %in% allshapes)
if (!ok) {cat("Please chose among: ","\n",
paste(allshapes,collapse="\n"))
shape=shape[-length(shape)]
}
}
}
shape
}
set_prior_vector_from_keyb <- function(name,n)
{
# sets a vector of priors from keyboard
# args:
# name of the vector to create
# n: length of the vector
#
ok = FALSE
while(!ok)
{
prior_dist <- readline(paste("Enter prior distribution for",
name,"(enter h for help): "))
ok = prior_dist%in%c("uniform","log_uniform","normal","log_normal")
if (!ok) cat("\n","models implemented are :",
"\n","'uniform'",
"\n","'log_uniform'",
"\n","'normal'",
"\n","'log_normal'"
)
}
parameters_names_prior_dist=switch(prior_dist,
uniform = c("min","max"),
log_uniform = c("min","max"),
normal = c("mean","sd"),
log_normal = c("mean","sd"))
params = NULL
if (prior_dist=="log_normal") {cat("for log-normal, note that: ",
"\n"," mean and sd are on the log scale")}
if (prior_dist=="log_uniform") {cat("for log-uniform, note that: ",
"\n","min and max are not on the log scale",
"\n","but on the variable scale")}
for (paramname in parameters_names_prior_dist)
{
params = append(params,
readline(
paste("Enter ",paramname,
" for ",prior_dist," distribution: ")
)
)
}
params=as.numeric(params)
names(params) = parameters_names_prior_dist
switch(prior_dist,
uniform=runif(n,params["min"],params["max"]),
log_uniform=exp(runif(n,log(params["min"]),log(params["max"]))),
normal=rnorm(n,params["mean"],params["sd"]),
log_normal=rlnorm(n,params["mean"],params["sd"])
)
}
set_ref_table_from_keyb <- function(rasterStack,n)
{
# function to create model parameter names of reference table
# arg: none
# value: reference table and average model
#
# reaction norm models
shapesK = input_reaction_norm_shape_model("K",names(rasterStack))
pKnames = react_norm_param(shapesK)
shapesr = input_reaction_norm_shape_model("r",names(rasterStack))
prnames = react_norm_param(shapesr)
# mutation model
ok=FALSE
while (!ok) {mutation_model <- readline("Enter mutation model (or 'h' for help) : ") # prompt
ok = (mutation_model %in% c("tpm","bigeometric","stepwise"))
if (!ok) {
cat("\n","models implemented are :",
"\n","'stepwise'",
"\n","'bigeometric'",
"\n","'tpm': two phase mutation model")
}
}
mut_param_names = switch(mutation_model,
bigeometric = "sigma2",
tmp = c("p","sigma2"),
stepwise = NULL
)
# Dispersion
ok=FALSE
while (!ok) {shapeDisp <- readline("Enter dispersion model (or 'h' for help) : ") # prompt
ok = (shapeDisp %in% c("fat_tail1","gaussian",
"exponential","contiguous",
"island", "fat_tail2",
"gaussian_island_mix"))
if (!ok) {
cat("\n","models implemented are :",
"\n","'fat_tail1' (Chapman et al)",
"\n","'gaussian'",
"\n","'exponential'",
"\n","'contiguous'",
"\n","'island'",
"\n","'fat_tail2' (Moilanen et al)",
"\n","'gaussian_island_mix'")
}
}
Dispersion_parameter_names = switch(shapeDisp,
fat_tail1 = c("alpha","beta"),
gaussian = c("sd"),
gaussian = c("sd"),
exponential = c("mean"),
contiguous = c("m"),
island = c("m"),
fat_tail2 = c("alpha","beta"),
gaussian_island_mix = c("sd","m")
)
# set priors
priors = list()
priors$shapesK = shapesK
priors$pK = rep(NA,length(pKnames));names(priors$pK)=pKnames
priors_names <- c(prnames,pKnames,mut_param_names,Dispersion_parameter_names)
df = as.data.frame(matrix(NA,nrow=n,ncol=length(priors_names)))
colnames(df)=priors_names
for (name in priors_names)
{
df[,name] <- set_prior_vector_from_keyb(name,n)
}
df
}
input_priors <- function()
{
# function to create prior values for reference table and average model
# arg: none
# value: reference table and average model
#
nb_simul <- as.numeric(readline("Number of simulations: "))
ok=FALSE
while (!ok) {shape_model <- readline("Enter mutation model or 'h' for help : ") # prompt
ok = (shape_model %in% c("tpm","bigeometric","stepwise"))
if (!ok) {
cat("\n","models implemented are :",
"\n","'stepwise'",
"\n","'bigeometric'",
"\n","'tpm': two phase mutation model")
}
}
if (mutation_model=="bigeometric")
{
sigma2Dist <- readline("Enter distribution of variance maximum of geometric distribution (1/p): ")
if (sigmaDist=="uniform")
{
sigma2Max <- readline("Enter variance maximum of geometric distribution (1/p): ")
sigma2Min <- readline("Enter variance minimum of geometric distribution (1/p): ")
sigma2 <- runif(n=nb_simul,min=sigma2Min,max=sigma2Max)
}
}
if (mutation_model=="tpm")
{
sigma2Dist <- readline("Enter prior distribution shape for variance of geometric distribution (1/p): ")
if (sigma2Dist=="uniform")
{
sigma2Max <- as.numeric(readline("Enter maximum of variance of geometric distribution: "))
sigma2Min <- as.numeric(readline("Enter minimum of variance of geometric distribution: "))
sigma2 <- runif(nb_simul,sigma2Min,sigma2Max)
pMax <- readline("Enter stepwise maximum proportion: ")
pMin <- readline("Enter stepwise minimum proportion: ")
p <- runif(nb_simul,pMin,pMax)
}
}
}
set_model <- function(pK, pr, shapesK, shapesr, shapeDisp, pDisp,
mutation_rate, initial_genetic_value,
mutation_model,stepvalue,
mut_param)
{
# sets a genetic and environemental demographic model as a list
# arg: parameters
# value : list describing the models (shapes of distribution and parameters)
model = list(pK=pK, pr=pr,
shapesK=shapesK, shapesr=shapesr,
shapeDisp=shapeDisp, pDisp=pDisp,
mutation_rate=mutation_rate,
initial_genetic_value=initial_genetic_value,
mutation_model=mutation_model,stepvalue=stepvalue,
mut_param=mut_param)
check_model(model)
}
check_dispersion_model <- function(shapeDisp,pDisp)
{
# checks that shapeDisp and pDisp are compatible
# arg:
# value:
compatible = switch(shapeDisp,
fat_tail1 = c("alpha","beta")%in%names(pDisp),
gaussian = c("sd")%in%names(pDisp),
gaussian = c("sd")%in%names(pDisp),
exponential = c("mean")%in%names(pDisp),
contiguous = c("m")%in%names(pDisp),
island = c("m")%in%names(pDisp),
fat_tail2 = c("alpha","beta")%in%names(pDisp),
gaussian_island_mix = c("sd","m")%in%names(pDisp)
)
if (!all(compatible)) {
Message = switch(shapeDisp,
fat_tail1 = "fat_tail1 : pDisp=c(alpha=..,beta=..)",
gaussian = "gaussian : pDisp=c(sd=..)",
exponential = "exponential: pDisp=c(mean=..)",
contiguous = "contiguous: pDisp=c(m=..)",
island = "island: pDisp=c(m=..)",
fat_tail2 = "fat_tail2: pDisp=c(alpha=..,beta=..)",
gaussian_island_mix = "gaussian_island_mix: pDisp=c(sd=..,m=..)"
)
stop (paste("check dispersion model. For",Message))
}
"Dispersion model OK"
}
check_reaction_norm <- function(shapes,p)
{
# Checks whether shapes and parameter matrix of reaction norms
# corresponds for all the variables
#
if (!all(shapes %in% c("enveloppe", "envelin", "envloglin","loG","linear",
"conquadratic","conquadraticskewed","conquadraticsq",
"conquadraticskewedsq","constant","linearPositive")))
stop("shape of reaction norm is unknown,
please chose among 'constant', 'enveloppe', 'linearPositive' 'envelin', 'envloglin','loG','linear',
'conquadratic','conquadraticskewed','conquadraticsq' or 'conquadraticskewedsq'
") else if (length(shapes)!=dim(p)[2])
stop(paste("reaction norm shapes and parameters do not use the same number of
environemental variables.")) else {
compatible=NA
for (i in 1:length(shapes))
{
compatible[i] <- switch(shapes[i],
constant = all(c("Y")%in%row.names(p)),
enveloppe = all(c("Xmin","Xmax","Yopt")%in%row.names(p)),
envelin = all(c("Yxmin","Yxmax","Xmin","Xmax")%in%row.names(p)),
envloglin = all(c("Yxmin","Yxmax","Xmin","Xmax")%in%row.names(p)),
linear = all(c("X0","slope")%in%row.names(p)),
linearPositive = all(c("X0","slope")%in%row.names(p)),
conquadratic = all(c("Xmin","Xmax","Xopt")%in%row.names(p)),
conquadraticsq = all(c("Xmin","Xmax","Xopt")%in%row.names(p)),
conquadraticskewed = all(c("Xmin","Xmax","Xopt","Yopt")%in%row.names(p)),
conquadraticskewedsq = all(c("Xmin","Xmax","Xopt","Yopt")%in%row.names(p)))
}
if (!all(compatible))
{
Message=NA
for (i in which(!compatible))
{
Message = switch(shapes[i],
constant = "rownames of pDisp matrix parameter for constant shape is 'Y'. ",
enveloppe = "rownames of pDisp matrix parameter for enveloppe shape are 'Xmin', 'Xmax' and 'Xopt'. ",
envelin = "rownames of pDisp matrix parameter for envelin shape are 'Yxmin', 'Yxmax', 'Xmin', and 'Xmax'. ",
envloglin = "rownames of pDisp matrix parameter for enveloglin shape are 'Yxmin', 'Yxmax', 'Xmin', and 'Xmax'. ",
linear = "rownames of pDisp matrix parameter for linear shape are 'X0' and 'slope'. ",
linearPositive = "rownames of pDisp matrix parameter for linearPositive shape are 'X0' and 'slope'. ",
conquadratic = "rownames of pDisp matrix parameter for conquadratic shape are 'Xmin', 'Xmax' and Xopt. ",
conquadraticsq = "rownames of pDisp matrix parameter for conquadraticsq shape are 'Xmin', 'Xmax' and Xopt. ",
conquadraticskewed = "rownames of pDisp matrix parameter for conquadraticskewed shape are 'Xmin', 'Xmax', Xopt and Yxopt. ",
conquadraticskewed = "rownames of pDisp matrix parameter for conquadraticskewed shape are 'Xmin', 'Xmax', Xopt and Yxopt. "
)
}
stop(paste("Error in reaction norm model settings:",
"for environmental variable(s) number",
paste(which(!compatible),collapse=" and "),
", parameters corresponding to the shape annouced are not provided as rownames. Note that",
paste(Message,collapse=" ")))
}
}
if (!all(compatible)) return(paste(compatible[2],"bad row names in variable number", which(!compatible),sep=" "))
return(TRUE)
}
check_mutation_model<-function(mutation_model,mutation_parameter)
{
compatible =
switch(mutation_model,
bigeometric = c("sigma2")%in%names(mutation_parameter),
tmp = c("p","sigma2")%in%names(mutation_parameter)
)
if (!all(compatible)) {
Message = switch(shapeDisp,
bigeometric = "bigeometric : mut_param=c(sigma2=..)",
tmp = "tmp : mut_param=c(p=.., sigma2=..)"
)
stop(paste("check mutation parameter(s) names for",Message))
}
"mutation model OK"
}
check_model <- function(model)
{
check_reaction_norm(model$shapesr,model$pr)
check_reaction_norm(model$shapesK,model$pK)
check_mutation_model(model$mutation_model,model$mut_param)
check_dispersion_model(model$shapeDisp,model$pDisp)
}
setClass("DispersionModel", representation(ID="numeric",
shapeDisp="character",
pDisp="vector"
)
)
setClass("ReactionNorm",representation(shapes = "character",
p="matrix"))
setClass("MutationModel",representation(model="character"
))
setClass("EnvDemogenetModel",representation(ID = "numeric",
K = "ReactionNorm",
r = "ReactionNorm",
Dispersion = "DispersionModel"
))
#
# add_genetic_to_coaltable function
# adds genetic values to a coalescent table containing mutation number per branch
# knowing initial genetic value of the ancastor and mutation model
genetics_of_coaltable <- function(coaltable,initial_genetic_value,mutation_model,stepvalue=2,mut_param=c(p=.5,sigma=2))
{
switch(mutation_model,
step_wise = stepwise(coaltable,initial_genetic_value,stepvalue),
tpm = tpm(coaltable,initial_genetic_value,stepvalue,mut_param),
bigeometric = bigeometric(coaltable,initial_genetic_value,stepvalue,mut_param)
)
}
# coalescent_2_phylog
# converts a coalescent simulation to a class phylog tree (library ape)
#
coalescent_2_phylog <- function(coalescent)
{
read.tree(text=coalescent_2_newick(coalescent))
}
#
# plot_coalescent plots a coalescent simulation
# argument: output of simul_coalescent()
plot_coalescent <- function(coalescent,genetic_table,with_landscape=FALSE,rasK=NULL,legend_right_move=-.2)
{
if (with_landscape) {par(mfrow=c(1,2),oma=c(0,0,0,4),xpd=TRUE)}else{par(mfrow=c(1,1),oma=c(0,0,0,4),xpd=TRUE)}
tipcells <- geneticData$Cell_numbers[as.numeric(coalescent_2_phylog(coalescent)$tip.label)]
tipcols = rainbow(ncell(rasK))[tipcells]
phylog_format_tree <- coalescent_2_phylog(coalescent)
phylog_format_tree$tip.label <- paste(phylog_format_tree$tip.label,genetic_table[order(genetic_table$coalescing)[as.numeric(phylog_format_tree$tip.label)],"genetic_value"],sep=":")
plot(phylog_format_tree,direction="downward",tip.color=tipcols)
legend("topright", title="demes", cex=0.75, pch=16, col=tipcols[!duplicated(tipcols)], legend=tipcells[!duplicated(tipcols)], ncol=2, inset=c(legend_right_move,0))
if (with_landscape) {plot(rasK)}
}
# summary_stat calculates summary stats for observed and simulated data and creates a reference table
# geneticDataSimulList : a list of simulations, with sublist geneticData and sublist log_lik_forward
#
set_priors <- function(variables,Min,Max,nb_lines)
{
df = data.frame(matrix(NA,nrow=1,ncol=length(variables)))
for (i in variables)
{
}
}
new_reference_table <- function(geneticData,Distance,priors)
{
# creates a new reference table from genetic data and priors data
# !!!!!! details : first line of reference table is NOT rotated observed genetic data
#list of of parameters
#list of summary stats
GenetDist = switch(Distance,
Goldstein = dist(geneticData[,grep("Locus",colnames(geneticData))])^.5,
pID = pID(geneticData)
)
Significant_Components <- which(abs(prcomp(GenetDist)$x)>1E-5)
df <- as.data.frame(matrix(NA,nrow=1,ncol=length(Significant_Components)+1,dimnames=list("l1",c(paste("C",c(Significant_Components),sep=""),"loglik"))))[FALSE,]
df
}
add_summary_stat <- function(reference_table,geneticDataSim,rotation,forward_log_lik,Distance="Goldstein")
{
# add_summary_stats
# add summary statistics of a simulation to reference table for ABC analysis
# arguments:
# reference_table the reference table to append
# rotation tha PCA rotation to apply to geneticData
# geneticDataSim : the simulated genetic data
# forward_log_lik : forward log likelihood of the simulated genetic data
# Distance : distance to apply to genetic data (Goldstein : difference in number of repeats)
# pID : proportion of identity.
#
# value: appended reference_table
#1) We calculate a matrix of observed genetic distance between individuals (DSAI = sum_j (Shared alleles) / (number of loci))
# or reapeat number distance (Goldstein 1995)
GenetDist = switch(Distance,
Goldstein = dist(geneticData[,grep("Locus",colnames(geneticData))])^.5,
pID = pID(geneticData)
)
#2) We express simulated data in these axis. First summary stats are the values of each invidiual
# in each of these major axes
summary_stats = as.matrix(GenetDist) %*% rotation
rbind(reference_table,as.vector(summary_stats))
}
fill_reference_table <- function(geneticData,Distance,rasterStack=rasterStack,
pK=pK, pr=pr,
shapesK=shapesK, shapesr=shapesr,
shapeDisp=shapeDisp, pDisp=pDisp,
mutation_rate=1E-1,
initial_genetic_value=initial_genetic_value,
mutation_model="tpm",stepvalue=2,
mut_param=c(p=.5,sigma2=4))
{
# filling a reference table using parameters values
# args:
# parameters of the model, geneticData, type of distance used
rotation = PCA_rotation(geneticData)
ref_table = new_reference_table(geneticData,Distance="Goldstein")
simulated_genetic <- geneticData[,grep("locus",colnames(geneticData))]
# simulation de genetic data pour chaque locus
for (i in 1:length(grep("Locus",colnames(geneticData))))
{
new_simulation <- simul_coalescent(geneticData=geneticData,
rasterStack=rasterStack,
pK=pK, pr=pr,
shapesK=shapesK, shapesr=shapesr,
shapeDisp=shapeDisp, pDisp=pDisp,
mutation_rate=1E-1,
initial_genetic_value=initial_genetic_value,
mutation_model="tpm",stepvalue=2,
mut_param=c(p=.5,sigma2=4))
simulated_genetic[,i] <- new_simulation$genetic_values[order( new_simulation$coalescing)[1:dim(geneticData)[1]],"genetic_value"]
forward_log_lik <- new_simulation$forward_log_prob
}
simulated_genetic
ref_table = add_summary_stat(reference_table=ref_table,
geneticDataSim=simulated_genetic,
rotation=rotation,
forward_log_lik=,Distance="Goldstein")
ref_table
}
#
# genetic_simulation
# this function simulates genetic data from a coalescent
#
#
test_stabilite_a_value <- function(geneticData, mutationRate, dimGeneticData, nb_generations=5000,transitionmatrice){
## ref: Rousset et al. J Evol Biol,13 (2000) 58-62.
vecteur_a_value <-c(0)
GenDist=list()
for(i in 1: nb_generations){#i=1
print(i)
geneticData <- repnDispMutFunction(geneticData, dimGeneticData, mutationRate, transitionmatrice)
Genotypes = geneticData[,grep("Locus", colnames(geneticData), fixed = T)]
popmbrship=geneticData[,"Cell_numbers"]
Fst = Fstat(Genotypes,nCell,popmbrship,ploidy=2)
Fis = Fst[[1]];Fst=Fst[[2]]
GenDist[[i]] <- Fst/(1-Fst)
if((i>90) && (i%%30 == 0)){
if(var(GenDist[[(i-30):i]])> var(GenDist[[(i-60):(i-30)]])
&& var(GenDist[[(i-30):i]])> var(GenDist[[(i-90):(i-60)]])){
return(list(geneticData, GenDist))
break
}
}
}
}
# to get a matrix of a-values between all cells of a landscape from
# simulations of evolution where not all cells necesarily contain individuals
#
#
a_value_matrix_from_forward_simulation <- function(geneticFinal,rasterStack)
{
matrixQb = (1-Qwithin_pair(geneticFinal)+2*(Qwithin_pair(geneticFinal)-Qbetween(geneticFinal,dim(geneticFinal))))
matrixQw = 2*(1-Qwithin_pop(geneticFinal))
vecteur_a_value = matrixQb/matrixQw-1/2
vecteur_a_value[is.na(vecteur_a_value)] <-0
cell_numbers_list <- levels(as.factor(geneticFinal$Cell_numbers))
agg <- aggregate(vecteur_a_value,by=list(geneticFinal$Cell_numbers),FUN="mean")[,-1]
agg2 <- t(aggregate(t(agg),by=list(geneticFinal$Cell_numbers),FUN="mean"))[-1,]
row.names(agg2) <- as.numeric(cell_numbers_list)
colnames(agg2) <- as.numeric(cell_numbers_list)
agg3 <- matrix(NA, nrow=ncell(rasterStack),ncol=ncell(rasterStack))
agg3[as.numeric(row.names(agg2)),as.numeric(colnames(agg2))] <- agg2
agg3
}
# Function that determine number of dispersal parameters from dispersal shapeDisp used
# Useful for nlm estimation
nbpDisp <- function(shapeDisp){
(switch(shapeDisp,
fat_tail1 = 2,
gaussian = 1,
exponential = 1,
contiguous = 1,
island = 1,
fat_tail2 = 2))
}
# Simulate genetic data on the observed environmental landscape.
# Builds carrying-capacity (K) and growth-rate (r) surfaces from the reaction
# norms, initialises a full population, then runs generations of
# reproduction/dispersal/mutation via test_stabilite_a_value().
# Returns a list of 2 variables: the final genetic data observed and the
# per-generation genetic-distance trace ("a-values observed").
# NOTE(review): `ind_per_cell` is accepted but never used below, and Rast_r is
# built but never used -- confirm whether they were meant to be forwarded.
simulationGenet <- function(donneesEnvironmentObs, pK, pR, shapesK, shapesR, mutationRate, nbLocus, initial_locus_value, shapeDisp, pDisp, nb_generations,ind_per_cell=30){
K <- subset(ReactNorm(values(donneesEnvironmentObs), pK , shapesK),select="Y") # carrying capacity
r <- subset(ReactNorm(values(donneesEnvironmentObs), pR , shapesR),select="Y") # growth rate
# Copy the landscape geometry and overwrite the values with K and r surfaces.
Rast_K <- donneesEnvironmentObs ; values(Rast_K) <- K
Rast_r <- donneesEnvironmentObs ; values(Rast_r) <- r
geneticData = CreateGenetArray(Rast_K, nbLocus,initial_locus_value,Option="full_pop")
# Remember each individual's birth cell before any dispersal happens.
geneticData[,"Cell_number_init"] <- geneticData[,"Cell_numbers"]
dimGeneticData = dim(geneticData)
# Migration function used to calculate descendant from parents (draw with replacement)
migrationM = migrationMatrix(donneesEnvironmentObs,shapeDisp,pDisp)
transitionmatrice = transitionMatrixBackward(Npop = K, migration= migrationM)
# Run the forward simulation until the differentiation statistic stabilises.
geneticDataFinal = test_stabilite_a_value(geneticData, mutationRate, dimGeneticData, nb_generations,transitionmatrice)
a_value_obs = geneticDataFinal[[2]]
geneticDataFinal = geneticDataFinal[[1]]
return(list(geneticDataFinal, a_value_obs))
}
# Build the matrices needed to validate the forward simulation: the landscape
# carrying-capacity raster, the forward transition matrix, the simulated
# a-value matrix and the caller-supplied expected a-value matrix.
#
# Args mirror simulationGenet()/migrationMatrix(); `a_value_att` is passed
# through unchanged; `file` and `a_value_obs` are currently unused here.
# Returns: list(land_size, transitionmatrice, a_value_simul, a_value_att).
matrixes_forward <- function(donneesEnvironmentObs, pK, pR, shapesK, shapesR, shapeDisp, pDisp, a_value_obs, a_value_att, file=NULL, mutationRate,nbLocus, initial_locus_value,indpercell)
{
nblayers =dim(donneesEnvironmentObs)[3]
nCell = ncell(donneesEnvironmentObs)
Cell_numbers <- 1:nCell
# BUGFIX: the original forwarded the undefined name `indpercel` (typo), which
# errors when evaluated; forward the actual `indpercell` argument.
geneticObs = simulationGenet(donneesEnvironmentObs,pK,pR,shapesK,shapesR,mutationRate,nbLocus,initial_locus_value,shapeDisp,pDisp,nb_generations=5000,indpercell)
finalGenetData = geneticObs[[1]]
a_value_simul = geneticObs[[2]]
K <-reactNorm(donneesEnvironmentObs, pK, shapesK) # carrying capacity
r <-reactNorm(donneesEnvironmentObs, pR, shapesR) # growth rate
# NOTE(review): `reactNorm` (lower-case r) differs from the `ReactNorm` used
# elsewhere in this file -- confirm which helper is intended.
# Migration function used to calculate descendant from parents (draw with replacement)
migrationM = migrationMatrix(donneesEnvironmentObs,shapeDisp,pDisp)
transitionmatrice = transitionMatrixForward(r, K, migration= migrationM)
# BUGFIX: the original immediately overwrote this raster with the
# un-dimensioned matrix(K[1,]) (a one-column matrix), discarding the landscape
# shape; keep only the properly dimensioned version. The matrix is transposed
# relative to the raster layout ("be careful transposed" in the original).
land_size <- raster(matrix(K[1,],nrow=dim(donneesEnvironmentObs)[1],ncol=dim(donneesEnvironmentObs)[2]))
extent(land_size) <- extent(donneesEnvironmentObs)
list(land_size,transitionmatrice,a_value_simul,a_value_att)
}
# MAIN DES FONCTIONS POUR SIMULER DES DONNEES GENETIQUES OBSERVEES
#mutationRate = 10^(-4); shapeDisp = "fat_tail1";
#nbLocus=10; initial_locus_value=200
#pDisp = c(alphaDisp = 20, betaDisp = 1.5); nbpDisp =nbpDisp(shapeDisp);
#aggregate_factor=16
#donneesEnvironmentObs = Aggregate_and_adjust_raster_to_data(raster(paste(wd,envdir,envfiles,sep="")),release=read.table(paste(wd,genetfile,sep="")), recovery=read.table(paste(wd,genetfile,sep="")), extend_band_size=1, aggregate_index=aggregate_factor)
# donneesEnvironmentObs = raster(matrix(NA,nrow=20,ncol=20))
#values(donneesEnvironmentObs) <-log(ceiling(runif(ncell(donneesEnvironmentObs),0,1)*3))
#plot(donneesEnvironmentObs)
#nblayers =dim(donneesEnvironmentObs)[3]
#nCell = ncell(donneesEnvironmentObs)
#geneticObs = simulationGenet(donneesEnvironmentObs,alpha=0, beta =1,mutationRate,nbLocus, initial_locus_value,shapeDisp,pDisp,nb_generations=5000)
#finalGenetData = geneticObs[[1]]
#a_value_obs = geneticObs[[2]]
#####################################
##### ESTIMATION DES PARAMETRES #####
#####################################
# Expected a-value matrix between the occupied cells under the graph model:
# reaction norms -> migration -> backward transition matrix -> Laplacian ->
# resistance (commute-time) distance -> genetic distance.
#
# Returns: the expected a-value matrix subset to `Cell_numbers`.
# NOTE(review): `nbpDisp` and `nblayers` are accepted but unused, and `popSize`
# is resolved from the calling/global environment -- confirm it is defined (or
# pass it explicitly) before relying on this function.
expect_a_value <- function(donneesEnvironmentObs,pK,pR,shapesK,shapesR,pDisp,Cell_numbers,nbpDisp,nblayers,shapeDisp)
{
K = ReactNorm(rasterStack =donneesEnvironmentObs, pK, shapesK)
r = ReactNorm(rasterStack =donneesEnvironmentObs, pR, shapesR)
# BUGFIX: the original passed `shapeDisp` twice, so migrationMatrix() received
# the kernel name where its parameter vector was expected and `pDisp` was
# silently ignored; pass the dispersal parameters.
matrice_migration = migrationMatrix(donneesEnvironmentObs,shapeDisp, pDisp)
matrice_transition = transitionMatrixBackward(K,r, matrice_migration)
matrice_laplace = laplaceMatrix(matrice_transition)
commute_time = resistDist(matrice_laplace)
genetic_dist_att = geneticDist(commute_time, popSize)
a_value_att = genetic_dist_att[Cell_numbers,Cell_numbers]
a_value_att
}
# Sum-of-squares objective for nlm(): mean squared difference between the
# observed and expected a-value matrices.
# NOTE(review): the optimiser's parameter vector `p` is ignored -- every input
# (donneesEnvironmentObs, pK, pR, shapesK, shapesR, pDisp, finalGenetData,
# a_value_obs, nbpDisp, nblayers, shapeDisp) is resolved from the global
# environment, so this objective is constant in p. A later redefinition of
# ssr() in this file does use p; confirm which version is intended.
ssr <- function(p){
a_value_att = expect_a_value(donneesEnvironmentObs,pK,pR,shapesK,shapesR,pDisp,Cell_numbers=finalGenetData$Cell_Number,nbpDisp,nblayers,shapeDisp)
return(mean((a_value_obs - a_value_att)^2))
}
#initial = c(0,1,20,1.5)
#fct_erreur_calc = nlm(f = ssr , p = initial, hessian = FALSE)
#p=initial
#a_value_graph_model = expect_a_value(donneesEnvironmentObs,initial,finalGenetData$Cell_Number,nbpDisp=2,nblayers=1,shapeDisp="fat_tail1")
#ssr(initial)
#parametres_infere = fct_erreur_calc$estimate
#parametres_reels = c(alpha = 0, beta = 2,beta = 1, alphaDisp = 20, betaDisp = 1.5)
# Validation of the forward machinery: simulate genetic data on the landscape
# and plot the simulated a-value matrix next to the theoretical expectations
# under the stepping-stone, island and graph models.
#
# Returns: list(land_size, transitionmatrice, a_value_simul,
#   a_value_theory_stepping_stone_model, a_value_theory_island_model,
#   a_value_theory_graph_model, finalGenetData).
# NOTE(review): the simulationGenet() call below passes `alpha` and `beta`,
# which are defined nowhere in this function or its defaults -- this looks like
# a stale call kept from an older simulationGenet() signature; confirm.
# NOTE(review): par(mfrow=...) changes global graphics state without restoring
# it (no on.exit(par(oldpar))).
validation <- function(donneesEnvironmentObs,pK = matrix(c(100,400,300,10,0.5,1,300,2000,1500,10,.5,1),nrow=6,ncol=2,dimnames=list(c("Xmin","Xmax","Xopt","Yopt","Yxmin","Yxmax"),c("BIO1","BIO12"))), pR = matrix(c(100,400,300,10,0.5,1,300,2000,1500,10,.5,1),nrow=6,ncol=2,dimnames=list(c("Xmin","Xmax","Xopt","Yopt","Yxmin","Yxmax"),c("BIO1","BIO12"))),shapesK=c(BIO1="conquadraticskewed",BIO12="conquadraticskewed"),shapesR=c(BIO1="conquadraticskewed",BIO12="conquadraticskewed"), shapeDisp="fat_tail1", pDisp = c(mean=0.32,shape=1.6), file=NULL,mutationRate,nbLocus, initial_locus_value,nb_generations=5000,indpercell=30)
{
nblayers =dim(donneesEnvironmentObs)[3]
nCell = ncell(donneesEnvironmentObs)
Cell_numbers <- 1:nCell
K <- ReactNorm(values(donneesEnvironmentObs), pK , shapesK)$Y # retrieve the carrying capacity
r <- ReactNorm(values(donneesEnvironmentObs), pR , shapesR)$Y # retrieve the growth rate
# Migration function used to calculate descendant from parents (draw with replacement)
migrationM = migrationMatrix(donneesEnvironmentObs,shapeDisp,pDisp)
transitionmatrice = transitionMatrixBackward(Npop = K, migration= migrationM)
geneticObs = simulationGenet(donneesEnvironmentObs,alpha, beta, mutationRate,nbLocus, initial_locus_value,shapeDisp,pDisp,nb_generations=5000,indpercell)
finalGenetData = geneticObs[[1]]
# Carrying-capacity raster used both for plotting and as the landscape geometry.
land_size <- raster(matrix(K[1,],nrow=dim(donneesEnvironmentObs)[1],ncol=dim(donneesEnvironmentObs)[2]))
a_value_simul = a_value_matrix_from_forward_simulation(geneticObs[[1]],land_size)
# Stepping-stone expectation: distance / 4Dsigma^2 with a hard-coded 0.05.
a_value_theory_stepping_stone_model = distanceMatrix(donneesEnvironmentObs)/(4*0.05)
# Island expectation: constant off-diagonal, zero diagonal.
a_value_theory_island_model <- matrix(pDisp[1],nrow=nCell,ncol=nCell)-pDisp[1]*diag(nCell)
a_value_theory_graph_model <- expect_a_value(donneesEnvironmentObs,pK,pR,shapesK,shapesR,pDisp,Cell_numbers,nbpDisp=nbpDisp,nblayers=nblayers,shapeDisp)
# Optionally render the 2x3 diagnostic panel to a jpeg.
if (!is.null(file)){jpeg(file)}
par(mfrow=c(2,3))
plot(land_size,main="Population sizes")
#plot(raster(migrationM))
plot(raster(transitionmatrice),main="Transition matrix")
plot(raster(a_value_simul),main="Simul genet differ")
plot(raster(a_value_theory_stepping_stone_model),main="Expected stepping stone")
plot(raster(a_value_theory_island_model),main="Expected island")
plot(raster(a_value_theory_graph_model),main="Expected graph model")
if (!is.null(file)){dev.off()}
list(land_size,transitionmatrice,a_value_simul,a_value_theory_stepping_stone_model,a_value_theory_island_model,a_value_theory_graph_model,finalGenetData)
}
#test nlm
# Sum-of-squares objective for nlm() over the packed parameter vector p:
#   p[1]                                -> first K_Function parameter
#   p[2:(nblayers+1)]                   -> per-layer K_Function parameters
#   p[(nblayers+2):(nbpDisp+nblayers+1)] -> dispersal-kernel parameters
#   p[(nbpDisp+nblayers+2)]             -> population-size scalar for geneticDist()
# NOTE(review): this redefinition masks the earlier ssr() in this file, and it
# resolves donneesEnvironmentObs, shapeDisp, nblayers, nbpDisp, finalGenetData
# and a_value_obs from the global environment -- confirm they are set before
# handing this to nlm().
ssr <- function(p){
popSize = K_Function(rasterStack = donneesEnvironmentObs, p[1], p[2:(nblayers+1)])
matrice_migration = migrationMatrix(donneesEnvironmentObs,shapeDisp, p[(nblayers+2):(nbpDisp+nblayers+1)])
matrice_transition = transitionMatrixBackward(popSize, matrice_migration)
matrice_laplace = laplaceMatrix(matrice_transition)
commute_time = resistDist(matrice_laplace)
genetic_dist_att = geneticDist(commute_time, p[(nbpDisp+nblayers+2)])
a_value_att = genetic_dist_att[finalGenetData$Cell_Number,finalGenetData$Cell_Number]
return(mean((a_value_obs - a_value_att)^2))
}
|
#load libraries
library(shiny)
library(leaflet)
library(tidyverse)
library(leaflet.extras)
library(RColorBrewer)
library(magrittr)
library(lubridate)
library(htmltools)
# Fixed seed so the 7000-row subsample below is reproducible across runs.
set.seed(1234)
#minimize file size of original data
#bosdata <- read_csv( "311calls.csv" )
#write.csv(bosdata[bosdata$year %in% c(2016,2017,2018,2019),],"311data.csv")
# Import and condense data: keep only the columns the app filters/displays.
bosdata <- read_csv("311data.csv")
# BUGFIX: as.tibble() is deprecated in tibble >= 2.0; use as_tibble().
bosdata <- as_tibble(bosdata[!is.na(bosdata$case_title),])
bosdata <- select(bosdata,c(open_dt,closed_dt,case_status,case_title,reason,subject,source,latitude,longitude))
bosdata <- mutate(bosdata, "year"= year(bosdata$open_dt))
# Down-sample to 7000 calls so the map stays responsive (nrow() is clearer than
# length(bosdata$reason) and draws the same sample under the fixed seed).
lilbosdata <- bosdata[sample(nrow(bosdata),7000),]
#bosdata <- select(bosdata,year == input$year)
ui <- fluidPage(
titlePanel("Boston 311 Data"),
mainPanel(
#map space
leafletOutput(outputId = "mybosmap")
),
sidebarPanel(
#Have user select 311 information year
absolutePanel(checkboxGroupInput("year",
"Year:",
choices = sort(unique(lilbosdata$year),decreasing = TRUE))
)
))
server <- function(input, output, session) {
#define the color pallate for subject of 311 call
data <- reactive({
#we add a validation statement here for the initial map with no year selected
validate(
need(input$year != "", "Please select a year")
)
lilbosdata[lilbosdata$year %in% c(as.character(input$year)),]})
# BUGFIX: "dark orange", "orange red" and "dark red" are not valid R colour
# names (built-in colour names contain no spaces); use the space-free forms.
pal <- colorFactor(
palette = c('purple','lightblue','blue','dodgerblue','darkblue','green','darkgreen','yellow','gold', 'orange', 'darkorange', 'orangered', 'red', 'darkred'),
domain = lilbosdata$subject)
#create the map
output$mybosmap <- renderLeaflet({
leaflet(data())%>%
addProviderTiles(providers$CartoDB.Positron) %>%
setView(lng = -71.075, lat = 42.3501, zoom = 14) %>%
addCircles(data = data(), lat = ~ latitude, lng = ~ longitude, weight = 6, fillOpacity = 0.7,color = ~pal(subject)) %>%
addLabelOnlyMarkers(label = ~as.character(case_title)) %>%
addLegend("bottomright", pal=pal, values = lilbosdata$subject, opacity = 1)
})
}
shinyApp(ui, server)
| /AnalyzeBoston/app.R | no_license | amturnr/615_R_Map | R | false | false | 2,343 | r | #load libraries
library(shiny)
library(leaflet)
library(tidyverse)
library(leaflet.extras)
library(RColorBrewer)
library(magrittr)
library(lubridate)
library(htmltools)
# Fixed seed so the 7000-row subsample below is reproducible across runs.
set.seed(1234)
#minimize file size of original data
#bosdata <- read_csv( "311calls.csv" )
#write.csv(bosdata[bosdata$year %in% c(2016,2017,2018,2019),],"311data.csv")
#import and condense data: keep only the columns the app filters/displays.
bosdata <- read_csv("311data.csv")
# NOTE(review): as.tibble() is deprecated in tibble >= 2.0 -- prefer as_tibble().
bosdata <- as.tibble(bosdata[!is.na(bosdata$case_title),])
bosdata <- select(bosdata,c(open_dt,closed_dt,case_status,case_title,reason,subject,source,latitude,longitude))
bosdata <- mutate(bosdata, "year"= year(bosdata$open_dt))
# Down-sample to 7000 calls so the map stays responsive
# (length(bosdata$reason) == nrow(bosdata), one element per row).
lilbosdata <- bosdata[sample(length(bosdata$reason),7000),]
#bosdata <- select(bosdata,year == input$year)
ui <- fluidPage(
titlePanel("Boston 311 Data"),
mainPanel(
#map space
leafletOutput(outputId = "mybosmap")
),
sidebarPanel(
#Have user select 311 information year
absolutePanel(checkboxGroupInput("year",
"Year:",
choices = sort(unique(lilbosdata$year),decreasing = TRUE))
)
))
server <- function(input, output, session) {
#define the color pallate for subject of 311 call
data <- reactive({
#we add a validation statment here for the initial map with no year selected
validate(
need(input$year != "", "Please select a year")
)
lilbosdata[lilbosdata$year %in% c(as.character(input$year)),]})
# NOTE(review): "dark orange", "orange red" and "dark red" do not appear in
# grDevices::colors() (built-in names contain no spaces) -- confirm these
# palette entries resolve; "darkorange"/"orangered"/"darkred" are the
# standard forms.
pal <- colorFactor(
palette = c('purple','lightblue','blue','dodgerblue','darkblue','green','darkgreen','yellow','gold', 'orange', 'dark orange', 'orange red', 'red', 'dark red'),
domain = lilbosdata$subject)
#create the map
output$mybosmap <- renderLeaflet({
leaflet(data())%>%
addProviderTiles(providers$CartoDB.Positron) %>%
setView(lng = -71.075, lat = 42.3501, zoom = 14) %>%
addCircles(data = data(), lat = ~ latitude, lng = ~ longitude, weight = 6, fillOpacity = 0.7,color = ~pal(subject)) %>%
addLabelOnlyMarkers(label = ~as.character(case_title)) %>%
addLegend("bottomright", pal=pal, values = lilbosdata$subject, opacity = 1)
})
}
shinyApp(ui, server)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/IGAPI.R
\name{IG_update_open_pos}
\alias{IG_update_open_pos}
\title{IG API Updates an OTC position.}
\usage{
IG_update_open_pos(headers,
url = "https://demo-api.ig.com/gateway/deal/positions/otc", limit_level,
stop_level, deal_id, trailingStop, trailingStopIncrement,
trailingStopDistance, timeo = 5)
}
\arguments{
\item{headers}{Object returned from \code{IG_Auth}}
\item{url}{API URL}
\item{limit_level}{Limit level}
\item{stop_level}{Stop level}
\item{deal_id}{deal identifier}
\item{trailingStop}{Whether the stop has to be moved towards the current level in case of a favourable trade}
\item{trailingStopIncrement}{increment step in pips for the trailing stop}
\item{trailingStopDistance}{Trailing stop distance}
\item{timeo}{number of tries}
}
\value{
A \code{data.frame} Deal reference of the transaction
}
\description{
Updates an OTC position.
}
\examples{
HEADERS = IG_Auth(" ","APIdemo1", " ")
IG_update_open_pos(HEADERS) # to be updated
}
| /man/IG_update_open_pos.Rd | permissive | ivanliu1989/RQuantAPI | R | false | true | 1,045 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/IGAPI.R
\name{IG_update_open_pos}
\alias{IG_update_open_pos}
\title{IG API Updates an OTC position.}
\usage{
IG_update_open_pos(headers,
url = "https://demo-api.ig.com/gateway/deal/positions/otc", limit_level,
stop_level, deal_id, trailingStop, trailingStopIncrement,
trailingStopDistance, timeo = 5)
}
\arguments{
\item{headers}{Object returned from \code{IG_Auth}}
\item{url}{API URL}
\item{limit_level}{Limit level}
\item{stop_level}{Stop level}
\item{deal_id}{deal identifier}
\item{trailingStop}{Whether the stop has to be moved towards the current level in case of a favourable trade}
\item{trailingStopIncrement}{increment step in pips for the trailing stop}
\item{trailingStopDistance}{Trailing stop distance}
\item{timeo}{number of tries}
}
\value{
A \code{data.frame} Deal reference of the transaction
}
\description{
Updates an OTC position.
}
\examples{
HEADERS = IG_Auth(" ","APIdemo1", " ")
IG_update_open_pos(HEADERS) # to be updated
}
|
############################################################################
# Get mom's phase
# should return two haps
#
# Phase a mother's genotype from her progeny: phase heterozygous sites in
# overlapping windows (phase_mom_chuck), then try to join the resulting
# haplotype chunks into one pair of extended haplotypes.
#
# Args:
#   estimated_mom: maternal genotype vector (per-site allele counts).
#   estimated_dad: paternal genotype vector.
#     NOTE(review): unused here -- phase_mom_chuck() is called with mom only;
#     confirm whether phase_mom_dad_chuck() was intended instead.
#   progeny: list of offspring records (kid z's genotypes = progeny[[z]][[2]]).
#   win_length: number of heterozygous sites per phasing window.
#   verbose: print progress messages?
# Returns: list of chunks, each list(hap1, hap2, het_site_indices); chunk 1
#   accumulates every chunk that could be linked, unlinked chunks stay separate.
### link haplotypes
phasing2 <- function(estimated_mom, estimated_dad, progeny, win_length, verbose=FALSE){
mom_haps <- setup_haps(win_length)
haplist <- phase_mom_chuck(estimated_mom, progeny, win_length, verbose, mom_haps)
if(verbose){ message(sprintf(">>> start to join hap chunks ...")) }
outhaplist <- list(list())
if(length(haplist) > 1){
outhaplist[[1]] <- haplist[[1]] ### store the extended haps: hap1, hap2 and idx
hap1 <- haplist[[1]][[1]]
hap2 <- haplist[[1]][[2]]
idx <- haplist[[1]][[3]]
i <- 1
for(chunki in 2:length(haplist)){
if(verbose){ message(sprintf(">>> join chunks [ %s and %s / %s] ...", chunki-1, chunki, length(haplist))) }
# join two neighbor haplotype chunks: offer link_haps() both phase
# combinations (old+new vs old+flipped-new) and keep the unambiguous winner.
oldchunk <- haplist[[chunki-1]]
newchunk <- haplist[[chunki]]
hapidx <- c(oldchunk[[3]], newchunk[[3]])
haps <- list(c(oldchunk[[1]], newchunk[[1]]), c(oldchunk[[1]], newchunk[[2]]))
temhap <- link_haps(momwin=hapidx, progeny, haps, returnhap=FALSE)
if(!is.null(temhap)){
# Count agreement over the old-chunk overlap to decide which extended
# haplotype the linked result belongs to.
same <- sum(hap1[(length(hap1)-length(oldchunk[[1]])+1):length(hap1)] == temhap[1:length(oldchunk[[1]])])
#same <- sum(mom_phase1[(length(mom_phase1)-8):length(mom_phase1)] == win_hap[1:length(win_hap)-1])
idx <- c(idx, newchunk[[3]])
if(same == 0){ #totally opposite phase of last window
#hap2[length(mom_phase2)+1] <- win_hap[length(win_hap)]
hap2 <- c(hap2, temhap[(length(oldchunk[[1]])+1):length(temhap)])
hap1 <- 1 - hap2
} else if(same== length(oldchunk[[1]]) ){ #same phase as last window
#mom_phase1[length(mom_phase1)+1] <- win_hap[length(win_hap)]
hap1 <- c(hap1, temhap[(length(oldchunk[[1]])+1):length(temhap)])
hap2 <- 1 - hap1
} else{
stop(">>> Extending error !!!")
}
} else {
# Ambiguous link: keep the new chunk as a separate entry in the output.
i <- i +1
outhaplist[[i]] <- haplist[[chunki]]
}
}
outhaplist[[1]] <- list(hap1, hap2, idx)
return(outhaplist)
}
else{
return(haplist)
}
}
##########################################
# Phase the mother across all sites heterozygous in mother OR father by sliding
# a window of `win_length` het sites one site at a time. When a window's phase
# cannot be inferred unambiguously (possible recombination in a kid), the
# current chunk is emitted and phasing restarts from a fresh window.
#
# Returns: list of chunks, each list(mom_phase1, mom_phase2, het_site_indices).
# NOTE(review): despite the name, only the mother is phased --
# dad_phase1/dad_phase2 are initialised but never filled.
# NOTE(review): jump_win() resolves `progeny` from the calling/global
# environment, not from this function's argument -- confirm they are the same.
phase_mom_dad_chuck <- function(estimated_mom, estimated_dad, progeny, win_length, verbose, mom_haps){
hetsites <- which(estimated_mom==1 | estimated_dad==1)
# gets all possible haplotypes for X hets
mom_phase1 = mom_phase2 = dad_phase1 = dad_phase2 = as.numeric()
win_hap = old_hap = nophase = as.numeric()
haplist <- list()
#for(winstart in 1:(length(hetsites)-(win_length-1)))
winstart <- i <- 1
while(winstart <= length(hetsites)-(win_length-1)){
if(verbose){ message(sprintf(">>> phasing window [ %s ] ...", winstart)) }
momwin <- hetsites[winstart:(winstart+win_length-1)]
if(winstart==1){
#arbitrarily assign win_hap to one chromosome initially
win_hap <- infer_dip(momwin,progeny,haps=mom_haps, returnhap=TRUE)
mom_phase1=win_hap
mom_phase2=1-win_hap
idxstart <- 1
} else{
win_hap <- infer_dip(momwin, progeny, haps=mom_haps, returnhap=FALSE)
### comparing current hap with old hap except the last bp -JLY
if(!is.null(win_hap)){
# Agreement with the previous window over the overlapping sites.
# NOTE(review): 1:length(win_hap)-1 parses as (1:n)-1 = 0:(n-1); the 0
# index is dropped by R, so this selects sites 1..n-1 -- correct, but
# only by accident of indexing rules.
same=sum(mom_phase1[(length(mom_phase1)-win_length+2):length(mom_phase1)]==win_hap[1:length(win_hap)-1])
if(same == 0){ #totally opposite phase of last window
mom_phase2[length(mom_phase2)+1] <- win_hap[length(win_hap)]
mom_phase1[length(mom_phase1)+1] <- 1-win_hap[length(win_hap)]
} else if(same==(win_length-1) ){ #same phase as last window
mom_phase1[length(mom_phase1)+1] <- win_hap[length(win_hap)]
mom_phase2[length(mom_phase2)+1] <- 1-win_hap[length(win_hap)]
} else{
# Partial agreement: assign the new allele to whichever phase is closer.
diff1 <- sum(abs(mom_phase1[(length(mom_phase1)-win_length+2):length(mom_phase1)]-win_hap[1:length(win_hap)-1]))
diff2 <- sum(abs(mom_phase2[(length(mom_phase1)-win_length+2):length(mom_phase1)]-win_hap[1:length(win_hap)-1]))
if(diff1 > diff2){ #momphase1 is less similar to current inferred hap
mom_phase2[length(mom_phase2)+1] <- win_hap[length(win_hap)]
mom_phase1[length(mom_phase1)+1] <- 1-win_hap[length(win_hap)]
} else{ #momphase1 is more similar
mom_phase1[length(mom_phase1)+1] <- win_hap[length(win_hap)]
mom_phase2[length(mom_phase2)+1] <- 1-win_hap[length(win_hap)]
}
}
} else {
### potential recombination in kids, output previous haps and jump to next non-overlap window -JLY###
idxend <- winstart + win_length -2
haplist[[i]] <- list(mom_phase1, mom_phase2, hetsites[idxstart:idxend])
i <- i +1
### warning(paste("Likely recombination at position", winstart+1, sep=" "))
### if new window is still ambiguous, add 1bp and keep running until find the best hap
winstart <- winstart + win_length -2
while(is.null(win_hap)){
winstart <- winstart + 1
win_hap <- jump_win(winstart, win_length, hetsites, mom_haps)
if(is.null(win_hap)){
nophase <- c(nophase, hetsites[winstart])
}
}
idxstart <- winstart
mom_phase1 <- win_hap
mom_phase2 <- 1-win_hap
}
}
winstart <- winstart + 1
}
### return the two haplotypes
#myh1 <- replace(estimated_mom/2, hetsites, mom_phase1)
#myh2 <- replace(estimated_mom/2, hetsites, 1-mom_phase1)
#return(data.frame(h1=myh1, h2=myh2))
#if(verbose){ message(sprintf(">>> phasing done!")) }
haplist[[i]] <- list(mom_phase1, mom_phase2, hetsites[idxstart:length(hetsites)])
## list: hap1, hap2 and idx; info
return(haplist)
#return(list(haplist=haplist, info=list(het=hetsites, nophase=nophase)))
}
############################################################################
# Score each candidate joined haplotype against every kid and return the single
# best one; returns NULL when several candidates tie (ambiguous link).
#
# Args:
#   momwin: heterozygous-site indices covered by the candidate haplotypes.
#   progeny: list of kids; kid z's genotype vector is progeny[[z]][[2]].
#   haps: list of candidate haplotypes (each the same length as momwin).
#   returnhap: unused here (kept for call symmetry with infer_dip()).
# NOTE(review): relies on which_phase() and (via it) the global `probs` table,
# both defined elsewhere in this package.
link_haps <- function(momwin, progeny, haps, returnhap=FALSE){
# momwin is list of heterozygous sites, progeny list of kids genotypes,
# haps list of possible haps,momphase1 is current phased mom for use in splitting ties
#### function for running one hap ####
runoverhaps <- function(myhap){
#iterate over possible haplotypes <- this is slower because setup_haps makes too many haps
#get max. prob for each kid, sum over kids
return(sum( sapply(1:length(progeny), function(z)
which_phase(haps[myhap],progeny[[z]][[2]][momwin] ))))
}
phase_probs <- sapply(1:(length(haps)), function(a) runoverhaps(a) )
#if multiple haps tie, signal ambiguity by returning NULL
if(length(which(phase_probs==max(phase_probs)))>1){
return()
} else {
return(haps[[which(phase_probs==max(phase_probs))]])
}
}
############################################################################
# Re-start phasing at `winstart` after an ambiguous window: infer the haplotype
# for the window beginning there, shrinking the window near the end of the
# chromosome. Returns the inferred haplotype, or NULL if still ambiguous.
# NOTE(review): `progeny` is not a parameter -- it is resolved from the
# enclosing/global environment; confirm it is defined at call time.
jump_win <- function(winstart, win_length, hetsites, mom_haps){
### jump to next window
if(length(hetsites) > (winstart + win_length - 1)){
momwin <- hetsites[winstart:(winstart + win_length - 1)]
win_hap <- infer_dip(momwin, progeny, haps=mom_haps, returnhap=FALSE)
}else{
# Fewer than win_length het sites remain: shrink the window to what is left
# and force a haplotype to be returned (ties broken at random in infer_dip).
momwin <- hetsites[winstart:length(hetsites)]
mom_haps_tem <- setup_haps(win_length=length(winstart:length(hetsites)))
win_hap <- infer_dip(momwin, progeny, haps=mom_haps_tem, returnhap=TRUE)
}
return(win_hap)
}
############################################################################
# Enumerate candidate haplotypes for a window of `win_length` heterozygous sites.
# Only the first 2^(win_length-1) of the 2^win_length binary vectors are kept;
# expand.grid() varies the first site fastest, so that first half is exactly the
# vectors whose last site is 0 -- one representative per complement pair
# (e.g. 010 is kept, its redundant mirror 101 is not).
setup_haps <- function(win_length){
  if(win_length > 20){
    stop("!!! Can not handle [win_length > 20] !")
  }
  # One c(0, 1) choice per site; expand.grid enumerates every combination.
  allele_sets <- rep(list(c(0, 1)), win_length)
  hapdf <- expand.grid(allele_sets)[seq_len(2^(win_length - 1)), ]
  # Transpose so each candidate haplotype becomes one list element.
  as.list(as.data.frame(t(hapdf)))
}
#system.time(tem2 <- setup_haps2(10))
#system.time(tem <- setup_haps(10))
############################################################################
# Infer which phase is mom in a window.
# Score each candidate haplotype in `haps` over the sites `momwin` by summing,
# across kids, which_phase()'s best log-probability for the kid's observed
# genotypes, and return the best-scoring haplotype.
#
# Returns: the unique best haplotype; on ties, a random tied haplotype when
# returnhap=TRUE, otherwise NULL to signal ambiguity to the caller.
infer_dip <- function(momwin, progeny, haps, returnhap=FALSE){
# momwin is list of heterozygous sites, progeny list of kids genotypes,
# haps list of possible haps,momphase1 is current phased mom for use in splitting ties
#### function for running one hap ####
runoverhaps <- function(myhap){
#iterate over possible haplotypes <- this is slower because setup_haps makes too many haps
#get max. prob for each kid, sum over kids
return(sum( sapply(1:length(progeny), function(z)
which_phase(haps[myhap],progeny[[z]][[2]][momwin] ))))
}
phase_probs <- sapply(1:(length(haps)), function(a) runoverhaps(a) )
#if multiple haps tie, check each against current phase and return one with smallest distance
if(length(which(phase_probs==max(phase_probs)))>1){
if(returnhap){
return(haps[[sample(which(phase_probs==max(phase_probs)), 1)]])
} else{
return(NULL)
}
} else {
return(haps[[which(phase_probs==max(phase_probs))]])
}
}
############################################################################
############################################################################
# Find most likely phase of kid at a window, return that probability.
# Given a mom haplotype, a dad haplotype and a kid's diploid genotype over the
# window, evaluate the four possible transmitted-haplotype combinations and
# return the maximum summed log-probability.
# Mendel is taken care of in the probs[[]] matrix already
# NOTE(review): `probs` is a global lookup table (probs[[2]] indexed by
# [true genotype + 1, observed genotype + 1]) defined elsewhere -- confirm it
# is loaded before use.
which_md_phase <- function(momhap, dadhap, kidwin){
four_genos=list()
#haplotype=unlist(haplotype)
# The four candidate kid genotypes: each parent transmits either the given
# haplotype or its complement.
four_genos[[1]] <- momhap + dadhap
four_genos[[2]] <- momhap + (1-dadhap)
four_genos[[3]] <- (1-momhap) + dadhap
four_genos[[4]] <- (1-momhap) + (1-dadhap)
geno_probs=as.numeric() #log-prob of each of the four candidate genotypes
for(geno in 1:4){
#log(probs[[2]][three_genotypes,kidwin] is the log prob. of kid's obs geno
#given the current phased geno and given mom is het. (which is why probs[[2]])
geno_probs[geno]=sum( sapply(1:length(momhap), function(zz)
log( probs[[2]][four_genos[[geno]][zz]+1, kidwin[zz]+1])))
}
### may introduce error
# NOTE(review): recover() is an interactive debugging hook left in -- a tie
# here is not handled cleanly in non-interactive runs; consider stop().
if(length(which(geno_probs==max(geno_probs)))!=1){recover()}
return(max(geno_probs))
}
###########################################
# Compare estimated haplotype chunks against simulated truth and report the
# per-site phasing error rate.
#
# Args:
#   newmom: list of chunks list(hap1, hap2, het_site_indices) as produced by
#     the phasing functions above.
#   sim: simulation object; sim[[1]] holds the true haplotypes with a column
#     named hap1.
# Returns: data.frame(piece = number of chunks, diff = mismatching sites,
#   tot = sites compared); also prints a summary message.
checkphasing <- function(newmom, sim){
diff <- tot <- 0
for(i in 1:length(newmom)){
truehap <- sim[[1]][newmom[[i]][[3]],]
esthap <- data.frame(h1=newmom[[i]][[1]], h2=newmom[[i]][[2]])
tab <- cbind(truehap, esthap)
# Pick whichever estimated haplotype correlates better with true hap1, then
# count the disagreeing sites under that assignment (phase label is arbitrary).
idx <- which.max(c(cor(tab$hap1, tab$h1), cor(tab$hap1, tab$h2)))
if(idx == 1){
a <- nrow(subset(tab, hap1 != h1))
}else{
a <- nrow(subset(tab, hap1 != h2))
}
diff <- diff + a
tot <- tot + nrow(tab)
}
message(sprintf(">>> [ %s ] chunks and [ %s ] error rate", length(newmom), diff/tot))
return(data.frame(piece=length(newmom), diff=diff, tot=tot))
}
############################################################################
# Same as above, but outputs the kid's phase: given this mom haplotype and a
# kid's diploid genotype over the window, returns the most likely kid genotype
# vector (rather than the maximum probability).
# Mendel is taken care of in the probs[[]] matrix already
# NOTE(review): `probs` is a global lookup table defined elsewhere, and
# recover() below is a leftover interactive debugging hook.
which_phase_kid <- function(haplotype,kidwin){
three_genotypes=list()
haplotype=unlist(haplotype)
# The three candidate kid genotypes from a het mom: hom for the haplotype,
# het, or hom for its complement.
three_genotypes[[1]]=haplotype+haplotype
three_genotypes[[2]]=haplotype+(1-haplotype)
three_genotypes[[3]]=(1-haplotype)+(1-haplotype)
geno_probs=as.numeric() #prob of each of three genotypes
for(geno in 1:3){
#log(probs[[2]][three_genotypes,kidwin] is the log prob. of kid's obs geno
#given the current phased geno and given mom is het. (which is why probs[[2]])
geno_probs[geno]=sum( sapply(1:length(haplotype), function(zz) log( probs[[2]][three_genotypes[[geno]][zz]+1,kidwin[zz]+1])))
}
if(length(which(geno_probs==max(geno_probs)))!=1){recover()}
return(three_genotypes[[which(geno_probs==max(geno_probs))]])
}
############################################################################
| /lib/Outcrosser.R | no_license | rossibarra/phasing_tests | R | false | false | 13,252 | r | ############################################################################
# Get mom's phase
# should return two haps
#
# Phase a mother's genotype from her progeny: phase het sites in overlapping
# windows (phase_mom_chuck), then join the resulting haplotype chunks.
# NOTE(review): `estimated_dad` is unused here -- confirm whether
# phase_mom_dad_chuck() was intended instead.
# Returns: list of chunks list(hap1, hap2, het_site_indices); chunk 1
# accumulates every chunk that could be linked, unlinked chunks stay separate.
### link haplotypes
phasing2 <- function(estimated_mom, estimated_dad, progeny, win_length, verbose=FALSE){
mom_haps <- setup_haps(win_length)
haplist <- phase_mom_chuck(estimated_mom, progeny, win_length, verbose, mom_haps)
if(verbose){ message(sprintf(">>> start to join hap chunks ...")) }
outhaplist <- list(list())
if(length(haplist) > 1){
outhaplist[[1]] <- haplist[[1]] ### store the extended haps: hap1, hap2 and idx
hap1 <- haplist[[1]][[1]]
hap2 <- haplist[[1]][[2]]
idx <- haplist[[1]][[3]]
i <- 1
for(chunki in 2:length(haplist)){
if(verbose){ message(sprintf(">>> join chunks [ %s and %s / %s] ...", chunki-1, chunki, length(haplist))) }
# join two neighbor haplotype chunks: offer link_haps() both phase
# combinations and keep the unambiguous winner.
oldchunk <- haplist[[chunki-1]]
newchunk <- haplist[[chunki]]
hapidx <- c(oldchunk[[3]], newchunk[[3]])
haps <- list(c(oldchunk[[1]], newchunk[[1]]), c(oldchunk[[1]], newchunk[[2]]))
temhap <- link_haps(momwin=hapidx, progeny, haps, returnhap=FALSE)
if(!is.null(temhap)){
# Agreement over the old-chunk overlap decides which extended haplotype
# the linked result belongs to.
same <- sum(hap1[(length(hap1)-length(oldchunk[[1]])+1):length(hap1)] == temhap[1:length(oldchunk[[1]])])
#same <- sum(mom_phase1[(length(mom_phase1)-8):length(mom_phase1)] == win_hap[1:length(win_hap)-1])
idx <- c(idx, newchunk[[3]])
if(same == 0){ #totally opposite phase of last window
#hap2[length(mom_phase2)+1] <- win_hap[length(win_hap)]
hap2 <- c(hap2, temhap[(length(oldchunk[[1]])+1):length(temhap)])
hap1 <- 1 - hap2
} else if(same== length(oldchunk[[1]]) ){ #same phase as last window
#mom_phase1[length(mom_phase1)+1] <- win_hap[length(win_hap)]
hap1 <- c(hap1, temhap[(length(oldchunk[[1]])+1):length(temhap)])
hap2 <- 1 - hap1
} else{
stop(">>> Extending error !!!")
}
} else {
# Ambiguous link: keep the new chunk as a separate output entry.
i <- i +1
outhaplist[[i]] <- haplist[[chunki]]
}
}
outhaplist[[1]] <- list(hap1, hap2, idx)
return(outhaplist)
}
else{
return(haplist)
}
}
##########################################
# Phase the mother across all sites heterozygous in mother OR father using a
# sliding window of `win_length` het sites; emit a chunk and restart whenever a
# window's phase is ambiguous (possible recombination in a kid).
# Returns: list of chunks list(mom_phase1, mom_phase2, het_site_indices).
# NOTE(review): only mom is phased (dad_phase1/2 never filled), and jump_win()
# resolves `progeny` from the calling/global environment.
phase_mom_dad_chuck <- function(estimated_mom, estimated_dad, progeny, win_length, verbose, mom_haps){
hetsites <- which(estimated_mom==1 | estimated_dad==1)
# gets all possible haplotypes for X hets
mom_phase1 = mom_phase2 = dad_phase1 = dad_phase2 = as.numeric()
win_hap = old_hap = nophase = as.numeric()
haplist <- list()
#for(winstart in 1:(length(hetsites)-(win_length-1)))
winstart <- i <- 1
while(winstart <= length(hetsites)-(win_length-1)){
if(verbose){ message(sprintf(">>> phasing window [ %s ] ...", winstart)) }
momwin <- hetsites[winstart:(winstart+win_length-1)]
if(winstart==1){
#arbitrarily assign win_hap to one chromosome initially
win_hap <- infer_dip(momwin,progeny,haps=mom_haps, returnhap=TRUE)
mom_phase1=win_hap
mom_phase2=1-win_hap
idxstart <- 1
} else{
win_hap <- infer_dip(momwin, progeny, haps=mom_haps, returnhap=FALSE)
### comparing current hap with old hap except the last bp -JLY
if(!is.null(win_hap)){
# Agreement with the previous window over the overlapping sites.
# NOTE(review): 1:length(win_hap)-1 parses as 0:(n-1); the 0 index is
# dropped, so this selects sites 1..n-1 -- correct only by accident.
same=sum(mom_phase1[(length(mom_phase1)-win_length+2):length(mom_phase1)]==win_hap[1:length(win_hap)-1])
if(same == 0){ #totally opposite phase of last window
mom_phase2[length(mom_phase2)+1] <- win_hap[length(win_hap)]
mom_phase1[length(mom_phase1)+1] <- 1-win_hap[length(win_hap)]
} else if(same==(win_length-1) ){ #same phase as last window
mom_phase1[length(mom_phase1)+1] <- win_hap[length(win_hap)]
mom_phase2[length(mom_phase2)+1] <- 1-win_hap[length(win_hap)]
} else{
# Partial agreement: extend whichever phase is closer to the new window.
diff1 <- sum(abs(mom_phase1[(length(mom_phase1)-win_length+2):length(mom_phase1)]-win_hap[1:length(win_hap)-1]))
diff2 <- sum(abs(mom_phase2[(length(mom_phase1)-win_length+2):length(mom_phase1)]-win_hap[1:length(win_hap)-1]))
if(diff1 > diff2){ #momphase1 is less similar to current inferred hap
mom_phase2[length(mom_phase2)+1] <- win_hap[length(win_hap)]
mom_phase1[length(mom_phase1)+1] <- 1-win_hap[length(win_hap)]
} else{ #momphase1 is more similar
mom_phase1[length(mom_phase1)+1] <- win_hap[length(win_hap)]
mom_phase2[length(mom_phase2)+1] <- 1-win_hap[length(win_hap)]
}
}
} else {
### potential recombination in kids, output previous haps and jump to next non-overlap window -JLY###
idxend <- winstart + win_length -2
haplist[[i]] <- list(mom_phase1, mom_phase2, hetsites[idxstart:idxend])
i <- i +1
### warning(paste("Likely recombination at position", winstart+1, sep=" "))
### if new window is still ambiguous, add 1bp and keep running until find the best hap
winstart <- winstart + win_length -2
while(is.null(win_hap)){
winstart <- winstart + 1
win_hap <- jump_win(winstart, win_length, hetsites, mom_haps)
if(is.null(win_hap)){
nophase <- c(nophase, hetsites[winstart])
}
}
idxstart <- winstart
mom_phase1 <- win_hap
mom_phase2 <- 1-win_hap
}
}
winstart <- winstart + 1
}
### return the two haplotypes
#myh1 <- replace(estimated_mom/2, hetsites, mom_phase1)
#myh2 <- replace(estimated_mom/2, hetsites, 1-mom_phase1)
#return(data.frame(h1=myh1, h2=myh2))
#if(verbose){ message(sprintf(">>> phasing done!")) }
haplist[[i]] <- list(mom_phase1, mom_phase2, hetsites[idxstart:length(hetsites)])
## list: hap1, hap2 and idx; info
return(haplist)
#return(list(haplist=haplist, info=list(het=hetsites, nophase=nophase)))
}
############################################################################
# Score each candidate joined haplotype against every kid and return the single
# best one; returns NULL when several candidates tie (ambiguous link).
# NOTE(review): `returnhap` is unused; relies on which_phase() and the global
# `probs` table defined elsewhere.
link_haps <- function(momwin, progeny, haps, returnhap=FALSE){
# momwin is list of heterozygous sites, progeny list of kids genotypes,
# haps list of possible haps,momphase1 is current phased mom for use in splitting ties
#### function for running one hap ####
runoverhaps <- function(myhap){
#iterate over possible haplotypes <- this is slower because setup_haps makes too many haps
#get max. prob for each kid, sum over kids
return(sum( sapply(1:length(progeny), function(z)
which_phase(haps[myhap],progeny[[z]][[2]][momwin] ))))
}
phase_probs <- sapply(1:(length(haps)), function(a) runoverhaps(a) )
#if multiple haps tie, signal ambiguity by returning NULL
if(length(which(phase_probs==max(phase_probs)))>1){
return()
} else {
return(haps[[which(phase_probs==max(phase_probs))]])
}
}
############################################################################
# Re-start phasing at `winstart` after an ambiguous window; shrinks the window
# near the end of the chromosome. Returns the inferred haplotype or NULL.
# NOTE(review): `progeny` is resolved from the calling/global environment, not
# from a parameter -- confirm it is defined at call time.
jump_win <- function(winstart, win_length, hetsites, mom_haps){
### jump to next window
if(length(hetsites) > (winstart + win_length - 1)){
momwin <- hetsites[winstart:(winstart + win_length - 1)]
win_hap <- infer_dip(momwin, progeny, haps=mom_haps, returnhap=FALSE)
}else{
# Fewer than win_length het sites remain: shrink the window and force a
# haplotype to be returned (ties broken at random in infer_dip).
momwin <- hetsites[winstart:length(hetsites)]
mom_haps_tem <- setup_haps(win_length=length(winstart:length(hetsites)))
win_hap <- infer_dip(momwin, progeny, haps=mom_haps_tem, returnhap=TRUE)
}
return(win_hap)
}
############################################################################
# Enumerate candidate haplotypes for a window of `win_length` heterozygous sites.
# Only the first 2^(win_length-1) of the 2^win_length binary vectors are kept;
# expand.grid() varies the first site fastest, so that first half is exactly the
# vectors whose last site is 0 -- one representative per complement pair
# (e.g. 010 is kept, its redundant mirror 101 is not).
setup_haps <- function(win_length){
  if(win_length > 20){
    stop("!!! Can not handle [win_length > 20] !")
  }
  # One c(0, 1) choice per site; expand.grid enumerates every combination.
  allele_sets <- rep(list(c(0, 1)), win_length)
  hapdf <- expand.grid(allele_sets)[seq_len(2^(win_length - 1)), ]
  # Transpose so each candidate haplotype becomes one list element.
  as.list(as.data.frame(t(hapdf)))
}
#system.time(tem2 <- setup_haps2(10))
#system.time(tem <- setup_haps(10))
############################################################################
# Infer which phase is mom in a window
# Infer the most likely maternal phase (haplotype) for one window.
#
# momwin:    vector of heterozygous-site indices defining the window
# progeny:   list of offspring; progeny[[z]][[2]] holds kid z's genotypes
# haps:      list of candidate haplotypes (e.g. from setup_haps)
# returnhap: if TRUE, ties are broken by sampling one best haplotype at
#            random; if FALSE, ties return NULL so the caller can resolve
#            the ambiguity itself
#
# Depends on which_phase() (defined elsewhere in this file) to score a
# candidate haplotype against one kid's genotypes over the window.
infer_dip <- function(momwin, progeny, haps, returnhap=FALSE){
# momwin is the list of heterozygous sites, progeny the list of kids'
# genotypes, haps the list of candidate haplotypes for this window
#### score one candidate haplotype ####
runoverhaps <- function(myhap){
#iterate over possible haplotypes <- this is slower because setup_haps makes too many haps
#sum each kid's best phase log-probability over all kids
return(sum( sapply(1:length(progeny), function(z)
which_phase(haps[myhap],progeny[[z]][[2]][momwin] ))))
}
phase_probs <- sapply(1:(length(haps)), function(a) runoverhaps(a) )
#if multiple haps tie: sample one at random (returnhap=TRUE) or return NULL
#so the caller can handle the ambiguity (returnhap=FALSE)
if(length(which(phase_probs==max(phase_probs)))>1){
if(returnhap){
return(haps[[sample(which(phase_probs==max(phase_probs)), 1)]])
} else{
return(NULL)
}
} else {
return(haps[[which(phase_probs==max(phase_probs))]])
}
}
############################################################################
############################################################################
# Find the most likely joint phase of a kid at a window given one maternal
# and one paternal haplotype, and return that maximum log-probability.
# Mendelian inheritance is already encoded in the probs[[ ]] matrix.
#
# momhap, dadhap: 0/1 haplotype vectors over the window
# kidwin:         the kid's observed genotypes (0/1/2) over the window
#
# NOTE(review): `probs` is not a parameter -- it is a genotype-likelihood
# lookup taken from the calling environment; confirm it is defined there.
which_md_phase <- function(momhap, dadhap, kidwin){
# the four possible parental phase combinations give four kid genotypes
four_genos=list()
#haplotype=unlist(haplotype)
four_genos[[1]] <- momhap + dadhap
four_genos[[2]] <- momhap + (1-dadhap)
four_genos[[3]] <- (1-momhap) + dadhap
four_genos[[4]] <- (1-momhap) + (1-dadhap)
geno_probs=as.numeric() #log-prob of each of the four candidate genotypes
for(geno in 1:4){
#log(probs[[2]][geno,kidwin]) is the log prob. of the kid's observed geno
#given the current phased geno and given mom is het. (which is why probs[[2]])
geno_probs[geno]=sum( sapply(1:length(momhap), function(zz)
log( probs[[2]][four_genos[[geno]][zz]+1, kidwin[zz]+1])))
}
### NOTE(review): recover() is an interactive debugging trap left in place
### for exact ties; it will stall non-interactive runs. Consider handling
### ties explicitly instead.
if(length(which(geno_probs==max(geno_probs)))!=1){recover()}
return(max(geno_probs))
}
###########################################
# Compare estimated phased chunks against the simulated truth and report
# the per-site phasing error rate.
#
# newmom: list of phased chunks; for chunk i, [[1]] and [[2]] are the two
#         estimated haplotypes and [[3]] indexes the corresponding rows of
#         the truth -- presumably sites; TODO confirm against the caller.
# sim:    simulation object; sim[[1]] is assumed to be a data frame of true
#         haplotypes with a column `hap1` -- TODO confirm.
#
# Returns a one-row data.frame(piece, diff, tot): number of chunks, count
# of mismatching sites, and total sites compared.
checkphasing <- function(newmom, sim){
diff <- tot <- 0
for(i in 1:length(newmom)){
truehap <- sim[[1]][newmom[[i]][[3]],]
esthap <- data.frame(h1=newmom[[i]][[1]], h2=newmom[[i]][[2]])
tab <- cbind(truehap, esthap)
# pick the estimated haplotype whose correlation with the true hap1 is
# larger -- phase labels (h1 vs h2) are arbitrary, so orient first.
# NOTE(review): cor() returns NA for a constant haplotype, which would
# break which.max here; confirm inputs always vary within a chunk.
idx <- which.max(c(cor(tab$hap1, tab$h1), cor(tab$hap1, tab$h2)))
if(idx == 1){
a <- nrow(subset(tab, hap1 != h1))
}else{
a <- nrow(subset(tab, hap1 != h2))
}
diff <- diff + a
tot <- tot + nrow(tab)
}
message(sprintf(">>> [ %s ] chunks and [ %s ] error rate", length(newmom), diff/tot))
return(data.frame(piece=length(newmom), diff=diff, tot=tot))
}
############################################################################
# Same idea as the scoring functions above, but outputs the kid's phase:
# given a maternal haplotype and a kid's diploid genotypes over the window,
# it returns the best-supported kid genotype vector (not the probability).
# Mendel is taken care of in the probs[[ ]] matrix already.
#
# NOTE(review): `probs` is looked up in the calling environment, and
# recover() is an interactive debugging trap that fires on exact ties.
which_phase_kid <- function(haplotype,kidwin){
# three candidate kid genotypes implied by the maternal haplotype
three_genotypes=list()
haplotype=unlist(haplotype)
three_genotypes[[1]]=haplotype+haplotype
three_genotypes[[2]]=haplotype+(1-haplotype)
three_genotypes[[3]]=(1-haplotype)+(1-haplotype)
geno_probs=as.numeric() #log-prob of each of the three candidate genotypes
for(geno in 1:3){
#log(probs[[2]][three_genotypes,kidwin]) is the log prob. of the kid's obs geno
#given the current phased geno and given mom is het. (which is why probs[[2]])
geno_probs[geno]=sum( sapply(1:length(haplotype), function(zz) log( probs[[2]][three_genotypes[[geno]][zz]+1,kidwin[zz]+1])))
}
if(length(which(geno_probs==max(geno_probs)))!=1){recover()}
return(three_genotypes[[which(geno_probs==max(geno_probs))]])
}
############################################################################
|
#' Generate multiple-source simulation datasets.
#'
#' Generate three coupled data matrices that share a common sparse score
#' vector, each with its own sparse loading vector, scale and noise level.
#'
#' @param n The number of rows (observations) in each simulated matrix;
#'   must be at least 25 so the structured part of the score vector fits.
#' @param p1 Number of variables in the first dataset (at least 16).
#' @param p2 Number of variables in the second dataset (at least 16).
#' @param p3 Number of variables in the third dataset (at least 16).
#' @param sig1 Noise ratio of the first dataset.
#' @param sig2 Noise ratio of the second dataset.
#' @param sig3 Noise ratio of the third dataset.
#' @param d1 Scale parameter of the first dataset.
#' @param d2 Scale parameter of the second dataset.
#' @param d3 Scale parameter of the third dataset.
#' @return \item{x}{multi-source data (list of three noisy matrices).}
#' \item{truex}{multi-source data without noise.}
simdata <- function(n, p1, p2, p3, sig1, sig2, sig3, d1, d2, d3) {
  # Shared sparse score vector: structured in the first 25 entries, zero
  # elsewhere, then normalised to unit Euclidean length.
  u <- matrix(c(10, 9, 8, 7, 6, 5, 4, 3, rep(2, 17), rep(0, n - 25)))
  u <- apply(u, 2, function(x) x / norm(x, '2'))
  # One sparse unit-norm loading vector per source; only the first 16
  # entries are non-zero.
  make_loading <- function(p) {
    v <- matrix(c(10, -10, 8, -8, 5, -5, rep(3, 5), rep(-3, 5), rep(0, p - 16)))
    apply(v, 2, function(x) x / norm(x, '2'))
  }
  v1 <- make_loading(p1)
  v2 <- make_loading(p2)
  v3 <- make_loading(p3)
  # Rank-one signal u * d * v' plus Gaussian noise scaled by
  # sig * max(signal), drawn source by source (preserves RNG order).
  make_source <- function(v, d, sig, p) {
    truex <- u %*% d %*% t(v)
    x <- truex + sig * max(truex) * matrix(rnorm(n * p), n, p)
    list(x = x, truex = truex)
  }
  s1 <- make_source(v1, d1, sig1, p1)
  s2 <- make_source(v2, d2, sig2, p2)
  s3 <- make_source(v3, d3, sig3, p3)
  list(x = list(s1$x, s2$x, s3$x),
       truex = list(s1$truex, s2$truex, s3$truex))
}
| /R/simdata.R | no_license | cqli-stat/issvd | R | false | false | 1,581 | r | #' Generate mupltiple-sources simulation datasets.
#'
#' Generate multiple-source simulation datasets following the settings.
#'
#' @param n The number of rows (observations) in each simulated matrix.
#' @param p1,p2,p3 The number of variables in the three datasets, respectively.
#' @param sig1,sig2,sig3 The noise ratios of the three datasets, respectively.
#' @param d1,d2,d3 The scale parameters of the three datasets, respectively.
#' @return \item{x}{multi-source data.}
#' \item{truex}{multi-source data without noise.}
# Simulate three coupled data matrices sharing one sparse score vector u,
# each with its own sparse loading vector (v1..v3), scale (d1..d3) and
# noise ratio (sig1..sig3). Returns list(x = noisy data, truex = noise-free).
# Requires n >= 25 and p1, p2, p3 >= 16 for the structured entries to fit.
simdata=function(n,p1,p2,p3,sig1,sig2,sig3,d1,d2,d3){
# total variable count; computed but never used below
p=p1+p2+p3
# dead assignment: u is immediately overwritten on the next line
u=matrix(0,n,1)
# shared score vector: structured first 25 entries, zeros elsewhere
u = matrix(c(10,9,8,7,6,5,4,3,rep(2,17),rep(0,n-25)))
# normalise u to unit Euclidean length
u=apply(u,2,function(x) x/norm(x,'2'))
# sparse loading vectors: only the first 16 entries are non-zero
v1=matrix(c(10,-10,8,-8,5,-5, rep(3,5),rep(-3,5),rep(0,p1-16)))
v2=matrix(c(10,-10,8,-8,5,-5, rep(3,5),rep(-3,5),rep(0,p2-16)))
v3=matrix(c(10,-10,8,-8,5,-5, rep(3,5),rep(-3,5),rep(0,p3-16)))
v1=apply(v1,2,function(x) x/norm(x,'2'))
v2=apply(v2,2,function(x) x/norm(x,'2'))
v3=apply(v3,2,function(x) x/norm(x,'2'))
# rank-one signal u*d*v' plus noise scaled by sig*max(signal), per source
d=d1
truex1=u%*%d%*%t(v1)
x1=truex1+sig1*max(truex1)*matrix(rnorm(n*p1),n,p1)
d=d2
truex2=u%*%d%*%t(v2)
x2=truex2+sig2*max(truex2)*matrix(rnorm(n*p2),n,p2)
d=d3
truex3=u%*%d%*%t(v3)
x3=truex3+sig3*max(truex3)*matrix(rnorm(n*p3),n,p3)
x=list(x1,x2,x3)
truex=list(truex1,truex2,truex3)
# sim_load, index and uindex are computed but never returned (dead locals)
sim_load=list(v1,v2,v3)
index=list(seq(17,p1),seq(17,p2),seq(17,p3))
uindex=seq(26,n)
list(x=x,truex=truex)
}
|
#----------------------------------------------------------------------
# Tom's demonstration example.
#
# Purpose: Split Airlines dataset into train and validation sets.
# Build model and predict on a test set.
# Print confusion matrix and performance measures for the test set.
#----------------------------------------------------------------------
# Source setup code to define myIP and myPort and helper functions.
# If you are having trouble running this, just set the condition to FALSE
# and hardcode myIP and myPort.
# NOTE(review): the TRUE branch relies on '../h2o-runit.R' to define myIP,
# myPort and PASS_BANNER -- confirm that file provides them.
if (TRUE) {
# Set working directory so that the source() below works.
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
if (FALSE) {
setwd("/Users/tomk/0xdata/ws/h2o-dev/h2o-r/tests/testdir_demos")
}
source('../h2o-runit.R')
options(echo=TRUE)
# Train/test data files located inside the repo's smalldata tree.
filePath <- normalizePath(h2o:::.h2o.locate("smalldata/airlines/AirlinesTrain.csv.zip"))
testFilePath <- normalizePath(h2o:::.h2o.locate("smalldata/airlines/AirlinesTest.csv.zip"))
} else {
# Everything after the stop() is unreachable until the stop() is removed;
# it documents how to run the demo standalone.
stop("need to hardcode ip and port")
myIP = "127.0.0.1"
myPort = 54321
library(h2o)
PASS_BANNER <- function() { cat("\nPASS\n\n") }
# NOTE(review): in this branch filePath points at the *Test* zip and
# testFilePath at the *Train* zip -- the opposite of the TRUE branch.
# Looks swapped; confirm which pairing is intended.
filePath <- "https://s3.amazonaws.com/h2o-public-test-data/smalldata/airlines/AirlinesTest.csv.zip"
testFilePath <-"https://s3.amazonaws.com/h2o-public-test-data/smalldata/airlines/AirlinesTrain.csv.zip"
}
h2o.startLogging()
# Connect to the H2O cluster configured by the setup block above.
conn <- h2o.init(ip=myIP, port=myPort, startH2O=T)
#uploading data file to h2o
air <- h2o.importFile(conn, filePath, "air")
#Constructing train (80%) and validation (20%) sets by random sampling
#creating a column as tall as airlines(nrow(air))
s <- h2o.runif(air) # Useful when number of rows too large for R to handle
air.train <- air[s <= 0.8,]
air.valid <- air[s > 0.8,]
# Predictor columns (factor month/day variants) and binary response.
myX = c("Origin", "Dest", "Distance", "UniqueCarrier", "fMonth", "fDayofMonth", "fDayOfWeek" )
myY="IsDepDelayed"
#gbm
# NOTE(review): loss = "multinomial" with a binary response -- confirm this
# is intended rather than a bernoulli/binomial loss.
air.gbm <- h2o.gbm(x = myX, y = myY, loss = "multinomial", training_frame = air.train, ntrees = 10,
max_depth = 3, learn_rate = 0.01, nbins = 100, validation_frame = air.valid, variable_importance = F)
print(air.gbm@model)
air.gbm@model$auc
#RF alternative, kept for reference:
# air.rf <- h2o.randomForest(x=myX,y=myY,data=air.train,ntree=10,depth=20,seed=12,importance=T,validation=air.valid, type = "BigData")
# print(air.rf@model)
#uploading test file to h2o
air.test <- h2o.importFile(conn,testFilePath,key="air.test")
model_object <- air.gbm # air.rf #air.glm air.gbm air.dl
#predicting on test file
pred <- predict(model_object,air.test)
head(pred)
perf <- h2o.performance(model_object,air.test)
#Building confusion matrix for test set
perf@metrics$cm$table
#ROC-related metrics for the test set
perf@metrics$auc$precision
perf@metrics$auc$accuracy
# perf@auc$auc
plot(perf,type="roc")
PASS_BANNER()
# Disabled scratch block for a big-data run against a remote cluster.
if (FALSE) {
h <- h2o.init(ip="mr-0xb1", port=60024)
df <-h2o.importFile(h, "/home/tomk/airlines_all.csv")
nrow(df)
ncol(df)
head(df)
myX <- c("Origin", "Dest", "Distance", "UniqueCarrier", "Month", "DayofMonth", "DayOfWeek")
myY <- "IsDepDelayed"
air.glm <- h2o.glm(x = myX, y = myY, training_frame = df, family = "binomial", n_folds = 10, alpha = 0.25, lambda = 0.001)
air.glm@model$confusion
}
| /h2o-r/tests/testdir_demos/runit_NOPASS_demo_tk_cm_roc.R | permissive | darraghdog/h2o-dev | R | false | false | 3,207 | r | #----------------------------------------------------------------------
# Tom's demonstration example.
#
# Purpose: Split Airlines dataset into train and validation sets.
# Build model and predict on a test set.
# Print confusion matrix and performance measures for the test set.
#----------------------------------------------------------------------
# Source setup code to define myIP and myPort and helper functions.
# If you are having trouble running this, just set the condition to FALSE
# and hardcode myIP and myPort.
if (TRUE) {
# Set working directory so that the source() below works.
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
if (FALSE) {
setwd("/Users/tomk/0xdata/ws/h2o-dev/h2o-r/tests/testdir_demos")
}
source('../h2o-runit.R')
options(echo=TRUE)
filePath <- normalizePath(h2o:::.h2o.locate("smalldata/airlines/AirlinesTrain.csv.zip"))
testFilePath <- normalizePath(h2o:::.h2o.locate("smalldata/airlines/AirlinesTest.csv.zip"))
} else {
# Unreachable until the stop() is removed; documents standalone usage.
stop("need to hardcode ip and port")
myIP = "127.0.0.1"
myPort = 54321
library(h2o)
PASS_BANNER <- function() { cat("\nPASS\n\n") }
# NOTE(review): filePath/testFilePath pairing is swapped relative to the
# TRUE branch (Test vs Train zips) -- confirm which is intended.
filePath <- "https://s3.amazonaws.com/h2o-public-test-data/smalldata/airlines/AirlinesTest.csv.zip"
testFilePath <-"https://s3.amazonaws.com/h2o-public-test-data/smalldata/airlines/AirlinesTrain.csv.zip"
}
h2o.startLogging()
conn <- h2o.init(ip=myIP, port=myPort, startH2O=T)
#uploading data file to h2o
air <- h2o.importFile(conn, filePath, "air")
#Constructing train (80%) and validation (20%) sets by random sampling
#creating a column as tall as airlines(nrow(air))
s <- h2o.runif(air) # Useful when number of rows too large for R to handle
air.train <- air[s <= 0.8,]
air.valid <- air[s > 0.8,]
myX = c("Origin", "Dest", "Distance", "UniqueCarrier", "fMonth", "fDayofMonth", "fDayOfWeek" )
myY="IsDepDelayed"
#gbm (NOTE(review): loss = "multinomial" for a binary response -- confirm)
air.gbm <- h2o.gbm(x = myX, y = myY, loss = "multinomial", training_frame = air.train, ntrees = 10,
max_depth = 3, learn_rate = 0.01, nbins = 100, validation_frame = air.valid, variable_importance = F)
print(air.gbm@model)
air.gbm@model$auc
#RF alternative, kept for reference:
# air.rf <- h2o.randomForest(x=myX,y=myY,data=air.train,ntree=10,depth=20,seed=12,importance=T,validation=air.valid, type = "BigData")
# print(air.rf@model)
#uploading test file to h2o
air.test <- h2o.importFile(conn,testFilePath,key="air.test")
model_object <- air.gbm # air.rf #air.glm air.gbm air.dl
#predicting on test file
pred <- predict(model_object,air.test)
head(pred)
perf <- h2o.performance(model_object,air.test)
#Building confusion matrix for test set
perf@metrics$cm$table
#ROC-related metrics for the test set
perf@metrics$auc$precision
perf@metrics$auc$accuracy
# perf@auc$auc
plot(perf,type="roc")
PASS_BANNER()
# Disabled scratch block for a big-data run against a remote cluster.
if (FALSE) {
h <- h2o.init(ip="mr-0xb1", port=60024)
df <-h2o.importFile(h, "/home/tomk/airlines_all.csv")
nrow(df)
ncol(df)
head(df)
myX <- c("Origin", "Dest", "Distance", "UniqueCarrier", "Month", "DayofMonth", "DayOfWeek")
myY <- "IsDepDelayed"
air.glm <- h2o.glm(x = myX, y = myY, training_frame = df, family = "binomial", n_folds = 10, alpha = 0.25, lambda = 0.001)
air.glm@model$confusion
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mvcokmUtils.R
\name{mvcokm}
\alias{mvcokm}
\title{Construct the mvcokm object}
\usage{
mvcokm(
formula = list(~1, ~1),
output,
input,
cov.model = "matern_5_2",
nugget.est = FALSE,
prior = list(),
opt = list(),
NestDesign = TRUE,
tuning = list(),
info = list()
)
}
\arguments{
\item{formula}{a list of \eqn{s} elements, each of which contains the formula to specify fixed basis functions or regressors.}
\item{output}{a list of \eqn{s} elements, each of which contains a matrix of computer model outputs.}
\item{input}{a list of \eqn{s} elements, each of which contains a matrix of inputs.}
\item{cov.model}{a string indicating the type of covariance
function in the PP cokriging models. Current covariance functions include
\describe{
\item{exp}{product form of exponential covariance functions.}
\item{matern_3_2}{product form of Matern covariance functions with
smoothness parameter 3/2.}
\item{matern_5_2}{product form of Matern covariance functions with
smoothness parameter 5/2.}
\item{Gaussian}{product form of Gaussian covariance functions.}
\item{powexp}{product form of power-exponential covariance functions with roughness parameter fixed at 1.9.}
}}
\item{nugget.est}{a logical value indicating whether the nugget is included or not. Default value is \code{FALSE}.}
\item{prior}{a list of arguments to setup the prior distributions with the jointly robust prior as default
\describe{
\item{name}{the name of the prior. Current implementation includes
\code{JR}, \code{Reference}, \code{Jeffreys}, \code{Ind_Jeffreys}}
\item{hyperparam}{hyperparameters in the priors.
For jointly robust (JR) prior, three parameters are included:
\eqn{a} refers to the polynomial penalty to avoid singular correlation
matrix with a default value 0.2; \eqn{b} refers to the exponential penalty to avoid
diagonal correlation matrix with a default value 1; nugget.UB is the upper
bound of the nugget variance with default value 1, which indicates that the
nugget variance has support (0, 1).}
}}
\item{opt}{a list of arguments to setup the \code{\link{optim}} routine.}
\item{NestDesign}{a logical value indicating whether the
experimental design is hierarchically nested within each level
of the code.}
\item{tuning}{a list of arguments to control the MCEM algorithm for non-nested
design. It includes the arguments
\describe{
\item{maxit}{the maximum number of MCEM iterations.}
\item{tol}{a tolerance to stop the MCEM algorithm. If the parameter
difference between any two consecutive MCEM algorithm is less than
this tolerance, the MCEM algorithm is stopped.}
\item{n.sample}{the number of Monte Carlo samples in the
MCEM algorithm.}
\item{verbose}{a logical value to show the MCEM iterations if it is true.}
}}
\item{info}{a list that contains
\describe{
\item{iter}{number of iterations used in the MCEM algorithm}
\item{eps}{parameter difference after the MCEM algorithm stops}
}}
}
\description{
This function constructs the mvcokm object in
autoregressive cokriging models for multivariate outputs. The model is known as the parallel partial (PP) cokriging emulator.
}
\seealso{
\code{\link{ARCokrig}}, \code{\link{mvcokm.fit}}, \code{\link{mvcokm.predict}}, \code{\link{mvcokm.condsim}}
}
\author{
Pulong Ma <mpulong@gmail.com>
}
| /fuzzedpackages/ARCokrig/man/mvcokm.Rd | no_license | akhikolla/testpackages | R | false | true | 3,383 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mvcokmUtils.R
\name{mvcokm}
\alias{mvcokm}
\title{Construct the mvcokm object}
\usage{
mvcokm(
formula = list(~1, ~1),
output,
input,
cov.model = "matern_5_2",
nugget.est = FALSE,
prior = list(),
opt = list(),
NestDesign = TRUE,
tuning = list(),
info = list()
)
}
\arguments{
\item{formula}{a list of \eqn{s} elements, each of which contains the formula to specify fixed basis functions or regressors.}
\item{output}{a list of \eqn{s} elements, each of which contains a matrix of computer model outputs.}
\item{input}{a list of \eqn{s} elements, each of which contains a matrix of inputs.}
\item{cov.model}{a string indicating the type of covariance
function in the PP cokriging models. Current covariance functions include
\describe{
\item{exp}{product form of exponential covariance functions.}
\item{matern_3_2}{product form of Matern covariance functions with
smoothness parameter 3/2.}
\item{matern_5_2}{product form of Matern covariance functions with
smoothness parameter 5/2.}
\item{Gaussian}{product form of Gaussian covariance functions.}
\item{powexp}{product form of power-exponential covariance functions with roughness parameter fixed at 1.9.}
}}
\item{nugget.est}{a logical value indicating whether the nugget is included or not. Default value is \code{FALSE}.}
\item{prior}{a list of arguments to setup the prior distributions with the jointly robust prior as default
\describe{
\item{name}{the name of the prior. Current implementation includes
\code{JR}, \code{Reference}, \code{Jeffreys}, \code{Ind_Jeffreys}}
\item{hyperparam}{hyperparameters in the priors.
For jointly robust (JR) prior, three parameters are included:
\eqn{a} refers to the polynomial penalty to avoid singular correlation
matrix with a default value 0.2; \eqn{b} refers to the exponential penalty to avoid
diagonal correlation matrix with a default value 1; nugget.UB is the upper
bound of the nugget variance with default value 1, which indicates that the
nugget variance has support (0, 1).}
}}
\item{opt}{a list of arguments to setup the \code{\link{optim}} routine.}
\item{NestDesign}{a logical value indicating whether the
experimental design is hierarchically nested within each level
of the code.}
\item{tuning}{a list of arguments to control the MCEM algorithm for non-nested
design. It includes the arguments
\describe{
\item{maxit}{the maximum number of MCEM iterations.}
\item{tol}{a tolerance to stop the MCEM algorithm. If the parameter
difference between any two consecutive MCEM algorithm is less than
this tolerance, the MCEM algorithm is stopped.}
\item{n.sample}{the number of Monte Carlo samples in the
MCEM algorithm.}
\item{verbose}{a logical value to show the MCEM iterations if it is true.}
}}
\item{info}{a list that contains
\describe{
\item{iter}{number of iterations used in the MCEM algorithm}
\item{eps}{parameter difference after the MCEM algorithm stops}
}}
}
\description{
This function constructs the mvcokm object in
autoregressive cokriging models for multivariate outputs. The model is known as the parallel partial (PP) cokriging emulator.
}
\seealso{
\code{\link{ARCokrig}}, \code{\link{mvcokm.fit}}, \code{\link{mvcokm.predict}}, \code{\link{mvcokm.condsim}}
}
\author{
Pulong Ma <mpulong@gmail.com>
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{ar_L5_invitrodb}
\alias{ar_L5_invitrodb}
\title{AR ToxCast Data}
\format{A data.table with 42734 rows and 9 columns}
\usage{
ar_L5_invitrodb
}
\description{
A dataset containing ToxCast model parameters, hit calls, assays, and
chemical info to generate AR Model AUC values. Data is pulled from the
internal release (invitrodb), and subset down to
the minimum needed to run the model.
}
\keyword{datasets}
| /man/ar_L5_invitrodb.Rd | no_license | rnaimehaom/eapath | R | false | true | 512 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{ar_L5_invitrodb}
\alias{ar_L5_invitrodb}
\title{AR ToxCast Data}
\format{A data.table with 42734 rows and 9 columns}
\usage{
ar_L5_invitrodb
}
\description{
A dataset containing ToxCast model parameters, hit calls, assays, and
chemical info to generate AR Model AUC values. Data is pulled from the
internal release (invitrodb), and subset down to
the minimum needed to run the model.
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fbind.R
\name{fbind}
\alias{fbind}
\title{Bind two factors together}
\usage{
fbind(a, b)
}
\arguments{
\item{a}{factor}
\item{b}{factor}
}
\value{
factor
}
\description{
Create a new factor from two existing factors, where the new factor's levels
are the union of the levels of the input factors.
}
\examples{
fbind(iris$Species[c(1, 51, 101)], PlantGrowth$group[c(1, 11, 21)])
}
| /man/fbind.Rd | permissive | SamEdwardes/foofactors | R | false | true | 459 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fbind.R
\name{fbind}
\alias{fbind}
\title{Bind two factors together}
\usage{
fbind(a, b)
}
\arguments{
\item{a}{factor}
\item{b}{factor}
}
\value{
factor
}
\description{
Create a new factor from two existing factors, where the new factor's levels
are the union of the levels of the input factors.
}
\examples{
fbind(iris$Species[c(1, 51, 101)], PlantGrowth$group[c(1, 11, 21)])
}
|
# Fuzz-test fixture: a degenerate argument list (mostly empty numeric
# vectors plus extreme double values) for meteor's internal
# Penman-Monteith ET0 routine. Exercises behavior on pathological input.
testlist <- list(G = numeric(0), Rn = numeric(0), atmp = numeric(0), ra = numeric(0), relh = c(-4.55414938106482e-200, 1.95236685739849e-214, 2.28917898403533e-310, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), rs = numeric(0), temp = c(-2.80363318787251e-287, 1.56898304742183e+82, 8.96970809549085e-158, -1.3258495253834e-113, 1.18967993003373e-150, 4.46390936931362e+256, 8.69418809820149e-304, -7.21785050162452e-100, -8.79868786589058e+161, 0.000202182345424814, -4.8618220430901e+57, 1.99541219161559e+121, 1.54707830950554e-307, 4.8265050783594e+76, -1.94295658750812e-157, 5.21464652810224e-302, -7.59509429910999e+118, 1.07434901103112e-219, -4.2324579017604e+95, -1.3199888952305e+101, -1.86834569065576e+236))
# Invoke the unexported function with the fixture via do.call.
result <- do.call(meteor:::ET0_PenmanMonteith,testlist)
str(result) | /meteor/inst/testfiles/ET0_PenmanMonteith/AFL_ET0_PenmanMonteith/ET0_PenmanMonteith_valgrind_files/1615842207-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 824 | r | testlist <- list(G = numeric(0), Rn = numeric(0), atmp = numeric(0), ra = numeric(0), relh = c(-4.55414938106482e-200, 1.95236685739849e-214, 2.28917898403533e-310, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), rs = numeric(0), temp = c(-2.80363318787251e-287, 1.56898304742183e+82, 8.96970809549085e-158, -1.3258495253834e-113, 1.18967993003373e-150, 4.46390936931362e+256, 8.69418809820149e-304, -7.21785050162452e-100, -8.79868786589058e+161, 0.000202182345424814, -4.8618220430901e+57, 1.99541219161559e+121, 1.54707830950554e-307, 4.8265050783594e+76, -1.94295658750812e-157, 5.21464652810224e-302, -7.59509429910999e+118, 1.07434901103112e-219, -4.2324579017604e+95, -1.3199888952305e+101, -1.86834569065576e+236))
result <- do.call(meteor:::ET0_PenmanMonteith,testlist)
str(result) |
#Logistic Regression : GRE admissions example
#https://stats.idre.ucla.edu/r/dae/logit-regression/
#A researcher is interested in how variables, such as GRE (Graduate Record Exam scores), GPA (grade point average) and prestige of the undergraduate institution, affect admission into graduate school. The response variable, admit/don't admit, is binary.
inputData <- read.csv("https://stats.idre.ucla.edu/stat/data/binary.csv")
## view the first few rows of the data
head(inputData)
inputData
summary(inputData)
sapply(inputData, sd) # calculating the sd for each column in the data set
str(inputData)
data= inputData # make a copy for further analysis
data$rank = factor(data$rank)
data$admit = factor(data$admit)
## 2-way contingency table of outcome vs. predictor; we want
## to make sure there are no 0 cells
xtabs(~admit + rank, data = data) # similar to pivot tables in excel
# Fit the logistic regression: rank is a factor, so rank2..rank4 enter as
# dummy variables against the rank=1 baseline.
mylogit <- glm(admit ~ gre + gpa + rank, data = data, family = "binomial")
summary(mylogit)
#gre, gpa and rank are statistically significant
#For every one unit change in gre, the log odds of admission (versus non-admission) increases by 0.002.
#For a one unit increase in gpa, the log odds of being admitted to graduate school increases by 0.804.
#The indicator variables for rank have a slightly different interpretation. For example, having attended an undergraduate institution with rank of 2, versus an institution with a rank of 1, changes the log odds of admission by -0.675.
## odds ratios only
exp(coef(mylogit))
#Predicted admission probabilities for the training data
prob=predict(mylogit,type=c("response"))
cbind(data, prob)
#choose a probability cutoff for classification
library(InformationValue)
(optCutOff <- optimalCutoff(data$admit, prob)[1] ) #.46
confusionMatrix(data$admit, prob, threshold = optCutOff)
# NOTE(review): the accuracy values below use hard-coded confusion-matrix
# counts; they go stale if the data or the model changes.
(accuracy = (247+38)/ (sum(247+38+89+26))) # .715
confusionMatrix(data$admit, prob, threshold = .7)
(accuracy = (272+2)/ (sum(272+2+125+1))) #.685
confusionMatrix(data$admit, prob, threshold = .2)
library(dplyr)
## view one random row of the data frame
sample_n(data,size=1)
# Score a single hypothetical applicant (rank supplied as a factor level).
(newdata1 = data.frame(gre=450, gpa=3.7, rank=factor(3) ))
(newdata1$admitPredicted <- predict(mylogit, newdata = newdata1, type = "response"))
(newdata1$admitClass = ifelse(newdata1$admitPredicted > .46,1,0))
newdata1 #0 = not admitted to institute
#End of Logistic Regression
#also check model assumptions: residuals, VIF, multi-collinearity
#Partition the data into train and test
library(caret)
Index <- createDataPartition(y=data$admit, p=0.70, list=FALSE)
head(Index)
nrow(data)
trainData = data[Index ,]
testData = data[-Index, ]
table(data$admit); prop.table(table(data$admit))
summary(trainData$admit); summary(testData$admit)
nrow(trainData) ; nrow(testData); nrow(trainData) + nrow(testData)
prop.table(table(trainData$admit))
prop.table(table(testData$admit))
#same proportion of admit in test and train (stratified split)
str(testData)
#now construct a model with train and then test on testdata
| /lr.R | no_license | cart3ch/analytics1 | R | false | false | 2,895 | r | #Logistic Regresion : GRE
#https://stats.idre.ucla.edu/r/dae/logit-regression/
#A researcher is interested in how variables, such as GRE (Graduate Record Exam scores), GPA (grade point average) and prestige of the undergraduate institution, effect admission into graduate school. The response variable, admit/don’t admit, is a binary variable
inputData <- read.csv("https://stats.idre.ucla.edu/stat/data/binary.csv")
## view the first few rows of the data
head(inputData)
inputData
summary(inputData)
sapply(inputData, sd) # calculating the sd for each column in the data set
str(inputData)
data= inputData # make a copy for futher analysis
data$rank = factor(data$rank)
data$admit = factor(data$admit)
## 2way contingency table of cat outcome and predictors we want
## to make sure there are not 0 cells
xtabs(~admit + rank, data = data) # similar to pivot tables in excel
mylogit <- glm(admit ~ gre + gpa + rank, data = data, family = "binomial")
summary(mylogit)
#gre,gpa, rank are statistically significant,
#For every one unit change in gre, the log odds of admission (versus non-admission) increases by 0.002.
#For a one unit increase in gpa, the log odds of being admitted to graduate school increases by 0.804.
#The indicator variables for rank have a slightly different interpretation. For example, having attended an undergraduate institution with rank of 2, versus an institution with a rank of 1, changes the log odds of admission by -0.675.
## odds ratios only
exp(coef(mylogit))
#Predict admit for input data
prob=predict(mylogit,type=c("response"))
cbind(data, prob)
#cutoff value
library(InformationValue)
(optCutOff <- optimalCutoff(data$admit, prob)[1] ) #.46
confusionMatrix(data$admit, prob, threshold = optCutOff)
(accuracy = (247+38)/ (sum(247+38+89+26))) # .715
confusionMatrix(data$admit, prob, threshold = .7)
(accuracy = (272+2)/ (sum(272+2+125+1))) #.685
confusionMatrix(data$admit, prob, threshold = .2)
library(dplyr)
## view data frame
sample_n(data,size=1)
(newdata1 = data.frame(gre=450, gpa=3.7, rank=factor(3) ))
(newdata1$admitPredicted <- predict(mylogit, newdata = newdata1, type = "response"))
(newdata1$admitClass = ifelse(newdata1$admitPredicted > .46,1,0))
newdata1 #b=not admitted to institute
#End of Logistic Regression
#also check for assumptions of residues, VIF, Multi-collinearity
#Parition the data into train and test
library(caret)
Index <- createDataPartition(y=data$admit, p=0.70, list=FALSE)
head(Index)
nrow(data)
trainData = data[Index ,]
testData = data[-Index, ]
table(data$admit); prop.table(table(data$admit))
summary(trainData$admit); summary(testData$admit)
nrow(trainData) ; nrow(testData); nrow(trainData) + nrow(testData)
prop.table(table(trainData$admit))
prop.table(table(testData$admit))
#same promotion of admit in test and train
str(testData)
#now construct a model with train and then test on testdata
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/speakerSplit.R
\name{speakerSplit}
\alias{speakerSplit}
\title{Break and Stretch if Multiple Persons per Cell}
\usage{
speakerSplit(
dataframe,
person.var = 1,
sep = c("and", "&", ","),
track.reps = FALSE
)
}
\arguments{
\item{dataframe}{A dataframe that contains the person variable.}
\item{person.var}{The person variable to be stretched.}
\item{sep}{The separator(s) to search for and break on. Default is:
c("and", "&", ",")}
\item{track.reps}{logical. If \code{TRUE} leaves the row names of person
variable cells that were repeated and stretched.}
}
\value{
Returns an expanded dataframe with person variable stretched and
accompanying rows repeated.
}
\description{
Look for cells with multiple people and create separate rows for each person.
}
\examples{
\dontrun{
DATA$person <- as.character(DATA$person)
DATA$person[c(1, 4, 6)] <- c("greg, sally, & sam",
"greg, sally", "sam and sally")
speakerSplit(DATA)
speakerSplit(DATA, track.reps=TRUE)
DATA$person[c(1, 4, 6)] <- c("greg_sally_sam",
"greg.sally", "sam; sally")
speakerSplit(DATA, sep = c(".", "_", ";"))
DATA <- qdap::DATA #reset DATA
}
}
| /man/speakerSplit.Rd | no_license | cran/qdap | R | false | true | 1,214 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/speakerSplit.R
\name{speakerSplit}
\alias{speakerSplit}
\title{Break and Stretch if Multiple Persons per Cell}
\usage{
speakerSplit(
dataframe,
person.var = 1,
sep = c("and", "&", ","),
track.reps = FALSE
)
}
\arguments{
\item{dataframe}{A dataframe that contains the person variable.}
\item{person.var}{The person variable to be stretched.}
\item{sep}{The separator(s) to search for and break on. Default is:
c("and", "&", ",")}
\item{track.reps}{logical. If \code{TRUE} leaves the row names of person
variable cells that were repeated and stretched.}
}
\value{
Returns an expanded dataframe with person variable stretched and
accompanying rows repeated.
}
\description{
Look for cells with multiple people and create separate rows for each person.
}
\examples{
\dontrun{
DATA$person <- as.character(DATA$person)
DATA$person[c(1, 4, 6)] <- c("greg, sally, & sam",
"greg, sally", "sam and sally")
speakerSplit(DATA)
speakerSplit(DATA, track.reps=TRUE)
DATA$person[c(1, 4, 6)] <- c("greg_sally_sam",
"greg.sally", "sam; sally")
speakerSplit(DATA, sep = c(".", "_", ";"))
DATA <- qdap::DATA #reset DATA
}
}
|
# Load the packages needed for the figure: 3D plotting (scatterplot3d),
# spatial I/O and geometry (rgdal, sf, maptools, GISTools, ggsn) and
# data wrangling (tidyverse, zoo). pacman installs any that are missing.
pacman::p_load(scatterplot3d, rgdal, maptools, tidyverse, zoo, sf, GISTools, ggsn)
# Sampling-site coordinates with September nitrate (NO3/NO2, ppb) values.
sites <- read_csv("./Materials for R mapping/nutrient_mapping/09_September/Sep_RCC_nitrate_mapping.csv")
# scatter.grid: a locally modified copy of scatterplot3d::scatterplot3d.
# The visible change over the package version is the `grid` argument, which
# here also accepts a character vector ("xy", "xz", "yz") selecting which
# planes of the 3D box get grid lines, instead of only the xy floor.
# Everything else (argument meanings, return value) follows scatterplot3d:
# it draws the 3D scatter and invisibly returns a list of closures
# (xyz.convert, points3d, plane3d, box3d) for adding to the plot.
# NOTE(review): body reproduced verbatim from the source; only comments added.
scatter.grid <- function (x, y = NULL, z = NULL, color = par("col"), pch = NULL,
    main = NULL, sub = NULL, xlim = NULL, ylim = NULL, zlim = NULL,
    xlab = NULL, ylab = NULL, zlab = NULL, scale.y = 1, angle = 40,
    axis = TRUE, tick.marks = TRUE, label.tick.marks = TRUE,
    x.ticklabs = NULL, y.ticklabs = NULL, z.ticklabs = NULL,
    y.margin.add = 0, grid = TRUE, box = TRUE, lab = par("lab"),
    lab.z = mean(lab[1:2]), type = "p", highlight.3d = FALSE,
    mar = c(5, 3, 4, 3) + 0.1, bg = par("bg"), col.axis = par("col.axis"),
    col.grid = "grey", col.lab = par("col.lab"), cex.symbols = par("cex"),
    cex.axis = 0.8 * par("cex.axis"), cex.lab = par("cex.lab"),
    font.axis = par("font.axis"), font.lab = par("font.lab"),
    lty.axis = par("lty"), lty.grid = par("lty"), lty.hide = NULL,
    lty.hplot = par("lty"), log = "", ...)
{
    mem.par <- par(mar = mar)
    x.scal <- y.scal <- z.scal <- 1
    # Recover default axis labels from the unevaluated argument expressions.
    xlabel <- if (!missing(x))
        deparse(substitute(x))
    ylabel <- if (!missing(y))
        deparse(substitute(y))
    zlabel <- if (!missing(z))
        deparse(substitute(z))
    if (highlight.3d && !missing(color))
        warning("color is ignored when highlight.3d = TRUE")
    # A 4th column of a matrix input (or a $color list element) supplies colors.
    if (!is.null(d <- dim(x)) && (length(d) == 2) && (d[2] >=
        4))
        color <- x[, 4]
    else if (is.list(x) && !is.null(x$color))
        color <- x$color
    xyz <- xyz.coords(x = x, y = y, z = z, xlab = xlabel, ylab = ylabel,
        zlab = zlabel, log = log)
    if (is.null(xlab)) {
        xlab <- xyz$xlab
        if (is.null(xlab))
            xlab <- ""
    }
    if (is.null(ylab)) {
        ylab <- xyz$ylab
        if (is.null(ylab))
            ylab <- ""
    }
    if (is.null(zlab)) {
        zlab <- xyz$zlab
        if (is.null(zlab))
            zlab <- ""
    }
    if (length(color) == 1)
        color <- rep(color, length(xyz$x))
    else if (length(color) != length(xyz$x))
        stop("length(color) ", "must be equal length(x) or 1")
    # Normalise the viewing angle to [0, 4) quarter-turns and derive the
    # y-axis projection factors (yz.f vertical, yx.f horizontal shear).
    angle <- (angle%%360)/90
    yz.f <- scale.y * abs(if (angle < 1) angle else if (angle >
        3) angle - 4 else 2 - angle)
    yx.f <- scale.y * (if (angle < 2)
        1 - angle
    else angle - 3)
    # For angles past 180 degrees, swap the roles of x and y throughout.
    if (angle > 2) {
        temp <- xyz$x
        xyz$x <- xyz$y
        xyz$y <- temp
        temp <- xlab
        xlab <- ylab
        ylab <- temp
        temp <- xlim
        xlim <- ylim
        ylim <- temp
    }
    angle.1 <- (1 < angle && angle < 2) || angle > 3
    angle.2 <- 1 <= angle && angle <= 3
    dat <- cbind(as.data.frame(xyz[c("x", "y", "z")]), col = color)
    # Clip the data to any user-supplied axis limits.
    if (!is.null(xlim)) {
        xlim <- range(xlim)
        dat <- dat[xlim[1] <= dat$x & dat$x <= xlim[2], , drop = FALSE]
    }
    if (!is.null(ylim)) {
        ylim <- range(ylim)
        dat <- dat[ylim[1] <= dat$y & dat$y <= ylim[2], , drop = FALSE]
    }
    if (!is.null(zlim)) {
        zlim <- range(zlim)
        dat <- dat[zlim[1] <= dat$z & dat$z <= zlim[2], , drop = FALSE]
    }
    n <- nrow(dat)
    if (n < 1)
        stop("no data left within (x|y|z)lim")
    y.range <- range(dat$y[is.finite(dat$y)])
    # Sort back-to-front by y so nearer points overplot farther ones; keep
    # per-point pch/bg/cex vectors aligned with the reordered rows.
    if (type == "p" || type == "h") {
        y.ord <- rev(order(dat$y))
        dat <- dat[y.ord, ]
        if (length(pch) > 1)
            if (length(pch) != length(y.ord))
                stop("length(pch) ", "must be equal length(x) or 1")
            else pch <- pch[y.ord]
        if (length(bg) > 1)
            if (length(bg) != length(y.ord))
                stop("length(bg) ", "must be equal length(x) or 1")
            else bg <- bg[y.ord]
        if (length(cex.symbols) > 1)
            if (length(cex.symbols) != length(y.ord))
                stop("length(cex.symbols) ", "must be equal length(x) or 1")
            else cex.symbols <- cex.symbols[y.ord]
        daty <- dat$y
        daty[!is.finite(daty)] <- mean(daty[is.finite(daty)])
        if (highlight.3d && !(all(diff(daty) == 0)))
            dat$col <- rgb(red = seq(0, 1, length = n) * (y.range[2] -
                daty)/diff(y.range), green = 0, blue = 0)
    }
    # Compute pretty tick positions for each axis and rescale the data so
    # that one tick interval corresponds to one plotting unit.
    p.lab <- par("lab")
    y.range <- range(dat$y[is.finite(dat$y)], ylim)
    y.prty <- pretty(y.range, n = lab[2], min.n = max(1, min(0.5 *
        lab[2], p.lab[2])))
    y.scal <- round(diff(y.prty[1:2]), digits = 12)
    y.add <- min(y.prty)
    dat$y <- (dat$y - y.add)/y.scal
    y.max <- (max(y.prty) - y.add)/y.scal
    if (!is.null(ylim))
        y.max <- max(y.max, ceiling((ylim[2] - y.add)/y.scal))
    x.range <- range(dat$x[is.finite(dat$x)], xlim)
    x.prty <- pretty(x.range, n = lab[1], min.n = max(1, min(0.5 *
        lab[1], p.lab[1])))
    x.scal <- round(diff(x.prty[1:2]), digits = 12)
    dat$x <- dat$x/x.scal
    x.range <- range(x.prty)/x.scal
    x.max <- ceiling(x.range[2])
    x.min <- floor(x.range[1])
    if (!is.null(xlim)) {
        x.max <- max(x.max, ceiling(xlim[2]/x.scal))
        x.min <- min(x.min, floor(xlim[1]/x.scal))
    }
    x.range <- range(x.min, x.max)
    z.range <- range(dat$z[is.finite(dat$z)], zlim)
    z.prty <- pretty(z.range, n = lab.z, min.n = max(1, min(0.5 *
        lab.z, p.lab[2])))
    z.scal <- round(diff(z.prty[1:2]), digits = 12)
    dat$z <- dat$z/z.scal
    z.range <- range(z.prty)/z.scal
    z.max <- ceiling(z.range[2])
    z.min <- floor(z.range[1])
    if (!is.null(zlim)) {
        z.max <- max(z.max, ceiling(zlim[2]/z.scal))
        z.min <- min(z.min, floor(zlim[1]/z.scal))
    }
    z.range <- range(z.min, z.max)
    # Open the 2D window large enough to hold the sheared 3D box plus the
    # y tick labels (measured with strwidth after the first plot.window).
    plot.new()
    if (angle.2) {
        x1 <- x.min + yx.f * y.max
        x2 <- x.max
    }
    else {
        x1 <- x.min
        x2 <- x.max + yz.f * y.max
    }
    plot.window(c(x1, x2), c(z.min, z.max + yz.f * y.max))
    temp <- strwidth(format(rev(y.prty))[1], cex = cex.axis/par("cex"))
    if (angle.2)
        x1 <- x1 - temp - y.margin.add
    else x2 <- x2 + temp + y.margin.add
    plot.window(c(x1, x2), c(z.min, z.max + yz.f * y.max))
    if (angle > 2)
        par(usr = par("usr")[c(2, 1, 3:4)])
    usr <- par("usr")
    title(main, sub, ...)
    # Grids: the xy floor (as in scatterplot3d), plus the optional xz back
    # wall and yz side wall selected via the character form of `grid`.
    if ("xy" %in% grid || grid) {
        i <- x.min:x.max
        segments(i, z.min, i + (yx.f * y.max), yz.f * y.max +
            z.min, col = col.grid, lty = lty.grid)
        i <- 0:y.max
        segments(x.min + (i * yx.f), i * yz.f + z.min, x.max +
            (i * yx.f), i * yz.f + z.min, col = col.grid, lty = lty.grid)
    }
    if ("xz" %in% grid) {
      i <- x.min:x.max
      segments(i + (yx.f * y.max), yz.f * y.max + z.min,
               i + (yx.f * y.max), yz.f * y.max + z.max,
               col = col.grid, lty = lty.grid)
      temp <- yx.f * y.max
      temp1 <- yz.f * y.max
      i <- z.min:z.max
      segments(x.min + temp,temp1 + i,
               x.max + temp,temp1 + i , col = col.grid, lty = lty.grid)
    }
    if ("yz" %in% grid) {
      i <- 0:y.max
      segments(x.min + (i * yx.f), i * yz.f + z.min,
               x.min + (i * yx.f) ,i * yz.f + z.max,
               col = col.grid, lty = lty.grid)
      temp <- yx.f * y.max
      temp1 <- yz.f * y.max
      i <- z.min:z.max
      segments(x.min + temp,temp1 + i,
               x.min, i , col = col.grid, lty = lty.grid)
    }
    # Axis lines, tick marks, tick labels and axis titles.
    if (axis) {
        xx <- if (angle.2)
            c(x.min, x.max)
        else c(x.max, x.min)
        if (tick.marks) {
            xtl <- (z.max - z.min) * (tcl <- -par("tcl"))/50
            ztl <- (x.max - x.min) * tcl/50
            mysegs <- function(x0, y0, x1, y1) segments(x0,
                y0, x1, y1, col = col.axis, lty = lty.axis)
            i.y <- 0:y.max
            mysegs(yx.f * i.y - ztl + xx[1], yz.f * i.y + z.min,
                yx.f * i.y + ztl + xx[1], yz.f * i.y + z.min)
            i.x <- x.min:x.max
            mysegs(i.x, -xtl + z.min, i.x, xtl + z.min)
            i.z <- z.min:z.max
            mysegs(-ztl + xx[2], i.z, ztl + xx[2], i.z)
            if (label.tick.marks) {
                las <- par("las")
                mytext <- function(labels, side, at, ...) mtext(text = labels,
                  side = side, at = at, line = -0.5, col = col.lab,
                  cex = cex.axis, font = font.lab, ...)
                if (is.null(x.ticklabs))
                  x.ticklabs <- format(i.x * x.scal)
                mytext(x.ticklabs, side = 1, at = i.x)
                if (is.null(z.ticklabs))
                  z.ticklabs <- format(i.z * z.scal)
                mytext(z.ticklabs, side = if (angle.1)
                  4
                else 2, at = i.z, adj = if (0 < las && las <
                  3)
                  1
                else NA)
                temp <- if (angle > 2)
                  rev(i.y)
                else i.y
                if (is.null(y.ticklabs))
                  y.ticklabs <- format(y.prty)
                else if (angle > 2)
                  y.ticklabs <- rev(y.ticklabs)
                text(i.y * yx.f + xx[1], i.y * yz.f + z.min,
                  y.ticklabs, pos = if (angle.1)
                    2
                  else 4, offset = 1, col = col.lab, cex = cex.axis/par("cex"),
                  font = font.lab)
            }
        }
        mytext2 <- function(lab, side, line, at) mtext(lab,
            side = side, line = line, at = at, col = col.lab,
            cex = cex.lab, font = font.axis, las = 0)
        lines(c(x.min, x.max), c(z.min, z.min), col = col.axis,
            lty = lty.axis)
        mytext2(xlab, 1, line = 1.5, at = mean(x.range))
        lines(xx[1] + c(0, y.max * yx.f), c(z.min, y.max * yz.f +
            z.min), col = col.axis, lty = lty.axis)
        mytext2(ylab, if (angle.1)
            2
        else 4, line = 0.5, at = z.min + y.max * yz.f)
        lines(xx[c(2, 2)], c(z.min, z.max), col = col.axis,
            lty = lty.axis)
        mytext2(zlab, if (angle.1)
            4
        else 2, line = 1.5, at = mean(z.range))
        # Hidden edges of the bounding box drawn with lty.hide.
        if (box) {
            if (is.null(lty.hide))
                lty.hide <- lty.axis
            temp <- yx.f * y.max
            temp1 <- yz.f * y.max
            lines(c(x.min + temp, x.max + temp), c(z.min + temp1,
                z.min + temp1), col = col.axis, lty = lty.hide)
            lines(c(x.min + temp, x.max + temp), c(temp1 + z.max,
                temp1 + z.max), col = col.axis, lty = lty.axis)
            temp <- c(0, y.max * yx.f)
            temp1 <- c(0, y.max * yz.f)
            lines(temp + xx[2], temp1 + z.min, col = col.axis,
                lty = lty.hide)
            lines(temp + x.min, temp1 + z.max, col = col.axis,
                lty = lty.axis)
            temp <- yx.f * y.max
            temp1 <- yz.f * y.max
            lines(c(temp + x.min, temp + x.min), c(z.min + temp1,
                z.max + temp1), col = col.axis, lty = if (!angle.2)
                lty.hide
            else lty.axis)
            lines(c(x.max + temp, x.max + temp), c(z.min + temp1,
                z.max + temp1), col = col.axis, lty = if (angle.2)
                lty.hide
            else lty.axis)
        }
    }
    # Project the (already rescaled) data into 2D and draw the points;
    # type "h" adds vertical drop lines down to the xy floor.
    x <- dat$x + (dat$y * yx.f)
    z <- dat$z + (dat$y * yz.f)
    col <- as.character(dat$col)
    if (type == "h") {
        z2 <- dat$y * yz.f + z.min
        segments(x, z, x, z2, col = col, cex = cex.symbols,
            lty = lty.hplot, ...)
        points(x, z, type = "p", col = col, pch = pch, bg = bg,
            cex = cex.symbols, ...)
    }
    else points(x, z, type = type, col = col, pch = pch, bg = bg,
        cex = cex.symbols, ...)
    # Visible front edges of the box are drawn last, over the points.
    if (axis && box) {
        lines(c(x.min, x.max), c(z.max, z.max), col = col.axis,
            lty = lty.axis)
        lines(c(0, y.max * yx.f) + x.max, c(0, y.max * yz.f) +
            z.max, col = col.axis, lty = lty.axis)
        lines(xx[c(1, 1)], c(z.min, z.max), col = col.axis,
            lty = lty.axis)
    }
    # Drop every local except the scaling state captured by the closures
    # returned below, so the environment they keep alive stays small.
    ob <- ls()
    rm(list = ob[!ob %in% c("angle", "mar", "usr", "x.scal",
        "y.scal", "z.scal", "yx.f", "yz.f", "y.add", "z.min",
        "z.max", "x.min", "x.max", "y.max", "x.prty", "y.prty",
        "z.prty")])
    rm(ob)
    # Helper closures for adding to the finished plot:
    #   xyz.convert - map data coords to 2D plot coords
    #   points3d    - add points/lines (type "h" gives drop lines)
    #   plane3d     - add a regression plane (intercept + 2 slopes or lm fit)
    #   box3d       - redraw the full bounding box
    invisible(list(xyz.convert = function(x, y = NULL, z = NULL) {
        xyz <- xyz.coords(x, y, z)
        if (angle > 2) {
            temp <- xyz$x
            xyz$x <- xyz$y
            xyz$y <- temp
        }
        y <- (xyz$y - y.add)/y.scal
        return(list(x = xyz$x/x.scal + yx.f * y, y = xyz$z/z.scal +
            yz.f * y))
    }, points3d = function(x, y = NULL, z = NULL, type = "p",
        ...) {
        xyz <- xyz.coords(x, y, z)
        if (angle > 2) {
            temp <- xyz$x
            xyz$x <- xyz$y
            xyz$y <- temp
        }
        y2 <- (xyz$y - y.add)/y.scal
        x <- xyz$x/x.scal + yx.f * y2
        y <- xyz$z/z.scal + yz.f * y2
        mem.par <- par(mar = mar, usr = usr)
        on.exit(par(mem.par))
        if (type == "h") {
            y2 <- z.min + yz.f * y2
            segments(x, y, x, y2, ...)
            points(x, y, type = "p", ...)
        } else points(x, y, type = type, ...)
    }, plane3d = function(Intercept, x.coef = NULL, y.coef = NULL,
        lty = "dashed", lty.box = NULL, ...) {
        if (!is.atomic(Intercept) && !is.null(coef(Intercept))) Intercept <- coef(Intercept)
        if (is.null(lty.box)) lty.box <- lty
        if (is.null(x.coef) && length(Intercept) == 3) {
            x.coef <- Intercept[if (angle > 2) 3 else 2]
            y.coef <- Intercept[if (angle > 2) 2 else 3]
            Intercept <- Intercept[1]
        }
        mem.par <- par(mar = mar, usr = usr)
        on.exit(par(mem.par))
        x <- x.min:x.max
        ltya <- c(lty.box, rep(lty, length(x) - 2), lty.box)
        x.coef <- x.coef * x.scal
        z1 <- (Intercept + x * x.coef + y.add * y.coef)/z.scal
        z2 <- (Intercept + x * x.coef + (y.max * y.scal + y.add) *
            y.coef)/z.scal
        segments(x, z1, x + y.max * yx.f, z2 + yz.f * y.max,
            lty = ltya, ...)
        y <- 0:y.max
        ltya <- c(lty.box, rep(lty, length(y) - 2), lty.box)
        y.coef <- (y * y.scal + y.add) * y.coef
        z1 <- (Intercept + x.min * x.coef + y.coef)/z.scal
        z2 <- (Intercept + x.max * x.coef + y.coef)/z.scal
        segments(x.min + y * yx.f, z1 + y * yz.f, x.max + y *
            yx.f, z2 + y * yz.f, lty = ltya, ...)
    }, box3d = function(...) {
        mem.par <- par(mar = mar, usr = usr)
        on.exit(par(mem.par))
        lines(c(x.min, x.max), c(z.max, z.max), ...)
        lines(c(0, y.max * yx.f) + x.max, c(0, y.max * yz.f) +
            z.max, ...)
        lines(c(0, y.max * yx.f) + x.min, c(0, y.max * yz.f) +
            z.max, ...)
        lines(c(x.max, x.max), c(z.min, z.max), ...)
        lines(c(x.min, x.min), c(z.min, z.max), ...)
        lines(c(x.min, x.max), c(z.min, z.min), ...)
    }))
}
# Build a ggplot2 scale bar (two contrasting rectangles plus distance
# labels), optionally topped with a north arrow, returned as a list of
# ggplot2 layers to add (`+`) to a map.
#
# lon, lat:            coordinates of the scale bar's bottom-left corner.
# distance_lon:        width of each rectangle segment, in dist_unit.
# distance_lat:        height of the rectangles, in dist_unit.
# distance_legend:     vertical offset of the distance labels, in dist_unit.
# dist_unit:           unit for all distances and label suffix ("km").
# rec_fill/rec_colour, rec2_fill/rec2_colour: fill and border of the two rectangles.
# legend_colour, legend_size: colour and text size of the distance labels.
# orientation:         if TRUE, also draw a north arrow above the bar.
# arrow_length, arrow_distance, arrow_north_size: arrow shaft length,
#                      gap between bar and arrow, and size of the "N" label.
#
# Returns: a list of ggplot2 layers (polygons, annotations and, when
# orientation = TRUE, the arrow segments and "N" label).
scale_bar <- function(lon, lat, distance_lon, distance_lat, distance_legend, dist_unit = "km", rec_fill = "white", rec_colour = "black", rec2_fill = "black", rec2_colour = "black", legend_colour = "black", legend_size = 3, orientation = TRUE, arrow_length = 500, arrow_distance = 300, arrow_north_size = 6){
  # create_scale_bar()'s parameter is `dist_units`; spell it out rather than
  # relying on R's partial argument matching (fragile if signatures change).
  the_scale_bar <- create_scale_bar(lon = lon, lat = lat, distance_lon = distance_lon, distance_lat = distance_lat, distance_legend = distance_legend, dist_units = dist_unit)
  # First rectangle
  rectangle1 <- geom_polygon(data = the_scale_bar$rectangle, aes(x = lon, y = lat), fill = rec_fill, colour = rec_colour)
  # Second rectangle
  rectangle2 <- geom_polygon(data = the_scale_bar$rectangle2, aes(x = lon, y = lat), fill = rec2_fill, colour = rec2_colour)
  # Distance labels above the bar, e.g. "0", "5km", "10km".
  scale_bar_legend <- annotate("text", label = paste(the_scale_bar$legend[,"text"], dist_unit, sep=""), x = the_scale_bar$legend[,"long"], y = the_scale_bar$legend[,"lat"], size = legend_size, colour = legend_colour)
  res <- list(rectangle1, rectangle2, scale_bar_legend)
  if(orientation){# Add an arrow pointing North
    # Same partial-matching fix: create_orientation_arrow() takes `dist_units`.
    coords_arrow <- create_orientation_arrow(scale_bar = the_scale_bar, length = arrow_length, distance = arrow_distance, dist_units = dist_unit)
    arrow <- list(geom_segment(data = coords_arrow$res, aes(x = x, y = y, xend = xend, yend = yend)), annotate("text", label = "N", x = coords_arrow$coords_n[1,"x"], y = coords_arrow$coords_n[1,"y"], size = arrow_north_size, colour = "black"))
    res <- c(res, arrow)
  }
  return(res)
}
# Compute the geometry of a two-segment scale bar for a lat/long map.
#
# lon, lat:        bottom-left corner of the bar.
# distance_lon:    width of each segment, in dist_units.
# distance_lat:    height of the bar, in dist_units.
# distance_legend: vertical offset of the label anchor points.
# dist_units:      distance unit passed through to maptools::gcDestination.
#
# Returns a list with:
#   rectangle  - 5-point polygon (lon/lat) for the left segment,
#   rectangle2 - 5-point polygon for the right segment,
#   legend     - anchor points (long/lat) and `text` values 0, d, 2d
#                for the distance labels.
create_scale_bar <- function(lon,lat,distance_lon,distance_lat,distance_legend, dist_units = "km"){
  # Corners of the first (left) segment, found by great-circle offsets
  # east (bearing 90) and north (bearing 0) of the origin.
  east_edge <- gcDestination(lon = lon, lat = lat, bearing = 90, dist = distance_lon, dist.units = dist_units, model = "WGS84")
  north_edge <- gcDestination(lon = lon, lat = lat, bearing = 0, dist = distance_lat, dist.units = dist_units, model = "WGS84")
  seg1 <- data.frame(
    cbind(lon = c(lon, lon, east_edge[1, "long"], east_edge[1, "long"], lon),
          lat = c(lat, north_edge[1, "lat"], north_edge[1, "lat"], lat, lat)),
    stringsAsFactors = FALSE)
  # Second segment sits immediately at the right of the first one.
  east_edge2 <- gcDestination(lon = lon, lat = lat, bearing = 90, dist = distance_lon * 2, dist.units = dist_units, model = "WGS84")
  seg2 <- data.frame(
    cbind(lon = c(east_edge[1, "long"], east_edge[1, "long"], east_edge2[1, "long"], east_edge2[1, "long"], east_edge[1, "long"]),
          lat = c(lat, north_edge[1, "lat"], north_edge[1, "lat"], lat, lat)),
    stringsAsFactors = FALSE)
  # Label anchors: above the bar at offsets 0, distance_lon, 2*distance_lon.
  anchor_left <- gcDestination(lon = lon, lat = lat, bearing = 0, dist = distance_legend, dist.units = dist_units, model = "WGS84")
  anchor_mid <- anchor_right <- anchor_left
  anchor_mid[1, "long"] <- east_edge[1, "long"]
  anchor_right[1, "long"] <- east_edge2[1, "long"]
  label_pts <- data.frame(
    cbind(rbind(anchor_left, anchor_mid, anchor_right),
          text = c(0, distance_lon, distance_lon * 2)),
    stringsAsFactors = FALSE, row.names = NULL)
  return(list(rectangle = seg1, rectangle2 = seg2, legend = label_pts))
}
# Compute the geometry of a north arrow drawn above a scale bar.
#
# scale_bar:  the list returned by create_scale_bar(); the arrow is based
#             at the top-left corner of its second rectangle.
# length:     length of the arrow shaft, in dist_units.
# distance:   gap between the bar and the arrow's base, in dist_units.
# dist_units: distance unit passed through to maptools::gcDestination.
#
# Returns a list with:
#   res      - data.frame of 3 segments (x, y, xend, yend): shaft + 2 barbs,
#   coords_n - (x, y) position for the "N" label, halfway up the shaft.
create_orientation_arrow <- function(scale_bar, length, distance = 1, dist_units = "km"){
  # Anchor at the top-left corner of the bar's second rectangle.
  base_lon <- scale_bar$rectangle2[1, 1]
  base_lat <- scale_bar$rectangle2[1, 2]
  # Shift north by `distance` to get the bottom of the arrow shaft.
  shaft_base <- gcDestination(lon = base_lon, lat = base_lat, bearing = 0, dist = distance, dist.units = dist_units, model = "WGS84")
  base_lon <- shaft_base[1, "long"]
  base_lat <- shaft_base[1, "lat"]
  # Tip of the shaft, `length` further north, and the two barbs angled
  # back from the tip (bearings 225 and 135, length/5 each).
  tip <- gcDestination(lon = base_lon, lat = base_lat, bearing = 0, dist = length, dist.units = dist_units, model = "WGS84")
  barb_left <- gcDestination(lon = tip[1, "long"], lat = tip[1, "lat"], bearing = 225, dist = length/5, dist.units = dist_units, model = "WGS84")
  barb_right <- gcDestination(lon = tip[1, "long"], lat = tip[1, "lat"], bearing = 135, dist = length/5, dist.units = dist_units, model = "WGS84")
  segs <- as.data.frame(
    rbind(
      cbind(x = base_lon, y = base_lat, xend = tip[1, "long"], yend = tip[1, "lat"]),
      cbind(x = barb_left[1, "long"], y = barb_left[1, "lat"], xend = tip[1, "long"], yend = tip[1, "lat"]),
      cbind(x = barb_right[1, "long"], y = barb_right[1, "lat"], xend = tip[1, "long"], yend = tip[1, "lat"])),
    stringsAsFactors = FALSE)
  # "N" label sits halfway between the base and the tip of the shaft.
  n_label_xy <- cbind(x = base_lon, y = (base_lat + tip[1, "lat"])/2)
  return(list(res = segs, coords_n = n_label_xy))
}
# --- Read shapefiles for Falling Creek Reservoir (FCR) ------------------
# Extract the lake shape file that was originally developed from Zach Munger.
lakeShape = readOGR(dsn=path.expand("./Materials for R mapping/FCR"), layer="506_9")
inflow = readOGR(dsn=path.expand("./Materials for R mapping/FCR"), layer="inf2")
em = readOGR(dsn=path.expand("./Materials for R mapping/FCR"), layer="EM_line")
hox = readOGR(dsn=path.expand("./Materials for R mapping/FCR"), layer="sss_system")
# --- Read shapefiles for Beaverdam Reservoir (BVR) ----------------------
# Extract the lake shape file that was originally developed from Zach Munger.
lakeShapebvr = readOGR(dsn=path.expand("./Materials for R mapping/BVR"), layer="0_2mpoly12apr18")
inflowpipe = readOGR(dsn=path.expand("./Materials for R mapping/BVR"), layer="inflow pipe")
# The data are in Standard Easting-Northing Units, this short line of code transforms our units into Lat-Long
# If you are unsure about this, follow the link below for a detailed workflow about spatial transformations.
# http://gis.stackexchange.com/questions/142156/r-how-to-get-latitudes-and-longitudes-from-a-rasterlayer
#FCR (EPSG:4326 = WGS84 lat/long)
fcr1 <- spTransform(lakeShape, CRS("+init=epsg:4326"))
bvr1 <- spTransform(lakeShapebvr, CRS("+init=epsg:4326"))
inf <- spTransform(inflow, CRS("+init=epsg:4326"))
emer <- spTransform(em, CRS("+init=epsg:4326"))
sss <- spTransform(hox, CRS("+init=epsg:4326"))
pipe <- spTransform(inflowpipe, CRS("+init=epsg:4326"))
#BVR
# At this point, fcr is a Formal Class Spatial Polygon dataframe. In order to plot it in 3D space we need to
# convert it into a 2D dataframe with a corresponding depth. This is done using the "fortify" function.
fcr1 <- fortify(fcr1)
bvr1 <- fortify(bvr1)
inf <- fortify(inf)
emer <- fortify(emer)
sss <- fortify(sss)
pipe <- fortify(pipe)
# Unfortunately, this is a necessary subsetting in order to get a nice clear perimeter figure of FCR.
# If the whole dataset is plotted, there are weird lines that emerge from the shape file. It starts
# after row 1022, and I have absolutely no idea why it does that.
# This subsets out the first 1022 rows from the fortified dataframe.
fcr1 <- fcr1[1:1024,]
inf <- inf[2:45,]
# Tag each fortified layer with a depth and a display label ("Depths" is
# used as the ggplot fill/legend variable below).
fcr1$depth <- 0
fcr1$Depths <- "Falling Creek"
bvr1$depth <- 0
bvr1$Depths <- "Beaverdam"
inf$depth <- 0
inf$Depths <- "Inflow"
emer$depth <- 3
emer$Depths <- "EM"
sss$depth <- 3
sss$Depths <- "HOx"
pipe$depth <- 0
pipe$Depths <- "Inflow Pipe"
# Drop the 4th fortify column so the two lake dataframes can be rbind-ed.
bvr1 <- bvr1[,-4]
fcr1 <- fcr1[,-4]
fcr_map <- rbind(fcr1, bvr1, deparse.level = 1)
fcr_add <- rbind(inf, emer, sss, pipe, deparse.level = 1)
# Base map: both reservoir outlines, filled by reservoir.
a = ggplot(fcr_map, aes(long, lat, fill=Depths)) +
  geom_polygon()+
  geom_path(color="black", lwd = 1.7) +
  coord_equal() +
  #geom_point(data = traps, aes(x = lon, y = lat), pch = 21, bg = "red", col = "black", cex = 5)+
  labs(x = "Longitude", y = "Latitude")+
  scale_fill_brewer()+
  theme(axis.title = element_text(size = 25, color = "black"))+
  theme(axis.text = element_text(size = 25, color = "black"))+
  theme_bw()
# Add inflow stream/pipe lines and the nitrate sampling sites,
# with point size proportional to NO3/NO2 concentration (ppb).
b = a +
  geom_line(data = inf, aes(x = long, y = lat), lwd = 1, color = "dodgerblue1")+
  geom_line(data = pipe, aes(x = long, y = lat), lwd = 1, color = "dodgerblue4", lty = "dashed")+
  geom_point(data = sites, aes(x = lon, y = lat, size = NO3NO2_ppb ), pch = 21, bg = "blue", col = "black")+
  scale_size_continuous(limits=c(0,250))
# Add a 0.25 km scale bar (ggsn::scalebar). NOTE(review): `c` shadows
# base::c for the rest of the session — consider renaming.
c = b +
  # north(fcr_map, symbol = 3, scale = 0.15, location = "topleft") +
  scalebar(fcr_map, dist = 0.25, dist_unit = "km",
           transform = TRUE, model = "WGS84")
c = c + ggtitle('September') + theme(plot.title = element_text(size = 40))
# Write the finished map to both PDF and PNG.
pdf("./Materials for R mapping/nutrient_mapping/09_September/20Sep19_RCC_NO3NO2.pdf", width = 8, height = 6)
c
dev.off()
png("./Materials for R mapping/nutrient_mapping/09_September/20Sep19_RCC_NO3NO2.png", width = 1100, height = 800)
c
dev.off()
# Retained alternative: zoomed FCR-only map with EM/HOx lines.
# ggplot(fcr1, aes(long, lat, fill=Depths)) +
#   geom_polygon()+
#   geom_path(color="black", lwd = 1.7) +
#   coord_equal() +
#   xlim(-79.8391, -79.836)+
#   ylim(37.3025,37.305)+
#   #geom_point(data = traps, aes(x = lon, y = lat), pch = 21, bg = "red", col = "black", cex = 5)+
#   labs(x = "Longitude", y = "Latitude")+
#   scale_fill_brewer()+
#   theme(axis.title = element_text(size = 12, color = "black"))+
#   theme(axis.text = element_text(size = 12, color = "black"))+
#   theme_bw()+
#   theme(legend.position = "none")+
#   geom_line(data = sss, aes(x = long, y = lat), lwd = 1, color = "dodgerblue4")+
#   geom_line(data = emer, aes(x = long, y = lat), lwd = 1, color = "dodgerblue4", lty = "dashed")
| /Materials for R mapping/nutrient_mapping/09_September/20Sep19_RCC_map_NO3NO2.R | no_license | wwoelmer/Thesis | R | false | false | 23,905 | r | # Load in the required packages that makes the 3D figure
# Load packages for 3D plotting (scatterplot3d), spatial I/O (rgdal, sf,
# maptools, GISTools, ggsn) and data wrangling (tidyverse, zoo).
pacman::p_load(scatterplot3d, rgdal, maptools, tidyverse, zoo, sf, GISTools, ggsn)
# Sampling-site coordinates with September nitrate (NO3/NO2, ppb) values.
sites <- read_csv("./Materials for R mapping/nutrient_mapping/09_September/Sep_RCC_nitrate_mapping.csv")
scatter.grid <- function (x, y = NULL, z = NULL, color = par("col"), pch = NULL,
main = NULL, sub = NULL, xlim = NULL, ylim = NULL, zlim = NULL,
xlab = NULL, ylab = NULL, zlab = NULL, scale.y = 1, angle = 40,
axis = TRUE, tick.marks = TRUE, label.tick.marks = TRUE,
x.ticklabs = NULL, y.ticklabs = NULL, z.ticklabs = NULL,
y.margin.add = 0, grid = TRUE, box = TRUE, lab = par("lab"),
lab.z = mean(lab[1:2]), type = "p", highlight.3d = FALSE,
mar = c(5, 3, 4, 3) + 0.1, bg = par("bg"), col.axis = par("col.axis"),
col.grid = "grey", col.lab = par("col.lab"), cex.symbols = par("cex"),
cex.axis = 0.8 * par("cex.axis"), cex.lab = par("cex.lab"),
font.axis = par("font.axis"), font.lab = par("font.lab"),
lty.axis = par("lty"), lty.grid = par("lty"), lty.hide = NULL,
lty.hplot = par("lty"), log = "", ...)
{
mem.par <- par(mar = mar)
x.scal <- y.scal <- z.scal <- 1
xlabel <- if (!missing(x))
deparse(substitute(x))
ylabel <- if (!missing(y))
deparse(substitute(y))
zlabel <- if (!missing(z))
deparse(substitute(z))
if (highlight.3d && !missing(color))
warning("color is ignored when highlight.3d = TRUE")
if (!is.null(d <- dim(x)) && (length(d) == 2) && (d[2] >=
4))
color <- x[, 4]
else if (is.list(x) && !is.null(x$color))
color <- x$color
xyz <- xyz.coords(x = x, y = y, z = z, xlab = xlabel, ylab = ylabel,
zlab = zlabel, log = log)
if (is.null(xlab)) {
xlab <- xyz$xlab
if (is.null(xlab))
xlab <- ""
}
if (is.null(ylab)) {
ylab <- xyz$ylab
if (is.null(ylab))
ylab <- ""
}
if (is.null(zlab)) {
zlab <- xyz$zlab
if (is.null(zlab))
zlab <- ""
}
if (length(color) == 1)
color <- rep(color, length(xyz$x))
else if (length(color) != length(xyz$x))
stop("length(color) ", "must be equal length(x) or 1")
angle <- (angle%%360)/90
yz.f <- scale.y * abs(if (angle < 1) angle else if (angle >
3) angle - 4 else 2 - angle)
yx.f <- scale.y * (if (angle < 2)
1 - angle
else angle - 3)
if (angle > 2) {
temp <- xyz$x
xyz$x <- xyz$y
xyz$y <- temp
temp <- xlab
xlab <- ylab
ylab <- temp
temp <- xlim
xlim <- ylim
ylim <- temp
}
angle.1 <- (1 < angle && angle < 2) || angle > 3
angle.2 <- 1 <= angle && angle <= 3
dat <- cbind(as.data.frame(xyz[c("x", "y", "z")]), col = color)
if (!is.null(xlim)) {
xlim <- range(xlim)
dat <- dat[xlim[1] <= dat$x & dat$x <= xlim[2], , drop = FALSE]
}
if (!is.null(ylim)) {
ylim <- range(ylim)
dat <- dat[ylim[1] <= dat$y & dat$y <= ylim[2], , drop = FALSE]
}
if (!is.null(zlim)) {
zlim <- range(zlim)
dat <- dat[zlim[1] <= dat$z & dat$z <= zlim[2], , drop = FALSE]
}
n <- nrow(dat)
if (n < 1)
stop("no data left within (x|y|z)lim")
y.range <- range(dat$y[is.finite(dat$y)])
if (type == "p" || type == "h") {
y.ord <- rev(order(dat$y))
dat <- dat[y.ord, ]
if (length(pch) > 1)
if (length(pch) != length(y.ord))
stop("length(pch) ", "must be equal length(x) or 1")
else pch <- pch[y.ord]
if (length(bg) > 1)
if (length(bg) != length(y.ord))
stop("length(bg) ", "must be equal length(x) or 1")
else bg <- bg[y.ord]
if (length(cex.symbols) > 1)
if (length(cex.symbols) != length(y.ord))
stop("length(cex.symbols) ", "must be equal length(x) or 1")
else cex.symbols <- cex.symbols[y.ord]
daty <- dat$y
daty[!is.finite(daty)] <- mean(daty[is.finite(daty)])
if (highlight.3d && !(all(diff(daty) == 0)))
dat$col <- rgb(red = seq(0, 1, length = n) * (y.range[2] -
daty)/diff(y.range), green = 0, blue = 0)
}
p.lab <- par("lab")
y.range <- range(dat$y[is.finite(dat$y)], ylim)
y.prty <- pretty(y.range, n = lab[2], min.n = max(1, min(0.5 *
lab[2], p.lab[2])))
y.scal <- round(diff(y.prty[1:2]), digits = 12)
y.add <- min(y.prty)
dat$y <- (dat$y - y.add)/y.scal
y.max <- (max(y.prty) - y.add)/y.scal
if (!is.null(ylim))
y.max <- max(y.max, ceiling((ylim[2] - y.add)/y.scal))
x.range <- range(dat$x[is.finite(dat$x)], xlim)
x.prty <- pretty(x.range, n = lab[1], min.n = max(1, min(0.5 *
lab[1], p.lab[1])))
x.scal <- round(diff(x.prty[1:2]), digits = 12)
dat$x <- dat$x/x.scal
x.range <- range(x.prty)/x.scal
x.max <- ceiling(x.range[2])
x.min <- floor(x.range[1])
if (!is.null(xlim)) {
x.max <- max(x.max, ceiling(xlim[2]/x.scal))
x.min <- min(x.min, floor(xlim[1]/x.scal))
}
x.range <- range(x.min, x.max)
z.range <- range(dat$z[is.finite(dat$z)], zlim)
z.prty <- pretty(z.range, n = lab.z, min.n = max(1, min(0.5 *
lab.z, p.lab[2])))
z.scal <- round(diff(z.prty[1:2]), digits = 12)
dat$z <- dat$z/z.scal
z.range <- range(z.prty)/z.scal
z.max <- ceiling(z.range[2])
z.min <- floor(z.range[1])
if (!is.null(zlim)) {
z.max <- max(z.max, ceiling(zlim[2]/z.scal))
z.min <- min(z.min, floor(zlim[1]/z.scal))
}
z.range <- range(z.min, z.max)
plot.new()
if (angle.2) {
x1 <- x.min + yx.f * y.max
x2 <- x.max
}
else {
x1 <- x.min
x2 <- x.max + yx.f * y.max
}
plot.window(c(x1, x2), c(z.min, z.max + yz.f * y.max))
temp <- strwidth(format(rev(y.prty))[1], cex = cex.axis/par("cex"))
if (angle.2)
x1 <- x1 - temp - y.margin.add
else x2 <- x2 + temp + y.margin.add
plot.window(c(x1, x2), c(z.min, z.max + yz.f * y.max))
if (angle > 2)
par(usr = par("usr")[c(2, 1, 3:4)])
usr <- par("usr")
title(main, sub, ...)
if ("xy" %in% grid || grid) {
i <- x.min:x.max
segments(i, z.min, i + (yx.f * y.max), yz.f * y.max +
z.min, col = col.grid, lty = lty.grid)
i <- 0:y.max
segments(x.min + (i * yx.f), i * yz.f + z.min, x.max +
(i * yx.f), i * yz.f + z.min, col = col.grid, lty = lty.grid)
}
if ("xz" %in% grid) {
i <- x.min:x.max
segments(i + (yx.f * y.max), yz.f * y.max + z.min,
i + (yx.f * y.max), yz.f * y.max + z.max,
col = col.grid, lty = lty.grid)
temp <- yx.f * y.max
temp1 <- yz.f * y.max
i <- z.min:z.max
segments(x.min + temp,temp1 + i,
x.max + temp,temp1 + i , col = col.grid, lty = lty.grid)
}
if ("yz" %in% grid) {
i <- 0:y.max
segments(x.min + (i * yx.f), i * yz.f + z.min,
x.min + (i * yx.f) ,i * yz.f + z.max,
col = col.grid, lty = lty.grid)
temp <- yx.f * y.max
temp1 <- yz.f * y.max
i <- z.min:z.max
segments(x.min + temp,temp1 + i,
x.min, i , col = col.grid, lty = lty.grid)
}
if (axis) {
xx <- if (angle.2)
c(x.min, x.max)
else c(x.max, x.min)
if (tick.marks) {
xtl <- (z.max - z.min) * (tcl <- -par("tcl"))/50
ztl <- (x.max - x.min) * tcl/50
mysegs <- function(x0, y0, x1, y1) segments(x0,
y0, x1, y1, col = col.axis, lty = lty.axis)
i.y <- 0:y.max
mysegs(yx.f * i.y - ztl + xx[1], yz.f * i.y + z.min,
yx.f * i.y + ztl + xx[1], yz.f * i.y + z.min)
i.x <- x.min:x.max
mysegs(i.x, -xtl + z.min, i.x, xtl + z.min)
i.z <- z.min:z.max
mysegs(-ztl + xx[2], i.z, ztl + xx[2], i.z)
if (label.tick.marks) {
las <- par("las")
mytext <- function(labels, side, at, ...) mtext(text = labels,
side = side, at = at, line = -0.5, col = col.lab,
cex = cex.axis, font = font.lab, ...)
if (is.null(x.ticklabs))
x.ticklabs <- format(i.x * x.scal)
mytext(x.ticklabs, side = 1, at = i.x)
if (is.null(z.ticklabs))
z.ticklabs <- format(i.z * z.scal)
mytext(z.ticklabs, side = if (angle.1)
4
else 2, at = i.z, adj = if (0 < las && las <
3)
1
else NA)
temp <- if (angle > 2)
rev(i.y)
else i.y
if (is.null(y.ticklabs))
y.ticklabs <- format(y.prty)
else if (angle > 2)
y.ticklabs <- rev(y.ticklabs)
text(i.y * yx.f + xx[1], i.y * yz.f + z.min,
y.ticklabs, pos = if (angle.1)
2
else 4, offset = 1, col = col.lab, cex = cex.axis/par("cex"),
font = font.lab)
}
}
mytext2 <- function(lab, side, line, at) mtext(lab,
side = side, line = line, at = at, col = col.lab,
cex = cex.lab, font = font.axis, las = 0)
lines(c(x.min, x.max), c(z.min, z.min), col = col.axis,
lty = lty.axis)
mytext2(xlab, 1, line = 1.5, at = mean(x.range))
lines(xx[1] + c(0, y.max * yx.f), c(z.min, y.max * yz.f +
z.min), col = col.axis, lty = lty.axis)
mytext2(ylab, if (angle.1)
2
else 4, line = 0.5, at = z.min + y.max * yz.f)
lines(xx[c(2, 2)], c(z.min, z.max), col = col.axis,
lty = lty.axis)
mytext2(zlab, if (angle.1)
4
else 2, line = 1.5, at = mean(z.range))
if (box) {
if (is.null(lty.hide))
lty.hide <- lty.axis
temp <- yx.f * y.max
temp1 <- yz.f * y.max
lines(c(x.min + temp, x.max + temp), c(z.min + temp1,
z.min + temp1), col = col.axis, lty = lty.hide)
lines(c(x.min + temp, x.max + temp), c(temp1 + z.max,
temp1 + z.max), col = col.axis, lty = lty.axis)
temp <- c(0, y.max * yx.f)
temp1 <- c(0, y.max * yz.f)
lines(temp + xx[2], temp1 + z.min, col = col.axis,
lty = lty.hide)
lines(temp + x.min, temp1 + z.max, col = col.axis,
lty = lty.axis)
temp <- yx.f * y.max
temp1 <- yz.f * y.max
lines(c(temp + x.min, temp + x.min), c(z.min + temp1,
z.max + temp1), col = col.axis, lty = if (!angle.2)
lty.hide
else lty.axis)
lines(c(x.max + temp, x.max + temp), c(z.min + temp1,
z.max + temp1), col = col.axis, lty = if (angle.2)
lty.hide
else lty.axis)
}
}
x <- dat$x + (dat$y * yx.f)
z <- dat$z + (dat$y * yz.f)
col <- as.character(dat$col)
if (type == "h") {
z2 <- dat$y * yz.f + z.min
segments(x, z, x, z2, col = col, cex = cex.symbols,
lty = lty.hplot, ...)
points(x, z, type = "p", col = col, pch = pch, bg = bg,
cex = cex.symbols, ...)
}
else points(x, z, type = type, col = col, pch = pch, bg = bg,
cex = cex.symbols, ...)
if (axis && box) {
lines(c(x.min, x.max), c(z.max, z.max), col = col.axis,
lty = lty.axis)
lines(c(0, y.max * yx.f) + x.max, c(0, y.max * yz.f) +
z.max, col = col.axis, lty = lty.axis)
lines(xx[c(1, 1)], c(z.min, z.max), col = col.axis,
lty = lty.axis)
}
ob <- ls()
rm(list = ob[!ob %in% c("angle", "mar", "usr", "x.scal",
"y.scal", "z.scal", "yx.f", "yz.f", "y.add", "z.min",
"z.max", "x.min", "x.max", "y.max", "x.prty", "y.prty",
"z.prty")])
rm(ob)
invisible(list(xyz.convert = function(x, y = NULL, z = NULL) {
xyz <- xyz.coords(x, y, z)
if (angle > 2) {
temp <- xyz$x
xyz$x <- xyz$y
xyz$y <- temp
}
y <- (xyz$y - y.add)/y.scal
return(list(x = xyz$x/x.scal + yx.f * y, y = xyz$z/z.scal +
yz.f * y))
}, points3d = function(x, y = NULL, z = NULL, type = "p",
...) {
xyz <- xyz.coords(x, y, z)
if (angle > 2) {
temp <- xyz$x
xyz$x <- xyz$y
xyz$y <- temp
}
y2 <- (xyz$y - y.add)/y.scal
x <- xyz$x/x.scal + yx.f * y2
y <- xyz$z/z.scal + yz.f * y2
mem.par <- par(mar = mar, usr = usr)
on.exit(par(mem.par))
if (type == "h") {
y2 <- z.min + yz.f * y2
segments(x, y, x, y2, ...)
points(x, y, type = "p", ...)
} else points(x, y, type = type, ...)
}, plane3d = function(Intercept, x.coef = NULL, y.coef = NULL,
lty = "dashed", lty.box = NULL, ...) {
if (!is.atomic(Intercept) && !is.null(coef(Intercept))) Intercept <- coef(Intercept)
if (is.null(lty.box)) lty.box <- lty
if (is.null(x.coef) && length(Intercept) == 3) {
x.coef <- Intercept[if (angle > 2) 3 else 2]
y.coef <- Intercept[if (angle > 2) 2 else 3]
Intercept <- Intercept[1]
}
mem.par <- par(mar = mar, usr = usr)
on.exit(par(mem.par))
x <- x.min:x.max
ltya <- c(lty.box, rep(lty, length(x) - 2), lty.box)
x.coef <- x.coef * x.scal
z1 <- (Intercept + x * x.coef + y.add * y.coef)/z.scal
z2 <- (Intercept + x * x.coef + (y.max * y.scal + y.add) *
y.coef)/z.scal
segments(x, z1, x + y.max * yx.f, z2 + yz.f * y.max,
lty = ltya, ...)
y <- 0:y.max
ltya <- c(lty.box, rep(lty, length(y) - 2), lty.box)
y.coef <- (y * y.scal + y.add) * y.coef
z1 <- (Intercept + x.min * x.coef + y.coef)/z.scal
z2 <- (Intercept + x.max * x.coef + y.coef)/z.scal
segments(x.min + y * yx.f, z1 + y * yz.f, x.max + y *
yx.f, z2 + y * yz.f, lty = ltya, ...)
}, box3d = function(...) {
mem.par <- par(mar = mar, usr = usr)
on.exit(par(mem.par))
lines(c(x.min, x.max), c(z.max, z.max), ...)
lines(c(0, y.max * yx.f) + x.max, c(0, y.max * yz.f) +
z.max, ...)
lines(c(0, y.max * yx.f) + x.min, c(0, y.max * yz.f) +
z.max, ...)
lines(c(x.max, x.max), c(z.min, z.max), ...)
lines(c(x.min, x.min), c(z.min, z.max), ...)
lines(c(x.min, x.max), c(z.min, z.min), ...)
}))
}
# Build a ggplot2 scale bar (two alternating rectangles + distance labels),
# optionally topped by a north arrow, returned as a list of layers/annotations
# to add to a map plot.
#
# Args:
#   lon, lat: coordinates of the bottom-left corner of the scale bar.
#   distance_lon: length (in dist_unit) of each rectangle of the bar.
#   distance_lat: height (in dist_unit) of the rectangles.
#   distance_legend: distance (in dist_unit) between the bar and its labels.
#   dist_unit: unit used for all distances ("km" by default).
#   rec_fill, rec_colour, rec2_fill, rec2_colour: fill/border colours of the
#     first and second rectangle.
#   legend_colour, legend_size: colour and size of the distance labels.
#   orientation: if TRUE, also draw a north arrow above the bar.
#   arrow_length, arrow_distance, arrow_north_size: shaft length, gap between
#     bar and arrow, and size of the "N" label (all in dist_unit except size).
# Returns: a list of ggplot2 layers (add to a plot with `+`).
scale_bar <- function(lon, lat, distance_lon, distance_lat, distance_legend, dist_unit = "km", rec_fill = "white", rec_colour = "black", rec2_fill = "black", rec2_colour = "black", legend_colour = "black", legend_size = 3, orientation = TRUE, arrow_length = 500, arrow_distance = 300, arrow_north_size = 6){
  # Use the callee's exact argument name 'dist_units'; the original spelled it
  # 'dist_unit' and silently relied on R's partial argument matching.
  the_scale_bar <- create_scale_bar(lon = lon, lat = lat, distance_lon = distance_lon, distance_lat = distance_lat, distance_legend = distance_legend, dist_units = dist_unit)
  # First rectangle
  rectangle1 <- geom_polygon(data = the_scale_bar$rectangle, aes(x = lon, y = lat), fill = rec_fill, colour = rec_colour)
  # Second rectangle
  rectangle2 <- geom_polygon(data = the_scale_bar$rectangle2, aes(x = lon, y = lat), fill = rec2_fill, colour = rec2_colour)
  # Distance labels, e.g. "0km", "5km", "10km"
  scale_bar_legend <- annotate("text", label = paste0(the_scale_bar$legend[,"text"], dist_unit), x = the_scale_bar$legend[,"long"], y = the_scale_bar$legend[,"lat"], size = legend_size, colour = legend_colour)
  res <- list(rectangle1, rectangle2, scale_bar_legend)
  if(orientation){# Add an arrow pointing North
    coords_arrow <- create_orientation_arrow(scale_bar = the_scale_bar, length = arrow_length, distance = arrow_distance, dist_units = dist_unit)
    arrow <- list(geom_segment(data = coords_arrow$res, aes(x = x, y = y, xend = xend, yend = yend)), annotate("text", label = "N", x = coords_arrow$coords_n[1,"x"], y = coords_arrow$coords_n[1,"y"], size = arrow_north_size, colour = "black"))
    res <- c(res, arrow)
  }
  return(res)
}
# Compute the geometry of a two-segment scale bar: two adjacent rectangles of
# distance_lon each, plus the anchor points of the "0", "distance_lon" and
# "2*distance_lon" tick labels. All distances are projected with
# gcDestination (great-circle, WGS84) from the bottom-left corner (lon, lat).
create_scale_bar <- function(lon,lat,distance_lon,distance_lat,distance_legend, dist_units = "km"){
  # Corner points defining the first rectangle.
  corner_right <- gcDestination(lon = lon, lat = lat, bearing = 90, dist = distance_lon, dist.units = dist_units, model = "WGS84")
  corner_top <- gcDestination(lon = lon, lat = lat, bearing = 0, dist = distance_lat, dist.units = dist_units, model = "WGS84")
  top_lat <- corner_top[1,"lat"]
  right_lon <- corner_right[1,"long"]
  first_rect <- data.frame(
    lon = c(lon, lon, right_lon, right_lon, lon),
    lat = c(lat, top_lat, top_lat, lat, lat),
    stringsAsFactors = FALSE)
  # Second rectangle, immediately to the right of the first one.
  corner_right2 <- gcDestination(lon = lon, lat = lat, bearing = 90, dist = distance_lon*2, dist.units = dist_units, model = "WGS84")
  right2_lon <- corner_right2[1,"long"]
  second_rect <- data.frame(
    lon = c(right_lon, right_lon, right2_lon, right2_lon, right_lon),
    lat = c(lat, top_lat, top_lat, lat, lat),
    stringsAsFactors = FALSE)
  # Tick-label anchors: distance_legend above the bar, at the left edge,
  # the junction of the rectangles, and the right edge.
  label_left <- gcDestination(lon = lon, lat = lat, bearing = 0, dist = distance_legend, dist.units = dist_units, model = "WGS84")
  label_mid <- label_right <- label_left
  label_mid[1,"long"] <- right_lon
  label_right[1,"long"] <- right2_lon
  legend <- rbind(label_left, label_mid, label_right)
  legend <- data.frame(cbind(legend, text = c(0, distance_lon, distance_lon*2)), stringsAsFactors = FALSE, row.names = NULL)
  return(list(rectangle = first_rect, rectangle2 = second_rect, legend = legend))
}
# Compute the segments of a north arrow drawn above the scale bar: a vertical
# shaft of the given length topped by two oblique barbs, plus the point where
# the letter "N" should be printed (halfway up the shaft).
create_orientation_arrow <- function(scale_bar, length, distance = 1, dist_units = "km"){
  # Start from the first corner of the scale bar's second rectangle.
  origin_lon <- scale_bar$rectangle2[1,1]
  origin_lat <- scale_bar$rectangle2[1,2]
  # Base of the arrow, 'distance' units above the bar.
  base <- gcDestination(lon = origin_lon, lat = origin_lat, bearing = 0, dist = distance, dist.units = dist_units, model = "WGS84")
  base_lon <- base[1,"long"]
  base_lat <- base[1,"lat"]
  # Tip of the shaft, and the two barbs (each length/5 long).
  tip <- gcDestination(lon = base_lon, lat = base_lat, bearing = 0, dist = length, dist.units = dist_units, model = "WGS84")
  barb_left <- gcDestination(lon = tip[1,"long"], lat = tip[1,"lat"], bearing = 225, dist = length/5, dist.units = dist_units, model = "WGS84")
  barb_right <- gcDestination(lon = tip[1,"long"], lat = tip[1,"lat"], bearing = 135, dist = length/5, dist.units = dist_units, model = "WGS84")
  # Three segments, all ending at the tip: shaft, left barb, right barb.
  segments <- data.frame(
    x = c(base_lon, barb_left[1,"long"], barb_right[1,"long"]),
    y = c(base_lat, barb_left[1,"lat"], barb_right[1,"lat"]),
    xend = rep(tip[1,"long"], 3),
    yend = rep(tip[1,"lat"], 3),
    stringsAsFactors = FALSE)
  # "N" goes halfway between the base and the tip of the shaft.
  coords_n <- cbind(x = base_lon, y = (base_lat + tip[1,"lat"])/2)
  return(list(res = segments, coords_n = coords_n))
}
# Extract the lake shape file that was originally developed from Zach Munger.
# Falling Creek Reservoir (FCR) layers: lake outline, inflow stream, plus
# "EM_line" and "sss_system" (presumably the EM line and the SSS/HOx
# oxygenation system -- layer names suggest so; confirm with the data owner).
lakeShape = readOGR(dsn=path.expand("./Materials for R mapping/FCR"), layer="506_9")
inflow = readOGR(dsn=path.expand("./Materials for R mapping/FCR"), layer="inf2")
em = readOGR(dsn=path.expand("./Materials for R mapping/FCR"), layer="EM_line")
hox = readOGR(dsn=path.expand("./Materials for R mapping/FCR"), layer="sss_system")
# Extract the lake shape file that was originally developed from Zach Munger.
# Beaverdam Reservoir (BVR) layers: lake outline and inflow pipe.
lakeShapebvr = readOGR(dsn=path.expand("./Materials for R mapping/BVR"), layer="0_2mpoly12apr18")
inflowpipe = readOGR(dsn=path.expand("./Materials for R mapping/BVR"), layer="inflow pipe")
# The data are in Standard Easting-Northing Units, this short line of code transforms our units into Lat-Long
# If you are unsure about this, follow the link below for a detailed workflow about spatial transformations.
# http://gis.stackexchange.com/questions/142156/r-how-to-get-latitudes-and-longitudes-from-a-rasterlayer
#FCR
# Reproject every layer to WGS84 lat/long (EPSG:4326).
fcr1 <- spTransform(lakeShape, CRS("+init=epsg:4326"))
bvr1 <- spTransform(lakeShapebvr, CRS("+init=epsg:4326"))
inf <- spTransform(inflow, CRS("+init=epsg:4326"))
emer <- spTransform(em, CRS("+init=epsg:4326"))
sss <- spTransform(hox, CRS("+init=epsg:4326"))
pipe <- spTransform(inflowpipe, CRS("+init=epsg:4326"))
#BVR
# At this point, fcr is a Formal Class Spatial Polygon dataframe. In order to plot it in 3D space we need to
# convert it into a 2D dataframe with a corresponding depth. This is done using the "fortify" function.
fcr1 <- fortify(fcr1)
bvr1 <- fortify(bvr1)
inf <- fortify(inf)
emer <- fortify(emer)
sss <- fortify(sss)
pipe <- fortify(pipe)
# Unfortunately, this is a necessary subsetting in order to get a nice clear perimeter figure of FCR.
# If the whole dataset is plotted, there are weird lines that emerge fromt he shape file. It starts
# after row 1022, and I have absolutely no idea why it does that.
# This subsets out the first 1022 rows from the fortified dataframe.
# NOTE(review): the comment above says 1022 rows, but the code keeps 1:1024
# (and trims inf to rows 2:45) -- confirm which is intended.
fcr1 <- fcr1[1:1024,]
inf <- inf[2:45,]
# Attach a depth value and a display label ("Depths", used as the fill/legend
# key in the plot below) to every fortified layer.
fcr1$depth <- 0
fcr1$Depths <- "Falling Creek"
bvr1$depth <- 0
bvr1$Depths <- "Beaverdam"
inf$depth <- 0
inf$Depths <- "Inflow"
emer$depth <- 3
emer$Depths <- "EM"
sss$depth <- 3
sss$Depths <- "HOx"
pipe$depth <- 0
pipe$Depths <- "Inflow Pipe"
# Drop the 4th fortify column from the two polygon layers so fcr1 and bvr1
# have matching columns for rbind (presumably the "hole" column -- verify).
bvr1 <- bvr1[,-4]
fcr1 <- fcr1[,-4]
# Combined polygon layer (both reservoirs) and combined line/point layers.
fcr_map <- rbind(fcr1, bvr1, deparse.level = 1)
fcr_add <- rbind(inf, emer, sss, pipe, deparse.level = 1)
# Base map: reservoir polygons filled by the "Depths" label.
# NOTE(review): 'a'/'b'/'c' shadow base functions (notably c()); calls like
# c(0,250) below still resolve to the function, but plain names such as
# p1/p2/p3 would be safer.
# NOTE(review): theme_bw() is a complete theme and is added AFTER the two
# theme() calls, so the size-25 axis settings above it are discarded.
a = ggplot(fcr_map, aes(long, lat, fill=Depths)) +
  geom_polygon()+
  geom_path(color="black", lwd = 1.7) +
  coord_equal() +
  #geom_point(data = traps, aes(x = lon, y = lat), pch = 21, bg = "red", col = "black", cex = 5)+
  labs(x = "Longitude", y = "Latitude")+
  scale_fill_brewer()+
  theme(axis.title = element_text(size = 25, color = "black"))+
  theme(axis.text = element_text(size = 25, color = "black"))+
  theme_bw()
# Overlay inflow stream/pipe and nutrient sampling sites, with point size
# mapped to NO3/NO2 concentration ('sites' is assumed to be defined earlier
# in the full script -- not visible here).
b = a +
  geom_line(data = inf, aes(x = long, y = lat), lwd = 1, color = "dodgerblue1")+
  geom_line(data = pipe, aes(x = long, y = lat), lwd = 1, color = "dodgerblue4", lty = "dashed")+
  geom_point(data = sites, aes(x = lon, y = lat, size = NO3NO2_ppb ), pch = 21, bg = "blue", col = "black")+
  scale_size_continuous(limits=c(0,250))
# Add a scale bar (presumably ggsn::scalebar -- distinct from the local
# scale_bar() helper defined above).
c = b +
  # north(fcr_map, symbol = 3, scale = 0.15, location = "topleft") +
  scalebar(fcr_map, dist = 0.25, dist_unit = "km",
           transform = TRUE, model = "WGS84")
c = c + ggtitle('September') + theme(plot.title = element_text(size = 40))
# Export the finished map as both PDF and PNG.
pdf("./Materials for R mapping/nutrient_mapping/09_September/20Sep19_RCC_NO3NO2.pdf", width = 8, height = 6)
c
dev.off()
png("./Materials for R mapping/nutrient_mapping/09_September/20Sep19_RCC_NO3NO2.png", width = 1100, height = 800)
c
dev.off()
# ggplot(fcr1, aes(long, lat, fill=Depths)) +
# geom_polygon()+
# geom_path(color="black", lwd = 1.7) +
# coord_equal() +
# xlim(-79.8391, -79.836)+
# ylim(37.3025,37.305)+
# #geom_point(data = traps, aes(x = lon, y = lat), pch = 21, bg = "red", col = "black", cex = 5)+
# labs(x = "Longitude", y = "Latitude")+
# scale_fill_brewer()+
# theme(axis.title = element_text(size = 12, color = "black"))+
# theme(axis.text = element_text(size = 12, color = "black"))+
# theme_bw()+
# theme(legend.position = "none")+
# geom_line(data = sss, aes(x = long, y = lat), lwd = 1, color = "dodgerblue4")+
# geom_line(data = emer, aes(x = long, y = lat), lwd = 1, color = "dodgerblue4", lty = "dashed")
|
library(testthat)
library(ONION)
test_check("ONION") | /unitTests/testthat.R | no_license | anu-bioinfo/ONION | R | false | false | 53 | r | library(testthat)
library(ONION)
test_check("ONION") |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_candidate.R
\name{get_candidate}
\alias{get_candidate}
\title{Get Candidate Info
https://www.propublica.org/datastore/apis
GET https://api.propublica.org/campaign-finance/v1/{cycle}/candidates/{fec-id}}
\usage{
get_candidate(FEC_ID, cycle = 2018, myAPI_Key)
}
\arguments{
\item{FEC_ID}{The FEC-assigned 9-character ID of a candidate.}
\item{cycle}{The election cycle; must be an even-numbered year between 1996 and 2018}
\item{myAPI_Key}{To use the Campaign Finance API, you must sign up for an API key. The API key must be included in all API requests to the server, set as a header.}
}
\value{
a list object with the return values about candidate basic information
}
\description{
Get Candidate Info
https://www.propublica.org/datastore/apis
GET https://api.propublica.org/campaign-finance/v1/{cycle}/candidates/{fec-id}
}
\examples{
\dontrun{
get_candidate('P60005915', 2016)}
}
| /man/get_candidate.Rd | no_license | DavytJ/ProPublicaR | R | false | true | 960 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_candidate.R
\name{get_candidate}
\alias{get_candidate}
\title{Get Candidate Info
https://www.propublica.org/datastore/apis
GET https://api.propublica.org/campaign-finance/v1/{cycle}/candidates/{fec-id}}
\usage{
get_candidate(FEC_ID, cycle = 2018, myAPI_Key)
}
\arguments{
\item{FEC_ID}{The FEC-assigned 9-character ID of a candidate.}
\item{cycle}{The election cycle must be even-numbered year between 1996 and 2018}
\item{myAPI_key}{use the Campaign Finance API, you must sign up for an API key. The API key must be included in all API requests to the server, set as a header.}
}
\value{
a list object with the return values about candidate basic information
}
\description{
Get Candidate Info
https://www.propublica.org/datastore/apis
GET https://api.propublica.org/campaign-finance/v1/{cycle}/candidates/{fec-id}
}
\examples{
\dontrun{
get_candidate('P60005915', 2016)}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/idnotification.R
\name{get_id_notification}
\alias{get_id_notification}
\alias{set_id_notification}
\title{Get/Set Notifications}
\usage{
get_id_notification(identity, ...)
set_id_notification(identity, type = c("Bounce", "Complaint", "Delivery"),
topic, ...)
}
\arguments{
\item{identity}{An SES identity.}
\item{type}{A character string specifying a notification type.}
\item{topic}{An SNS topic name}
\item{\dots}{Additional arguments passed to \code{\link{sesPOST}}.}
}
\description{
Get/Set ID Notifications
}
\examples{
\dontrun{
# get
get_id_notification()
# set
set_id_notification()
}
}
| /man/idnotification.Rd | no_license | metanoid/aws.ses | R | false | true | 681 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/idnotification.R
\name{get_id_notification}
\alias{get_id_notification}
\alias{set_id_notification}
\title{Get/Set Notifications}
\usage{
get_id_notification(identity, ...)
set_id_notification(identity, type = c("Bounce", "Complaint", "Delivery"),
topic, ...)
}
\arguments{
\item{identity}{An SES identity.}
\item{type}{A character string specifying a notification type.}
\item{topic}{An SNS topic name}
\item{\dots}{Additional arguments passed to \code{\link{sesPOST}}.}
}
\description{
Get/Set ID Notifications
}
\examples{
\dontrun{
# get
get_id_notifiaction()
# set
set_id_notifiaction()
}
}
|
traits.to.kruk=function(flagella=NA,silica=NA,MLD=NA,mucilage=NA,aerotopes=NA,SV=NA,V=NA)
{
kruk.mfg=''
if(flagella==1 & silica==1) kruk.mfg='II'
if(flagella==1 & silica==0 & MLD < 2) kruk.mfg='I'
if(flagella==1 & silica==0 & MLD >= 2) kruk.mfg='V'
if(flagella==0 & silica==1) kruk.mfg='VI'
if(flagella==0 & silica==0 & mucilage==1 & aerotopes==1 & SV >=0.6) kruk.mfg='III'
if(flagella==0 & silica==0 & mucilage==1 & aerotopes==1 & SV < 0.6) kruk.mfg='VII'
if(flagella==0 & silica==0 & mucilage==1 & aerotopes==0 & SV < 10) kruk.mfg='I'
if(flagella==0 & silica==0 & mucilage==1 & aerotopes==0 & SV >= 10) kruk.mfg='VII'
if(flagella==0 & silica==0 & mucilage==0 & V < 30 & MLD <20) kruk.mfg='I'
if(flagella==0 & silica==0 & mucilage==0 & V < 30 & MLD >= 20) kruk.mfg='IV'
if(flagella==0 & silica==0 & mucilage==0 & V >= 30 & aerotopes==1 ) kruk.mfg='III'
if(flagella==0 & silica==0 & mucilage==0 & V >= 30 & aerotopes==0 ) kruk.mfg='IV'
return(kruk.mfg)
} | /traits_to_kruk2010.r | no_license | jpdoubek/GEISHA_phytoplankton | R | false | false | 969 | r | traits.to.kruk=function(flagella=NA,silica=NA,MLD=NA,mucilage=NA,aerotopes=NA,SV=NA,V=NA)
{
kruk.mfg=''
if(flagella==1 & silica==1) kruk.mfg='II'
if(flagella==1 & silica==0 & MLD < 2) kruk.mfg='I'
if(flagella==1 & silica==0 & MLD >= 2) kruk.mfg='V'
if(flagella==0 & silica==1) kruk.mfg='VI'
if(flagella==0 & silica==0 & mucilage==1 & aerotopes==1 & SV >=0.6) kruk.mfg='III'
if(flagella==0 & silica==0 & mucilage==1 & aerotopes==1 & SV < 0.6) kruk.mfg='VII'
if(flagella==0 & silica==0 & mucilage==1 & aerotopes==0 & SV < 10) kruk.mfg='I'
if(flagella==0 & silica==0 & mucilage==1 & aerotopes==0 & SV >= 10) kruk.mfg='VII'
if(flagella==0 & silica==0 & mucilage==0 & V < 30 & MLD <20) kruk.mfg='I'
if(flagella==0 & silica==0 & mucilage==0 & V < 30 & MLD >= 20) kruk.mfg='IV'
if(flagella==0 & silica==0 & mucilage==0 & V >= 30 & aerotopes==1 ) kruk.mfg='III'
if(flagella==0 & silica==0 & mucilage==0 & V >= 30 & aerotopes==0 ) kruk.mfg='IV'
return(kruk.mfg)
} |
library(asnipe)
### Name: get_sampling_periods
### Title: Convert group or individual data into sampling periods
### Aliases: get_sampling_periods
### ** Examples
## define group memberships (these would be read from a file)
individuals <- data.frame(ID=c("C695905","H300253","H300253",
"H300283","H839876","F464557","H300296","H300253",
"F464557","H300296","C695905","H300283","H839876"),
GROUP=c(1,1,2,2,2,3,3,4,5,5,6,6,6))
## create a time column
individuals <- cbind(individuals,
DAY=c(1,1,1,1,1,2,2,2,3,3,3,3,3))
SPs <- get_sampling_periods(individuals[,c(1,2)],
individuals[,3],1,data_format="individuals")
occurs <- get_sampling_periods(individuals[,c(1,2)],
individuals[,3],1,data_format="individuals", return="occ")
## define group memberships (these would be read from a file)
groups <- list(G1=c("C695905","H300253"),
G2=c("H300253","H300283","H839876"),
G3=c("F464557","H300296"),
G4=c("H300253"),
G5=c("F464557","H300296"),
G6=c("C695905","H300283","H839876"))
## create a time variable
days <- c(1,1,2,2,3,3)
SPs <- get_sampling_periods(groups,
days,1,data_format="groups")
occurs <- get_sampling_periods(groups,
days,1,data_format="groups", return="occ")
| /data/genthat_extracted_code/asnipe/examples/get_sampling_periods.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,197 | r | library(asnipe)
### Name: get_sampling_periods
### Title: Convert group or individual data into sampling periods
### Aliases: get_sampling_periods
### ** Examples
## define group memberships (these would be read from a file)
individuals <- data.frame(ID=c("C695905","H300253","H300253",
"H300283","H839876","F464557","H300296","H300253",
"F464557","H300296","C695905","H300283","H839876"),
GROUP=c(1,1,2,2,2,3,3,4,5,5,6,6,6))
## create a time column
individuals <- cbind(individuals,
DAY=c(1,1,1,1,1,2,2,2,3,3,3,3,3))
SPs <- get_sampling_periods(individuals[,c(1,2)],
individuals[,3],1,data_format="individuals")
occurs <- get_sampling_periods(individuals[,c(1,2)],
individuals[,3],1,data_format="individuals", return="occ")
## define group memberships (these would be read from a file)
groups <- list(G1=c("C695905","H300253"),
G2=c("H300253","H300283","H839876"),
G3=c("F464557","H300296"),
G4=c("H300253"),
G5=c("F464557","H300296"),
G6=c("C695905","H300283","H839876"))
## create a time variable
days <- c(1,1,2,2,3,3)
SPs <- get_sampling_periods(groups,
days,1,data_format="groups")
occurs <- get_sampling_periods(groups,
days,1,data_format="groups", return="occ")
|
library(nsRFA)
### Name: DISTPLOTS
### Title: Empirical distribution plots
### Aliases: DISTPLOTS plotpos plotposRP loglogplot unifplot normplot
### lognormplot studentplot logisplot gammaplot expplot paretoplot
### gumbelplot frechetplot weibullplot plotposRPhist pointspos
### pointsposRP loglogpoints unifpoints normpoints studentpoints
### logispoints gammapoints exppoints gumbelpoints weibullpoints
### regionalplotpos regionalnormplot regionallognormplot regionalexpplot
### regionalparetoplot regionalgumbelplot regionalfrechetplot
### pointsposRPhist
### Keywords: hplot
### ** Examples
x <- rnorm(30,10,2)
plotpos(x)
normplot(x)
normplot(x,xlab=expression(D[m]),ylab=expression(hat(F)),
main="Normal plot",cex.main=1,font.main=1)
normplot(x,line=FALSE)
x <- rlnorm(30,log(100),log(10))
normplot(x)
lognormplot(x)
x <- rand.gumb(30,1000,100)
normplot(x)
gumbelplot(x)
x <- rnorm(30,10,2)
y <- rnorm(50,10,3)
z <- c(x,y)
codz <- c(rep(1,30),rep(2,50))
regionalplotpos(z,codz)
regionalnormplot(z,codz,xlab="z")
regionallognormplot(z,codz)
regionalgumbelplot(z,codz)
plotpos(x)
pointspos(y,pch=2,col=2)
x <- rnorm(50,10,2)
F <- seq(0.01,0.99,by=0.01)
qq <- qnorm(F,10,2)
plotpos(x)
pointspos(qq,type="l")
normplot(x,line=FALSE)
normpoints(x,type="l",lty=2,col=3)
lognormplot(x)
normpoints(x,type="l",lty=2,col=3)
gumbelplot(x)
gumbelpoints(x,type="l",lty=2,col=3)
# distributions comparison in probabilistic graphs
x <- rnorm(50,10,2)
F <- seq(0.001,0.999,by=0.001)
loglikelhood <- function(param) {-sum(dgamma(x, shape=param[1],
scale=param[2], log=TRUE))}
parameters <- optim(c(1,1),loglikelhood)$par
qq <- qgamma(F,shape=parameters[1],scale=parameters[2])
plotpos(x)
pointspos(qq,type="l")
normplot(x,line=FALSE)
normpoints(qq,type="l")
lognormplot(x,line=FALSE)
normpoints(qq,type="l")
| /data/genthat_extracted_code/nsRFA/examples/DISTPLOTS.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,855 | r | library(nsRFA)
### Name: DISTPLOTS
### Title: Empirical distribution plots
### Aliases: DISTPLOTS plotpos plotposRP loglogplot unifplot normplot
### lognormplot studentplot logisplot gammaplot expplot paretoplot
### gumbelplot frechetplot weibullplot plotposRPhist pointspos
### pointsposRP loglogpoints unifpoints normpoints studentpoints
### logispoints gammapoints exppoints gumbelpoints weibullpoints
### regionalplotpos regionalnormplot regionallognormplot regionalexpplot
### regionalparetoplot regionalgumbelplot regionalfrechetplot
### pointsposRPhist
### Keywords: hplot
### ** Examples
x <- rnorm(30,10,2)
plotpos(x)
normplot(x)
normplot(x,xlab=expression(D[m]),ylab=expression(hat(F)),
main="Normal plot",cex.main=1,font.main=1)
normplot(x,line=FALSE)
x <- rlnorm(30,log(100),log(10))
normplot(x)
lognormplot(x)
x <- rand.gumb(30,1000,100)
normplot(x)
gumbelplot(x)
x <- rnorm(30,10,2)
y <- rnorm(50,10,3)
z <- c(x,y)
codz <- c(rep(1,30),rep(2,50))
regionalplotpos(z,codz)
regionalnormplot(z,codz,xlab="z")
regionallognormplot(z,codz)
regionalgumbelplot(z,codz)
plotpos(x)
pointspos(y,pch=2,col=2)
x <- rnorm(50,10,2)
F <- seq(0.01,0.99,by=0.01)
qq <- qnorm(F,10,2)
plotpos(x)
pointspos(qq,type="l")
normplot(x,line=FALSE)
normpoints(x,type="l",lty=2,col=3)
lognormplot(x)
normpoints(x,type="l",lty=2,col=3)
gumbelplot(x)
gumbelpoints(x,type="l",lty=2,col=3)
# distributions comparison in probabilistic graphs
x <- rnorm(50,10,2)
F <- seq(0.001,0.999,by=0.001)
loglikelhood <- function(param) {-sum(dgamma(x, shape=param[1],
scale=param[2], log=TRUE))}
parameters <- optim(c(1,1),loglikelhood)$par
qq <- qgamma(F,shape=parameters[1],scale=parameters[2])
plotpos(x)
pointspos(qq,type="l")
normplot(x,line=FALSE)
normpoints(qq,type="l")
lognormplot(x,line=FALSE)
normpoints(qq,type="l")
|
/01_school/07_예측방법론/예측방법론.R | no_license | braveji18/mystudy | R | false | false | 18,541 | r | ||
# Integration tests for dc_oai_listrecords(). These hit the live OAI-PMH
# endpoint, so they are skipped on CRAN (the first block also on Travis).
context("dc_oai_listrecords")
test_that("dc_oai_listrecords works", {
  skip_on_cran()
  skip_on_travis()
  # a one-month window should return a tibble of records with identifiers
  a <- dc_oai_listrecords(from = '2011-06-01T', until = '2011-07-01T')
  expect_is(a, "data.frame")
  expect_is(a, "tbl_df")
  expect_is(a$identifier, "character")
})
test_that("dc_oai_listrecords fails well", {
  skip_on_cran()
  # malformed 'until' values and a non-character 'url' must raise errors
  expect_error(dc_oai_listrecords(from = '2011-06-01T', until = 'adffdsadsf'),
               "The request includes illegal arguments",
               class = "error")
  expect_error(dc_oai_listrecords(from = '2011-06-01T', until = 5),
               "The request includes illegal arguments",
               class = "error")
  expect_error(dc_oai_listrecords(url = 5), "One or more of your URLs",
               class = "error")
})
| /tests/testthat/test-dc_oai_listrecords.R | permissive | katrinleinweber/rdatacite | R | false | false | 742 | r | context("dc_oai_listrecords")
test_that("dc_oai_listrecords works", {
skip_on_cran()
skip_on_travis()
a <- dc_oai_listrecords(from = '2011-06-01T', until = '2011-07-01T')
expect_is(a, "data.frame")
expect_is(a, "tbl_df")
expect_is(a$identifier, "character")
})
test_that("dc_oai_listrecords fails well", {
skip_on_cran()
expect_error(dc_oai_listrecords(from = '2011-06-01T', until = 'adffdsadsf'),
"The request includes illegal arguments",
class = "error")
expect_error(dc_oai_listrecords(from = '2011-06-01T', until = 5),
"The request includes illegal arguments",
class = "error")
expect_error(dc_oai_listrecords(url = 5), "One or more of your URLs",
class = "error")
})
|
# Standard testthat entry point: runs every test under tests/testthat/ for
# the 'nuvolos' package (executed by R CMD check).
library(testthat)
library(nuvolos)
test_check("nuvolos")
| /tests/testthat.R | no_license | nuvolos-cloud/r-connector | R | false | false | 58 | r | library(testthat)
library(nuvolos)
test_check("nuvolos")
|
# Monte Carlo study of the corrected likelihood-ratio test for
# H0: Sigma = I. For each dimension p in p.vec we simulate 'sim' datasets
# under H0 (Sigma = I, gives empirical size) and under an alternative
# covariance Sigma2 (gives empirical power), compute Box's Bartlett-corrected
# LR statistic, and compare it to a chi-square critical value with a
# higher-order (gamma2) adjustment.
library(mvtnorm)
library(ggplot2)
library(readr)
library(dplyr)
library(heplots)
set.seed(7017)
n <- 500                              # sample size per dataset
p.vec <- c(5, 10, 50, 100, 300, 375)  # dimensions to test
N <- n - 1                            # df of the sample covariance
sim <- 200                            # Monte Carlo replicates per p
# Collect one numeric row (p, statistic, type) per replicate in a
# preallocated list and bind once at the end: avoids the quadratic
# rbind-in-a-loop growth of the original, and avoids naming the result
# 'table', which masked base::table().
rows <- vector("list", 2 * sim * length(p.vec))
k <- 0
timer <- Sys.time()
for (i in seq_along(p.vec)) {
  p <- p.vec[i]
  mu <- rep(0, p)
  Sigma <- diag(p)        # null covariance: identity
  Sigma2 <- diag(.05,p)   # alternative: small diagonal except first entry
  Sigma2[1,1] <- 1
  # Bartlett-type correction factor (loop-invariant in s, so hoisted).
  rho <- 1-(2*p**2+3*p-1)/(6*(n-1)*(p+1))
  for (s in seq_len(sim)) {
    # statistic under H0
    X <- rmvnorm(n, mu, Sigma)
    S <- cov(X)
    ts <- -2*rho*(0.5*p*N + (0.5*N) *
      (unlist(determinant(S, logarithm = TRUE))[[1]]-sum(diag(S))))
    # statistic under the alternative
    X2 <- rmvnorm(n, mu, Sigma2)
    S2 <- cov(X2)
    ts2 <- -2*rho*(0.5*p*N + (0.5*N) *
      (unlist(determinant(S2, logarithm = TRUE))[[1]]-sum(diag(S2))))
    rows[[k + 1]] <- c(p, ts, 1)    # type 1 = size (H0 data)
    rows[[k + 2]] <- c(p, ts2, 2)   # type 2 = power (alternative data)
    k <- k + 2
  }
}
Sys.time()-timer
results <- as.data.frame(do.call(rbind, rows))
names(results) <- c("p","ts","type")
# Empirical rejection rates at nominal level 0.05 using the corrected
# chi-square critical value.
large.summary <- data.frame()
for (pv in p.vec) {
  table.size <- results %>%
    filter(p==pv,type==1)
  table.power <- results %>%
    filter(p==pv,type==2)
  f <- 0.5*pv*(pv+1)   # df of the limiting chi-square
  gamma2 <- pv*(2*pv**4+6*pv**3+pv**2-12*pv-13)/(288*(pv+1))
  rho <- 1-(2*pv**2+3*pv-1)/(6*(n-1)*(pv+1))
  # Critical value with the higher-order adjustment (computed once; the
  # original repeated this expression for size and power).
  crit <- qchisq(.95,f)+gamma2/rho**2/N**2*(qchisq(.95,f+4)-qchisq(.95,f))
  pv.size <- mean(table.size$ts > crit)
  pv.power <- mean(table.power$ts > crit)
  large.summary <- rbind(large.summary, c(pv, pv.size, pv.power))
}
names(large.summary) <- c("p", "size", "power")
large.summary
# p size power
# 1 5 0.040 1
# 2 10 0.060 1
# 3 50 0.060 1
# 4 100 0.045 1
# 5 300 0.280 1
# 6 375 0.970 1 | /project/q4.R | no_license | rexarski/stat7017-big-data | R | false | false | 1,765 | r | library(mvtnorm)
library(ggplot2)
library(readr)
library(dplyr)
library(heplots)
set.seed(7017)
n <- 500
p.vec <- c(5, 10, 50, 100, 300, 375)
N <- n - 1
sim <- 200
table <- data.frame()
timer <- Sys.time()
for (i in 1:length(p.vec)) {
p <- p.vec[i]
mu <- rep(0, p)
Sigma <- diag(p)
Sigma2 <- diag(.05,p)
Sigma2[1,1] <- 1
for (s in 1:sim) {
X <- rmvnorm(n, mu, Sigma)
S <- cov(X)
rho <- 1-(2*p**2+3*p-1)/(6*(n-1)*(p+1))
ts <- -2*rho*(0.5*p*N + (0.5*N) *
(unlist(determinant(S,logarithm=T))[[1]]-sum(diag(S))))
X2 <- rmvnorm(n, mu, Sigma2)
S2 <- cov(X2)
ts2 <- -2*rho*(0.5*p*N + (0.5*N) *
(unlist(determinant(S2,logarithm=T))[[1]]-sum(diag(S2))))
table <- rbind(table,c(p,ts,1))
table <- rbind(table,c(p,ts2,2))
}
}
Sys.time()-timer
names(table) <- c("p","ts","type")
large.summary <- data.frame()
for (pv in p.vec) {
table.size <- table %>%
filter(p==pv,type==1)
table.power <- table %>%
filter(p==pv,type==2)
f <- 0.5*pv*(pv+1)
gamma2 <- pv*(2*pv**4+6*pv**3+pv**2-12*pv-13)/(288*(pv+1))
rho <- 1-(2*pv**2+3*pv-1)/(6*(n-1)*(pv+1))
pv.size <- mean(table.size$ts>(
qchisq(.95,f)+gamma2/rho**2/N**2*(qchisq(.95,f+4)-qchisq(.95,f))))
pv.power <- mean(table.power$ts>(
qchisq(.95,f)+gamma2/rho**2/N**2*(qchisq(.95,f+4)-qchisq(.95,f))))
large.summary <- rbind(large.summary, c(pv, pv.size, pv.power))
}
names(large.summary) <- c("p", "size", "power")
large.summary
# p size power
# 1 5 0.040 1
# 2 10 0.060 1
# 3 50 0.060 1
# 4 100 0.045 1
# 5 300 0.280 1
# 6 375 0.970 1 |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils_ffmt.R
\name{FFe}
\alias{FFe}
\title{Fixed format scientific}
\usage{
FFe(value, w = 12L, d = 4L, ew = 2L)
}
\arguments{
\item{ew}{}
}
\value{
vector of character strings
}
\description{
Fixed format scientific
}
| /man/FFe.Rd | no_license | CJBarry/Rflow | R | false | true | 297 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils_ffmt.R
\name{FFe}
\alias{FFe}
\title{Fixed format scientific}
\usage{
FFe(value, w = 12L, d = 4L, ew = 2L)
}
\arguments{
\item{ew}{}
}
\value{
vector of character strings
}
\description{
Fixed format scientific
}
|
mvdnorm <- function(X, mu, Sigma)
{
  # Density of the multivariate normal N(mu, Sigma) evaluated at each row of X.
  #
  # Args:
  #   X: n x p numeric matrix, one point per row.
  #   mu: length-p mean vector.
  #   Sigma: p x p covariance matrix (ginv() is used, as in the original, so
  #     a singular Sigma does not error).
  # Returns: numeric vector of n density values.
  #
  # Based on code by Benjamin Quost, 2009.09.13.
  n <- dim(X)[1]
  p <- dim(X)[2]
  # Center the points on the mean.
  Xc <- X - matrix(rep(mu, n), nrow = n, byrow = TRUE)
  # Row-wise quadratic form xc' Sigma^-1 xc, computed in O(n*p^2) instead of
  # forming the full n x n matrix diag(Xc %*% inv %*% t(Xc)).
  q <- rowSums((Xc %*% ginv(Sigma)) * Xc)
  # BUG FIX: the original normalizer used det(Sigma)^1/2, which R parses as
  # (det(Sigma)^1)/2 -- i.e. half the determinant -- not sqrt(det(Sigma)).
  density <- exp(-q/2) / ((2*pi)^(p/2) * sqrt(det(Sigma)))
  return(density)
}
| /sy09 - p14/TPS JO/SY19/Tps/TP2/Exo3/mvdnorm.r | no_license | crgarridos/SY09 | R | false | false | 390 | r | mvdnorm <- function(X, mu, Sigma)
{
# computes the density of a multivariate normal distribution
# with mean mu and covariance matrix Sigma
# at the points specified in X
# Benjamin Quost, 2009.09.13
n <- dim(X)[1]
p <- dim(X)[2]
Xc <- X - matrix(rep(mu,n),nrow=n,byrow=T)
density <- exp(-1/2*diag(Xc%*%ginv(Sigma)%*%t(Xc))) / ((2*pi)^(p/2)*det(Sigma)^1/2)
return(density)
}
|
4da1972c45e1bb55bb634cf2b6eb4072 ctrl.e#1.a#3.E#126.A#48.c#.w#5.s#45.asp.qdimacs 5213 15118 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/A1/Database/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#126.A#48.c#.w#5.s#45.asp/ctrl.e#1.a#3.E#126.A#48.c#.w#5.s#45.asp.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 91 | r | 4da1972c45e1bb55bb634cf2b6eb4072 ctrl.e#1.a#3.E#126.A#48.c#.w#5.s#45.asp.qdimacs 5213 15118 |
# Fit Tweedie mixed models testing whether transplant performance (growth)
# changes with environmental distance from a plant's home site, across
# species (sp) and a neighbor-removal treatment (nr), with site as a random
# intercept. Model comparison uses cpglmm; effect estimation uses glmmTMB
# + emmeans.
library(tidyverse)
library(cplm)
library(glmmTMB)
library(emmeans)
library(tweedie)
library(glmmTMB)
# (glmmTMB is loaded twice above -- harmless, but one call could be dropped.)
library(sjPlot)
source("scripts/tweedie_functions.r")
## Read in and prepare data for modeling
# growth = survival * shoot weight, with 0 for plants that died.
# NOTE(review): %>% binds tighter than *, so the expression below is
# srv * ifelse(srv == 0, 0, sht_wt) -- same result here since srv == 0
# already zeroes the product, but confirm this is what was intended.
dat <- read_csv("data/2015_2016_field_expt_data_6sp.csv") %>%
  mutate(growth = srv*sht_wt %>% ifelse(srv == 0, 0, .)) %>%
  filter(!is.na(growth))
# Model effects of environmental distance on performance ------------------
### Null model (no environmental distance effects)
m0 = cpglmm(growth ~ year + sp*nr + (1|site),
            link='log', data=dat, optimizer='bobyqa', control=list(max.fun=2e4))
# model diagnostics
qq_tweed(m0)
fit.obs(m0)
summary(m0)
### Environmental distance model
# Global model
m1 <- cpglmm(growth ~ year + sp*nr*env_dist + (1|site),
             link='log', data=dat, optimizer='bobyqa', control=list(max.fun=2e4))
# model diagnostics
qq_tweed(m1)
fit.obs(m1)
# test significance of distance to home environment effects
anova(m1, m0)
# Test interactions
m1.d1 <- update(m1, .~. - sp:nr:env_dist); anova(m1, m1.d1)
# refit with glmmTMB
m1_tmb <- glmmTMB(growth ~ year + sp*nr*env_dist + (1|site),
                  family = tweedie(),
                  data = dat)
# get distance to home environment effects within neighbor removal and species
# (predicted growth over env_dist 0-7 for every nr x sp combination)
dist_effects <- get_model_data(m1_tmb,
                               type = "eff",
                               terms = c("env_dist [0:7]", "nr", "sp")) %>% as_tibble()
write_csv(dist_effects, "results/env_dist_effects.csv")
# Test environmental distance effect within species and neighbor removal
three_way <- emtrends(m1_tmb, ~sp|nr, "env_dist")
plot(three_way) + geom_vline(aes(xintercept = 0), color = "gray")
summary(three_way, infer = T)
# Test for difference in strength of environmental distance effect between neighbor removal treatments within species.
three_way2 <- emtrends(m1_tmb, ~nr|sp, "env_dist")
plot(pairs(three_way2))
pairs(three_way2)
# Test "main" two-way interaction effect between neighbor removal and environmental distance.
nr_env <- emtrends(m1_tmb, ~nr, "env_dist")
plot(nr_env)
pairs(nr_env)
| /scripts/environmental_distance_models.R | no_license | andrewsiefert/trifolium | R | false | false | 2,198 | r | library(tidyverse)
## Fit Tweedie growth models testing whether distance to a plant's home
## environment predicts performance, compare candidate models, and export
## the fitted environmental-distance effects.
library(cplm)
library(glmmTMB)   # (was loaded twice; duplicate removed)
library(emmeans)
library(tweedie)
library(sjPlot)
source("scripts/tweedie_functions.r")  # provides qq_tweed() and fit.obs()

## Read in and prepare data for modeling.
## growth = survival-weighted shoot weight; explicitly 0 for non-survivors.
dat <- read_csv("data/2015_2016_field_expt_data_6sp.csv") %>%
  mutate(growth = ifelse(srv == 0, 0, srv * sht_wt)) %>%
  filter(!is.na(growth))

# Model effects of environmental distance on performance ------------------

### Null model (no environmental distance effects)
m0 <- cpglmm(growth ~ year + sp*nr + (1|site),
             link = 'log', data = dat, optimizer = 'bobyqa',
             control = list(max.fun = 2e4))

# model diagnostics
qq_tweed(m0)
fit.obs(m0)
summary(m0)

### Environmental distance model
# Global model
m1 <- cpglmm(growth ~ year + sp*nr*env_dist + (1|site),
             link = 'log', data = dat, optimizer = 'bobyqa',
             control = list(max.fun = 2e4))

# model diagnostics
qq_tweed(m1)
fit.obs(m1)

# test significance of distance to home environment effects
anova(m1, m0)

# Test the three-way interaction by dropping it and comparing fits
m1.d1 <- update(m1, . ~ . - sp:nr:env_dist)
anova(m1, m1.d1)

# refit with glmmTMB (needed for emmeans/sjPlot downstream)
m1_tmb <- glmmTMB(growth ~ year + sp*nr*env_dist + (1|site),
                  family = tweedie(),
                  data = dat)

# get distance to home environment effects within neighbor removal and species
dist_effects <- get_model_data(m1_tmb,
                               type = "eff",
                               terms = c("env_dist [0:7]", "nr", "sp")) %>%
  as_tibble()
write_csv(dist_effects, "results/env_dist_effects.csv")

# Test environmental distance effect within species and neighbor removal
three_way <- emtrends(m1_tmb, ~sp|nr, "env_dist")
plot(three_way) + geom_vline(aes(xintercept = 0), color = "gray")
summary(three_way, infer = TRUE)

# Test for difference in strength of environmental distance effect between
# neighbor removal treatments within species.
three_way2 <- emtrends(m1_tmb, ~nr|sp, "env_dist")
plot(pairs(three_way2))
pairs(three_way2)

# Test "main" two-way interaction effect between neighbor removal and
# environmental distance.
nr_env <- emtrends(m1_tmb, ~nr, "env_dist")
plot(nr_env)
pairs(nr_env)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_url_tweetsv2.R
\name{get_url_tweets}
\alias{get_url_tweets}
\title{Get tweets containing URL}
\usage{
get_url_tweets(
query,
start_tweets,
end_tweets,
bearer_token,
file = NULL,
data_path = NULL,
bind_tweets = TRUE,
verbose = TRUE
)
}
\arguments{
\item{query}{string, url}
\item{start_tweets}{string, starting date}
\item{end_tweets}{string, ending date}
\item{bearer_token}{string, bearer token}
\item{file}{string, name of the resulting RDS file}
\item{data_path}{string, if supplied, fetched data can be saved to the designated path as jsons}
\item{bind_tweets}{If \code{TRUE}, tweets captured are bound into a data.frame for assignment}
\item{verbose}{If \code{FALSE}, query progress messages are suppressed}
}
\value{
a data.frame
}
\description{
This function collects tweets containing a given url between specified date ranges.
Tweet-level data is stored in a data/ path as a series of JSONs beginning "data_"; User-level data is stored as a series of
JSONs beginning "users_". If a filename is supplied, this function will save the result as a RDS file, otherwise,
it will return the results as a dataframe.
}
\examples{
\dontrun{
bearer_token <- "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
tweets <- get_url_tweets("https://www.theguardian.com/environment/2020/jan/07/save-the-planet-guide-fighting-climate-crisis-veganism-flying-earth-emergency-action",
"2020-01-01T00:00:00Z", "2020-04-04T00:00:00Z", bearer_token, data_path = "data/")
}
}
| /man/get_url_tweets.Rd | permissive | haluong89-bcn/academictwitteR | R | false | true | 1,556 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_url_tweetsv2.R
\name{get_url_tweets}
\alias{get_url_tweets}
\title{Get tweets containing URL}
\usage{
get_url_tweets(
query,
start_tweets,
end_tweets,
bearer_token,
file = NULL,
data_path = NULL,
bind_tweets = TRUE,
verbose = TRUE
)
}
\arguments{
\item{query}{string, url}
\item{start_tweets}{string, starting date}
\item{end_tweets}{string, ending date}
\item{bearer_token}{string, bearer token}
\item{file}{string, name of the resulting RDS file}
\item{data_path}{string, if supplied, fetched data can be saved to the designated path as jsons}
\item{bind_tweets}{If \code{TRUE}, tweets captured are bound into a data.frame for assignment}
\item{verbose}{If \code{FALSE}, query progress messages are suppressed}
}
\value{
a data.frame
}
\description{
This function collects tweets containing a given url between specified date ranges.
Tweet-level data is stored in a data/ path as a series of JSONs beginning "data_"; User-level data is stored as a series of
JSONs beginning "users_". If a filename is supplied, this function will save the result as a RDS file, otherwise,
it will return the results as a dataframe.
}
\examples{
\dontrun{
bearer_token <- "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
tweets <- get_url_tweets("https://www.theguardian.com/environment/2020/jan/07/save-the-planet-guide-fighting-climate-crisis-veganism-flying-earth-emergency-action",
"2020-01-01T00:00:00Z", "2020-04-04T00:00:00Z", bearer_token, data_path = "data/")
}
}
|
#2
# Run a linear regression of price_twd_msq (house price per square meter)
# on n_convenience (number of nearby convenience stores).
lm(price_twd_msq ~ n_convenience, data = taiwan_real_estate)

# Selected answer for the multiple-choice question.
# (The original assigned 2 and then immediately overwrote it with 1; the
# dead first assignment has been removed -- net value is unchanged.)
DM.result <- 1
| /Introduction to regression in R/Simple linear regression/2.R | no_license | SaiSharanyaY/DataCamp-Data-Scientist-with-R-Track. | R | false | false | 166 | r | #2
# Run a linear regression of price_twd_msq vs. n_convenience
lm(formula= price_twd_msq~n_convenience, data=taiwan_real_estate)
DM.result = 2
DM.result = 1
|
# to get nice looking tables
# NOTE(review): removing/installing packages inside an analysis script is a
# side effect on the library -- consider moving these two lines to a one-off
# setup script.
remove.packages("plotROC")
devtools::install_github("hadley/ggplot2")
library(pander)
library(caret)
library(ggplot2)
library(psych)
library(OptimalCutpoints)
# Frequency summary of the outcome variable.
# NOTE(review): `ichscore` is not loaded anywhere in this script -- presumably
# read interactively beforehand; confirm.
describe(ichscore$outcome)
# a utility important function for % freq tables
# Percentage frequency table of x, rounded to one decimal place.
# `caption` is accepted for call-site compatibility but is not used
# (it is never evaluated, so callers may omit it).
frqtab <- function(x, caption) {
  pct <- prop.table(table(x)) * 100
  round(pct, digits = 1)
}
# utility function to summarize model comparison results
# Summarise a caret::confusionMatrix result as a named list of cell counts
# and headline statistics, each rounded to 2 decimal places.
# Assumes a 2x2 confusion table with the negative class in row/column 1.
sumpred <- function(cm) {
  out <- list(
    TN   = cm$table[1, 1],            # true negatives
    TP   = cm$table[2, 2],            # true positives
    FN   = cm$table[1, 2],            # false negatives
    FP   = cm$table[2, 1],            # false positives
    acc  = cm$overall["Accuracy"],    # overall accuracy
    sens = cm$byClass["Sensitivity"], # sensitivity (recall)
    spec = cm$byClass["Specificity"]  # specificity
  )
  lapply(out, round, digits = 2)
}
# Amelia: visual map of missing data
install.packages("Amelia")
library(Amelia)
missmap(ichscore, main = "Missingness Map Train")
str(ichscore)
summary(ichscore)
# NOTE(review): `Book1` is not created in this script -- presumably loaded
# interactively; confirm before running top-to-bottom.
str(Book1)
# Recode integer/factors
ichscore$sex = as.factor(ichscore$sex)
ichscore$tentorium = as.factor(ichscore$tentorium)
ichscore$VE = as.factor(ichscore$VE)
ichscore$volume = as.factor(ichscore$volume)
ichscore$midline_shift = as.factor(ichscore$midline_shift)
# Recode outcome as factor
ichscore$outcome = as.factor(ichscore$outcome)
# Divide data into stratified train (75%) and test (25%) sets
trainindex <- createDataPartition(ichscore$outcome, p=0.75, list=FALSE)
trainset <- ichscore[trainindex,]
testset <- ichscore[-trainindex,]
# Data visualisation and exploration
summary(trainset)
summary(testset)
# crosstabs for categorical variables vs outcome
p = table(trainset[,c("outcome", "sex")])
q= table(trainset[,c("outcome", "VE")])
r= table(trainset[,c("outcome", "tentorium")])
s =table(trainset[,c("outcome", "volume")])
# List multiple tables together
# NOTE(review): `t` shadows base::t() (matrix transpose) for the rest of the
# session -- consider another name.
t = list(p,q,r,s)
t
# Boxplots for continuous variables
install.packages("fields")
library(fields)
par(mfrow=c(2,2))  # 2x2 plot grid
bplot.xy(trainset$outcome, trainset$Age)
bplot.xy(trainset$outcome, trainset$RBS)
bplot.xy(trainset$outcome, trainset$GCS)
bplot.xy(trainset$outcome, trainset$MAP)
str(trainset)
# ggplot objects
# Multiplot function
# Arrange multiple ggplot objects on one page (Cookbook for R's multiplot).
# ...      : ggplot objects
# plotlist : alternative/additional list of ggplot objects
# file     : unused (kept for compatibility with the original recipe)
# cols     : number of columns in the layout grid
# layout   : optional matrix; cell value i places plot i at that position
#            (overrides `cols` when supplied)
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
  library(grid)
  # Make a list from the ... arguments and plotlist
  plots <- c(list(...), plotlist)
  numPlots = length(plots)
  # If layout is NULL, then use 'cols' to determine layout
  if (is.null(layout)) {
    # Make the panel
    # ncol: Number of columns of plots
    # nrow: Number of rows needed, calculated from # of cols
    layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),
                     ncol = cols, nrow = ceiling(numPlots/cols))
  }
  if (numPlots==1) {
    print(plots[[1]])
  } else {
    # Set up the page
    grid.newpage()
    pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
    # Make each plot, in the correct location
    for (i in 1:numPlots) {
      # Get the i,j matrix positions of the regions that contain this subplot
      matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
      print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
                                      layout.pos.col = matchidx$col))
    }
  }
}
# Boxplots of each continuous predictor by outcome.
Ap =ggplot(aes(x =outcome , y = Age), data = ichscore) + geom_boxplot (aes(fill= outcome))
Bp = ggplot(aes(x =outcome , y = RBS), data = ichscore) + geom_boxplot (aes(fill= outcome))
Cp = ggplot(aes(x =outcome , y = GCS), data = ichscore) + geom_boxplot (aes(fill= outcome))
Dp = ggplot(aes(x =outcome , y = MAP), data = ichscore) + geom_boxplot (aes(fill= outcome))
# NOTE(review): this first multiplot() call references Ep/Fp/Gp/Hp, which are
# only defined BELOW -- it will error on a clean top-to-bottom run.  The
# identical call at the end of this section (after they exist) is the one
# that works; this line is likely a leftover and can be skipped.
Mp =multiplot(Ap , Bp, Cp, Dp,Ep,Fp,Gp,Hp, cols=2)
# Plotting categorical by categorical variable (dodged counts by outcome)
Ep = ggplot(ichscore, aes(outcome, ..count..)) + geom_bar(aes(fill = volume), position = "dodge")
Fp =ggplot(ichscore, aes(outcome, ..count..)) + geom_bar(aes(fill = VE), position = "dodge")
Gp = ggplot(ichscore, aes(outcome, ..count..)) + geom_bar(aes(fill = tentorium), position = "dodge")
Hp = ggplot(ichscore, aes(outcome, ..count..)) + geom_bar(aes(fill = sex), position = "dodge")
Ip= ggplot(ichscore, aes(outcome, ..count..)) + geom_bar(aes(fill = midline_shift), position = "dodge")
Mp = multiplot(Ap , Bp, Cp, Dp,Ep,Fp,Gp,Hp, cols=2)
# ---- Revised ICH score ----
# Derived GCS component: GCS 3-4 -> 2 points, 5-12 -> 1 point, 13-15 -> 0.
# (BUGFIX: the original innermost ifelse() was `ifelse(GCS<=15, 0)` with no
# `no` argument, which errors at runtime; since GCS never exceeds 15 the
# remaining branch is simply 0.)
ichscore$GCSscore2 <- ifelse(ichscore$GCS <= 4, 2,
                             ifelse(ichscore$GCS <= 12, 1, 0))
# Age component: 1 point for age >= 80.
ichscore$Age2 <- ifelse(ichscore$Age < 80, 0, 1)
# Revised ICH score = imaging components + age + GCS points.
# NOTE(review): VE/volume/tentorium were converted to factors above, so
# as.numeric() returns level codes (1/2) rather than the underlying 0/1 --
# the score is shifted by a constant; confirm before interpreting absolute
# values or published cutoffs.
ichscore$rev.ich <- as.numeric(ichscore$VE) + as.numeric(ichscore$volume) +
  as.numeric(ichscore$tentorium) + ichscore$Age2 + ichscore$GCSscore2
# Plotting ROC curve and optimal cut off
library(ROCR)
# Youden-style optimal cutpoint from ROCR objects: for each fpr/tpr/cutoff
# triple, find the point with minimum Euclidean distance to the perfect
# classifier corner (fpr = 0, tpr = 1) and return its sensitivity,
# specificity and cutoff.
# perf : ROCR performance object ("tpr" vs "fpr")
# pred : the matching ROCR prediction object (supplies cutoffs)
opt.cut = function(perf, pred){
  cut.ind = mapply(FUN=function(x, y, p){
    # distance of each ROC point (x = fpr, y = tpr) from (0, 1)
    d = (x - 0)^2 + (y-1)^2
    ind = which(d == min(d))
    c(sensitivity = y[[ind]], specificity = 1-x[[ind]],
      cutoff = p[[ind]])
  }, perf@x.values, perf@y.values, pred@cutoffs)
}
# creating prediction and performance objects
# NOTE(review): `hemorrhage` is never created in this script -- presumably an
# interactively-loaded data frame; confirm before running.
ichscore_pred = prediction(hemorrhage$ICH_score, hemorrhage$outcome)
perf_ich <- performance(ichscore_pred, "tpr", "fpr")
# NOTE(review): add = TRUE assumes a base ROC plot already exists on the
# device; this errors if run on a fresh graphics device.
plot(perf_ich, add = TRUE, col = "green")
print(opt.cut(perf_ich,ichscore_pred ))
# creating prediction and performance objects (revised score this time)
ichscore_pred = prediction(ichscore$rev.ich, ichscore$outcome)
perf_ich <- performance(ichscore_pred, "tpr", "fpr")
plot(perf_ich, add = TRUE, col = "green")
x = print(opt.cut(perf_ich,ichscore_pred ))
# NOTE(review): `perf` is undefined -- probably meant perf_ich.
plot(perf)
library(plotROC)
ROCd = calculate_roc(ichscore$rev.ich, ichscore$outcome)
ggroc(ROCd)
hemorrhage
# Data Pre processing
# A. Handling missing values/unrelated variables in
# 1. Remove columns/variables with many NAs or which are unrelated like this
myvars <- names(trainset) %in% c("RMCH.score", "X", "X.1","sICH.score", "midline_shift")
trainset <- trainset[!myvars]
myvars2 = names(trainset) %in% c( "midline_shift")
trainset <- trainset[!myvars2]
# Remove these columns from test set as well
myvars3 <- names(testset) %in% c("RMCH.score", "X", "X.1","sICH.score", "midline_shift")
testset <- testset[!myvars3]
# Putting mean/median values in NAs
# NOTE(review): midline_shift was just DROPPED above, so this imputation
# operates on a non-existent column (trainset$midline_shift is NULL) and
# will fail / do nothing useful -- either keep the column or delete this line.
trainset$midline_shift <- ifelse(is.na(trainset$midline_shift), mean(trainset$midline_shift, na.rm = TRUE), trainset$midline_shift)
# Putting median values in NA
# Putting Principal Component/Dimensionality reduction
# Taking complete cases only from remaining cases
# 1list rows of data that have missing values
trainset[!complete.cases(trainset),]
#trainset <- na.omit(trainset)
# 2. Data transformations
library(caret)
library(randomForest)
str(trainset)
# You should train the data on newichtrain and test accuracy on newichtest
#RF
# training and boot sampling can be optimized like
# Shared caret control: 5-fold CV repeated 5 times, class probabilities on,
# ROC/Sens/Spec via twoClassSummary.
fitControl <- trainControl(method = "repeatedcv",number = 5,repeats = 5,
                           ## Estimate class probabilities
                           classProbs = TRUE,
                           ## Evaluate performance using
                           ## the following function
                           summaryFunction = twoClassSummary)
# metric = "ROC" can be added
# see example in modelSvm2
# Recode outcome to valid R variable names (required by classProbs = TRUE).
# NOTE(review): outcome is a factor here; `== 1` compares against the label
# "1" -- confirm the original coding was 0/1.
ichscore$outcome = ifelse(ichscore$outcome==1,"Dead","Alive")
trainset$outcome = ifelse(trainset$outcome==1,"Dead","Alive")
testset$outcome = ifelse(testset$outcome==1,"Dead","Alive")
# ROCmetric: fit RF, logistic regression and naive Bayes with the shared
# repeated-CV control, optimising ROC AUC.
modelRF2 <- train(as.factor(outcome) ~ Age + sex + MAP +
                    RBS + GCS + tentorium + VE + volume , # outcome modeled on clinical + imaging predictors
                  data = trainset, # Use the trainSet dataframe as the training data
                  method = "rf",# random forest
                  metric = "ROC",
                  trControl = fitControl)
modelRF2
modelLR2 <- train(as.factor(outcome) ~ Age + sex + MAP +
                    RBS + GCS + tentorium + VE + volume , # outcome modeled on clinical + imaging predictors
                  data = trainset, # Use the trainSet dataframe as the training data
                  method = "glm",# logistic regression (binomial GLM)
                  family = "binomial",
                  metric = "ROC",
                  trControl = fitControl
)
modelLR2
modelNB2 <- train(as.factor(outcome) ~ Age + sex + MAP +
                    RBS + GCS + tentorium + VE + volume , # outcome modeled on clinical + imaging predictors
                  data = trainset, # Use the trainSet dataframe as the training data
                  method = "nb",# naive Bayes
                  metric = "ROC",
                  trControl = fitControl
)
modelNB2
# comparative Results in ROC (resampled ROC/Sens/Spec distributions)
results2 <- resamples(list( LR = modelLR2, RF = modelRF2 , NB = modelNB2))
summary(results2)
# ROC curve draw: overlay ROC curves for the three models plus the ICH score.
library(pROC)
library(ROCR)
rf_pred <- predict(modelRF2, newdata = testset, type = "prob")
nb_pred <- predict(modelNB2, newdata = testset, type = "prob")
lr_pred <- predict(modelLR2, newdata = testset, type = "prob")
# Column 2 of the prob matrix is taken as the positive-class probability.
predRF <- prediction(rf_pred[,2], testset$outcome)
perfRF <- performance(predRF, "tpr", "fpr")
plot(perfRF, main = "ROC curves for Randomforest,NaiveBayes,LogisticRegression")
# Generate an ROC curve for the naive Bayes method
predNB <- prediction(nb_pred[,2], testset$outcome)
perf_nb <- performance(predNB, "tpr", "fpr")
plot(perf_nb, add = TRUE, col = "blue")
# Generate an ROC curve for the logistic regression method
predLR <- prediction(lr_pred[,2], testset$outcome)
perf_lr <- performance(predLR, "tpr", "fpr")
plot(perf_lr, add = TRUE, col = "red")
# Generate ROC curve for ICHScore
ichscore_pred = prediction(ichscore$rev.ich, ichscore$outcome)
perf_ich <- performance(ichscore_pred, "tpr", "fpr")
plot(perf_ich, add = TRUE, col = "green")
# Add legends to plot
legend("right", legend = c("randomForest", "
       nb", "lr","ICHscore"), bty = "n", cex = 1, lty = 1,
       col = c("black", "blue", "red","green"))
# Generating ggROC
# NOTE(review): detaching/reinstalling packages mid-script is fragile --
# consider doing this once in a setup script instead.
detach("package:ggplot2", unload=TRUE)
detach("package:plotROC", unload=TRUE)
detach("package:caret", unload=TRUE)
devtools::install_github("hadley/ggplot2")
devtools::install_github("sachsmc/plotROC")
library(ggplot2)
library(plotROC)
library(plotROC)
test = data.frame(lr_pred[,2],rf_pred[,2],nb_pred[,2],testset$ICH_score,testset$outcome)
str(test)
library(reshape2)
# Melt to long format for plotROC (one row per score per observation).
K =melt(test)
longtest <- melt_roc(test, "testset.outcome", c("lr_pred", "rf_pred","nb_pred"))
ggplot(K, aes(d = testset.outcome, m = value, color = variable)) + geom_roc() + style_roc()
# ROC for trainset
rf_predA <- predict(modelRF2, newdata = trainset, type = "prob")
nb_predA <- predict(modelNB2, newdata = trainset, type = "prob")
lr_predA <- predict(modelLR2, newdata = trainset, type = "prob")
# preparing data in format for ggROC, melt and cast with reshape2
testA = data.frame(lr_predA[,2],rf_predA[,2],nb_predA[,2],trainset$ICH_score,trainset$outcome)
longtestA = melt(testA)
str(longtestA)
# Rename variable Factors in longtest (data.frame mangled the column names)
library(plyr)
longtestA$variable = revalue(longtestA$variable, c("lr_predA...2."="LR", "rf_predA...2."="RF" ,"nb_predA...2." ="NB","trainset.ICH_score" = "ICHscore" ))
ggplot(longtestA, aes(d = trainset.outcome, m = value, color = variable)) + geom_roc() + style_roc()
testset$outcome <- as.factor(testset$outcome)
# Confusion matrices on the held-out test set.
# BUGFIX: the originals compared test-set predictions against
# trainset$outcome (different observations and length) -- the reference must
# be testset$outcome.
RFpredict <- predict(modelRF2, newdata = testset)
cm1 <- confusionMatrix(RFpredict, reference = testset$outcome, positive = "Dead")
LRpredict <- predict(modelLR2, newdata = testset)
cm2 <- confusionMatrix(LRpredict, reference = testset$outcome, positive = "Dead")
NBpredict <- predict(modelNB2, newdata = testset)
cm3 <- confusionMatrix(NBpredict, reference = testset$outcome, positive = "Dead")
# Summarise counts and headline stats per model and tabulate.
m1 <- sumpred(cm1)
m2 <- sumpred(cm2)
m3 <- sumpred(cm3)
model_comp <- as.data.frame(rbind(m1, m2, m3))
rownames(model_comp) <- c("RandomForest", "LogisticRegression", "NaiveBayes")
pander(model_comp, style="rmarkdown", split.tables=Inf, keep.trailing.zeros=TRUE,
       caption="Model results when comparing predictions and test set")
library(randomForest)
# Plot of variable importance for the tuned random forest
varImpPlot(modelRF2$finalModel)
set.seed(7)
modelRF <- train(outcome ~ Age + sex + MAP +
                   RBS + GCS + tentorium + VE + volume , # outcome modeled on clinical + imaging predictors
                 data = trainset, # Use the trainSet dataframe as the training data
                 method = "rf",# random forest
                 trControl = trainControl(method = "cv", # Use cross-validation
                                          number = 10) # 10 folds
)
modelRF
#GBM
set.seed(7)
modelGbm <- train(outcome ~ Age + sex + MAP +
                    RBS + GCS + tentorium + VE + volume , # outcome modeled on clinical + imaging predictors
                  data = trainset, # Use the trainSet dataframe as the training data
                  method = "gbm",# gradient boosting machine
                  trControl = trainControl(method = "cv", # Use cross-validation
                                           number = 10) # 10 folds
)
#
# Tuned GBM optimising ROC over a user-supplied tuning grid.
# BUGFIX: the original did not parse -- commas were missing after
# `metric = "ROC"` and after `summaryFunction = twoClassSummary`, and
# `number = 10` sat outside the argument list.  Also, `repeats` is only
# honoured by method = "repeatedcv", not "cv".
set.seed(7)
modelGbm2 <- train(outcome ~ Age + sex + MAP +
                     RBS + GCS + tentorium + VE + volume,
                   data = trainset,
                   method = "gbm",
                   verbose = FALSE,
                   # NOTE(review): gbmGrid is not defined in this script --
                   # build it (expand.grid of n.trees / interaction.depth /
                   # shrinkage / n.minobsinnode) before running.
                   tuneGrid = gbmGrid,
                   ## Specify which metric to optimize
                   metric = "ROC",
                   trControl = trainControl(method = "repeatedcv",
                                            number = 10,
                                            repeats = 10,
                                            ## Estimate class probabilities
                                            classProbs = TRUE,
                                            ## Evaluate performance using
                                            ## the following function
                                            summaryFunction = twoClassSummary))
modelGbm2
# SVM (radial kernel), plain 10-fold CV
set.seed(7)
modelSvm <- train(outcome ~ Age + sex + MAP +
                    RBS + GCS + tentorium + VE + volume , # outcome modeled on clinical + imaging predictors
                  data = trainset, # Use the trainSet dataframe as the training data
                  method = "svmRadial",# radial-kernel SVM
                  trControl = trainControl(method = "cv", # Use cross-validation
                                           number = 10) # 10 folds
)
# SVM variant using the shared ROC-optimising control (fitControl)
modelSVm2 <- train(outcome ~ Age + sex + MAP +
                     RBS + GCS + tentorium + VE + volume , # outcome modeled on clinical + imaging predictors
                   data = trainset, # Use the trainSet dataframe as the training data
                   method = "svmRadial",# radial-kernel SVM
                   metric = "ROC",
                   trControl = fitControl
)
#LVQ
set.seed(7)
modelLvq <- train(outcome ~ Age + sex + MAP +
                    RBS + GCS + tentorium + VE + volume , # outcome modeled on clinical + imaging predictors
                  data = trainset, # Use the trainSet dataframe as the training data
                  method = "lvq",# learning vector quantization
                  trControl = trainControl(method = "cv", # Use cross-validation
                                           number = 10) # 10 folds
)
#LR
set.seed(7)
# NOTE(review): this model is fit on the FULL ichscore data, not trainset --
# its test-set performance below is optimistic (test rows were seen in
# training); confirm whether `data = trainset` was intended.
modelLR <- train(outcome ~ Age + sex + MAP +
                   RBS + GCS + tentorium + VE + volume , # outcome modeled on clinical + imaging predictors
                 data = ichscore, # full dataset (see note above)
                 method = "glm",# binomial GLM
                 family = "binomial", # for logistic
                 trControl = trainControl(method = "cv", # Use cross-validation
                                          number = 10) # 10 folds
)
# Naive Bayes
set.seed(7)
modelNB <- train(outcome ~ Age + sex + MAP +
                   RBS + GCS + tentorium + VE + volume , # outcome modeled on clinical + imaging predictors
                 data = trainset, # Use the trainSet dataframe as the training data
                 method = "nb",# naive Bayes
                 trControl = trainControl(method = "cv", # Use cross-validation
                                          number = 10) # 10 folds
)
# Compare resampled CV performance of the plain-CV models.
results <- resamples(list( LR = modelLR, RF = modelRF , NB = modelNB))
summary(results)
# Predicting on test
#Checking structure of testset
str(testset)
# Fixing NAs in testset
# Fixing NAs in test case
# Putting median values in NA
# Putting Principal Component/Dimensionality reduction
# Taking complete cases only from remaining cases
# 1list rows of data that have missing values
#testset[!complete.cases(testset),]
#testset <- na.omit(testset)
# Held-out confusion matrices for all six models.
# NOTE(review): outcome was recoded to "Dead"/"Alive" above, so
# positive="1" in cm3/cm4/cm6 cannot match any level and will error --
# presumably these should be positive="Dead" like the others.
RFpredict1 <- predict(modelRF, newdata = testset)
cm1 <- confusionMatrix(RFpredict1, reference = testset$outcome,positive="Dead" )
LRpredict = predict(modelLR, newdata = testset)
cm2 <- confusionMatrix(LRpredict, testset$outcome,positive="Dead" )
Gbmpredict = predict(modelGbm, newdata = testset)
cm3 = confusionMatrix(Gbmpredict, testset$outcome,positive="1" )
Lvqpredict = predict(modelLvq, newdata = testset)
cm4 = confusionMatrix(Lvqpredict, testset$outcome,positive="1" )
NBpredict = predict(modelNB, newdata = testset)
cm5 = confusionMatrix(NBpredict, testset$outcome,positive="Dead" )
Svmpredict = predict(modelSvm, newdata = testset)
cm6 = confusionMatrix(Svmpredict, testset$outcome,positive="1" )
# Summarise and tabulate all models side by side.
m1 <- sumpred(cm1)
m2 <- sumpred(cm2)
m3 <- sumpred(cm3)
m4 <- sumpred(cm4)
m5 <- sumpred(cm5)
m6 <- sumpred(cm6)
model_comp <- as.data.frame(rbind(m1, m2, m3, m4,m5,m6))
# model_comp <- as.data.frame(rbind(m1, m2,m5))
rownames(model_comp) <- c("RandomForest", "LogisticRegression", "GradientBoostingMachine", "LearningVectorQuantization","NaiveBayes","SupportVectorMachine")
# rownames(model_comp) <- c("RandomForest", "LogisticRegression", "NaiveBayes")
pander(model_comp, style="rmarkdown", split.tables=Inf, keep.trailing.zeros=TRUE,
       caption="Model results when comparing predictions and test set")
# Plot of Importance
varImpPlot(modelRF2$finalModel)
# Predicting new data ------------------------------------------------------
# BUGFIX: the originals used `<-` inside data.frame(), which (a) assigns to
# global variables as a side effect and (b) names the columns from the
# deparsed expression (e.g. "Age....c.80."), so predict() could not match
# the training column names.  `=` gives correctly named columns.  The
# duplicated Age column has also been removed.
temps <- data.frame(Age       = 80,
                    sex       = as.factor(1),
                    MAP       = 220,
                    RBS       = 340,
                    GCS       = 7,
                    tentorium = as.factor(1),
                    VE        = as.factor(1),
                    volume    = as.factor(1))
temps$outcome <- predict(modelRF, newdata = temps)
temps$outcome

# calculation of new ich (Shiny server context: `input$*` are reactive inputs)
temps <- data.frame(Age       = input$a,
                    Sex       = as.factor(input$b),
                    MAP       = input$c,
                    RBS       = input$e,
                    GCS       = input$d,
                    tentorium = as.factor(input$f),
                    VE        = as.factor(input$h),
                    Volume    = as.factor(input$g))
# GCS component: 3-4 -> 2, 5-12 -> 1, 13-15 -> 0.
# (BUGFIX: innermost ifelse() was missing its `no` argument.)
i <- ifelse(input$d <= 4, 2, ifelse(input$d <= 12, 1, 0))
# Age component: 1 point for age >= 80.
j <- ifelse(input$a < 80, 0, 1)
# Total score = imaging components + GCS + age points.
k <- input$g + input$h + input$f + i + j
# Score <= 3 predicts survival.
newich <- ifelse(k <= 3, "Alive", "Dead")
p <- c ( " Machine learning algorithms used in Study " )
p
# ROC curve (ROCR-based, standalone randomForest fit) ----------------------
# (previously marked "not working" -- fixes below)
install.packages("ROCR")
library(ROCR)
RFpredict1 <- predict(modelRF, newdata = testset)
# BUGFIX: `refernce` typo meant the reference was never passed by name, and
# positive="1" cannot match the "Dead"/"Alive" recoding used above.
cm1 <- confusionMatrix(RFpredict1, reference = testset$outcome, positive = "Dead")
RFpredict2 <- predict(modelSVm2, newdata = testset, type = "prob")
gbmProbs <- predict(modelGbm, newdata = testset, type = "prob")
head(RFpredict2, n = 4)
# Refit a randomForest directly so ROCR can use its class probabilities.
adult.rf <- randomForest(outcome ~ Age + sex + MAP +
                           RBS + GCS + tentorium + VE + volume,
                         data = trainset, mtry = 2, ntree = 1000,
                         keep.forest = TRUE, importance = TRUE, test = testset)
# generate probabilities instead of class labels: type="prob" makes
# randomForest return probabilities for both class labels; [,2] selects the
# positive-class column.
# (BUGFIX: a stray HTML "<span ...>" line pasted from a web tutorial sat
# here and made the script unparseable -- removed.)
adult.rf.pr <- predict(adult.rf, type = "prob", newdata = testset)[, 2]
# prediction() is the ROCR constructor pairing scores with labels
adult.rf.pred <- prediction(adult.rf.pr, testset$outcome)
# performance in terms of true and false positive rates
adult.rf.perf <- performance(adult.rf.pred, "tpr", "fpr")
# plot the curve with the chance diagonal
plot(adult.rf.perf, main = "ROC Curve for Random Forest", col = 2, lwd = 2)
abline(a = 0, b = 1, lwd = 2, lty = 2, col = "gray")
# compute area under curve
auc <- performance(adult.rf.pred, "auc")
auc <- unlist(slot(auc, "y.values"))
minauc <- min(round(auc, digits = 2))
maxauc <- max(round(auc, digits = 2))
minauct <- paste0("min(AUC) = ", minauc)
maxauct <- paste0("max(AUC) = ", maxauc)
### Recode imaging factors back to numeric and find the optimal score cutoff
# NOTE(review): `hemorrhage` is not created in this script -- confirm it is
# loaded before this section.
ichscore$rev.ich <- hemorrhage$VE + hemorrhage$volume + hemorrhage$tentorium +
  hemorrhage$Age2 + hemorrhage$GCSscore2
hemorrhage$nu.ich <- ichscore
hemorrhage$VE <- ifelse(hemorrhage$VE == 1, 0, 1)
hemorrhage$volume <- ifelse(hemorrhage$volume == 1, 0, 1)
hemorrhage$tentorium <- ifelse(hemorrhage$tentorium == 1, 0, 1)
hemorrhage$VE <- as.integer(factor(hemorrhage$VE))
hemorrhage$volume <- as.integer(factor(hemorrhage$volume))
hemorrhage$tentorium <- as.integer(factor(hemorrhage$tentorium))
# Convert a factor's LABELS (not level codes) back to numeric.
as.numeric.factor <- function(x) {as.numeric(levels(x))[x]}
ichscore$VE <- as.numeric.factor(ichscore$VE)
ichscore$volume <- as.numeric.factor(ichscore$volume)
ichscore$tentorium <- as.numeric.factor(ichscore$tentorium)
# Optimal cut off via Youden's index.
# BUGFIX: package name was misspelled ("OptimalCutpoins"), and a first call
# on an undefined data frame `df` (immediately overwritten) has been removed.
library(OptimalCutpoints)
optimal.cutpoint.Youden <- optimal.cutpoints(X = "rev.ich", status = "outcome",
                                             tag.healthy = 0,
                                             methods = "Youden", data = ichscore)
summary(optimal.cutpoint.Youden)
print(optimal.cutpoint.Youden)
# BUGFIX: `opimal` typo referenced a non-existent object.
str(optimal.cutpoint.Youden)
G <- list(optimal.cutpoint.Youden,
          optimal.cutpoint.Youden$Youden$Global$optimal.cutoff)
G
# Correlation matrix in R
library(magrittr)
library(dplyr)
library(corrplot)
n = Book1 %>% dplyr::select(Age,MAP,RBS,GCS)
mycorrelationmatrix <- cor(n)
col <- colorRampPalette(c("#BB4444", "#EE9988", "#FFFFFF", "#77AADD", "#4477AA"))
corrplot::corrplot(mycorrelationmatrix, method="shade", shade.col=NA, tl.col="black", tl.srt=45, col=col(200), addCoef.col="black")
# NOTE(review): this passes the raw data frame `n`, not a correlation
# matrix -- corrplot() expects a correlation matrix and this call will fail;
# the line above (on mycorrelationmatrix) is the working version.
corrplot::corrplot(n, method="shade", shade.col=NA, tl.col="black", tl.srt=45, col=col(200), addCoef.col="black")
# Rename variable in R with dplyr
library(dplyr)
# NOTE(review): `df`/`oldvalue`/`X2`... are not defined here -- this line is
# a template from a tutorial; adapt or remove.
n = select(df, newvalue = oldvalue ,Age = X2 , RBS = X3 , GCS =X4)
select(Book1, Shift = Midlineshift)
Book2 = rename(Book1, shift =
                 Midlineshift)
Book4 = rename(Book2,shift = Shift, age = Age)
# Recode a variable with R (plyr::revalue maps old labels to new)
library(plyr)
str(Book2)
Book2$Outcome = revalue(as.factor(Book2$Outcome) , c("0" = "Dead", "1" ="Alive"))
str(Book2)
levels(Book2$Outcome)
# This has two levels Dead and Alive
# Relevel
#http://www.cookbook-r.com/Manipulating_data/Recoding_data/
#https://stat545-ubc.github.io/block014_factors.html
# data$scode[data$sex=="M"] <- "1"
# NOTE(review): `data` is undefined here; recipe lines only.
data$scode[data$sex=="F"] <- "2"
# Convert the column to a factor
data$scode <- factor(data$scode)
| /Machine learningexper2.R | no_license | anupamsingh81/myprojects | R | false | false | 23,965 | r | # to get nice looking tables
# NOTE(review): removing/installing packages inside an analysis script is a
# side effect on the library -- consider moving these two lines to a one-off
# setup script.
remove.packages("plotROC")
devtools::install_github("hadley/ggplot2")
library(pander)
library(caret)
library(ggplot2)
library(psych)
library(OptimalCutpoints)
# Frequency summary of the outcome variable (ichscore assumed pre-loaded).
describe(ichscore$outcome)
# a utility important function for % freq tables
# Percentage frequency table of x, rounded to one decimal place.
# `caption` is accepted for call-site compatibility but is not used
# (it is never evaluated, so callers may omit it).
frqtab <- function(x, caption) {
  pct <- prop.table(table(x)) * 100
  round(pct, digits = 1)
}
# utility function to summarize model comparison results
# Summarise a caret::confusionMatrix result as a named list of cell counts
# and headline statistics, each rounded to 2 decimal places.
# Assumes a 2x2 confusion table with the negative class in row/column 1.
sumpred <- function(cm) {
  out <- list(
    TN   = cm$table[1, 1],            # true negatives
    TP   = cm$table[2, 2],            # true positives
    FN   = cm$table[1, 2],            # false negatives
    FP   = cm$table[2, 1],            # false positives
    acc  = cm$overall["Accuracy"],    # overall accuracy
    sens = cm$byClass["Sensitivity"], # sensitivity (recall)
    spec = cm$byClass["Specificity"]  # specificity
  )
  lapply(out, round, digits = 2)
}
# amelia to look for misssing data
install.packages("Amelia")
library(Amelia)
missmap(ichscore, main = "Missingness Map Train")
str(ichscore)
summary(ichscore)
str(Book1)
# Recode integer/factors
ichscore$sex = as.factor(ichscore$sex)
ichscore$tentorium = as.factor(ichscore$tentorium)
ichscore$VE = as.factor(ichscore$VE)
ichscore$volume = as.factor(ichscore$volume)
ichscore$midline_shift = as.factor(ichscore$midline_shift)
# Recode oucome as factor
ichscore$outcome = as.factor(ichscore$outcome)
# Divide data into train and test
trainindex <- createDataPartition(ichscore$outcome, p=0.75, list=FALSE)
trainset <- ichscore[trainindex,]
testset <- ichscore[-trainindex,]
# Data visualisation and exploration
summary(trainset)
summary(testset)
# crosstabs for categorical variables
p = table(trainset[,c("outcome", "sex")])
q= table(trainset[,c("outcome", "VE")])
r= table(trainset[,c("outcome", "tentorium")])
s =table(trainset[,c("outcome", "volume")])
# List multiple tables together
t = list(p,q,r,s)
t
# Boxplots for continuous cariables
install.packages("fields")
library(fields)
par(mfrow=c(2,2))
bplot.xy(trainset$outcome, trainset$Age)
bplot.xy(trainset$outcome, trainset$RBS)
bplot.xy(trainset$outcome, trainset$GCS)
bplot.xy(trainset$outcome, trainset$MAP)
str(trainset)
# ggplot objects
# Multiplot function
# Arrange multiple ggplot objects on one page (Cookbook for R's multiplot).
# ...      : ggplot objects
# plotlist : alternative/additional list of ggplot objects
# file     : unused (kept for compatibility with the original recipe)
# cols     : number of columns in the layout grid
# layout   : optional matrix; cell value i places plot i at that position
#            (overrides `cols` when supplied)
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
  library(grid)
  # Make a list from the ... arguments and plotlist
  plots <- c(list(...), plotlist)
  numPlots = length(plots)
  # If layout is NULL, then use 'cols' to determine layout
  if (is.null(layout)) {
    # Make the panel
    # ncol: Number of columns of plots
    # nrow: Number of rows needed, calculated from # of cols
    layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),
                     ncol = cols, nrow = ceiling(numPlots/cols))
  }
  if (numPlots==1) {
    print(plots[[1]])
  } else {
    # Set up the page
    grid.newpage()
    pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
    # Make each plot, in the correct location
    for (i in 1:numPlots) {
      # Get the i,j matrix positions of the regions that contain this subplot
      matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
      print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
                                      layout.pos.col = matchidx$col))
    }
  }
}
Ap =ggplot(aes(x =outcome , y = Age), data = ichscore) + geom_boxplot (aes(fill= outcome))
Bp = ggplot(aes(x =outcome , y = RBS), data = ichscore) + geom_boxplot (aes(fill= outcome))
Cp = ggplot(aes(x =outcome , y = GCS), data = ichscore) + geom_boxplot (aes(fill= outcome))
Dp = ggplot(aes(x =outcome , y = MAP), data = ichscore) + geom_boxplot (aes(fill= outcome))
Mp =multiplot(Ap , Bp, Cp, Dp,Ep,Fp,Gp,Hp, cols=2)
# Plotting categorical by categorical variable
Ep = ggplot(ichscore, aes(outcome, ..count..)) + geom_bar(aes(fill = volume), position = "dodge")
Fp =ggplot(ichscore, aes(outcome, ..count..)) + geom_bar(aes(fill = VE), position = "dodge")
Gp = ggplot(ichscore, aes(outcome, ..count..)) + geom_bar(aes(fill = tentorium), position = "dodge")
Hp = ggplot(ichscore, aes(outcome, ..count..)) + geom_bar(aes(fill = sex), position = "dodge")
Ip= ggplot(ichscore, aes(outcome, ..count..)) + geom_bar(aes(fill = midline_shift), position = "dodge")
Mp = multiplot(Ap , Bp, Cp, Dp,Ep,Fp,Gp,Hp, cols=2)
# ---- Revised ICH score ----
# Derived GCS component: GCS 3-4 -> 2 points, 5-12 -> 1 point, 13-15 -> 0.
# (BUGFIX: the original innermost ifelse() was `ifelse(GCS<=15, 0)` with no
# `no` argument, which errors at runtime; since GCS never exceeds 15 the
# remaining branch is simply 0.)
ichscore$GCSscore2 <- ifelse(ichscore$GCS <= 4, 2,
                             ifelse(ichscore$GCS <= 12, 1, 0))
# Age component: 1 point for age >= 80.
ichscore$Age2 <- ifelse(ichscore$Age < 80, 0, 1)
# Revised ICH score = imaging components + age + GCS points.
# NOTE(review): VE/volume/tentorium were converted to factors above, so
# as.numeric() returns level codes (1/2) rather than the underlying 0/1 --
# the score is shifted by a constant; confirm before interpreting absolute
# values or published cutoffs.
ichscore$rev.ich <- as.numeric(ichscore$VE) + as.numeric(ichscore$volume) +
  as.numeric(ichscore$tentorium) + ichscore$Age2 + ichscore$GCSscore2
# Plotting ROC curve and optimal cut off
library(ROCR)
# Youden-style optimal cutpoint from ROCR objects: for each fpr/tpr/cutoff
# triple, find the point with minimum Euclidean distance to the perfect
# classifier corner (fpr = 0, tpr = 1) and return its sensitivity,
# specificity and cutoff.
# perf : ROCR performance object ("tpr" vs "fpr")
# pred : the matching ROCR prediction object (supplies cutoffs)
opt.cut = function(perf, pred){
  cut.ind = mapply(FUN=function(x, y, p){
    # distance of each ROC point (x = fpr, y = tpr) from (0, 1)
    d = (x - 0)^2 + (y-1)^2
    ind = which(d == min(d))
    c(sensitivity = y[[ind]], specificity = 1-x[[ind]],
      cutoff = p[[ind]])
  }, perf@x.values, perf@y.values, pred@cutoffs)
}
# creating prediction and performance objects
ichscore_pred = prediction(hemorrhage$ICH_score, hemorrhage$outcome)
perf_ich <- performance(ichscore_pred, "tpr", "fpr")
plot(perf_ich, add = TRUE, col = "green")
print(opt.cut(perf_ich,ichscore_pred ))
# creating prediction and performance objects
ichscore_pred = prediction(ichscore$rev.ich, ichscore$outcome)
perf_ich <- performance(ichscore_pred, "tpr", "fpr")
plot(perf_ich, add = TRUE, col = "green")
x = print(opt.cut(perf_ich,ichscore_pred ))
plot(perf)
library(plotROC)
ROCd = calculate_roc(ichscore$rev.ich, ichscore$outcome)
ggroc(ROCd)
hemorrhage
# Data Pre processing
# A. Handling missing values/unrelated variables in
# 1. Remove columns/variables with many NAs or which are unrelated like this
myvars <- names(trainset) %in% c("RMCH.score", "X", "X.1","sICH.score", "midline_shift")
trainset <- trainset[!myvars]
myvars2 = names(trainset) %in% c( "midline_shift")
trainset <- trainset[!myvars2]
# Remove these columns from test set as well
myvars3 <- names(testset) %in% c("RMCH.score", "X", "X.1","sICH.score", "midline_shift")
testset <- testset[!myvars3]
# Putting mean/median values in NAs
# NOTE(review): midline_shift was just dropped from trainset above, so this
# mean-imputation operates on a column that no longer exists -- either the
# removal or the imputation is redundant; confirm the intended order.
trainset$midline_shift <- ifelse(is.na(trainset$midline_shift), mean(trainset$midline_shift, na.rm = TRUE), trainset$midline_shift)
# Putting median values in NA
# Putting Principal Component/Dimensionality reduction
# Taking complete cases only from remaining cases
# 1list rows of data that have missing values
trainset[!complete.cases(trainset),]
#trainset <- na.omit(trainset)
# 2. Data transformations
library(caret)
library(randomForest)
str(trainset)
# You should train the data on newichtrain and test accuracy on newichtest
#RF
# training and boot sampling can be optimized like
# Shared caret control: 5-fold CV repeated 5 times, with class probabilities
# enabled so twoClassSummary can compute ROC/Sens/Spec.
fitControl <- trainControl(method = "repeatedcv",number = 5,repeats = 5,
## Estimate class probabilities
classProbs = TRUE,
## Evaluate performance using
## the following function
summaryFunction = twoClassSummary)
# metric = "ROC" can be added
# see example in modelSvm2
# Recode the 0/1 outcome to labels; caret with classProbs = TRUE requires
# factor levels that are valid R variable names ("Dead"/"Alive", not 0/1).
ichscore$outcome = ifelse(ichscore$outcome==1,"Dead","Alive")
trainset$outcome = ifelse(trainset$outcome==1,"Dead","Alive")
testset$outcome = ifelse(testset$outcome==1,"Dead","Alive")
# ROCmetric
# Random forest tuned on ROC with the shared repeated-CV control (fitControl).
modelRF2 <- train(as.factor(outcome) ~ Age + sex + MAP +
RBS + GCS + tentorium + VE + volume , # Survived is a function of the variables we decided to include
data = trainset, # Use the trainSet dataframe as the training data
method = "rf",# Use the "random forest" algorithm
metric = "ROC",
trControl = fitControl)
modelRF2
# Logistic regression (binomial GLM) on the same predictors and control.
modelLR2 <- train(as.factor(outcome) ~ Age + sex + MAP +
RBS + GCS + tentorium + VE + volume , # Survived is a function of the variables we decided to include
data = trainset, # Use the trainSet dataframe as the training data
method = "glm",# Use the "random forest" algorithm
family = "binomial",
metric = "ROC",
trControl = fitControl
)
modelLR2
# Naive Bayes on the same predictors and control.
modelNB2 <- train(as.factor(outcome) ~ Age + sex + MAP +
RBS + GCS + tentorium + VE + volume , # Survived is a function of the variables we decided to include
data = trainset, # Use the trainSet dataframe as the training data
method = "nb",# Use the "random forest" algorithm
metric = "ROC",
trControl = fitControl
)
modelNB2
# comparative Results in ROC (cross-validated resampling distributions)
results2 <- resamples(list( LR = modelLR2, RF = modelRF2 , NB = modelNB2))
summary(results2)
# ROC curve draw: compare the three caret models and the revised ICH score
# on the held-out test set.
library(pROC)
library(ROCR)
rf_pred <- predict(modelRF2, newdata = testset, type = "prob")
nb_pred <- predict(modelNB2, newdata = testset, type = "prob")
lr_pred <- predict(modelLR2, newdata = testset, type = "prob")
# Column 2 holds the probability of the second factor level
# (here "Dead", since factor levels sort alphabetically).
predRF <- prediction(rf_pred[, 2], testset$outcome)
perfRF <- performance(predRF, "tpr", "fpr")
plot(perfRF, main = "ROC curves for Randomforest,NaiveBayes,LogisticRegression")
# ROC curve for the naive Bayes model
predNB <- prediction(nb_pred[, 2], testset$outcome)
perf_nb <- performance(predNB, "tpr", "fpr")
plot(perf_nb, add = TRUE, col = "blue")
# ROC curve for the logistic regression model
predLR <- prediction(lr_pred[, 2], testset$outcome)
perf_lr <- performance(predLR, "tpr", "fpr")
plot(perf_lr, add = TRUE, col = "red")
# ROC curve for the revised ICH score
ichscore_pred <- prediction(ichscore$rev.ich, ichscore$outcome)
perf_ich <- performance(ichscore_pred, "tpr", "fpr")
plot(perf_ich, add = TRUE, col = "green")
# Add legends to plot.
# BUG FIX: the "nb" label contained an embedded newline and leading spaces,
# which rendered as a broken legend entry.
legend("right", legend = c("randomForest", "nb", "lr", "ICHscore"),
       bty = "n", cex = 1, lty = 1,
       col = c("black", "blue", "red", "green"))
# Generating ggROC.
# NOTE(review): installing packages mid-script re-runs on every execution;
# consider moving the install_github() calls to a one-time setup script.
detach("package:ggplot2", unload=TRUE)
detach("package:plotROC", unload=TRUE)
detach("package:caret", unload=TRUE)
devtools::install_github("hadley/ggplot2")
devtools::install_github("sachsmc/plotROC")
library(ggplot2)
library(plotROC)
test <- data.frame(lr_pred[,2], rf_pred[,2], nb_pred[,2], testset$ICH_score, testset$outcome)
str(test)
library(reshape2)
K <- melt(test)
# BUG FIX: data.frame() mangles the expression columns to names like
# "lr_pred...2.", so melt_roc() must be given those actual column names --
# the original "lr_pred"/"rf_pred"/"nb_pred" do not exist in `test`.
longtest <- melt_roc(test, "testset.outcome", c("lr_pred...2.", "rf_pred...2.", "nb_pred...2."))
ggplot(K, aes(d = testset.outcome, m = value, color = variable)) + geom_roc() + style_roc()
# ROC for trainset: in-sample predicted probabilities for each model.
rf_predA <- predict(modelRF2, newdata = trainset, type = "prob")
nb_predA <- predict(modelNB2, newdata = trainset, type = "prob")
lr_predA <- predict(modelLR2, newdata = trainset, type = "prob")
# Prepare data in long format for ggROC (melt with reshape2).
testA <- data.frame(lr_predA[,2], rf_predA[,2], nb_predA[,2], trainset$ICH_score, trainset$outcome)
longtestA <- melt(testA)
str(longtestA)
# Rename the mangled data.frame() column names to readable labels.
library(plyr)
longtestA$variable <- revalue(longtestA$variable, c("lr_predA...2."="LR", "rf_predA...2."="RF" ,"nb_predA...2." ="NB","trainset.ICH_score" = "ICHscore" ))
ggplot(longtestA, aes(d = trainset.outcome, m = value, color = variable)) + geom_roc() + style_roc()
testset$outcome <- as.factor(testset$outcome)
# Confusion matrices on the held-out test set, "Dead" as the positive class.
# BUG FIX: the original compared test-set predictions against
# trainset$outcome; the reference must come from the same rows that were
# predicted, i.e. testset$outcome.
RFpredict <- predict(modelRF2, newdata = testset)
cm1 <- confusionMatrix(RFpredict, reference = testset$outcome, positive = "Dead")
LRpredict <- predict(modelLR2, newdata = testset)
cm2 <- confusionMatrix(LRpredict, testset$outcome, positive = "Dead")
NBpredict <- predict(modelNB2, newdata = testset)
cm3 <- confusionMatrix(NBpredict, testset$outcome, positive = "Dead")
# Summarise each matrix (sumpred() is defined elsewhere in the project).
m1 <- sumpred(cm1)
m2 <- sumpred(cm2)
m3 <- sumpred(cm3)
model_comp <- as.data.frame(rbind(m1, m2, m3))
rownames(model_comp) <- c("RandomForest", "LogisticRegression", "NaiveBayes")
pander(model_comp, style="rmarkdown", split.tables=Inf, keep.trailing.zeros=TRUE,
       caption="Model results when comparing predictions and test set")
library(randomForest)
# Plot of variable importance for the ROC-tuned random forest.
varImpPlot(modelRF2$finalModel)
# Random forest with plain 10-fold CV (default accuracy metric).
set.seed(7)
modelRF <- train(outcome ~ Age + sex + MAP +
                   RBS + GCS + tentorium + VE + volume,
                 data = trainset,
                 method = "rf",
                 trControl = trainControl(method = "cv",
                                          number = 10))
modelRF
#GBM
# Gradient boosting machine with plain 10-fold CV.
set.seed(7)
modelGbm <- train(outcome ~ Age + sex + MAP +
                    RBS + GCS + tentorium + VE + volume,
                  data = trainset,
                  method = "gbm",
                  trControl = trainControl(method = "cv",
                                           number = 10))
# GBM tuned on ROC over a custom grid.
# NOTE(review): gbmGrid must be defined before this runs; it is not created
# anywhere in this section.
# BUG FIX: the original was missing the commas after metric = "ROC" and after
# summaryFunction = twoClassSummary (both syntax errors), and passed
# repeats = 10 with method = "cv"; repeats only applies to "repeatedcv".
set.seed(7)
modelGbm2 <- train(outcome ~ Age + sex + MAP +
                     RBS + GCS + tentorium + VE + volume,
                   data = trainset,
                   method = "gbm",
                   verbose = FALSE,
                   tuneGrid = gbmGrid,
                   ## Specify which metric to optimize
                   metric = "ROC",
                   trControl = trainControl(method = "repeatedcv",
                                            number = 10,
                                            repeats = 10,
                                            ## Estimate class probabilities
                                            classProbs = TRUE,
                                            ## Evaluate performance using
                                            ## the following function
                                            summaryFunction = twoClassSummary))
modelGbm2
# SVM
# Radial-kernel SVM with plain 10-fold CV (default accuracy metric).
set.seed(7)
modelSvm <- train(outcome ~ Age + sex + MAP +
RBS + GCS + tentorium + VE + volume , # Survived is a function of the variables we decided to include
data = trainset, # Use the trainSet dataframe as the training data
method = "svmRadial",# Use the "random forest" algorithm
trControl = trainControl(method = "cv", # Use cross-validation
number = 10) # Use 5 folds for cross-validation
)
# Radial-kernel SVM tuned on ROC with the shared fitControl.
# NOTE(review): the object is spelled modelSVm2 here but other comments in
# this file refer to "modelSvm2" -- capitalization must match when referenced.
modelSVm2 <- train(outcome ~ Age + sex + MAP +
RBS + GCS + tentorium + VE + volume , # Survived is a function of the variables we decided to include
data = trainset, # Use the trainSet dataframe as the training data
method = "svmRadial",# Use the "random forest" algorithm
metric = "ROC",
trControl = fitControl
)
#LVQ
# Learning vector quantization with plain 10-fold CV.
set.seed(7)
modelLvq <- train(outcome ~ Age + sex + MAP +
RBS + GCS + tentorium + VE + volume , # Survived is a function of the variables we decided to include
data = trainset, # Use the trainSet dataframe as the training data
method = "lvq",# Use the "random forest" algorithm
trControl = trainControl(method = "cv", # Use cross-validation
number = 10) # Use 5 folds for cross-validation
)
#LR
# Logistic regression (binomial GLM) with 10-fold CV.
# BUG FIX: the original trained on `ichscore` (the full data set) while every
# sibling model trains on `trainset` and is later evaluated on `testset`;
# training on the full data would leak the test observations into the model.
set.seed(7)
modelLR <- train(outcome ~ Age + sex + MAP +
                   RBS + GCS + tentorium + VE + volume,
                 data = trainset,
                 method = "glm",
                 family = "binomial", # logistic link
                 trControl = trainControl(method = "cv",
                                          number = 10))
# Naive Bayes with 10-fold CV.
set.seed(7)
modelNB <- train(outcome ~ Age + sex + MAP +
                   RBS + GCS + tentorium + VE + volume,
                 data = trainset,
                 method = "nb",
                 trControl = trainControl(method = "cv",
                                          number = 10))
# Resampling comparison of the three CV-tuned models.
results <- resamples(list( LR = modelLR, RF = modelRF , NB = modelNB))
summary(results)
# Predicting on the held-out test set.
# Checking structure of testset first; NA handling is currently disabled.
str(testset)
#testset[!complete.cases(testset),]
#testset <- na.omit(testset)
# Confusion matrices for each model, with "Dead" as the positive class.
# BUG FIX: outcome was recoded to "Dead"/"Alive" earlier in this script, so
# positive = "1" (used for GBM, LVQ and SVM in the original) no longer names
# an existing factor level; all six matrices now use positive = "Dead".
RFpredict1 <- predict(modelRF, newdata = testset)
cm1 <- confusionMatrix(RFpredict1, reference = testset$outcome, positive = "Dead")
LRpredict <- predict(modelLR, newdata = testset)
cm2 <- confusionMatrix(LRpredict, testset$outcome, positive = "Dead")
Gbmpredict <- predict(modelGbm, newdata = testset)
cm3 <- confusionMatrix(Gbmpredict, testset$outcome, positive = "Dead")
Lvqpredict <- predict(modelLvq, newdata = testset)
cm4 <- confusionMatrix(Lvqpredict, testset$outcome, positive = "Dead")
NBpredict <- predict(modelNB, newdata = testset)
cm5 <- confusionMatrix(NBpredict, testset$outcome, positive = "Dead")
Svmpredict <- predict(modelSvm, newdata = testset)
cm6 <- confusionMatrix(Svmpredict, testset$outcome, positive = "Dead")
# Summarise each matrix (sumpred() is defined elsewhere in the project).
m1 <- sumpred(cm1)
m2 <- sumpred(cm2)
m3 <- sumpred(cm3)
m4 <- sumpred(cm4)
m5 <- sumpred(cm5)
m6 <- sumpred(cm6)
model_comp <- as.data.frame(rbind(m1, m2, m3, m4, m5, m6))
# model_comp <- as.data.frame(rbind(m1, m2, m5))
rownames(model_comp) <- c("RandomForest", "LogisticRegression", "GradientBoostingMachine", "LearningVectorQuantization", "NaiveBayes", "SupportVectorMachine")
# rownames(model_comp) <- c("RandomForest", "LogisticRegression", "NaiveBayes")
pander(model_comp, style="rmarkdown", split.tables=Inf, keep.trailing.zeros=TRUE,
       caption="Model results when comparing predictions and test set")
# Plot of variable importance for the ROC-tuned random forest.
varImpPlot(modelRF2$finalModel)
# Predicting a new patient.
# BUG FIX: the original used `<-` inside data.frame(), which creates mangled
# column names (e.g. "Age....c.80.") instead of the predictor names the model
# expects, and listed Age twice; `=` builds correctly named columns.
# NOTE(review): factor levels here must match those seen during training.
temps <- data.frame(Age = 80,
                    sex = as.factor(1),
                    MAP = 220,
                    RBS = 340,
                    GCS = 7,
                    tentorium = as.factor(1),
                    VE = as.factor(1),
                    volume = as.factor(1))
temps$outcome <- predict(modelRF, newdata = temps)
temps$outcome
# Calculation of the revised ICH score from shiny inputs.
temps <- data.frame(Age = input$a,
                    Sex = as.factor(input$b),
                    MAP = input$c,
                    RBS = input$e,
                    GCS = input$d,
                    tentorium = as.factor(input$f),
                    VE = as.factor(input$h),
                    Volume = as.factor(input$g))
# GCS component: 2 for GCS <= 4, 1 for 5-12, 0 for 13-15.
# BUG FIX: the innermost ifelse() was missing its `no` argument, which is a
# runtime error whenever input$d > 12; NA is now returned for GCS > 15.
i <- ifelse(input$d <= 4, 2, ifelse(input$d <= 12, 1, ifelse(input$d <= 15, 0, NA)))
# Age component: 0 below 80, 1 otherwise.
j <- ifelse(input$a < 80, 0, 1)
# Revised score = volume + VE + tentorium + GCS component + age component.
k <- input$g + input$h + input$f + i + j
newich <- ifelse(k <= 3, "Alive", "Dead")
p <- c ( " Machine learning algorithms used in Study " )
p
# ROC curve for a standalone randomForest fit.
# NOTE(review): install.packages() inside an analysis script re-installs on
# every run; consider moving it to a setup script.
install.packages("ROCR")
library(ROCR)
RFpredict1 <- predict(modelRF, newdata = testset)
# BUG FIX: `refernce` was misspelled (the argument would have been swallowed
# silently), and positive = "1" names a factor level that no longer exists
# after the outcome was recoded to "Dead"/"Alive".
cm1 <- confusionMatrix(RFpredict1, reference = testset$outcome, positive = "Dead")
RFpredict2 <- predict(modelSVm2, newdata = testset, type = "prob")
gbmProbs <- predict(modelGbm, newdata = testset, type = "prob")
head(RFpredict2, n = 4)
# NOTE(review): randomForest() has no `test` argument -- held-out data is
# supplied via xtest/ytest; confirm the intent of test = testset.
adult.rf <- randomForest(outcome ~ Age + sex + MAP +
                           RBS + GCS + tentorium + VE + volume,
                         data = trainset, mtry = 2, ntree = 1000,
                         keep.forest = TRUE, importance = TRUE, test = testset)
# generate probabilities instead of class labels: type = "prob" makes
# randomForest return probabilities for both class labels; [, 2] selects one
# of them (the second factor level).
# BUG FIX: a stray HTML fragment (<span style=...>) had been pasted into the
# script here, which is a parse error; it has been removed.
adult.rf.pr <- predict(adult.rf, type = "prob", newdata = testset)[, 2]
# prediction() is a ROCR function
adult.rf.pred <- prediction(adult.rf.pr, testset$outcome)
# performance in terms of true and false positive rates
adult.rf.perf <- performance(adult.rf.pred, "tpr", "fpr")
# plot the curve with a chance diagonal
plot(adult.rf.perf, main = "ROC Curve for Random Forest", col = 2, lwd = 2)
abline(a = 0, b = 1, lwd = 2, lty = 2, col = "gray")
# compute area under curve (one value per run stored in the ROCR object)
auc <- performance(adult.rf.pred, "auc")
auc <- unlist(slot(auc, "y.values"))
minauc <- min(round(auc, digits = 2))
maxauc <- max(round(auc, digits = 2))
minauct <- paste(c("min(AUC) = "), minauc, sep = "")
maxauct <- paste(c("max(AUC) = "), maxauc, sep = "")
###
# NOTE(review): this section is scratch work that recomputes the revised
# score and recodes the indicator columns; it conflicts with the rev.ich
# calculation at the top of this script -- confirm which version is current.
ichscore$rev.ich = hemorrhage$VE + hemorrhage$volume + hemorrhage$tentorium + hemorrhage$Age2+ hemorrhage$GCSscore2
# NOTE(review): this assigns an entire data frame into a single column,
# which is almost certainly not intended.
hemorrhage$nu.ich = ichscore
# Flip the 1/other coding of the indicator columns to 0/1 ...
hemorrhage$VE = ifelse(hemorrhage$VE==1,0,1)
hemorrhage$volume = ifelse(hemorrhage$volume==1,0,1)
hemorrhage$tentorium = ifelse(hemorrhage$tentorium==1,0,1)
# ... then convert them to integer level codes (1/2) via factor().
hemorrhage$VE = as.integer(factor(hemorrhage$VE))
hemorrhage$volume = as.integer(factor(hemorrhage$volume))
hemorrhage$tentorium = as.integer(factor(hemorrhage$tentorium))
# Convert a factor whose labels are numeric strings into the numbers they
# name; plain as.numeric() on a factor would return the internal level codes.
as.numeric.factor <- function(x) {
  as.numeric(as.character(x))
}
# Convert the factor-coded score components to their numeric label values so
# they can be summed into the revised ICH score.
ichscore$VE = as.numeric.factor(ichscore$VE)
ichscore$volume = as.numeric.factor(ichscore$volume)
ichscore$tentorium = as.numeric.factor(ichscore$tentorium)
# Optimal cutoff via the Youden index.
# BUG FIX: the package name was misspelled "OptimalCutpoins".
library(OptimalCutpoints)
# NOTE(review): the original also ran optimal.cutpoints() with data = df,
# which is not defined in this script; only the ichscore call is kept.
# NOTE(review): tag.healthy = 0 assumes a 0/1 outcome -- after the earlier
# recoding the levels are "Dead"/"Alive"; confirm the coding used here.
optimal.cutpoint.Youden <- optimal.cutpoints(X = "rev.ich", status = "outcome", tag.healthy = 0,
                                             methods = "Youden", data = ichscore)
summary(optimal.cutpoint.Youden)
print(optimal.cutpoint.Youden)
# BUG FIX: str() was called on the misspelled name "opimal.cutpoint.Youden".
str(optimal.cutpoint.Youden)
G <- list(optimal.cutpoint.Youden, optimal.cutpoint.Youden$Youden$Global$optimal.cutoff)
G
# Correlation matrix in R
library(magrittr)
library(dplyr)
library(corrplot)
n = Book1 %>% dplyr::select(Age,MAP,RBS,GCS)
mycorrelationmatrix <- cor(n)
col <- colorRampPalette(c("#BB4444", "#EE9988", "#FFFFFF", "#77AADD", "#4477AA"))
corrplot::corrplot(mycorrelationmatrix, method="shade", shade.col=NA, tl.col="black", tl.srt=45, col=col(200), addCoef.col="black")
# NOTE(review): corrplot() expects a correlation matrix; passing the raw data
# frame n here (instead of mycorrelationmatrix) will fail at runtime.
corrplot::corrplot(n, method="shade", shade.col=NA, tl.col="black", tl.srt=45, col=col(200), addCoef.col="black")
# Rename variable in R with dplyr
library(dplyr)
# NOTE(review): `df` is not defined in this script -- this line appears to be
# a usage example rather than runnable code.
n = select(df, newvalue = oldvalue ,Age = X2 , RBS = X3 , GCS =X4)
select(Book1, Shift = Midlineshift)
Book2 = rename(Book1, shift =
Midlineshift)
Book4 = rename(Book2,shift = Shift, age = Age)
# Recode a variable with R
library(plyr)
str(Book2)
Book2$Outcome = revalue(as.factor(Book2$Outcome) , c("0" = "Dead", "1" ="Alive"))
str(Book2)
levels(Book2$Outcome)
# This has two levels Dead and Alive
# Relevel
#http://www.cookbook-r.com/Manipulating_data/Recoding_data/
#https://stat545-ubc.github.io/block014_factors.html
# data$scode[data$sex=="M"] <- "1"
# NOTE(review): `data` is not defined in this script -- example code only.
data$scode[data$sex=="F"] <- "2"
# Convert the column to a factor
data$scode <- factor(data$scode)
|
# Management accounting & controlling
# githubinstall::githubinstall("finstr")
library(finstr)
# Data tables
library(knitr)
library(kableExtra)
library(flextable)
library(htmlTable)
library(htmlwidgets)
library(DT)
library(xlsx)
# Data management
library(skimr)
library(dplyr)
library(tidyverse)
library(lubridate)
# Visualization
library(ggplot2)
library(waterfalls)
library(plotly) | /Rfile/201802/0227_accounting_process.R | no_license | yuanqingye/interestingR | R | false | false | 383 | r | # Management accounting & controlling
# githubinstall::githubinstall("finstr")
library(finstr)
# Data tables
library(knitr)
library(kableExtra)
library(flextable)
library(htmlTable)
library(htmlwidgets)
library(DT)
library(xlsx)
# Data management
library(skimr)
library(dplyr)
library(tidyverse)
library(lubridate)
# Visualization
library(ggplot2)
library(waterfalls)
library(plotly) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/emiss_functions.R
\name{direction_reference}
\alias{direction_reference}
\title{Build reference list for cardinal direction angles.}
\usage{
direction_reference()
}
\value{
A named list of the 16 cardinal directions and their angle ranges.
}
\description{
Build reference list for cardinal direction angles.
}
| /man/direction_reference.Rd | no_license | jwbannister/alamoRiver | R | false | true | 389 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/emiss_functions.R
\name{direction_reference}
\alias{direction_reference}
\title{Build reference list for cardinal direction angles.}
\usage{
direction_reference()
}
\value{
A named list of the 16 cardinal directions and their angle ranges.
}
\description{
Build reference list for cardinal direction angles.
}
|
# install all the necessary packages
list.of.packages <- c("tidyr", "geoR", "dplyr", "maps", "maptools", "rgdal", "rgeos", "sp", "spatialEco",
"plyr", "RColorBrewer", "classInt", "spatstat", "spdep", "sp", "usdm","readr",
"lubridate", "rmarkdown", "leaps", "MASS", "Metrics")
# Install only the packages that are not already present.
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages)
# NOTE(review): setwd() inside a script is fragile -- all relative paths
# below assume the working directory is the project root when this runs.
setwd(file.path(".", "Analyzed_Data", "ArcGIS"))
# Load the analysis packages; require() returns FALSE rather than erroring,
# so a failed load is only visible in the sapply() output.
packages<- c("maps", "maptools", "rgdal", "rgeos", "downloader", "dplyr",
"plyr", "geoR", "RColorBrewer", "classInt", "spatstat",
"spdep", "sp", "usdm", "leaps", "MASS", "Metrics")
sapply(packages, require, character.only = T)
# get the block baltimore shapefile from ArcGIS
# All the shapefiles are in NAD 1983 Maryland state plane projection
# NOTE(review): readShapePoly() (maptools) is deprecated in favour of
# rgdal::readOGR()/sf::st_read() -- kept as-is for reproducibility.
blocks <- readShapePoly("balt_vac_crime_estate_cen_health_feet.shp")
# rename -- NOTE(review): renaming by column index is fragile if the
# shapefile's attribute table ever changes.
names(blocks)[34] <- "vacancy"
names(blocks)[35] <- "crime"
names(blocks)[38] <- "estate"
# get the census tract shapefile from ArcGIS
tracts <- readShapePoly("track_health_cens_crim_est_vac.shp")
names(tracts)[110] <- "crime"
names(tracts)[113] <- "estate"
names(tracts)[114] <- "vacancy"
# add centroid coordinates to account for spatial correlation
t_coords<- as.data.frame(gCentroid(tracts, byid = TRUE))
b_coords<- as.data.frame(gCentroid(blocks, byid = TRUE))
tracts$X <- t_coords$x
tracts$Y <- t_coords$y
blocks$X <- b_coords$x
blocks$Y <- b_coords$y
# Drop the first nine attribute columns and inspect the factor columns
# (the bare expression below only prints; it does not modify tracts_e).
tracts_e <- tracts@data[,-(1:9)]
tracts_e[, sapply(tracts_e, is.factor)]
# remove all the values that are factors, since none of them
# have evidence that they are associated with life expectancy
tracts_e<-tracts_e[, !sapply(tracts_e, is.factor)]
# get crime, estate, and vacanacy as numeric
tracts_e[, sapply(tracts_e, is.integer)]
tracts_e$crime <- as.numeric(tracts_e$crime)
tracts_e$estate <- as.numeric(tracts_e$estate)
tracts_e$vacancy <- as.numeric(tracts_e$vacancy)
tracts_e$long <- as.numeric(tracts_e$long)
unique(sapply(tracts_e, class))
# only inlude variables that are reasonable and associated with previous
# literature reviews. This includes:
# City Tax, State Tax, vacancy, crime, X, Y for point level data
# Liquor stores, Fast food stores, race (paa10, pwhite10, pasi10,
#p2more10, ppac10, phisp10, racdiv10, variables associated with income,
# and poverty)
# select only variables that are reasonable
# NOTE(review): selection by column index -- fragile if tracts_e changes.
tracts_e2<-tracts_e[,c(29,61,65,67, 71:80, 86:103, 106:107)]
names(tracts_e2)
t_names<-names(tracts_e2)
# look at the correlations between life expectancy in 2011
# and other variables
# pairwise.complete.obs uses all non-missing pairs for each correlation.
cors<- cor(tracts_e2$LifeExp11,tracts_e2, use="pairwise.complete.obs")
cors<- t(cors)
# Absolute correlations, for ranking predictors regardless of sign.
corsa<- abs(cors)
corsa
# replace values in the blocks dataset with the mean tract values, in order
# to take care of the few blocks that received no values from the tracts
# shapefile. 9 blocks have zero values; replace with Old Town Middle East
# data (tract row 51).
var_names <- names(tracts_e2[, c(1:30)])
# Identify the affected blocks BEFORE any replacement (femhhs10 is itself
# one of the columns being overwritten below).
block_names <- blocks$BLK2010[blocks$femhhs10 == 0]
block_names <- as.vector(block_names)
# The columns to back-fill from tract row 51 (same set, same order, as the
# original's 24 copy-pasted assignments).
fill_vars <- c("LifeExp11", "liquor11", "fastfd11", "tanf11", "tpop10",
               "male10", "female10", "pwhite10", "paa10", "pasi10",
               "p2more10", "ppac10", "racdiv10", "hhs10", "femhhs10",
               "fam10", "hhsize10", "mhhi14", "hh25inc14", "hh40inc14",
               "hh60inc14", "hh75inc14", "hhpov14", "hhchpov14")
zero_rows <- blocks@data$BLK2010 %in% block_names
for (v in fill_vars) {
  blocks@data[zero_rows, v] <- tracts@data[51, v]
}
# Side-by-side choropleths of 2011 life expectancy at block and tract level,
# written to a single PNG.
png(file.path("..", "..", "Figures", "Exploratory_Figures","Life_Exp_Block.png"),
    width = 1200, height = 700, pointsize = 20)
# plot 2011 life expectancy by block
par(mfrow = c(1, 2))
plotvar0 <- blocks$LifeExp11
nclr <- 5
plotclr <- brewer.pal(nclr, "YlOrBr")
# Renamed the interval object from `class` to avoid masking base::class().
brks <- classIntervals(plotvar0, nclr, style = "equal")
colcode <- findColours(brks, plotclr)
plot(blocks)
plot(blocks, col = colcode, add = TRUE)
title(main = "Baltimore 2011 Life Expectancy by Street Block")
#mtext(side=3,line=.5, text="Life Expectancy")
legend(1390926, 579406, legend = names(attr(colcode, "table")),
       fill = attr(colcode, "palette"), title = "Life Expectancy", cex = .7, bty = "n")
#dev.off()
#png(file.path("..", "..", "Figures", "Exploratory_Figures","Life_Exp_Tract.png"))
# plot 2011 life expectancy by census tract
plotvar0 <- tracts$LifeExp11
plotclr <- brewer.pal(nclr, "YlOrBr")
brks <- classIntervals(plotvar0, nclr, style = "equal")
colcode <- findColours(brks, plotclr)
plot(tracts)
plot(tracts, col = colcode, add = TRUE)
# BUG FIX: the title previously misspelled "Expetancy".
title(main = "Baltimore 2011 Life Expectancy by Census Tract")
#mtext(side=3,line=.5, text="Life Expectancy")
legend(1390926, 579406,
       legend = names(attr(colcode, "table")),
       fill = attr(colcode, "palette"), title = "Life Expectancy", cex = .7, bty = "n")
par(mfrow = c(1, 1))
dev.off()
# now look at something that was the same between the blocks
# and the tracts, like life expectancy and household income
png(file.path("..", "..", "Figures", "Exploratory_Figures","Life_Expec_Histo.png"))
# 2x2 histogram grid: life expectancy and income, tracts (top) vs blocks.
par(mfrow=c(2,2))
hist(tracts$LifeExp11)
hist(tracts$mhhi14)
hist(blocks$LifeExp11)
hist(blocks$mhhi14)
par(mfrow=c(1,1))
dev.off()
# Numeric comparison of the two spatial resolutions.
summary(blocks@data$LifeExp11)
summary(tracts@data$LifeExp11)
summary(tracts@data$mhhi14)
summary(blocks@data$mhhi14)
var(blocks@data$LifeExp11)
var(tracts@data$LifeExp11)
var(tracts@data$mhhi14)
var(blocks@data$mhhi14)
# not surprisingly, in this case the variance
# is very similar.
# Test the assumption that point variables, such as crime, vacancy, and taxes
# increase with the area in the same way between blocks and tracts.
# If we can assume that, we can say that we can apply a model built
# using tracts onto a block map.
png(file.path("..", "..", "Figures", "Exploratory_Figures","Linear_Increase.png"))
par(mfrow=c(2,2))
plot(tracts@data$tract_area, tracts@data$crime)
plot(blocks@data$block_area, blocks@data$crime)
plot(tracts@data$tract_area, tracts@data$vacancy)
plot(blocks@data$block_area, blocks@data$vacancy)
par(mfrow=c(1,1))
dev.off()
# same thing for taxes
par(mfrow=c(2,2))
plot(tracts@data$tract_area, tracts@data$StateTax)
plot(blocks@data$block_area, blocks@data$StateTax)
plot(tracts@data$tract_area, tracts@data$CityTax)
plot(blocks@data$block_area, blocks@data$CityTax)
par(mfrow=c(1,1))
###########################################################
# Look at the distribution of the crime counts and
# abandoned houses in the blocks and in
#the tracts (CSA). Is it comparable?
png(file.path("..", "..", "Figures", "Exploratory_Figures","Crime_Vacant_Histo.png"))
par(mfrow=c(2,2))
hist(tracts$crime)
hist(tracts$vacancy)
hist(blocks$crime)
hist(blocks$vacancy)
par(mfrow=c(1,1))
dev.off()
############################################################
# get count variables like crime and vacancy per square mile
tracts$crime_mi <- tracts$crime/tracts$tract_area
tracts$vacancy_mi <- tracts$vacancy/tracts$tract_area
blocks$crime_mi <- blocks$crime/blocks$block_area
blocks$vacancy_mi <- blocks$vacancy/blocks$block_area
# Same histogram grid, now for the per-square-mile densities.
par(mfrow=c(2,2))
hist(tracts$crime_mi)
hist(tracts$vacancy_mi)
hist(blocks$crime_mi)
hist(blocks$vacancy_mi)
par(mfrow=c(1,1))
# if the crime and vacant house counts are divided by area,
# creating a a crime and vacant house density, the distribution
# of the densities is more comparable between the tracts and the
# blocks
# Numeric comparison: raw counts vs densities at each resolution.
summary(blocks@data$crime)
summary(tracts@data$crime)
summary(blocks@data$crime_mi)
summary(tracts@data$crime_mi)
var(blocks@data$crime)
var(tracts@data$crime)
var(blocks@data$crime_mi)
var(tracts@data$crime_mi)
summary(blocks@data$vacancy)
summary(tracts@data$vacancy)
summary(tracts@data$vacancy_mi)
summary(blocks@data$vacancy_mi)
var(blocks@data$vacancy)
var(tracts@data$vacancy)
var(tracts@data$vacancy_mi)
var(blocks@data$vacancy_mi)
# Now look at State and City Taxes
par(mfrow=c(2,2))
hist(tracts$StateTax)
hist(tracts$CityTax)
hist(blocks$StateTax)
hist(blocks$CityTax)
par(mfrow=c(1,1))
summary(blocks@data$StateTax)
summary(tracts@data$StateTax)
summary(tracts@data$CityTax)
summary(blocks@data$CityTax)
var(blocks@data$StateTax)
var(tracts@data$StateTax)
var(tracts@data$CityTax)
var(blocks@data$CityTax)
# the distributions within the blocks and within the tracts seem to be
# very different, making a comparison more difficult. Both City and State
# Taxes are not distributed in a similar way between blocks and tracts.
#The variance in blocks is much higher than the variance in tracts
### STANDARDIZE! ##
# Center each variable on its mean, then divide by the variance of the
# centered values, so tract- and block-level variables share a common scale.
# (The original comment said "deviation from the median", but the code has
# always used the mean.)
#tracts@data <- mutate(tracts@data, LifeExp11_dev = (LifeExp11 - mean(LifeExp11))/
#var(LifeExp11))
# Helper: mean-center a numeric vector and scale it by its variance.
# Replaces 24 copy-pasted two-line assignments with identical arithmetic.
standardize_dev <- function(v) {
  dev <- v - mean(v)
  dev / var(dev)
}
tracts$crime_dev <- standardize_dev(tracts$crime)
tracts$crime_mi_dev <- standardize_dev(tracts$crime_mi)
tracts$StateTax_dev <- standardize_dev(tracts$StateTax)
tracts$CityTax_dev <- standardize_dev(tracts$CityTax)
tracts$vacancy_mi_dev <- standardize_dev(tracts$vacancy_mi)
tracts$vacancy_dev <- standardize_dev(tracts$vacancy)
tracts$LifeExp11_dev <- standardize_dev(tracts$LifeExp11)
tracts$liquor11_dev <- standardize_dev(tracts$liquor11)
tracts$fastfd11_dev <- standardize_dev(tracts$fastfd11)
tracts$racdiv10_dev <- standardize_dev(tracts$racdiv10)
tracts$femhhs10_dev <- standardize_dev(tracts$femhhs10)
tracts$mhhi14_dev <- standardize_dev(tracts$mhhi14)
## do the same at the block level
# Same centering / variance scaling for the block-level data.
# NOTE(review): the life-expectancy deviation is named "le11_dev" here
# but "LifeExp11_dev" on the tracts; the inconsistent name is kept
# because downstream code may refer to it.
# Names of the map are the output columns, values the input columns,
# listed in the original creation order.
block_std_map <- c(crime_mi_dev = "crime_mi", crime_dev = "crime",
                   StateTax_dev = "StateTax", CityTax_dev = "CityTax",
                   vacancy_mi_dev = "vacancy_mi", vacancy_dev = "vacancy",
                   le11_dev = "LifeExp11", liquor11_dev = "liquor11",
                   fastfd11_dev = "fastfd11", racdiv10_dev = "racdiv10",
                   femhhs10_dev = "femhhs10", mhhi14_dev = "mhhi14")
for (dev_col in names(block_std_map)) {
  src_vals <- blocks[[block_std_map[[dev_col]]]]
  centered <- src_vals - mean(src_vals)
  blocks[[dev_col]] <- centered / var(centered)
}
# look at the variances after standardization
par(mfrow=c(2,2))
hist(tracts$StateTax_dev)
hist(tracts$CityTax_dev)
hist(blocks$StateTax_dev)
hist(blocks$CityTax_dev)
par(mfrow=c(1,1))
summary(blocks@data$StateTax_dev)
summary(blocks@data$StateTax)
summary(tracts@data$StateTax_dev)
summary(tracts@data$StateTax)
var(tracts@data$crime_mi_dev)
var(blocks@data$crime_mi_dev)
var(tracts@data$vacancy_mi_dev)
var(blocks@data$vacancy_mi_dev)
var(blocks@data$StateTax_dev)
var(tracts@data$StateTax_dev)
var(tracts@data$CityTax_dev)
var(blocks@data$CityTax_dev)
#################################################
# Select the proper variables
# check for collinearity. Select variables you want to check
names(tracts_e2)
vars <- names(tracts_e2)
vars
# NOTE(review): this df is overwritten two statements later; kept only
# as an interactive intermediate.
df <- tracts@data[,(names(tracts) %in% vars)]
# look at the collinearity using vif, leave only those below 10.
# many errors, take away variables that are redundant.
vars_s<- c("liquor11", "fastfd11", "tanf11",
"tpop10", "paa10", "pwhite10" ,
"racdiv10", "hhs10", "femhhs10",
"fam10" , "hhsize10", "mhhi14",
"hhpov14", "hhchpov14", "crime_mi_dev",
"CityTax_dev", "StateTax_dev", "vacancy_mi_dev",
"X", "Y", "femhhs10_dev", "liquor11_dev", "fastfd11_dev",
"racdiv10_dev", "mhhi14_dev")
df <- tracts@data[,(names(tracts) %in% vars_s)]
vif(df)
# NOTE(review): bare `vif` prints the function's source, not a result --
# probably a leftover from interactive use.
vif
# stepwise VIF elimination (usdm); drop variables with VIF above 10
b<- vifstep(df, th=10)
b
vars_s2<- c("tanf11", "pwhite10" , "hhs10",
"femhhs10", "hhpov14", "crime_mi_dev",
"CityTax_dev", "StateTax_dev", "vacancy_mi_dev",
"X", "Y", "femhhs10_dev", "liquor11_dev",
"racdiv10_dev", "mhhi14_dev")
df <- tracts@data[,(names(tracts) %in% vars_s2)]
vif(df)
vif
# tighter threshold (VIF < 5) on the reduced variable set
b_f<- vifstep(df, th=5)
b_f
############################################################
# do some regression at the tract level. Use the biggest possible model
# first. Look at standardized.
f1 <- LifeExp11 ~ liquor11 + tanf11 + pwhite10 + racdiv10 + hhs10 +
fam10 + hhpov14 + StateTax_dev + crime_mi_dev + vacancy_mi_dev + X + Y
m1 <- lm(f1, data = tracts)
summary(m1)
# backward best-subset selection (leaps) on the same formula
reg1<-regsubsets(f1, data = tracts, method = "backward")
summary(reg1)
# non standardized.
f2 <- LifeExp11 ~ liquor11 + tanf11 + pwhite10 + racdiv10 + hhs10 +
fam10 + hhpov14 + StateTax + crime + vacancy + X + Y
m2 <- lm(f2, data = tracts)
summary(m2)
reg2<-regsubsets(f2, data = tracts, method = "backward")
summary(reg2)
# looks like temprary assistance for needy families (TANF) proportion
# liquor, percent of white people, household povery proportion
# And Y coordinate are some of the strongest predictors.
f3 <- LifeExp11 ~ liquor11 + tanf11 + pwhite10 +
hhpov14 + StateTax_dev + crime_mi_dev + vacancy_mi_dev
m3 <- lm(f3, data = tracts)
summary(m3)
reg3<-regsubsets(f3, data = tracts, method = "backward")
summary(reg3)
# non standardized.
f4 <- LifeExp11 ~ liquor11 + tanf11 + pwhite10 +
hhpov14 + StateTax + crime + vacancy
m4 <- lm(f4, data = tracts)
summary(m4)
reg4<-regsubsets(f4, data = tracts, method = "backward")
summary(reg4)
# of the point variables, vacancy and state tax seem to be the
# most important
# f3 plus centroid coordinates as explicit spatial-trend terms
f3s <- LifeExp11 ~ liquor11 + tanf11 + pwhite10 +
hhpov14 + StateTax_dev + crime_mi_dev + vacancy_mi_dev + X+ Y
m3s <- lm(f3s, data = tracts)
summary(m3s)
AIC(m1)
AIC(m2)
AIC(m3)
AIC(m4)
AIC(m3s)
# plot the residuals of m3
png(file.path("..", "..", "Figures", "Exploratory_Figures", "Spatial_Non_Spatial_Residuals.png"),
width = 1200, height = 700, pointsize = 20)
par(mfrow=c(1,2))
# Choropleth of m3 residuals: 5 equal-interval classes, YlOrBr palette.
# NOTE(review): `class` shadows base::class() for the rest of the script.
plotvar0<- residuals(m3)
nclr<-5
plotclr<-brewer.pal(nclr,"YlOrBr")
class<-classIntervals(plotvar0,nclr,style="equal")
colcode<-findColours(class,plotclr)
plot(tracts)
plot(tracts,col=colcode,add=TRUE)
title(main="M3 residuals")
legend(1390926, 577406,legend=names(attr(colcode, "table")),
fill=attr(colcode,"palette"),title="Residuals",cex=.7,bty="n")
# same choropleth for the coordinate-augmented model m3s
plotvar0<- residuals(m3s)
nclr<-5
plotclr<-brewer.pal(nclr,"YlOrBr")
class<-classIntervals(plotvar0,nclr,style="equal")
colcode<-findColours(class,plotclr)
plot(tracts)
plot(tracts,col=colcode,add=TRUE)
title(main="M3s residuals")
legend(1390926, 577406,legend=names(attr(colcode, "table")),
fill=attr(colcode,"palette"),title="Residuals",cex=.7,bty="n")
par(mfrow=c(1,1))
dev.off()
# the residuals look spatially dependent
# calculate Moran's I
# create border for the plots. allow distance of 300 between neighborhoods to account
# for an "island" in southern Baltimore
tracts_nb<-poly2nb(tracts,queen=FALSE, snap = 300)
plot(tracts)
plot(tracts_nb,coordinates(tracts),pch=19,cex=.6,add=TRUE)
# W is the weight matrix of just 1 and 0, showing if each census tract has a tract as its neighbor.
# select style = W to make it row standardized
tracts_nb_w<-nb2listw(tracts_nb,style="W")
W<-listw2mat(tracts_nb_w)
###################################################
# look at the kinds of autocorrelation that exist
# Lagrange multiplier diagnostics for the three candidate lm fits
lm.LMtests(m4, tracts_nb_w, test = "all")
lm.LMtests(m3, tracts_nb_w, test = "all")
lm.LMtests(m3s, tracts_nb_w, test = "all")
# a single moran's I test for Baltimore life expectancy
moran<-moran.test(tracts$LifeExp11,listw=nb2listw(tracts_nb,style="W" ))
moran
# the moran I p-value is highly significant, suggesting that there is
# spatial clustering!
# make a correlogram of Moran's I, lagged by neighborhood distance
# NOTE(review): `cor` shadows base::cor(); later cor(...) calls still
# dispatch to the base function because the local `cor` is not a function.
cor<-sp.correlogram(neighbours=tracts_nb,var=tracts$LifeExp11,order=8,method="I",style="W",
zero.policy =TRUE)
plot(cor, main="Moran's I Correlogram of Baltimore Life Expectancy")
# save this corrallorgram
png(file.path("..", "..", "Figures", "Exploratory_Figures","M_I_corr.png"))
plot(cor, main="Moran's I Correlogram of Baltimore Life Expectancy")
dev.off()
# another way to test for spatial clustering. Plot mean adjacent residulas
# vs residuals
png(file.path("..", "..", "Figures", "Exploratory_Figures", "Residual_Adj_Resid.png"),
width = 1200, height = 700, pointsize = 20)
par(mfrow=c(1,2))
# mean residual of each tract's neighbours (tracts_nb holds neighbour indices)
resnb <- sapply(tracts_nb, function(x) mean(m3$residuals[x]))
cor(residuals(m3), resnb)
plot(residuals(m3), resnb, xlab='Residuals', ylab='Mean adjacent residuals', main = "M3")
resnb <- sapply(tracts_nb, function(x) mean(m3s$residuals[x]))
cor(residuals(m3s), resnb)
plot(residuals(m3s), resnb, xlab='Residuals', ylab='Mean adjacent residuals', main = "M3s")
par(mfrow=c(1,1))
dev.off()
# again, looks like there is a trend
# to account for spatial autocorrelation, use
# spatial simultaneous autoregressive lag model
# estimation. (lagsarlm)
m3_s = lagsarlm(f3, data=tracts, tracts_nb_w)
summary(m3_s)
# compare the unlagged and lagged models
m3 <- lm(f3, data = tracts)
# NOTE(review): m3_s is refit here identically to the fit above.
m3_s = lagsarlm(f3, data=tracts, tracts_nb_w)
anova(m3_s, m3)
# looks like the lagged model minimizes the AIC
# look at the Moran's I of the residulas
residuals(m3_s)
# NOTE(review): this correlogram is of LifeExp11 again, not of the
# residuals -- the residual version follows below.
cor<-sp.correlogram(neighbours=tracts_nb,var=tracts$LifeExp11,order=8,method="I",style="W",
zero.policy =TRUE)
plot(cor, main="Moran's I Correlogram of Baltimore Life Expectancy")
# looks like Moran's I is no longer significant if the rows
# are stanardized
moran_resid<-moran.test(residuals(m3_s),listw=nb2listw(tracts_nb,style="W" ))
moran_resid
cor<-sp.correlogram(neighbours=tracts_nb,var=residuals(m3_s),order=8,method="I",style="W",
zero.policy =TRUE)
plot(cor, main="Moran's I Correlogram of Baltimore Life Expectancy Residuals")
# save the collalogram
png(file.path("..", "..", "Figures", "Exploratory_Figures", "M_I_R_corr.png"))
plot(cor, main="Moran's I Correlogram of Baltimore Life Expectancy Residuals")
dev.off()
#######################################################################
# apply the model built on a census tract level to a the block level #
# select a training set and a testing set
# Fixed seed so the 30-tract training sample is reproducible.
set.seed(123)
samp<- sample(nrow(tracts), 30)
train_set <- tracts[samp,]
test_set <- tracts[-samp,]
# Refit each candidate formula on the training tracts only.
m1 <- lm(f1, data = train_set)
summary(m1)
f1.5 <- LifeExp11 ~ femhhs10_dev +
crime_mi_dev + racdiv10_dev + vacancy_mi_dev + X + Y + mhhi14_dev
m1.5 <- lm(f1.5, data = train_set)
summary(m1.5)
m2 <- lm(f2, data = train_set)
summary(m2)
m3 <- lm(f3, data = train_set)
summary(m3)
m3s <- lm(f3s, data = train_set)
summary(m3s)
m4 <- lm(f4, data = train_set)
summary(m4)
# a more basic model
f5 <- LifeExp11 ~ femhhs10_dev +
crime_mi_dev + racdiv10_dev + vacancy_mi_dev + X + Y
m5 <- lm(f5, data = train_set)
summary(m5)
# Hold-out predictions on the remaining tracts.
p1 <- predict(m1, newdata = test_set)
p1.5 <- predict(m1.5, newdata = test_set)
p2 <- predict(m2, newdata = test_set)
p3 <- predict(m3, newdata = test_set)
p3s <- predict(m3s, newdata = test_set)
p4 <- predict(m4, newdata = test_set)
p5 <- predict(m5, newdata = test_set)
# look at the correlation between predicted life expectancy p1 and the actual life
# expectancy in the test_set
AIC(m1)
AIC(m1.5)
AIC(m2)
AIC(m3)
AIC(m4)
AIC(m3s)
AIC(m5)
# rmse() from the Metrics package: hold-out root-mean-squared error
rmse(test_set@data$LifeExp11, p1)
rmse(test_set@data$LifeExp11, p1.5)
rmse(test_set@data$LifeExp11, p2)
rmse(test_set@data$LifeExp11, p3)
rmse(test_set@data$LifeExp11, p3s)
rmse(test_set@data$LifeExp11, p4)
rmse(test_set@data$LifeExp11, p5)
cor(p1, test_set@data$LifeExp11)
cor(p1.5, test_set@data$LifeExp11)
cor(p2, test_set@data$LifeExp11)
cor(p3, test_set@data$LifeExp11)
cor(p3s, test_set@data$LifeExp11)
cor(p4, test_set@data$LifeExp11)
cor(p5, test_set@data$LifeExp11)
# looks like p1 has the best predictive ability
# use the same model to predict life expectancy for each block
# Predict block-level life expectancy from the tract-trained model m1.5.
# FIX: spell out interval = "prediction" -- the original "predict" only
# worked via partial argument matching in predict.lm().
bp1 <- predict(m1.5, newdata = blocks, type = "response", interval = "prediction")
# bp1 is an n x 3 matrix (fit, lwr, upr), so length() reports 3 * n.
length(bp1)
# make a test blocks dataset to practice
blocks_t<- blocks
blocks_t$bp1 <- bp1[,1]
# width of the 95% prediction interval (upr - lwr) -- not a variance,
# despite the "_var" suffix
blocks_t$bp1_var <- bp1[,3]- bp1[,2]
summary(blocks_t$bp1)
# summary of the 95% prediction-interval widths
summary(blocks_t$bp1_var)
par(mfrow=c(1,2))
# plot the outcome of p2 prediction model!!
# Choropleth of predicted block-level life expectancy (bp1, column "fit").
plotvar0<-blocks_t$bp1
nclr<-5
plotclr<-brewer.pal(nclr,"YlOrBr")
class<-classIntervals(plotvar0,nclr,style="pretty")
colcode<-findColours(class,plotclr)
plot(blocks_t)
plot(blocks_t,col=colcode,add=TRUE)
title(main="Baltimore Predicted Life Expectancy by Block")
mtext(side=3,line=.5, text="")
legend(1390926, 579406, legend=names(attr(colcode, "table")),
fill=attr(colcode,"palette"),title="Life Expectancy",cex=.7,bty="n")
# plot the variance for comparison
# (bp1_var is the prediction-interval width computed above)
plotvar0<-blocks_t$bp1_var
nclr<-5
plotclr<-brewer.pal(nclr,"YlOrBr")
class<-classIntervals(plotvar0,nclr,style="pretty")
colcode<-findColours(class,plotclr)
plot(blocks_t)
plot(blocks_t,col=colcode,add=TRUE)
title(main="Baltimore Predicted Life Expectancy Confidence Interval")
mtext(side=3,line=.5, text="")
legend(1390926, 579406, legend=names(attr(colcode, "table")),
fill=attr(colcode,"palette"),title="Confidence Interval",cex=.7,bty="n")
par(mfrow=c(1,1))
# looks like one of the parameters is creating large errors downtown.
# see if any of the tract level parameters look out of the ordinary
# looks like downtown has a very high number of liquor stores
# relative to the rest of the tracts. Median is 1.034,Downtown is 8.532
# for number of businesses that possess Class A liqour licences.
png(file.path("..", "..", "Figures", "Exploratory_Figures","Liquor_Tracts.png"))
plotvar0<-tracts$liquor11
nclr<-5
plotclr<-brewer.pal(nclr,"YlOrBr")
class<-classIntervals(plotvar0,nclr,style="equal")
colcode<-findColours(class,plotclr)
plot(tracts)
plot(tracts,col=colcode,add=TRUE)
title(main="Baltimore Liquor Store Density")
mtext(side=3,line=.5, text="")
legend(1390926, 579406, legend=names(attr(colcode, "table")),
fill=attr(colcode,"palette"),title="Liquor Stores",cex=.7,bty="n")
dev.off()
# undo the setwd() done at the top of the script
setwd(file.path("..", ".."))
| /R_code/Exploratory_1.R | no_license | akvit1/BLE | R | false | false | 25,352 | r | # install all the necessary packages
# Install any missing dependencies, move into the ArcGIS data directory,
# and attach the packages used by the analysis.
# FIX: dropped the duplicated "sp" entry from the install list.
list.of.packages <- c("tidyr", "geoR", "dplyr", "maps", "maptools", "rgdal", "rgeos", "sp", "spatialEco",
"plyr", "RColorBrewer", "classInt", "spatstat", "spdep", "usdm", "readr",
"lubridate", "rmarkdown", "leaps", "MASS", "Metrics")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[, "Package"])]
# FIX: explicit length comparison instead of relying on 0/&gt;0 truthiness.
if (length(new.packages) > 0) install.packages(new.packages)
# NOTE(review): the script must be launched from the project root -- all
# later relative paths (e.g. ../../Figures) assume this setwd().
setwd(file.path(".", "Analyzed_Data", "ArcGIS"))
packages<- c("maps", "maptools", "rgdal", "rgeos", "downloader", "dplyr",
"plyr", "geoR", "RColorBrewer", "classInt", "spatstat",
"spdep", "sp", "usdm", "leaps", "MASS", "Metrics")
# FIX: library() fails loudly on a missing package, unlike
# require(..., character.only = T), which silently returns FALSE;
# also TRUE spelled out instead of T.
invisible(lapply(packages, library, character.only = TRUE))
# get the block baltimore shapefile from ArcGIS
# All the shapefiles are in NAD 1983 Maryland state plane projection
# NOTE(review): readShapePoly() is deprecated (removed from maptools);
# consider rgdal::readOGR or sf::st_read when modernizing -- verify
# attribute/column order stays the same before switching.
blocks <- readShapePoly("balt_vac_crime_estate_cen_health_feet.shp")
# rename
# NOTE(review): positional column indices are fragile -- they assume the
# shapefile attribute order never changes.
names(blocks)[34] <- "vacancy"
names(blocks)[35] <- "crime"
names(blocks)[38] <- "estate"
# get the census tract shapefile from ArcGIS
tracts <- readShapePoly("track_health_cens_crim_est_vac.shp")
names(tracts)[110] <- "crime"
names(tracts)[113] <- "estate"
names(tracts)[114] <- "vacancy"
# add centroid coordinates to account for spatial correlation
t_coords<- as.data.frame(gCentroid(tracts, byid = TRUE))
b_coords<- as.data.frame(gCentroid(blocks, byid = TRUE))
tracts$X <- t_coords$x
tracts$Y <- t_coords$y
blocks$X <- b_coords$x
blocks$Y <- b_coords$y
# tracts_e: attribute table minus the first 9 identifier columns
tracts_e <- tracts@data[,-(1:9)]
tracts_e[, sapply(tracts_e, is.factor)]
# remove all the values that are factors, since none of them
# have evidence that they are associated with life expectancy
tracts_e<-tracts_e[, !sapply(tracts_e, is.factor)]
# get crime, estate, and vacanacy as numeric
tracts_e[, sapply(tracts_e, is.integer)]
tracts_e$crime <- as.numeric(tracts_e$crime)
tracts_e$estate <- as.numeric(tracts_e$estate)
tracts_e$vacancy <- as.numeric(tracts_e$vacancy)
tracts_e$long <- as.numeric(tracts_e$long)
unique(sapply(tracts_e, class))
# only inlude variables that are reasonable and associated with previous
# literature reviews. This includes:
# City Tax, State Tax, vacancy, crime, X, Y for point level data
# Liquor stores, Fast food stores, race (paa10, pwhite10, pasi10,
#p2more10, ppac10, phisp10, racdiv10, variables associated with income,
# and poverty)
# select only variables that are reasonable
# NOTE(review): hard-coded column positions; breaks if tracts_e changes.
tracts_e2<-tracts_e[,c(29,61,65,67, 71:80, 86:103, 106:107)]
names(tracts_e2)
t_names<-names(tracts_e2)
# look at the correlations between life expectancy in 2011
# and other variables
cors<- cor(tracts_e2$LifeExp11,tracts_e2, use="pairwise.complete.obs")
cors<- t(cors)
corsa<- abs(cors)
corsa
# replace values in the blocks dataset with the
# mean tract values, in order to take care of the few blocks
# that received no values from the tracts shapefile
# 9 blocks have zero values
# replace with Old Town Middle East data (row 51 of the tracts table)
var_names <- names(tracts_e2[, c(1:30)])
# Blocks that received no tract attributes show femhhs10 == 0;
# collect their block IDs.
block_names <- as.vector(blocks$BLK2010[blocks$femhhs10 == 0])
# Columns to back-fill, in the same order as the original one-by-one
# assignments.
fill_cols <- c("LifeExp11", "liquor11", "fastfd11", "tanf11", "tpop10",
               "male10", "female10", "pwhite10", "paa10", "pasi10",
               "p2more10", "ppac10", "racdiv10", "hhs10", "femhhs10",
               "fam10", "hhsize10", "mhhi14", "hh25inc14", "hh40inc14",
               "hh60inc14", "hh75inc14", "hhpov14", "hhchpov14")
zero_rows <- blocks@data$BLK2010 %in% block_names
for (fill_col in fill_cols) {
  blocks@data[zero_rows, fill_col] <- tracts@data[51, fill_col]
}
# Side-by-side choropleths of 2011 life expectancy at block and tract scale.
png(file.path("..", "..", "Figures", "Exploratory_Figures","Life_Exp_Block.png"),
width = 1200, height = 700, pointsize = 20)
# plot 2011 life expectancy by block
par(mfrow=c(1,2))
plotvar0<-blocks$LifeExp11
nclr<-5
plotclr<-brewer.pal(nclr,"YlOrBr")
class<-classIntervals(plotvar0,nclr,style="equal")
colcode<-findColours(class,plotclr)
plot(blocks)
plot(blocks,col=colcode,add=TRUE)
title(main="Baltimore 2011 Life Expectancy by Street Block")
#mtext(side=3,line=.5, text="Life Expectancy")
legend(1390926, 579406, legend=names(attr(colcode, "table")),
fill=attr(colcode,"palette"),title="Life Expectancy",cex=.7,bty="n")
#dev.off()
#png(file.path("..", "..", "Figures", "Exploratory_Figures","Life_Exp_Tract.png"))
# plot 2011 life expectancy by census tract
plotvar0<-tracts$LifeExp11
nclr<-5
plotclr<-brewer.pal(nclr,"YlOrBr")
class<-classIntervals(plotvar0,nclr,style="equal")
colcode<-findColours(class,plotclr)
plot(tracts)
plot(tracts,col=colcode,add=TRUE)
title(main="Baltimore 2011 Life Expetancy by Census Tract")
#mtext(side=3,line=.5, text="Life Expectancy")
legend(1390926, 579406,
legend=names(attr(colcode, "table")),
fill=attr(colcode,"palette"),title="Life Expectancy",cex=.7,bty="n")
par(mfrow=c(1,1))
dev.off()
# now look at something that was the same between the blocks
# and the tracts, like life expectancy and household income
png(file.path("..", "..", "Figures", "Exploratory_Figures","Life_Expec_Histo.png"))
par(mfrow=c(2,2))
hist(tracts$LifeExp11)
hist(tracts$mhhi14)
hist(blocks$LifeExp11)
hist(blocks$mhhi14)
par(mfrow=c(1,1))
dev.off()
# compare distributions of the tract-inherited variables at both scales
summary(blocks@data$LifeExp11)
summary(tracts@data$LifeExp11)
summary(tracts@data$mhhi14)
summary(blocks@data$mhhi14)
var(blocks@data$LifeExp11)
var(tracts@data$LifeExp11)
var(tracts@data$mhhi14)
var(blocks@data$mhhi14)
# not surprisingly, in this case the variance
# is very similar.
# Test the assumption that point variables, such as crime, vacancy, and taxes
# increase with the area in the same way between blocks and tracts.
# If we can assume that, we can say that we can apply a model built
# using tracts onto a block map.
png(file.path("..", "..", "Figures", "Exploratory_Figures","Linear_Increase.png"))
par(mfrow=c(2,2))
plot(tracts@data$tract_area, tracts@data$crime)
plot(blocks@data$block_area, blocks@data$crime)
plot(tracts@data$tract_area, tracts@data$vacancy)
plot(blocks@data$block_area, blocks@data$vacancy)
par(mfrow=c(1,1))
dev.off()
# same thing for taxes
# (these panels go to the interactive device -- no png() wrapper here)
par(mfrow=c(2,2))
plot(tracts@data$tract_area, tracts@data$StateTax)
plot(blocks@data$block_area, blocks@data$StateTax)
plot(tracts@data$tract_area, tracts@data$CityTax)
plot(blocks@data$block_area, blocks@data$CityTax)
par(mfrow=c(1,1))
###########################################################
# Look at the distribution of the crime counts and
# abandoned houses in the blocks and in
#the tracts (CSA). Is it comparable?
png(file.path("..", "..", "Figures", "Exploratory_Figures","Crime_Vacant_Histo.png"))
par(mfrow=c(2,2))
hist(tracts$crime)
hist(tracts$vacancy)
hist(blocks$crime)
hist(blocks$vacancy)
par(mfrow=c(1,1))
dev.off()
############################################################
# get count variables like crime and vacancy per square mile
# NOTE(review): "per square mile" assumes tract_area / block_area are in
# square miles -- the shapefile name says "feet"; confirm the area units.
tracts$crime_mi <- tracts$crime/tracts$tract_area
tracts$vacancy_mi <- tracts$vacancy/tracts$tract_area
blocks$crime_mi <- blocks$crime/blocks$block_area
blocks$vacancy_mi <- blocks$vacancy/blocks$block_area
par(mfrow=c(2,2))
hist(tracts$crime_mi)
hist(tracts$vacancy_mi)
hist(blocks$crime_mi)
hist(blocks$vacancy_mi)
par(mfrow=c(1,1))
# if the crime and vacant house counts are divided by area,
# creating a a crime and vacant house density, the distribution
# of the densities is more comparable between the tracts and the
# blocks
# Side-by-side summaries/variances of counts vs densities at both scales.
summary(blocks@data$crime)
summary(tracts@data$crime)
summary(blocks@data$crime_mi)
summary(tracts@data$crime_mi)
var(blocks@data$crime)
var(tracts@data$crime)
var(blocks@data$crime_mi)
var(tracts@data$crime_mi)
summary(blocks@data$vacancy)
summary(tracts@data$vacancy)
summary(tracts@data$vacancy_mi)
summary(blocks@data$vacancy_mi)
var(blocks@data$vacancy)
var(tracts@data$vacancy)
var(tracts@data$vacancy_mi)
var(blocks@data$vacancy_mi)
# Now look at State and City Taxes
par(mfrow=c(2,2))
hist(tracts$StateTax)
hist(tracts$CityTax)
hist(blocks$StateTax)
hist(blocks$CityTax)
par(mfrow=c(1,1))
summary(blocks@data$StateTax)
summary(tracts@data$StateTax)
summary(tracts@data$CityTax)
summary(blocks@data$CityTax)
var(blocks@data$StateTax)
var(tracts@data$StateTax)
var(tracts@data$CityTax)
var(blocks@data$CityTax)
# the distributions within the blocks and within the tracts seem to be
# very different, making a comparison more difficult. Both City and State
# Taxes are not distributed in a similar way between blocks and tracts.
#The variance in blocks is much higher than the variance in tracts
### STANDARDIZE! ##
# Center each tract variable on its mean, then scale the deviations by
# their variance, producing the *_dev columns used by the models below.
# NOTE(review): classical z-scoring divides by the standard deviation,
# not the variance -- kept as-is to reproduce the original analysis;
# confirm this scaling was intended.
# Columns are created in the same order as the original one-by-one
# assignments, so the @data column layout is unchanged.
for (std_var in c("crime", "crime_mi", "StateTax", "CityTax",
                  "vacancy_mi", "vacancy", "LifeExp11", "liquor11",
                  "fastfd11", "racdiv10", "femhhs10", "mhhi14")) {
  dev_col <- paste0(std_var, "_dev")
  centered <- tracts[[std_var]] - mean(tracts[[std_var]])
  tracts[[dev_col]] <- centered / var(centered)
}
## do the same at the block level
# Same centering / variance scaling for the block-level data.
# NOTE(review): the life-expectancy deviation is named "le11_dev" here
# but "LifeExp11_dev" on the tracts; the inconsistent name is kept
# because downstream code may refer to it.
# Names of the map are the output columns, values the input columns,
# listed in the original creation order.
block_std_map <- c(crime_mi_dev = "crime_mi", crime_dev = "crime",
                   StateTax_dev = "StateTax", CityTax_dev = "CityTax",
                   vacancy_mi_dev = "vacancy_mi", vacancy_dev = "vacancy",
                   le11_dev = "LifeExp11", liquor11_dev = "liquor11",
                   fastfd11_dev = "fastfd11", racdiv10_dev = "racdiv10",
                   femhhs10_dev = "femhhs10", mhhi14_dev = "mhhi14")
for (dev_col in names(block_std_map)) {
  src_vals <- blocks[[block_std_map[[dev_col]]]]
  centered <- src_vals - mean(src_vals)
  blocks[[dev_col]] <- centered / var(centered)
}
# look at the variances after standardization
par(mfrow=c(2,2))
hist(tracts$StateTax_dev)
hist(tracts$CityTax_dev)
hist(blocks$StateTax_dev)
hist(blocks$CityTax_dev)
par(mfrow=c(1,1))
summary(blocks@data$StateTax_dev)
summary(blocks@data$StateTax)
summary(tracts@data$StateTax_dev)
summary(tracts@data$StateTax)
var(tracts@data$crime_mi_dev)
var(blocks@data$crime_mi_dev)
var(tracts@data$vacancy_mi_dev)
var(blocks@data$vacancy_mi_dev)
var(blocks@data$StateTax_dev)
var(tracts@data$StateTax_dev)
var(tracts@data$CityTax_dev)
var(blocks@data$CityTax_dev)
#################################################
# Select the proper variables
# check for collinearity. Select variables you want to check
names(tracts_e2)
vars <- names(tracts_e2)
vars
# NOTE(review): this df is overwritten two statements later; kept only
# as an interactive intermediate.
df <- tracts@data[,(names(tracts) %in% vars)]
# look at the collinearity using vif, leave only those below 10.
# many errors, take away variables that are redundant.
vars_s<- c("liquor11", "fastfd11", "tanf11",
"tpop10", "paa10", "pwhite10" ,
"racdiv10", "hhs10", "femhhs10",
"fam10" , "hhsize10", "mhhi14",
"hhpov14", "hhchpov14", "crime_mi_dev",
"CityTax_dev", "StateTax_dev", "vacancy_mi_dev",
"X", "Y", "femhhs10_dev", "liquor11_dev", "fastfd11_dev",
"racdiv10_dev", "mhhi14_dev")
df <- tracts@data[,(names(tracts) %in% vars_s)]
vif(df)
# NOTE(review): bare `vif` prints the function's source, not a result --
# probably a leftover from interactive use.
vif
# stepwise VIF elimination (usdm); drop variables with VIF above 10
b<- vifstep(df, th=10)
b
vars_s2<- c("tanf11", "pwhite10" , "hhs10",
"femhhs10", "hhpov14", "crime_mi_dev",
"CityTax_dev", "StateTax_dev", "vacancy_mi_dev",
"X", "Y", "femhhs10_dev", "liquor11_dev",
"racdiv10_dev", "mhhi14_dev")
df <- tracts@data[,(names(tracts) %in% vars_s2)]
vif(df)
vif
# tighter threshold (VIF < 5) on the reduced variable set
b_f<- vifstep(df, th=5)
b_f
############################################################
# do some regression at the tract level. Use the biggest possible model
# first. Look at standardized.
f1 <- LifeExp11 ~ liquor11 + tanf11 + pwhite10 + racdiv10 + hhs10 +
fam10 + hhpov14 + StateTax_dev + crime_mi_dev + vacancy_mi_dev + X + Y
m1 <- lm(f1, data = tracts)
summary(m1)
# backward best-subset selection (leaps) on the same formula
reg1<-regsubsets(f1, data = tracts, method = "backward")
summary(reg1)
# non standardized.
f2 <- LifeExp11 ~ liquor11 + tanf11 + pwhite10 + racdiv10 + hhs10 +
fam10 + hhpov14 + StateTax + crime + vacancy + X + Y
m2 <- lm(f2, data = tracts)
summary(m2)
reg2<-regsubsets(f2, data = tracts, method = "backward")
summary(reg2)
# looks like temprary assistance for needy families (TANF) proportion
# liquor, percent of white people, household povery proportion
# And Y coordinate are some of the strongest predictors.
f3 <- LifeExp11 ~ liquor11 + tanf11 + pwhite10 +
hhpov14 + StateTax_dev + crime_mi_dev + vacancy_mi_dev
m3 <- lm(f3, data = tracts)
summary(m3)
reg3<-regsubsets(f3, data = tracts, method = "backward")
summary(reg3)
# non standardized.
f4 <- LifeExp11 ~ liquor11 + tanf11 + pwhite10 +
hhpov14 + StateTax + crime + vacancy
m4 <- lm(f4, data = tracts)
summary(m4)
reg4<-regsubsets(f4, data = tracts, method = "backward")
summary(reg4)
# of the point variables, vacancy and state tax seem to be the
# most important
# f3 plus centroid coordinates as explicit spatial-trend terms
f3s <- LifeExp11 ~ liquor11 + tanf11 + pwhite10 +
hhpov14 + StateTax_dev + crime_mi_dev + vacancy_mi_dev + X+ Y
m3s <- lm(f3s, data = tracts)
summary(m3s)
AIC(m1)
AIC(m2)
AIC(m3)
AIC(m4)
AIC(m3s)
# plot the residuals of m3
png(file.path("..", "..", "Figures", "Exploratory_Figures", "Spatial_Non_Spatial_Residuals.png"),
width = 1200, height = 700, pointsize = 20)
par(mfrow=c(1,2))
# Choropleth of m3 residuals: 5 equal-interval classes, YlOrBr palette.
# NOTE(review): `class` shadows base::class() for the rest of the script.
plotvar0<- residuals(m3)
nclr<-5
plotclr<-brewer.pal(nclr,"YlOrBr")
class<-classIntervals(plotvar0,nclr,style="equal")
colcode<-findColours(class,plotclr)
plot(tracts)
plot(tracts,col=colcode,add=TRUE)
title(main="M3 residuals")
legend(1390926, 577406,legend=names(attr(colcode, "table")),
fill=attr(colcode,"palette"),title="Residuals",cex=.7,bty="n")
# same choropleth for the coordinate-augmented model m3s
plotvar0<- residuals(m3s)
nclr<-5
plotclr<-brewer.pal(nclr,"YlOrBr")
class<-classIntervals(plotvar0,nclr,style="equal")
colcode<-findColours(class,plotclr)
plot(tracts)
plot(tracts,col=colcode,add=TRUE)
title(main="M3s residuals")
legend(1390926, 577406,legend=names(attr(colcode, "table")),
fill=attr(colcode,"palette"),title="Residuals",cex=.7,bty="n")
par(mfrow=c(1,1))
dev.off()
# the residuals look spatially dependent
# calculate Moran's I
# Build the rook-contiguity neighbour list. snap = 300 allows a gap of up
# to 300 map units between polygons, to account for an "island" in
# southern Baltimore that would otherwise have no neighbours.
tracts_nb<-poly2nb(tracts,queen=FALSE, snap = 300)
plot(tracts)
plot(tracts_nb,coordinates(tracts),pch=19,cex=.6,add=TRUE)
# Row-standardized spatial weights (style = "W"): each tract's neighbour
# weights sum to 1. W is the dense matrix form of the same weights.
tracts_nb_w<-nb2listw(tracts_nb,style="W")
W<-listw2mat(tracts_nb_w)
###################################################
# Lagrange multiplier diagnostics: which form of spatial autocorrelation
# (lag vs. error) is present in each candidate model's residuals.
lm.LMtests(m4, tracts_nb_w, test = "all")
lm.LMtests(m3, tracts_nb_w, test = "all")
lm.LMtests(m3s, tracts_nb_w, test = "all")
# a single Moran's I test for Baltimore life expectancy
moran<-moran.test(tracts$LifeExp11,listw=nb2listw(tracts_nb,style="W" ))
moran
# the Moran's I p-value is highly significant, suggesting that there is
# spatial clustering!
# make a correlogram of Moran's I, lagged by neighborhood distance
# (zero.policy = TRUE tolerates regions with no neighbours at higher lags)
cor<-sp.correlogram(neighbours=tracts_nb,var=tracts$LifeExp11,order=8,method="I",style="W",
                    zero.policy =TRUE)
plot(cor, main="Moran's I Correlogram of Baltimore Life Expectancy")
# save this correlogram to disk
png(file.path("..", "..", "Figures", "Exploratory_Figures","M_I_corr.png"))
plot(cor, main="Moran's I Correlogram of Baltimore Life Expectancy")
dev.off()
# another way to test for spatial clustering: plot the mean residual of
# each tract's neighbours against the tract's own residual
png(file.path("..", "..", "Figures", "Exploratory_Figures", "Residual_Adj_Resid.png"),
    width = 1200, height = 700, pointsize = 20)
par(mfrow=c(1,2))
# resnb[i] = mean residual over tract i's neighbours (tracts_nb holds the
# neighbour index vectors)
resnb <- sapply(tracts_nb, function(x) mean(m3$residuals[x]))
cor(residuals(m3), resnb)
plot(residuals(m3), resnb, xlab='Residuals', ylab='Mean adjacent residuals', main = "M3")
# same diagnostic for the model with X/Y trend terms
resnb <- sapply(tracts_nb, function(x) mean(m3s$residuals[x]))
cor(residuals(m3s), resnb)
plot(residuals(m3s), resnb, xlab='Residuals', ylab='Mean adjacent residuals', main = "M3s")
par(mfrow=c(1,1))
dev.off()
# again, looks like there is a trend
# to account for spatial autocorrelation, use a
# spatial simultaneous autoregressive lag model
# estimation. (lagsarlm)
# Fit the spatial simultaneous autoregressive lag model for f3.
m3_s <- lagsarlm(f3, data = tracts, tracts_nb_w)
summary(m3_s)
# compare the unlagged (OLS) and lagged models; the lagged model
# minimizes the AIC
m3 <- lm(f3, data = tracts)
anova(m3_s, m3)
# Moran's I test on the lagged model's residuals: with the lag term
# included, spatial clustering should no longer be significant
moran_resid <- moran.test(residuals(m3_s), listw = nb2listw(tracts_nb, style = "W"))
moran_resid
# Correlogram of the lagged-model residuals.
# (bug fix: this section previously re-ran the correlogram on
# tracts$LifeExp11, so the plot described as "residuals" was actually the
# raw outcome again; it also refit m3_s a second time for no effect)
cor <- sp.correlogram(neighbours = tracts_nb, var = residuals(m3_s), order = 8,
                      method = "I", style = "W", zero.policy = TRUE)
plot(cor, main = "Moran's I Correlogram of Baltimore Life Expectancy Residuals")
# save the correlogram
png(file.path("..", "..", "Figures", "Exploratory_Figures", "M_I_R_corr.png"))
plot(cor, main = "Moran's I Correlogram of Baltimore Life Expectancy Residuals")
dev.off()
#######################################################################
# apply the model built at the census tract level to the block level  #
# hold-out evaluation: split the tracts into a training and a testing set
set.seed(123)
samp<- sample(nrow(tracts), 30)
train_set <- tracts[samp,]
test_set <- tracts[-samp,]
# refit every candidate model on the training tracts only
m1 <- lm(f1, data = train_set)
summary(m1)
f1.5 <- LifeExp11 ~ femhhs10_dev +
  crime_mi_dev + racdiv10_dev + vacancy_mi_dev + X + Y + mhhi14_dev
m1.5 <- lm(f1.5, data = train_set)
summary(m1.5)
m2 <- lm(f2, data = train_set)
summary(m2)
m3 <- lm(f3, data = train_set)
summary(m3)
m3s <- lm(f3s, data = train_set)
summary(m3s)
m4 <- lm(f4, data = train_set)
summary(m4)
# a more basic model
f5 <- LifeExp11 ~ femhhs10_dev +
  crime_mi_dev + racdiv10_dev + vacancy_mi_dev + X + Y
m5 <- lm(f5, data = train_set)
summary(m5)
# out-of-sample predictions for each candidate on the held-out tracts
p1 <- predict(m1, newdata = test_set)
p1.5 <- predict(m1.5, newdata = test_set)
p2 <- predict(m2, newdata = test_set)
p3 <- predict(m3, newdata = test_set)
p3s <- predict(m3s, newdata = test_set)
p4 <- predict(m4, newdata = test_set)
p5 <- predict(m5, newdata = test_set)
# compare in-sample AIC, hold-out RMSE, and the correlation between
# predicted and observed life expectancy in the test set
AIC(m1)
AIC(m1.5)
AIC(m2)
AIC(m3)
AIC(m4)
AIC(m3s)
AIC(m5)
# rmse() comes from a package loaded earlier in the file — presumably
# Metrics or hydroGOF; confirm against the script header
rmse(test_set@data$LifeExp11, p1)
rmse(test_set@data$LifeExp11, p1.5)
rmse(test_set@data$LifeExp11, p2)
rmse(test_set@data$LifeExp11, p3)
rmse(test_set@data$LifeExp11, p3s)
rmse(test_set@data$LifeExp11, p4)
rmse(test_set@data$LifeExp11, p5)
cor(p1, test_set@data$LifeExp11)
cor(p1.5, test_set@data$LifeExp11)
cor(p2, test_set@data$LifeExp11)
cor(p3, test_set@data$LifeExp11)
cor(p3s, test_set@data$LifeExp11)
cor(p4, test_set@data$LifeExp11)
cor(p5, test_set@data$LifeExp11)
# looks like p1 has the best predictive ability
# NOTE(review): the comment above says p1, but m1.5 is used below to
# predict at the block level — confirm which model was intended
bp1 <- predict(m1.5, newdata = blocks, type = "response", interval = "predict")
length(bp1)
# make a test blocks dataset to practice
blocks_t<- blocks
# column 1 = point prediction; columns 2/3 = lower/upper prediction bounds
blocks_t$bp1 <- bp1[,1]
# width of the 95% prediction interval
blocks_t$bp1_var <- bp1[,3]- bp1[,2]
summary(blocks_t$bp1)
summary(blocks_t$bp1_var)
par(mfrow=c(1,2))
# choropleth of the block-level bp1 predictions
plotvar0<-blocks_t$bp1
nclr<-5
plotclr<-brewer.pal(nclr,"YlOrBr")
class<-classIntervals(plotvar0,nclr,style="pretty")
colcode<-findColours(class,plotclr)
plot(blocks_t)
plot(blocks_t,col=colcode,add=TRUE)
title(main="Baltimore Predicted Life Expectancy by Block")
mtext(side=3,line=.5, text="")
legend(1390926, 579406, legend=names(attr(colcode, "table")),
       fill=attr(colcode,"palette"),title="Life Expectancy",cex=.7,bty="n")
# plot the prediction-interval width for comparison
plotvar0<-blocks_t$bp1_var
nclr<-5
plotclr<-brewer.pal(nclr,"YlOrBr")
class<-classIntervals(plotvar0,nclr,style="pretty")
colcode<-findColours(class,plotclr)
plot(blocks_t)
plot(blocks_t,col=colcode,add=TRUE)
title(main="Baltimore Predicted Life Expectancy Confidence Interval")
mtext(side=3,line=.5, text="")
legend(1390926, 579406, legend=names(attr(colcode, "table")),
       fill=attr(colcode,"palette"),title="Confidence Interval",cex=.7,bty="n")
par(mfrow=c(1,1))
# looks like one of the parameters is creating large errors downtown.
# see if any of the tract level parameters look out of the ordinary
# looks like downtown has a very high number of liquor stores
# relative to the rest of the tracts. Median is 1.034, Downtown is 8.532
# for number of businesses that possess Class A liquor licences.
png(file.path("..", "..", "Figures", "Exploratory_Figures","Liquor_Tracts.png"))
plotvar0<-tracts$liquor11
nclr<-5
plotclr<-brewer.pal(nclr,"YlOrBr")
class<-classIntervals(plotvar0,nclr,style="equal")
colcode<-findColours(class,plotclr)
plot(tracts)
plot(tracts,col=colcode,add=TRUE)
title(main="Baltimore Liquor Store Density")
mtext(side=3,line=.5, text="")
legend(1390926, 579406, legend=names(attr(colcode, "table")),
       fill=attr(colcode,"palette"),title="Liquor Stores",cex=.7,bty="n")
dev.off()
# restore the working directory two levels up
setwd(file.path("..", ".."))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lgb.plot.importance.R
\name{lgb.plot.importance}
\alias{lgb.plot.importance}
\title{Plot feature importance as a bar graph}
\usage{
lgb.plot.importance(tree_imp, top_n = 10, measure = "Gain",
left_margin = 10, cex = NULL)
}
\arguments{
\item{tree_imp}{a \code{data.table} returned by \code{\link{lgb.importance}}.}
\item{top_n}{maximal number of top features to include into the plot.}
\item{measure}{the name of importance measure to plot, can be "Gain", "Cover" or "Frequency".}
\item{left_margin}{(base R barplot) allows to adjust the left margin size to fit feature names.}
\item{cex}{(base R barplot) passed as \code{cex.names} parameter to \code{barplot}.}
}
\value{
The \code{lgb.plot.importance} function creates a \code{barplot}
and silently returns a processed data.table with \code{top_n} features sorted by defined importance.
}
\description{
Plot previously calculated feature importance: Gain, Cover and Frequency, as a bar graph.
}
\details{
The graph represents each feature as a horizontal bar of length proportional to the defined importance of a feature.
Features are shown ranked in a decreasing importance order.
}
\examples{
data(agaricus.train, package = "lightgbm")
train <- agaricus.train
dtrain <- lgb.Dataset(train$data, label = train$label)
params <- list(
objective = "binary"
, learning_rate = 0.01
, num_leaves = 63
, max_depth = -1
, min_data_in_leaf = 1
, min_sum_hessian_in_leaf = 1
)
model <- lgb.train(params, dtrain, 10)
tree_imp <- lgb.importance(model, percentage = TRUE)
lgb.plot.importance(tree_imp, top_n = 10, measure = "Gain")
}
| /R-package/man/lgb.plot.importance.Rd | permissive | gomlfx/LightGBM | R | false | true | 1,684 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lgb.plot.importance.R
\name{lgb.plot.importance}
\alias{lgb.plot.importance}
\title{Plot feature importance as a bar graph}
\usage{
lgb.plot.importance(tree_imp, top_n = 10, measure = "Gain",
left_margin = 10, cex = NULL)
}
\arguments{
\item{tree_imp}{a \code{data.table} returned by \code{\link{lgb.importance}}.}
\item{top_n}{maximal number of top features to include into the plot.}
\item{measure}{the name of importance measure to plot, can be "Gain", "Cover" or "Frequency".}
\item{left_margin}{(base R barplot) allows to adjust the left margin size to fit feature names.}
\item{cex}{(base R barplot) passed as \code{cex.names} parameter to \code{barplot}.}
}
\value{
The \code{lgb.plot.importance} function creates a \code{barplot}
and silently returns a processed data.table with \code{top_n} features sorted by defined importance.
}
\description{
Plot previously calculated feature importance: Gain, Cover and Frequency, as a bar graph.
}
\details{
The graph represents each feature as a horizontal bar of length proportional to the defined importance of a feature.
Features are shown ranked in a decreasing importance order.
}
\examples{
data(agaricus.train, package = "lightgbm")
train <- agaricus.train
dtrain <- lgb.Dataset(train$data, label = train$label)
params <- list(
objective = "binary"
, learning_rate = 0.01
, num_leaves = 63
, max_depth = -1
, min_data_in_leaf = 1
, min_sum_hessian_in_leaf = 1
)
model <- lgb.train(params, dtrain, 10)
tree_imp <- lgb.importance(model, percentage = TRUE)
lgb.plot.importance(tree_imp, top_n = 10, measure = "Gain")
}
|
# A ton of example plots with code:
# http://r-statistics.co/Top50-Ggplot2-Visualizations-MasterList-R-Code.html | /links.R | no_license | michaelfrancenelson/ggplot2_examples | R | false | false | 113 | r | # A ton of example plots with code:
# http://r-statistics.co/Top50-Ggplot2-Visualizations-MasterList-R-Code.html |
# add date and time
# parse the time-of-day strings; strptime() attaches the current date,
# which the two format() calls below overwrite with the real dates
subsettedData$Time <- strptime(subsettedData$Time, format="%H:%M:%S")
# rows 1-1440 are the first day, rows 1441-2880 the second
# (assumes exactly two full days of minute-resolution data — TODO confirm)
subsettedData[1:1440,"Time"] <- format(subsettedData[1:1440,"Time"],"2007-02-01 %H:%M:%S")
subsettedData[1441:2880,"Time"] <- format(subsettedData[1441:2880,"Time"],"2007-02-02 %H:%M:%S")
# NOTE(review): after the format() calls Time holds "YYYY-MM-DD HH:MM:SS"
# strings, so pasting Date back on and parsing with "%d/%m/%Y %H:%M:%S"
# looks inconsistent — verify whether these datetime columns are used
subsettedData$datetime<-strptime(paste(subsettedData$Date,subsettedData$Time,sep=""),"%d/%m/%Y %H:%M:%S")
datetime <- strptime(paste(subsettedData$Date, subsettedData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
# line plot of global active power over the two days, saved as 480x480 png
png("plot2.png",width = 480,height = 480)
plot(subsettedData$Time,as.numeric(as.character(subsettedData$Global_active_power)),type="l",xlab="",ylab="Global Active Power (kilowatts)")
title(main = "Global active Power vs time ")
dev.off()
| /Plot2.R | no_license | imethun/Exploratory-Data-Analysis | R | false | false | 733 | r | # add date and time
subsettedData$Time <- strptime(subsettedData$Time, format="%H:%M:%S")
subsettedData[1:1440,"Time"] <- format(subsettedData[1:1440,"Time"],"2007-02-01 %H:%M:%S")
subsettedData[1441:2880,"Time"] <- format(subsettedData[1441:2880,"Time"],"2007-02-02 %H:%M:%S")
subsettedData$datetime<-strptime(paste(subsettedData$Date,subsettedData$Time,sep=""),"%d/%m/%Y %H:%M:%S")
datetime <- strptime(paste(subsettedData$Date, subsettedData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
png("plot2.png",width = 480,height = 480)
plot(subsettedData$Time,as.numeric(as.character(subsettedData$Global_active_power)),type="l",xlab="",ylab="Global Active Power (kilowatts)")
title(main = "Global active Power vs time ")
dev.off()
|
\name{fore.glambda.wge}
\alias{fore.glambda.wge}
\title{Forecast using a G(lambda) model}
\description{Find forecasts using a specified G(lambda) model}
\usage{fore.glambda.wge(data.orig,lambda=0,offset=60,phi=0,h=0,n.ahead=10,lastn=TRUE,plot=TRUE)}
\arguments{
\item{data.orig}{Time series data in the original time scale}
\item{lambda}{The value of lambda under the Box-Cox time transformation with parameter lambda.}
\item{offset}{Offset (or shift) value in the G(lambda) model.}
\item{phi}{Coefficients of the AR component of the AR model fit to the dual data}
\item{h}{Value of h which will be calculated to produce the desired number of forecasts in the original time scale}
\item{n.ahead}{Number of values to forecast}
\item{lastn}{If lastn=TRUE then the last n.ahead values are forecast. Otherwise, if lastn=FALSE the next n.ahead values are forecast}
\item{plot}{If plot=TRUE then plots of the data and forecasts are plotted}
}
\details{Forecasts for an AR model fit to the data in the original time scale are also calculated and optionally plotted}
\value{
\item{f.ar}{Forecasts using AR model fit to data in original time}
\item{f.glam}{Forecasts using AR model fit to the dual and then reinterpolated}
}
\references{Applied Time Series Analysis with R, second edition by Woodward, Gray, and Elliott}
\author{Wayne Woodward}
\examples{data(fig13.2c)
fore.glambda.wge(fig13.2c,lambda=-.4,offset=63,phi=c(0.93,-0.32,-0.15,-0.15,-0.17),n.ahead=30)
}
\keyword{ forecasts }
\keyword{ G(lambda) }
\keyword{ TVF } | /man/fore.glambda.wge.Rd | no_license | cran/tswge | R | false | false | 1,566 | rd | \name{fore.glambda.wge}
\alias{fore.glambda.wge}
\title{Forecast using a G(lambda) model}
\description{Find forecasts using a specified G(lambda) model}
\usage{fore.glambda.wge(data.orig,lambda=0,offset=60,phi=0,h=0,n.ahead=10,lastn=TRUE,plot=TRUE)}
\arguments{
\item{data.orig}{Time series data in the original time scale}
\item{lambda}{The value of lambda under the Box-Cox time transformation with parameter lambda.}
\item{offset}{Offset (or shift) value in the G(lambda) model.}
\item{phi}{Coefficients of the AR component of the AR model fit to the dual data}
\item{h}{Value of h which will be calculated to produce the desired number of forecasts in the original time scale}
\item{n.ahead}{Number of values to forecast}
\item{lastn}{If lastn=TRUE then the last n.ahead values are forecast. Otherwise, if lastn=FALSE the next n.ahead values are forecast}
\item{plot}{If plot=TRUE then plots of the data and forecasts are plotted}
}
\details{Forecasts for an AR model fit to the data in the original time scale are also calculated and optionally plotted}
\value{
\item{f.ar}{Forecasts using AR model fit to data in original time}
\item{f.glam}{Forecasts using AR model fit to the dual and then reinterpolated}
}
\references{Applied Time Series Analysis with R, second edition by Woodward, Gray, and Elliott}
\author{Wayne Woodward}
\examples{data(fig13.2c)
fore.glambda.wge(fig13.2c,lambda=-.4,offset=63,phi=c(0.93,-0.32,-0.15,-0.15,-0.17),n.ahead=30)
}
\keyword{ forecasts }
\keyword{ G(lambda) }
\keyword{ TVF } |
# DataFest analysis: load the four csv exports.
# NOTE(review): absolute setwd() ties the script to one machine — consider
# running from the data directory or using here::here() instead
setwd('/Users/saulforman/Desktop/datafest')
rpe = read.csv('rpe.csv')
games = read.csv('games.csv')
gps = read.csv('gps.csv')
wellness = read.csv('wellness.csv')
#### MULTI-DEFINITION OF IMPORTANCE
## Define Average Active Sprint Speed (AASS).
## Per player-game, the "active" speed threshold is mean + 2 sd of the
## non-zero speeds. Computing both moments in one summarise() replaces the
## original three-pass mean/sd/join pipeline with a single pass over gps;
## speed_summ keeps the same (PlayerID, GameID, active_speed) columns that
## the LOS computation below joins on.
speed_summ = gps %>%
  filter(Speed != 0) %>%
  group_by(PlayerID, GameID) %>%
  summarise(active_speed = mean(Speed) + 2 * sd(Speed))
## AASS = mean speed over the frames at or above the active threshold
gps_sprint = gps %>%
  left_join(speed_summ, by = c('PlayerID', 'GameID')) %>%
  filter(Speed >= active_speed) %>%
  group_by(PlayerID, GameID) %>%
  summarise(AASS = mean(Speed))
## Define Average Active Acceleration Impulse (AAAI).
## Active threshold per player-game = mean + 2 sd of non-zero AccelImpulse,
## computed in a single pass (was: separate mean/sd pipelines plus a join).
accel_summ = gps %>%
  filter(AccelImpulse != 0) %>%
  group_by(PlayerID, GameID) %>%
  summarise(active_accel = mean(AccelImpulse) + 2 * sd(AccelImpulse))
## AAAI = mean AccelImpulse over frames at or above the threshold.
## (bug fix: the original took mean(active_accel) — the mean of the
## constant per-group threshold, not of the impulses themselves — unlike
## the AASS definition above, which averages Speed)
gps_sprint2 = gps %>%
  left_join(accel_summ, by = c('PlayerID', 'GameID')) %>%
  filter(AccelImpulse >= active_accel) %>%
  group_by(PlayerID, GameID) %>%
  summarise(AAAI = mean(AccelImpulse))
## Each player's AAAI looks close (not perfectly) normal
boxplot(AAAI ~ PlayerID, data = gps_sprint2)
## In the aggregate, AAAI appears normally distributed around its mean
hist(gps_sprint2$AAAI)
## Define Average Active Acceleration Load (AAAL), same construction as AAAI.
load_summ = gps %>%
  filter(AccelLoad != 0) %>%
  group_by(PlayerID, GameID) %>%
  summarise(active_load = mean(AccelLoad) + 2 * sd(AccelLoad))
## AAAL = mean AccelLoad over frames at or above the threshold.
## (bug fix: the original averaged the constant threshold active_load
## rather than the AccelLoad values themselves)
gps_sprint3 = gps %>%
  left_join(load_summ, by = c('PlayerID', 'GameID')) %>%
  filter(AccelLoad >= active_load) %>%
  group_by(PlayerID, GameID) %>%
  summarise(AAAL = mean(AccelLoad))
## Each player's AAAL looks close (not perfectly) normal
boxplot(AAAL ~ PlayerID, data = gps_sprint3)
## In the aggregate, AAAL appears normally distributed around its mean
## (bug fix: the original re-plotted gps_sprint2$AAAI here)
hist(gps_sprint3$AAAL)
## Average Length of Sprint: keep the frames at or above the active speed
## threshold, then assign runs of consecutive FrameIDs to the same sprint
## Group. NOTE(review): groups can span player/game boundaries whenever
## FrameIDs happen to be consecutive across them — confirm that is intended.
LOS = gps %>%
  left_join(speed_summ, by = c('PlayerID', 'GameID')) %>%
  filter(Speed >= active_speed)
LOS$Group = 0
Group = 1
n_los = nrow(LOS)
for (i in seq_len(n_los)) {
  LOS[i, ]$Group = Group
  # start a new group when the next row is not the consecutive frame
  # (bug fix: the original read LOS[i + 1, ] on the last row, which yields
  # an NA comparison and crashes the if() condition)
  if (i == n_los || LOS[i, ]$FrameID != (LOS[i + 1, ]$FrameID - 1)) {
    Group = Group + 1
  }
}
library(lubridate)
## Per-sprint duration = last frame time minus first frame time, computed
## in one summarise() (was: separate max and min pipelines plus a join).
LOS4 = LOS %>%
  mutate(Time = hms(Time)) %>%
  group_by(PlayerID, GameID, Group) %>%
  summarise(max = max(Time), min = min(Time))
LOS4$duration = LOS4$max-LOS4$min
## mean sprint duration per player-game
LOS_summ = LOS4 %>%
  group_by(PlayerID, GameID) %>%
  summarise(mean_duration = mean(duration))
hist(LOS_summ$mean_duration)
# game_dates: GameID -> Date lookup from the games table
game_dates = games %>%
  select(GameID, Date)
# RPE entries recorded on game days only, tagged with their GameID
game_rpe = rpe %>%
  filter(SessionType == 'Game') %>%
  inner_join(game_dates, by = c('Date'))
# assemble the per-player per-game outcome table: sprint speed (AASS),
# acceleration impulse (AAAI), acceleration load (AAAL), mean sprint
# duration, and the game-day RPE fields
outcomes = gps_sprint %>% left_join(gps_sprint2, by = c('PlayerID', 'GameID')) %>%
  left_join(gps_sprint3, by = c('PlayerID', 'GameID')) %>%
  left_join(LOS_summ, by = c('PlayerID', 'GameID')) %>%
  left_join(game_rpe, by = c('PlayerID', 'GameID')) %>%
  select(-c(Date, Training, SessionType))
library(fastDummies)
# one-hot encode the categorical columns
append = dummy_cols(outcomes)
append = append %>% rename(Best_NA = BestOutOfMyself_NA, Best_Absolutely = BestOutOfMyself_Absolutely, Best_Somewhat = BestOutOfMyself_Somewhat)
# NOTE(review): positional column indices (20, 16) assume a fixed column
# layout from dummy_cols() — confirm before rerunning on new data
colnames(append)[20] = 'Best_Not_at_All'
append = append[,-16]
# GOD = "games on day": number of games sharing the same date
max_game = game_dates %>%
  group_by(Date) %>%
  summarise(GOD = max(GameID)-min(GameID)+1)
append_games = append %>% left_join(game_dates, by = 'GameID') %>%
  left_join(max_game, by = 'Date') %>%
  select(-Date)
library(bnstruct)
## Convert the numeric outcome columns to a matrix for imputation.
## NOTE(review): the hard-coded column ranges (3:15, 17:19) assume a fixed
## column order — verify after any upstream schema change
append_imp = as.matrix(append_games[,c(3:15,17:19)])
## Impute Missing Values Using 5 Nearest Neighbors
append_games[,c(3:15,17:19)] = knn.impute(append_imp, k = 5, cat.var = NA, to.impute = 1:nrow(append_imp),
                                          using = 1:nrow(append_imp))
## Drop Best_NA (perfectly collinear with the other Best_* dummies),
## then re-attach the game Date
append_games = append_games[,-16] %>%
  left_join(game_dates, by = 'GameID')
head(append_games)
## Scale & Center
append_games = append_games %>%
group_by(PlayerID, Date) %>%
summarise(AASS = mean(AASS), AAAI = mean(AAAI), AAAL = mean(AAAL),
mean_duration = mean(mean_duration), Duration = mean(Duration),
RPE = mean(RPE), SessionLoad = mean(SessionLoad),
DailyLoad = mean(DailyLoad), AcuteLoad = mean(AcuteLoad),
ChronicLoad = mean(ChronicLoad), AcuteChronicRatio = mean(AcuteChronicRatio),
ObjectiveRating = mean(ObjectiveRating), FocusRating = mean(FocusRating),
Best_Somewhat = max(Best_Somewhat), Best_Not_at_All = max(Best_Not_at_All),
GOD = max(GOD))
append_scale = append_games
append_scale[,3:18] = scale(append_scale[,3:18], center = TRUE, scale = TRUE)
## Create and Plot Covariance Matrix
cormat = round(cor(append_scale[,3:18]),2)
library(reshape2)
melted_cormat = melt(cormat)
head(melted_cormat)
library(ggplot2)
ggplot(data = melted_cormat, aes(x=Var1, y=Var2, fill=value)) +
geom_tile() +
theme(axis.text.x = element_text(angle = 90, hjust = 1))
## Use PCA on Outcome Variables
out_pca = prcomp(append_scale[,3:18])
summary(out_pca)
append_scale2 = append_scale
append_scale2$pca = out_pca$x[,1]
append_scale2 = append_scale2 %>%
select(PlayerID, Date, pca)
####### ASSEMBLE PREDICTORS MATRIX
wellness = wellness %>%
select(-c(BedTime, WakeTime))
wellness[,'TrainingReadiness'] = as.numeric(sub('%', '', wellness$TrainingReadiness, fixed = TRUE))/100
wellness2 = dummy_cols(wellness, select_columns = colnames(wellness)[10:15])
wellness2 = wellness2[,-(10:15)]
colnames(wellness2)[27] = 'NutritionAdjustment_IDK'
colnames(wellness2)[15] = 'Illness_Slightly_Off'
head(wellness2)
wellness_scaled = wellness2 %>%
group_by(PlayerID) %>%
select(-c(USGMeasurement_NA, NutritionAdjustment_NA, Nutrition_NA, Menstruation_NA,
Menstruation_No, Illness_Yes, Pain_Yes)) %>%
mutate(Fatigue = scale(Fatigue), Soreness = scale(Soreness),
Desire = scale(Desire), Irritability = scale(Irritability),
SleepHours = scale(SleepHours), SleepQuality = scale(SleepQuality),
MonitoringScore = scale(MonitoringScore), USG = scale(USG),
TrainingReadiness = scale(TrainingReadiness),
Pain_No = scale(Pain_No),
Illness_No = scale(Illness_No), Illness_Slightly_Off = scale(Illness_Slightly_Off),
Menstruation_Yes = scale(Menstruation_Yes),
Nutrition_Excellent = scale(Nutrition_Excellent),
Nutrition_Okay = scale(Nutrition_Okay), Nutrition_Poor = scale(Nutrition_Poor),
NutritionAdjustment_Yes = scale(NutritionAdjustment_Yes),
NutritionAdjustment_No = scale(NutritionAdjustment_No),
NutritionAdjustment_IDK = scale(NutritionAdjustment_IDK),
USGMeasurement_No = scale(USGMeasurement_No),
USGMeasurement_Yes = scale(USGMeasurement_Yes)) %>%
mutate(Date = as.character(as.Date(Date) + 1))
head(outcomes)
outcomes_t1 = outcomes[,-c(6:16)]
colnames(outcomes_t1)[3:5] = paste(colnames(outcomes)[3:5], 't1', sep = '_')
outcomes_t1 = outcomes_t1 %>% left_join(game_dates, by = 'GameID') %>%
select(-GameID) %>%
group_by(PlayerID) %>%
mutate(AASS_t1 = scale(AASS_t1),
AAAI_t1 = scale(AAAI_t1),
AAAL_t1 = scale(AAAL_t1)) %>%
mutate(Date = as.character(as.Date(Date) + 1)) %>%
group_by(PlayerID, Date) %>%
summarise(AASS_t1 = mean(AASS_t1),
AAAI_t1 = mean(AAAI_t1),
AAAL_t1 = mean(AAAL_t1))
X_scaled = wellness_scaled %>% left_join(outcomes_t1, by = c('PlayerID','Date'))
rpe_train = dummy_cols(rpe, select_columns = c('SessionType','BestOutOfMyself'))
head(rpe_train)
colnames(rpe_train)[19] = 'SessionType_Mobility_Recovery'
rpe_train = rpe_train %>%
mutate(Date = as.character(as.Date(Date) + 1)) %>%
select(-c(Training, RPE, AcuteChronicRatio, `BestOutOfMyself_Not at all`)) %>%
group_by(PlayerID) %>%
mutate(Duration = scale(Duration),
SessionLoad = scale(SessionLoad), DailyLoad = scale(DailyLoad),
AcuteLoad = scale(AcuteLoad), ChronicLoad = scale(ChronicLoad),
ObjectiveRating = scale(ObjectiveRating), FocusRating = scale(FocusRating)) %>%
group_by(PlayerID, Date) %>%
summarise(Duration = mean(Duration), SessionLoad = mean(SessionLoad),
DailyLoad = mean(DailyLoad), AcuteLoad = mean(AcuteLoad),
ChronicLoad = mean(ChronicLoad), ObjectiveRating = mean(ObjectiveRating),
FocusRating = mean(FocusRating), SessionType_Mobility_Recovery = max(SessionType_Mobility_Recovery),
SessionType_Game = max(SessionType_Game), SessionType_Skills = max(SessionType_Skills),
SessionType_Conditioning = max(SessionType_Conditioning),
SessionType_Combat = max(SessionType_Combat), SessionType_NA = max(SessionType_NA),
SessionType_Speed = max(SessionType_Speed), BestOutOfMyself_Absolutely = max(BestOutOfMyself_Absolutely),
BestOutOfMyself_NA = max(BestOutOfMyself_NA))
rpe_train[,3:18] = sapply(rpe_train[,3:18], as.numeric)
data = rpe_train %>% left_join(append_scale2, by = c('PlayerID', 'Date')) %>%
left_join(outcomes_t1, by = c('PlayerID', 'Date')) %>%
filter(is.na(pca) != TRUE)
## Roughfix NAs for random forest
X = as.matrix(data[,3:22])
rf = as.data.frame(rfImpute(X, data$pca))
## Train Random Forest
library(caret)
library(randomForest)
inTrain = createDataPartition(rf$y, p=0.7, list = FALSE)
training = rf[inTrain,-18]
testing = rf[-inTrain,-18]
ctrl = trainControl(method = 'repeatedcv', number = 5, repeats = 5)
model = randomForest(y ~., data = training, method = 'rf', trControl = ctrl)
testing$predicted = predict(model, newdata=testing)
varImpPlot(model, type=2)
save(model, file = "mymodel.rda")
| /code - datafest.R | no_license | saulforman/DataFest2019 | R | false | false | 10,544 | r | setwd('/Users/saulforman/Desktop/datafest')
rpe = read.csv('rpe.csv')
games = read.csv('games.csv')
gps = read.csv('gps.csv')
wellness = read.csv('wellness.csv')
#### MULTI-DEFINITION OF IMPORTANCE
## Define Average Active Sprint Speed
speed_sd = gps %>%
filter(Speed != 0) %>%
group_by(PlayerID, GameID) %>%
summarise(speed_sd = sd(Speed))
speed_mean = gps %>%
filter(Speed != 0) %>%
group_by(PlayerID, GameID) %>%
summarise(speed_mean = mean(Speed))
speed_summ = speed_mean %>% left_join(speed_sd, by = c('PlayerID', 'GameID'))
speed_summ$active_speed = speed_summ$speed_mean + 2*speed_summ$speed_sd
speed_summ = speed_summ %>%
select(-c(speed_mean, speed_sd))
gps_sprint = gps %>%
left_join(speed_summ, by = c('PlayerID', 'GameID')) %>%
filter(Speed >= active_speed) %>%
group_by(PlayerID, GameID) %>%
summarise(AASS = mean(Speed))
## Define Average Active Acceleration Impulse
accel_sd = gps %>%
filter(AccelImpulse != 0) %>%
group_by(PlayerID, GameID) %>%
summarise(accel_sd = sd(AccelImpulse))
accel_mean = gps %>%
filter(AccelImpulse != 0) %>%
group_by(PlayerID, GameID) %>%
summarise(accel_mean = mean(AccelImpulse))
accel_summ = accel_mean %>% left_join(accel_sd, by = c('PlayerID', 'GameID'))
accel_summ$active_accel = accel_summ$accel_mean + 2*accel_summ$accel_sd
accel_summ = accel_summ %>%
select(-c(accel_mean, accel_sd))
gps_sprint2 = gps %>%
left_join(accel_summ, by = c('PlayerID', 'GameID')) %>%
filter(AccelImpulse >= active_accel) %>%
group_by(PlayerID, GameID) %>%
summarise(AAAI = mean(active_accel))
## Each player's AAAI looks close (not perfectly) normal
boxplot(AAAI ~ PlayerID, data = gps_sprint2)
## In the aggregate, AAAI appears normally distributed around mean
hist(gps_sprint2$AAAI)
## Define Average Active Acceleration Load
load_sd = gps %>%
filter(AccelLoad != 0) %>%
group_by(PlayerID, GameID) %>%
summarise(load_sd = sd(AccelLoad))
load_mean = gps %>%
filter(AccelLoad != 0) %>%
group_by(PlayerID, GameID) %>%
summarise(load_mean = mean(AccelLoad))
load_summ = load_mean %>% left_join(load_sd, by = c('PlayerID', 'GameID'))
load_summ$active_load = load_summ$load_mean + 2*load_summ$load_sd
load_summ = load_summ %>%
select(-c(load_mean, load_sd))
gps_sprint3 = gps %>%
left_join(load_summ, by = c('PlayerID', 'GameID')) %>%
filter(AccelLoad >= active_load) %>%
group_by(PlayerID, GameID) %>%
summarise(AAAL = mean(active_load))
## Each player's AAAI looks close (not perfectly) normal
boxplot(AAAL ~ PlayerID, data = gps_sprint3)
## In the aggregate, AAAI appears normally distributed around mean
hist(gps_sprint2$AAAI)
## Average Length of Sprint
LOS = gps %>%
left_join(speed_summ, by = c('PlayerID', 'GameID')) %>%
filter(Speed >= active_speed)
LOS$Group = 0
Group = 1
for(i in 1:nrow(LOS)) {
if(LOS[i,]$FrameID == (LOS[i+1,]$FrameID - 1)) {
LOS[i,]$Group = Group
} else {
LOS[i,]$Group = Group
Group = Group + 1
}
}
library(lubridate)
LOS2 = LOS %>%
mutate(Time = hms(Time)) %>%
group_by(PlayerID, GameID, Group) %>%
summarise(max = max(Time))
LOS3 = LOS %>%
mutate(Time = hms(Time)) %>%
group_by(PlayerID, GameID, Group) %>%
summarise(min = min(Time))
LOS4 = LOS2 %>% left_join(LOS3, by = c('PlayerID', 'GameID', 'Group'))
LOS4$duration = LOS4$max-LOS4$min
LOS_summ = LOS4 %>%
group_by(PlayerID, GameID) %>%
summarise(mean_duration = mean(duration))
hist(LOS_summ$mean_duration)
game_dates = games %>%
select(GameID, Date)
game_rpe = rpe %>%
filter(SessionType == 'Game') %>%
inner_join(game_dates, by = c('Date'))
outcomes = gps_sprint %>% left_join(gps_sprint2, by = c('PlayerID', 'GameID')) %>%
left_join(gps_sprint3, by = c('PlayerID', 'GameID')) %>%
left_join(LOS_summ, by = c('PlayerID', 'GameID')) %>%
left_join(game_rpe, by = c('PlayerID', 'GameID')) %>%
select(-c(Date, Training, SessionType))
library(fastDummies)
## One-hot encode the character/factor columns of the outcome table.
append = dummy_cols(outcomes)
append = append %>% rename(Best_NA = BestOutOfMyself_NA, Best_Absolutely = BestOutOfMyself_Absolutely, Best_Somewhat = BestOutOfMyself_Somewhat)
## NOTE(review): positional column edits below (20, 16, c(3:15,17:19)) are
## fragile -- they silently break if dummy_cols() ever emits columns in a
## different order. Prefer name-based selection.
colnames(append)[20] = 'Best_Not_at_All'
append = append[,-16]
## GOD = games on the same date (max - min GameID + 1); assumes GameIDs for
## one date are consecutive -- TODO confirm.
max_game = game_dates %>%
group_by(Date) %>%
summarise(GOD = max(GameID)-min(GameID)+1)
append_games = append %>% left_join(game_dates, by = 'GameID') %>%
left_join(max_game, by = 'Date') %>%
select(-Date)
library(bnstruct)
## Convert to Matrix
append_imp = as.matrix(append_games[,c(3:15,17:19)])
## Impute Missing Values Using 5 Nearest Neighbors
append_games[,c(3:15,17:19)] = knn.impute(append_imp, k = 5, cat.var = NA, to.impute = 1:nrow(append_imp),
using = 1:nrow(append_imp))
## Drop Best_NA (Perfectly Colinear)
append_games = append_games[,-16] %>%
left_join(game_dates, by = 'GameID')
head(append_games)
## Scale & Center
## Collapse to one row per player per date (multiple games on a date are
## averaged; dummy indicators take their max, i.e. "any session of that kind").
append_games = append_games %>%
group_by(PlayerID, Date) %>%
summarise(AASS = mean(AASS), AAAI = mean(AAAI), AAAL = mean(AAAL),
mean_duration = mean(mean_duration), Duration = mean(Duration),
RPE = mean(RPE), SessionLoad = mean(SessionLoad),
DailyLoad = mean(DailyLoad), AcuteLoad = mean(AcuteLoad),
ChronicLoad = mean(ChronicLoad), AcuteChronicRatio = mean(AcuteChronicRatio),
ObjectiveRating = mean(ObjectiveRating), FocusRating = mean(FocusRating),
Best_Somewhat = max(Best_Somewhat), Best_Not_at_All = max(Best_Not_at_All),
GOD = max(GOD))
append_scale = append_games
## Standardize the numeric outcome columns (z-scores) before PCA.
append_scale[,3:18] = scale(append_scale[,3:18], center = TRUE, scale = TRUE)
## Create and Plot Covariance Matrix
cormat = round(cor(append_scale[,3:18]),2)
library(reshape2)
melted_cormat = melt(cormat)
head(melted_cormat)
library(ggplot2)
## Correlation heatmap of the scaled outcomes.
ggplot(data = melted_cormat, aes(x=Var1, y=Var2, fill=value)) +
geom_tile() +
theme(axis.text.x = element_text(angle = 90, hjust = 1))
## Use PCA on Outcome Variables
out_pca = prcomp(append_scale[,3:18])
summary(out_pca)
append_scale2 = append_scale
## Keep only the first principal component as a composite outcome score.
append_scale2$pca = out_pca$x[,1]
append_scale2 = append_scale2 %>%
select(PlayerID, Date, pca)
####### ASSEMBLE PREDICTORS MATRIX
wellness = wellness %>%
select(-c(BedTime, WakeTime))
## Parse "NN%" strings into a 0-1 proportion.
wellness[,'TrainingReadiness'] = as.numeric(sub('%', '', wellness$TrainingReadiness, fixed = TRUE))/100
## One-hot encode the six categorical wellness columns, then drop the originals.
## NOTE(review): positional indices (10:15, 27, 15) assume a fixed column
## layout of the wellness file -- fragile if the source data changes.
wellness2 = dummy_cols(wellness, select_columns = colnames(wellness)[10:15])
wellness2 = wellness2[,-(10:15)]
colnames(wellness2)[27] = 'NutritionAdjustment_IDK'
colnames(wellness2)[15] = 'Illness_Slightly_Off'
head(wellness2)
## Standardize each predictor WITHIN player (group_by before scale), drop
## redundant/NA dummy levels, and shift Date forward one day so wellness on
## day d predicts outcomes on day d+1.
wellness_scaled = wellness2 %>%
group_by(PlayerID) %>%
select(-c(USGMeasurement_NA, NutritionAdjustment_NA, Nutrition_NA, Menstruation_NA,
Menstruation_No, Illness_Yes, Pain_Yes)) %>%
mutate(Fatigue = scale(Fatigue), Soreness = scale(Soreness),
Desire = scale(Desire), Irritability = scale(Irritability),
SleepHours = scale(SleepHours), SleepQuality = scale(SleepQuality),
MonitoringScore = scale(MonitoringScore), USG = scale(USG),
TrainingReadiness = scale(TrainingReadiness),
Pain_No = scale(Pain_No),
Illness_No = scale(Illness_No), Illness_Slightly_Off = scale(Illness_Slightly_Off),
Menstruation_Yes = scale(Menstruation_Yes),
Nutrition_Excellent = scale(Nutrition_Excellent),
Nutrition_Okay = scale(Nutrition_Okay), Nutrition_Poor = scale(Nutrition_Poor),
NutritionAdjustment_Yes = scale(NutritionAdjustment_Yes),
NutritionAdjustment_No = scale(NutritionAdjustment_No),
NutritionAdjustment_IDK = scale(NutritionAdjustment_IDK),
USGMeasurement_No = scale(USGMeasurement_No),
USGMeasurement_Yes = scale(USGMeasurement_Yes)) %>%
mutate(Date = as.character(as.Date(Date) + 1))
head(outcomes)
## Lagged (t+1) versions of the three sprint outcomes, scaled within player
## and aggregated per player/date, date-shifted to align with the predictors.
outcomes_t1 = outcomes[,-c(6:16)]
colnames(outcomes_t1)[3:5] = paste(colnames(outcomes)[3:5], 't1', sep = '_')
outcomes_t1 = outcomes_t1 %>% left_join(game_dates, by = 'GameID') %>%
select(-GameID) %>%
group_by(PlayerID) %>%
mutate(AASS_t1 = scale(AASS_t1),
AAAI_t1 = scale(AAAI_t1),
AAAL_t1 = scale(AAAL_t1)) %>%
mutate(Date = as.character(as.Date(Date) + 1)) %>%
group_by(PlayerID, Date) %>%
summarise(AASS_t1 = mean(AASS_t1),
AAAI_t1 = mean(AAAI_t1),
AAAL_t1 = mean(AAAL_t1))
X_scaled = wellness_scaled %>% left_join(outcomes_t1, by = c('PlayerID','Date'))
## One-hot encode session type and self-reported effort for the RPE records.
rpe_train = dummy_cols(rpe, select_columns = c('SessionType','BestOutOfMyself'))
head(rpe_train)
colnames(rpe_train)[19] = 'SessionType_Mobility_Recovery'
## Scale loads within player, shift Date forward one day (predict next-day
## outcome), then aggregate to one row per player/date.
rpe_train = rpe_train %>%
mutate(Date = as.character(as.Date(Date) + 1)) %>%
select(-c(Training, RPE, AcuteChronicRatio, `BestOutOfMyself_Not at all`)) %>%
group_by(PlayerID) %>%
mutate(Duration = scale(Duration),
SessionLoad = scale(SessionLoad), DailyLoad = scale(DailyLoad),
AcuteLoad = scale(AcuteLoad), ChronicLoad = scale(ChronicLoad),
ObjectiveRating = scale(ObjectiveRating), FocusRating = scale(FocusRating)) %>%
group_by(PlayerID, Date) %>%
summarise(Duration = mean(Duration), SessionLoad = mean(SessionLoad),
DailyLoad = mean(DailyLoad), AcuteLoad = mean(AcuteLoad),
ChronicLoad = mean(ChronicLoad), ObjectiveRating = mean(ObjectiveRating),
FocusRating = mean(FocusRating), SessionType_Mobility_Recovery = max(SessionType_Mobility_Recovery),
SessionType_Game = max(SessionType_Game), SessionType_Skills = max(SessionType_Skills),
SessionType_Conditioning = max(SessionType_Conditioning),
SessionType_Combat = max(SessionType_Combat), SessionType_NA = max(SessionType_NA),
SessionType_Speed = max(SessionType_Speed), BestOutOfMyself_Absolutely = max(BestOutOfMyself_Absolutely),
BestOutOfMyself_NA = max(BestOutOfMyself_NA))
## Force numeric storage (scale() leaves matrix columns behind).
rpe_train[,3:18] = sapply(rpe_train[,3:18], as.numeric)
## Modelling frame: predictors joined to the PC1 outcome; keep rows where
## the outcome exists.
data = rpe_train %>% left_join(append_scale2, by = c('PlayerID', 'Date')) %>%
left_join(outcomes_t1, by = c('PlayerID', 'Date')) %>%
filter(is.na(pca) != TRUE)
## Roughfix NAs for random forest
X = as.matrix(data[,3:22])
rf = as.data.frame(rfImpute(X, data$pca))
## Train Random Forest
library(caret)
library(randomForest)
inTrain = createDataPartition(rf$y, p=0.7, list = FALSE)
## NOTE(review): column 18 is dropped positionally here -- confirm which
## variable that is and select by name instead.
training = rf[inTrain,-18]
testing = rf[-inTrain,-18]
ctrl = trainControl(method = 'repeatedcv', number = 5, repeats = 5)
## NOTE(review): method= and trControl= are caret::train() arguments;
## randomForest() does not use them, so no repeated CV actually happens
## here. Did you mean train(y ~ ., data = training, method = 'rf', ...)?
model = randomForest(y ~., data = training, method = 'rf', trControl = ctrl)
testing$predicted = predict(model, newdata=testing)
varImpPlot(model, type=2)
save(model, file = "mymodel.rda")
|
library("ggplot2")
## Plot mean percentage error vs. training-set size for two variants
## ('a' and 'b') after pruning. Input "file" is a headerless CSV whose
## columns are (by position): V1 = variant, V2 = training size,
## V5/V6 = train/test error after pruning -- TODO confirm layout.
x = read.csv("file", header=F)
## Average train/test error per training size, separately for each variant.
a = aggregate(x[x[,1]=='a',5:6], list(x[x[,1]=='a',2]), mean)
b = aggregate(x[x[,1]=='b',5:6], list(x[x[,1]=='b',2]), mean)
## Reusable aesthetics: colour doubles as the legend label for each series.
atrain = aes(y = V5, colour = "A Entrenamiento")
atest = aes(y = V6, colour = "A Test")
btrain = aes(y = V5, colour = "B Entrenamiento")
btest = aes(y = V6, colour = "B Test")
p<- ggplot(a, aes(Group.1)) +
geom_point(atrain) +
geom_point(atest) +
geom_line(atrain) +
geom_line(atest) +
geom_point(data = b, btrain) +
geom_point(data = b, btest) +
geom_line(data = b, btrain) +
geom_line(data = b, btest) +
labs(y = "Error porcentual promedio") +
labs(x = "Cantidad de puntos de entrenamiento") +
labs(title = "Error porcentual After Pruning") +
theme(legend.title=element_blank()) +
theme(legend.position = "bottom") +
theme(panel.background = element_rect(fill = 'white', colour = 'black')) +
#scale_x_continuous(breaks=c(2,4,8,16,32)) +
scale_x_continuous(breaks=a[,1]) +
scale_color_manual(values=c("#CC6666", "#9999CC","#AC0000","#909990"))
## NOTE(review): pdf() is given no ".pdf" extension, so the output file is
## literally named "Error porcentual After Pruning".
pdf("Error porcentual After Pruning")
print(p)
dev.off()
## Same plot for columns V3/V4 = train/test error BEFORE pruning.
x = read.csv("file", header=F)
a = aggregate(x[x[,1]=='a',3:4], list(x[x[,1]=='a',2]), mean)
b = aggregate(x[x[,1]=='b',3:4], list(x[x[,1]=='b',2]), mean)
atrain = aes(y = V3, colour = "A - Entrenamiento")
atest = aes(y = V4, colour = "A - Test")
btrain = aes(y = V3, colour = "B - Entrenamiento")
btest = aes(y = V4, colour = "B - Test")
p <- ggplot(a, aes(Group.1)) +
geom_point(atrain) +
geom_point(atest) +
geom_line(atrain) +
geom_line(atest) +
geom_point(data = b, btrain) +
geom_point(data = b, btest) +
geom_line(data = b, btrain) +
geom_line(data = b, btest) +
labs(y = "Error porcentual promedio") +
labs(x = "Cantidad de puntos de entrenamiento") +
labs(title = "Error porcentual Before Pruning") +
theme(legend.title=element_blank()) +
theme(panel.background = element_rect(fill = 'white', colour = 'black')) +
scale_color_manual(values=c("#CC6666", "#9999CC","#AC0000","#909990"))
pdf("Error porcentual Before Pruning")
print(p)
dev.off()
| /TP1/ej8/graficarError.R | no_license | ljfigueroa/Aprendizaje-Automatizado | R | false | false | 2,091 | r | library("ggplot2")
x = read.csv("file", header=F)
a = aggregate(x[x[,1]=='a',5:6], list(x[x[,1]=='a',2]), mean)
b = aggregate(x[x[,1]=='b',5:6], list(x[x[,1]=='b',2]), mean)
atrain = aes(y = V5, colour = "A Entrenamiento")
atest = aes(y = V6, colour = "A Test")
btrain = aes(y = V5, colour = "B Entrenamiento")
btest = aes(y = V6, colour = "B Test")
p<- ggplot(a, aes(Group.1)) +
geom_point(atrain) +
geom_point(atest) +
geom_line(atrain) +
geom_line(atest) +
geom_point(data = b, btrain) +
geom_point(data = b, btest) +
geom_line(data = b, btrain) +
geom_line(data = b, btest) +
labs(y = "Error porcentual promedio") +
labs(x = "Cantidad de puntos de entrenamiento") +
labs(title = "Error porcentual After Pruning") +
theme(legend.title=element_blank()) +
theme(legend.position = "bottom") +
theme(panel.background = element_rect(fill = 'white', colour = 'black')) +
#scale_x_continuous(breaks=c(2,4,8,16,32)) +
scale_x_continuous(breaks=a[,1]) +
scale_color_manual(values=c("#CC6666", "#9999CC","#AC0000","#909990"))
pdf("Error porcentual After Pruning")
print(p)
dev.off()
x = read.csv("file", header=F)
a = aggregate(x[x[,1]=='a',3:4], list(x[x[,1]=='a',2]), mean)
b = aggregate(x[x[,1]=='b',3:4], list(x[x[,1]=='b',2]), mean)
atrain = aes(y = V3, colour = "A - Entrenamiento")
atest = aes(y = V4, colour = "A - Test")
btrain = aes(y = V3, colour = "B - Entrenamiento")
btest = aes(y = V4, colour = "B - Test")
p <- ggplot(a, aes(Group.1)) +
geom_point(atrain) +
geom_point(atest) +
geom_line(atrain) +
geom_line(atest) +
geom_point(data = b, btrain) +
geom_point(data = b, btest) +
geom_line(data = b, btrain) +
geom_line(data = b, btest) +
labs(y = "Error porcentual promedio") +
labs(x = "Cantidad de puntos de entrenamiento") +
labs(title = "Error porcentual Before Pruning") +
theme(legend.title=element_blank()) +
theme(panel.background = element_rect(fill = 'white', colour = 'black')) +
scale_color_manual(values=c("#CC6666", "#9999CC","#AC0000","#909990"))
pdf("Error porcentual Before Pruning")
print(p)
dev.off()
|
library(shiny)
library(reshape2)
library(ggplot2)

# Shiny server: summarises the Titanic contingency table and renders a
# stacked bar chart whose x / y variables come from the UI inputs `xvar`
# and `yvar`; the two unselected dimensions become facets.
shinyServer(
  function(input, output) {
    # Reshape the 4-d Titanic table into one row per Class/Sex/Age with
    # survivor counts, total passengers and survival rate.
    t <- melt(Titanic)
    t <- dcast(t, Class + Sex + Age ~ Survived)
    t <- transform(t, total = No + Yes)
    t <- transform(t, rate = Yes / total)
    output$myplot <- renderPlot({
      xvar <- input$xvar
      xvars <- c("Class", "Sex", "Age")
      xvars <- xvars[-which(xvars == xvar)]
      # Fixed: the previous version built the plot call as a string and ran
      # eval(parse(text = ...)) -- fragile and unsafe with user-controlled
      # input. The `.data` pronoun selects columns by name directly.
      ggplot(t, aes(x = .data[[input$xvar]], y = .data[[input$yvar]], fill = Class)) +
        geom_bar(stat = "identity") +
        facet_wrap(vars(.data[[xvars[1]]], .data[[xvars[2]]])) +
        ylab("")
    })
  }
)
| /server.R | no_license | Dixhom/Coursera-Developing-Data-Products-Assignment | R | false | false | 705 | r | library(shiny)
library(reshape2)
library(ggplot2)
shinyServer(
function(input, output) {
t <- melt(Titanic)
t <- dcast(t, Class + Sex + Age ~ Survived)
t <- transform(t, total = No + Yes)
t <- transform(t, rate = Yes / total)
output$myplot <- renderPlot({
xvar <- input$xvar
xvars <- c("Class", "Sex", "Age")
xvars <- xvars[-which(xvars == xvar)]
eval(parse(text = paste0("ggplot(t, aes(x = ", input$xvar, ", y = ", input$yvar, ", fill = Class)) +",
"geom_bar(stat=\"identity\") + facet_wrap(", xvars[1], "~", xvars[2], ") + ylab(\"\")"
)))
})
}
)
|
library(dplyr)
library(jpeg)
# Download a placekitten placeholder image and draw it on the current
# graphics device.
#
# x        image width in pixels (and the height unless `y` is given)
# y        image height in pixels; defaults to `x`, preserving the original
#          square-image behaviour
# destfile path the JPEG is saved to; defaults to "y.jpg" as before
getcutecat <- function(x, y = x, destfile = "y.jpg") {
  url <- sprintf("http://placekitten.com/g/%s/%s", x, y)
  # mode = "wb" keeps the binary payload intact on Windows.
  download.file(url, destfile, mode = "wb")
  jj <- readJPEG(destfile, native = TRUE)
  # Empty unit-square plot used purely as a canvas for the raster image.
  plot(0:1, 0:1, type = "n", ann = FALSE, axes = FALSE)
  rasterImage(jj, 0, 0, 1, 1)
}
| /getcat.R | no_license | albgarre/regression_SEAST_2017 | R | false | false | 377 | r |
library(dplyr)
library(jpeg)
# Download a placekitten placeholder image and draw it on the current
# graphics device.
#
# x        image width in pixels (and the height unless `y` is given)
# y        image height in pixels; defaults to `x`, preserving the original
#          square-image behaviour
# destfile path the JPEG is saved to; defaults to "y.jpg" as before
getcutecat <- function(x, y = x, destfile = "y.jpg") {
  url <- sprintf("http://placekitten.com/g/%s/%s", x, y)
  # mode = "wb" keeps the binary payload intact on Windows.
  download.file(url, destfile, mode = "wb")
  jj <- readJPEG(destfile, native = TRUE)
  # Empty unit-square plot used purely as a canvas for the raster image.
  plot(0:1, 0:1, type = "n", ann = FALSE, axes = FALSE)
  rasterImage(jj, 0, 0, 1, 1)
}
|
#!/usr/bin/env Rscript
## NOTE(review): rm(list=ls()) wipes the caller's workspace if this file is
## source()d interactively; acceptable only as a standalone Rscript.
rm(list=ls())
# library to read xls files
#library(gdata)
# read subject information files:
subject.info = read.table("outb01_subject_info.txt", header=TRUE)
## NOTE(review): if Sex is read as character (R >= 4 default), as.numeric()
## yields NA, not 1/2 codes -- confirm the column type / use a factor first.
subject.info$Sex = as.numeric(subject.info$Sex)
## z-score age across subjects.
subject.info$age = scale(subject.info$age)
# delete subjects with severe head motion or other problems:
subject.delete = read.table("subject_delete.txt", header=F)
print(subject.delete)
## Rows whose SUBJID appears in the exclusion list.
delete.idx = subject.info$SUBJID%in%subject.delete$V1
#delete.idx = rep(FALSE, length(delete.idx))
subject.info = subject.info[!delete.idx,]
write.table(subject.info, "outd01_ptsd_subject_info.txt", row.names=F)
print(head(subject.info))
print(dim(subject.info))
# -------------------------- read pca features ---------------------
## Load the three PCA feature tables and prefix their columns so the
## component sets stay distinguishable after any later merge.
pc1 = read.table("outc02_pca_feature1.txt", header=TRUE)
colnames(pc1)=paste("pc1", colnames(pc1), sep = "_")
pc2 = read.table("outc02_pca_feature2.txt", header=TRUE)
colnames(pc2)=paste("pc2", colnames(pc2), sep = "_")
pc3 = read.table("outc02_pca_feature3.txt", header=TRUE)
colnames(pc3)=paste("pc3", colnames(pc3), sep = "_")
pc.loading = read.table("outc02_pca_loading.txt", header=TRUE)
| /scriptd_stats01_read_pca_features.R | no_license | NxNiki/BrainImagingAnalysisCode | R | false | false | 1,151 | r | #!/usr/bin/env Rscript
## NOTE(review): rm(list=ls()) wipes the caller's workspace if this file is
## source()d interactively; acceptable only as a standalone Rscript.
rm(list=ls())
# library to read xls files
#library(gdata)
# read subject information files:
subject.info = read.table("outb01_subject_info.txt", header=TRUE)
## NOTE(review): if Sex is read as character (R >= 4 default), as.numeric()
## yields NA, not 1/2 codes -- confirm the column type / use a factor first.
subject.info$Sex = as.numeric(subject.info$Sex)
## z-score age across subjects.
subject.info$age = scale(subject.info$age)
# delete subjects with severe head motion or other problems:
subject.delete = read.table("subject_delete.txt", header=F)
print(subject.delete)
## Rows whose SUBJID appears in the exclusion list.
delete.idx = subject.info$SUBJID%in%subject.delete$V1
#delete.idx = rep(FALSE, length(delete.idx))
subject.info = subject.info[!delete.idx,]
write.table(subject.info, "outd01_ptsd_subject_info.txt", row.names=F)
print(head(subject.info))
print(dim(subject.info))
# -------------------------- read pca features ---------------------
## Load the three PCA feature tables and prefix their columns so the
## component sets stay distinguishable after any later merge.
pc1 = read.table("outc02_pca_feature1.txt", header=TRUE)
colnames(pc1)=paste("pc1", colnames(pc1), sep = "_")
pc2 = read.table("outc02_pca_feature2.txt", header=TRUE)
colnames(pc2)=paste("pc2", colnames(pc2), sep = "_")
pc3 = read.table("outc02_pca_feature3.txt", header=TRUE)
colnames(pc3)=paste("pc3", colnames(pc3), sep = "_")
pc.loading = read.table("outc02_pca_loading.txt", header=TRUE)
|
## Negative values of `warn` suppress warning reporting entirely.
## NOTE(review): consider leaving warnings visible during development.
options(warn=-2)
# ------------------------------------------------------------------------------
# Import dependencies
# ------------------------------------------------------------------------------
library(pacman)
p_load(this.path, yaml)
## Anchor the working directory at this script's folder so the relative
## source() paths below resolve regardless of where the script is launched.
setwd(this.path::this.dir())
source('../lib/import.R')
import('./init.R')
# ------------------------------------------------------------------------------
#
#
#
#
# ------------------------------------------------------------------------------
# Variables
# ------------------------------------------------------------------------------
## Columns excluded from modelling: id and snapshot-month fields plus two
## features (presumably found uninformative -- confirm with the feature
## analysis that motivated the list).
excluded_columns = c(
'numero_de_cliente',
'foto_mes',
'ccajas_transacciones',
'Master_mpagominimo'
)
#
#
#
#
# ------------------------------------------------------------------------------
# Functions
# ------------------------------------------------------------------------------
# Load the September 2020 training split.
#
# type selects the dataset flavour (sub-directory under dataset/), e.g.
# 'original'. Fixed: the working directory is now restored via on.exit()
# instead of leaking the setwd() change to the caller.
load_train_set <- function(type = 'original') {
  old_wd <- setwd(this.path::this.dir())
  on.exit(setwd(old_wd), add = TRUE)
  loadcsv(paste0('../../dataset/', type, '/paquete_premium_202009.csv'))
}
# Load the November 2020 test split.
#
# type selects the dataset flavour (sub-directory under dataset/), e.g.
# 'original'. Fixed: the working directory is now restored via on.exit()
# instead of leaking the setwd() change to the caller.
load_test_set <- function(type = 'original') {
  old_wd <- setwd(this.path::this.dir())
  on.exit(setwd(old_wd), add = TRUE)
  loadcsv(paste0('../../dataset/', type, '/paquete_premium_202011.csv'))
}
# Build a closure that produces timestamped output file names of the form
# <path>/<YYYY-mm-dd_HH-MM-SS>_<model_name>_gain_<gain>.
# `params` is accepted for interface compatibility but does not influence
# the generated name.
build_gain_filename_fn <- function(gain) {
  function(path, model_name, params) {
    timestamp <- strftime(Sys.time(), format = "%Y-%m-%d_%H-%M-%S")
    paste0(path, '/', timestamp, '_', model_name, '_gain_', gain)
  }
}
# Build the two-column submission frame expected by Kaggle:
# the client id plus the model prediction for that client.
kaggle_df <- function(test_set, test_pred) {
  test_set %>%
    dplyr::transmute(numero_de_cliente, Predicted = test_pred)
}
# Prepare a dev set for binary modelling:
#  * rename clase_ternaria -> target
#  * recode the ternary class to 0/1: the third factor level (coded 2 after
#    the 0-based shift) becomes 0, everything else 1
#  * drop the columns listed in `excludes`
#
# Fixed: selecting with a bare external vector (select(-excludes)) is
# ambiguous tidyselect -- deprecated in dplyr >= 1.1 and wrong if a column
# literally named "excludes" exists; all_of() makes the intent explicit.
preprocessing <- function(dev_set, excludes = c()) {
  dev_set %>%
    dplyr::rename(target = clase_ternaria) %>%
    dplyr::mutate(target = as.numeric(as.factor(target)) - 1) %>%
    dplyr::mutate(target = ifelse(target == 2, 0, 1)) %>%
    dplyr::select(-dplyr::all_of(excludes))
}
show_groups <- function(data) data %>% group_by(target) %>% tally() | /src/common.R | no_license | magistery-tps/dm-eyf-tp1 | R | false | false | 1,935 | r | options(warn=-2)
# ------------------------------------------------------------------------------
# Import dependencies
# ------------------------------------------------------------------------------
library(pacman)
p_load(this.path, yaml)
## Anchor the working directory at this script's folder so the relative
## source() paths below resolve regardless of where the script is launched.
setwd(this.path::this.dir())
source('../lib/import.R')
import('./init.R')
# ------------------------------------------------------------------------------
#
#
#
#
# ------------------------------------------------------------------------------
# Variables
# ------------------------------------------------------------------------------
## Columns excluded from modelling: id and snapshot-month fields plus two
## features (presumably found uninformative -- confirm with the feature
## analysis that motivated the list).
excluded_columns = c(
'numero_de_cliente',
'foto_mes',
'ccajas_transacciones',
'Master_mpagominimo'
)
#
#
#
#
# ------------------------------------------------------------------------------
# Functions
# ------------------------------------------------------------------------------
# Load the September 2020 training split.
#
# type selects the dataset flavour (sub-directory under dataset/), e.g.
# 'original'. Fixed: the working directory is now restored via on.exit()
# instead of leaking the setwd() change to the caller.
load_train_set <- function(type = 'original') {
  old_wd <- setwd(this.path::this.dir())
  on.exit(setwd(old_wd), add = TRUE)
  loadcsv(paste0('../../dataset/', type, '/paquete_premium_202009.csv'))
}
# Load the November 2020 test split.
#
# type selects the dataset flavour (sub-directory under dataset/), e.g.
# 'original'. Fixed: the working directory is now restored via on.exit()
# instead of leaking the setwd() change to the caller.
load_test_set <- function(type = 'original') {
  old_wd <- setwd(this.path::this.dir())
  on.exit(setwd(old_wd), add = TRUE)
  loadcsv(paste0('../../dataset/', type, '/paquete_premium_202011.csv'))
}
# Build a closure that produces timestamped output file names of the form
# <path>/<YYYY-mm-dd_HH-MM-SS>_<model_name>_gain_<gain>.
# `params` is accepted for interface compatibility but does not influence
# the generated name.
build_gain_filename_fn <- function(gain) {
  function(path, model_name, params) {
    timestamp <- strftime(Sys.time(), format = "%Y-%m-%d_%H-%M-%S")
    paste0(path, '/', timestamp, '_', model_name, '_gain_', gain)
  }
}
# Build the two-column submission frame expected by Kaggle:
# the client id plus the model prediction for that client.
kaggle_df <- function(test_set, test_pred) {
  test_set %>%
    dplyr::transmute(numero_de_cliente, Predicted = test_pred)
}
# Prepare a dev set for binary modelling:
#  * rename clase_ternaria -> target
#  * recode the ternary class to 0/1: the third factor level (coded 2 after
#    the 0-based shift) becomes 0, everything else 1
#  * drop the columns listed in `excludes`
#
# Fixed: selecting with a bare external vector (select(-excludes)) is
# ambiguous tidyselect -- deprecated in dplyr >= 1.1 and wrong if a column
# literally named "excludes" exists; all_of() makes the intent explicit.
preprocessing <- function(dev_set, excludes = c()) {
  dev_set %>%
    dplyr::rename(target = clase_ternaria) %>%
    dplyr::mutate(target = as.numeric(as.factor(target)) - 1) %>%
    dplyr::mutate(target = ifelse(target == 2, 0, 1)) %>%
    dplyr::select(-dplyr::all_of(excludes))
}
show_groups <- function(data) data %>% group_by(target) %>% tally() |
#' A function to read and re-arrange the data in different ways
#'
#' This internal function imports the data and outputs only those variables that are needed to run the model
#' according to the information provided by the user.
#' @param data A data frame in which to find variables supplied in \code{model.eff} and \code{model.cost}. Among these,
#' effectiveness, cost and treatment indicator (only two arms) variables must always be provided and named 'e', 'c' and 't' respectively.
#' @param model.eff A formula expression in conventional \code{R} linear modelling syntax. The response must be a health economics
#' effectiveness outcome ('e') whose name must correspond to that used in \code{data}, and
#' any covariates are given on the right-hand side. If there are no covariates, specify \code{1} on the right hand side.
#' By default, covariates are placed on the "location" parameter of the distribution through a linear model.
#' Random effects can also be specified for each model parameter.
#' @param model.cost A formula expression in conventional \code{R} linear modelling syntax. The response must be a health economics
#' cost outcome ('c') whose name must correspond to that used in \code{data}, and any covariates are given on the right-hand side.
#' If there are no covariates, specify \code{1} on the right hand side. By default, covariates are placed on the "location"
#' parameter of the distribution through a linear model. Random effects can also be specified for each model parameter.
#' @param model.me A formula expression in conventional \code{R} linear modelling syntax. The response must be indicated with the
#' term 'me'(missing effects) and any covariates used to estimate the probability of missing effects are given on the right-hand side.
#' If there are no covariates, specify \code{1} on the right hand side. By default, covariates are placed on the "probability" parameter for the missing effects through a logistic-linear model.
#' Random effects can also be specified for each model parameter.
#' @param model.mc A formula expression in conventional R linear modelling syntax. The response must be indicated with the
#' term 'mc'(missing costs) and any covariates used to estimate the probability of missing costs should be given on the right-hand side.
#' If there are no covariates, specify \code{1} on the right hand side. By default, covariates are placed on the "probability" parameter for the missing costs through a logistic-linear model.
#' Random effects can also be specified for each model parameter.
#' @param type Type of missingness mechanism assumed. Choices are Missing At Random (MAR) and Missing Not At Random (MNAR).
#' @param center Logical. If \code{center} is \code{TRUE} all the covariates in the model are centered.
#' @keywords read data
#' @importFrom stats na.omit sd as.formula model.matrix model.frame model.response terms
#' @export
#' @examples
#' #Internal function only
#' #no examples
#' #
#' #
data_read_selection <- function(data, model.eff, model.cost, model.me, model.mc, type, center) {
if(is.data.frame(data) == FALSE) {
stop("object data must be provided as data frame")
}
if(any(names(data) == "e") == TRUE & any(names(data) == "c") == TRUE) {
e <- as.name("e")
c <- as.name("c")
}
cov_matrix <- subset(data, select = -c(e, c))
cov_matrix <- cov_matrix[!unlist(vapply(cov_matrix, anyNA, logical(1)))]
is.formula <- function (x) { inherits(x, "formula") }
if(is.formula(model.eff) == FALSE | is.formula(model.cost) == FALSE) {
stop("model.eff and/or model.cost must be formula objects")
}
if(is.logical(center) == FALSE) { stop("center must be either TRUE or FALSE") }
fixed_e <- nobars_(model.eff)
fixed_c <- nobars_(model.cost)
random_e <- fb(model.eff)
random_c <- fb(model.cost)
fname_re_e_coeff <- as.formula(paste("e", "0", sep=" ~ "))
fname_re_c_coeff <- as.formula(paste("c", "0", sep=" ~ "))
fname_re_me_coeff <- as.formula(paste("me", "0", sep=" ~ "))
fname_re_mc_coeff <- as.formula(paste("mc", "0", sep=" ~ "))
clusn_e <- clusn_c <- NULL
clusn_me <- clusn_mc <- NULL
if(!is.null(random_e) & length(random_e) > 1 | !is.null(random_c) & length(random_c) > 1) {
stop("random effects can be included in the formula only through a single expression within brackets")
}
if(all(names(model.frame(fixed_e, data = data)) %in% c("e", names(cov_matrix))) == FALSE |
all(names(model.frame(fixed_c, data = data)) %in% c("c", "e", names(cov_matrix))) == FALSE) {
stop("partially-observed covariates cannot be included in the fixed effects model")
}
if(all(names(model.frame(fixed_e, data = data)) %in% names(data)) == FALSE |
all(names(model.frame(fixed_c, data = data)) %in% names(data)) == FALSE) {
stop("you must provide names in the formula that correspond to those in the data")
}
if("e" %in% labels(terms(fixed_e)) | "c" %in% labels(terms(fixed_c))) {
stop("please remove 'e' from the right hand side of model.eff and/or 'c' from the right hand side of model.cost")
}
if(names(model.frame(fixed_e, data = data)[1]) != "e") {
stop("you must set 'e' as the response in the formula model.eff")
}
if("c" %in% names(model.frame(fixed_e, data = data))) {
stop("dependence allowed only through the cost model; please remove 'c' from model.eff")
}
if(names(model.frame(fixed_c, data = data)[1]) != "c") {
stop("you must set 'c' as the response in the formula model.cost")
}
if("e" %in% labels(terms(fixed_c))) {
if(length(grep(":e", labels(terms(fixed_c)))) != 0 | length(grep("e:", labels(terms(fixed_c)))) != 0) {
stop("no interaction effects for 'e' is allowed")
}
}
if("t" %in% names(model.frame(fixed_c, data = data)) | "t" %in% names(model.frame(fixed_e, data = data))) {
stop("treatment indicator must be provided only in the data. Please remove 't' from 'model.eff' and/or 'model.cost'")
}
index_mis_e <- which(is.na(data$e))
index_mis_c <- which(is.na(data$c))
data2 <- data
data$e[is.na(data$e) == TRUE] <- -999999
data$c[is.na(data$c) == TRUE] <- -999999
mf_e_fixed <- model.frame(formula = fixed_e, data = data)
mf_c_fixed <- model.frame(formula = fixed_c, data = data)
terms <- NULL
x_e_fixed <- model.matrix(attr(mf_e_fixed, "terms"), data = mf_e_fixed)
x_c_fixed <- model.matrix(attr(mf_c_fixed, "terms"), data = mf_c_fixed)
if("e" %in% names(mf_c_fixed)){
mf_c_fixed$e[index_mis_e] <- NA
}
name_re_e_coeff <- NULL
name_re_c_coeff <- NULL
if(!is.null(random_e)){
name_re_e_coeff <- sub("\\|.*", "", random_e)
if(grepl("0 + 1", name_re_e_coeff, fixed = TRUE) == TRUE) { stop("Either remove or add the random intercept")}
name_clus_e <- sub('.*\\|', '', random_e)
if(lengths(strsplit(name_clus_e, " ")) > 2) { stop("a single clustering variable must selected for each formula") }
name_clus_e <- gsub(" ", "", name_clus_e, fixed = TRUE)
if(!name_clus_e %in% names(cov_matrix)) { stop("the clustering variable must be among the variables in the dataset") }
if(strsplit(name_re_e_coeff, "")[[1]][1] == 0) {
no_random_int_e <- TRUE} else {no_random_int_e <- FALSE }
if(no_random_int_e == TRUE) {
name_re_e_coeff <- sub("[0]", "", name_re_e_coeff)
name_re_e_coeff <- sub("[+]", "", name_re_e_coeff)
}
if(name_re_e_coeff == "" | name_re_e_coeff == " ") { stop("please state for which variables the random effects are assumed") }
fname_re_e_coeff <- as.formula(paste("e", name_re_e_coeff, sep = " ~ "))
if(all(names(model.frame(fname_re_e_coeff, data = data)) %in% c("0", "1", names(model.frame(fixed_e, data = data)))) == FALSE) {
stop("only covariates defined as fixed effects can be included in the random effects model")
}
if("e" %in% labels(terms(fname_re_e_coeff))) {
stop("please remove 'e' from the random effects expression of model.eff")
}
if("c" %in% labels(terms(fname_re_e_coeff))) {
stop("dependence allowed only through the cost model; please remove 'c' from model.eff")
}
mf_e_random <- model.frame(formula = fname_re_e_coeff, data = data)
x_e_random <- model.matrix(attr(mf_e_random, "terms"), data = mf_e_random)
if(no_random_int_e == TRUE) {
x_e_random <- as.matrix(x_e_random[, !colnames(x_e_random) == "(Intercept)"])
if(is.null(colnames(x_e_random)) == TRUE & dim(x_e_random)[2] == 1) {
colnames(x_e_random) <- gsub(" ", "", name_re_e_coeff)
}
}
clus_e <- data[, name_clus_e]
if(!is.factor(clus_e)) { stop("clustering variables must be defined as factors") }
clusn_e <- as.numeric(clus_e)
if(!all(diff(sort(unique(clusn_e))) == 1) | !min(clusn_e) == 1) {
stop("ordered levels of clustering variables must not have gaps and must start from 1")
}
}
if(!is.null(random_c)){
name_re_c_coeff <- sub("\\|.*", "", random_c)
if(grepl("0 + 1", name_re_c_coeff, fixed = TRUE) == TRUE) { stop("Either remove or add the random intercept")}
name_clus_c <- sub('.*\\|', '', random_c)
if(lengths(strsplit(name_clus_c, " ")) > 2) { stop("a single clustering variable must selected for each formula") }
name_clus_c <- gsub(" ", "", name_clus_c, fixed = TRUE)
if(!name_clus_c %in% names(cov_matrix)) { stop("the clustering variable must be among the variables in the dataset") }
if(strsplit(name_re_c_coeff, "")[[1]][1] == 0) {
no_random_int_c <- TRUE} else {no_random_int_c <- FALSE }
if(no_random_int_c == TRUE) {
name_re_c_coeff <- sub("[0]", "", name_re_c_coeff)
name_re_c_coeff <- sub("[+]", "", name_re_c_coeff)
}
if(name_re_c_coeff == "" | name_re_c_coeff == " ") { stop("please state for which variables the random effects are assumed") }
if(gsub(" ", "", name_re_c_coeff) == "e" & no_random_int_c == FALSE) {name_re_c_coeff <- "1 + e" }
fname_re_c_coeff <- as.formula(paste("c", name_re_c_coeff, sep = " ~ "))
if(all(names(model.frame(fname_re_c_coeff, data = data)) %in% c("0", "1", names(model.frame(fixed_c, data = data)))) == FALSE) {
stop("only covariates defined as fixed effects can be included in the random effects model")
}
if("c" %in% labels(terms(fname_re_c_coeff))) {
stop("please remove 'c' from the random effects expression of model.cost")
}
if("e" %in% labels(terms(fname_re_c_coeff))) {
if(length(grep(":e", labels(terms(fname_re_c_coeff)))) != 0 | length(grep("e:", labels(terms(fname_re_c_coeff)))) != 0) {
stop("no interaction effects for 'e' is allowed")
}
}
mf_c_random <- model.frame(formula = fname_re_c_coeff, data = data)
x_c_random <- model.matrix(attr(mf_c_random, "terms"), data = mf_c_random)
if("e" %in% labels(terms(fname_re_c_coeff)) & length(labels(terms(fname_re_c_coeff))) == 1) {
x_c_random <- subset(x_c_random, select = -c(e))
}
if(no_random_int_c == TRUE) {
x_c_random <- as.matrix(x_c_random[, !colnames(x_c_random) == "(Intercept)"])
if(is.null(colnames(x_c_random)) == TRUE & dim(x_c_random)[2] == 1) {
colnames(x_c_random) <- gsub(" ", "", name_re_c_coeff)
}
}
clus_c <- data[, name_clus_c]
if(!is.factor(clus_c)) { stop("clustering variables must be defined as factors") }
clusn_c <- as.numeric(clus_c)
if(!all(diff(sort(unique(clusn_c))) == 1) | !min(clusn_c) == 1) {
stop("ordered levels of clustering variables must not have gaps and must start from 1")
}
}
y_e <- model.response(mf_e_fixed)
y_c <- model.response(mf_c_fixed)
y_e[index_mis_e] <- NA
y_c[index_mis_c] <- NA
data$e[index_mis_e] <- NA
data$c[index_mis_c] <- NA
N1 <- N2 <- c()
N1 <- sum(data$t == 1)
N2 <- length(data$t) - N1
N <- c(N1, N2)
m_eff <- rep(0, length(data$e))
m_eff[index_mis_e] <- 1
m_cost <- rep(0, length(data$c))
m_cost[index_mis_c] <- 1
m_eff1 <- m_eff2 <- m_cost1 <- m_cost2 <- c()
t1_index <- which(data$t == 1)
t2_index <- which(data$t == 2)
eff1 <- y_e[t1_index]
eff2 <- y_e[t2_index]
eff <- list(eff1, eff2)
cost1 <- y_c[t1_index]
cost2 <- y_c[t2_index]
cost <- list(cost1, cost2)
m_eff1 <- m_eff[t1_index]
m_eff2 <- m_eff[t2_index]
m_eff <- list(m_eff1, m_eff2)
m_cost1 <- m_cost[t1_index]
m_cost2 <- m_cost[t2_index]
m_cost <- list(m_cost1, m_cost2)
N1_cc <- N2_cc <- N1_mis <- N2_mis <- c()
N1_cc[1] <- length(na.omit(eff1))
N1_cc[2] <- length(na.omit(cost1))
N2_cc[1] <- length(na.omit(eff2))
N2_cc[2] <- length(na.omit(cost2))
N_cc <- cbind(N1_cc, N2_cc)
N1_mis <- N1 - N1_cc
N2_mis <- N2 - N2_cc
N_mis <- cbind(N1_mis, N2_mis)
effects <- list(eff1, eff2)
costs <- list(cost1, cost2)
eff1_cc <- eff2_cc <- cost1_cc <- cost2_cc <- c()
eff1_cc <- na.omit(eff1)
eff2_cc <- na.omit(eff2)
eff_cc <- list(eff1_cc, eff2_cc)
cost1_cc <- na.omit(cost1)
cost2_cc <- na.omit(cost2)
cost_cc <- list(cost1_cc, cost2_cc)
cov1_e_fixed <- as.data.frame(x_e_fixed[t1_index, ])
names(cov1_e_fixed) <- colnames(x_e_fixed)
cov2_e_fixed <- as.data.frame(x_e_fixed[t2_index, ])
names(cov2_e_fixed) <- colnames(x_e_fixed)
cov_e_fixed <- list(cov1_e_fixed, cov2_e_fixed)
x_c_hold_fixed <- x_c_fixed
if("e" %in% colnames(x_c_hold_fixed)) {
x_c_fixed <- subset(x_c_hold_fixed, select = -c(e))
}
cov1_c_fixed <- as.data.frame(x_c_fixed[t1_index, ])
names(cov1_c_fixed) <- colnames(x_c_fixed)
cov2_c_fixed <- as.data.frame(x_c_fixed[t2_index, ])
names(cov2_c_fixed) <- colnames(x_c_fixed)
cov_c_fixed <- list(cov1_c_fixed, cov2_c_fixed)
cove_fixed <- list(cov1_e_fixed, cov2_e_fixed)
mean_cov_e_fixed <- list(apply(as.matrix(cov1_e_fixed), 2, mean), apply(as.matrix(cov2_e_fixed), 2, mean))
covc_fixed <- list(cov1_c_fixed, cov2_c_fixed)
mean_cov_c_fixed <- list(apply(as.matrix(cov1_c_fixed), 2, mean), apply(as.matrix(cov2_c_fixed), 2, mean))
cov1_e_center_fixed <- as.data.frame(scale(cov1_e_fixed, scale = FALSE))
cov2_e_center_fixed <- as.data.frame(scale(cov2_e_fixed, scale = FALSE))
cov1_e_center_fixed[, 1] <- rep(1, nrow(cov1_e_fixed))
cov2_e_center_fixed[, 1] <- rep(1, nrow(cov2_e_fixed))
cov_e_center_fixed <- list(cov1_e_center_fixed, cov2_e_center_fixed)
mean_cov_e_center_fixed <- list(apply(as.matrix(cov1_e_center_fixed), 2, mean), apply(as.matrix(cov2_e_center_fixed), 2, mean))
cov1_c_center_fixed <- as.data.frame(scale(cov1_c_fixed, scale = FALSE))
cov2_c_center_fixed <- as.data.frame(scale(cov2_c_fixed, scale = FALSE))
cov1_c_center_fixed[, 1] <- rep(1, nrow(cov1_c_fixed))
cov2_c_center_fixed[, 1] <- rep(1, nrow(cov2_c_fixed))
cov_c_center_fixed <- list(cov1_c_center_fixed, cov2_c_center_fixed)
mean_cov_c_center_fixed <- list(apply(as.matrix(cov1_c_center_fixed), 2, mean), apply(as.matrix(cov2_c_center_fixed), 2, mean))
if(center == TRUE) {
cov_e_fixed <- cov_e_center_fixed
cov_c_fixed <- cov_c_center_fixed
mean_cov_e_fixed <- mean_cov_e_center_fixed
mean_cov_c_fixed <- mean_cov_c_center_fixed
}
if(!is.null(random_e)){
cov1_e_random <- as.data.frame(x_e_random[t1_index, ])
names(cov1_e_random) <- colnames(x_e_random)
cov2_e_random <- as.data.frame(x_e_random[t2_index, ])
names(cov2_e_random) <- colnames(x_e_random)
cov_e_random <- list(cov1_e_random, cov2_e_random)
cove_random <- list(cov1_e_random, cov2_e_random)
mean_cov_e_random <- list(apply(as.matrix(cov1_e_random), 2, mean), apply(as.matrix(cov2_e_random), 2, mean))
cov1_e_center_random <- as.data.frame(scale(cov1_e_random, scale = FALSE))
cov2_e_center_random <- as.data.frame(scale(cov2_e_random, scale = FALSE))
if(no_random_int_e == FALSE) {
cov1_e_center_random[, 1] <- rep(1, nrow(cov1_e_random))
cov2_e_center_random[, 1] <- rep(1, nrow(cov2_e_random))
}
cov_e_center_random <- list(cov1_e_center_random, cov2_e_center_random)
mean_cov_e_center_random <- list(apply(as.matrix(cov1_e_center_random), 2, mean), apply(as.matrix(cov2_e_center_random), 2, mean))
if(center == TRUE) {
cov_e_random <- cov_e_center_random
mean_cov_e_random <- mean_cov_e_center_random
}
clusn_e1 <- clusn_e[t1_index]
clusn_e1 <- factor(clusn_e1, levels = unique(clusn_e1))
clusn_e2 <- clusn_e[t2_index]
clusn_e2 <- factor(clusn_e2, levels = unique(clusn_e2))
}
if(!is.null(random_c)){
x_c_hold_random <- x_c_random
if("e" %in% colnames(x_c_hold_random)) {
x_c_random <- subset(x_c_hold_random, select = -c(e))
}
cov1_c_random <- as.data.frame(x_c_random[t1_index, ])
names(cov1_c_random) <- colnames(x_c_random)
cov2_c_random <- as.data.frame(x_c_random[t2_index, ])
names(cov2_c_random) <- colnames(x_c_random)
cov_c_random <- list(cov1_c_random, cov2_c_random)
covc_random <- list(cov1_c_random, cov2_c_random)
mean_cov_c_random <- list(apply(as.matrix(cov1_c_random), 2, mean), apply(as.matrix(cov2_c_random), 2, mean))
cov1_c_center_random <- as.data.frame(scale(cov1_c_random, scale = FALSE))
cov2_c_center_random <- as.data.frame(scale(cov2_c_random, scale = FALSE))
if(no_random_int_c == FALSE) {
cov1_c_center_random[, 1] <- rep(1, nrow(cov1_c_random))
cov2_c_center_random[, 1] <- rep(1, nrow(cov2_c_random))
}
cov_c_center_random <- list(cov1_c_center_random, cov2_c_center_random)
mean_cov_c_center_random <- list(apply(as.matrix(cov1_c_center_random), 2, mean), apply(as.matrix(cov2_c_center_random), 2, mean))
if(center == TRUE) {
cov_c_random <- cov_c_center_random
mean_cov_c_random <- mean_cov_c_center_random
}
clusn_c1 <- clusn_c[t1_index]
clusn_c1 <- factor(clusn_c1, levels = unique(clusn_c1))
clusn_c2 <- clusn_c[t2_index]
clusn_c2 <- factor(clusn_c2, levels = unique(clusn_c2))
}
data2$e[is.na(data2$e) == TRUE] <- -999999
data2$c[is.na(data2$c) == TRUE] <- -999999
data2$me <- c(m_eff1, m_eff2)
data2$mc <- c(m_cost1, m_cost2)
if(!is.formula(model.me) | !is.formula(model.mc)) {
stop("model.me and/or model.mc must be formula objects")
}
fixed_me <- nobars_(model.me)
fixed_mc <- nobars_(model.mc)
random_me <- fb(model.me)
random_mc <- fb(model.mc)
if(!is.null(random_me) & length(random_me) > 1 | !is.null(random_mc) & length(random_mc) > 1) {
stop("random effects can be included in the formula only through a single expression within brackets")
}
if(all(names(model.frame(fixed_me, data = data2)) %in% c("me", "e", names(cov_matrix))) == FALSE |
all(names(model.frame(fixed_mc, data = data2)) %in% c("mc", "c", names(cov_matrix))) == FALSE) {
stop("partially-observed covariates cannot be included in the model")
}
if(all(names(model.frame(fixed_me, data = data2)) %in% names(data2)) == FALSE |
all(names(model.frame(fixed_mc, data = data2)) %in% names(data2)) == FALSE) {
stop("you must provide names in the formula that correspond to those in the data")
}
if(names(model.frame(fixed_me, data = data2)[1]) != "me") {
stop("you must set 'me' as the response in the formula model.me")
}
if(names(model.frame(fixed_mc, data = data2)[1]) != "mc") {
stop("you must set 'mc' as the response in the formula model.mc")
}
if("t" %in% names(model.frame(fixed_mc, data = data2)) | "t" %in% names(model.frame(fixed_me, data = data2))) {
stop("treatment indicator must be provided only in the data. Please remove 't' from 'model.me' and/or 'model.mc'")
}
if("c" %in% names(model.frame(fixed_me, data = data2)) | "e" %in% names(model.frame(fixed_mc, data = data2))) {
stop("please remove 'e' from model.mc and/or remove 'c' from model.me")
}
if("e" %in% labels(terms(fixed_me))) {
if(length(grep(":e", labels(terms(fixed_me)))) != 0 | length(grep("e:", labels(terms(fixed_me)))) != 0) {
stop("no interaction effects for 'e' is allowed")
}
}
if("c" %in% labels(terms(fixed_mc))) {
if(length(grep(":c", labels(terms(fixed_mc)))) != 0 | length(grep("c:", labels(terms(fixed_mc)))) != 0) {
stop("no interaction effects for 'c' is allowed")
}
}
name_re_me_coeff <- NULL
name_re_mc_coeff <- NULL
if(!is.null(random_me)){
name_re_me_coeff <- sub("\\|.*", "", random_me)
if(grepl("0 + 1", name_re_me_coeff, fixed = TRUE) == TRUE) { stop("Either remove or add the random intercept")}
name_clus_me <- sub('.*\\|', '', random_me)
if(lengths(strsplit(name_clus_me, " ")) > 2) {stop("a single clustering variable must be selected for each formula") }
name_clus_me <- gsub(" ", "", name_clus_me, fixed = TRUE)
if(!name_clus_me %in% names(cov_matrix)) { stop("the clustering variable must be among the variables in the dataset") }
if(strsplit(name_re_me_coeff, "")[[1]][1] == 0) {
no_random_int_me <- TRUE} else {no_random_int_me <- FALSE }
if(no_random_int_me == TRUE) {
name_re_me_coeff <- sub("[0]", "", name_re_me_coeff)
name_re_me_coeff <- sub("[+]", "", name_re_me_coeff)
}
if(name_re_me_coeff == "" | name_re_me_coeff == " ") { stop("please state for which variables the random effects are assumed") }
if(gsub(" ", "", name_re_me_coeff) == "e" & no_random_int_me == FALSE) {name_re_me_coeff <- "1 + e" }
fname_re_me_coeff <- as.formula(paste("me", name_re_me_coeff, sep=" ~ "))
if(all(names(model.frame(fname_re_me_coeff, data = data2)) %in% c("0","1", names(model.frame(fixed_me, data = data2)))) == FALSE) {
stop("only covariates inlcued as fixed effects can be included in the random effects model")
}
if("me" %in% labels(terms(fname_re_me_coeff))) {
stop("please remove 'me' from the right hand side of model.me")
}
if("e" %in% labels(terms(fname_re_me_coeff))) {
if(length(grep(":e", labels(terms(fname_re_me_coeff)))) != 0 | length(grep("e:", labels(terms(fname_re_me_coeff)))) != 0) {
stop("no interaction effects for 'e' are allowed")
}
}
clus_me <- data[, name_clus_me]
if(!is.factor(clus_me)) { stop("clustering variables must be defined as factors") }
clusn_me <- as.numeric(clus_me)
if(!all(diff(sort(unique(clusn_me))) == 1) | !min(clusn_me) == 1) {
stop("ordered levels of clustering variables must not have gaps and must start from 1")
}
}
if(!is.null(random_mc)){
name_re_mc_coeff <- sub("\\|.*", "", random_mc)
if(grepl("0 + 1", name_re_mc_coeff, fixed = TRUE) == TRUE) { stop("Either remove or add the random intercept")}
name_clus_mc <- sub('.*\\|', '', random_mc)
if(lengths(strsplit(name_clus_mc, " ")) > 2) {stop("a single clustering variable must be selected for each formula") }
name_clus_mc <- gsub(" ", "", name_clus_mc, fixed = TRUE)
if(!name_clus_mc %in% names(cov_matrix)) { stop("the clustering variable must be among the variables in the dataset") }
if(strsplit(name_re_mc_coeff, "")[[1]][1] == 0) {
no_random_int_mc <- TRUE} else {no_random_int_mc <- FALSE }
if(no_random_int_mc == TRUE) {
name_re_mc_coeff <- sub("[0]", "", name_re_mc_coeff)
name_re_mc_coeff <- sub("[+]", "", name_re_mc_coeff)
}
if(name_re_mc_coeff == "" | name_re_mc_coeff == " ") { stop("please state for which variables the random effects are assumed") }
if(gsub(" ", "", name_re_mc_coeff) == "c" & no_random_int_mc == FALSE) {name_re_mc_coeff <- "1 + c" }
fname_re_mc_coeff <- as.formula(paste("mc", name_re_mc_coeff, sep=" ~ "))
if(all(names(model.frame(fname_re_mc_coeff, data = data2)) %in% c("0","1", names(model.frame(fixed_mc, data = data2)))) == FALSE) {
stop("only covariates inlcued as fixed effects can be included in the random effects model")
}
if("mc" %in% labels(terms(fname_re_mc_coeff))) {
stop("please remove 'mc' from the right hand side of model.mc")
}
if("c" %in% labels(terms(fname_re_mc_coeff))) {
if(length(grep(":c", labels(terms(fname_re_mc_coeff)))) != 0 | length(grep("c:", labels(terms(fname_re_mc_coeff)))) != 0) {
stop("no interaction effects for 'c' is allowed")
}
}
clus_mc <- data[, name_clus_mc]
if(!is.factor(clus_mc)) { stop("clustering variables must be defined as factors") }
clusn_mc <- as.numeric(clus_mc)
if(!all(diff(sort(unique(clusn_mc))) == 1) | !min(clusn_mc) == 1) {
stop("ordered levels of clustering variables must not have gaps and must start from 1")
}
}
mf_me_fixed <- model.frame(formula = fixed_me, data = data2)
mf_mc_fixed <- model.frame(formula = fixed_mc, data = data2)
z_e_fixed <- model.matrix(attr(mf_me_fixed, "terms"), data = mf_me_fixed)
z_c_fixed <- model.matrix(attr(mf_mc_fixed, "terms"), data = mf_mc_fixed)
z_e_hold_fixed <- z_e_fixed
if("e" %in% colnames(z_e_hold_fixed)) {
z_e_fixed <- subset(z_e_hold_fixed, select = -c(e))
}
z_c_hold_fixed <- z_c_fixed
if("c" %in% colnames(z_c_hold_fixed)) {
z_c_fixed <- subset(z_c_hold_fixed, select = -c(c))
}
covz1_e_fixed <- as.data.frame(z_e_fixed[t1_index, ])
names(covz1_e_fixed) <- colnames(z_e_fixed)
covz2_e_fixed <- as.data.frame(z_e_fixed[t2_index, ])
names(covz2_e_fixed) <- colnames(z_e_fixed)
covz_e_fixed <- list(covz1_e_fixed, covz2_e_fixed)
covze_fixed <- list(covz1_e_fixed, covz2_e_fixed)
mean_covz_e_fixed <- list(apply(as.matrix(covz1_e_fixed), 2, mean), apply(as.matrix(covz2_e_fixed), 2, mean))
names(covz_e_fixed) <- names(mean_covz_e_fixed) <- c("Control", "Intervention")
covz1_c_fixed <- as.data.frame(z_c_fixed[t1_index, ])
names(covz1_c_fixed) <- colnames(z_c_fixed)
covz2_c_fixed <- as.data.frame(z_c_fixed[t2_index, ])
names(covz2_c_fixed) <- colnames(z_c_fixed)
covz_c_fixed <- list(covz1_c_fixed, covz2_c_fixed)
covzc_fixed <- list(covz1_c_fixed, covz2_c_fixed)
mean_covz_c_fixed <- list(apply(as.matrix(covz1_c_fixed), 2, mean), apply(as.matrix(covz2_c_fixed), 2, mean))
covz1_e_center_fixed <- as.data.frame(scale(covz1_e_fixed, scale = FALSE))
covz2_e_center_fixed <- as.data.frame(scale(covz2_e_fixed, scale = FALSE))
covz1_e_center_fixed[, 1] <- rep(1, nrow(covz1_e_fixed))
covz2_e_center_fixed[, 1] <- rep(1, nrow(covz2_e_fixed))
covz_e_center_fixed <- list(covz1_e_center_fixed, covz2_e_center_fixed)
mean_covz_e_center_fixed <- list(apply(as.matrix(covz1_e_center_fixed), 2, mean), apply(as.matrix(covz2_e_center_fixed), 2, mean))
covz1_c_center_fixed <- as.data.frame(scale(covz1_c_fixed, scale = FALSE))
covz2_c_center_fixed <- as.data.frame(scale(covz2_c_fixed, scale = FALSE))
covz1_c_center_fixed[, 1] <- rep(1, nrow(covz1_c_fixed))
covz2_c_center_fixed[, 1] <- rep(1, nrow(covz2_c_fixed))
covz_c_center_fixed <- list(covz1_c_center_fixed, covz2_c_center_fixed)
mean_covz_c_center_fixed <- list(apply(as.matrix(covz1_c_center_fixed), 2, mean), apply(as.matrix(covz2_c_center_fixed), 2, mean))
if(center == TRUE) {
covz_e_fixed <- covz_e_center_fixed
covz_c_fixed <- covz_c_center_fixed
mean_covz_e_fixed <- mean_covz_e_center_fixed
mean_covz_c_fixed <- mean_covz_c_center_fixed
}
if(!is.null(random_me)){
mf_me_random <- model.frame(formula = fname_re_me_coeff, data = data2)
z_e_random <- model.matrix(attr(mf_me_random, "terms"), data = mf_me_random)
if(no_random_int_me == TRUE) {
z_e_random <- as.matrix(z_e_random[, !colnames(z_e_random) == "(Intercept)"])
if(dim(z_e_random)[2] == 1) { colnames(z_e_random) <- colnames(model.matrix(attr(mf_me_random, "terms"), data = mf_me_random))[2] }
}
z_e_hold_random <- z_e_random
if("e" %in% colnames(z_e_hold_random)) {
z_e_random <- subset(z_e_hold_random, select = -c(e))
}
covz1_e_random <- as.data.frame(z_e_random[t1_index, ])
names(covz1_e_random) <- colnames(z_e_random)
covz2_e_random <- as.data.frame(z_e_random[t2_index, ])
names(covz2_e_random) <- colnames(z_e_random)
covz_e_random <- list(covz1_e_random, covz2_e_random)
covze_random <- list(covz1_e_random, covz2_e_random)
mean_covz_e_random <- list(apply(as.matrix(covz1_e_random), 2, mean), apply(as.matrix(covz2_e_random), 2, mean))
names(covz_e_random) <- names(mean_covz_e_random) <- c("Control", "Intervention")
covz1_e_center_random <- as.data.frame(scale(covz1_e_random, scale = FALSE))
covz2_e_center_random <- as.data.frame(scale(covz2_e_random, scale = FALSE))
if(no_random_int_me == FALSE) {
covz1_e_center_random[, 1] <- rep(1, nrow(covz1_e_random))
covz2_e_center_random[, 1] <- rep(1, nrow(covz2_e_random))
}
covz_e_center_random <- list(covz1_e_center_random, covz2_e_center_random)
mean_covz_e_center_random <- list(apply(as.matrix(covz1_e_center_random), 2, mean), apply(as.matrix(covz2_e_center_random), 2, mean))
if(center == TRUE) {
covz_e_random <- covz_e_center_random
mean_covz_e_random <- mean_covz_e_center_random
}
clusn_me1 <- clusn_me[t1_index]
clusn_me1 <- factor(clusn_me1, levels = unique(clusn_me1))
clusn_me2 <- clusn_me[t2_index]
clusn_me2 <- factor(clusn_me2, levels = unique(clusn_me2))
}
if(!is.null(random_mc)){
mf_mc_random <- model.frame(formula = fname_re_mc_coeff, data = data2)
z_c_random <- model.matrix(attr(mf_mc_random, "terms"), data = mf_mc_random)
if(no_random_int_mc == TRUE) {
z_c_random <- as.matrix(z_c_random[, !colnames(z_c_random) == "(Intercept)"])
if(dim(z_c_random)[2] == 1) { colnames(z_c_random) <- colnames(model.matrix(attr(mf_mc_random, "terms"), data = mf_mc_random))[2] }
}
z_c_hold_random <- z_c_random
if("c" %in% colnames(z_c_hold_random)) {
z_c_random <- subset(z_c_hold_random, select = -c(c))
}
covz1_c_random <- as.data.frame(z_c_random[t1_index, ])
names(covz1_c_random) <- colnames(z_c_random)
covz2_c_random <- as.data.frame(z_c_random[t2_index, ])
names(covz2_c_random) <- colnames(z_c_random)
covz_c_random <- list(covz1_c_random, covz2_c_random)
covzc_random <- list(covz1_c_random, covz2_c_random)
mean_covz_c_random <- list(apply(as.matrix(covz1_c_random), 2, mean), apply(as.matrix(covz2_c_random), 2, mean))
names(covz_c_random) <- names(mean_covz_c_random) <- c("Control", "Intervention")
covz1_c_center_random <- as.data.frame(scale(covz1_c_random, scale = FALSE))
covz2_c_center_random <- as.data.frame(scale(covz2_c_random, scale = FALSE))
if(no_random_int_mc == FALSE) {
covz1_c_center_random[, 1] <- rep(1, nrow(covz1_c_random))
covz2_c_center_random[, 1] <- rep(1, nrow(covz2_c_random))
}
covz_c_center_random <- list(covz1_c_center_random, covz2_c_center_random)
mean_covz_c_center_random <- list(apply(as.matrix(covz1_c_center_random), 2, mean), apply(as.matrix(covz2_c_center_random), 2, mean))
if(center == TRUE) {
covz_c_random <- covz_c_center_random
mean_covz_c_random <- mean_covz_c_center_random
}
clusn_mc1 <- clusn_mc[t1_index]
clusn_mc1 <- factor(clusn_mc1, levels = unique(clusn_mc1))
clusn_mc2 <- clusn_mc[t2_index]
clusn_mc2 <- factor(clusn_mc2, levels = unique(clusn_mc2))
}
names(covz_e_fixed) <- names(mean_covz_e_fixed) <- names(covz_c_fixed) <- names(mean_covz_c_fixed) <- c("Control", "Intervention")
names(cov_e_fixed) <- names(cov_c_fixed) <- names(mean_cov_e_fixed) <- names(mean_cov_c_fixed) <- c("Control", "Intervention")
if(!is.null(random_c)) {
names(cov_c_random) <- names(mean_cov_c_random) <- c("Control", "Intervention")
clusn_c <- list("Control" = clusn_c1, "Intervention" = clusn_c2)
} else {cov_c_random <- mean_cov_c_random <- NULL}
if(!is.null(random_mc)) {
names(covz_c_random) <- names(mean_covz_c_random) <- c("Control", "Intervention")
clusn_mc <- list("Control" = clusn_mc1, "Intervention" = clusn_mc2)
} else {covz_c_random <- mean_covz_c_random <- NULL}
if(!is.null(random_e)) {
names(cov_e_random) <- names(mean_cov_e_random) <- c("Control", "Intervention")
clusn_e <- list("Control" = clusn_e1, "Intervention" = clusn_e2)
} else {cov_e_random <- mean_cov_e_random <- NULL}
if(!is.null(random_me)) {
names(covz_e_random) <- names(mean_covz_e_random) <- c("Control", "Intervention")
clusn_me <- list("Control" = clusn_me1, "Intervention" = clusn_me2)
} else {covz_e_random <- mean_covz_e_random <- NULL}
names(m_eff) <- names(m_cost) <- c("Control", "Intervention")
names(effects) <- names(costs) <- names(eff_cc) <- names(cost_cc) <- c("Control", "Intervention")
data_raw <- list("raw_effects" = effects, "raw_costs" = costs, "raw_effects_cc" = eff_cc, "raw_costs_cc" = cost_cc, "arm_lengths" = N,
"arm_lengths_cc" = N_cc, "arm_missing_data" = N_mis, "missing_effects" = m_eff, "missing_costs" = m_cost,
"covariates_effects_fixed" = cov_e_fixed, "covariates_costs_fixed" = cov_c_fixed, "mean_cov_effects_fixed" = mean_cov_e_fixed, "mean_cov_costs_fixed" = mean_cov_c_fixed,
"covariates_missing_effects_fixed" = covz_e_fixed, "mean_cov_missing_effects_fixed" = mean_covz_e_fixed, "covariates_missing_costs_fixed" = covz_c_fixed,
"mean_cov_missing_costs_fixed" = mean_covz_c_fixed, "covariates_effects_random" = cov_e_random, "covariates_costs_random" = cov_c_random, "mean_cov_effects_random" = mean_cov_e_random, "mean_cov_costs_random" = mean_cov_c_random,
"covariates_missing_effects_random" = covz_e_random, "mean_cov_missing_effects_random" = mean_covz_e_random, "covariates_missing_costs_random" = covz_c_random,
"mean_cov_missing_costs_random" = mean_covz_c_random, "clus_e" = clusn_e, "clus_c" = clusn_c, "clus_me" = clusn_me, "clus_mc" = clusn_mc, "data_ind" = data2)
model_formula <- list("mf_model.e_fixed" = fixed_e, "mf_model.c_fixed" = fixed_c, "mf_model.me_fixed" = fixed_me, "mf_model.mc_fixed" = fixed_mc,
"mf_model.e_random" = fname_re_e_coeff, "mf_model.c_random" = fname_re_c_coeff, "mf_model.me_random" = fname_re_me_coeff, "mf_model.mc_random" = fname_re_mc_coeff)
data_list <- list("data_raw" = data_raw, "model_formula" = model_formula)
return(data_list)
} | /R/data_read_selection.R | no_license | AnGabrio/missingHE | R | false | false | 34,676 | r | #' A function to read and re-arrange the data in different ways
#'
#' This internal function imports the data and outputs only those variables that are needed to run the model
#' according to the information provided by the user.
#' @param data A data frame in which to find variables supplied in \code{model.eff} and \code{model.cost}. Among these,
#' effectiveness, cost and treatment indicator (only two arms) variables must always be provided and named 'e', 'c' and 't' respectively.
#' @param model.eff A formula expression in conventional \code{R} linear modelling syntax. The response must be a health economics
#' effectiveness outcome ('e') whose name must correspond to that used in \code{data}, and
#' any covariates are given on the right-hand side. If there are no covariates, specify \code{1} on the right hand side.
#' By default, covariates are placed on the "location" parameter of the distribution through a linear model.
#' Random effects can also be specified for each model parameter.
#' @param model.cost A formula expression in conventional \code{R} linear modelling syntax. The response must be a health economics
#' cost outcome ('c') whose name must correspond to that used in \code{data}, and any covariates are given on the right-hand side.
#' If there are no covariates, specify \code{1} on the right hand side. By default, covariates are placed on the "location"
#' parameter of the distribution through a linear model. Random effects can also be specified for each model parameter.
#' @param model.me A formula expression in conventional \code{R} linear modelling syntax. The response must be indicated with the
#' term 'me'(missing effects) and any covariates used to estimate the probability of missing effects are given on the right-hand side.
#' If there are no covariates, specify \code{1} on the right hand side. By default, covariates are placed on the "probability" parameter for the missing effects through a logistic-linear model.
#' Random effects can also be specified for each model parameter.
#' @param model.mc A formula expression in conventional R linear modelling syntax. The response must be indicated with the
#' term 'mc'(missing costs) and any covariates used to estimate the probability of missing costs should be given on the right-hand side.
#' If there are no covariates, specify \code{1} on the right hand side. By default, covariates are placed on the "probability" parameter for the missing costs through a logistic-linear model.
#' Random effects can also be specified for each model parameter.
#' @param type Type of missingness mechanism assumed. Choices are Missing At Random (MAR) and Missing Not At Random (MNAR).
#' @param center Logical. If \code{center} is \code{TRUE} all the covariates in the model are centered.
#' @keywords read data
#' @importFrom stats na.omit sd as.formula model.matrix model.frame model.response terms
#' @export
#' @examples
#' #Internal function only
#' #no examples
#' #
#' #
data_read_selection <- function(data, model.eff, model.cost, model.me, model.mc, type, center) {
if(is.data.frame(data) == FALSE) {
stop("object data must be provided as data frame")
}
if(any(names(data) == "e") == TRUE & any(names(data) == "c") == TRUE) {
e <- as.name("e")
c <- as.name("c")
}
cov_matrix <- subset(data, select = -c(e, c))
cov_matrix <- cov_matrix[!unlist(vapply(cov_matrix, anyNA, logical(1)))]
is.formula <- function (x) { inherits(x, "formula") }
if(is.formula(model.eff) == FALSE | is.formula(model.cost) == FALSE) {
stop("model.eff and/or model.cost must be formula objects")
}
if(is.logical(center) == FALSE) { stop("center must be either TRUE or FALSE") }
fixed_e <- nobars_(model.eff)
fixed_c <- nobars_(model.cost)
random_e <- fb(model.eff)
random_c <- fb(model.cost)
fname_re_e_coeff <- as.formula(paste("e", "0", sep=" ~ "))
fname_re_c_coeff <- as.formula(paste("c", "0", sep=" ~ "))
fname_re_me_coeff <- as.formula(paste("me", "0", sep=" ~ "))
fname_re_mc_coeff <- as.formula(paste("mc", "0", sep=" ~ "))
clusn_e <- clusn_c <- NULL
clusn_me <- clusn_mc <- NULL
if(!is.null(random_e) & length(random_e) > 1 | !is.null(random_c) & length(random_c) > 1) {
stop("random effects can be included in the formula only through a single expression within brackets")
}
if(all(names(model.frame(fixed_e, data = data)) %in% c("e", names(cov_matrix))) == FALSE |
all(names(model.frame(fixed_c, data = data)) %in% c("c", "e", names(cov_matrix))) == FALSE) {
stop("partially-observed covariates cannot be included in the fixed effects model")
}
if(all(names(model.frame(fixed_e, data = data)) %in% names(data)) == FALSE |
all(names(model.frame(fixed_c, data = data)) %in% names(data)) == FALSE) {
stop("you must provide names in the formula that correspond to those in the data")
}
if("e" %in% labels(terms(fixed_e)) | "c" %in% labels(terms(fixed_c))) {
stop("please remove 'e' from the right hand side of model.eff and/or 'c' from the right hand side of model.cost")
}
if(names(model.frame(fixed_e, data = data)[1]) != "e") {
stop("you must set 'e' as the response in the formula model.eff")
}
if("c" %in% names(model.frame(fixed_e, data = data))) {
stop("dependence allowed only through the cost model; please remove 'c' from model.eff")
}
if(names(model.frame(fixed_c, data = data)[1]) != "c") {
stop("you must set 'c' as the response in the formula model.cost")
}
if("e" %in% labels(terms(fixed_c))) {
if(length(grep(":e", labels(terms(fixed_c)))) != 0 | length(grep("e:", labels(terms(fixed_c)))) != 0) {
stop("no interaction effects for 'e' is allowed")
}
}
if("t" %in% names(model.frame(fixed_c, data = data)) | "t" %in% names(model.frame(fixed_e, data = data))) {
stop("treatment indicator must be provided only in the data. Please remove 't' from 'model.eff' and/or 'model.cost'")
}
index_mis_e <- which(is.na(data$e))
index_mis_c <- which(is.na(data$c))
data2 <- data
data$e[is.na(data$e) == TRUE] <- -999999
data$c[is.na(data$c) == TRUE] <- -999999
mf_e_fixed <- model.frame(formula = fixed_e, data = data)
mf_c_fixed <- model.frame(formula = fixed_c, data = data)
terms <- NULL
x_e_fixed <- model.matrix(attr(mf_e_fixed, "terms"), data = mf_e_fixed)
x_c_fixed <- model.matrix(attr(mf_c_fixed, "terms"), data = mf_c_fixed)
if("e" %in% names(mf_c_fixed)){
mf_c_fixed$e[index_mis_e] <- NA
}
name_re_e_coeff <- NULL
name_re_c_coeff <- NULL
if(!is.null(random_e)){
name_re_e_coeff <- sub("\\|.*", "", random_e)
if(grepl("0 + 1", name_re_e_coeff, fixed = TRUE) == TRUE) { stop("Either remove or add the random intercept")}
name_clus_e <- sub('.*\\|', '', random_e)
if(lengths(strsplit(name_clus_e, " ")) > 2) { stop("a single clustering variable must selected for each formula") }
name_clus_e <- gsub(" ", "", name_clus_e, fixed = TRUE)
if(!name_clus_e %in% names(cov_matrix)) { stop("the clustering variable must be among the variables in the dataset") }
if(strsplit(name_re_e_coeff, "")[[1]][1] == 0) {
no_random_int_e <- TRUE} else {no_random_int_e <- FALSE }
if(no_random_int_e == TRUE) {
name_re_e_coeff <- sub("[0]", "", name_re_e_coeff)
name_re_e_coeff <- sub("[+]", "", name_re_e_coeff)
}
if(name_re_e_coeff == "" | name_re_e_coeff == " ") { stop("please state for which variables the random effects are assumed") }
fname_re_e_coeff <- as.formula(paste("e", name_re_e_coeff, sep = " ~ "))
if(all(names(model.frame(fname_re_e_coeff, data = data)) %in% c("0", "1", names(model.frame(fixed_e, data = data)))) == FALSE) {
stop("only covariates defined as fixed effects can be included in the random effects model")
}
if("e" %in% labels(terms(fname_re_e_coeff))) {
stop("please remove 'e' from the random effects expression of model.eff")
}
if("c" %in% labels(terms(fname_re_e_coeff))) {
stop("dependence allowed only through the cost model; please remove 'c' from model.eff")
}
mf_e_random <- model.frame(formula = fname_re_e_coeff, data = data)
x_e_random <- model.matrix(attr(mf_e_random, "terms"), data = mf_e_random)
if(no_random_int_e == TRUE) {
x_e_random <- as.matrix(x_e_random[, !colnames(x_e_random) == "(Intercept)"])
if(is.null(colnames(x_e_random)) == TRUE & dim(x_e_random)[2] == 1) {
colnames(x_e_random) <- gsub(" ", "", name_re_e_coeff)
}
}
clus_e <- data[, name_clus_e]
if(!is.factor(clus_e)) { stop("clustering variables must be defined as factors") }
clusn_e <- as.numeric(clus_e)
if(!all(diff(sort(unique(clusn_e))) == 1) | !min(clusn_e) == 1) {
stop("ordered levels of clustering variables must not have gaps and must start from 1")
}
}
if(!is.null(random_c)){
name_re_c_coeff <- sub("\\|.*", "", random_c)
if(grepl("0 + 1", name_re_c_coeff, fixed = TRUE) == TRUE) { stop("Either remove or add the random intercept")}
name_clus_c <- sub('.*\\|', '', random_c)
if(lengths(strsplit(name_clus_c, " ")) > 2) { stop("a single clustering variable must selected for each formula") }
name_clus_c <- gsub(" ", "", name_clus_c, fixed = TRUE)
if(!name_clus_c %in% names(cov_matrix)) { stop("the clustering variable must be among the variables in the dataset") }
if(strsplit(name_re_c_coeff, "")[[1]][1] == 0) {
no_random_int_c <- TRUE} else {no_random_int_c <- FALSE }
if(no_random_int_c == TRUE) {
name_re_c_coeff <- sub("[0]", "", name_re_c_coeff)
name_re_c_coeff <- sub("[+]", "", name_re_c_coeff)
}
if(name_re_c_coeff == "" | name_re_c_coeff == " ") { stop("please state for which variables the random effects are assumed") }
if(gsub(" ", "", name_re_c_coeff) == "e" & no_random_int_c == FALSE) {name_re_c_coeff <- "1 + e" }
fname_re_c_coeff <- as.formula(paste("c", name_re_c_coeff, sep = " ~ "))
if(all(names(model.frame(fname_re_c_coeff, data = data)) %in% c("0", "1", names(model.frame(fixed_c, data = data)))) == FALSE) {
stop("only covariates defined as fixed effects can be included in the random effects model")
}
if("c" %in% labels(terms(fname_re_c_coeff))) {
stop("please remove 'c' from the random effects expression of model.cost")
}
if("e" %in% labels(terms(fname_re_c_coeff))) {
if(length(grep(":e", labels(terms(fname_re_c_coeff)))) != 0 | length(grep("e:", labels(terms(fname_re_c_coeff)))) != 0) {
stop("no interaction effects for 'e' is allowed")
}
}
mf_c_random <- model.frame(formula = fname_re_c_coeff, data = data)
x_c_random <- model.matrix(attr(mf_c_random, "terms"), data = mf_c_random)
if("e" %in% labels(terms(fname_re_c_coeff)) & length(labels(terms(fname_re_c_coeff))) == 1) {
x_c_random <- subset(x_c_random, select = -c(e))
}
if(no_random_int_c == TRUE) {
x_c_random <- as.matrix(x_c_random[, !colnames(x_c_random) == "(Intercept)"])
if(is.null(colnames(x_c_random)) == TRUE & dim(x_c_random)[2] == 1) {
colnames(x_c_random) <- gsub(" ", "", name_re_c_coeff)
}
}
clus_c <- data[, name_clus_c]
if(!is.factor(clus_c)) { stop("clustering variables must be defined as factors") }
clusn_c <- as.numeric(clus_c)
if(!all(diff(sort(unique(clusn_c))) == 1) | !min(clusn_c) == 1) {
stop("ordered levels of clustering variables must not have gaps and must start from 1")
}
}
y_e <- model.response(mf_e_fixed)
y_c <- model.response(mf_c_fixed)
y_e[index_mis_e] <- NA
y_c[index_mis_c] <- NA
data$e[index_mis_e] <- NA
data$c[index_mis_c] <- NA
N1 <- N2 <- c()
N1 <- sum(data$t == 1)
N2 <- length(data$t) - N1
N <- c(N1, N2)
m_eff <- rep(0, length(data$e))
m_eff[index_mis_e] <- 1
m_cost <- rep(0, length(data$c))
m_cost[index_mis_c] <- 1
m_eff1 <- m_eff2 <- m_cost1 <- m_cost2 <- c()
t1_index <- which(data$t == 1)
t2_index <- which(data$t == 2)
eff1 <- y_e[t1_index]
eff2 <- y_e[t2_index]
eff <- list(eff1, eff2)
cost1 <- y_c[t1_index]
cost2 <- y_c[t2_index]
cost <- list(cost1, cost2)
m_eff1 <- m_eff[t1_index]
m_eff2 <- m_eff[t2_index]
m_eff <- list(m_eff1, m_eff2)
m_cost1 <- m_cost[t1_index]
m_cost2 <- m_cost[t2_index]
m_cost <- list(m_cost1, m_cost2)
N1_cc <- N2_cc <- N1_mis <- N2_mis <- c()
N1_cc[1] <- length(na.omit(eff1))
N1_cc[2] <- length(na.omit(cost1))
N2_cc[1] <- length(na.omit(eff2))
N2_cc[2] <- length(na.omit(cost2))
N_cc <- cbind(N1_cc, N2_cc)
N1_mis <- N1 - N1_cc
N2_mis <- N2 - N2_cc
N_mis <- cbind(N1_mis, N2_mis)
effects <- list(eff1, eff2)
costs <- list(cost1, cost2)
eff1_cc <- eff2_cc <- cost1_cc <- cost2_cc <- c()
eff1_cc <- na.omit(eff1)
eff2_cc <- na.omit(eff2)
eff_cc <- list(eff1_cc, eff2_cc)
cost1_cc <- na.omit(cost1)
cost2_cc <- na.omit(cost2)
cost_cc <- list(cost1_cc, cost2_cc)
cov1_e_fixed <- as.data.frame(x_e_fixed[t1_index, ])
names(cov1_e_fixed) <- colnames(x_e_fixed)
cov2_e_fixed <- as.data.frame(x_e_fixed[t2_index, ])
names(cov2_e_fixed) <- colnames(x_e_fixed)
cov_e_fixed <- list(cov1_e_fixed, cov2_e_fixed)
x_c_hold_fixed <- x_c_fixed
if("e" %in% colnames(x_c_hold_fixed)) {
x_c_fixed <- subset(x_c_hold_fixed, select = -c(e))
}
cov1_c_fixed <- as.data.frame(x_c_fixed[t1_index, ])
names(cov1_c_fixed) <- colnames(x_c_fixed)
cov2_c_fixed <- as.data.frame(x_c_fixed[t2_index, ])
names(cov2_c_fixed) <- colnames(x_c_fixed)
cov_c_fixed <- list(cov1_c_fixed, cov2_c_fixed)
cove_fixed <- list(cov1_e_fixed, cov2_e_fixed)
mean_cov_e_fixed <- list(apply(as.matrix(cov1_e_fixed), 2, mean), apply(as.matrix(cov2_e_fixed), 2, mean))
covc_fixed <- list(cov1_c_fixed, cov2_c_fixed)
mean_cov_c_fixed <- list(apply(as.matrix(cov1_c_fixed), 2, mean), apply(as.matrix(cov2_c_fixed), 2, mean))
cov1_e_center_fixed <- as.data.frame(scale(cov1_e_fixed, scale = FALSE))
cov2_e_center_fixed <- as.data.frame(scale(cov2_e_fixed, scale = FALSE))
cov1_e_center_fixed[, 1] <- rep(1, nrow(cov1_e_fixed))
cov2_e_center_fixed[, 1] <- rep(1, nrow(cov2_e_fixed))
cov_e_center_fixed <- list(cov1_e_center_fixed, cov2_e_center_fixed)
mean_cov_e_center_fixed <- list(apply(as.matrix(cov1_e_center_fixed), 2, mean), apply(as.matrix(cov2_e_center_fixed), 2, mean))
cov1_c_center_fixed <- as.data.frame(scale(cov1_c_fixed, scale = FALSE))
cov2_c_center_fixed <- as.data.frame(scale(cov2_c_fixed, scale = FALSE))
cov1_c_center_fixed[, 1] <- rep(1, nrow(cov1_c_fixed))
cov2_c_center_fixed[, 1] <- rep(1, nrow(cov2_c_fixed))
cov_c_center_fixed <- list(cov1_c_center_fixed, cov2_c_center_fixed)
mean_cov_c_center_fixed <- list(apply(as.matrix(cov1_c_center_fixed), 2, mean), apply(as.matrix(cov2_c_center_fixed), 2, mean))
if(center == TRUE) {
cov_e_fixed <- cov_e_center_fixed
cov_c_fixed <- cov_c_center_fixed
mean_cov_e_fixed <- mean_cov_e_center_fixed
mean_cov_c_fixed <- mean_cov_c_center_fixed
}
if(!is.null(random_e)){
cov1_e_random <- as.data.frame(x_e_random[t1_index, ])
names(cov1_e_random) <- colnames(x_e_random)
cov2_e_random <- as.data.frame(x_e_random[t2_index, ])
names(cov2_e_random) <- colnames(x_e_random)
cov_e_random <- list(cov1_e_random, cov2_e_random)
cove_random <- list(cov1_e_random, cov2_e_random)
mean_cov_e_random <- list(apply(as.matrix(cov1_e_random), 2, mean), apply(as.matrix(cov2_e_random), 2, mean))
cov1_e_center_random <- as.data.frame(scale(cov1_e_random, scale = FALSE))
cov2_e_center_random <- as.data.frame(scale(cov2_e_random, scale = FALSE))
if(no_random_int_e == FALSE) {
cov1_e_center_random[, 1] <- rep(1, nrow(cov1_e_random))
cov2_e_center_random[, 1] <- rep(1, nrow(cov2_e_random))
}
cov_e_center_random <- list(cov1_e_center_random, cov2_e_center_random)
mean_cov_e_center_random <- list(apply(as.matrix(cov1_e_center_random), 2, mean), apply(as.matrix(cov2_e_center_random), 2, mean))
if(center == TRUE) {
cov_e_random <- cov_e_center_random
mean_cov_e_random <- mean_cov_e_center_random
}
clusn_e1 <- clusn_e[t1_index]
clusn_e1 <- factor(clusn_e1, levels = unique(clusn_e1))
clusn_e2 <- clusn_e[t2_index]
clusn_e2 <- factor(clusn_e2, levels = unique(clusn_e2))
}
if(!is.null(random_c)){
x_c_hold_random <- x_c_random
if("e" %in% colnames(x_c_hold_random)) {
x_c_random <- subset(x_c_hold_random, select = -c(e))
}
cov1_c_random <- as.data.frame(x_c_random[t1_index, ])
names(cov1_c_random) <- colnames(x_c_random)
cov2_c_random <- as.data.frame(x_c_random[t2_index, ])
names(cov2_c_random) <- colnames(x_c_random)
cov_c_random <- list(cov1_c_random, cov2_c_random)
covc_random <- list(cov1_c_random, cov2_c_random)
mean_cov_c_random <- list(apply(as.matrix(cov1_c_random), 2, mean), apply(as.matrix(cov2_c_random), 2, mean))
cov1_c_center_random <- as.data.frame(scale(cov1_c_random, scale = FALSE))
cov2_c_center_random <- as.data.frame(scale(cov2_c_random, scale = FALSE))
if(no_random_int_c == FALSE) {
cov1_c_center_random[, 1] <- rep(1, nrow(cov1_c_random))
cov2_c_center_random[, 1] <- rep(1, nrow(cov2_c_random))
}
cov_c_center_random <- list(cov1_c_center_random, cov2_c_center_random)
mean_cov_c_center_random <- list(apply(as.matrix(cov1_c_center_random), 2, mean), apply(as.matrix(cov2_c_center_random), 2, mean))
if(center == TRUE) {
cov_c_random <- cov_c_center_random
mean_cov_c_random <- mean_cov_c_center_random
}
clusn_c1 <- clusn_c[t1_index]
clusn_c1 <- factor(clusn_c1, levels = unique(clusn_c1))
clusn_c2 <- clusn_c[t2_index]
clusn_c2 <- factor(clusn_c2, levels = unique(clusn_c2))
}
data2$e[is.na(data2$e) == TRUE] <- -999999
data2$c[is.na(data2$c) == TRUE] <- -999999
data2$me <- c(m_eff1, m_eff2)
data2$mc <- c(m_cost1, m_cost2)
if(!is.formula(model.me) | !is.formula(model.mc)) {
stop("model.me and/or model.mc must be formula objects")
}
fixed_me <- nobars_(model.me)
fixed_mc <- nobars_(model.mc)
random_me <- fb(model.me)
random_mc <- fb(model.mc)
if(!is.null(random_me) & length(random_me) > 1 | !is.null(random_mc) & length(random_mc) > 1) {
stop("random effects can be included in the formula only through a single expression within brackets")
}
if(all(names(model.frame(fixed_me, data = data2)) %in% c("me", "e", names(cov_matrix))) == FALSE |
all(names(model.frame(fixed_mc, data = data2)) %in% c("mc", "c", names(cov_matrix))) == FALSE) {
stop("partially-observed covariates cannot be included in the model")
}
if(all(names(model.frame(fixed_me, data = data2)) %in% names(data2)) == FALSE |
all(names(model.frame(fixed_mc, data = data2)) %in% names(data2)) == FALSE) {
stop("you must provide names in the formula that correspond to those in the data")
}
if(names(model.frame(fixed_me, data = data2)[1]) != "me") {
stop("you must set 'me' as the response in the formula model.me")
}
if(names(model.frame(fixed_mc, data = data2)[1]) != "mc") {
stop("you must set 'mc' as the response in the formula model.mc")
}
if("t" %in% names(model.frame(fixed_mc, data = data2)) | "t" %in% names(model.frame(fixed_me, data = data2))) {
stop("treatment indicator must be provided only in the data. Please remove 't' from 'model.me' and/or 'model.mc'")
}
if("c" %in% names(model.frame(fixed_me, data = data2)) | "e" %in% names(model.frame(fixed_mc, data = data2))) {
stop("please remove 'e' from model.mc and/or remove 'c' from model.me")
}
if("e" %in% labels(terms(fixed_me))) {
if(length(grep(":e", labels(terms(fixed_me)))) != 0 | length(grep("e:", labels(terms(fixed_me)))) != 0) {
stop("no interaction effects for 'e' is allowed")
}
}
if("c" %in% labels(terms(fixed_mc))) {
if(length(grep(":c", labels(terms(fixed_mc)))) != 0 | length(grep("c:", labels(terms(fixed_mc)))) != 0) {
stop("no interaction effects for 'c' is allowed")
}
}
name_re_me_coeff <- NULL
name_re_mc_coeff <- NULL
if(!is.null(random_me)){
name_re_me_coeff <- sub("\\|.*", "", random_me)
if(grepl("0 + 1", name_re_me_coeff, fixed = TRUE) == TRUE) { stop("Either remove or add the random intercept")}
name_clus_me <- sub('.*\\|', '', random_me)
if(lengths(strsplit(name_clus_me, " ")) > 2) {stop("a single clustering variable must be selected for each formula") }
name_clus_me <- gsub(" ", "", name_clus_me, fixed = TRUE)
if(!name_clus_me %in% names(cov_matrix)) { stop("the clustering variable must be among the variables in the dataset") }
if(strsplit(name_re_me_coeff, "")[[1]][1] == 0) {
no_random_int_me <- TRUE} else {no_random_int_me <- FALSE }
if(no_random_int_me == TRUE) {
name_re_me_coeff <- sub("[0]", "", name_re_me_coeff)
name_re_me_coeff <- sub("[+]", "", name_re_me_coeff)
}
if(name_re_me_coeff == "" | name_re_me_coeff == " ") { stop("please state for which variables the random effects are assumed") }
if(gsub(" ", "", name_re_me_coeff) == "e" & no_random_int_me == FALSE) {name_re_me_coeff <- "1 + e" }
fname_re_me_coeff <- as.formula(paste("me", name_re_me_coeff, sep=" ~ "))
if(all(names(model.frame(fname_re_me_coeff, data = data2)) %in% c("0","1", names(model.frame(fixed_me, data = data2)))) == FALSE) {
stop("only covariates inlcued as fixed effects can be included in the random effects model")
}
if("me" %in% labels(terms(fname_re_me_coeff))) {
stop("please remove 'me' from the right hand side of model.me")
}
if("e" %in% labels(terms(fname_re_me_coeff))) {
if(length(grep(":e", labels(terms(fname_re_me_coeff)))) != 0 | length(grep("e:", labels(terms(fname_re_me_coeff)))) != 0) {
stop("no interaction effects for 'e' are allowed")
}
}
clus_me <- data[, name_clus_me]
if(!is.factor(clus_me)) { stop("clustering variables must be defined as factors") }
clusn_me <- as.numeric(clus_me)
if(!all(diff(sort(unique(clusn_me))) == 1) | !min(clusn_me) == 1) {
stop("ordered levels of clustering variables must not have gaps and must start from 1")
}
}
if(!is.null(random_mc)){
name_re_mc_coeff <- sub("\\|.*", "", random_mc)
if(grepl("0 + 1", name_re_mc_coeff, fixed = TRUE) == TRUE) { stop("Either remove or add the random intercept")}
name_clus_mc <- sub('.*\\|', '', random_mc)
if(lengths(strsplit(name_clus_mc, " ")) > 2) {stop("a single clustering variable must be selected for each formula") }
name_clus_mc <- gsub(" ", "", name_clus_mc, fixed = TRUE)
if(!name_clus_mc %in% names(cov_matrix)) { stop("the clustering variable must be among the variables in the dataset") }
if(strsplit(name_re_mc_coeff, "")[[1]][1] == 0) {
no_random_int_mc <- TRUE} else {no_random_int_mc <- FALSE }
if(no_random_int_mc == TRUE) {
name_re_mc_coeff <- sub("[0]", "", name_re_mc_coeff)
name_re_mc_coeff <- sub("[+]", "", name_re_mc_coeff)
}
if(name_re_mc_coeff == "" | name_re_mc_coeff == " ") { stop("please state for which variables the random effects are assumed") }
if(gsub(" ", "", name_re_mc_coeff) == "c" & no_random_int_mc == FALSE) {name_re_mc_coeff <- "1 + c" }
fname_re_mc_coeff <- as.formula(paste("mc", name_re_mc_coeff, sep=" ~ "))
if(all(names(model.frame(fname_re_mc_coeff, data = data2)) %in% c("0","1", names(model.frame(fixed_mc, data = data2)))) == FALSE) {
stop("only covariates inlcued as fixed effects can be included in the random effects model")
}
if("mc" %in% labels(terms(fname_re_mc_coeff))) {
stop("please remove 'mc' from the right hand side of model.mc")
}
if("c" %in% labels(terms(fname_re_mc_coeff))) {
if(length(grep(":c", labels(terms(fname_re_mc_coeff)))) != 0 | length(grep("c:", labels(terms(fname_re_mc_coeff)))) != 0) {
stop("no interaction effects for 'c' is allowed")
}
}
clus_mc <- data[, name_clus_mc]
if(!is.factor(clus_mc)) { stop("clustering variables must be defined as factors") }
clusn_mc <- as.numeric(clus_mc)
if(!all(diff(sort(unique(clusn_mc))) == 1) | !min(clusn_mc) == 1) {
stop("ordered levels of clustering variables must not have gaps and must start from 1")
}
}
mf_me_fixed <- model.frame(formula = fixed_me, data = data2)
mf_mc_fixed <- model.frame(formula = fixed_mc, data = data2)
z_e_fixed <- model.matrix(attr(mf_me_fixed, "terms"), data = mf_me_fixed)
z_c_fixed <- model.matrix(attr(mf_mc_fixed, "terms"), data = mf_mc_fixed)
z_e_hold_fixed <- z_e_fixed
if("e" %in% colnames(z_e_hold_fixed)) {
z_e_fixed <- subset(z_e_hold_fixed, select = -c(e))
}
z_c_hold_fixed <- z_c_fixed
if("c" %in% colnames(z_c_hold_fixed)) {
z_c_fixed <- subset(z_c_hold_fixed, select = -c(c))
}
covz1_e_fixed <- as.data.frame(z_e_fixed[t1_index, ])
names(covz1_e_fixed) <- colnames(z_e_fixed)
covz2_e_fixed <- as.data.frame(z_e_fixed[t2_index, ])
names(covz2_e_fixed) <- colnames(z_e_fixed)
covz_e_fixed <- list(covz1_e_fixed, covz2_e_fixed)
covze_fixed <- list(covz1_e_fixed, covz2_e_fixed)
mean_covz_e_fixed <- list(apply(as.matrix(covz1_e_fixed), 2, mean), apply(as.matrix(covz2_e_fixed), 2, mean))
names(covz_e_fixed) <- names(mean_covz_e_fixed) <- c("Control", "Intervention")
covz1_c_fixed <- as.data.frame(z_c_fixed[t1_index, ])
names(covz1_c_fixed) <- colnames(z_c_fixed)
covz2_c_fixed <- as.data.frame(z_c_fixed[t2_index, ])
names(covz2_c_fixed) <- colnames(z_c_fixed)
covz_c_fixed <- list(covz1_c_fixed, covz2_c_fixed)
covzc_fixed <- list(covz1_c_fixed, covz2_c_fixed)
mean_covz_c_fixed <- list(apply(as.matrix(covz1_c_fixed), 2, mean), apply(as.matrix(covz2_c_fixed), 2, mean))
covz1_e_center_fixed <- as.data.frame(scale(covz1_e_fixed, scale = FALSE))
covz2_e_center_fixed <- as.data.frame(scale(covz2_e_fixed, scale = FALSE))
covz1_e_center_fixed[, 1] <- rep(1, nrow(covz1_e_fixed))
covz2_e_center_fixed[, 1] <- rep(1, nrow(covz2_e_fixed))
covz_e_center_fixed <- list(covz1_e_center_fixed, covz2_e_center_fixed)
mean_covz_e_center_fixed <- list(apply(as.matrix(covz1_e_center_fixed), 2, mean), apply(as.matrix(covz2_e_center_fixed), 2, mean))
covz1_c_center_fixed <- as.data.frame(scale(covz1_c_fixed, scale = FALSE))
covz2_c_center_fixed <- as.data.frame(scale(covz2_c_fixed, scale = FALSE))
covz1_c_center_fixed[, 1] <- rep(1, nrow(covz1_c_fixed))
covz2_c_center_fixed[, 1] <- rep(1, nrow(covz2_c_fixed))
covz_c_center_fixed <- list(covz1_c_center_fixed, covz2_c_center_fixed)
mean_covz_c_center_fixed <- list(apply(as.matrix(covz1_c_center_fixed), 2, mean), apply(as.matrix(covz2_c_center_fixed), 2, mean))
if(center == TRUE) {
covz_e_fixed <- covz_e_center_fixed
covz_c_fixed <- covz_c_center_fixed
mean_covz_e_fixed <- mean_covz_e_center_fixed
mean_covz_c_fixed <- mean_covz_c_center_fixed
}
if(!is.null(random_me)){
mf_me_random <- model.frame(formula = fname_re_me_coeff, data = data2)
z_e_random <- model.matrix(attr(mf_me_random, "terms"), data = mf_me_random)
if(no_random_int_me == TRUE) {
z_e_random <- as.matrix(z_e_random[, !colnames(z_e_random) == "(Intercept)"])
if(dim(z_e_random)[2] == 1) { colnames(z_e_random) <- colnames(model.matrix(attr(mf_me_random, "terms"), data = mf_me_random))[2] }
}
z_e_hold_random <- z_e_random
if("e" %in% colnames(z_e_hold_random)) {
z_e_random <- subset(z_e_hold_random, select = -c(e))
}
covz1_e_random <- as.data.frame(z_e_random[t1_index, ])
names(covz1_e_random) <- colnames(z_e_random)
covz2_e_random <- as.data.frame(z_e_random[t2_index, ])
names(covz2_e_random) <- colnames(z_e_random)
covz_e_random <- list(covz1_e_random, covz2_e_random)
covze_random <- list(covz1_e_random, covz2_e_random)
mean_covz_e_random <- list(apply(as.matrix(covz1_e_random), 2, mean), apply(as.matrix(covz2_e_random), 2, mean))
names(covz_e_random) <- names(mean_covz_e_random) <- c("Control", "Intervention")
covz1_e_center_random <- as.data.frame(scale(covz1_e_random, scale = FALSE))
covz2_e_center_random <- as.data.frame(scale(covz2_e_random, scale = FALSE))
if(no_random_int_me == FALSE) {
covz1_e_center_random[, 1] <- rep(1, nrow(covz1_e_random))
covz2_e_center_random[, 1] <- rep(1, nrow(covz2_e_random))
}
covz_e_center_random <- list(covz1_e_center_random, covz2_e_center_random)
mean_covz_e_center_random <- list(apply(as.matrix(covz1_e_center_random), 2, mean), apply(as.matrix(covz2_e_center_random), 2, mean))
if(center == TRUE) {
covz_e_random <- covz_e_center_random
mean_covz_e_random <- mean_covz_e_center_random
}
clusn_me1 <- clusn_me[t1_index]
clusn_me1 <- factor(clusn_me1, levels = unique(clusn_me1))
clusn_me2 <- clusn_me[t2_index]
clusn_me2 <- factor(clusn_me2, levels = unique(clusn_me2))
}
if(!is.null(random_mc)){
mf_mc_random <- model.frame(formula = fname_re_mc_coeff, data = data2)
z_c_random <- model.matrix(attr(mf_mc_random, "terms"), data = mf_mc_random)
if(no_random_int_mc == TRUE) {
z_c_random <- as.matrix(z_c_random[, !colnames(z_c_random) == "(Intercept)"])
if(dim(z_c_random)[2] == 1) { colnames(z_c_random) <- colnames(model.matrix(attr(mf_mc_random, "terms"), data = mf_mc_random))[2] }
}
z_c_hold_random <- z_c_random
if("c" %in% colnames(z_c_hold_random)) {
z_c_random <- subset(z_c_hold_random, select = -c(c))
}
covz1_c_random <- as.data.frame(z_c_random[t1_index, ])
names(covz1_c_random) <- colnames(z_c_random)
covz2_c_random <- as.data.frame(z_c_random[t2_index, ])
names(covz2_c_random) <- colnames(z_c_random)
covz_c_random <- list(covz1_c_random, covz2_c_random)
covzc_random <- list(covz1_c_random, covz2_c_random)
mean_covz_c_random <- list(apply(as.matrix(covz1_c_random), 2, mean), apply(as.matrix(covz2_c_random), 2, mean))
names(covz_c_random) <- names(mean_covz_c_random) <- c("Control", "Intervention")
covz1_c_center_random <- as.data.frame(scale(covz1_c_random, scale = FALSE))
covz2_c_center_random <- as.data.frame(scale(covz2_c_random, scale = FALSE))
if(no_random_int_mc == FALSE) {
covz1_c_center_random[, 1] <- rep(1, nrow(covz1_c_random))
covz2_c_center_random[, 1] <- rep(1, nrow(covz2_c_random))
}
covz_c_center_random <- list(covz1_c_center_random, covz2_c_center_random)
mean_covz_c_center_random <- list(apply(as.matrix(covz1_c_center_random), 2, mean), apply(as.matrix(covz2_c_center_random), 2, mean))
if(center == TRUE) {
covz_c_random <- covz_c_center_random
mean_covz_c_random <- mean_covz_c_center_random
}
clusn_mc1 <- clusn_mc[t1_index]
clusn_mc1 <- factor(clusn_mc1, levels = unique(clusn_mc1))
clusn_mc2 <- clusn_mc[t2_index]
clusn_mc2 <- factor(clusn_mc2, levels = unique(clusn_mc2))
}
names(covz_e_fixed) <- names(mean_covz_e_fixed) <- names(covz_c_fixed) <- names(mean_covz_c_fixed) <- c("Control", "Intervention")
names(cov_e_fixed) <- names(cov_c_fixed) <- names(mean_cov_e_fixed) <- names(mean_cov_c_fixed) <- c("Control", "Intervention")
if(!is.null(random_c)) {
names(cov_c_random) <- names(mean_cov_c_random) <- c("Control", "Intervention")
clusn_c <- list("Control" = clusn_c1, "Intervention" = clusn_c2)
} else {cov_c_random <- mean_cov_c_random <- NULL}
if(!is.null(random_mc)) {
names(covz_c_random) <- names(mean_covz_c_random) <- c("Control", "Intervention")
clusn_mc <- list("Control" = clusn_mc1, "Intervention" = clusn_mc2)
} else {covz_c_random <- mean_covz_c_random <- NULL}
if(!is.null(random_e)) {
names(cov_e_random) <- names(mean_cov_e_random) <- c("Control", "Intervention")
clusn_e <- list("Control" = clusn_e1, "Intervention" = clusn_e2)
} else {cov_e_random <- mean_cov_e_random <- NULL}
if(!is.null(random_me)) {
names(covz_e_random) <- names(mean_covz_e_random) <- c("Control", "Intervention")
clusn_me <- list("Control" = clusn_me1, "Intervention" = clusn_me2)
} else {covz_e_random <- mean_covz_e_random <- NULL}
names(m_eff) <- names(m_cost) <- c("Control", "Intervention")
names(effects) <- names(costs) <- names(eff_cc) <- names(cost_cc) <- c("Control", "Intervention")
data_raw <- list("raw_effects" = effects, "raw_costs" = costs, "raw_effects_cc" = eff_cc, "raw_costs_cc" = cost_cc, "arm_lengths" = N,
"arm_lengths_cc" = N_cc, "arm_missing_data" = N_mis, "missing_effects" = m_eff, "missing_costs" = m_cost,
"covariates_effects_fixed" = cov_e_fixed, "covariates_costs_fixed" = cov_c_fixed, "mean_cov_effects_fixed" = mean_cov_e_fixed, "mean_cov_costs_fixed" = mean_cov_c_fixed,
"covariates_missing_effects_fixed" = covz_e_fixed, "mean_cov_missing_effects_fixed" = mean_covz_e_fixed, "covariates_missing_costs_fixed" = covz_c_fixed,
"mean_cov_missing_costs_fixed" = mean_covz_c_fixed, "covariates_effects_random" = cov_e_random, "covariates_costs_random" = cov_c_random, "mean_cov_effects_random" = mean_cov_e_random, "mean_cov_costs_random" = mean_cov_c_random,
"covariates_missing_effects_random" = covz_e_random, "mean_cov_missing_effects_random" = mean_covz_e_random, "covariates_missing_costs_random" = covz_c_random,
"mean_cov_missing_costs_random" = mean_covz_c_random, "clus_e" = clusn_e, "clus_c" = clusn_c, "clus_me" = clusn_me, "clus_mc" = clusn_mc, "data_ind" = data2)
model_formula <- list("mf_model.e_fixed" = fixed_e, "mf_model.c_fixed" = fixed_c, "mf_model.me_fixed" = fixed_me, "mf_model.mc_fixed" = fixed_mc,
"mf_model.e_random" = fname_re_e_coeff, "mf_model.c_random" = fname_re_c_coeff, "mf_model.me_random" = fname_re_me_coeff, "mf_model.mc_random" = fname_re_mc_coeff)
data_list <- list("data_raw" = data_raw, "model_formula" = model_formula)
return(data_list)
} |
library(Seurat)
library(ggplot2)
library(monocle)
# NOTE(review): hard-coded absolute path; the script only runs from this machine.
setwd("~/Desktop/R code/")
# =============================================================================================
# All sequenced cells, before removing blood cells
# =============================================================================================
# Load the raw count matrix (genes x cells); column 1 of the CSV holds gene names.
expresswt2336 <- read.csv("wtexpress2336.csv",stringsAsFactors=F)
dim(expresswt2336)
rownames(expresswt2336) <- expresswt2336[,1]
rownames(expresswt2336)
# Identify and remove ERCC spike-in rows before building the Seurat object.
grep("ERCC",rownames(expresswt2336),value = T)
expresswt2336 <- expresswt2336[setdiff(rownames(expresswt2336),grep("ERCC",rownames(expresswt2336),value = T)),]
# Drop the gene-name column now that it is stored in rownames.
expresswt2336 <- expresswt2336[,-1]
# Per-cell metadata; its rownames must match the expression matrix column names.
metadatawt2336 <- read.csv("wtmeta2336.csv")
rownames(metadatawt2336) <- metadatawt2336[,1]
colnames(metadatawt2336)
table(rownames(metadatawt2336)==colnames(expresswt2336)) # sanity check: should be all TRUE
# Keep genes detected in >= 3 cells and cells with >= 1000 detected genes.
wt2336 <- CreateSeuratObject(counts = expresswt2336, project = "wt2336", min.cells = 3, min.features = 1000)
# NOTE(review): "percent.mt" is never computed via PercentageFeatureSet() in this
# script -- confirm it exists in the object, otherwise VlnPlot will warn/drop it.
VlnPlot(wt2336, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3)
FeatureScatter(wt2336, feature1 = "nCount_RNA", feature2 = "nFeature_RNA")
# QC filter: 1000-6000 genes per cell and < 200k total counts.
wt2336 <- subset(wt2336, subset = nFeature_RNA > 1000 & nFeature_RNA < 6000 & nCount_RNA < 200000)
wt2336 <- AddMetaData(wt2336,metadata = metadatawt2336 )
dim(wt2336@meta.data)
# Standard Seurat pipeline: log-normalise, pick 2000 variable genes, scale, PCA.
wt2336 <- NormalizeData(wt2336, normalization.method = "LogNormalize", scale.factor = 10000)
wt2336 <- FindVariableFeatures(wt2336, selection.method = "vst", nfeatures = 2000)
top10 <- head(VariableFeatures(wt2336), 10)
plot1 <- VariableFeaturePlot(wt2336)
plot2 <- LabelPoints(plot = plot1, points = top10, repel = TRUE)
CombinePlots(plots = list(plot1, plot2))
all.genes <- rownames(wt2336)
wt2336 <- ScaleData(wt2336, features = all.genes)
wt2336 <- RunPCA(wt2336, features = VariableFeatures(object = wt2336))
print(wt2336[["pca"]], dims = 1:5, nfeatures = 5)
VizDimLoadings(wt2336, dims = 1:2, reduction = "pca")
DimPlot(wt2336, reduction = "pca")
DimHeatmap(wt2336, dims = 1, cells = 500, balanced = TRUE)
# Graph-based clustering on the first 15 PCs, then t-SNE with a fixed seed
# so the embedding is reproducible.
wt2336 <- FindNeighbors(wt2336, dims = 1:15)
wt2336 <- FindClusters(wt2336, resolution = 0.2)
wt2336 <- RunTSNE(wt2336, dims = 1:15,seed.use = 1,perplexity=50)
DimPlot(wt2336, reduction = "tsne",pt.size = 2,shape.by ="condition",label = T)
# gata1a / hbbe genes mark the blood-cell cluster (cluster 1, removed below).
FeaturePlot(wt2336, features =c("gata1a","hbbe1.3","hbbe2"),cols = c("grey", "red"),pt.size=1,min.cutoff =0)
bloodcell <- WhichCells(wt2336,ident = 1)
# Candidate lineage markers for the remaining (non-blood) clusters.
FeaturePlot(wt2336, features =c("myh6","vmhc","fli1a","tcf21","twist1a","vim"),cols = c("grey", "red"),pt.size=2,min.cutoff =0)
# =============================================================================================
# Fig1b: remove blood cells, retain 1581 cells
# =============================================================================================
# Drop cluster 1 (blood cells flagged above) and redo the whole pipeline on the rest.
wt2336blood <- subset(wt2336, idents = 1,invert=T)
dim(wt2336blood@meta.data)
wt2336blood <- NormalizeData(wt2336blood, normalization.method = "LogNormalize", scale.factor = 10000)
wt2336blood <- FindVariableFeatures(wt2336blood, selection.method = "vst", nfeatures = 2000)
top10 <- head(VariableFeatures(wt2336blood), 10)
plot1 <- VariableFeaturePlot(wt2336blood)
plot2 <- LabelPoints(plot = plot1, points = top10, repel = TRUE)
CombinePlots(plots = list(plot1, plot2))
all.genes <- rownames(wt2336blood)
wt2336blood <- ScaleData(wt2336blood, features = all.genes) # original garbled note suggested optionally adding vars.to.regress = "run"
wt2336blood <- RunPCA(wt2336blood, features = VariableFeatures(object = wt2336blood))
print(wt2336blood[["pca"]], dims = 1:5, nfeatures = 5)
VizDimLoadings(wt2336blood, dims = 1:2, reduction = "pca")
DimPlot(wt2336blood, reduction = "pca")
DimHeatmap(wt2336blood, dims = 1, cells = 500, balanced = TRUE)
# JackStraw / Elbow diagnostics to choose the number of PCs (15 used below).
wt2336blood <- JackStraw(wt2336blood, num.replicate = 100)
wt2336blood <- ScoreJackStraw(wt2336blood, dims = 1:20)
JackStrawPlot(wt2336blood, dims = 1:15)
ElbowPlot(wt2336blood)
wt2336blood <- FindNeighbors(wt2336blood, dims = 1:15)
wt2336blood <- FindClusters(wt2336blood, resolution = 0.2)
wt2336blood <- RunTSNE(wt2336blood, dims = 1:15,seed.use = 1,perplexity=30)
DimPlot(wt2336blood, reduction = "tsne",pt.size = 2,shape.by ="condition",label = T)
FeaturePlot(wt2336blood,features = c("myh6","twist1a","vmhc"),reduction = "tsne")
# Map numeric cluster ids to the cell-type labels used in the figures
# (CM-A, CM-V, EP, EC, EPDC).
wt2336blood <- RenameIdents(wt2336blood, "0" = "CM-A")
wt2336blood <- RenameIdents(wt2336blood, "5" = "CM-A")
wt2336blood <- RenameIdents(wt2336blood, "3" = "CM-V")
wt2336blood <- RenameIdents(wt2336blood, "4" = "CM-V")
wt2336blood <- RenameIdents(wt2336blood, "6" = "EP")
wt2336blood <- RenameIdents(wt2336blood, "1" = "EC")
wt2336blood <- RenameIdents(wt2336blood, "2" = "EPDC")
wt2336blood <- RenameIdents(wt2336blood, "8" = "CM-V")
wt2336blood <- RenameIdents(wt2336blood, "7" = "CM-V")
plot <- FeaturePlot(wt2336blood, features =c("myh6"),cols = c("grey", "red"),pt.size=1,reduction = "tsne")
#select.cells <- CellSelector(plot = plot)
# Re-identify individual cells according to their expression profile.
# The cell name list below was captured interactively with CellSelector()
# (commented call above) and hard-coded for reproducibility.
select.cells <- c("B06_1dpt_MTZ_CMA.sc24", "B06_1dpt_MTZ_CMA.sc26", "B06_1dpt_MTZ_CMA.sc33",
                  "B06_1dpt_MTZ_CMA.sc58", "B06_1dpt_MTZ_CMA.sc73", "B06_1dpt_MTZ_CMA.sc74",
                  "B06_1dpt_MTZ_CMA.sc89", "B06_2dpt_CT_CMA.sc3" , "B06_2dpt_CT_CMA.sc5" ,
                  "B06_2dpt_CT_CMA.sc8" , "B06_2dpt_CT_CMA.sc10" , "B06_2dpt_CT_CMA.sc13" ,
                  "B06_2dpt_CT_CMA.sc18" ,"B06_2dpt_CT_CMA.sc19", "B06_2dpt_CT_CMA.sc20" ,
                  "B06_2dpt_CT_CMA.sc24" , "B06_2dpt_CT_CMA.sc30" , "B06_2dpt_CT_CMA.sc32" ,
                  "B06_2dpt_CT_CMA.sc45" , "B06_2dpt_CT_CMA.sc47" , "B06_2dpt_CT_CMV.sc55" ,
                  "B06_2dpt_MTZ_CMA.sc4" , "B06_2dpt_MTZ_CMA.sc22", "B06_2dpt_MTZ_CMA.sc49",
                  "B06_2dpt_MTZ_CMA.sc64", "B07_3dpt_CT_CMA2.sc29" ,"B07_3dpt_CT_CMA2.sc31",
                  "B06_3dpt_MTZ_CMA.sc4" , "B06_3dpt_MTZ_CMA.sc20" ,"B06_3dpt_MTZ_CMA.sc92",
                  "B06_3dpt_MTZ_CMA.sc94")
Idents(wt2336blood,cells= select.cells) <- "CM-A"
plot <- FeaturePlot(wt2336blood, features =c("twist1a"),cols = c("grey", "red"),pt.size=1,reduction = "tsne")
#select.cells2<- CellSelector(plot = plot)
# Second hand-curated list: cells reassigned to EPDC.
select.cells2 <- c("B01_1dpt_MTZ_V2.sc62" , "B01_1dpt_MTZ_V2.sc84", "B01_1dpt_CT_V1.sc33" ,
                   "B02_1dpt_CT_AV3.sc22" , "B02_1dpt_CT_AV3.sc34" , "B02_1dpt_CT_AV4.sc70" ,
                   "B02_1dpt_CT_AV4.sc79" , "B03_4dpt_CT_AV1.sc22", "B03_4dpt_CT_AV1.sc74" ,
                   "B04_2dpt_CT_AV1.sc1" , "B04_2dpt_CT_AV1.sc3" , "B04_2dpt_CT_AV2.sc34" ,
                   "B04_2dpt_MTZ_AV1.sc5" , "B04_2dpt_MTZ_AV1.sc32" ,"B04_2dpt_MTZ_AV1.sc91",
                   "B04_2dpt_MTZ_AV1.sc94","B04_2dpt_MTZ_AV1.sc95","B05_3dpt_CT_AV1.sc46",
                   "B05_3dpt_CT_AV1.sc50" , "B05_3dpt_CT_AV1.sc74" , "B05_3dpt_CT_AV2.sc49" ,
                   "B05_3dpt_MTZ_AV1.sc17" ,"B05_3dpt_MTZ_AV1.sc18" ,"B05_3dpt_MTZ_AV1.sc57",
                   "B05_3dpt_MTZ_AV1.sc63", "B05_3dpt_MTZ_AV1.sc81" ,"B05_3dpt_MTZ_AV2.sc25")
Idents(wt2336blood,cells= select.cells2) <- "EPDC"
DimPlot(wt2336blood,cells.highlight = select.cells2,reduction = "tsne")
# Prefix letters (a..e) impose the display order of the identities in plots.
wt2336blood <- RenameIdents(wt2336blood,"EPDC" = "eEPDC") #change order
wt2336blood <- RenameIdents(wt2336blood,"EP" = "dEP")
wt2336blood <- RenameIdents(wt2336blood,"EC" = "cEC")
wt2336blood <- RenameIdents(wt2336blood,"CM-V" = "bCM-V")
wt2336blood <- RenameIdents(wt2336blood,"CM-A" = "aCM-A")
DimPlot(wt2336blood, reduction = "tsne",pt.size = 2,shape.by ="condition",label = F,
        cols = c("darkolivegreen4","brown3","darkgoldenrod3","dodgerblue3","darkorchid"))
# ==============================================================================================
# Fig1c: marker bar plots -- build a cells x genes expression table first
# ==============================================================================================
# Transpose the raw counts into a cells-by-genes data frame, then transform
# to log2(counts/10 + 1) expression values for plotting.
wt2336bloodexpress1 <- wt2336blood[["RNA"]]@counts
wt2336bloodexpress1 <- as.data.frame(wt2336bloodexpress1)
wt2336bloodexpress1 <- t(wt2336bloodexpress1)
wt2336bloodexpress1 <- as.data.frame(wt2336bloodexpress1)
wt2336bloodexpress1 <- log2(wt2336bloodexpress1/10 + 1)
class(wt2336bloodexpress1)
dim(wt2336bloodexpress1)
# Sanity checks: cell order must match the metadata rows and the identity vector.
table(rownames(wt2336bloodexpress1)==rownames(wt2336blood@meta.data))
Idents(wt2336blood)
table(rownames(wt2336bloodexpress1)==names(Idents(wt2336blood)))
# Attach the cell-type identity as the grouping column used by summarySE() below.
wt2336bloodexpress1$ident <- Idents(wt2336blood)
table(wt2336bloodexpress1$ident)
library(ggplot2)
#' Summarise a numeric column by group: N, mean, sd, standard error, and
#' confidence-interval half-width.
#'
#' Base-R replacement for the classic "Cookbook for R" summarySE(). The
#' original called library(plyr) inside the function body, which re-attaches
#' plyr on every call and masks dplyr::rename()/dplyr::summarise() for the
#' rest of the session -- this script loads dplyr later, so that masking is a
#' real hazard. Interface and output columns are unchanged:
#' <groupvars...>, N, <measurevar>, sd, se, ci.
#'
#' @param data data.frame containing the measurement and grouping columns.
#' @param measurevar name (string) of the numeric column to summarise.
#' @param groupvars character vector of grouping column names; NULL treats
#'   the whole data frame as a single group.
#' @param na.rm if TRUE, NAs are excluded from N, mean and sd.
#' @param conf.interval confidence level for the ci column (default 95%).
#' @param .drop drop grouping-level combinations with no rows (as in plyr)?
#' @return data.frame with one row per group.
summarySE <- function(data=NULL, measurevar, groupvars=NULL, na.rm=FALSE,
                      conf.interval=.95, .drop=TRUE) {
  # New version of length which can handle NA's: if na.rm==T, don't count them.
  length2 <- function (x, na.rm=FALSE) {
    if (na.rm) sum(!is.na(x))
    else length(x)
  }
  vals <- data[[measurevar]]
  if (is.null(groupvars)) {
    # Single implicit group covering every row; zero grouping columns.
    pieces <- list(vals)
    datac <- data.frame(row.names = 1L)
  } else {
    # Collapse the grouping columns into one factor; .drop controls whether
    # empty level combinations are kept, mirroring plyr::ddply(.drop=).
    grp <- interaction(data[, groupvars, drop = FALSE], drop = .drop, sep = "\r")
    pieces <- split(vals, grp)
    # One representative row per group carries the grouping-column values.
    first_row <- vapply(split(seq_len(nrow(data)), grp),
                        function(i) if (length(i) > 0) i[1] else NA_integer_,
                        integer(1))
    datac <- data[first_row, groupvars, drop = FALSE]
    rownames(datac) <- NULL
  }
  # Per-group summary statistics, in the same column order as the original.
  datac$N <- vapply(pieces, function(x) as.numeric(length2(x, na.rm = na.rm)), numeric(1))
  datac[[measurevar]] <- vapply(pieces, function(x) mean(x, na.rm = na.rm), numeric(1))
  datac$sd <- vapply(pieces, function(x) sd(x, na.rm = na.rm), numeric(1))
  datac$se <- datac$sd / sqrt(datac$N)  # standard error of the mean
  # t-based half-width: e.g. conf.interval = .95 uses the .975 quantile, df = N-1.
  ciMult <- qt(conf.interval/2 + .5, datac$N - 1)
  datac$ci <- datac$se * ciMult
  datac
}
# NOTE(review): this first call is redundant -- the loop below overwrites
# wt2336bloodexpress1sum for every gene; presumably kept for interactive inspection.
wt2336bloodexpress1sum <- summarySE(wt2336bloodexpress1, measurevar="kdrl", groupvars="ident") #na.rm=TRUE
# One bar plot (mean +/- SE per cell type) per marker gene, all pages in one PDF.
pdf("marker.pdf",width=1.5,height=8)
geneuse <- c("myh6","vmhc","myl7","fli1a","kdrl","tcf21","tbx18","twist1a","vim")
for(i in geneuse){
  print(i)
  # Per-cell-type mean / sd / se / ci of the current gene's expression.
  wt2336bloodexpress1sum <- summarySE(wt2336bloodexpress1, measurevar=i, groupvars="ident") #na.rm=TRUE
  p <- ggplot(wt2336bloodexpress1sum, aes_string(x="ident", y=i,fill="ident")) +
    geom_bar(position=position_dodge(), stat="identity",width = 0.7) +
    xlab("")+ # drop the x-axis label
    scale_fill_manual(values = c("darkolivegreen4","brown3","darkgoldenrod3","dodgerblue3","darkorchid"))+
    #coord_fixed(ratio=1/2)+
    coord_flip()+
    scale_x_discrete(limits=rev(c("aCM-A","bCM-V","cEC","dEP","eEPDC")))+
    geom_errorbar(aes(ymin=get(i)-se, ymax=get(i)+se), # Error bars represent standard error of the mean
                  width=.1, # Width of the error bars
                  position=position_dodge(.9))+
    theme_bw()+ # white-background theme
    theme(axis.text = element_blank(), # hide axis text
          axis.ticks = element_blank(),
          panel.grid.major = element_blank(), # hide major grid lines
          panel.grid.minor = element_blank(),axis.line = element_line(colour = "black"),
          plot.title = element_text(size = 25, face = "bold"),
          axis.title=element_text(size=20,face="bold"),
          legend.position = "") # hide the legend
  print(p)
}
dev.off()
# =============================================================================================
# Fig1d-e: per-cluster marker heatmaps for CT (control) and MTZ (treated) cells
# =============================================================================================
# Control arm: subset CT cells, find positive per-cluster markers (ROC test),
# keep the top 100 per cluster and draw the marker heatmap.
table(wt2336blood@meta.data$condition)
wt2336bloodct <- subset(wt2336blood, condition=="CT")
table(wt2336bloodct@meta.data$condition)
wt2336bloodctmarker <- FindAllMarkers(wt2336bloodct,logfc.threshold = 0.25,test.use = "roc",only.pos = T)
library(dplyr)
# NOTE(review): 'avg_logFC' is the Seurat v3 column name; newer Seurat versions
# use 'avg_log2FC' -- confirm against the installed version.
wt2336bloodctmarker100 <- wt2336bloodctmarker %>% group_by(cluster) %>% top_n(n = 100, wt = avg_logFC)
# BUG FIX: the original tabulated the undefined object 'wt2336bloodctmarkersel'
# (which would error); the intended object is the top-100 table built above.
table(wt2336bloodctmarker100$cluster)
DoHeatmap(wt2336bloodct,features = wt2336bloodctmarker100$gene,label = T,
          group.colors = c("darkolivegreen4","brown3","darkgoldenrod3","dodgerblue3","darkorchid"))
# MTZ arm: same marker detection and heatmap for the treated condition.
wt2336bloodmtz <- subset(wt2336blood, condition=="MTZ")
wt2336bloodmtzmarker <- FindAllMarkers(wt2336bloodmtz,logfc.threshold = 0.25,test.use = "roc",only.pos = T)
library(dplyr)
wt2336bloodmtzmarker100 <- wt2336bloodmtzmarker %>% group_by(cluster) %>% top_n(n = 100, wt = avg_logFC)
DoHeatmap(wt2336bloodmtz,features = wt2336bloodmtzmarker100$gene,
          group.colors = c("darkolivegreen4","brown3","darkgoldenrod3","dodgerblue3","darkorchid"))
# Persist the full workspace for downstream figure scripts.
save.image("~/Desktop/R code/Fig.1.RData")
| /Fig. 1.R | no_license | HeIloworld/NC-Heart | R | false | false | 12,334 | r | library(Seurat)
library(ggplot2)
library(monocle)
setwd("~/Desktop/R code/")
# =============================================================================================
#All sequenced cell, before removing blood cells
#==============================================================================================
expresswt2336 <- read.csv("wtexpress2336.csv",stringsAsFactors=F)
dim(expresswt2336)
rownames(expresswt2336) <- expresswt2336[,1]
rownames(expresswt2336)
grep("ERCC",rownames(expresswt2336),value = T)
expresswt2336 <- expresswt2336[setdiff(rownames(expresswt2336),grep("ERCC",rownames(expresswt2336),value = T)),]
expresswt2336 <- expresswt2336[,-1]
metadatawt2336 <- read.csv("wtmeta2336.csv")
rownames(metadatawt2336) <- metadatawt2336[,1]
colnames(metadatawt2336)
table(rownames(metadatawt2336)==colnames(expresswt2336))
wt2336 <- CreateSeuratObject(counts = expresswt2336, project = "wt2336", min.cells = 3, min.features = 1000)
VlnPlot(wt2336, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3)
FeatureScatter(wt2336, feature1 = "nCount_RNA", feature2 = "nFeature_RNA")
wt2336 <- subset(wt2336, subset = nFeature_RNA > 1000 & nFeature_RNA < 6000 & nCount_RNA < 200000)
wt2336 <- AddMetaData(wt2336,metadata = metadatawt2336 )
dim(wt2336@meta.data)
wt2336 <- NormalizeData(wt2336, normalization.method = "LogNormalize", scale.factor = 10000)
wt2336 <- FindVariableFeatures(wt2336, selection.method = "vst", nfeatures = 2000)
top10 <- head(VariableFeatures(wt2336), 10)
plot1 <- VariableFeaturePlot(wt2336)
plot2 <- LabelPoints(plot = plot1, points = top10, repel = TRUE)
CombinePlots(plots = list(plot1, plot2))
all.genes <- rownames(wt2336)
wt2336 <- ScaleData(wt2336, features = all.genes)
wt2336 <- RunPCA(wt2336, features = VariableFeatures(object = wt2336))
print(wt2336[["pca"]], dims = 1:5, nfeatures = 5)
VizDimLoadings(wt2336, dims = 1:2, reduction = "pca")
DimPlot(wt2336, reduction = "pca")
DimHeatmap(wt2336, dims = 1, cells = 500, balanced = TRUE)
wt2336 <- FindNeighbors(wt2336, dims = 1:15)
wt2336 <- FindClusters(wt2336, resolution = 0.2)
wt2336 <- RunTSNE(wt2336, dims = 1:15,seed.use = 1,perplexity=50)
DimPlot(wt2336, reduction = "tsne",pt.size = 2,shape.by ="condition",label = T)
#gata1, blood cell
FeaturePlot(wt2336, features =c("gata1a","hbbe1.3","hbbe2"),cols = c("grey", "red"),pt.size=1,min.cutoff =0)
bloodcell <- WhichCells(wt2336,ident = 1)
#marker
FeaturePlot(wt2336, features =c("myh6","vmhc","fli1a","tcf21","twist1a","vim"),cols = c("grey", "red"),pt.size=2,min.cutoff =0)
# =============================================================================================
#Fig1b, remove blood cells, retain 1581 cells
#==============================================================================================
# Re-run the full Seurat pipeline on the blood-depleted object
# (normalize -> variable genes -> scale -> PCA -> cluster -> t-SNE).
wt2336blood <- subset(wt2336, idents = 1,invert=T)
dim(wt2336blood@meta.data)
wt2336blood <- NormalizeData(wt2336blood, normalization.method = "LogNormalize", scale.factor = 10000)
wt2336blood <- FindVariableFeatures(wt2336blood, selection.method = "vst", nfeatures = 2000)
top10 <- head(VariableFeatures(wt2336blood), 10)
plot1 <- VariableFeaturePlot(wt2336blood)
plot2 <- LabelPoints(plot = plot1, points = top10, repel = TRUE)
CombinePlots(plots = list(plot1, plot2))
all.genes <- rownames(wt2336blood)
wt2336blood <- ScaleData(wt2336blood, features = all.genes) # NOTE(review): the original (garbled) comment suggested optionally regressing out "run" with vars.to.regress -- confirm whether batch regression is needed
wt2336blood <- RunPCA(wt2336blood, features = VariableFeatures(object = wt2336blood))
print(wt2336blood[["pca"]], dims = 1:5, nfeatures = 5)
VizDimLoadings(wt2336blood, dims = 1:2, reduction = "pca")
DimPlot(wt2336blood, reduction = "pca")
DimHeatmap(wt2336blood, dims = 1, cells = 500, balanced = TRUE)
# Dimensionality diagnostics (JackStraw + elbow) supporting the choice of 15 PCs below.
wt2336blood <- JackStraw(wt2336blood, num.replicate = 100)
wt2336blood <- ScoreJackStraw(wt2336blood, dims = 1:20)
JackStrawPlot(wt2336blood, dims = 1:15)
ElbowPlot(wt2336blood)
wt2336blood <- FindNeighbors(wt2336blood, dims = 1:15)
wt2336blood <- FindClusters(wt2336blood, resolution = 0.2)
wt2336blood <- RunTSNE(wt2336blood, dims = 1:15,seed.use = 1,perplexity=30)
DimPlot(wt2336blood, reduction = "tsne",pt.size = 2,shape.by ="condition",label = T)
FeaturePlot(wt2336blood,features = c("myh6","twist1a","vmhc"),reduction = "tsne")
# Map numeric clusters to cell-type labels: CM-A/CM-V (atrial/ventricular
# cardiomyocytes), EP (epicardium), EC (endothelium), EPDC (epicardium-derived cells).
wt2336blood <- RenameIdents(wt2336blood, "0" = "CM-A")
wt2336blood <- RenameIdents(wt2336blood, "5" = "CM-A")
wt2336blood <- RenameIdents(wt2336blood, "3" = "CM-V")
wt2336blood <- RenameIdents(wt2336blood, "4" = "CM-V")
wt2336blood <- RenameIdents(wt2336blood, "6" = "EP")
wt2336blood <- RenameIdents(wt2336blood, "1" = "EC")
wt2336blood <- RenameIdents(wt2336blood, "2" = "EPDC")
wt2336blood <- RenameIdents(wt2336blood, "8" = "CM-V")
wt2336blood <- RenameIdents(wt2336blood, "7" = "CM-V")
plot <- FeaturePlot(wt2336blood, features =c("myh6"),cols = c("grey", "red"),pt.size=1,reduction = "tsne")
#select.cells <- CellSelector(plot = plot)
#reidentify the cell cluster according to the gene expression profile
# Hard-coded list of myh6-positive cells originally picked interactively with
# CellSelector(); kept explicit so the reassignment is reproducible.
select.cells <- c("B06_1dpt_MTZ_CMA.sc24", "B06_1dpt_MTZ_CMA.sc26", "B06_1dpt_MTZ_CMA.sc33",
                  "B06_1dpt_MTZ_CMA.sc58", "B06_1dpt_MTZ_CMA.sc73", "B06_1dpt_MTZ_CMA.sc74",
                  "B06_1dpt_MTZ_CMA.sc89", "B06_2dpt_CT_CMA.sc3" , "B06_2dpt_CT_CMA.sc5" ,
                  "B06_2dpt_CT_CMA.sc8" , "B06_2dpt_CT_CMA.sc10" , "B06_2dpt_CT_CMA.sc13" ,
                  "B06_2dpt_CT_CMA.sc18" ,"B06_2dpt_CT_CMA.sc19", "B06_2dpt_CT_CMA.sc20" ,
                  "B06_2dpt_CT_CMA.sc24" , "B06_2dpt_CT_CMA.sc30" , "B06_2dpt_CT_CMA.sc32" ,
                  "B06_2dpt_CT_CMA.sc45" , "B06_2dpt_CT_CMA.sc47" , "B06_2dpt_CT_CMV.sc55" ,
                  "B06_2dpt_MTZ_CMA.sc4" , "B06_2dpt_MTZ_CMA.sc22", "B06_2dpt_MTZ_CMA.sc49",
                  "B06_2dpt_MTZ_CMA.sc64", "B07_3dpt_CT_CMA2.sc29" ,"B07_3dpt_CT_CMA2.sc31",
                  "B06_3dpt_MTZ_CMA.sc4" , "B06_3dpt_MTZ_CMA.sc20" ,"B06_3dpt_MTZ_CMA.sc92",
                  "B06_3dpt_MTZ_CMA.sc94")
Idents(wt2336blood,cells= select.cells) <- "CM-A"
plot <- FeaturePlot(wt2336blood, features =c("twist1a"),cols = c("grey", "red"),pt.size=1,reduction = "tsne")
#select.cells2<- CellSelector(plot = plot)
# twist1a-positive cells reassigned to EPDC (same interactive-selection rationale).
select.cells2 <- c("B01_1dpt_MTZ_V2.sc62" , "B01_1dpt_MTZ_V2.sc84", "B01_1dpt_CT_V1.sc33" ,
                   "B02_1dpt_CT_AV3.sc22" , "B02_1dpt_CT_AV3.sc34" , "B02_1dpt_CT_AV4.sc70" ,
                   "B02_1dpt_CT_AV4.sc79" , "B03_4dpt_CT_AV1.sc22", "B03_4dpt_CT_AV1.sc74" ,
                   "B04_2dpt_CT_AV1.sc1" , "B04_2dpt_CT_AV1.sc3" , "B04_2dpt_CT_AV2.sc34" ,
                   "B04_2dpt_MTZ_AV1.sc5" , "B04_2dpt_MTZ_AV1.sc32" ,"B04_2dpt_MTZ_AV1.sc91",
                   "B04_2dpt_MTZ_AV1.sc94","B04_2dpt_MTZ_AV1.sc95","B05_3dpt_CT_AV1.sc46",
                   "B05_3dpt_CT_AV1.sc50" , "B05_3dpt_CT_AV1.sc74" , "B05_3dpt_CT_AV2.sc49" ,
                   "B05_3dpt_MTZ_AV1.sc17" ,"B05_3dpt_MTZ_AV1.sc18" ,"B05_3dpt_MTZ_AV1.sc57",
                   "B05_3dpt_MTZ_AV1.sc63", "B05_3dpt_MTZ_AV1.sc81" ,"B05_3dpt_MTZ_AV2.sc25")
Idents(wt2336blood,cells= select.cells2) <- "EPDC"
DimPlot(wt2336blood,cells.highlight = select.cells2,reduction = "tsne")
# Prefix letters (a..e) force a fixed alphabetical ordering of the identities in plots.
wt2336blood <- RenameIdents(wt2336blood,"EPDC" = "eEPDC") #change order
wt2336blood <- RenameIdents(wt2336blood,"EP" = "dEP")
wt2336blood <- RenameIdents(wt2336blood,"EC" = "cEC")
wt2336blood <- RenameIdents(wt2336blood,"CM-V" = "bCM-V")
wt2336blood <- RenameIdents(wt2336blood,"CM-A" = "aCM-A")
DimPlot(wt2336blood, reduction = "tsne",pt.size = 2,shape.by ="condition",label = F,
        cols = c("darkolivegreen4","brown3","darkgoldenrod3","dodgerblue3","darkorchid"))
# ==============================================================================================
#Fig1c, marker boxplot
# ==============================================================================================
# Build a cells x genes data frame of log-transformed counts, with the cluster
# identity attached as an extra column, for the per-cluster bar plots below.
wt2336bloodexpress1 <- wt2336blood[["RNA"]]@counts
wt2336bloodexpress1 <- as.data.frame(wt2336bloodexpress1)
wt2336bloodexpress1 <- t(wt2336bloodexpress1)  # transpose: rows = cells, columns = genes
wt2336bloodexpress1 <- as.data.frame(wt2336bloodexpress1)
wt2336bloodexpress1 <- log2(wt2336bloodexpress1/10 + 1)  # log2(counts/10 + 1) transform
class(wt2336bloodexpress1)
dim(wt2336bloodexpress1)
# Sanity checks: cell order must match the metadata and the identity vector (expect all TRUE).
table(rownames(wt2336bloodexpress1)==rownames(wt2336blood@meta.data))
Idents(wt2336blood)
table(rownames(wt2336bloodexpress1)==names(Idents(wt2336blood)))
wt2336bloodexpress1$ident <- Idents(wt2336blood)
table(wt2336bloodexpress1$ident)
library(ggplot2)
# Summarize one measurement column within groups.
#
# data:          data frame containing the measurement and grouping columns.
# measurevar:    name (string) of the column to summarise.
# groupvars:     character vector of grouping column names.
# na.rm:         drop NA values before computing the statistics?
# conf.interval: confidence level used for the `ci` column (default 95%).
# .drop:         passed to plyr::ddply; drop unused factor combinations?
#
# Returns a data frame with one row per group and columns:
#   <groupvars>..., N, <measurevar> (the group mean), sd, se, ci.
summarySE <- function(data=NULL, measurevar, groupvars=NULL, na.rm=FALSE,
                      conf.interval=.95, .drop=TRUE) {
  # BUG FIX: the original called library(plyr) here. Attaching plyr after dplyr
  # masks dplyr verbs (rename, summarise, ...) for the rest of the session and
  # silently breaks later dplyr code in this file; use namespaced calls instead.
  if (!requireNamespace("plyr", quietly = TRUE)) {
    stop("summarySE() requires the 'plyr' package.", call. = FALSE)
  }
  # length() variant that can ignore NAs when na.rm = TRUE.
  length2 <- function(x, na.rm = FALSE) {
    if (na.rm) sum(!is.na(x)) else length(x)
  }
  # Per-group N, mean and sd of the measurement column.
  datac <- plyr::ddply(data, groupvars, .drop = .drop,
                       .fun = function(xx, col) {
                         c(N    = length2(xx[[col]], na.rm = na.rm),
                           mean = mean(xx[[col]], na.rm = na.rm),
                           sd   = sd(xx[[col]], na.rm = na.rm))
                       },
                       measurevar)
  # Rename the "mean" column to the measurement variable's own name.
  datac <- plyr::rename(datac, c("mean" = measurevar))
  datac$se <- datac$sd / sqrt(datac$N)  # standard error of the mean
  # t-based confidence-interval half-width:
  # e.g. conf.interval = .95 uses qt(.975, df = N - 1).
  ciMult <- qt(conf.interval/2 + .5, datac$N - 1)
  datac$ci <- datac$se * ciMult
  datac
}
# ----------------------------------------------------------------------------------------------
# Fig1c: per-cluster mean-expression bar plots (mean +/- SEM) for marker genes,
# one page per gene, written to marker.pdf.
# (The original also called summarySE() once before the loop; that result was
# never used -- the loop recomputes it per gene -- so the call is removed.)
# ----------------------------------------------------------------------------------------------
pdf("marker.pdf",width=1.5,height=8)
# Marker panel: CM-A (myh6), CM-V (vmhc), pan-CM (myl7), EC (fli1a, kdrl),
# EP (tcf21, tbx18), EPDC/mesenchyme (twist1a, vim).
geneuse <- c("myh6","vmhc","myl7","fli1a","kdrl","tcf21","tbx18","twist1a","vim")
for(i in geneuse){
  print(i)
  # Per-cluster N / mean / sd / se / ci for the current gene.
  wt2336bloodexpress1sum <- summarySE(wt2336bloodexpress1, measurevar=i, groupvars="ident") #na.rm=TRUE
  p <- ggplot(wt2336bloodexpress1sum, aes_string(x="ident", y=i,fill="ident")) +
    geom_bar(position=position_dodge(), stat="identity",width = 0.7) +
    xlab("")+ # drop the x-axis label
    scale_fill_manual(values = c("darkolivegreen4","brown3","darkgoldenrod3","dodgerblue3","darkorchid"))+
    coord_flip()+
    # Fix the cluster order; reversed so aCM-A ends up on top after the flip.
    scale_x_discrete(limits=rev(c("aCM-A","bCM-V","cEC","dEP","eEPDC")))+
    geom_errorbar(aes(ymin=get(i)-se, ymax=get(i)+se), # Error bars represent standard error of the mean
                  width=.1, # Width of the error bars
                  position=position_dodge(.9))+
    theme_bw()+ # white background
    theme(axis.text = element_blank(), # hide axis text
          axis.ticks = element_blank(),
          panel.grid.major = element_blank(), # no grid lines
          panel.grid.minor = element_blank(),axis.line = element_line(colour = "black"),
          plot.title = element_text(size = 25, face = "bold"),
          axis.title=element_text(size=20,face="bold"),
          # BUG FIX: "none" is the documented way to hide the legend;
          # the original "" is not an accepted legend.position value.
          legend.position = "none")
  print(p)
}
dev.off()
# =============================================================================================
# Fig1d-e: heatmaps of the top cluster markers, computed separately for
# control (CT) and ablated (MTZ) cells.
# ==============================================================================================
table(wt2336blood@meta.data$condition)
wt2336bloodct <- subset(wt2336blood, condition=="CT")
table(wt2336bloodct@meta.data$condition)
# ROC-based, positive-only marker detection per cluster.
wt2336bloodctmarker <- FindAllMarkers(wt2336bloodct,logfc.threshold = 0.25,test.use = "roc",only.pos = T)
library(dplyr)
# Keep the top 100 markers per cluster, ranked by average log fold change.
wt2336bloodctmarker100 <- wt2336bloodctmarker %>% group_by(cluster) %>% top_n(n = 100, wt = avg_logFC)
# BUG FIX: the original tabulated the undefined object `wt2336bloodctmarkersel`.
table(wt2336bloodctmarker100$cluster)
DoHeatmap(wt2336bloodct,features = wt2336bloodctmarker100$gene,label = T,
          group.colors = c("darkolivegreen4","brown3","darkgoldenrod3","dodgerblue3","darkorchid"))
# Same analysis for the MTZ-treated (ablated) cells.
wt2336bloodmtz <- subset(wt2336blood, condition=="MTZ")
wt2336bloodmtzmarker <- FindAllMarkers(wt2336bloodmtz,logfc.threshold = 0.25,test.use = "roc",only.pos = T)
wt2336bloodmtzmarker100 <- wt2336bloodmtzmarker %>% group_by(cluster) %>% top_n(n = 100, wt = avg_logFC)
DoHeatmap(wt2336bloodmtz,features = wt2336bloodmtzmarker100$gene,
          group.colors = c("darkolivegreen4","brown3","darkgoldenrod3","dodgerblue3","darkorchid"))
save.image("~/Desktop/R code/Fig.1.RData")
|
# This data contains all 336,776 flights that departed from New York City in 2013
library(nycflights13)
library(dplyr)
flights = flights
############################## Filter ########################################
# filter() allows you to subset observations based on their values
jan1 = filter(flights, month == 1, day == 1)
NovOrDec = filter(flights, month == 11 | month == 12)
nov_dec = filter(flights, month %in% c(11, 12))
filter(flights, !(arr_delay > 120 | dep_delay > 120))
filter(flights, arr_delay <= 120, dep_delay <= 120)
# Exercises: Find all flights that
# Had an arrival delay of two or more hours
# BUG FIX: the data set is `flights` (`NYCflights` was never defined), and
# delays are recorded in minutes, so "two or more hours" is arr_delay >= 120.
temp = filter(flights, arr_delay >= 120)
temp$arr_delay
# Flew to Houston (IAH or HOU)
temp = filter(flights, dest == "IAH" | dest == "HOU")
temp$dest
# Departed in summer (July, August, and September)
temp = filter(flights, month == 7 | month == 8 | month == 9)
############################## Arrange ########################################
# arrange() works similarly to filter() except that instead of selecting
# rows, it changes their order
# Order by year, month and day
temp = arrange(flights, year, month, day)
# Descending order of arr_delay
arrange(flights, desc(arr_delay))
############################## Select ########################################
# It's not uncommon to get datasets with hundreds or even thousands
# of variables. In this case, the first challenge is often narrowing in on
# the variables you're actually interested in.
# Select columns by name
temp = select(flights, year, month, day)
# Select all columns between year and day (inclusive)
select(flights, year:day)
# Select all columns except those from year to day (inclusive)
select(flights, -(year:day))
# Helper functions (starts_with, ends_with, contains, matches):
select(flights, starts_with("arr_"))
temp2 = select(flights, ends_with("delay"))
# Rename - a variant of select() that doesn't drop the other variables
rename(flights, tail_num = tailnum)
############################## MUTATE ######################################
# mutate() adds new columns at the end of your dataset
# transmute() creates a tibble with the new variables only
flights_sml = select(flights, year:day, ends_with("delay"), distance, air_time)
mutate(flights_sml, gain = arr_delay - dep_delay, speed = distance / air_time * 60)
transmute(flights_sml, gain = arr_delay - dep_delay, speed = distance / air_time * 60)
############################# RANKING #####################################
x = c(2,3,3,4,5,6)
min_rank(x)
min_rank(desc(x))
############################ Summarize ######################################
# The last key verb is summarize(). It collapses a data frame to a single row
summarize(flights, delay = sum(dep_delay, na.rm = TRUE))
# summarize() is not terribly useful unless we pair it with group_by().
# This changes the unit of analysis from the complete dataset to individual
# groups. Then, when you use the dplyr verbs on a grouped
# data frame they will be automatically applied by group.
by_month = group_by(flights, year, month)
summarize(by_month, delay = mean(dep_delay, na.rm = TRUE))
# Exercise:
# We want to explore the relationship between the distance
# and average delay for each location
by_dest = group_by(flights, dest)
delay = summarize(by_dest, count = n(), dist = mean(distance, na.rm = TRUE),
                  delay = mean(arr_delay, na.rm = TRUE))
plot(delay ~ dist, data = delay)
delay = filter(delay, count > 20, dest != "HNL") # remove noisy points and Honolulu airport, which is far away
# This code is a little frustrating to write because we have to give each
# intermediate data frame a name, even though we don't care about it.
# Naming things is hard, so this slows down our analysis.
# There's another way to tackle the same problem: the pipe
delays = flights %>% group_by(dest) %>%
  summarize(count = n(), dist = mean(distance, na.rm = TRUE), delay = mean(arr_delay, na.rm = TRUE)) %>%
  filter(count > 20, dest != "HNL")
# BUG FIX: `not_cancelled` is used below but was never defined; it is the
# standard "R for Data Science" subset of flights with known delays.
not_cancelled = filter(flights, !is.na(dep_delay), !is.na(arr_delay))
delays = not_cancelled %>% group_by(tailnum) %>%
summarize(delay = mean(arr_delay, na.rm = TRUE), n = n()) | /Data Manipulations 2 DPLYR - Flights.R | no_license | hesham230/Data-Science-with-R | R | false | false | 4,233 | r | # This data contains all 336,776 flights that departed from New York City in 2013
# This data contains all 336,776 flights that departed from New York City in 2013
library(nycflights13)
library(dplyr)
flights = flights
############################## Filter ########################################
# filter() allows you to subset observations based on their values
jan1 = filter(flights, month == 1, day == 1)
NovOrDec = filter(flights, month == 11 | month == 12)
nov_dec = filter(flights, month %in% c(11, 12))
filter(flights, !(arr_delay > 120 | dep_delay > 120))
filter(flights, arr_delay <= 120, dep_delay <= 120)
# Exercises: Find all flights that
# Had an arrival delay of two or more hours
# BUG FIX: the data set is `flights` (`NYCflights` was never defined), and
# delays are recorded in minutes, so "two or more hours" is arr_delay >= 120.
temp = filter(flights, arr_delay >= 120)
temp$arr_delay
# Flew to Houston (IAH or HOU)
temp = filter(flights, dest == "IAH" | dest == "HOU")
temp$dest
# Departed in summer (July, August, and September)
temp = filter(flights, month == 7 | month == 8 | month == 9)
############################## Arrange ########################################
# arrange() works similarly to filter() except that instead of selecting
# rows, it changes their order
# Order by year, month and day
temp = arrange(flights, year, month, day)
# Descending order of arr_delay
arrange(flights, desc(arr_delay))
############################## Select ########################################
# It's not uncommon to get datasets with hundreds or even thousands
# of variables. In this case, the first challenge is often narrowing in on
# the variables you're actually interested in.
# Select columns by name
temp = select(flights, year, month, day)
# Select all columns between year and day (inclusive)
select(flights, year:day)
# Select all columns except those from year to day (inclusive)
select(flights, -(year:day))
# Helper functions (starts_with, ends_with, contains, matches):
select(flights, starts_with("arr_"))
temp2 = select(flights, ends_with("delay"))
# Rename - a variant of select() that doesn't drop the other variables
rename(flights, tail_num = tailnum)
############################## MUTATE ######################################
# mutate() adds new columns at the end of your dataset
# transmute() creates a tibble with the new variables only
flights_sml = select(flights, year:day, ends_with("delay"), distance, air_time)
mutate(flights_sml, gain = arr_delay - dep_delay, speed = distance / air_time * 60)
transmute(flights_sml, gain = arr_delay - dep_delay, speed = distance / air_time * 60)
############################# RANKING #####################################
x = c(2,3,3,4,5,6)
min_rank(x)
min_rank(desc(x))
############################ Summarize ######################################
# The last key verb is summarize(). It collapses a data frame to a single row
summarize(flights, delay = sum(dep_delay, na.rm = TRUE))
# summarize() is not terribly useful unless we pair it with group_by().
# This changes the unit of analysis from the complete dataset to individual
# groups. Then, when you use the dplyr verbs on a grouped
# data frame they will be automatically applied by group.
by_month = group_by(flights, year, month)
summarize(by_month, delay = mean(dep_delay, na.rm = TRUE))
# Exercise:
# We want to explore the relationship between the distance
# and average delay for each location
by_dest = group_by(flights, dest)
delay = summarize(by_dest, count = n(), dist = mean(distance, na.rm = TRUE),
                  delay = mean(arr_delay, na.rm = TRUE))
plot(delay ~ dist, data = delay)
delay = filter(delay, count > 20, dest != "HNL") # remove noisy points and Honolulu airport, which is far away
# This code is a little frustrating to write because we have to give each
# intermediate data frame a name, even though we don't care about it.
# Naming things is hard, so this slows down our analysis.
# There's another way to tackle the same problem: the pipe
delays = flights %>% group_by(dest) %>%
  summarize(count = n(), dist = mean(distance, na.rm = TRUE), delay = mean(arr_delay, na.rm = TRUE)) %>%
  filter(count > 20, dest != "HNL")
# BUG FIX: `not_cancelled` was never defined; create it (flights with known
# delays, i.e. not cancelled) before using it in the per-plane summary.
not_cancelled = filter(flights, !is.na(dep_delay), !is.na(arr_delay))
delays = not_cancelled %>% group_by(tailnum) %>%
  summarize(delay = mean(arr_delay, na.rm = TRUE), n = n())
# Test whether an object is "empty".
#
# For plain (non-S4) objects, empty means length zero. For S4 objects, empty
# means either identical to a freshly constructed instance of the same class,
# or every slot is empty (S4-valued slots are checked recursively, all other
# slots by length).
#
# obj: any R object.
# Returns TRUE if the object is empty, FALSE otherwise.
isEmpty <- function (obj)
{
    if (!isS4(obj)) {
        # Plain vectors/lists/NULL: empty iff they have no elements.
        return(length(obj) == 0)
    }
    # Fast path: identical to a default-constructed instance of its class.
    if (identical(obj, new(class(obj)[1]))) {
        return(TRUE)
    }
    # Otherwise the object is empty iff every slot is empty. vapply() is used
    # instead of sapply() so the result is guaranteed to be a logical vector
    # even when the object has zero slots (sapply() would return list()).
    empty <- vapply(slotNames(obj), function(s) {
        value <- slot(obj, s)
        if (isS4(value)) isEmpty(value) else length(value) == 0
    }, logical(1))
    # all(logical(0)) is TRUE, matching the original !any(!empty) behavior.
    all(empty)
}
| /R/internal_isEmpty.R | permissive | ropensci/RNeXML | R | false | false | 636 | r |
# Decide whether `obj` carries any content at all.
#
# Non-S4 values count as empty when their length is zero. An S4 object counts
# as empty when it is indistinguishable from a newly constructed instance of
# its class, or when each of its slots is empty (slots that are themselves S4
# objects are inspected recursively, all others by length).
isEmpty <- function (obj)
{
    if (!isS4(obj)) {
        if (length(obj) > 0) {
            return(FALSE)
        }
        return(TRUE)
    }
    # An untouched default instance of the same class is the emptiness baseline.
    prototype <- new(class(obj)[1])
    if (identical(obj, prototype)) {
        return(TRUE)
    }
    # Inspect every slot; the object is empty only when no slot holds content.
    slot_states <- sapply(slotNames(obj), function(name) {
        current <- slot(obj, name)
        if (isS4(current)) {
            isEmpty(current)
        }
        else {
            if (length(current) == 0)
                TRUE
            else if (length(current) > 0)
                FALSE
        }
    })
    !any(!slot_states)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kinesis_operations.R
\name{kinesis_get_shard_iterator}
\alias{kinesis_get_shard_iterator}
\title{Gets an Amazon Kinesis shard iterator}
\usage{
kinesis_get_shard_iterator(StreamName, ShardId, ShardIteratorType,
StartingSequenceNumber, Timestamp)
}
\arguments{
\item{StreamName}{[required] The name of the Amazon Kinesis data stream.}
\item{ShardId}{[required] The shard ID of the Kinesis Data Streams shard to get the iterator for.}
\item{ShardIteratorType}{[required] Determines how the shard iterator is used to start reading data records
from the shard.
The following are the valid Amazon Kinesis shard iterator types:
\itemize{
\item AT\\_SEQUENCE\\_NUMBER - Start reading from the position denoted by a
specific sequence number, provided in the value
\code{StartingSequenceNumber}.
\item AFTER\\_SEQUENCE\\_NUMBER - Start reading right after the position
denoted by a specific sequence number, provided in the value
\code{StartingSequenceNumber}.
\item AT\\_TIMESTAMP - Start reading from the position denoted by a
specific time stamp, provided in the value \code{Timestamp}.
\item TRIM\\_HORIZON - Start reading at the last untrimmed record in the
shard in the system, which is the oldest data record in the shard.
\item LATEST - Start reading just after the most recent record in the
shard, so that you always read the most recent data in the shard.
}}
\item{StartingSequenceNumber}{The sequence number of the data record in the shard from which to start
reading. Used with shard iterator type AT\\_SEQUENCE\\_NUMBER and
AFTER\\_SEQUENCE\\_NUMBER.}
\item{Timestamp}{The time stamp of the data record from which to start reading. Used with
shard iterator type AT\\_TIMESTAMP. A time stamp is the Unix epoch date
with precision in milliseconds. For example,
\verb{2016-04-04T19:58:46.480-00:00} or \code{1459799926.480}. If a record with
this exact time stamp does not exist, the iterator returned is for the
next (later) record. If the time stamp is older than the current trim
horizon, the iterator returned is for the oldest untrimmed data record
(TRIM\\_HORIZON).}
}
\description{
Gets an Amazon Kinesis shard iterator. A shard iterator expires 5
minutes after it is returned to the requester.
}
\details{
A shard iterator specifies the shard position from which to start
reading data records sequentially. The position is specified using the
sequence number of a data record in a shard. A sequence number is the
identifier associated with every record ingested in the stream, and is
assigned when a record is put into the stream. Each stream has one or
more shards.
You must specify the shard iterator type. For example, you can set the
\code{ShardIteratorType} parameter to read exactly from the position denoted
by a specific sequence number by using the \code{AT_SEQUENCE_NUMBER} shard
iterator type. Alternatively, the parameter can read right after the
sequence number by using the \code{AFTER_SEQUENCE_NUMBER} shard iterator
type, using sequence numbers returned by earlier calls to PutRecord,
PutRecords, GetRecords, or DescribeStream. In the request, you can
specify the shard iterator type \code{AT_TIMESTAMP} to read records from an
arbitrary point in time, \code{TRIM_HORIZON} to cause \code{ShardIterator} to
point to the last untrimmed record in the shard in the system (the
oldest data record in the shard), or \code{LATEST} so that you always read
the most recent data in the shard.
When you read repeatedly from a stream, use a GetShardIterator request
to get the first shard iterator for use in your first GetRecords request
and for subsequent reads use the shard iterator returned by the
GetRecords request in \code{NextShardIterator}. A new shard iterator is
returned by every GetRecords request in \code{NextShardIterator}, which you
use in the \code{ShardIterator} parameter of the next GetRecords request.
If a GetShardIterator request is made too often, you receive a
\code{ProvisionedThroughputExceededException}. For more information about
throughput limits, see GetRecords, and \href{http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html}{Streams Limits}
in the \emph{Amazon Kinesis Data Streams Developer Guide}.
If the shard is closed, GetShardIterator returns a valid iterator for
the last sequence number of the shard. A shard can be closed as a result
of using SplitShard or MergeShards.
GetShardIterator has a limit of five transactions per second per account
per open shard.
}
\section{Request syntax}{
\preformatted{svc$get_shard_iterator(
StreamName = "string",
ShardId = "string",
ShardIteratorType = "AT_SEQUENCE_NUMBER"|"AFTER_SEQUENCE_NUMBER"|"TRIM_HORIZON"|"LATEST"|"AT_TIMESTAMP",
StartingSequenceNumber = "string",
Timestamp = as.POSIXct(
"2015-01-01"
)
)
}
}
\keyword{internal}
| /cran/paws.analytics/man/kinesis_get_shard_iterator.Rd | permissive | johnnytommy/paws | R | false | true | 4,860 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kinesis_operations.R
\name{kinesis_get_shard_iterator}
\alias{kinesis_get_shard_iterator}
\title{Gets an Amazon Kinesis shard iterator}
\usage{
kinesis_get_shard_iterator(StreamName, ShardId, ShardIteratorType,
StartingSequenceNumber, Timestamp)
}
\arguments{
\item{StreamName}{[required] The name of the Amazon Kinesis data stream.}
\item{ShardId}{[required] The shard ID of the Kinesis Data Streams shard to get the iterator for.}
\item{ShardIteratorType}{[required] Determines how the shard iterator is used to start reading data records
from the shard.
The following are the valid Amazon Kinesis shard iterator types:
\itemize{
\item AT\\_SEQUENCE\\_NUMBER - Start reading from the position denoted by a
specific sequence number, provided in the value
\code{StartingSequenceNumber}.
\item AFTER\\_SEQUENCE\\_NUMBER - Start reading right after the position
denoted by a specific sequence number, provided in the value
\code{StartingSequenceNumber}.
\item AT\\_TIMESTAMP - Start reading from the position denoted by a
specific time stamp, provided in the value \code{Timestamp}.
\item TRIM\\_HORIZON - Start reading at the last untrimmed record in the
shard in the system, which is the oldest data record in the shard.
\item LATEST - Start reading just after the most recent record in the
shard, so that you always read the most recent data in the shard.
}}
\item{StartingSequenceNumber}{The sequence number of the data record in the shard from which to start
reading. Used with shard iterator type AT\\_SEQUENCE\\_NUMBER and
AFTER\\_SEQUENCE\\_NUMBER.}
\item{Timestamp}{The time stamp of the data record from which to start reading. Used with
shard iterator type AT\\_TIMESTAMP. A time stamp is the Unix epoch date
with precision in milliseconds. For example,
\verb{2016-04-04T19:58:46.480-00:00} or \code{1459799926.480}. If a record with
this exact time stamp does not exist, the iterator returned is for the
next (later) record. If the time stamp is older than the current trim
horizon, the iterator returned is for the oldest untrimmed data record
(TRIM\\_HORIZON).}
}
\description{
Gets an Amazon Kinesis shard iterator. A shard iterator expires 5
minutes after it is returned to the requester.
}
\details{
A shard iterator specifies the shard position from which to start
reading data records sequentially. The position is specified using the
sequence number of a data record in a shard. A sequence number is the
identifier associated with every record ingested in the stream, and is
assigned when a record is put into the stream. Each stream has one or
more shards.
You must specify the shard iterator type. For example, you can set the
\code{ShardIteratorType} parameter to read exactly from the position denoted
by a specific sequence number by using the \code{AT_SEQUENCE_NUMBER} shard
iterator type. Alternatively, the parameter can read right after the
sequence number by using the \code{AFTER_SEQUENCE_NUMBER} shard iterator
type, using sequence numbers returned by earlier calls to PutRecord,
PutRecords, GetRecords, or DescribeStream. In the request, you can
specify the shard iterator type \code{AT_TIMESTAMP} to read records from an
arbitrary point in time, \code{TRIM_HORIZON} to cause \code{ShardIterator} to
point to the last untrimmed record in the shard in the system (the
oldest data record in the shard), or \code{LATEST} so that you always read
the most recent data in the shard.
When you read repeatedly from a stream, use a GetShardIterator request
to get the first shard iterator for use in your first GetRecords request
and for subsequent reads use the shard iterator returned by the
GetRecords request in \code{NextShardIterator}. A new shard iterator is
returned by every GetRecords request in \code{NextShardIterator}, which you
use in the \code{ShardIterator} parameter of the next GetRecords request.
If a GetShardIterator request is made too often, you receive a
\code{ProvisionedThroughputExceededException}. For more information about
throughput limits, see GetRecords, and \href{http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html}{Streams Limits}
in the \emph{Amazon Kinesis Data Streams Developer Guide}.
If the shard is closed, GetShardIterator returns a valid iterator for
the last sequence number of the shard. A shard can be closed as a result
of using SplitShard or MergeShards.
GetShardIterator has a limit of five transactions per second per account
per open shard.
}
\section{Request syntax}{
\preformatted{svc$get_shard_iterator(
StreamName = "string",
ShardId = "string",
ShardIteratorType = "AT_SEQUENCE_NUMBER"|"AFTER_SEQUENCE_NUMBER"|"TRIM_HORIZON"|"LATEST"|"AT_TIMESTAMP",
StartingSequenceNumber = "string",
Timestamp = as.POSIXct(
"2015-01-01"
)
)
}
}
\keyword{internal}
|
\name{perBaseQuality}
\alias{perBaseQuality}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Per Base Quality Score
}
\description{
This function returns a plot showing the quality of each base of the reads averaged across all bam files. It uses "plotQuality" function of "EDASeq" package.
}
\usage{
perBaseQuality(the.file, Project)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{the.file}{
character indicating the name of the input count table.
}
\item{Project}{
character indicating the name of the project.
}
}
\references{
Risso D , Schwartz K , Sherlock G and Dudoit S (2011). GC-Content Normalization for RNA-Seq Data. BMC Bioinformatics 12:1-480.
}
\author{
Francesco Russo
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ quality }
| /man/perBaseQuality.Rd | no_license | drighelli/RNASeqGUI | R | false | false | 856 | rd | \name{perBaseQuality}
\alias{perBaseQuality}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Per Base Quality Score
}
\description{
This function returns a plot showing the quality of each base of the reads averaged across all bam files. It uses "plotQuality" function of "EDASeq" package.
}
\usage{
perBaseQuality(the.file, Project)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{the.file}{
character indicating the name of the input count table.
}
\item{Project}{
character indicating the name of the project.
}
}
\references{
Risso D , Schwartz K , Sherlock G and Dudoit S (2011). GC-Content Normalization for RNA-Seq Data. BMC Bioinformatics 12:1-480.
}
\author{
Francesco Russo
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ quality }
|
# Title : Feature Scaling lab
# Objective : Show how R supports data-preparation work using feature scaling
# Created by: accol
# Created on: 08/06/2020
# This lab works with the built-in iris data set.
# First, visualise the four numeric columns without any transformation.
print('\nObservando nosso conjunto de Dados sem qualquer transformação')
boxplot(iris[, 1:4])
# Standardisation (z-score): scale() already operates column by column.
iris_padr = scale(iris[, 1:4])
print('\nObservando nosso conjunto de Dados usando PADRONIZAÇÃO (Z-SCORE)')
boxplot(iris_padr[, 1:4])
# Min-max normalisation: map a numeric vector onto [0, 1].
normaliza = function(x){
  return((x - min(x)) / (max(x) - min(x)))
}
# BUG FIX: the original called normaliza(iris[, 1:4]) on the whole data
# frame, so min() and max() were taken over ALL four columns at once (one
# global range) instead of per feature. Apply the function column by
# column so every feature is rescaled to [0, 1] independently, which is
# the standard min-max normalisation and matches what scale() does above.
iris_norm = as.data.frame(lapply(iris[, 1:4], normaliza))
print('\nObservando nosso conjunto de Dados usando NORMALIZAÇÃO (MIN-MAX)')
boxplot(iris_norm[, 1:4])
| /lab21_features_scalling.R | no_license | accolombini/ciencia_dados | R | false | false | 1,004 | r | # Title : Features Scalling => DIMENSIONAMENTO DE CARACTERÍSTICAS
# Objective : Show how R supports data-preparation work using feature scaling
# Created by: accol
# Created on: 08/06/2020
# This lab works with the built-in iris data set.
# First, visualise the four numeric columns without any transformation.
print('\nObservando nosso conjunto de Dados sem qualquer transformação')
boxplot(iris[, 1:4])
# Standardisation (z-score): scale() already operates column by column.
iris_padr = scale(iris[, 1:4])
print('\nObservando nosso conjunto de Dados usando PADRONIZAÇÃO (Z-SCORE)')
boxplot(iris_padr[, 1:4])
# Min-max normalisation: map a numeric vector onto [0, 1].
normaliza = function(x){
  return((x - min(x)) / (max(x) - min(x)))
}
# BUG FIX: the original called normaliza(iris[, 1:4]) on the whole data
# frame, so min() and max() were taken over ALL four columns at once (one
# global range) instead of per feature. Apply the function column by
# column so every feature is rescaled to [0, 1] independently.
iris_norm = as.data.frame(lapply(iris[, 1:4], normaliza))
print('\nObservando nosso conjunto de Dados usando NORMALIZAÇÃO (MIN-MAX)')
boxplot(iris_norm[, 1:4])
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PSI.R
\name{psi}
\alias{psi}
\title{PSI(PoullutantStandardIndex)}
\usage{
psi(date_time = "")
}
\arguments{
\item{date_time}{Defaults to current (SGD) time. Format: YYYY-MM-DDTHH:MM:SS}
}
\value{
A dataframe containing various PSI measures across 5 corners
of Singapore
}
\description{
This functions calls upon the PSI API from data.gov.sg
and returns a data frame of the different measures of the PSI across 5
different areas in Singapores and the overall measure for the given
data-time. This data provided by the API is updated hourly.
}
\details{
Note that this function is different from the `PSI_summary` function,
which returns the PSI measures for a given day.
}
\examples{
psi()
psi(date_time = "2019-11-08T17:30:00")
psi(date_time = "2018-01-04T09:16:17")
}
\keyword{PSI}
| /man/psi.Rd | permissive | clintonwxy/datagovsgR | R | false | true | 851 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PSI.R
\name{psi}
\alias{psi}
\title{PSI(PoullutantStandardIndex)}
\usage{
psi(date_time = "")
}
\arguments{
\item{date_time}{Defaults to current (SGD) time. Format: YYYY-MM-DDTHH:MM:SS}
}
\value{
A dataframe containing various PSI measures across 5 corners
of Singapore
}
\description{
This functions calls upon the PSI API from data.gov.sg
and returns a data frame of the different measures of the PSI across 5
different areas in Singapores and the overall measure for the given
data-time. This data provided by the API is updated hourly.
}
\details{
Note that this function is different from the `PSI_summary` function,
which returns the PSI measures for a given day.
}
\examples{
psi()
psi(date_time = "2019-11-08T17:30:00")
psi(date_time = "2018-01-04T09:16:17")
}
\keyword{PSI}
|
## Worked example for the 'yaish' data set (class mobility by level of
## education in Israel) from the gnm package. It fits the UNIDIFF
## (uniform difference / log-multiplicative layer effect) model and then
## inspects the education-layer multipliers and their contrasts.
## Expected numeric output is recorded in the comments after each call.
library(gnm)
### Name: yaish
### Title: Class Mobility by Level of Education in Israel
### Aliases: yaish
### Keywords: datasets
### ** Examples
# Fixed seed: gnm uses random starting values, so this makes the fit
# reproducible.
set.seed(1)
## Fit the "UNIDIFF" mobility model across education levels, leaving out
## the uninformative subtable for dest == 7:
##
unidiff <- gnm(Freq ~ educ*orig + educ*dest +
Mult(Exp(educ), orig:dest), family = poisson,
data = yaish, subset = (dest != 7))
## Deviance should be 200.3, 116 d.f.
##
## Look at the multipliers of the orig:dest association:
ofInterest(unidiff) <- pickCoef(unidiff, "[.]educ")
coef(unidiff)
##
## Coefficients of interest:
## Mult(Exp(.), orig:dest).educ1 Mult(Exp(.), orig:dest).educ2
## -0.5513258 -0.7766976
## Mult(Exp(.), orig:dest).educ3 Mult(Exp(.), orig:dest).educ4
## -1.2947494 -1.5902644
## Mult(Exp(.), orig:dest).educ5
## -2.8008285
##
## Get standard errors for the contrasts with educ1 (the multipliers are
## only identified up to a constant, hence contrasts rather than raw SEs):
##
getContrasts(unidiff, ofInterest(unidiff))
## estimate SE quasiSE
## Mult(Exp(.), orig:dest).educ1 0.0000000 0.0000000 0.09757438
## Mult(Exp(.), orig:dest).educ2 -0.2253718 0.1611874 0.12885847
## Mult(Exp(.), orig:dest).educ3 -0.7434236 0.2335083 0.21182123
## Mult(Exp(.), orig:dest).educ4 -1.0389386 0.3434256 0.32609380
## Mult(Exp(.), orig:dest).educ5 -2.2495026 0.9453764 0.93560643
## quasiVar
## Mult(Exp(.), orig:dest).educ1 0.00952076
## Mult(Exp(.), orig:dest).educ2 0.01660450
## Mult(Exp(.), orig:dest).educ3 0.04486823
## Mult(Exp(.), orig:dest).educ4 0.10633716
## Mult(Exp(.), orig:dest).educ5 0.87535940
##
## Table of model residuals:
##
residuals(unidiff)
| /data/genthat_extracted_code/gnm/examples/yaish.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,786 | r | library(gnm)
### Name: yaish
### Title: Class Mobility by Level of Education in Israel
### Aliases: yaish
### Keywords: datasets
### ** Examples
set.seed(1)
## Fit the "UNIDIFF" mobility model across education levels, leaving out
## the uninformative subtable for dest == 7:
##
unidiff <- gnm(Freq ~ educ*orig + educ*dest +
Mult(Exp(educ), orig:dest), family = poisson,
data = yaish, subset = (dest != 7))
## Deviance should be 200.3, 116 d.f.
##
## Look at the multipliers of the orig:dest association:
ofInterest(unidiff) <- pickCoef(unidiff, "[.]educ")
coef(unidiff)
##
## Coefficients of interest:
## Mult(Exp(.), orig:dest).educ1 Mult(Exp(.), orig:dest).educ2
## -0.5513258 -0.7766976
## Mult(Exp(.), orig:dest).educ3 Mult(Exp(.), orig:dest).educ4
## -1.2947494 -1.5902644
## Mult(Exp(.), orig:dest).educ5
## -2.8008285
##
## Get standard errors for the contrasts with educ1:
##
getContrasts(unidiff, ofInterest(unidiff))
## estimate SE quasiSE
## Mult(Exp(.), orig:dest).educ1 0.0000000 0.0000000 0.09757438
## Mult(Exp(.), orig:dest).educ2 -0.2253718 0.1611874 0.12885847
## Mult(Exp(.), orig:dest).educ3 -0.7434236 0.2335083 0.21182123
## Mult(Exp(.), orig:dest).educ4 -1.0389386 0.3434256 0.32609380
## Mult(Exp(.), orig:dest).educ5 -2.2495026 0.9453764 0.93560643
## quasiVar
## Mult(Exp(.), orig:dest).educ1 0.00952076
## Mult(Exp(.), orig:dest).educ2 0.01660450
## Mult(Exp(.), orig:dest).educ3 0.04486823
## Mult(Exp(.), orig:dest).educ4 0.10633716
## Mult(Exp(.), orig:dest).educ5 0.87535940
##
## Table of model residuals:
##
residuals(unidiff)
|
## makeCacheMatrix() and cacheSolve() together memoise matrix inversion:
## the inverse is computed at most once and then served from a cache,
## avoiding repeated (and potentially expensive) calls to solve().
## Wrap a square invertible matrix in a list of accessor closures.
## The returned list exposes set/get for the matrix itself and
## setinverse/getinverse for its cached inverse. Input: a square
## invertible matrix; output: the list of four closures.
makeCacheMatrix <- function(x = matrix()) {
  cached_inv <- NULL
  set <- function(y) {
    x <<- y
    cached_inv <<- NULL  # replacing the matrix invalidates the cache
  }
  list(
    set = set,
    get = function() x,
    setinverse = function(inv) cached_inv <<- inv,
    getinverse = function() cached_inv
  )
}
## Return the inverse of the special "matrix" produced by makeCacheMatrix().
## On the first call the inverse is computed with solve() and stored via
## setinverse(); later calls return the stored value without recomputing,
## emitting a message that the cached result was used. Extra arguments in
## `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inv <- x$getinverse()
  if (is.null(inv)) {
    inv <- solve(x$get(), ...)
    x$setinverse(inv)
    return(inv)
  }
  message("getting cached data")
  inv
}
| /cachematrix.R | no_license | lijixin3216/ProgrammingAssignment2 | R | false | false | 1,384 | r | ## The two functions in this script are to cache the inverse of a matrix.
## Memoising the inverse removes redundant recomputation of matrix
## inversion, which helps overall performance.
## Wrap a square invertible matrix in a list of accessor closures.
## The returned list exposes set/get for the matrix itself and
## setinverse/getinverse for its cached inverse. Input: a square
## invertible matrix; output: the list of four closures.
makeCacheMatrix <- function(x = matrix()) {
  cached_inv <- NULL
  set <- function(y) {
    x <<- y
    cached_inv <<- NULL  # replacing the matrix invalidates the cache
  }
  list(
    set = set,
    get = function() x,
    setinverse = function(inv) cached_inv <<- inv,
    getinverse = function() cached_inv
  )
}
## Return the inverse of the special "matrix" produced by makeCacheMatrix().
## Computes the inverse with solve() and caches it on the first call;
## later calls reuse the cached value (with a message) instead of
## recomputing. Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inv <- x$getinverse()
  if (is.null(inv)) {
    inv <- solve(x$get(), ...)
    x$setinverse(inv)
    return(inv)
  }
  message("getting cached data")
  inv
}
|
# Queue-manipulation exercises (Problems 1a-1h) followed by seq()/rep()
# demos. Each snippet's observed console output is recorded in a
# "#Result:" comment directly below it.
# NOTE(review): the section numbering looks off ("Problem 20", and
# "Problem 2" appears more than once) -- consider renumbering.
#### Problem 1a ####
# All results printed from the output of the R console.
queue<-c('James', 'Mary', 'Steve', 'Alex', 'Patricia')
print(queue)
#Result: [1] "James" "Mary" "Steve" "Alex" "Patricia"
#### Problem 1b ####
# Add 'Harold' to the end of the queue.
queue<-append(queue, 'Harold')
print(queue)
#Result: [1] "James" "Mary" "Steve" "Alex" "Patricia" "Harold"
#### Problem 1c ####
# Remove 'James'.
# NOTE(review): queue[-which(queue == 'X')] EMPTIES the vector when 'X'
# is absent (negative indexing with integer(0)); setdiff() is safer.
queue <- queue[-which(queue=='James')]
print(queue)
#Result: [1] "Mary" "Steve" "Alex" "Patricia" "Harold"
#### Problem 1d ####
# Insert 'Pam' in second position (after the first element).
queue<-append(queue,'Pam', after=1)
print(queue)
#Result: [1] "Mary" "Pam" "Steve" "Alex" "Patricia" "Harold"
#### Problem 1e ####
# Drop the last element (head with a negative count).
queue<-head(queue,-1)
print(queue)
#Result: [1] "Mary" "Pam" "Steve" "Alex" "Patricia"
#### Problem 1f ####
# Remove 'Alex' (same which()-based removal as 1c).
queue <- queue[-which(queue=='Alex')]
print(queue)
#Result: [1] "Mary" "Pam" "Steve" "Patricia"
#### Problem 1g ####
# Position of 'Patricia' in the queue.
match('Patricia',queue)
#Result: [1] 4
#### Problem 1h ####
# Current queue length.
length(queue)
#Result: [1] 4
#### Problem 2 ####
# Arithmetic sequence from 20 to 50 in steps of 5.
seq(from=20, to=50, by=5)
#Result: 20 25 30 35 40 45 50
#### Problem 20 ####
# Repeat the string "example" ten times.
rep(c("example"), each=10)
#Result: "example" "example" "example" "example" "example" "example" "example" "example" "example" "example"
#### Problem 2 ####
# Print the real roots of the quadratic equation a*x^2 + b*x + c = 0.
# Uses the discriminant b^2 - 4ac to decide between zero, one or two real
# roots. Invisibly returns the value of the last print() call (the last
# root printed, or the "no solutions" message).
quadratic<-function(a,b,c){
  disc <- b^2 - 4*a*c
  if( disc < 0){
    print("This equation has no solutions (i.e. no real roots)")
  }else if( disc == 0){
    # BUG FIX: the original wrote (-b + sqrt(disc))/2*a, which R parses as
    # ((-b + sqrt(disc))/2)*a -- i.e. it MULTIPLIES by a instead of
    # dividing by 2a. That only gave correct roots when a == 1 (as in the
    # example below). The quadratic formula requires division by (2*a).
    root <- -b / (2*a)
    print("The equation has one root ")
    print(root)
  }else{
    root1 <- (-b + sqrt(disc)) / (2*a)
    root2 <- (-b - sqrt(disc)) / (2*a)
    print("The equation has two roots")
    print(root1)
    print(root2)
  }
}
# Driver for quadratic(): in interactive use the three coefficients would
# be read from the keyboard with the scan() lines below; here they are
# hard-coded to the worked example.
#input the 3 coefficients from the keyboard the following lines have to be use
#a=scan("",n=1,quiet=TRUE)
#b=scan("",n=1,quiet=TRUE)
#c=scan("",n=1,quiet=TRUE)
#example quadratic equation is x^2+5x+6 (roots are -2 and -3) (coefficients are a=1, b=5, c=6)
# NOTE(review): `c <- 6` shadows base::c for variable lookup; calls like
# c(f, g, h) still work because R resolves function names separately, but
# a different variable name would be clearer.
a<-1
b<-5
c<-6
quadratic(a,b,c)
#Result: -2 -3
#Result:
#### Problem 3 ####
# Count the integers in 1..1000 divisible by none of 3, 7 and 11, by
# successively filtering out multiples of each divisor.
n<-1:1000
p<-n[!(n%%3 == 0)]
q<-p[!(p%%7 == 0)]
result<-q[!(q%%11 == 0)]
#print(result) #NOTE: uncomment this statement to get all the numbers.
print(length(result))
#Result:[1] 520 i.e. There are 520 numbers that are neither divisible by three, nor by seven, nor by 11
#### Problem 4 ####
# Check whether three numbers form a Pythagorean triple. Interactive
# input via scan() is commented out; the example uses the 3-4-5 triple.
#f=scan("",n=1,quiet = FALSE)
#g=scan("",n=1,quiet = FALSE)
#h=scan("",n=1,quiet = FALSE)
f=3
g=4
h=5
# Sort so the largest value is treated as the hypotenuse candidate.
v=sort(c(f,g,h))
if(v[1]^2+v[2]^2==v[3]^2){
print("The numbers you entered form a Pythagorean Triple")
}else{
print("The numbers you entered DO NOT form a Pythagorean Triple")
}
#Result:
| /Assign_02.R | no_license | machadob/MSDA | R | false | false | 2,624 | r | #### Problem 1a ####
# All results printed from the output of the R console.
queue<-c('James', 'Mary', 'Steve', 'Alex', 'Patricia')
print(queue)
#Result: [1] "James" "Mary" "Steve" "Alex" "Patricia"
#### Problem 1b ####
queue<-append(queue, 'Harold')
print(queue)
#Result: [1] "James" "Mary" "Steve" "Alex" "Patricia" "Harold"
#### Problem 1c ####
queue <- queue[-which(queue=='James')]
print(queue)
#Result: [1] "Mary" "Steve" "Alex" "Patricia" "Harold"
#### Problem 1d ####
queue<-append(queue,'Pam', after=1)
print(queue)
#Result: [1] "Mary" "Pam" "Steve" "Alex" "Patricia" "Harold"
#### Problem 1e ####
queue<-head(queue,-1)
print(queue)
#Result: [1] "Mary" "Pam" "Steve" "Alex" "Patricia"
#### Problem 1f ####
queue <- queue[-which(queue=='Alex')]
print(queue)
#Result: [1] "Mary" "Pam" "Steve" "Patricia"
#### Problem 1g ####
match('Patricia',queue)
#Result: [1] 4
#### Problem 1h ####
length(queue)
#Result: [1] 4
#### Problem 2 ####
seq(from=20, to=50, by=5)
#Result: 20 25 30 35 40 45 50
#### Problem 20 ####
rep(c("example"), each=10)
#Result: "example" "example" "example" "example" "example" "example" "example" "example" "example" "example"S
#### Problem 2 ####
# Print the real roots of the quadratic equation a*x^2 + b*x + c = 0.
# Uses the discriminant b^2 - 4ac to decide between zero, one or two real
# roots. Invisibly returns the value of the last print() call.
quadratic<-function(a,b,c){
  disc <- b^2 - 4*a*c
  if( disc < 0){
    print("This equation has no solutions (i.e. no real roots)")
  }else if( disc == 0){
    # BUG FIX: the original wrote (-b + sqrt(disc))/2*a, which R parses as
    # ((-b + sqrt(disc))/2)*a -- multiplying by a instead of dividing by
    # 2a. That only gave correct roots when a == 1.
    root <- -b / (2*a)
    print("The equation has one root ")
    print(root)
  }else{
    root1 <- (-b + sqrt(disc)) / (2*a)
    root2 <- (-b - sqrt(disc)) / (2*a)
    print("The equation has two roots")
    print(root1)
    print(root2)
  }
}
#input the 3 coefficients from the keyboard the following lines have to be use
#a=scan("",n=1,quiet=TRUE)
#b=scan("",n=1,quiet=TRUE)
#c=scan("",n=1,quiet=TRUE)
#example quadratic equation is x^2+5x+6 (roots are -2 and -3) (coefficients are a=1, b=5, c=6)
a<-1
b<-5
c<-6
quadratic(a,b,c)
#Result: -2 -3
#Result:
#### Problem 3 ####
n<-1:1000
p<-n[!(n%%3 == 0)]
q<-p[!(p%%7 == 0)]
result<-q[!(q%%11 == 0)]
#print(result) #NOTE: uncomment this statement to get all the numbers.
print(length(result))
#Result:[1] 520 i.e. There are 520 numbers that are neither divisible by three, nor by seven, nor by 11
#### Problem 4 ####
#f=scan("",n=1,quiet = FALSE)
#g=scan("",n=1,quiet = FALSE)
#h=scan("",n=1,quiet = FALSE)
f=3
g=4
h=5
v=sort(c(f,g,h))
if(v[1]^2+v[2]^2==v[3]^2){
print("The numbers you entered form a Pythagorean Triple")
}else{
print("The numbers you entered DO NOT form a Pythagorean Triple")
}
#Result:
|
###################################################################################### #
# Goal of this script: load mRNA together with miRNA and clinical data
###################################################################################### #
# This script is meant to be source()d with the following already in scope:
#   - `cancer`: cohort code used to build the file paths below,
#   - `clinical_data_prep()`: project helper extracting survival time/status,
#   - Surv() -- presumably from the survival package, loaded by the caller
#     (TODO confirm).
# It leaves mRNA_data, miRNA_data, clinical_data(_srv), y_cox and the two
# IQR vectors in the calling environment for downstream scripts.
# --- Working directories ---
wd_cancer <- paste0("data_cancer/", cancer, "/")
wd_fit <- paste0("data_fit/", cancer, "/")
# clinical data ---
# Transposed so rows become patients; the first row of the transpose
# holds the variable names.
clinical_data <- t(read.csv(file = paste0(wd_cancer, cancer, ".clin.merged.txt"),
stringsAsFactors = F, sep = "\t", header = F))
colnames(clinical_data) <- clinical_data[1,]
clinical_data <- clinical_data[-1,]
row.names(clinical_data) <- clinical_data[, "patient.bcr_patient_barcode"]
clinical_data <- as.data.frame(clinical_data)
# extract information usefull for survival (srv) analysis (time, status)
clinical_data_srv <- clinical_data_prep(clinical_data)
# mRNA data ---
mRNA_data_all <- t(read.csv(file = paste0(wd_cancer, cancer, ".uncv2.mRNAseq_RSEM_all.txt"),
stringsAsFactors = F, sep = "\t", header =F))
# interactive sanity check (prints only when run at top level with echo)
mRNA_data_all[1:5, 1:5]
# colnames, rownames, NA values
colnames(mRNA_data_all) <- mRNA_data_all[1,]
mRNA_data_all <- mRNA_data_all[-1,]
row.names(mRNA_data_all) <- tolower(mRNA_data_all[,1])
mRNA_data_all <- mRNA_data_all[,-1]
mRNA_data_all <- as.matrix(mRNA_data_all)
class(mRNA_data_all) <- "numeric"
# keep only primary solid tumor samples
# (https://gdc.cancer.gov/resources-tcga-users/tcga-code-tables/sample-type-codes)
# Characters 14-15 of a TCGA sample barcode are the sample-type code;
# "01" = primary solid tumor.
freq_sample_types <- as.data.frame(table(substr(row.names(mRNA_data_all), 14, 15)))
freq_sample_types[,2] <- paste0("(", freq_sample_types[,2], ")")
print(paste("Sample types:", paste(paste(freq_sample_types[,1], freq_sample_types[,2]), collapse = ", ")))
id_prim_tumor <- as.numeric(substr(row.names(mRNA_data_all), 14, 15)) == 1
mRNA_data <- mRNA_data_all[id_prim_tumor, ]
# First 12 barcode characters identify the patient, matching the
# clinical-data row names.
row.names(mRNA_data) <- substr(row.names(mRNA_data),1,12)
dim(mRNA_data)
# remove genes with NA values
# NOTE(review): sum(which(is.na(x))) is a roundabout NA test;
# any(is.na(x)) would be clearer (and not depend on which()'s indices).
id_NA_gene <- apply(mRNA_data, 2, function(x) sum(which(is.na(x))))
id_NA_gene <- id_NA_gene > 0
mRNA_data <- mRNA_data[, !id_NA_gene]
print(paste0(sum(id_NA_gene), " gene(s) removed due to NA values"))
# remove genes with constant values
id_cst_rm <- apply(mRNA_data, 2, function(x) var(x) == 0)
mRNA_data <- mRNA_data[, !id_cst_rm]
print(paste0(sum(id_cst_rm), " gene(s) removed due to constant values"))
# log2 transformation (log2(x + 1) requires non-negative expression values)
if(min(mRNA_data) < 0) print("Negative values in genetics dataset")
mRNA_data <- log2(mRNA_data + 1)
mRNA_data[1:3, 1:3]
# miRNA data --- (same pipeline as the mRNA block above)
miRNA_data <- t(read.csv(file = paste0(wd_cancer, cancer, ".miRseq_RPKM.txt"),
stringsAsFactors = F, sep = "\t", header =F))
miRNA_data[1:5, 1:5]
# colnames, rownames, NA values
colnames(miRNA_data) <- miRNA_data[1,]
miRNA_data <- miRNA_data[-1,]
row.names(miRNA_data) <- tolower(miRNA_data[,1])
miRNA_data <- miRNA_data[,-1]
miRNA_data <- as.matrix(miRNA_data)
class(miRNA_data) <- "numeric"
miRNA_data[1:5, 1:5]
# keep only primary solid tumor samples
# (https://gdc.cancer.gov/resources-tcga-users/tcga-code-tables/sample-type-codes)
freq_sample_types <- as.data.frame(table(substr(row.names(miRNA_data), 14, 15)))
freq_sample_types[,2] <- paste0("(", freq_sample_types[,2], ")")
print(paste("Sample types:", paste(paste(freq_sample_types[,1], freq_sample_types[,2]), collapse = ", ")))
id_prim_tumor <- as.numeric(substr(row.names(miRNA_data), 14, 15)) == 1
miRNA_data <- miRNA_data[id_prim_tumor, ]
row.names(miRNA_data) <- substr(row.names(miRNA_data),1,12)
dim(miRNA_data)
# remove genes with NA values
id_NA_gene <- apply(miRNA_data, 2, function(x) sum(which(is.na(x))))
id_NA_gene <- id_NA_gene > 0
miRNA_data <- miRNA_data[, !id_NA_gene]
print(paste0(sum(id_NA_gene), " miRNA(s) removed due to NA values"))
# remove genes with constant values
id_cst_rm <- apply(miRNA_data, 2, function(x) var(x) == 0)
miRNA_data <- miRNA_data[, !id_cst_rm]
print(paste0(sum(id_cst_rm), " miRNA(s) removed due to constant values"))
# log2 transformation
if(min(miRNA_data) < 0) print("Negative values in genetics dataset")
miRNA_data <- log2(miRNA_data + 1)
miRNA_data[1:3, 1:3]
# Patients overlap in clinical and genetic datas ---
# Keep only patients present in all three data sets, and subset every
# table to that common patient list (same row order everywhere).
intersect_patients <- intersect(row.names(mRNA_data),
row.names(clinical_data_srv))
intersect_patients <- intersect(intersect_patients, row.names(miRNA_data))
print(paste("Number of patients in miRNA, mRNA and clinical data:", length(intersect_patients)))
mRNA_data <- mRNA_data[intersect_patients,]
miRNA_data <- miRNA_data[intersect_patients,]
clinical_data_srv <- clinical_data_srv[intersect_patients,]
clinical_data <- clinical_data[intersect_patients, ]
# surv obect vector build with clinical data
y_cox <- Surv(clinical_data_srv$time, clinical_data_srv$status)
names(y_cox) <- row.names(clinical_data_srv)
# interquantile range (per gene / per miRNA, for later filtering)
IQR_vect_mRNA <- apply(mRNA_data, 2, IQR)
IQR_vect_miRNA <- apply(miRNA_data, 2, IQR)
# dimension and censoring rate
print(paste0("n patients: ", nrow(mRNA_data), ", p mRNA: ", ncol(mRNA_data)))
print(paste0("n patients: ", nrow(miRNA_data), ", p miRNA: ", ncol(miRNA_data)))
print(paste0("Censoring rate: ", signif(1 - sum(clinical_data_srv$status) / nrow(mRNA_data),3)))
| /load_data_miRNA.R | no_license | lixiongyang/Survival_preFiltering | R | false | false | 5,388 | r | ###################################################################################### #
# Goal of this script: load mRNA together with miRNA and clinical data
###################################################################################### #
# --- Working directories ---
wd_cancer <- paste0("data_cancer/", cancer, "/")
wd_fit <- paste0("data_fit/", cancer, "/")
# clinical data ---
clinical_data <- t(read.csv(file = paste0(wd_cancer, cancer, ".clin.merged.txt"),
stringsAsFactors = F, sep = "\t", header = F))
colnames(clinical_data) <- clinical_data[1,]
clinical_data <- clinical_data[-1,]
row.names(clinical_data) <- clinical_data[, "patient.bcr_patient_barcode"]
clinical_data <- as.data.frame(clinical_data)
# extract information usefull for survival (srv) analysis (time, status)
clinical_data_srv <- clinical_data_prep(clinical_data)
# mRNA data ---
mRNA_data_all <- t(read.csv(file = paste0(wd_cancer, cancer, ".uncv2.mRNAseq_RSEM_all.txt"),
stringsAsFactors = F, sep = "\t", header =F))
mRNA_data_all[1:5, 1:5]
# colnames, rownames, NA values
colnames(mRNA_data_all) <- mRNA_data_all[1,]
mRNA_data_all <- mRNA_data_all[-1,]
row.names(mRNA_data_all) <- tolower(mRNA_data_all[,1])
mRNA_data_all <- mRNA_data_all[,-1]
mRNA_data_all <- as.matrix(mRNA_data_all)
class(mRNA_data_all) <- "numeric"
# keep only primary solid tumor samples
# (https://gdc.cancer.gov/resources-tcga-users/tcga-code-tables/sample-type-codes)
freq_sample_types <- as.data.frame(table(substr(row.names(mRNA_data_all), 14, 15)))
freq_sample_types[,2] <- paste0("(", freq_sample_types[,2], ")")
print(paste("Sample types:", paste(paste(freq_sample_types[,1], freq_sample_types[,2]), collapse = ", ")))
id_prim_tumor <- as.numeric(substr(row.names(mRNA_data_all), 14, 15)) == 1
mRNA_data <- mRNA_data_all[id_prim_tumor, ]
row.names(mRNA_data) <- substr(row.names(mRNA_data),1,12)
dim(mRNA_data)
# remove genes with NA values
id_NA_gene <- apply(mRNA_data, 2, function(x) sum(which(is.na(x))))
id_NA_gene <- id_NA_gene > 0
mRNA_data <- mRNA_data[, !id_NA_gene]
print(paste0(sum(id_NA_gene), " gene(s) removed due to NA values"))
# remove genes with constant values
id_cst_rm <- apply(mRNA_data, 2, function(x) var(x) == 0)
mRNA_data <- mRNA_data[, !id_cst_rm]
print(paste0(sum(id_cst_rm), " gene(s) removed due to constant values"))
# log2 transformation
if(min(mRNA_data) < 0) print("Negative values in genetics dataset")
mRNA_data <- log2(mRNA_data + 1)
mRNA_data[1:3, 1:3]
# miRNA data ---
miRNA_data <- t(read.csv(file = paste0(wd_cancer, cancer, ".miRseq_RPKM.txt"),
stringsAsFactors = F, sep = "\t", header =F))
miRNA_data[1:5, 1:5]
# colnames, rownames, NA values
colnames(miRNA_data) <- miRNA_data[1,]
miRNA_data <- miRNA_data[-1,]
row.names(miRNA_data) <- tolower(miRNA_data[,1])
miRNA_data <- miRNA_data[,-1]
miRNA_data <- as.matrix(miRNA_data)
class(miRNA_data) <- "numeric"
miRNA_data[1:5, 1:5]
# keep only primary solid tumor samples
# (https://gdc.cancer.gov/resources-tcga-users/tcga-code-tables/sample-type-codes)
freq_sample_types <- as.data.frame(table(substr(row.names(miRNA_data), 14, 15)))
freq_sample_types[,2] <- paste0("(", freq_sample_types[,2], ")")
print(paste("Sample types:", paste(paste(freq_sample_types[,1], freq_sample_types[,2]), collapse = ", ")))
id_prim_tumor <- as.numeric(substr(row.names(miRNA_data), 14, 15)) == 1
miRNA_data <- miRNA_data[id_prim_tumor, ]
row.names(miRNA_data) <- substr(row.names(miRNA_data),1,12)
dim(miRNA_data)
# remove genes with NA values
id_NA_gene <- apply(miRNA_data, 2, function(x) sum(which(is.na(x))))
id_NA_gene <- id_NA_gene > 0
miRNA_data <- miRNA_data[, !id_NA_gene]
print(paste0(sum(id_NA_gene), " miRNA(s) removed due to NA values"))
# remove genes with constant values
id_cst_rm <- apply(miRNA_data, 2, function(x) var(x) == 0)
miRNA_data <- miRNA_data[, !id_cst_rm]
print(paste0(sum(id_cst_rm), " miRNA(s) removed due to constant values"))
# log2 transformation
if(min(miRNA_data) < 0) print("Negative values in genetics dataset")
miRNA_data <- log2(miRNA_data + 1)
miRNA_data[1:3, 1:3]
# Patients overlap in clinical and genetic datas ---
intersect_patients <- intersect(row.names(mRNA_data),
row.names(clinical_data_srv))
intersect_patients <- intersect(intersect_patients, row.names(miRNA_data))
print(paste("Number of patients in miRNA, mRNA and clinical data:", length(intersect_patients)))
mRNA_data <- mRNA_data[intersect_patients,]
miRNA_data <- miRNA_data[intersect_patients,]
clinical_data_srv <- clinical_data_srv[intersect_patients,]
clinical_data <- clinical_data[intersect_patients, ]
# surv obect vector build with clinical data
y_cox <- Surv(clinical_data_srv$time, clinical_data_srv$status)
names(y_cox) <- row.names(clinical_data_srv)
# interquantile range
IQR_vect_mRNA <- apply(mRNA_data, 2, IQR)
IQR_vect_miRNA <- apply(miRNA_data, 2, IQR)
# dimension and censoring rate
print(paste0("n patients: ", nrow(mRNA_data), ", p mRNA: ", ncol(mRNA_data)))
print(paste0("n patients: ", nrow(miRNA_data), ", p miRNA: ", ncol(miRNA_data)))
print(paste0("Censoring rate: ", signif(1 - sum(clinical_data_srv$status) / nrow(mRNA_data),3)))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/double_anchor_word_embeddings.R
\name{double_anchor_word_embeddings}
\alias{double_anchor_word_embeddings}
\title{Double Anchor Word Embeddings}
\usage{
double_anchor_word_embeddings(anchoring_word, bwe_object,
similarity = c("cosine", "weighted cosine"), trim = TRUE)
}
\arguments{
\item{anchoring_word}{Character. A single word, used to set the anchorings.}
\item{bwe_object}{List. Output from \code{fit_bwe}.}
\item{similarity}{Character. Should similarity be measured only by cosine similiarity, or should similarity be weighted by the log frequency of words.}
\item{trim}{Boolean. Should common stopwords be removed from the list of possible anchors? Setting this to \code{TRUE} improves performance and interpretability. Defaults to \code{TRUE}.}
}
\value{
A list. Contains a matrix of the anchored embeddings, and vector of the words used as anchors.
}
\description{
Anchors and identifies the embeddings produced from \code{bwe}. Unlike \code{anchor_word_embeddings}, endpoints are selected for each dimension.
}
| /man/double_anchor_word_embeddings.Rd | no_license | spinkney/bwe | R | false | true | 1,103 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/double_anchor_word_embeddings.R
\name{double_anchor_word_embeddings}
\alias{double_anchor_word_embeddings}
\title{Double Anchor Word Embeddings}
\usage{
double_anchor_word_embeddings(anchoring_word, bwe_object,
similarity = c("cosine", "weighted cosine"), trim = TRUE)
}
\arguments{
\item{anchoring_word}{Character. A single word, used to set the anchorings.}
\item{bwe_object}{List. Output from \code{fit_bwe}.}
\item{similarity}{Character. Should similarity be measured only by cosine similiarity, or should similarity be weighted by the log frequency of words.}
\item{trim}{Boolean. Should common stopwords be removed from the list of possible anchors? Setting this to \code{TRUE} improves performance and interpretability. Defaults to \code{TRUE}.}
}
\value{
A list. Contains a matrix of the anchored embeddings, and vector of the words used as anchors.
}
\description{
Anchors and identifies the embeddings produced from \code{bwe}. Unlike \code{anchor_word_embeddings}, endpoints are selected for each dimension.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parse_section.R
\name{parse_section}
\alias{parse_section}
\title{import helper}
\usage{
parse_section(x, ...)
}
\description{
import helper
}
\keyword{internal}
| /man/parse_section.Rd | no_license | dleutnant/swmmr | R | false | true | 240 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parse_section.R
\name{parse_section}
\alias{parse_section}
\title{import helper}
\usage{
parse_section(x, ...)
}
\description{
import helper
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tf_custom_estimator.R
\name{estimator}
\alias{estimator}
\title{Construct a Custom Estimator}
\usage{
estimator(model_fn, model_dir = NULL, config = NULL, params = NULL,
class = NULL)
}
\arguments{
\item{model_fn}{The model function. See \strong{Model Function} for details
on the structure of a model function.}
\item{model_dir}{Directory to save model parameters, graph, etc. This can
also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model. If \code{NULL}, the \code{model_dir} in
\code{config} will be used if set. If both are set, they must be same. If both
are \code{NULL}, a temporary directory will be used.}
\item{config}{Configuration object.}
\item{params}{List of hyper parameters that will be passed into \code{model_fn}.
Keys are names of parameters, values are basic python types.}
\item{class}{An optional set of \R classes to add to the generated object.}
}
\description{
Construct a custom estimator, to be used to train and evaluate
TensorFlow models.
}
\details{
The \code{Estimator} object wraps a model which is specified by a \code{model_fn},
which, given inputs and a number of other parameters, returns the operations
necessary to perform training, evaluation, and prediction.
All outputs (checkpoints, event files, etc.) are written to \code{model_dir}, or a
subdirectory thereof. If \code{model_dir} is not set, a temporary directory is
used.
The \code{config} argument can be used to pass a run configuration object
containing information about the execution environment. It is passed on to
the \code{model_fn}, if the \code{model_fn} has a parameter named "config" (and input
functions in the same manner). If the \code{config} parameter is not passed, it is
instantiated by \code{estimator()}. Not passing config means that defaults useful
for local execution are used. \code{estimator()} makes config available to the
model (for instance, to allow specialization based on the number of workers
available), and also uses some of its fields to control internals, especially
regarding checkpointing.
The \code{params} argument contains hyperparameters. It is passed to the
\code{model_fn}, if the \code{model_fn} has a parameter named "params", and to the
input functions in the same manner. \code{estimator()} only passes \code{params} along, it
does not inspect it. The structure of \code{params} is therefore entirely up to
the developer.
None of estimator's methods can be overridden in subclasses (its
constructor enforces this). Subclasses should use \code{model_fn} to configure the
base class, and may add methods implementing specialized functionality.
}
\section{Model Functions}{
The \code{model_fn} should be an \R function of the form:
\preformatted{function(features, labels, mode, params) {
# 1. Configure the model via TensorFlow operations.
# 2. Define the loss function for training and evaluation.
# 3. Define the training optimizer.
# 4. Define how predictions should be produced.
# 5. Return the result as an `estimator_spec()` object.
estimator_spec(mode, predictions, loss, train_op, eval_metric_ops)
}}
The model function's inputs are defined as follows:
\tabular{ll}{
\code{features} \tab
The feature tensor(s). \cr
\code{labels} \tab
The label tensor(s). \cr
\code{mode} \tab
The current training mode ("train", "eval", "infer").
These can be accessed through the \code{mode_keys()} object. \cr
\code{params} \tab
An optional list of hyperparameters, as received
through the \code{estimator()} constructor. \cr
}
See \code{\link[=estimator_spec]{estimator_spec()}} for more details as to how the estimator specification
should be constructed, and \url{https://www.tensorflow.org/extend/estimators#constructing_the_model_fn} for
more information as to how the model function should be constructed.
}
\seealso{
Other custom estimator methods: \code{\link{estimator_spec}},
\code{\link{evaluate.tf_estimator}},
\code{\link{export_savedmodel.tf_estimator}},
\code{\link{predict.tf_estimator}},
\code{\link{train.tf_estimator}}
}
\concept{custom estimator methods}
| /man/estimator.Rd | no_license | muratmaga/tfestimators | R | false | true | 4,210 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tf_custom_estimator.R
\name{estimator}
\alias{estimator}
\title{Construct a Custom Estimator}
\usage{
estimator(model_fn, model_dir = NULL, config = NULL, params = NULL,
class = NULL)
}
\arguments{
\item{model_fn}{The model function. See \strong{Model Function} for details
on the structure of a model function.}
\item{model_dir}{Directory to save model parameters, graph, etc. This can
also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model. If \code{NULL}, the \code{model_dir} in
\code{config} will be used if set. If both are set, they must be same. If both
are \code{NULL}, a temporary directory will be used.}
\item{config}{Configuration object.}
\item{params}{List of hyper parameters that will be passed into \code{model_fn}.
Keys are names of parameters, values are basic python types.}
\item{class}{An optional set of \R classes to add to the generated object.}
}
\description{
Construct a custom estimator, to be used to train and evaluate
TensorFlow models.
}
\details{
The \code{Estimator} object wraps a model which is specified by a \code{model_fn},
which, given inputs and a number of other parameters, returns the operations
necessary to perform training, evaluation, and prediction.
All outputs (checkpoints, event files, etc.) are written to \code{model_dir}, or a
subdirectory thereof. If \code{model_dir} is not set, a temporary directory is
used.
The \code{config} argument can be used to pass a run configuration object
containing information about the execution environment. It is passed on to
the \code{model_fn}, if the \code{model_fn} has a parameter named "config" (and input
functions in the same manner). If the \code{config} parameter is not passed, it is
instantiated by \code{estimator()}. Not passing config means that defaults useful
for local execution are used. \code{estimator()} makes config available to the
model (for instance, to allow specialization based on the number of workers
available), and also uses some of its fields to control internals, especially
regarding checkpointing.
The \code{params} argument contains hyperparameters. It is passed to the
\code{model_fn}, if the \code{model_fn} has a parameter named "params", and to the
input functions in the same manner. \code{estimator()} only passes \code{params} along, it
does not inspect it. The structure of \code{params} is therefore entirely up to
the developer.
None of estimator's methods can be overridden in subclasses (its
constructor enforces this). Subclasses should use \code{model_fn} to configure the
base class, and may add methods implementing specialized functionality.
}
\section{Model Functions}{
The \code{model_fn} should be an \R function of the form:
\preformatted{function(features, labels, mode, params) {
# 1. Configure the model via TensorFlow operations.
# 2. Define the loss function for training and evaluation.
# 3. Define the training optimizer.
# 4. Define how predictions should be produced.
# 5. Return the result as an `estimator_spec()` object.
estimator_spec(mode, predictions, loss, train_op, eval_metric_ops)
}}
The model function's inputs are defined as follows:
\tabular{ll}{
\code{features} \tab
The feature tensor(s). \cr
\code{labels} \tab
The label tensor(s). \cr
\code{mode} \tab
The current training mode ("train", "eval", "infer").
These can be accessed through the \code{mode_keys()} object. \cr
\code{params} \tab
An optional list of hyperparameters, as received
through the \code{estimator()} constructor. \cr
}
See \code{\link[=estimator_spec]{estimator_spec()}} for more details as to how the estimator specification
should be constructed, and \url{https://www.tensorflow.org/extend/estimators#constructing_the_model_fn} for
more information as to how the model function should be constructed.
}
\seealso{
Other custom estimator methods: \code{\link{estimator_spec}},
\code{\link{evaluate.tf_estimator}},
\code{\link{export_savedmodel.tf_estimator}},
\code{\link{predict.tf_estimator}},
\code{\link{train.tf_estimator}}
}
\concept{custom estimator methods}
|
## plot4.R: builds a 2x2 panel figure of household power measurements for
## 2007-02-01 and 2007-02-02 and writes it to plot4.png.

setwd("~")

# Read the raw data; the file uses ";" separators and "?" for missing values.
power <- read.table("downloads/household_power_consumption.txt",
                    sep = ";", header = TRUE, na.strings = "?")
power$Date <- as.Date(power$Date, format = "%d/%m/%Y")

# Restrict to the two days of interest and derive a POSIXct date-time column.
feb <- power[power$Date %in% as.Date(c("2007-02-01", "2007-02-02")), ]
feb$DT <- as.POSIXct(paste(feb$Date, feb$Time), format = "%Y-%m-%d %H:%M:%S")

setwd("desktop/coursera/ExData_Plotting1")
png(file = "plot4.png", bg = "transparent")
par(mar = c(4, 4, 2, 2), mfrow = c(2, 2))

# Top-left panel: global active power over time (identical to plot2).
plot(feb$DT, feb$Global_active_power, type = "l",
     xlab = "", ylab = "Global Active Power (kilowatts)")

# Top-right panel: voltage against date-time.
plot(feb$DT, feb$Voltage, type = "l", xlab = "datetime", ylab = "Voltage")

# Bottom-left panel: the three sub-metering series (identical to plot3).
plot(feb$DT, feb$Sub_metering_1, type = "l",
     xlab = "", ylab = "Energy sub metering")
lines(feb$DT, feb$Sub_metering_2, col = "red")
lines(feb$DT, feb$Sub_metering_3, col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       bty = "n", lty = c(1, 1), col = c("black", "red", "blue"))

# Bottom-right panel: global reactive power against date-time.
plot(feb$DT, feb$Global_reactive_power, type = "l",
     xlab = "datetime", ylab = "Global_reactive_power")

# Close the device to flush plot4.png to disk.
dev.off()
| /plot4.R | no_license | roy9389/ExData_Plotting1 | R | false | false | 1,285 | r | setwd("~")
# Read the full power-consumption dataset; "?" marks missing values in the raw file.
EPCData <- read.table("downloads/household_power_consumption.txt", sep = ";", header = TRUE, na.strings = "?")
EPCData$Date <- as.Date(EPCData$Date, format = "%d/%m/%Y")
# Keep only the two days of interest.
newdata <- subset(EPCData, Date == "2007-02-01" | Date == "2007-02-02")
# Combine Date and Time into a single POSIXct date-time column for the x-axis.
newdata$DT <- as.POSIXct(paste(newdata$Date, newdata$Time), format = "%Y-%m-%d %H:%M:%S")
setwd("desktop/coursera/ExData_Plotting1")
# Open the PNG device and lay out a 2x2 grid of panels.
png(file = "plot4.png", bg = "transparent")
par(mar = c(4,4,2,2), mfrow = c(2,2))
## plot4 consists of 4 plots; the first one is identical to plot2.
with(newdata, plot(DT, Global_active_power, type = "l", xlab = "", ylab = "Global Active Power (kilowatts)"))
## The second one plots Voltage against date-time.
with(newdata, plot(DT, Voltage, type = "l", xlab = "datetime"))
## The third one is identical to plot3: three sub-metering series plus a legend.
with(newdata, plot(DT, Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering"))
with(newdata, points(DT, Sub_metering_2, type = "l", col = "red"))
with(newdata, points(DT, Sub_metering_3, type = "l", col = "blue"))
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), bty = "n", lty = c(1,1), col = c("black", "red","blue"))
## The fourth one plots Global_reactive_power against date-time.
with(newdata, plot(DT, Global_reactive_power, type = "l", xlab = "datetime"))
# Close the device to write plot4.png.
dev.off()
|
# Simple study-hours vs. exam-score regression demo.
# Expects a data frame `studyhour` with columns Hours and Score to already be
# loaded (e.g. via read.csv()).
df <- studyhour
head(df, 10)

# Exploratory data analysis: scatterplot of the raw relationship.
plot(df, main = "Hours vs Percentage", xlab = "Hours Studied",
     ylab = "Percentage Score", col = "dark red")

# Split the data into a 75% training / 25% test partition.
# NOTE(review): call set.seed() first if a reproducible split is required.
sam_size <- floor(0.75 * nrow(df))
train_data <- sample(nrow(df), size = sam_size)
train <- df[train_data, ]
test <- df[-train_data, ]

# Fit a linear model on the training data.
reggression1 <- lm(Score ~ Hours, train)
summary(reggression1)

# Try some transformations of the variables.
reggression2 <- lm(log(Score) ~ Hours, train)
summary(reggression2)
reggression3 <- lm(log(Score) ~ log(Hours), train)
summary(reggression3)
# NOTE(review): inside a formula "(Hours)^2" is NOT a quadratic term ("^" means
# interaction crossing there), so this model is identical to reggression1.
# Use I(Hours^2) or poly(Hours, 2) if a genuine polynomial fit is intended.
reggression4 <- lm(Score ~ (Hours)^2, train)
summary(reggression4)
# Here the left-hand side IS squared arithmetically, i.e. Score^2 ~ Hours.
reggression5 <- lm((Score)^2 ~ (Hours)^2, train)
summary(reggression5)

# Plot the chosen model's regression line over the data.
plot(df, main = "Hours vs Percentage", xlab = "Hours Studied",
     ylab = "Percentage Score", col = "dark red")
abline(lm(Score ~ (Hours)^2, data = train), col = "blue")

# Predict scores for the held-out test set.
y.pred1 <- predict(reggression4, test)
print(y.pred1)

# Compare actual vs. predicted. Use "=" (not "<-") inside data.frame() so the
# columns get the intended names instead of creating stray global variables.
output <- data.frame(actual = test$Score, predicted = y.pred1)
print(output)

# You can also predict for new data of your own; only Hours is needed.
input <- data.frame(Hours = 5:7)
y.pred2 <- predict(reggression4, input)
print(y.pred2)

# Evaluate the model with root-mean-squared error on the test set.
rmse <- sqrt(mean((test$Score - y.pred1)^2))
print(rmse)
| /spark.R | no_license | surojit1231/study_hour_project | R | false | false | 1,402 | r | #import the data as csv file
# Simple study-hours vs. exam-score regression demo; expects a data frame
# `studyhour` (columns Hours, Score) to be loaded already.
df<-studyhour
head(df,10)
# exploratory data analysis: scatterplot of the raw relationship
plot(df,main="Hours vs Percentage",xlab="Hours Studied",ylab="Percentage Score",col="dark red")
# split the data into a 75% training / 25% test partition
# NOTE(review): call set.seed() first if a reproducible split is required
sam_size=floor(0.75*nrow(df))
train_data<-sample((nrow(df)),size=sam_size)
train=df[train_data,]
test=df[-train_data,]
# fit a linear model on the training data
reggression1<-lm((Score)~(Hours),train)
summary(reggression1)
# try some transformations of the variables
reggression2<-lm(log(Score)~(Hours),train)
summary(reggression2)
reggression3<-lm(log(Score)~log(Hours),train)
summary(reggression3)
# NOTE(review): in formula syntax (Hours)^2 is NOT a quadratic term ("^" means
# interaction crossing), so this model is identical to reggression1; use
# I(Hours^2) or poly(Hours, 2) for a genuine polynomial fit
reggression4<-lm((Score)~(Hours)^2,train)
summary(reggression4)
# here the left-hand side IS squared arithmetically (Score^2 ~ Hours)
reggression5<-lm((Score)^2~(Hours)^2,train)
summary(reggression5)
# plot the chosen model's regression line over the data
plot(df,main="Hours vs Percentage",xlab="Hours Studied",ylab="Percentage Score",col="dark red")
abline(lm(Score~(Hours)^2,data=train),col="blue")
# predict scores for the held-out test set
y.pred1<-predict(reggression4,test)
print(y.pred1)
# comparing actual vs predicted
# NOTE(review): "<-" inside data.frame() assigns stray globals and mangles the
# column names; prefer data.frame(actual = test$Score, predicted = y.pred1)
output<-data.frame(actual<-test$Score,predicted<-y.pred1)
print(output)
# you can also test with your own data
# NOTE(review): same pitfall -- prefer data.frame(Hours = 5:7); predict() only
# finds `Hours` here because "<-" happens to create it as a global variable
input<-data.frame(Hours<-(5:7),Scores<-(1:3))
y.pred2<-predict(reggression4,input)
print(y.pred2)
# evaluate the model with root-mean-squared error on the test set
rmse<-sqrt(mean((test$Score-y.pred1)^2))
print(rmse)
# NOTE(review): could further inspect residuals to compare the candidate models
|
function(input, output) {
# Shiny server for the swiss-dataset explorer. Wires four UI tabs to outputs:
# a raw data table, single-variable exploration, a scatterplot matrix, and a
# step-wise linear-model builder with optional leverage-point exclusion.
# NOTE(review): relies on helpers defined elsewhere in the app (nice(),
# boxplot_variable(), logarithm_variable(), normalized_variable(),
# polynomial_variable(), add_transformed_columns(), cor_threshold_vars(),
# panel.smooth, panel.cor) and on the data frame swiss2 -- confirm in global.R.
# Tab Raw Data: show all rows or only the user-selected ones.
output$view <- renderTable({
if (input$all == "All") {swiss2}
else if (input$all == "None") {swiss2[input$obs,]}}, rownames = TRUE)
# Tab Variable Exploration: the currently selected column of swiss2.
currentVariable <- reactive(swiss2[,input$var])
output$summaryPlot <- renderPlot(nice(currentVariable(), input$var ), height = 450)
output$summaryStatistics <- renderPrint({if (input$var_statistic == "Yes") {summary(currentVariable())}})
output$Boxplot <- renderPlot({if (input$var_boxplot == "Yes") {boxplot_variable(currentVariable(), input$var )}}, height = 450)
# Optional transformation view of the selected variable.
output$summaryPlot_transform <- renderPlot({
if (input$var_transform == "Logarithmic") {logarithm_variable(currentVariable(), input$var )}
else if (input$var_transform == "Normalized") {normalized_variable(currentVariable(), input$var )}
else if (input$var_transform == "Polynomial_square") {polynomial_variable(currentVariable(), input$var )}
}, height = 450)
# Tab Scatterplot: per-variable transformation choices for the pairs plot.
var_scatter <- reactive({ c(input$fertility_scatter, input$agriculture_scatter, input$examination_scatter,
input$education_scatter, input$catholic_scatter, input$infant_mortality_scatter) })
# Data frame with the chosen transformations applied to each column.
variable_scatter_df <- reactive({
df_scatter <- data.frame(row.names = rownames(swiss2))
df_scatter <- add_transformed_columns(names(swiss2), var_scatter(), df_scatter, swiss2)
return(df_scatter)
})
# Table of variable pairs whose correlation exceeds the chosen threshold.
output$correlated_vars <- renderTable(cor_threshold_vars(variable_scatter_df(), input$cor_scatter))
output$scatterplot <- renderPlot( pairs({variable_scatter_df()},
lower.panel = panel.smooth, upper.panel = panel.cor,
gap=0, row1attop=TRUE), width = 750, height = 750 )
# output$scatterplot <- renderPlot( pairs(swiss2, lower.panel = panel.smooth, upper.panel = panel.cor,
# gap=0, row1attop=TRUE), width = 750, height = 750 ) # Change width and height
# plot(swiss2) yields a scatterplot matrix without correlation coefficients
# The regression model is built step by step below.
# A reactive value that is reused later must itself be wrapped reactively again;
# https://stackoverflow.com/questions/26454609/r-shiny-reactive-error
# variable_input_vector <- reactive({ c(input$fertility_input, input$agriculture_input, input$examination_input,
# input$education_input, input$catholic_input, input$infant_mortality_input)})
# Per-variable transformation choices for the model-building tab.
var_transform <- reactive({ c(input$fertility_input, input$agriculture_input, input$examination_input,
input$education_input, input$catholic_input, input$infant_mortality_input) })
# Working data frame with the chosen transformations applied.
variable_work_df <- reactive({
df <- data.frame(row.names = rownames(swiss2))
df <- add_transformed_columns(names(swiss2), var_transform(), df, swiss2)
return(df)
})
# Right-hand side of the model formula: all columns except the dependent one.
independent_var <- reactive({ paste( names(variable_work_df()[-which(names(variable_work_df())==input$dependent_var)]), sep = " " , collapse = '+')})
# Model output.
leveragePoints <- reactive({ input$selectedLeveragePoints }) # leverage points stored in a variable
#swissNoLeverage <- reactive({ swiss2[-which(rownames_swiss2 %in% leveragePoints() ),] }) # removed from swiss into a new data set
myModel <- reactive(
# Case 1: leverage points were selected and the adjusted model is requested.
if(!is.null(variable_work_df()[2,2])
&& !is.null(input$selectedLeveragePoints)
&& (input$adjustedModel == TRUE)){
noLeverageformula <- reactive({ paste(input$dependent_var," ~ ", independent_var() )}) # model formula
currentLinearModel <- reactive( {lm(noLeverageformula(), data = variable_work_df()[-which(rownames(variable_work_df()) %in% leveragePoints() ),] )} ) # fitted model
return(currentLinearModel() )
}
# Case 2: no leverage points selected.
else if( !is.null(variable_work_df()[2,2])) { # only when variables have been chosen
myformula <- reactive({ paste(input$dependent_var," ~ ", independent_var() )})
currentLinearModel <- reactive( {lm(myformula(), data = variable_work_df())} )
return(currentLinearModel() )}
)
# Standard 2x2 lm diagnostic plots for the current model.
output$linModelPlot <- renderPlot({
if( !is.null(variable_work_df()[2,2])) {
layout(matrix(c(1,2,3,4), 2,2, byrow = TRUE), respect = T)
plot(myModel() ) }
}, width = 900, height = 900)
# Textual summary of the current model, or a prompt if none is selected.
output$summary_linearModel <- renderPrint({
if(is.null(variable_work_df()[2,2])) {
print("Please select a model")
} else {
summary(myModel())}
})
# Stepwise-AIC model-comparison output.
output$outStepwiseAIC <- renderPrint({
if( input$inStepwiseAIC == TRUE) {
step(myModel())
}
})
}
| /explAnalyse_swiss/server.R | no_license | evalehner/shinyStats | R | false | false | 4,728 | r | function(input, output) {
# Shiny server body (see the enclosing function(input, output) above):
# wires the raw-data, variable-exploration, scatterplot and model-builder tabs.
# NOTE(review): relies on helpers defined elsewhere in the app (nice(),
# boxplot_variable(), add_transformed_columns(), cor_threshold_vars(), etc.)
# and on the data frame swiss2 -- confirm in global.R.
# Tab Raw Data
output$view <- renderTable({
if (input$all == "All") {swiss2}
else if (input$all == "None") {swiss2[input$obs,]}}, rownames = TRUE)
# Tab Variable Exploration
currentVariable <- reactive(swiss2[,input$var])
output$summaryPlot <- renderPlot(nice(currentVariable(), input$var ), height = 450)
output$summaryStatistics <- renderPrint({if (input$var_statistic == "Yes") {summary(currentVariable())}})
output$Boxplot <- renderPlot({if (input$var_boxplot == "Yes") {boxplot_variable(currentVariable(), input$var )}}, height = 450)
output$summaryPlot_transform <- renderPlot({
if (input$var_transform == "Logarithmic") {logarithm_variable(currentVariable(), input$var )}
else if (input$var_transform == "Normalized") {normalized_variable(currentVariable(), input$var )}
else if (input$var_transform == "Polynomial_square") {polynomial_variable(currentVariable(), input$var )}
}, height = 450)
# Tab Scatterplot
var_scatter <- reactive({ c(input$fertility_scatter, input$agriculture_scatter, input$examination_scatter,
input$education_scatter, input$catholic_scatter, input$infant_mortality_scatter) })
variable_scatter_df <- reactive({
df_scatter <- data.frame(row.names = rownames(swiss2))
df_scatter <- add_transformed_columns(names(swiss2), var_scatter(), df_scatter, swiss2)
return(df_scatter)
})
output$correlated_vars <- renderTable(cor_threshold_vars(variable_scatter_df(), input$cor_scatter))
output$scatterplot <- renderPlot( pairs({variable_scatter_df()},
lower.panel = panel.smooth, upper.panel = panel.cor,
gap=0, row1attop=TRUE), width = 750, height = 750 )
# output$scatterplot <- renderPlot( pairs(swiss2, lower.panel = panel.smooth, upper.panel = panel.cor,
# gap=0, row1attop=TRUE), width = 750, height = 750 ) # Change width and height
# plot(swiss2) yields a scatterplot matrix without correlation coefficients
# The regression model is built step by step below.
# A reactive value that is reused later must itself be wrapped reactively again;
# https://stackoverflow.com/questions/26454609/r-shiny-reactive-error
# variable_input_vector <- reactive({ c(input$fertility_input, input$agriculture_input, input$examination_input,
# input$education_input, input$catholic_input, input$infant_mortality_input)})
var_transform <- reactive({ c(input$fertility_input, input$agriculture_input, input$examination_input,
input$education_input, input$catholic_input, input$infant_mortality_input) })
variable_work_df <- reactive({
df <- data.frame(row.names = rownames(swiss2))
df <- add_transformed_columns(names(swiss2), var_transform(), df, swiss2)
return(df)
})
independent_var <- reactive({ paste( names(variable_work_df()[-which(names(variable_work_df())==input$dependent_var)]), sep = " " , collapse = '+')})
# output model
leveragePoints <- reactive({ input$selectedLeveragePoints }) # leverage points stored in a variable
#swissNoLeverage <- reactive({ swiss2[-which(rownames_swiss2 %in% leveragePoints() ),] }) # removed from swiss into a new data set
myModel <- reactive(
# Case 1: leverage points were selected and the adjusted model is requested.
if(!is.null(variable_work_df()[2,2])
&& !is.null(input$selectedLeveragePoints)
&& (input$adjustedModel == TRUE)){
noLeverageformula <- reactive({ paste(input$dependent_var," ~ ", independent_var() )}) # model formula
currentLinearModel <- reactive( {lm(noLeverageformula(), data = variable_work_df()[-which(rownames(variable_work_df()) %in% leveragePoints() ),] )} ) # fitted model
return(currentLinearModel() )
}
# Case 2: no leverage points selected.
else if( !is.null(variable_work_df()[2,2])) { # only when variables have been chosen
myformula <- reactive({ paste(input$dependent_var," ~ ", independent_var() )})
currentLinearModel <- reactive( {lm(myformula(), data = variable_work_df())} )
return(currentLinearModel() )}
)
output$linModelPlot <- renderPlot({
if( !is.null(variable_work_df()[2,2])) {
layout(matrix(c(1,2,3,4), 2,2, byrow = TRUE), respect = T)
plot(myModel() ) }
}, width = 900, height = 900)
output$summary_linearModel <- renderPrint({
if(is.null(variable_work_df()[2,2])) {
print("Please select a model")
} else {
summary(myModel())}
})
# Stepwise-AIC model-comparison output.
output$outStepwiseAIC <- renderPrint({
if( input$inStepwiseAIC == TRUE) {
step(myModel())
}
})
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summarize_sim_P.r
\name{summarize_sim_P}
\alias{summarize_sim_P}
\title{Summarize with multiple parameter sets}
\usage{
summarize_sim_P(run_dir = "./", parm_dir = "./",
results_dir = "./Summaries/", cross_time = F)
}
\arguments{
\item{run_dir}{directory where simulation runs are located.
Defaults to the current directory.}
\item{parm_dir}{directory where parameter files are located.
Defaults to the current directory.}
\item{results_dir}{directory where results should be saved.
Defaults to 'Summaries' in the current directory.}
\item{cross_time}{logical indicating whether to use \code{t_window}
to summarize the simulation in windows of time}
}
\value{
nothing
}
\description{
Summarize simulation runs with multiple sets of parameters
}
\details{
This function summarizes multiple simulation runs with multiple
sets of parameters, which are saved in the directory
given in \code{parm_dir}. Parameter filenames must start with 's_'.
Each parameter file should have a unique \code{sumID} defined within it
as well as define objects that can be passed as parameters to
\code{\link{summarize_sim_N}}. Parameters
requiring a value are \code{breaks}, \code{locs} and \code{t_window}.
The function will either summarize simulation results for a set time period
defined in \code{t_window} (default) or for multiple consecutive time windows
(use \code{cross_time=TRUE}), in which case \code{t_window}
defines the time interval and must be a list with named elements
\code{start} and \code{stop}. If \code{cross_time=FALSE} then
two summary objects are saved to the .RData file: \code{sim_sum_ind}-
includes a summary for each run, and \code{sim_sum}- summarizes quantities
across runs using the function defined in \code{sum_func}.
If \code{cross_time=TRUE} then only \code{sim_sum_ind} is saved and
\code{T=<time>} is appended to the filename to denote the timestep at
which the summary ends.
Results are saved in the directory \code{results_dir} and are not returned
by the function. Filenames follow the convention
\code{<sumID>_summary.RData}.
}
\seealso{
\code{\link{summarize_simulation}} for command line execution
}
| /Code/CTSim/man/summarize_sim_P.Rd | no_license | hurlbertlab/core-transient-simulation | R | false | true | 2,212 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summarize_sim_P.r
\name{summarize_sim_P}
\alias{summarize_sim_P}
\title{Summarize with multiple parameter sets}
\usage{
summarize_sim_P(run_dir = "./", parm_dir = "./",
results_dir = "./Summaries/", cross_time = F)
}
\arguments{
\item{run_dir}{directory where simulation runs are located.
Defaults to the current directory.}
\item{parm_dir}{directory where parameter files are located.
Defaults to the current directory.}
\item{results_dir}{directory where results should be saved.
Defaults to 'Summaries' in the current directory.}
\item{cross_time}{logical indicating whether to use \code{t_window}
to summarize the simulation in windows of time}
}
\value{
nothing
}
\description{
Summarize simulation runs with multiple sets of parameters
}
\details{
This function summarizes multiple simulation runs with multiple
sets of parameters, which are saved in the directory
given in \code{parm_dir}. Parameter filenames must start with 's_'.
Each parameter file should have a unique \code{sumID} defined within it
as well as define objects that can be passed as parameters to
\code{\link{summarize_sim_N}}. Parameters
requiring a value are \code{breaks}, \code{locs} and \code{t_window}.
The function will either summarize simulation results for a set time period
defined in \code{t_window} (default) or for multiple consecutive time windows
(use \code{cross_time=TRUE}), in which case \code{t_window}
defines the time interval and must be a list with named elements
\code{start} and \code{stop}. If \code{cross_time=FALSE} then
two summary objects are saved to the .RData file: \code{sim_sum_ind}-
includes a summary for each run, and \code{sim_sum}- summarizes quantities
across runs using the function defined in \code{sum_func}.
If \code{cross_time=TRUE} then only \code{sim_sum_ind} is saved and
\code{T=<time>} is appended to the filename to denote the timestep at
which the summary ends.
Results are saved in the directory \code{results_dir} and are not returned
by the function. Filenames follow the convention
\code{<sumID>_summary.RData}.
}
\seealso{
\code{\link{summarize_simulation}} for command line execution
}
|
#------------------------------------------ run_shinymixr ------------------------------------------
#' Creates and runs the interface
#'
#' @param wd character with the working directory
#' @param ... arguments passed to the shiny runApp function
#'
#' @export
#' @return runs the shinyMixR interface
#' @author Richard Hooijmaijers
#' @examples
#'
#' \dontrun{
#' run_shinymixr(".")
#' }
run_shinymixr <- function(wd=getwd(),...){
  # Directory layout used by the interface, all relative to `wd`.
  app_dir  <- paste0(wd, "/shinyMixR/app")
  www_dir  <- paste0(app_dir, "/www")
  temp_dir <- paste0(wd, "/shinyMixR/temp")

  # Create the folder skeleton on first use; failures are non-fatal.
  if (!file.exists(www_dir))  try(dir.create(www_dir, recursive = TRUE))
  if (!file.exists(temp_dir)) try(dir.create(temp_dir, recursive=TRUE))

  # Copy the packaged dashboard sources and assets into the project app folder.
  try(file.copy(system.file("Dashboard", "global.R", package = "shinyMixR"),
                paste0(app_dir, "/global.R"), overwrite = TRUE), silent = TRUE)
  try(file.copy(system.file("Dashboard", "server.R", package = "shinyMixR"),
                paste0(app_dir, "/server.R"), overwrite = TRUE), silent = TRUE)
  try(file.copy(system.file("Dashboard", "logoshinyMixR.png", package = "shinyMixR"),
                paste0(www_dir, "/logoshinyMixR.png")), silent = TRUE)

  # The working directory must be injected into ui.R (global.R does not work).
  ui_lines <- readLines(system.file("Dashboard", "ui.R", package = "shinyMixR"))
  ui_lines <- c(paste0("setwd(\"", normalizePath(wd, winslash = "/"), "\")"), ui_lines)
  writeLines(ui_lines, paste0(app_dir, "/ui.R"))

  # Remove stale progress files from earlier runs before starting the app.
  try(unlink(list.files(temp_dir, pattern = ".*prog\\.txt$", full.names = TRUE)))

  shiny::runApp(app_dir, ...)
}
| /R/run_shinymixr.r | permissive | RichardHooijmaijers/shinyMixR | R | false | false | 1,746 | r | #------------------------------------------ run_shinymixr ------------------------------------------
#' Creates and runs the interface
#'
#' @param wd character with the working directory
#' @param ... arguments passed to the shiny runApp function
#'
#' @export
#' @return runs the shinyMixR interface
#' @author Richard Hooijmaijers
#' @examples
#'
#' \dontrun{
#' run_shinymixr(".")
#' }
run_shinymixr <- function(wd=getwd(),...){
# Create the app and temp folder skeleton under `wd` on first use; wrapped in
# try() so pre-existing or unwritable paths do not abort the launch.
if(!file.exists(paste0(wd,"/shinyMixR/app/www"))) try(dir.create(paste0(wd,"/shinyMixR/app/www"),recursive = TRUE))
#if(!file.exists(paste0(wd,"/shinyMixR/app/R"))) try(dir.create(paste0(wd,"/shinyMixR/app/R"),recursive = TRUE))
if(!file.exists(paste0(wd,"/shinyMixR/temp"))) try(dir.create(paste0(wd,"/shinyMixR/temp"),recursive=TRUE))
# Copy the packaged dashboard sources and assets into the project app folder.
try(file.copy(system.file("Dashboard","global.R",package="shinyMixR"), paste0(wd,"/shinyMixR/app/global.R"),overwrite = TRUE),silent = TRUE)
try(file.copy(system.file("Dashboard","server.R",package="shinyMixR"), paste0(wd,"/shinyMixR/app/server.R"),overwrite = TRUE),silent = TRUE)
try(file.copy(system.file("Dashboard","logoshinyMixR.png",package="shinyMixR"), paste0(wd,"/shinyMixR/app/www/logoshinyMixR.png")),silent = TRUE)
# We need to add the working directory to the ui.R file (global.R does not work)
adpt <- readLines(system.file("Dashboard","ui.R",package="shinyMixR"))
adpt <- c(paste0("setwd(\"",normalizePath(wd,winslash = "/"),"\")"),adpt)
writeLines(adpt,paste0(wd,"/shinyMixR/app/ui.R"))
# Clean up stale progress files from earlier runs before starting the app.
try(unlink(list.files(paste0(wd,"/shinyMixR/temp"),pattern=".*prog\\.txt$",full.names = TRUE)))
shiny::runApp(paste0(wd,"/shinyMixR/app"),...)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/runGO.R
\name{runGO}
\alias{runGO}
\title{Calculating pathway enrichment scores for scATAC-seq profiles}
\usage{
runGO(gmtFile, BGfile, countFile, method, globalaccessibility_scores,
FGfile, promoters = FALSE)
}
\arguments{
\item{gmtFile}{gene-set file}
\item{BGfile}{background file}
\item{countFile}{scATAC-seq count matrix}
\item{method}{If method is chosen as 1, data normalization is performed using global accessibility scores. If selected method is 2 then local accessibility score-based normalization is performed}
\item{globalaccessibility_scores}{global accessibility scores for input count data matrix}
\item{FGfile}{foreground file}
\item{promoters}{logical; whether promoters should be used for conversion of scATAC-seq profiles to pathway scores. Default is \code{FALSE}}
}
\value{
A list containing two data frames: one contains p-values based on the hypergeometric test and the other contains p-values based on the binomial test
}
\description{
Calculating pathway enrichment scores for scATAC-seq profiles
}
\examples{
runGO()
}
| /man/runGO.Rd | no_license | reggenlab/UniPath | R | false | true | 1,114 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/runGO.R
\name{runGO}
\alias{runGO}
\title{Calculating pathway enrichment scores for scATAC-seq profiles}
\usage{
runGO(gmtFile, BGfile, countFile, method, globalaccessibility_scores,
FGfile, promoters = FALSE)
}
\arguments{
\item{gmtFile}{gene-set file}
\item{BGfile}{background file}
\item{countFile}{scATAC-seq count matrix}
\item{method}{If method is chosen as 1, data normalization is performed using global accessibility scores. If selected method is 2 then local accessibility score-based normalization is performed}
\item{globalaccessibility_scores}{global accessibility scores for input count data matrix}
\item{FGfile}{foreground file}
\item{promoters}{logical; whether promoters should be used for conversion of scATAC-seq profiles to pathway scores. Default is \code{FALSE}}
}
\value{
A list containing two data frames: one contains p-values based on the hypergeometric test and the other contains p-values based on the binomial test
}
\description{
Calculating pathway enrichment scores for scATAC-seq profiles
}
\examples{
runGO()
}
|
# To use this function, source it to your environment by `source("cellRangerLikeEmptyDrops.R")`
#' An approximate implementation of the `--soloCellFilter EmptyDrops_CR` filtering approach to identify empty droplets.
#'
#' An approximate implementation of the `--soloCellFilter EmptyDrops_CR` filtering approach,
#' which, itself, was reverse-engineered from the behavior of CellRanger 3+.
#'
#' @param m A numeric matrix-like object containing counts, where columns represent barcoded droplets and rows represent features.
#' The matrix should only contain barcodes for an individual sample, prior to any filtering for cells.
#'
#' @param umiMin A numeric scalar specifying the minimum UMI count above which a sample will be included in ambient profiles,
#' as specified in the call to CellRanger.
#'
#' @param umiMinFracMedian A numeric scalar between 0 and 1 specifying that only the samples whose UMI count are above \code{umiMinFracMedian}
#' fraction of the median UMI count#' of the top \code{nExpectedCells} samples will be included in the ambient profile.
#' as specified in the call to CellRanger.
#'
#' @param candMaxN An integer specifying the maximum number of ambient samples that are possible to be regarded as real cells,
#' as specified in the call to CellRanger.
#'
#' @param indMax An integer specifying the highest UMI count ranking of the ambient pool, cells with UMI count ranking above
#' this number will not be included in the ambient pool, as specified in the call to CellRanger.
#'
#' @param indMin An integer specifying the lowest UMI count ranking of the ambient pool, cells with UMI count ranking below
#' this number will not be included in the ambient pool, as specified in the call to CellRanger.
#'
#' @param fdr_thresh A numeric scalar specifying the FDR threshold to filter samples. Samples whose FDR returned by emptyDrops
#' is above this threshold will not be regarded as real cells, as specified in the call to CellRanger.
#'
#' @param maxPercentile A numeric scalar specifying the percentile used in simple filtering, samples selected by simple filtering
#' will be regarded as real cells regardless of the \code{emptyDrops} result, as specified in the call to CellRanger.
#'
#' @param nExpectedCells A numeric scalar specifying the expected number of cells in this sample, as specified in the call to CellRanger.
#'
#' @param maxMinRatio A numeric scalar specifying the maximum ratio of maximum UMI count and minimum UMI count used in simple filtering,
#' maximum UMI count used in simple filtering is determined first by \code{nExpectedCells*(1-maxPercentile)}, minimum UMI count used in
#' simple filtering is then determined by this ratio, as specified in the call to CellRanger..
#'
#' @param seed Integer specifying the seed that will be used to run \code{emptyDrops}
#' @param ... For the generic, further arguments to pass to \code{emptyDrops}.
#'
#' @details
#' This function is an approximate implementation of the `--soloCellFilter EmptyDrops_CR` filtering approach of STARsolo
#' (\url{https://www.biorxiv.org/content/10.1101/2021.05.05.442755v1}), which, itself, was reverse engineered from the behavior of CellRanger 3+.
#' The original C++ code on which this function is based can be found at
#' (\url{https://github.com/alexdobin/STAR/blob/master/source/SoloFeature_cellFiltering.cpp})
#' All parameters default to the values used in STARsolo and CellRanger.
#' In most cases, users just need to specify the raw and unfiltered count matrix, \code{m}.
#' See \code{?\link{emptyDrops}} for an alternative approach for cell calling.
#'
#' @return
#' A DataFrame like \code{\link{emptyDrops}}, with an additional binary \code{is.cell} field demonstrating whether
#' samples are estimated as real cells.
#'
#' @author
#' Dongze He, Rob Patro
#'
#' @examples
#' # Mocking up some data:
#' set.seed(0)
#' my.counts <- DropletUtils:::simCounts()
#'
#' # Identify likely cell-containing droplets.
#' e.out <- cellRangerLikeEmptyDrops(my.counts)
#' e.out
#'
#' # Get matrix of estimated cells.
#' cell.counts <- my.counts[, e.out$is.cell]
#'
#' @references
#' Kaminow et al. (2021).
#' STARsolo: accurate, fast and versatile mapping/quantification of single-cell and single-nucleus RNA-seq data
#' \url{https://www.biorxiv.org/content/10.1101/2021.05.05.442755v1}
#'
#' @seealso
#' \code{\link{emptyDrops}}, for another method for calling cells.
#'
#' @name cellRangerLikeEmptyDrops
NULL
# Authors: Dongze He, Rob Patro
# Center of Bioinformatics and Computational Biology, University of Maryland, College Park, Maryland, 20740
.cellRangerLikeEmptyDrops <- function(m,
                                      umiMin=500,
                                      umiMinFracMedian=0.01,
                                      candMaxN=20000,
                                      indMax=90000,
                                      indMin=45000,
                                      fdr_thresh=0.01,
                                      maxPercentile=0.99,
                                      nExpectedCells=3000,
                                      maxMinRatio=10,
                                      seed=2718,
                                      ...
                                      ) {
  # This function is an approximate implementation of the
  # `--soloCellFilter EmptyDrops_CR` filtering approach
  # of STARsolo (https://www.biorxiv.org/content/10.1101/2021.05.05.442755v1),
  # which, itself, was reverse engineered from the behavior of
  # CellRanger 3+. The original C++ code on which this
  # function is based can be found at (https://github.com/alexdobin/STAR/blob/master/source/SoloFeature_cellFiltering.cpp)
  ###################################################################################################################
  # Per-barcode total UMI counts, paired with the original column index and
  # sorted in decreasing order so barcodes can be flagged by rank below.
  # NOTE(review): colSums2() is not base R -- presumably matrixStats /
  # DelayedMatrixStats is attached by the package; confirm the import.
  csums <- colSums2(m)
  indCount <- as.data.frame(cbind(1:length(csums), csums))
  colnames(indCount) <- c("index", "count")
  indCount <- indCount[order(indCount$count,decreasing = TRUE),]
  # Simple Filtering: take the UMI count at rank nExpectedCells*(1-maxPercentile)
  # as the reference maximum; every barcode with at least nUMImax/maxMinRatio
  # UMIs passes the simple filter.
  maxind <- round(nExpectedCells * (1 - maxPercentile))
  nUMImax <- indCount$count[min(ncol(m), maxind)]
  nUMImin <- round(nUMImax/maxMinRatio)
  ncellsSimple <- sum(indCount$count>=nUMImin)
  # set lower bound
  # (the fractional index ncellsSimple/2 is truncated by R subsetting, i.e.
  # this reads the count at rank floor(ncellsSimple/2) -- roughly the median
  # UMI count of the simple-filtered set)
  minUMI <- max(umiMin, round(umiMinFracMedian * indCount$count[ncellsSimple/2]))
  ## we at most assign candMaxN samples in the ambient pool as real cells
  minUMI <- max(minUMI, indCount$count[min(ncellsSimple+candMaxN,nrow(indCount))])
  # emptyDrops
  ## ignore: the lower bound of UMI count, samples with UMI count less than ignore
  ## will not be considered as ambient cells.
  ignore_index <- min(ncol(m), indMax)
  ignore <- indCount$count[ignore_index]
  ## by.rank: cells with UMI count ranking lower than by.rank will be considered as
  ## ambient cells
  by.rank <- indMin
  ## retain: samples with UMI count higher than retain will be regarded as cells
  retain <- indCount$count[ncellsSimple]
  ## the cells with total UMI count between ignore and lower will be considered as ambient
  # emptyDrops uses Monte Carlo p-values; fix the seed for reproducibility.
  set.seed(seed)
  e.out <- DropletUtils::emptyDrops(m, by.rank=by.rank, ignore=ignore, retain=retain, alpha=Inf)
  # A barcode is called a cell when its FDR is below the threshold; NA FDR
  # (barcodes emptyDrops did not test) is treated as "not a cell".
  e.out$is.cell <- e.out$FDR < fdr_thresh
  e.out$is.cell[is.na(e.out$is.cell)] <- FALSE
  # further filter cells by minUMI
  e.out$is.cell[indCount[indCount$count<minUMI, "index"]] <- FALSE
  e.out
}
# S4 generic so that both plain matrix-like objects and SummarizedExperiment
# containers can be passed to cellRangerLikeEmptyDrops().
#' @export
#' @rdname cellRangerLikeEmptyDrops
setGeneric("cellRangerLikeEmptyDrops", function(m, ...) standardGeneric("cellRangerLikeEmptyDrops"))
# Default method: any matrix-like object containing raw counts.
#' @export
#' @rdname cellRangerLikeEmptyDrops
setMethod("cellRangerLikeEmptyDrops", "ANY", .cellRangerLikeEmptyDrops)
# SummarizedExperiment method: extract the requested assay (default "counts")
# and delegate to the matrix implementation.
#' @export
#' @rdname cellRangerLikeEmptyDrops
#' @importFrom SummarizedExperiment assay
setMethod("cellRangerLikeEmptyDrops", "SummarizedExperiment", function(m, ..., assay.type="counts") {
  .cellRangerLikeEmptyDrops(assay(m, assay.type), ...)
})
| /R/cellRangerLikeEmptyDrops.R | permissive | jashapiro/usefulaf | R | false | false | 8,186 | r | # To use this function, source it to your environment by `source("cellRangerLikeEmptyDrops.R")`
#' An approximate implementation of the `--soloCellFilter EmptyDrops_CR` filtering approach to identify empty droplets.
#'
#' An approximate implementation of the `--soloCellFilter EmptyDrops_CR` filtering approach,
#' which, itself, was reverse-engineered from the behavior of CellRanger 3+.
#'
#' @param m A numeric matrix-like object containing counts, where columns represent barcoded droplets and rows represent features.
#' The matrix should only contain barcodes for an individual sample, prior to any filtering for cells.
#'
#' @param umiMin A numeric scalar specifying the minimum UMI count above which a sample will be included in ambient profiles,
#' as specified in the call to CellRanger.
#'
#' @param umiMinFracMedian A numeric scalar between 0 and 1 specifying that only the samples whose UMI count are above \code{umiMinFracMedian}
#' fraction of the median UMI count#' of the top \code{nExpectedCells} samples will be included in the ambient profile.
#' as specified in the call to CellRanger.
#'
#' @param candMaxN An integer specifying the maximum number of ambient samples that are possible to be regarded as real cells,
#' as specified in the call to CellRanger.
#'
#' @param indMax An integer specifying the highest UMI count ranking of the ambient pool, cells with UMI count ranking above
#' this number will not be included in the ambient pool, as specified in the call to CellRanger.
#'
#' @param indMin An integer specifying the lowest UMI count ranking of the ambient pool, cells with UMI count ranking below
#' this number will not be included in the ambient pool, as specified in the call to CellRanger.
#'
#' @param fdr_thresh A numeric scalar specifying the FDR threshold to filter samples. Samples whose FDR returned by emptyDrops
#' is above this threshold will not be regarded as real cells, as specified in the call to CellRanger.
#'
#' @param maxPercentile A numeric scalar specifying the percentile used in simple filtering, samples selected by simple filtering
#' will be regarded as real cells regardless of the \code{emptyDrops} result, as specified in the call to CellRanger.
#'
#' @param nExpectedCells A numeric scalar specifying the expected number of cells in this sample, as specified in the call to CellRanger.
#'
#' @param maxMinRatio A numeric scalar specifying the maximum ratio of maximum UMI count and minimum UMI count used in simple filtering,
#' maximum UMI count used in simple filtering is determined first by \code{nExpectedCells*(1-maxPercentile)}, minimum UMI count used in
#' simple filtering is then determined by this ratio, as specified in the call to CellRanger..
#'
#' @param seed Integer specifying the seed that will be used to run \code{emptyDrops}
#' @param ... For the generic, further arguments to pass to \code{emptyDrops}.
#'
#' @details
#' This function is an approximate implementation of the `--soloCellFilter EmptyDrops_CR` filtering approach of STARsolo
#' (\url{https://www.biorxiv.org/content/10.1101/2021.05.05.442755v1}), which, itself, was reverse engineered from the behavior of CellRanger 3+.
#' The original C++ code on which this function is based can be found at
#' (\url{https://github.com/alexdobin/STAR/blob/master/source/SoloFeature_cellFiltering.cpp})
#' All parameters default to the values used in STARsolo and CellRanger.
#' In most cases, users just need to specify the raw and unfiltered count matrix, \code{m}.
#' See \code{?\link{emptyDrops}} for an alternative approach for cell calling.
#'
#' @return
#' A DataFrame like \code{\link{emptyDrops}}, with an additional binary \code{is.cell} field demonstrating whether
#' samples are estimated as real cells.
#'
#' @author
#' Dongze He, Rob Patro
#'
#' @examples
#' # Mocking up some data:
#' set.seed(0)
#' my.counts <- DropletUtils:::simCounts()
#'
#' # Identify likely cell-containing droplets.
#' e.out <- cellRangerLikeEmptyDrops(my.counts)
#' e.out
#'
#' # Get matrix of estimated cells.
#' cell.counts <- my.counts[, e.out$is.cell]
#'
#' @references
#' Kaminow et al. (2021).
#' STARsolo: accurate, fast and versatile mapping/quantification of single-cell and single-nucleus RNA-seq data
#' \url{https://www.biorxiv.org/content/10.1101/2021.05.05.442755v1}
#'
#' @seealso
#' \code{\link{emptyDrops}}, for another method for calling cells.
#'
#' @name cellRangerLikeEmptyDrops
NULL
# Authors: Dongze He, Rob Patro
# Center of Bioinformatics and Computational Biology, University of Maryland, College Park, Maryland, 20740
.cellRangerLikeEmptyDrops <- function(m,
                                      umiMin=500,
                                      umiMinFracMedian=0.01,
                                      candMaxN=20000,
                                      indMax=90000,
                                      indMin=45000,
                                      fdr_thresh=0.01,
                                      maxPercentile=0.99,
                                      nExpectedCells=3000,
                                      maxMinRatio=10,
                                      seed=2718,
                                      ...
                                      ) {
  # This function is an approximate implementation of the
  # `--soloCellFilter EmptyDrops_CR` filtering approach
  # of STARsolo (https://www.biorxiv.org/content/10.1101/2021.05.05.442755v1),
  # which, itself, was reverse engineered from the behavior of
  # CellRanger 3+. The original C++ code on which this
  # function is based can be found at (https://github.com/alexdobin/STAR/blob/master/source/SoloFeature_cellFiltering.cpp)
  ###################################################################################################################
  # Per-barcode total UMI counts, paired with the original column index and
  # sorted in decreasing order so barcodes can be flagged by rank below.
  # NOTE(review): colSums2() is not base R -- presumably matrixStats /
  # DelayedMatrixStats is attached by the package; confirm the import.
  csums <- colSums2(m)
  indCount <- as.data.frame(cbind(1:length(csums), csums))
  colnames(indCount) <- c("index", "count")
  indCount <- indCount[order(indCount$count,decreasing = TRUE),]
  # Simple Filtering: take the UMI count at rank nExpectedCells*(1-maxPercentile)
  # as the reference maximum; every barcode with at least nUMImax/maxMinRatio
  # UMIs passes the simple filter.
  maxind <- round(nExpectedCells * (1 - maxPercentile))
  nUMImax <- indCount$count[min(ncol(m), maxind)]
  nUMImin <- round(nUMImax/maxMinRatio)
  ncellsSimple <- sum(indCount$count>=nUMImin)
  # set lower bound
  # (the fractional index ncellsSimple/2 is truncated by R subsetting, i.e.
  # this reads the count at rank floor(ncellsSimple/2) -- roughly the median
  # UMI count of the simple-filtered set)
  minUMI <- max(umiMin, round(umiMinFracMedian * indCount$count[ncellsSimple/2]))
  ## we at most assign candMaxN samples in the ambient pool as real cells
  minUMI <- max(minUMI, indCount$count[min(ncellsSimple+candMaxN,nrow(indCount))])
  # emptyDrops
  ## ignore: the lower bound of UMI count, samples with UMI count less than ignore
  ## will not be considered as ambient cells.
  ignore_index <- min(ncol(m), indMax)
  ignore <- indCount$count[ignore_index]
  ## by.rank: cells with UMI count ranking lower than by.rank will be considered as
  ## ambient cells
  by.rank <- indMin
  ## retain: samples with UMI count higher than retain will be regarded as cells
  retain <- indCount$count[ncellsSimple]
  ## the cells with total UMI count between ignore and lower will be considered as ambient
  # emptyDrops uses Monte Carlo p-values; fix the seed for reproducibility.
  set.seed(seed)
  e.out <- DropletUtils::emptyDrops(m, by.rank=by.rank, ignore=ignore, retain=retain, alpha=Inf)
  # A barcode is called a cell when its FDR is below the threshold; NA FDR
  # (barcodes emptyDrops did not test) is treated as "not a cell".
  e.out$is.cell <- e.out$FDR < fdr_thresh
  e.out$is.cell[is.na(e.out$is.cell)] <- FALSE
  # further filter cells by minUMI
  e.out$is.cell[indCount[indCount$count<minUMI, "index"]] <- FALSE
  e.out
}
# S4 generic so that both plain matrix-like objects and SummarizedExperiment
# containers can be passed to cellRangerLikeEmptyDrops().
#' @export
#' @rdname cellRangerLikeEmptyDrops
setGeneric("cellRangerLikeEmptyDrops", function(m, ...) standardGeneric("cellRangerLikeEmptyDrops"))
# Default method: any matrix-like object containing raw counts.
#' @export
#' @rdname cellRangerLikeEmptyDrops
setMethod("cellRangerLikeEmptyDrops", "ANY", .cellRangerLikeEmptyDrops)
# SummarizedExperiment method: extract the requested assay (default "counts")
# and delegate to the matrix implementation.
#' @export
#' @rdname cellRangerLikeEmptyDrops
#' @importFrom SummarizedExperiment assay
setMethod("cellRangerLikeEmptyDrops", "SummarizedExperiment", function(m, ..., assay.type="counts") {
  .cellRangerLikeEmptyDrops(assay(m, assay.type), ...)
})
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.