content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
## Coursera Data Science - Exploratory Data Analysis, Course Project 1 (plot1.R).
## Reads the UCI household power consumption data, keeps the observations from
## 2007-02-01 and 2007-02-02, and writes a histogram of Global_active_power
## to plot1.png.
# Path/name of the source data file (expected in the working directory).
input<-"household_power_consumption.txt"
# Read the whole file into "DF"; "?" marks missing values, so the measurement
# columns parse as numeric rather than as factors.
DF<-read.table(input, sep=";", header=TRUE, na.strings = "?", stringsAsFactors=TRUE)
# Parse the day/month/year Date column into a proper Date vector.
DF$FormatedDate<-as.Date(DF$Date, "%d/%m/%Y")
# Subset to the first and second of February 2007.
df<-subset(DF, FormatedDate==as.Date("2007-02-01") | FormatedDate==as.Date("2007-02-02"))
# NOTE(review): with na.strings = "?" this column should already be numeric,
# so the coercion below is presumably a no-op kept for safety -- confirm.
df$Global_active_power <-as.numeric(df$Global_active_power)
# Set the colour used for the histogram bars to red.
plot_colors <- c("Red")
# Open the graphics device - a 480x480 pixel png file.
png("plot1.png",width=480, height=480)
# Plot the histogram of global active power.
hist(df$Global_active_power,col=plot_colors[1], xlab="Global Active Power(Kilowatts)",
main="Global Active Power")
# Close the graphics device
dev.off() | /plot1.R | no_license | YoMT/ExData_Plotting1 | R | false | false | 900 | r | ##Coursera - datascience Exploratory Data Analysis
# Path/name of the source data file (expected in the working directory).
input<-"household_power_consumption.txt"
# Read the whole file into "DF"; "?" marks missing values, so the measurement
# columns parse as numeric rather than as factors.
DF<-read.table(input, sep=";", header=TRUE, na.strings = "?", stringsAsFactors=TRUE)
# Parse the day/month/year Date column into a proper Date vector.
DF$FormatedDate<-as.Date(DF$Date, "%d/%m/%Y")
# Subset to the first and second of February 2007.
df<-subset(DF, FormatedDate==as.Date("2007-02-01") | FormatedDate==as.Date("2007-02-02"))
# NOTE(review): with na.strings = "?" this column should already be numeric,
# so the coercion below is presumably a no-op kept for safety -- confirm.
df$Global_active_power <-as.numeric(df$Global_active_power)
# Set the colour used for the histogram bars to red.
plot_colors <- c("Red")
# Open the graphics device - a 480x480 pixel png file.
png("plot1.png",width=480, height=480)
# Plot the histogram of global active power.
hist(df$Global_active_power,col=plot_colors[1], xlab="Global Active Power(Kilowatts)",
main="Global Active Power")
# Close the graphics device
dev.off() |
# Exploratory Data Analysis - Course Project #1 - Plot3.R
#
# Reads a slice of the UCI household power consumption file, keeps the
# observations from 2007-02-01 and 2007-02-02, and plots the three
# sub-metering series against time into plot3.png.
#
# Memory estimate for a full read: ~2M rows x 9 columns x 8B per numeric
# = ~200MB RAM, so instead of reading everything we skip ahead and read
# only a window. By browsing the raw file it was established that rows
# 60001..70000 safely bracket the two target dates.
skipCnt <- 60000
rows <- 10000
# header=F because skipping rows also skips the header line, so the column
# names are supplied explicitly here.
cols <- c("Date", "Time", "Global_active_power", "Global_reactive_power", "Voltage", "Global_intensity", "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
houseData <- read.table("household_power_consumption.txt", header=F, sep=";", na.strings="?", col.names=cols, skip=skipCnt, nrows=rows)

# Build a full date-time column first (Time is in 24hr format), then convert
# Date. as.POSIXct (rather than strptime, which returns POSIXlt) keeps the
# column as a compact atomic vector -- the recommended date-time type inside
# data frames; the plotted result is identical.
houseData$Time <- as.POSIXct(paste(houseData$Date, houseData$Time), format="%d/%m/%Y %H:%M:%S")
houseData$Date <- as.Date(houseData$Date, "%d/%m/%Y")

# Keep only the two days of interest.
feb1 <- as.Date("2007-02-01")
feb2 <- as.Date("2007-02-02")
twoDays <- houseData[houseData$Date >= feb1 & houseData$Date <= feb2, ]

# Open the png device FIRST, then configure it. par() applies to the
# currently active device, so calling it before png() (as the original code
# did) configured the wrong device and never affected the saved file.
png("plot3.png", width=480, height=480)
par(mfrow = c(1, 1), mar=c(4,4,2,1))

# First series (black) establishes the axes; the other two are overlaid.
with(twoDays, plot(Time, Sub_metering_1, type="l", ylab="Energy sub metering", xlab=""))
with(twoDays, lines(Time, Sub_metering_2, type="l", col="red"))
with(twoDays, lines(Time, Sub_metering_3, type="l", col="blue"))
legend("topright", lty=1, col=c("black", "red", "blue"), legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
| /Plot3.R | no_license | DonaldMGray/ExData_Plotting1 | R | false | false | 1,811 | r | #Exploratory data
#Course Project #1
#Plot3.R
# Memory estimate for a full read: 2M rows, 9 columns, 8B per numeric = ~200MB RAM,
# hence the skip/nrows window below instead of reading the whole file.
#only want data 2007-02-01 and 2007-02-02; by browsing through the table,
# I established that this will capture a range which safely includes those dates
skipCnt <- 60000
rows <- 10000
#need to set header=F because we are skipping rows (the header line is skipped too)
cols <- c("Date", "Time", "Global_active_power", "Global_reactive_power", "Voltage", "Global_intensity", "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
houseData <- read.table("household_power_consumption.txt", header=F, sep=";", na.strings="?", col.names=cols, skip=skipCnt, nrows=rows)
#Misc checking (left commented out; run interactively if needed)
#str(houseData)
#levels(houseData$Date) #check the date range (note the factors are alphabetic)
#sum(is.na(houseData)) #see how many NA's we have
# Convert the date and time columns. Note strptime returns POSIXlt; a POSIXct
# column would be the more conventional choice inside a data frame.
houseData$Time <- strptime( paste(houseData$Date, houseData$Time), "%d/%m/%Y %H:%M:%S") #time is in 24hr format
houseData$Date <- as.Date(houseData$Date, "%d/%m/%Y")
#get data from 2007-02-01 and 2007-02-02
feb1 <- as.Date("2007-02-01")
feb2 <- as.Date("2007-02-02")
twoDays <- houseData[houseData$Date >= feb1 & houseData$Date <= feb2, ]
# NOTE(review): par() configures the CURRENTLY active device, but png() two
# lines below opens a NEW device with default settings -- so these mfrow/mar
# values never reach plot3.png. par() should be called after png(); flagged
# here rather than changed.
par(mfrow = c(1, 1), mar=c(4,4,2,1))
#a line plot - switching to direct print in png because the copy was messing up the legend
png("plot3.png", width=480, height=480)
with(twoDays, plot(Time, Sub_metering_1, type="l", ylab="Energy sub metering", xlab="")) #first plot
with(twoDays, lines(Time, Sub_metering_2, type="l", col="Red")) #add #2 as lines
with(twoDays, lines(Time, Sub_metering_3, type="l", col="Blue")) #add #3 as lines
legend("topright", lty=1, col=c("black", "red", "blue"), legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")) #setup legend
dev.off()
|
/Density Estimation and Vine Copula Model/section1&2/section1.r | no_license | relatecode/Revenue-Insurance | R | false | false | 4,008 | r | ||
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hostname.R
\name{hostname}
\alias{hostname}
\title{Retrieve the hostname of your machine}
\usage{
hostname()
}
\description{
Retrieve the hostname of your machine
}
\examples{
hostname()
}
| /man/hostname.Rd | no_license | daskelly/das | R | false | true | 267 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hostname.R
\name{hostname}
\alias{hostname}
\title{Retrieve the hostname of your machine}
\usage{
hostname()
}
\description{
Retrieve the hostname of your machine
}
\examples{
hostname()
}
|
# Regression input (presumably generated by a fuzzer/valgrind run -- the file
# path suggests libFuzzer): x1 carries 25 zero coordinates while x2, y1 and y2
# are zero-length, exercising the empty-vector path of the routine.
testlist <- list(x1 = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), x2 = numeric(0), y1 = numeric(0), y2 = numeric(0))
# Call the unexported palm:::euc_distances via do.call so the list entries are
# matched to its parameters by name.
result <- do.call(palm:::euc_distances,testlist)
str(result) | /palm/inst/testfiles/euc_distances/libFuzzer_euc_distances/euc_distances_valgrind_files/1612968118-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 212 | r | testlist <- list(x1 = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), x2 = numeric(0), y1 = numeric(0), y2 = numeric(0))
result <- do.call(palm:::euc_distances,testlist)
str(result) |
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# Auto-generated R wrapper for the compiled C++ routine `_BasClu_sampling`.
# Every argument (data, MCMC controls, hyperparameters and the *_seq trace
# containers) is forwarded unchanged to the native code; invisible() discards
# the .Call result, so the wrapper is presumably called for side effects
# performed in C++ -- confirm against the package's src/ sources.
sampling <- function(x, s, NT, nthin, nupd, Nburn, g, nu_g, tausq_g, delta, beta_delta, sigma0sq, beta0, sigma1sq, beta1, z, gammam, clu, beta_pi, alpha_lambda, g_seq, delta_seq, sigma0sq_seq, sigma1sq_seq, z_seq, c_seq, gamma_seq, beta_pi_seq, alpha_lambda_seq, nu_g_seq, tausq_g_seq, beta_delta_seq, beta0_seq, beta1_seq, llambda, lpi, lobs, lother, lpost, m_g, dsq_g, a_g, b_g, alpha_delta, alpha0, alpha1, a_l, b_l, nu1, tau1sq, nu2, tau2sq, alpha_pi) {
invisible(.Call('_BasClu_sampling', PACKAGE = 'BasClu', x, s, NT, nthin, nupd, Nburn, g, nu_g, tausq_g, delta, beta_delta, sigma0sq, beta0, sigma1sq, beta1, z, gammam, clu, beta_pi, alpha_lambda, g_seq, delta_seq, sigma0sq_seq, sigma1sq_seq, z_seq, c_seq, gamma_seq, beta_pi_seq, alpha_lambda_seq, nu_g_seq, tausq_g_seq, beta_delta_seq, beta0_seq, beta1_seq, llambda, lpi, lobs, lother, lpost, m_g, dsq_g, a_g, b_g, alpha_delta, alpha0, alpha1, a_l, b_l, nu1, tau1sq, nu2, tau2sq, alpha_pi))
}
| /R/RcppExports.R | no_license | raivivek/BasClu | R | false | false | 1,084 | r | # Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# Auto-generated R wrapper for the compiled C++ routine `_BasClu_sampling`;
# all arguments are forwarded unchanged and invisible() discards the result.
sampling <- function(x, s, NT, nthin, nupd, Nburn, g, nu_g, tausq_g, delta, beta_delta, sigma0sq, beta0, sigma1sq, beta1, z, gammam, clu, beta_pi, alpha_lambda, g_seq, delta_seq, sigma0sq_seq, sigma1sq_seq, z_seq, c_seq, gamma_seq, beta_pi_seq, alpha_lambda_seq, nu_g_seq, tausq_g_seq, beta_delta_seq, beta0_seq, beta1_seq, llambda, lpi, lobs, lother, lpost, m_g, dsq_g, a_g, b_g, alpha_delta, alpha0, alpha1, a_l, b_l, nu1, tau1sq, nu2, tau2sq, alpha_pi) {
invisible(.Call('_BasClu_sampling', PACKAGE = 'BasClu', x, s, NT, nthin, nupd, Nburn, g, nu_g, tausq_g, delta, beta_delta, sigma0sq, beta0, sigma1sq, beta1, z, gammam, clu, beta_pi, alpha_lambda, g_seq, delta_seq, sigma0sq_seq, sigma1sq_seq, z_seq, c_seq, gamma_seq, beta_pi_seq, alpha_lambda_seq, nu_g_seq, tausq_g_seq, beta_delta_seq, beta0_seq, beta1_seq, llambda, lpi, lobs, lother, lpost, m_g, dsq_g, a_g, b_g, alpha_delta, alpha0, alpha1, a_l, b_l, nu1, tau1sq, nu2, tau2sq, alpha_pi))
}
|
#' Assert that a file exists and then remove it
#'
#' Fails the enclosing testthat test when `file_path` does not exist; when it
#' does exist, the file is deleted so that repeated test runs start clean.
#'
#' @param file_path Path to the file the test is expected to have created.
expect_file_exists <- function(file_path){
  # Stat the file once and reuse the answer: the original checked
  # file.exists() twice, which is redundant and leaves a window between the
  # assertion and the removal.
  found <- file.exists(file_path)
  expect_true(found)
  if (found) file.remove(file_path)
}
| /tests/testthat/utils.R | permissive | alan-turing-institute/DetectorChecker | R | false | false | 221 | r | #' Assert that a file exists and then remove it
#'
#' Fails the enclosing testthat test when the file does not exist; when it
#' does exist, the file is deleted so repeated test runs start clean.
#'
#' @param file_path Path to the file the test is expected to have created.
expect_file_exists <- function(file_path){
# NOTE(review): file.exists() is stat'ed twice (inside expect_true and in the
# guard below); checking once and reusing the result would be tighter.
expect_true(file.exists(file_path))
if (file.exists(file_path)) file.remove(file_path)
}
|
\name{Zero adjusted Dirichlet regression}
\alias{zadr}
\alias{zadr2}
\title{
Zero adjusted Dirichlet regression
}
\description{
Zero adjusted Dirichlet regression.
}
\usage{
zadr(y, x, con = TRUE, B = 1, ncores = 2, xnew = NULL)
zadr2(y, x, con = TRUE, B = 1, ncores = 2, xnew = NULL)
}
\arguments{
\item{y}{
A matrix with the compositional data (dependent variable). The number of observations
(vectors) with no zero values should be more than the columns of the predictor variables.
Otherwise, the initial values will not be calculated.
}
\item{x}{
The predictor variable(s), they can be either continnuous or categorical or both.
}
\item{con}{
If this is TRUE (default) then the constant term is estimated, otherwise the model includes no constant term.
}
\item{B}{
If B is greater than 1 bootstrap estimates of the standard error are returned.
If you set this greater than 1, then you must define the number of clusters in
order to run in parallel.
}
\item{ncores}{
The number of cores to use when B>1. This is to be used for the
case of bootstrap. If B = 1, this is not taken into consideration.
If this does not work then you might need to load the doParallel yourselves.
}
\item{xnew}{
If you have new data use it, otherwise leave it NULL.
}
}
\details{
A zero adjusted Dirichlet regression is fitted. The likelihood consists of two components.
The contributions of the non zero compositional values and the contributions of the compositional
vectors with at least one zero value. The second component may have many different sub-categories,
one for each pattern of zeros. The function "zadr2()" links the covariates to the alpha parameters
of the Dirichlet distribution, i.e. it uses the classical parametrization of the distribution.
This means, that there is a set of regression parameters for each component.
}
\value{
A list including:
\item{runtime}{
The time required by the regression.
}
\item{loglik}{
The value of the log-likelihood.
}
\item{phi}{
The precision parameter.
}
\item{be}{
The beta coefficients.
}
\item{seb}{
The standard error of the beta coefficients.
}
\item{sigma}{
The covariance matrix of the regression parameters (for the mean vector and the phi parameter).
}
\item{est}{
The fitted or the predicted values (if xnew is not NULL).
}
}
\references{
Tsagris M. and Stewart C. (2018). A Dirichlet regression model for compositional data with zeros.
Lobachevskii Journal of Mathematics,39(3): 398--412.
Preprint available from https://arxiv.org/pdf/1410.5011.pdf
}
\author{
Michail Tsagris.
R implementation and documentation: Michail Tsagris \email{mtsagris@uoc.gr}.
}
%\note{
%% ~~further notes~~
%}
\seealso{
\code{\link{zad.est}, \link{diri.reg}, \link{kl.compreg}, \link{ols.compreg}, \link{alfa.reg}
}
}
\examples{
x <- as.vector(iris[, 4])
y <- as.matrix(iris[, 1:3])
y <- y / rowSums(y)
mod1 <- diri.reg(y, x)
y[sample(1:450, 15) ] <- 0
mod2 <- zadr(y, x)
}
| /man/zadr.Rd | no_license | cran/Compositional | R | false | false | 3,025 | rd | \name{Zero adjusted Dirichlet regression}
\alias{zadr}
\alias{zadr2}
\title{
Zero adjusted Dirichlet regression
}
\description{
Zero adjusted Dirichlet regression.
}
\usage{
zadr(y, x, con = TRUE, B = 1, ncores = 2, xnew = NULL)
zadr2(y, x, con = TRUE, B = 1, ncores = 2, xnew = NULL)
}
\arguments{
\item{y}{
A matrix with the compositional data (dependent variable). The number of observations
(vectors) with no zero values should be more than the columns of the predictor variables.
Otherwise, the initial values will not be calculated.
}
\item{x}{
The predictor variable(s), they can be either continnuous or categorical or both.
}
\item{con}{
If this is TRUE (default) then the constant term is estimated, otherwise the model includes no constant term.
}
\item{B}{
If B is greater than 1 bootstrap estimates of the standard error are returned.
If you set this greater than 1, then you must define the number of clusters in
order to run in parallel.
}
\item{ncores}{
The number of cores to use when B>1. This is to be used for the
case of bootstrap. If B = 1, this is not taken into consideration.
If this does not work then you might need to load the doParallel yourselves.
}
\item{xnew}{
If you have new data use it, otherwise leave it NULL.
}
}
\details{
A zero adjusted Dirichlet regression is fitted. The likelihood consists of two components.
The contributions of the non zero compositional values and the contributions of the compositional
vectors with at least one zero value. The second component may have many different sub-categories,
one for each pattern of zeros. The function "zadr2()" links the covariates to the alpha parameters
of the Dirichlet distribution, i.e. it uses the classical parametrization of the distribution.
This means, that there is a set of regression parameters for each component.
}
\value{
A list including:
\item{runtime}{
The time required by the regression.
}
\item{loglik}{
The value of the log-likelihood.
}
\item{phi}{
The precision parameter.
}
\item{be}{
The beta coefficients.
}
\item{seb}{
The standard error of the beta coefficients.
}
\item{sigma}{
The covariance matrix of the regression parameters (for the mean vector and the phi parameter).
}
\item{est}{
The fitted or the predicted values (if xnew is not NULL).
}
}
\references{
Tsagris M. and Stewart C. (2018). A Dirichlet regression model for compositional data with zeros.
Lobachevskii Journal of Mathematics,39(3): 398--412.
Preprint available from https://arxiv.org/pdf/1410.5011.pdf
}
\author{
Michail Tsagris.
R implementation and documentation: Michail Tsagris \email{mtsagris@uoc.gr}.
}
%\note{
%% ~~further notes~~
%}
\seealso{
\code{\link{zad.est}, \link{diri.reg}, \link{kl.compreg}, \link{ols.compreg}, \link{alfa.reg}
}
}
\examples{
x <- as.vector(iris[, 4])
y <- as.matrix(iris[, 1:3])
y <- y / rowSums(y)
mod1 <- diri.reg(y, x)
y[sample(1:450, 15) ] <- 0
mod2 <- zadr(y, x)
}
|
# Legacy proto-based ggplot2 scale object: a continuous colour scale that
# interpolates linearly between one low and one high colour. Training,
# breaks and limits behaviour are inherited from ScaleContinuous; only the
# colour mapping itself is defined here.
ScaleGradient <- proto(ScaleContinuous, expr={
aliases <- c("scale_colour_continuous", "scale_fill_continuous", "scale_color_continuous", "scale_color_gradient")
# Constructor: forwards to ScaleContinuous$new with default endpoint colours.
new <- function(., name = NULL, low = "#3B4FB8", high = "#B71B1A", space = "rgb", ...) {
.super$new(., name = name, low = low, high = high, space = space, ...)
}
# Map data values onto colours: values outside the trained input domain
# become NA, the rest are rescaled to [0, 1] and pushed through the
# low -> high colour ramp.
map <- function(., x) {
ramp <- colorRamp(c(.$low, .$high), space=.$space, interpolate="linear")
domain <- .$input_set()
x[x < domain[1] | x > domain[2]] <- NA
x <- (x - domain[1]) / diff(domain)
nice_ramp(ramp, x)
}
# Colours at the break positions (used when drawing the legend/guide).
output_breaks <- function(.) {
.$map(.$input_breaks())
}
# Aesthetics this scale can serve.
common <- c("colour", "fill")
# Documentation -----------------------------------------------
objname <- "gradient"
desc <- "Smooth gradient between two colours"
icon <- function(.) {
g <- scale_fill_gradient()
g$train(1:5)
rectGrob(c(0.1, 0.3, 0.5, 0.7, 0.9), width=0.21,
gp=gpar(fill=g$map(1:5), col=NA)
)
}
desc_params <- list(
low = "colour at low end of scale",
high = "colour at high end of scale",
space = "colour space to interpolate through, rgb or Lab, see ?colorRamp for details",
interpolate = "type of interpolation to use, linear or spline, see ?colorRamp for more details"
)
seealso <- list(
"scale_gradient2" = "continuous colour scale with midpoint",
"colorRamp" = "for details of interpolation algorithm"
)
examples <- function(.) {
# It's hard to see, but look for the bright yellow dot
# in the bottom right hand corner
dsub <- subset(diamonds, x > 5 & x < 6 & y > 5 & y < 6)
(d <- qplot(x, y, data=dsub, colour=z))
# That one point throws our entire scale off. We could
# remove it, or manually tweak the limits of the scale
# Tweak scale limits. Any points outside these
# limits will not be plotted, but will continue to affect the
# calculate of statistics, etc
d + scale_colour_gradient(limits=c(3, 10))
d + scale_colour_gradient(limits=c(3, 4))
# Setting the limits manually is also useful when producing
# multiple plots that need to be comparable
# Alternatively we could try transforming the scale:
d + scale_colour_gradient(trans = "log")
d + scale_colour_gradient(trans = "sqrt")
# Other more trivial manipulations, including changing the name
# of the scale and the colours.
d + scale_colour_gradient("Depth")
d + scale_colour_gradient(expression(Depth[mm]))
d + scale_colour_gradient(limits=c(3, 4), low="red")
d + scale_colour_gradient(limits=c(3, 4), low="red", high="white")
# Much slower
d + scale_colour_gradient(limits=c(3, 4), low="red", high="white", space="Lab")
d + scale_colour_gradient(limits=c(3, 4), space="Lab")
# scale_fill_continuous works similarly, but for fill colours
(h <- qplot(x - y, data=dsub, geom="histogram", binwidth=0.01, fill=..count..))
h + scale_fill_continuous(low="black", high="pink", limits=c(0,3100))
}
})
# Legacy proto-based ggplot2 scale object: a diverging continuous colour
# scale through three colours (low -> mid -> high) centred on `midpoint`.
ScaleGradient2 <- proto(ScaleContinuous, expr={
# Constructor: forwards to ScaleContinuous$new; muted() defaults give a
# red/white/blue diverging palette around midpoint = 0.
new <- function(., name = NULL, low = muted("red"), mid = "white", high = muted("blue"), midpoint = 0, space = "rgb", ...) {
.super$new(., name = name, low = low, mid = mid, high = high,
midpoint = midpoint, space = space, ...)
}
aliases <- c("scale_color_gradient2")
# Map data values onto colours: values outside the trained input domain
# become NA; the rest are centred on midpoint, scaled by the larger
# half-range so the midpoint lands at 0.5 on the three-colour ramp --
# hence equal magnitudes on either side get equal intensity.
map <- function(., x) {
rng <- .$output_set() - .$midpoint
extent <- max(abs(rng))
domain <- .$input_set()
x[x < domain[1] | x > domain[2]] <- NA
ramp <- colorRamp(c(.$low, .$mid, .$high), space=.$space, interpolate="linear")
x <- x - .$midpoint
x <- x / extent / 2 + 0.5
nice_ramp(ramp, x)
}
objname <-"gradient2"
# Aesthetics this scale can serve.
common <- c("colour", "fill")
desc <- "Smooth gradient between three colours (high, low and midpoints)"
# Colours at the break positions (used when drawing the legend/guide).
output_breaks <- function(.) .$map(.$input_breaks())
icon <- function(.) {
g <- scale_fill_gradient2()
g$train(1:5 - 3)
rectGrob(c(0.1, 0.3, 0.5, 0.7, 0.9), width=0.21,
gp=gpar(fill=g$map(1:5 - 3), col=NA)
)
}
desc_params <- list(
low = "colour at low end of scale",
mid = "colour at mid point of scale",
high = "colour at high end of scale",
midpoint = "position of mid point of scale, defaults to 0",
space = "colour space to interpolate through, rgb or Lab, see ?colorRamp for details",
interpolate = "type of interpolation to use, linear or spline, see ?colorRamp for more details"
)
seealso <- list(
"scale_gradient" = "continuous colour scale",
"colorRamp" = "for details of interpolation algorithm"
)
examples <- function(.) {
dsub <- subset(diamonds, x > 5 & x < 6 & y > 5 & y < 6)
dsub$diff <- with(dsub, sqrt(abs(x-y))* sign(x-y))
(d <- qplot(x, y, data=dsub, colour=diff))
d + scale_colour_gradient2()
# Change scale name
d + scale_colour_gradient2(expression(sqrt(abs(x - y))))
d + scale_colour_gradient2("Difference\nbetween\nwidth and\nheight")
# Change limits and colours
d + scale_colour_gradient2(limits=c(-0.2, 0.2))
# Using "muted" colours makes for pleasant graphics
# (and they have better perceptual properties too)
d + scale_colour_gradient2(low="red", high="blue")
d + scale_colour_gradient2(low=muted("red"), high=muted("blue"))
# Using the Lab colour space also improves perceptual properties
# at the price of slightly slower operation
d + scale_colour_gradient2(space="Lab")
# About 5% of males are red-green colour blind, so it's a good
# idea to avoid that combination
d + scale_colour_gradient2(high=muted("green"))
# We can also make the middle stand out
d + scale_colour_gradient2(mid=muted("green"), high="white", low="white")
# or use a non zero mid point
(d <- qplot(carat, price, data=diamonds, colour=price/carat))
d + scale_colour_gradient2(midpoint=mean(diamonds$price / diamonds$carat))
# Fill gradients work much the same way
p <- qplot(letters[1:5], 1:5, fill= c(-3, 3, 5, 2, -2), geom="bar")
p + scale_fill_gradient2("fill")
# Note how positive and negative values of the same magnitude
# have similar intensity
}
})
# Legacy proto-based ggplot2 scale object: a continuous colour scale through
# an arbitrary vector of colours, optionally anchored at caller-supplied
# positions via `values`.
ScaleGradientn <- proto(ScaleContinuous, expr={
new <- function(., name=NULL, colours, values = NULL, rescale = TRUE, space="rgb", ...) {
.super$new(.,
name = name,
colours = colours, values = values, rescale = rescale,
space = space, ...,
)
}
aliases <- c("scale_color_gradientn")
# Map data values onto colours. When $rescale is TRUE, inputs are first
# rescaled to [0, 1] over the trained domain; when $values is supplied,
# approxfun remaps those anchor positions onto an even grid before the
# multi-colour ramp is applied.
map <- function(., x) {
if (.$rescale) x <- rescale(x, c(0, 1), .$input_set())
if (!is.null(.$values)) {
xs <- seq(0, 1, length = length(.$values))
f <- approxfun(.$values, xs)
x <- f(x)
}
ramp <- colorRamp(.$colours, space=.$space, interpolate="linear")
nice_ramp(ramp, x)
}
objname <- "gradientn"
# Aesthetics this scale can serve.
common <- c("colour", "fill")
desc <- "Smooth gradient between n colours"
# Colours at the break positions (used when drawing the legend/guide).
output_breaks <- function(.) .$map(.$input_breaks())
icon <- function(.) {
g <- scale_fill_gradientn(colours = rainbow(7))
g$train(1:5)
rectGrob(c(0.1, 0.3, 0.5, 0.7, 0.9), width=0.21,
gp=gpar(fill = g$map(1:5), col=NA)
)
}
desc_params <- list(
space = "colour space to interpolate through, rgb or Lab, see ?colorRamp for details",
interpolate = "type of interpolation to use, linear or spline, see ?colorRamp for more details"
)
seealso <- list(
"scale_gradient" = "continuous colour scale with midpoint",
"colorRamp" = "for details of interpolation algorithm"
)
examples <- function(.) {
# scale_colour_gradient make it easy to use existing colour palettes
dsub <- subset(diamonds, x > 5 & x < 6 & y > 5 & y < 6)
dsub$diff <- with(dsub, sqrt(abs(x-y))* sign(x-y))
(d <- qplot(x, y, data=dsub, colour=diff))
d + scale_colour_gradientn(colour = rainbow(7))
breaks <- c(-0.5, 0, 0.5)
d + scale_colour_gradientn(colour = rainbow(7),
breaks = breaks, labels = format(breaks))
d + scale_colour_gradientn(colour = topo.colors(10))
d + scale_colour_gradientn(colour = terrain.colors(10))
# You can force them to be symmetric by supplying a vector of
# values, and turning rescaling off
max_val <- max(abs(dsub$diff))
values <- seq(-max_val, max_val, length = 11)
d + scale_colour_gradientn(colours = topo.colors(10),
values = values, rescale = FALSE)
d + scale_colour_gradientn(colours = terrain.colors(10),
values = values, rescale = FALSE)
}
}) | /R/scale-continuous-colour.r | no_license | strongh/ggplot2 | R | false | false | 8,647 | r | ScaleGradient <- proto(ScaleContinuous, expr={
aliases <- c("scale_colour_continuous", "scale_fill_continuous", "scale_color_continuous", "scale_color_gradient")
new <- function(., name = NULL, low = "#3B4FB8", high = "#B71B1A", space = "rgb", ...) {
.super$new(., name = name, low = low, high = high, space = space, ...)
}
map <- function(., x) {
ramp <- colorRamp(c(.$low, .$high), space=.$space, interpolate="linear")
domain <- .$input_set()
x[x < domain[1] | x > domain[2]] <- NA
x <- (x - domain[1]) / diff(domain)
nice_ramp(ramp, x)
}
output_breaks <- function(.) {
.$map(.$input_breaks())
}
common <- c("colour", "fill")
# Documentation -----------------------------------------------
objname <- "gradient"
desc <- "Smooth gradient between two colours"
icon <- function(.) {
g <- scale_fill_gradient()
g$train(1:5)
rectGrob(c(0.1, 0.3, 0.5, 0.7, 0.9), width=0.21,
gp=gpar(fill=g$map(1:5), col=NA)
)
}
desc_params <- list(
low = "colour at low end of scale",
high = "colour at high end of scale",
space = "colour space to interpolate through, rgb or Lab, see ?colorRamp for details",
interpolate = "type of interpolation to use, linear or spline, see ?colorRamp for more details"
)
seealso <- list(
"scale_gradient2" = "continuous colour scale with midpoint",
"colorRamp" = "for details of interpolation algorithm"
)
examples <- function(.) {
# It's hard to see, but look for the bright yellow dot
# in the bottom right hand corner
dsub <- subset(diamonds, x > 5 & x < 6 & y > 5 & y < 6)
(d <- qplot(x, y, data=dsub, colour=z))
# That one point throws our entire scale off. We could
# remove it, or manually tweak the limits of the scale
# Tweak scale limits. Any points outside these
# limits will not be plotted, but will continue to affect the
# calculate of statistics, etc
d + scale_colour_gradient(limits=c(3, 10))
d + scale_colour_gradient(limits=c(3, 4))
# Setting the limits manually is also useful when producing
# multiple plots that need to be comparable
# Alternatively we could try transforming the scale:
d + scale_colour_gradient(trans = "log")
d + scale_colour_gradient(trans = "sqrt")
# Other more trivial manipulations, including changing the name
# of the scale and the colours.
d + scale_colour_gradient("Depth")
d + scale_colour_gradient(expression(Depth[mm]))
d + scale_colour_gradient(limits=c(3, 4), low="red")
d + scale_colour_gradient(limits=c(3, 4), low="red", high="white")
# Much slower
d + scale_colour_gradient(limits=c(3, 4), low="red", high="white", space="Lab")
d + scale_colour_gradient(limits=c(3, 4), space="Lab")
# scale_fill_continuous works similarly, but for fill colours
(h <- qplot(x - y, data=dsub, geom="histogram", binwidth=0.01, fill=..count..))
h + scale_fill_continuous(low="black", high="pink", limits=c(0,3100))
}
})
ScaleGradient2 <- proto(ScaleContinuous, expr={
new <- function(., name = NULL, low = muted("red"), mid = "white", high = muted("blue"), midpoint = 0, space = "rgb", ...) {
.super$new(., name = name, low = low, mid = mid, high = high,
midpoint = midpoint, space = space, ...)
}
aliases <- c("scale_color_gradient2")
map <- function(., x) {
rng <- .$output_set() - .$midpoint
extent <- max(abs(rng))
domain <- .$input_set()
x[x < domain[1] | x > domain[2]] <- NA
ramp <- colorRamp(c(.$low, .$mid, .$high), space=.$space, interpolate="linear")
x <- x - .$midpoint
x <- x / extent / 2 + 0.5
nice_ramp(ramp, x)
}
objname <-"gradient2"
common <- c("colour", "fill")
desc <- "Smooth gradient between three colours (high, low and midpoints)"
output_breaks <- function(.) .$map(.$input_breaks())
icon <- function(.) {
g <- scale_fill_gradient2()
g$train(1:5 - 3)
rectGrob(c(0.1, 0.3, 0.5, 0.7, 0.9), width=0.21,
gp=gpar(fill=g$map(1:5 - 3), col=NA)
)
}
desc_params <- list(
low = "colour at low end of scale",
mid = "colour at mid point of scale",
high = "colour at high end of scale",
midpoint = "position of mid point of scale, defaults to 0",
space = "colour space to interpolate through, rgb or Lab, see ?colorRamp for details",
interpolate = "type of interpolation to use, linear or spline, see ?colorRamp for more details"
)
seealso <- list(
"scale_gradient" = "continuous colour scale",
"colorRamp" = "for details of interpolation algorithm"
)
examples <- function(.) {
dsub <- subset(diamonds, x > 5 & x < 6 & y > 5 & y < 6)
dsub$diff <- with(dsub, sqrt(abs(x-y))* sign(x-y))
(d <- qplot(x, y, data=dsub, colour=diff))
d + scale_colour_gradient2()
# Change scale name
d + scale_colour_gradient2(expression(sqrt(abs(x - y))))
d + scale_colour_gradient2("Difference\nbetween\nwidth and\nheight")
# Change limits and colours
d + scale_colour_gradient2(limits=c(-0.2, 0.2))
# Using "muted" colours makes for pleasant graphics
# (and they have better perceptual properties too)
d + scale_colour_gradient2(low="red", high="blue")
d + scale_colour_gradient2(low=muted("red"), high=muted("blue"))
# Using the Lab colour space also improves perceptual properties
# at the price of slightly slower operation
d + scale_colour_gradient2(space="Lab")
# About 5% of males are red-green colour blind, so it's a good
# idea to avoid that combination
d + scale_colour_gradient2(high=muted("green"))
# We can also make the middle stand out
d + scale_colour_gradient2(mid=muted("green"), high="white", low="white")
# or use a non zero mid point
(d <- qplot(carat, price, data=diamonds, colour=price/carat))
d + scale_colour_gradient2(midpoint=mean(diamonds$price / diamonds$carat))
# Fill gradients work much the same way
p <- qplot(letters[1:5], 1:5, fill= c(-3, 3, 5, 2, -2), geom="bar")
p + scale_fill_gradient2("fill")
# Note how positive and negative values of the same magnitude
# have similar intensity
}
})
ScaleGradientn <- proto(ScaleContinuous, expr={
new <- function(., name=NULL, colours, values = NULL, rescale = TRUE, space="rgb", ...) {
.super$new(.,
name = name,
colours = colours, values = values, rescale = rescale,
space = space, ...,
)
}
aliases <- c("scale_color_gradientn")
map <- function(., x) {
if (.$rescale) x <- rescale(x, c(0, 1), .$input_set())
if (!is.null(.$values)) {
xs <- seq(0, 1, length = length(.$values))
f <- approxfun(.$values, xs)
x <- f(x)
}
ramp <- colorRamp(.$colours, space=.$space, interpolate="linear")
nice_ramp(ramp, x)
}
objname <- "gradientn"
common <- c("colour", "fill")
desc <- "Smooth gradient between n colours"
output_breaks <- function(.) .$map(.$input_breaks())
icon <- function(.) {
g <- scale_fill_gradientn(colours = rainbow(7))
g$train(1:5)
rectGrob(c(0.1, 0.3, 0.5, 0.7, 0.9), width=0.21,
gp=gpar(fill = g$map(1:5), col=NA)
)
}
desc_params <- list(
space = "colour space to interpolate through, rgb or Lab, see ?colorRamp for details",
interpolate = "type of interpolation to use, linear or spline, see ?colorRamp for more details"
)
seealso <- list(
"scale_gradient" = "continuous colour scale with midpoint",
"colorRamp" = "for details of interpolation algorithm"
)
examples <- function(.) {
# scale_colour_gradient make it easy to use existing colour palettes
dsub <- subset(diamonds, x > 5 & x < 6 & y > 5 & y < 6)
dsub$diff <- with(dsub, sqrt(abs(x-y))* sign(x-y))
(d <- qplot(x, y, data=dsub, colour=diff))
d + scale_colour_gradientn(colour = rainbow(7))
breaks <- c(-0.5, 0, 0.5)
d + scale_colour_gradientn(colour = rainbow(7),
breaks = breaks, labels = format(breaks))
d + scale_colour_gradientn(colour = topo.colors(10))
d + scale_colour_gradientn(colour = terrain.colors(10))
# You can force them to be symmetric by supplying a vector of
# values, and turning rescaling off
max_val <- max(abs(dsub$diff))
values <- seq(-max_val, max_val, length = 11)
d + scale_colour_gradientn(colours = topo.colors(10),
values = values, rescale = FALSE)
d + scale_colour_gradientn(colours = terrain.colors(10),
values = values, rescale = FALSE)
}
}) |
# The purpose of this project is to demonstrate your ability to collect, work with, and clean a data set.
# The goal is to prepare tidy data that can be used for later analysis. You will be graded by your peers
# on a series of yes/no questions related to the project. You will be required to submit: 1) a tidy data set
# as described below, 2) a link to a Github repository with your script for performing the analysis, and 3)
# a code book that describes the variables, the data, and any transformations or work that you performed to
# clean up the data called CodeBook.md. You should also include a README.md in the repo with your scripts.
# This repo explains how all of the scripts work and how they are connected.
#
# One of the most exciting areas in all of data science right now is wearable computing - see for example
# this article . Companies like Fitbit, Nike, and Jawbone Up are racing to develop the most advanced algorithms
# to attract new users. The data linked to from the course website represent data collected from the accelerometers
# from the Samsung Galaxy S smartphone. A full description is available at the site where the data was obtained:
#
# http://archive.ics.uci.edu/ml/datasets/Human+Activity+Recognition+Using+Smartphones
#
# Here are the data for the project:
#
# https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip
#
# You should create one R script called run_analysis.R that does the following.
# 1.Merges the training and the test sets to create one data set.
# 2.Extracts only the measurements on the mean and standard deviation for each measurement.
# 3.Uses descriptive activity names to name the activities in the data set
# 4.Appropriately labels the data set with descriptive variable names.
# 5.Creates a second, independent tidy data set with the average of each variable for each activity and each subject.
#setwd("c:/temp")
# Create a folder to save the zip file on it
# Create a local ./data folder (if needed) and download the raw UCI HAR zip into it.
if (!file.exists("./data")) {dir.create("./data")}
zipUrl= "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
zipFile = "./data/data.zip"
# NOTE(review): requires network access; method="auto" lets R pick the
# download mechanism appropriate for the platform.
download.file (zipUrl, destfile=zipFile, method="auto")
# Unzip the file into the current working directory (unzip() default exdir = ".")
unzip (zipFile)
# As a result, on the root there will be a new directory called UCI HAR Dataset and inside it there will
# be two folders called test and train with all the files we will use to make the merge
# NOTE(review): setwd() mutates global session state; every relative path
# below is resolved inside "UCI HAR Dataset".
setwd("./UCI HAR Dataset")
# Now we will merge the information of the subject, activity and results.
# And we will do it for the train and for the test
# Read the training split: subject_train.txt and y_train.txt each hold one
# column per row (subject id / activity code), loaded with explicit column
# names; X_train.txt holds the numeric feature values, which read.table
# auto-names V1, V2, ... in file order.
# NOTE(review): header=F uses the reassignable shortcut F for FALSE.
subject_train = read.csv ("./train/subject_train.txt",header=F, col.names="subject");
x_train <- read.table ("./train/X_train.txt",comment.char = "",colClasses="numeric");
y_train = read.csv ("./train/y_train.txt",header=F, col.names="activity");
# Put subject and activity in front of the feature columns.
train <- cbind (subject_train, y_train,x_train)
# Same three files for the test split.
subject_test = read.csv ("./test/subject_test.txt",header=F, col.names="subject");
x_test <- read.table ("./test/X_test.txt",comment.char = "",colClasses="numeric");
y_test = read.csv ("./test/y_test.txt",header=F, col.names="activity");
test <- cbind (subject_test, y_test,x_test )
# 1.Merges the training and the test sets to create one data set.
# To join two data frames (datasets) vertically, use the rbind function.
# The two data frames must have the same variables, but they do not have to be in the same order
mergedData <- rbind(train, test)
# 2.Extracts only the measurements on the mean and standard deviation for each measurement.
# If you take a look at the features.txt file, you will find what we need to extract. As we have also
# at the begining of each line the subject and the analysis, we have to add 2 to each number
# to get the correct position
#1 tBodyAcc-mean()-X
#2 tBodyAcc-mean()-Y
#3 tBodyAcc-mean()-Z
#4 tBodyAcc-std()-X
#5 tBodyAcc-std()-Y
#6 tBodyAcc-std()-Z
#41 tGravityAcc-mean()-X
#42 tGravityAcc-mean()-Y
#43 tGravityAcc-mean()-Z
#44 tGravityAcc-std()-X
#45 tGravityAcc-std()-Y
#46 tGravityAcc-std()-Z
#81 tBodyAccJerk-mean()-X
#82 tBodyAccJerk-mean()-Y
#83 tBodyAccJerk-mean()-Z
#84 tBodyAccJerk-std()-X
#85 tBodyAccJerk-std()-Y
#86 tBodyAccJerk-std()-Z
#121 tBodyGyro-mean()-X
#122 tBodyGyro-mean()-Y
#123 tBodyGyro-mean()-Z
#124 tBodyGyro-std()-X
#125 tBodyGyro-std()-Y
#126 tBodyGyro-std()-Z
#161 tBodyGyroJerk-mean()-X
#162 tBodyGyroJerk-mean()-Y
#163 tBodyGyroJerk-mean()-Z
#164 tBodyGyroJerk-std()-X
#165 tBodyGyroJerk-std()-Y
#166 tBodyGyroJerk-std()-Z
#201 tBodyAccMag-mean()
#202 tBodyAccMag-std()
#214 tGravityAccMag-mean()
#215 tGravityAccMag-std()
#227 tBodyAccJerkMag-mean()
#228 tBodyAccJerkMag-std()
#240 tBodyGyroMag-mean()
#241 tBodyGyroMag-std()
#253 tBodyGyroJerkMag-mean()
#254 tBodyGyroJerkMag-std()
#266 fBodyAcc-mean()-X
#267 fBodyAcc-mean()-Y
#268 fBodyAcc-mean()-Z
#269 fBodyAcc-std()-X
#270 fBodyAcc-std()-Y
#271 fBodyAcc-std()-Z
#345 fBodyAccJerk-mean()-X
#346 fBodyAccJerk-mean()-Y
#347 fBodyAccJerk-mean()-Z
#348 fBodyAccJerk-std()-X
#349 fBodyAccJerk-std()-Y
#350 fBodyAccJerk-std()-Z
#424 fBodyGyro-mean()-X
#425 fBodyGyro-mean()-Y
#426 fBodyGyro-mean()-Z
#427 fBodyGyro-std()-X
#428 fBodyGyro-std()-Y
#429 fBodyGyro-std()-Z
#503 fBodyAccMag-mean()
#504 fBodyAccMag-std()
#516 fBodyBodyAccJerkMag-mean()
#517 fBodyBodyAccJerkMag-std()
#529 fBodyBodyGyroMag-mean()
#530 fBodyBodyGyroMag-std()
#542 fBodyBodyGyroJerkMag-mean()
#543 fBodyBodyGyroJerkMag-std()
# Feature k from features.txt lives in the column NAMED "Vk": read.table's
# default names (V1..V561) survive the cbind with subject/activity, so
# selection must use the feature's own index.  The original added 2 to each
# feature number -- correct for column *positions* (subject and activity
# occupy slots 1-2) but wrong for *names* -- and therefore silently selected
# features 3..8, 43..48, ... instead of the documented mean()/std() set,
# then mislabelled them.  Selecting "V<k>" by the true feature index fixes it.
meanStdFeatures <- c(1:6, 41:46, 81:86, 121:126, 161:166,
                     201, 202, 214, 215, 227, 228, 240, 241, 253, 254,
                     266:271, 345:350, 424:429,
                     503, 504, 516, 517, 529, 530, 542, 543)
df <- mergedData[, c("subject", "activity", paste0("V", meanStdFeatures))]
# 3.Uses descriptive activity names to name the activities in the data set
# Map the numeric activity codes 1-6 onto their descriptive labels.  Indexing
# the label vector by the code replaces the six separate conditional
# assignments and converts the column to character in a single step.
activityLabels <- c("WALKING", "WALKING_UPSTAIRS", "WALKING_DOWNSTAIRS",
                    "SITTING", "STANDING", "LAYING")
df$activity <- activityLabels[df$activity]
# 4.Appropriately labels the data set with descriptive variable names.
# 68 descriptive column names, in the exact order of the columns selected
# above: subject, activity, then the 66 mean()/std() measurements.
arr_cols <- c("subject", "activity",
              "tBodyAcc_mean_X","tBodyAcc_mean_Y","tBodyAcc_mean_Z",
              "tBodyAcc_std_X","tBodyAcc_std_Y","tBodyAcc_std_Z",
              "tGravityAcc_mean_X","tGravityAcc_mean_Y","tGravityAcc_mean_Z",
              "tGravityAcc_std_X","tGravityAcc_std_Y","tGravityAcc_std_Z",
              "tBodyAccJerk_mean_X","tBodyAccJerk_mean_Y","tBodyAccJerk_mean_Z",
              "tBodyAccJerk_std_X","tBodyAccJerk_std_Y","tBodyAccJerk_std_Z",
              "tBodyGyro_mean_X","tBodyGyro_mean_Y","tBodyGyro_mean_Z",
              "tBodyGyro_std_X","tBodyGyro_std_Y","tBodyGyro_std_Z",
              "tBodyGyroJerk_mean_X","tBodyGyroJerk_mean_Y","tBodyGyroJerk_mean_Z",
              "tBodyGyroJerk_std_X","tBodyGyroJerk_std_Y","tBodyGyroJerk_std_Z",
              "tBodyAccMag_mean","tBodyAccMag_std",
              "tGravityAccMag_mean","tGravityAccMag_std",
              "tBodyAccJerkMag_mean","tBodyAccJerkMag_std",
              "tBodyGyroMag_mean","tBodyGyroMag_std",
              "tBodyGyroJerkMag_mean","tBodyGyroJerkMag_std",
              "fBodyAcc_mean_X","fBodyAcc_mean_Y","fBodyAcc_mean_Z",
              "fBodyAcc_std_X","fBodyAcc_std_Y","fBodyAcc_std_Z",
              "fBodyAccJerk_mean_X","fBodyAccJerk_mean_Y","fBodyAccJerk_mean_Z",
              "fBodyAccJerk_std_X","fBodyAccJerk_std_Y","fBodyAccJerk_std_Z",
              "fBodyGyro_mean_X","fBodyGyro_mean_Y","fBodyGyro_mean_Z",
              "fBodyGyro_std_X","fBodyGyro_std_Y","fBodyGyro_std_Z",
              "fBodyAccMag_mean","fBodyAccMag_std",
              "fBodyBodyAccJerkMag_mean","fBodyBodyAccJerkMag_std",
              "fBodyBodyGyroMag_mean","fBodyBodyGyroMag_std",
              "fBodyBodyGyroJerkMag_mean","fBodyBodyGyroJerkMag_std")
# Assign the whole header in one vectorised step.  (The original looped from
# index 3; entries 1-2 already match the existing subject/activity names, so
# assigning all 68 at once is equivalent and idiomatic.)
names(df) <- arr_cols
# 5.Creates a second, independent tidy data set with the average of each variable for each activity and each subject.
# A single aggregate() over all measurement columns (everything except the
# first two, subject and activity) replaces the 66 hand-written per-column
# calls, and also fixes a naming bug: the original seeded `result` with the
# raw output of the first aggregate(), leaving its first measurement column
# named "x" instead of "tBodyAcc_mean_X".  Group ordering is identical to the
# original (aggregate orders rows by the grouping variables).
result <- aggregate(df[, -(1:2)],
                    by = list(subject = df$subject, activity = df$activity),
                    FUN = mean)
# Write the full tidy data set and the per-(subject, activity) means one
# level above the "UCI HAR Dataset" directory.
write.table(df, "../tidy_data.txt", sep="\t")
write.table(result, "../result_mean.txt", sep="\t")
| /run_analysis.R | no_license | dalvarom/getting_and_cleaning_data | R | false | false | 17,431 | r | # The purpose of this project is to demonstrate your ability to collect, work with, and clean a data set.
# The goal is to prepare tidy data that can be used for later analysis. You will be graded by your peers
# on a series of yes/no questions related to the project. You will be required to submit: 1) a tidy data set
# as described below, 2) a link to a Github repository with your script for performing the analysis, and 3)
# a code book that describes the variables, the data, and any transformations or work that you performed to
# clean up the data called CodeBook.md. You should also include a README.md in the repo with your scripts.
# This repo explains how all of the scripts work and how they are connected.
#
# One of the most exciting areas in all of data science right now is wearable computing - see for example
# this article . Companies like Fitbit, Nike, and Jawbone Up are racing to develop the most advanced algorithms
# to attract new users. The data linked to from the course website represent data collected from the accelerometers
# from the Samsung Galaxy S smartphone. A full description is available at the site where the data was obtained:
#
# http://archive.ics.uci.edu/ml/datasets/Human+Activity+Recognition+Using+Smartphones
#
# Here are the data for the project:
#
# https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip
#
# You should create one R script called run_analysis.R that does the following.
# 1.Merges the training and the test sets to create one data set.
# 2.Extracts only the measurements on the mean and standard deviation for each measurement.
# 3.Uses descriptive activity names to name the activities in the data set
# 4.Appropriately labels the data set with descriptive variable names.
# 5.Creates a second, independent tidy data set with the average of each variable for each activity and each subject.
#setwd("c:/temp")
# Create a folder to save the zip file on it
# Create a local ./data folder (if needed) and download the raw UCI HAR zip into it.
if (!file.exists("./data")) {dir.create("./data")}
zipUrl= "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
zipFile = "./data/data.zip"
# NOTE(review): requires network access; method="auto" lets R pick the
# download mechanism appropriate for the platform.
download.file (zipUrl, destfile=zipFile, method="auto")
# Unzip the file into the current working directory (unzip() default exdir = ".")
unzip (zipFile)
# As a result, on the root there will be a new directory called UCI HAR Dataset and inside it there will
# be two folders called test and train with all the files we will use to make the merge
# NOTE(review): setwd() mutates global session state; every relative path
# below is resolved inside "UCI HAR Dataset".
setwd("./UCI HAR Dataset")
# Now we will merge the information of the subject, activity and results.
# And we will do it for the train and for the test
# Read the training split: subject_train.txt and y_train.txt each hold one
# column per row (subject id / activity code), loaded with explicit column
# names; X_train.txt holds the numeric feature values, which read.table
# auto-names V1, V2, ... in file order.
# NOTE(review): header=F uses the reassignable shortcut F for FALSE.
subject_train = read.csv ("./train/subject_train.txt",header=F, col.names="subject");
x_train <- read.table ("./train/X_train.txt",comment.char = "",colClasses="numeric");
y_train = read.csv ("./train/y_train.txt",header=F, col.names="activity");
# Put subject and activity in front of the feature columns.
train <- cbind (subject_train, y_train,x_train)
# Same three files for the test split.
subject_test = read.csv ("./test/subject_test.txt",header=F, col.names="subject");
x_test <- read.table ("./test/X_test.txt",comment.char = "",colClasses="numeric");
y_test = read.csv ("./test/y_test.txt",header=F, col.names="activity");
test <- cbind (subject_test, y_test,x_test )
# 1.Merges the training and the test sets to create one data set.
# To join two data frames (datasets) vertically, use the rbind function.
# The two data frames must have the same variables, but they do not have to be in the same order
mergedData <- rbind(train, test)
# 2.Extracts only the measurements on the mean and standard deviation for each measurement.
# If you take a look at the features.txt file, you will find what we need to extract. As we have also
# at the begining of each line the subject and the analysis, we have to add 2 to each number
# to get the correct position
#1 tBodyAcc-mean()-X
#2 tBodyAcc-mean()-Y
#3 tBodyAcc-mean()-Z
#4 tBodyAcc-std()-X
#5 tBodyAcc-std()-Y
#6 tBodyAcc-std()-Z
#41 tGravityAcc-mean()-X
#42 tGravityAcc-mean()-Y
#43 tGravityAcc-mean()-Z
#44 tGravityAcc-std()-X
#45 tGravityAcc-std()-Y
#46 tGravityAcc-std()-Z
#81 tBodyAccJerk-mean()-X
#82 tBodyAccJerk-mean()-Y
#83 tBodyAccJerk-mean()-Z
#84 tBodyAccJerk-std()-X
#85 tBodyAccJerk-std()-Y
#86 tBodyAccJerk-std()-Z
#121 tBodyGyro-mean()-X
#122 tBodyGyro-mean()-Y
#123 tBodyGyro-mean()-Z
#124 tBodyGyro-std()-X
#125 tBodyGyro-std()-Y
#126 tBodyGyro-std()-Z
#161 tBodyGyroJerk-mean()-X
#162 tBodyGyroJerk-mean()-Y
#163 tBodyGyroJerk-mean()-Z
#164 tBodyGyroJerk-std()-X
#165 tBodyGyroJerk-std()-Y
#166 tBodyGyroJerk-std()-Z
#201 tBodyAccMag-mean()
#202 tBodyAccMag-std()
#214 tGravityAccMag-mean()
#215 tGravityAccMag-std()
#227 tBodyAccJerkMag-mean()
#228 tBodyAccJerkMag-std()
#240 tBodyGyroMag-mean()
#241 tBodyGyroMag-std()
#253 tBodyGyroJerkMag-mean()
#254 tBodyGyroJerkMag-std()
#266 fBodyAcc-mean()-X
#267 fBodyAcc-mean()-Y
#268 fBodyAcc-mean()-Z
#269 fBodyAcc-std()-X
#270 fBodyAcc-std()-Y
#271 fBodyAcc-std()-Z
#345 fBodyAccJerk-mean()-X
#346 fBodyAccJerk-mean()-Y
#347 fBodyAccJerk-mean()-Z
#348 fBodyAccJerk-std()-X
#349 fBodyAccJerk-std()-Y
#350 fBodyAccJerk-std()-Z
#424 fBodyGyro-mean()-X
#425 fBodyGyro-mean()-Y
#426 fBodyGyro-mean()-Z
#427 fBodyGyro-std()-X
#428 fBodyGyro-std()-Y
#429 fBodyGyro-std()-Z
#503 fBodyAccMag-mean()
#504 fBodyAccMag-std()
#516 fBodyBodyAccJerkMag-mean()
#517 fBodyBodyAccJerkMag-std()
#529 fBodyBodyGyroMag-mean()
#530 fBodyBodyGyroMag-std()
#542 fBodyBodyGyroJerkMag-mean()
#543 fBodyBodyGyroJerkMag-std()
# Feature k from features.txt lives in the column NAMED "Vk": read.table's
# default names (V1..V561) survive the cbind with subject/activity, so
# selection must use the feature's own index.  The original added 2 to each
# feature number -- correct for column *positions* (subject and activity
# occupy slots 1-2) but wrong for *names* -- and therefore silently selected
# features 3..8, 43..48, ... instead of the documented mean()/std() set,
# then mislabelled them.  Selecting "V<k>" by the true feature index fixes it.
meanStdFeatures <- c(1:6, 41:46, 81:86, 121:126, 161:166,
                     201, 202, 214, 215, 227, 228, 240, 241, 253, 254,
                     266:271, 345:350, 424:429,
                     503, 504, 516, 517, 529, 530, 542, 543)
df <- mergedData[, c("subject", "activity", paste0("V", meanStdFeatures))]
# 3.Uses descriptive activity names to name the activities in the data set
# Map the numeric activity codes 1-6 onto their descriptive labels.  Indexing
# the label vector by the code replaces the six separate conditional
# assignments and converts the column to character in a single step.
activityLabels <- c("WALKING", "WALKING_UPSTAIRS", "WALKING_DOWNSTAIRS",
                    "SITTING", "STANDING", "LAYING")
df$activity <- activityLabels[df$activity]
# 4.Appropriately labels the data set with descriptive variable names.
# 68 descriptive column names, in the exact order of the columns selected
# above: subject, activity, then the 66 mean()/std() measurements.
arr_cols <- c("subject", "activity",
              "tBodyAcc_mean_X","tBodyAcc_mean_Y","tBodyAcc_mean_Z",
              "tBodyAcc_std_X","tBodyAcc_std_Y","tBodyAcc_std_Z",
              "tGravityAcc_mean_X","tGravityAcc_mean_Y","tGravityAcc_mean_Z",
              "tGravityAcc_std_X","tGravityAcc_std_Y","tGravityAcc_std_Z",
              "tBodyAccJerk_mean_X","tBodyAccJerk_mean_Y","tBodyAccJerk_mean_Z",
              "tBodyAccJerk_std_X","tBodyAccJerk_std_Y","tBodyAccJerk_std_Z",
              "tBodyGyro_mean_X","tBodyGyro_mean_Y","tBodyGyro_mean_Z",
              "tBodyGyro_std_X","tBodyGyro_std_Y","tBodyGyro_std_Z",
              "tBodyGyroJerk_mean_X","tBodyGyroJerk_mean_Y","tBodyGyroJerk_mean_Z",
              "tBodyGyroJerk_std_X","tBodyGyroJerk_std_Y","tBodyGyroJerk_std_Z",
              "tBodyAccMag_mean","tBodyAccMag_std",
              "tGravityAccMag_mean","tGravityAccMag_std",
              "tBodyAccJerkMag_mean","tBodyAccJerkMag_std",
              "tBodyGyroMag_mean","tBodyGyroMag_std",
              "tBodyGyroJerkMag_mean","tBodyGyroJerkMag_std",
              "fBodyAcc_mean_X","fBodyAcc_mean_Y","fBodyAcc_mean_Z",
              "fBodyAcc_std_X","fBodyAcc_std_Y","fBodyAcc_std_Z",
              "fBodyAccJerk_mean_X","fBodyAccJerk_mean_Y","fBodyAccJerk_mean_Z",
              "fBodyAccJerk_std_X","fBodyAccJerk_std_Y","fBodyAccJerk_std_Z",
              "fBodyGyro_mean_X","fBodyGyro_mean_Y","fBodyGyro_mean_Z",
              "fBodyGyro_std_X","fBodyGyro_std_Y","fBodyGyro_std_Z",
              "fBodyAccMag_mean","fBodyAccMag_std",
              "fBodyBodyAccJerkMag_mean","fBodyBodyAccJerkMag_std",
              "fBodyBodyGyroMag_mean","fBodyBodyGyroMag_std",
              "fBodyBodyGyroJerkMag_mean","fBodyBodyGyroJerkMag_std")
# Assign the whole header in one vectorised step.  (The original looped from
# index 3; entries 1-2 already match the existing subject/activity names, so
# assigning all 68 at once is equivalent and idiomatic.)
names(df) <- arr_cols
# 5.Creates a second, independent tidy data set with the average of each variable for each activity and each subject.
# A single aggregate() over all measurement columns (everything except the
# first two, subject and activity) replaces the 66 hand-written per-column
# calls, and also fixes a naming bug: the original seeded `result` with the
# raw output of the first aggregate(), leaving its first measurement column
# named "x" instead of "tBodyAcc_mean_X".  Group ordering is identical to the
# original (aggregate orders rows by the grouping variables).
result <- aggregate(df[, -(1:2)],
                    by = list(subject = df$subject, activity = df$activity),
                    FUN = mean)
# Write the full tidy data set and the per-(subject, activity) means one
# level above the "UCI HAR Dataset" directory.
write.table(df, "../tidy_data.txt", sep="\t")
write.table(result, "../result_mean.txt", sep="\t")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_model_names.R
\name{get_model_names}
\alias{get_model_names}
\title{Lists the models available in rTPC}
\usage{
get_model_names()
}
\value{
character vector of thermal performance curves available in rTPC
}
\description{
Lists the models available in rTPC
}
\examples{
get_model_names()
}
| /man/get_model_names.Rd | no_license | padpadpadpad/rTPC | R | false | true | 371 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_model_names.R
\name{get_model_names}
\alias{get_model_names}
\title{Lists the models available in rTPC}
\usage{
get_model_names()
}
\value{
character vector of thermal performance curves available in rTPC
}
\description{
Lists the models available in rTPC
}
\examples{
get_model_names()
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ylhelperfunctions.R
\name{coll_width}
\alias{coll_width}
\title{Set width of columns}
\usage{
coll_width(wb, sheet, cols, width)
}
\arguments{
\item{wb}{workbook}
\item{sheet}{sheet name or number}
\item{cols}{Columns to apply format}
\item{width}{Width of column in points}
}
\description{
Set width of columns
}
\examples{
coll_width(wb, sheet, cols, width)
}
\keyword{XLSX}
\keyword{column}
\keyword{width}
| /man/coll_width.Rd | permissive | yanlesin/ylhelper | R | false | true | 491 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ylhelperfunctions.R
\name{coll_width}
\alias{coll_width}
\title{Set width of columns}
\usage{
coll_width(wb, sheet, cols, width)
}
\arguments{
\item{wb}{workbook}
\item{sheet}{sheet name or number}
\item{cols}{Columns to apply format}
\item{width}{Width of column in points}
}
\description{
Set width of columns
}
\examples{
coll_width(wb, sheet, cols, width)
}
\keyword{XLSX}
\keyword{column}
\keyword{width}
|
# Creating objects ----
# R binds a value to a name with the assignment operator `<-`.
x <- 1
y <- 2
x + y

# Removing objects ----
# rm() deletes a binding from the workspace.
rm(x)
rm(y)

# Basic data types ----
#   1. numeric   (numbers)
#   2. logical   (TRUE / FALSE)
#   3. character (text)
#class(y) #check data type | /Object.R | no_license | amonsap/R | R | false | false | 214 | r | #Create Object
# Creating objects ----
# R binds a value to a name with the assignment operator `<-`.
x <- 1
y <- 2
x + y

# Removing objects ----
# rm() deletes a binding from the workspace.
rm(x)
rm(y)

# Basic data types ----
#   1. numeric   (numbers)
#   2. logical   (TRUE / FALSE)
#   3. character (text)
#class(y) #check data type |
library(TTR)

# Input (CSV data) and output (EPS figures) locations for the
# hand-statistics pipeline.
pathCVS <- "/Users/kreegee/Dropbox/poker/eclipse/uzholdem/data/csv/"
pathEPS <- "C:/Users/Christian/Documents/My Dropbox/Uni/HS09/poker/report/section-chapter3/figures/stats/"

# Agent whose per-hand results are loaded and plotted.
algo <- "MOANaiveBayes-HyperboreanNL-BR"

# Load the per-hand CSV for this agent (same file name as before,
# built with paste0 instead of paste(c(...), collapse = "")).
data <- read.csv(paste0(pathCVS, algo, "Hand.csv"), sep = ",")

fileCount <- 8

# Delegate the actual plotting to the shared hand-graph script.
source("/Users/kreegee/Dropbox/poker/eclipse/uzholdem/data/R/HandGraph.rbat")
| /eclipse/uzholdem/data/R/MOANaiveBayes-HyperboreanNL-BRHand.R | no_license | alincc/uzholdem | R | false | false | 402 | r | library(TTR)
# Input (CSV data) and output (EPS figures) locations for the
# hand-statistics pipeline.
pathCVS <- "/Users/kreegee/Dropbox/poker/eclipse/uzholdem/data/csv/"
pathEPS <- "C:/Users/Christian/Documents/My Dropbox/Uni/HS09/poker/report/section-chapter3/figures/stats/"

# Agent whose per-hand results are loaded and plotted.
algo <- "MOANaiveBayes-HyperboreanNL-BR"

# Load the per-hand CSV for this agent (same file name as before,
# built with paste0 instead of paste(c(...), collapse = "")).
data <- read.csv(paste0(pathCVS, algo, "Hand.csv"), sep = ",")

fileCount <- 8

# Delegate the actual plotting to the shared hand-graph script.
source("/Users/kreegee/Dropbox/poker/eclipse/uzholdem/data/R/HandGraph.rbat")
|
# reading data; "?" encodes missing values in this dataset
# (the original call had two stray empty arguments: `, ,`)
data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                   stringsAsFactors = FALSE, na.strings = "?")
# convert the Date column (day/month/year strings) into the Date class
data$Date <- as.Date(data$Date, format = "%d/%m/%Y")
# filtering data for the two days of interest
data <- data[data$Date >= "2007-02-01" & data$Date <= "2007-02-02", ]
# combine date and time into one POSIXct timestamp
# (as.POSIXct instead of strptime: POSIXlt columns misbehave in data frames)
data$datetime <- as.POSIXct(paste(data$Date, data$Time), format = "%Y-%m-%d %H:%M:%S")
# plot 1: open the PNG device first and draw the histogram straight into it;
# this replaces dev.copy(), which can distort screen-sized output
# (the dev.off() on the following line closes the device)
png(file = "plot1.png", height = 480, width = 480)
hist(data$Global_active_power, main = "Global Active Power", xlab = "Global Active Power (kilowatts)", col = "Red")
dev.off()
| /plot1.R | no_license | ohnmar30/ExData_Plotting1 | R | false | false | 777 | r | # reading data
# reading data; "?" encodes missing values in this dataset
# (the original call had two stray empty arguments: `, ,`)
data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                   stringsAsFactors = FALSE, na.strings = "?")
# convert the Date column (day/month/year strings) into the Date class
data$Date <- as.Date(data$Date, format = "%d/%m/%Y")
# filtering data for the two days of interest
data <- data[data$Date >= "2007-02-01" & data$Date <= "2007-02-02", ]
# combine date and time into one POSIXct timestamp
# (as.POSIXct instead of strptime: POSIXlt columns misbehave in data frames)
data$datetime <- as.POSIXct(paste(data$Date, data$Time), format = "%Y-%m-%d %H:%M:%S")
# plot 1: open the PNG device first and draw the histogram straight into it;
# this replaces dev.copy(), which can distort screen-sized output
# (the dev.off() on the following line closes the device)
png(file = "plot1.png", height = 480, width = 480)
hist(data$Global_active_power, main = "Global Active Power", xlab = "Global Active Power (kilowatts)", col = "Red")
dev.off()
|
## Checking
## ReChecking
| /pollutantmean.R | no_license | jockod/Week2 | R | false | false | 27 | r | ## Checking
## ReChecking
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ef_cetesb.R
\name{ef_cetesb}
\alias{ef_cetesb}
\title{Emissions factors for Environment Company of Sao Paulo, Brazil (CETESB)}
\usage{
ef_cetesb(
p,
veh,
year = 2017,
agemax = 40,
scale = "default",
sppm,
full = FALSE,
efinput,
verbose = FALSE,
csv
)
}
\arguments{
\item{p}{Character;
Pollutants: "CO", "HC", "NMHC", "CH4", "NOx", "CO2",
"RCHO" (aldehydes + formaldehyde), "ETOH",
"PM", "N2O", "KML", "FC", "NO2", "NO", "NH3",
"gD/KWH", "gCO2/KWH", "RCHO_0km" (aldehydes + formaldehyde), "PM25RES", "PM10RES",
"CO_0km", "HC_0km", "NMHC_0km", "NOx_0km", "NO2_0km" ,"NO_0km",
"RCHO_0km" and "ETOH_0km", "FS" (fuel sales) (g/km). If scale = "tunnel" is
used, there is also "ALD" for aldehydes and "HCHO" for formaldehydes
Evaporative emissions at average temperature ranges:
"D_20_35", "S_20_35", "R_20_35", "D_10_25", "S_10_25", "R_10_25", "D_0_15",
"S_0_15" and "R_0_15" where D means diurnal (g/day), S hot/warm soak (g/trip)
and R hot/warm running losses (g/trip). The deteriorated emission factors are calculated inside this function.}
\item{veh}{Character; Vehicle categories:
"PC_G", "PC_FG", "PC_FE", "PC_E",
"LCV_G", "LCV_FG", "LCV_FE", "LCV_E", "LCV_D",
"TRUCKS_SL", "TRUCKS_L", "TRUCKS_M", "TRUCKS_SH", "TRUCKS_H",
"BUS_URBAN", "BUS_MICRO", "BUS_COACH", "BUS_ARTIC",
"MC_150_G", "MC_150_500_G", "MC_500_G",
"MC_150_FG", "MC_150_500_FG", "MC_500_FG",
"MC_150_FE", "MC_150_500_FE", "MC_500_FE",
"CICLOMOTOR", "GNV"}
\item{year}{Numeric; Filter the emission factor to start from a specific base year.
If project is 'constant' values above 2017 and below 1980 will be repeated}
\item{agemax}{Integer; age of oldest vehicles for that category}
\item{scale}{Character; values "default", "tunnel" or "tunnel2018". If "tunnel", emission
factors are scaled to represent EF measurements in tunnels in Sao Paulo}
\item{sppm}{Numeric, sulfur (sulphur) in ppm in fuel.}
\item{full}{Logical; To return a data.frame instead or a vector adding
Age, Year, Brazilian emissions standards and its euro equivalents.}
\item{efinput}{data.frame with efinput structure of sysdata cetesb. Allow
apply deterioration for future emission factors}
\item{verbose}{Logical; To show more information}
\item{csv}{String with the path to download the ef in a .csv file. For instance,
ef.csv}
}
\value{
A vector of Emission Factor or a data.frame
}
\description{
\code{\link{ef_cetesb}} returns a vector or data.frame of Brazilian emission factors.
}
\note{
The new convention for vehicles names are translated from CETESB report:
\tabular{ll}{
veh \tab description \cr
PC_G \tab Passenger Car Gasohol (Gasoline + 27perc of anhydrous ethanol) \cr
PC_E \tab Passenger Car Ethanol (hydrous ethanol) \cr
PC_FG \tab Passenger Car Flex Gasohol (Gasoline + 27perc of anhydrous ethanol) \cr
PC_FE \tab Passenger Car Flex Ethanol (hydrous ethanol) \cr
LCV_G \tab Light Commercial Vehicle Gasohol (Gasoline + 27perc of anhydrous ethanol) \cr
LCV_E \tab Light Commercial Vehicle Ethanol (hydrous ethanol) \cr
LCV_FG \tab Light Commercial Vehicle Flex Gasohol (Gasoline + 27perc of anhydrous ethanol) \cr
LCV_FE \tab Light Commercial Vehicle Flex Ethanol (hydrous ethanol) \cr
LCV_D \tab Light Commercial Vehicle Diesel (5perc bio-diesel) \cr
TRUCKS_SL_D \tab Trucks Semi Light Diesel (5perc bio-diesel) \cr
TRUCKS_L_D \tab Trucks Light Diesel (5perc bio-diesel) \cr
TRUCKS_M_D \tab Trucks Medium Diesel (5perc bio-diesel) \cr
TRUCKS_SH_D \tab Trucks Semi Heavy Diesel (5perc bio-diesel) \cr
TRUCKS_H_D \tab Trucks Heavy Diesel (5perc bio-diesel) \cr
BUS_URBAN_D \tab Urban Bus Diesel (5perc bio-diesel) \cr
BUS_MICRO_D \tab Micro Urban Bus Diesel (5perc bio-diesel) \cr
BUS_COACH_D \tab Coach (inter-state) Bus Diesel (5perc bio-diesel) \cr
BUS_ARTIC_D \tab Articulated Urban Bus Diesel (5perc bio-diesel) \cr
MC_150_G \tab Motorcycle engine less than 150cc Gasohol (Gasoline + 27perc of anhydrous ethanol) \cr
MC_150_500_G \tab Motorcycle engine 150-500cc Gasohol (Gasoline + 27perc of anhydrous ethanol) \cr
MC_500_G \tab Motorcycle greater than 500cc Gasohol (Gasoline + 27perc of anhydrous ethanol) \cr
MC_150_FG \tab Flex Motorcycle engine less than 150cc Gasohol (Gasoline + 27perc of anhydrous ethanol) \cr
MC_150_500_FG \tab Flex Motorcycle engine 150-500cc Gasohol (Gasoline + 27perc of anhydrous ethanol) \cr
MC_500_FG \tab Flex Motorcycle greater than 500cc Gasohol (Gasoline + 27perc of anhydrous ethanol) \cr
MC_150_FE \tab Flex Motorcycle engine less than 150cc Ethanol (hydrous ethanol) \cr
MC_150_500_FE \tab Flex Motorcycle engine 150-500cc Ethanol (hydrous ethanol) \cr
MC_500_FE \tab Flex Motorcycle greater than 500cc Ethanol (hydrous ethanol) \cr
PC_ELEC \tab Passenger Car Electric \cr
LCV_ELEC \tab Light Commercial Vehicle Electric \cr
}
The percentage varies of biofuels varies by law.
This emission factors are not exactly the same as the report of CETESB.
1) In this emission factors, there is also NO and NO2 based on split by
published in the EMEP/EEA air pollutant emission inventory guidebook.
2) Also, the emission factors were extended till 50 years of use, repeating
the oldest value.
3) CNG emission factors were expanded to other pollutants by comparison
of US.EPA-AP42 emission factor: Section 1.4 Natural Gas Combustion.
In the previous versions I used the letter 'd' for deteriorated. I removed the
letter 'd' internally to not break older code.
If by mistake, the user inputs one of veh names from the old convention,
they are internally changed to the new convention:
"SLT", "LT", "MT", "SHT","HT", "UB", "SUB", "COACH", "ARTIC", "M_G_150",
"M_G_150_500", "M_G_500", "M_FG_150", "M_FG_150_500", "M_FG_500",
"M_FE_150", "M_FE_150_500","M_FE_500",
PC_ELEC, LCV_ELEC, TRUCKS_ELEC, BUS_ELEC,
MC_150_ELEC, MC_150_500_ELEC, MC_500_ELEC
If pollutant is "SO2", it needs sppm. It is designed when veh has length 1, if it has length 2 or more,
it will show a warning
\strong{Emission factors for vehicles older than those reported by CETESB were filled with the last highest EF}
\itemize{
\item Range EF from PC and LCV otto: 2018 - 1982. EF for 1981 and older as moving average.
\item Range LCV diesel : 2018 - 2006. EF for 2005 and older as moving average.
\item Range Trucks and Buse: 2018 - 1998. EF for 1997 and older as moving average.
\item Range MC Gasoline: 2018 - 2003. EF for 2002 and older as moving average.
\item Range MC Flex 150-500cc and >500cc: 2018 - 2012. EF for 2011 and older as moving average.
}
Currently, 2020, there are not any system for recovery of fuel vapors in Brazil. Hence,
the FS takes into account the vapour that comes from the fuel tank inside the car and
released into the atmosphere when injecting new fuel. There are discussions about
increasing implementing stage I and II and/or ORVR these days. The ef FS is calculated
by transforming g FC/km into (L/KM)*g/L with g/L 1.14 for gasoline and 0.37
for ethanol (CETESB, 2016). The density considered is 0.75425 for gasoline and
0.809 for ethanol (t/m^3)
CETESB emission factors did not cover evaporative emissions from motorcycles,
which occur. Therefore, in the absence of better data, it was assumed the
same ratio from passenger cars.
Li, Lan, et al. "Exhaust and evaporative emissions from motorcycles fueled
with ethanol gasoline blends." Science of the Total Environment 502 (2015): 627-631.
If scale is used with tunnel, the references are:
\itemize{
\item Pérez-Martinez, P. J., Miranda, R. M., Nogueira, T., Guardani, M. L.,
Fornaro, A., Ynoue, R., and Andrade, M. F. (2014). Emission
factors of air pollutants from vehicles measured inside road tunnels in
Sao Paulo: case study comparison. International Journal of
Environmental Science and Technology, 11(8), 2155-2168.
\item Nogueira, T., de Souza, K. F., Fornaro, A., de Fatima Andrade, M., and
de Carvalho, L. R. F. (2015). On-road emissions of carbonyls
from vehicles powered by biofuel blends in traffic tunnels in the
Metropolitan Area of Sao Paulo, Brazil. Atmospheric Environment, 108, 88-97.
\item Nogueira, T., et al (2021). In preparation (for tunnel 2018)
}
Emission factors for resuspension applies \strong{only} with top-down approach
as an experimental feature. Units are g/(streets*veh)/day. These values were
derived from bottom-up resuspension emissions from the metropolitan area
of Sao Paulo 2018, assuming 50000 streets
NH3 from EEA Tier 2
}
\examples{
{
a <- ef_cetesb(p = "CO", veh = "PC_G")
a <- ef_cetesb(p = "NOx", veh = "TRUCKS_M_D")
a <- ef_cetesb("R_10_25", "PC_G")
a <- ef_cetesb("CO", c("PC_G", "PC_FE"))
ef_cetesb(p = "CO", veh = "PC_G", year = 1970, agemax = 40)
ef_cetesb(p = "CO", veh = "TRUCKS_L_D", year = 2018)
ef_cetesb(p = "CO", veh = "SLT", year = 2018) # olds names
a <- ef_cetesb(p = "NMHC", veh = c("PC_G", "PC_FG", "PC_FE", "PC_E"), year = 2018, agemax = 20)
colplot(a, main = "NMHC EF", ylab = "[g/km]", xlab = "Years of use")
ef_cetesb(p = "PM25RES", veh = "PC_ELEC", year = 1970, agemax = 40)
ef_cetesb(p = "PM25RES", veh = "BUS_ELEC", year = 1970, agemax = 40)
}
}
\references{
Emissoes Veiculares no Estado de Sao Paulo 2016. Technical Report.
url: https://cetesb.sp.gov.br/veicular/relatorios-e-publicacoes/.
}
\keyword{emission}
\keyword{factors}
| /man/ef_cetesb.Rd | no_license | cran/vein | R | false | true | 9,385 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ef_cetesb.R
\name{ef_cetesb}
\alias{ef_cetesb}
\title{Emissions factors for Environment Company of Sao Paulo, Brazil (CETESB)}
\usage{
ef_cetesb(
p,
veh,
year = 2017,
agemax = 40,
scale = "default",
sppm,
full = FALSE,
efinput,
verbose = FALSE,
csv
)
}
\arguments{
\item{p}{Character;
Pollutants: "CO", "HC", "NMHC", "CH4", "NOx", "CO2",
"RCHO" (aldehydes + formaldehyde), "ETOH",
"PM", "N2O", "KML", "FC", "NO2", "NO", "NH3",
"gD/KWH", "gCO2/KWH", "RCHO_0km" (aldehydes + formaldehyde), "PM25RES", "PM10RES",
"CO_0km", "HC_0km", "NMHC_0km", "NOx_0km", "NO2_0km" ,"NO_0km",
"RCHO_0km" and "ETOH_0km", "FS" (fuel sales) (g/km). If scale = "tunnel" is
used, there is also "ALD" for aldehydes and "HCHO" for formaldehydes
Evaporative emissions at average temperature ranges:
"D_20_35", "S_20_35", "R_20_35", "D_10_25", "S_10_25", "R_10_25", "D_0_15",
"S_0_15" and "R_0_15" where D means diurnal (g/day), S hot/warm soak (g/trip)
and R hot/warm running losses (g/trip). The deteriorated emission factors are calculated inside this function.}
\item{veh}{Character; Vehicle categories:
"PC_G", "PC_FG", "PC_FE", "PC_E",
"LCV_G", "LCV_FG", "LCV_FE", "LCV_E", "LCV_D",
"TRUCKS_SL", "TRUCKS_L", "TRUCKS_M", "TRUCKS_SH", "TRUCKS_H",
"BUS_URBAN", "BUS_MICRO", "BUS_COACH", "BUS_ARTIC",
"MC_150_G", "MC_150_500_G", "MC_500_G",
"MC_150_FG", "MC_150_500_FG", "MC_500_FG",
"MC_150_FE", "MC_150_500_FE", "MC_500_FE",
"CICLOMOTOR", "GNV"}
\item{year}{Numeric; Filter the emission factor to start from a specific base year.
If project is 'constant' values above 2017 and below 1980 will be repeated}
\item{agemax}{Integer; age of oldest vehicles for that category}
\item{scale}{Character; values "default", "tunnel" or "tunnel2018". If "tunnel", emission
factors are scaled to represent EF measurements in tunnels in Sao Paulo}
\item{sppm}{Numeric, sulfur (sulphur) in ppm in fuel.}
\item{full}{Logical; To return a data.frame instead or a vector adding
Age, Year, Brazilian emissions standards and its euro equivalents.}
\item{efinput}{data.frame with efinput structure of sysdata cetesb. Allow
apply deterioration for future emission factors}
\item{verbose}{Logical; To show more information}
\item{csv}{String with the path to download the ef in a .csv file. For instance,
ef.csv}
}
\value{
A vector of Emission Factor or a data.frame
}
\description{
\code{\link{ef_cetesb}} returns a vector or data.frame of Brazilian emission factors.
}
\note{
The new convention for vehicles names are translated from CETESB report:
\tabular{ll}{
veh \tab description \cr
PC_G \tab Passenger Car Gasohol (Gasoline + 27perc of anhydrous ethanol) \cr
PC_E \tab Passenger Car Ethanol (hydrous ethanol) \cr
PC_FG \tab Passenger Car Flex Gasohol (Gasoline + 27perc of anhydrous ethanol) \cr
PC_FE \tab Passenger Car Flex Ethanol (hydrous ethanol) \cr
LCV_G \tab Light Commercial Vehicle Gasohol (Gasoline + 27perc of anhydrous ethanol) \cr
LCV_E \tab Light Commercial Vehicle Ethanol (hydrous ethanol) \cr
LCV_FG \tab Light Commercial Vehicle Flex Gasohol (Gasoline + 27perc of anhydrous ethanol) \cr
LCV_FE \tab Light Commercial Vehicle Flex Ethanol (hydrous ethanol) \cr
LCV_D \tab Light Commercial Vehicle Diesel (5perc bio-diesel) \cr
TRUCKS_SL_D \tab Trucks Semi Light Diesel (5perc bio-diesel) \cr
TRUCKS_L_D \tab Trucks Light Diesel (5perc bio-diesel) \cr
TRUCKS_M_D \tab Trucks Medium Diesel (5perc bio-diesel) \cr
TRUCKS_SH_D \tab Trucks Semi Heavy Diesel (5perc bio-diesel) \cr
TRUCKS_H_D \tab Trucks Heavy Diesel (5perc bio-diesel) \cr
BUS_URBAN_D \tab Urban Bus Diesel (5perc bio-diesel) \cr
BUS_MICRO_D \tab Micro Urban Bus Diesel (5perc bio-diesel) \cr
BUS_COACH_D \tab Coach (inter-state) Bus Diesel (5perc bio-diesel) \cr
BUS_ARTIC_D \tab Articulated Urban Bus Diesel (5perc bio-diesel) \cr
MC_150_G \tab Motorcycle engine less than 150cc Gasohol (Gasoline + 27perc of anhydrous ethanol) \cr
MC_150_500_G \tab Motorcycle engine 150-500cc Gasohol (Gasoline + 27perc of anhydrous ethanol) \cr
MC_500_G \tab Motorcycle greater than 500cc Gasohol (Gasoline + 27perc of anhydrous ethanol) \cr
MC_150_FG \tab Flex Motorcycle engine less than 150cc Gasohol (Gasoline + 27perc of anhydrous ethanol) \cr
MC_150_500_FG \tab Flex Motorcycle engine 150-500cc Gasohol (Gasoline + 27perc of anhydrous ethanol) \cr
MC_500_FG \tab Flex Motorcycle greater than 500cc Gasohol (Gasoline + 27perc of anhydrous ethanol) \cr
MC_150_FE \tab Flex Motorcycle engine less than 150cc Ethanol (hydrous ethanol) \cr
MC_150_500_FE \tab Flex Motorcycle engine 150-500cc Ethanol (hydrous ethanol) \cr
MC_500_FE \tab Flex Motorcycle greater than 500cc Ethanol (hydrous ethanol) \cr
PC_ELEC \tab Passenger Car Electric \cr
LCV_ELEC \tab Light Commercial Vehicle Electric \cr
}
The percentage varies of biofuels varies by law.
This emission factors are not exactly the same as the report of CETESB.
1) In this emission factors, there is also NO and NO2 based on split by
published in the EMEP/EEA air pollutant emission inventory guidebook.
2) Also, the emission factors were extended till 50 years of use, repeating
the oldest value.
3) CNG emission factors were expanded to other pollutants by comparison
of US.EPA-AP42 emission factor: Section 1.4 Natural Gas Combustion.
In the previous versions I used the letter 'd' for deteriorated. I removed the
letter 'd' internally to not break older code.
If by mistake, the user inputs one of veh names from the old convention,
they are internally changed to the new convention:
"SLT", "LT", "MT", "SHT","HT", "UB", "SUB", "COACH", "ARTIC", "M_G_150",
"M_G_150_500", "M_G_500", "M_FG_150", "M_FG_150_500", "M_FG_500",
"M_FE_150", "M_FE_150_500","M_FE_500",
PC_ELEC, LCV_ELEC, TRUCKS_ELEC, BUS_ELEC,
MC_150_ELEC, MC_150_500_ELEC, MC_500_ELEC
If pollutant is "SO2", it needs sppm. It is designed when veh has length 1, if it has length 2 or more,
it will show a warning
\strong{Emission factors for vehicles older than those reported by CETESB were filled with the last highest EF}
\itemize{
\item Range EF from PC and LCV otto: 2018 - 1982. EF for 1981 and older as moving average.
\item Range LCV diesel : 2018 - 2006. EF for 2005 and older as moving average.
\item Range Trucks and Buse: 2018 - 1998. EF for 1997 and older as moving average.
\item Range MC Gasoline: 2018 - 2003. EF for 2002 and older as moving average.
\item Range MC Flex 150-500cc and >500cc: 2018 - 2012. EF for 2011 and older as moving average.
}
Currently, 2020, there are not any system for recovery of fuel vapors in Brazil. Hence,
the FS takes into account the vapour that comes from the fuel tank inside the car and
released into the atmosphere when injecting new fuel. There are discussions about
increasing implementing stage I and II and/or ORVR these days. The ef FS is calculated
by transforming g FC/km into (L/KM)*g/L with g/L 1.14 for gasoline and 0.37
for ethanol (CETESB, 2016). The density considered is 0.75425 for gasoline and
0.809 for ethanol (t/m^3)
CETESB emission factors did not cover evaporative emissions from motorcycles,
which occur. Therefore, in the absence of better data, it was assumed the
same ratio from passenger cars.
Li, Lan, et al. "Exhaust and evaporative emissions from motorcycles fueled
with ethanol gasoline blends." Science of the Total Environment 502 (2015): 627-631.
If scale is used with tunnel, the references are:
\itemize{
\item Pérez-Martinez, P. J., Miranda, R. M., Nogueira, T., Guardani, M. L.,
Fornaro, A., Ynoue, R., and Andrade, M. F. (2014). Emission
factors of air pollutants from vehicles measured inside road tunnels in
Sao Paulo: case study comparison. International Journal of
Environmental Science and Technology, 11(8), 2155-2168.
\item Nogueira, T., de Souza, K. F., Fornaro, A., de Fatima Andrade, M., and
de Carvalho, L. R. F. (2015). On-road emissions of carbonyls
from vehicles powered by biofuel blends in traffic tunnels in the
Metropolitan Area of Sao Paulo, Brazil. Atmospheric Environment, 108, 88-97.
\item Nogueira, T., et al (2021). In preparation (for tunnel 2018)
}
Emission factors for resuspension applies \strong{only} with top-down approach
as an experimental feature. Units are g/(streets*veh)/day. These values were
derived from bottom-up resuspension emissions from the metropolitan area
of Sao Paulo 2018, assuming 50000 streets
NH3 from EEA Tier 2
}
\examples{
{
a <- ef_cetesb(p = "CO", veh = "PC_G")
a <- ef_cetesb(p = "NOx", veh = "TRUCKS_M_D")
a <- ef_cetesb("R_10_25", "PC_G")
a <- ef_cetesb("CO", c("PC_G", "PC_FE"))
ef_cetesb(p = "CO", veh = "PC_G", year = 1970, agemax = 40)
ef_cetesb(p = "CO", veh = "TRUCKS_L_D", year = 2018)
ef_cetesb(p = "CO", veh = "SLT", year = 2018) # olds names
a <- ef_cetesb(p = "NMHC", veh = c("PC_G", "PC_FG", "PC_FE", "PC_E"), year = 2018, agemax = 20)
colplot(a, main = "NMHC EF", ylab = "[g/km]", xlab = "Years of use")
ef_cetesb(p = "PM25RES", veh = "PC_ELEC", year = 1970, agemax = 40)
ef_cetesb(p = "PM25RES", veh = "BUS_ELEC", year = 1970, agemax = 40)
}
}
\references{
Emissoes Veiculares no Estado de Sao Paulo 2016. Technical Report.
url: https://cetesb.sp.gov.br/veicular/relatorios-e-publicacoes/.
}
\keyword{emission}
\keyword{factors}
|
#Johns Hopkins Data Science Track Coursera
#Course: 2
#Week: 2
#
#User: g9a2hvk9
#email: g9a2hvk9@gmail.com
#github: github.com/g9a2hvk9
#
#
#LOOP FUNCTIONS - LAPPLY
#
# Source of base::lapply, quoted for reference: it coerces X to a list
# and delegates the actual looping to internal C code.
#lapply <- function (X, FUN, ...)
# {
# FUN <- match.fun(FUN)
# if (!is.vector(X) || is.object(X))
# X <- as.list(X)
# .Internal(lapply(X, FUN))
# }
# <bytecode: 0x7f8424049190>
# <environment: namespace:base>
# lapply(x, mean) returns list of means of respective vectors in list x
x <- list(a = 1:5, b = rnorm(10))
lapply(x, mean)
# lapply(x, mean) returns list of means of respective vectors in list x
x <- list(a = 1:4, b = rnorm(10), c = rnorm(20, 1), d = rnorm(100, 5))
lapply(x, mean)
# lapply(x, runif) returns a list of 4 vectors (lengths 1 to 4) of random
# uniform numbers between 0 and 1: each element of 1:4 becomes the sample
# size argument n of runif()
x <- 1:4
lapply(x, runif)
# same idea, but the uniform draws lie between 0 and 10:
# extra arguments min and max of FUN "runif()" are forwarded through the
# ... of lapply(X, FUN, ...)
x <- 1:4
lapply(x, runif, min = 0, max = 10)
# Creates 2 matrices of dim [2, 2] and dim [3, 2]
# lapply(x, function(matrix) matrix[, 1]) returns a list with the first
# column of each matrix via an anonymous function (note: the parameter
# name "matrix" shadows base::matrix inside the function body)
# matrix notation: [row, col]
x <- list(a = matrix(1:4, 2, 2), b = matrix(1:6, 3, 2))
lapply(x, function(matrix) matrix[, 1])
# lapply(x, mean) returns list of means with length 4
x <- list(a = 1:4, b = rnorm(10), c = rnorm(20, 1), d = rnorm(100, 5))
lapply(x, mean)
# sapply returns vector of means with length 4
# this is the simpler notation
sapply(x, mean)
# calling mean(x) on the list itself does not loop over the elements:
# mean() warns "argument is not numeric or logical" and returns NA
mean(x) | /02_RProgramming/03_Week03/01_Lectures/01_loopFunctions-lapply.R | no_license | G9A2HvK9/datasciencecoursera | R | false | false | 1,713 | r | #Johns Hopkins Data Science Track Coursera
#Course: 2
#Week: 2
#
#User: g9a2hvk9
#email: g9a2hvk9@gmail.com
#github: github.com/g9a2hvk9
#
#
#LOOP FUNCTIONS - LAPPLY
#
# Source of base::lapply, quoted for reference: it coerces X to a list
# and delegates the actual looping to internal C code.
#lapply <- function (X, FUN, ...)
# {
# FUN <- match.fun(FUN)
# if (!is.vector(X) || is.object(X))
# X <- as.list(X)
# .Internal(lapply(X, FUN))
# }
# <bytecode: 0x7f8424049190>
# <environment: namespace:base>
# lapply(x, mean) returns list of means of respective vectors in list x
x <- list(a = 1:5, b = rnorm(10))
lapply(x, mean)
# lapply(x, mean) returns list of means of respective vectors in list x
x <- list(a = 1:4, b = rnorm(10), c = rnorm(20, 1), d = rnorm(100, 5))
lapply(x, mean)
# lapply(x, runif) returns a list of 4 vectors (lengths 1 to 4) of random
# uniform numbers between 0 and 1: each element of 1:4 becomes the sample
# size argument n of runif()
x <- 1:4
lapply(x, runif)
# same idea, but the uniform draws lie between 0 and 10:
# extra arguments min and max of FUN "runif()" are forwarded through the
# ... of lapply(X, FUN, ...)
x <- 1:4
lapply(x, runif, min = 0, max = 10)
# Creates 2 matrices of dim [2, 2] and dim [3, 2]
# lapply(x, function(matrix) matrix[, 1]) returns a list with the first
# column of each matrix via an anonymous function (note: the parameter
# name "matrix" shadows base::matrix inside the function body)
# matrix notation: [row, col]
x <- list(a = matrix(1:4, 2, 2), b = matrix(1:6, 3, 2))
lapply(x, function(matrix) matrix[, 1])
# lapply(x, mean) returns list of means with length 4
x <- list(a = 1:4, b = rnorm(10), c = rnorm(20, 1), d = rnorm(100, 5))
lapply(x, mean)
# sapply returns vector of means with length 4
# this is the simpler notation
sapply(x, mean)
# calling mean(x) on the list itself does not loop over the elements:
# mean() warns "argument is not numeric or logical" and returns NA
mean(x) |
library(sqldf)
setwd("C:/Users/user/Desktop/coursera")

# Read only the two target days straight from the raw file; the SQL
# filter runs while reading, so the full dataset never sits in memory.
data <- read.csv.sql("household_power_consumption.txt", header = TRUE, sep = ";",
                     sql = "Select * from file where Date = '1/2/2007' OR Date = '2/2/2007'")

# converting: assign the parsed values back into the data frame (the
# original calls discarded their results) and build a single timestamp;
# the old strptime format "%S:%M:%H" also had its fields reversed.
data$Date <- as.Date(data$Date, "%d/%m/%Y")
data$datetime <- as.POSIXct(paste(data$Date, data$Time), format = "%Y-%m-%d %H:%M:%S")

# Plot 3 (the dev.off() on the following line closes the PNG device)
png("plot3.png", width = 480, height = 480)
plot(data[, 7], type = "l", ylab = "Energy sub metering")
lines(data[, 8], col = "red")
lines(data[, 9], col = "blue")
# Legend colours must match the plotted lines; the original col = 1:3
# rendered the third legend key green while the third line is blue.
legend("topright", lty = 1, col = c("black", "red", "blue"),
       c("sub_metering_1", "sub_metering_2", "sub_metering_3"))
dev.off() | /plot3.R | no_license | dragolino/try | R | false | false | 560 | r | library(sqldf)
setwd("C:/Users/user/Desktop/coursera")

# Read only the two target days straight from the raw file; the SQL
# filter runs while reading, so the full dataset never sits in memory.
data <- read.csv.sql("household_power_consumption.txt", header = TRUE, sep = ";",
                     sql = "Select * from file where Date = '1/2/2007' OR Date = '2/2/2007'")

# converting: assign the parsed values back into the data frame (the
# original calls discarded their results) and build a single timestamp;
# the old strptime format "%S:%M:%H" also had its fields reversed.
data$Date <- as.Date(data$Date, "%d/%m/%Y")
data$datetime <- as.POSIXct(paste(data$Date, data$Time), format = "%Y-%m-%d %H:%M:%S")

# Plot 3 (the dev.off() on the following line closes the PNG device)
png("plot3.png", width = 480, height = 480)
plot(data[, 7], type = "l", ylab = "Energy sub metering")
lines(data[, 8], col = "red")
lines(data[, 9], col = "blue")
# Legend colours must match the plotted lines; the original col = 1:3
# rendered the third legend key green while the third line is blue.
legend("topright", lty = 1, col = c("black", "red", "blue"),
       c("sub_metering_1", "sub_metering_2", "sub_metering_3"))
dev.off() |
#############################################################################
#
# XLConnect
# Copyright (C) 2010-2021 Mirai Solutions GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
#############################################################################
#############################################################################
#
# Accessing cell formulae
#
# Author: Thomas Themel, Mirai Solutions GmbH
#
#############################################################################
# Generic: return the formula stored in a single worksheet cell.
setGeneric("getCellFormula",
    function(object, sheet, row, col) standardGeneric("getCellFormula"))

# Sheet addressed by its (1-based) numeric index. R uses 1-based
# coordinates; the underlying call expects 0-based ones, hence the -1
# shifts before the integer conversion.
setMethod("getCellFormula",
  signature(object = "workbook", sheet = "numeric"),
  function(object, sheet, row, col) {
    sheetIdx <- as.integer(sheet - 1)
    rowIdx <- as.integer(row - 1)
    colIdx <- as.integer(col - 1)
    xlcCall(object, "getCellFormula", sheetIdx, rowIdx, colIdx)
  }
)

# Sheet addressed by its name; the name is passed through unchanged,
# only the row/column coordinates are shifted to 0-based indices.
setMethod("getCellFormula",
  signature(object = "workbook", sheet = "character"),
  function(object, sheet, row, col) {
    rowIdx <- as.integer(row - 1)
    colIdx <- as.integer(col - 1)
    xlcCall(object, "getCellFormula", sheet, rowIdx, colIdx)
  }
)
| /R/workbook.getCellFormula.R | no_license | miraisolutions/xlconnect | R | false | false | 1,692 | r | #############################################################################
#
# XLConnect
# Copyright (C) 2010-2021 Mirai Solutions GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
#############################################################################
#############################################################################
#
# Accessing cell formulae
#
# Author: Thomas Themel, Mirai Solutions GmbH
#
#############################################################################
# Generic: return the formula stored in a single worksheet cell.
setGeneric("getCellFormula",
    function(object, sheet, row, col) standardGeneric("getCellFormula"))

# Sheet addressed by its (1-based) numeric index. R uses 1-based
# coordinates; the underlying call expects 0-based ones, hence the -1
# shifts before the integer conversion.
setMethod("getCellFormula",
  signature(object = "workbook", sheet = "numeric"),
  function(object, sheet, row, col) {
    sheetIdx <- as.integer(sheet - 1)
    rowIdx <- as.integer(row - 1)
    colIdx <- as.integer(col - 1)
    xlcCall(object, "getCellFormula", sheetIdx, rowIdx, colIdx)
  }
)

# Sheet addressed by its name; the name is passed through unchanged,
# only the row/column coordinates are shifted to 0-based indices.
setMethod("getCellFormula",
  signature(object = "workbook", sheet = "character"),
  function(object, sheet, row, col) {
    rowIdx <- as.integer(row - 1)
    colIdx <- as.integer(col - 1)
    xlcCall(object, "getCellFormula", sheet, rowIdx, colIdx)
  }
)
|
library(tidyverse)
library(TTR)
library(janitor)
library(plotly)
library(caret)
library(lubridate)
library(Rtsne)
library(pROC)
source('funciones.r')
# Make sure we start from the project root: a previous run may have left
# the working directory inside one of the 'cluster *' folders.
if (stringi::stri_detect(getwd(), fixed = 'cluster')) {
  setwd('..')
}

# ETF total-return history, one row per (ticker, date).
raw <- read.csv('all_info.csv', stringsAsFactors = FALSE) %>%
  clean_names() %>%
  mutate(date = as.Date(date)) %>%
  rename(tri = tot_return_index_net_dvds)

# Run the pair-trading pipeline for one cluster of related ETFs.
# This replaces the per-cluster block that was copy-pasted five times:
# move into the cluster's folder, build every ETF pair, compute the pair
# technical indicators, fit the strategy and return its portfolio
# restricted to dates after 2019-01-01.
# get_tec / get_strategy / get_portfolio come from funciones.r.
run_cluster <- function(cluster_dir, etfs, raw, k = 5) {
  # Same relative-path logic as the original per-cluster blocks: the
  # script may currently sit inside another cluster's folder.
  if (stringi::stri_detect(getwd(), fixed = 'cluster')) {
    setwd(file.path('..', cluster_dir))
  } else {
    setwd(cluster_dir)
  }
  all_pairs <- data.frame(combn(unique(etfs), 2), stringsAsFactors = FALSE)
  all_pairs <- lapply(all_pairs, c)
  all_tecs <- lapply(all_pairs, get_tec, raw = raw, k = k)
  pair_tecnical <- do.call(rbind, all_tecs)
  stat <- get_strategy(pair_tecnical, y = year(max(raw$date)), yot = 5, met = 'regLogistic')
  get_portfolio(stat$prediction) %>%
    filter(date > '2019-01-01')
}

#### Cluster 1
portfolio1 <- run_cluster('cluster 1',
                          c('MCHI US Equity', # china
                            'EWH US Equity',  # hong kong
                            'EWY US Equity'), # korea
                          raw)

#### Cluster 2
portfolio2 <- run_cluster('cluster 2',
                          c('EWQ US Equity',  # France
                            'EWG US Equity',  # Germany
                            'EWU US Equity'), # UK
                          raw)

#### Cluster 3
portfolio3 <- run_cluster('cluster 3',
                          c('XLI US Equity',  # Industrial
                            'XLB US Equity',  # Material
                            'XLF US Equity',  # Financial
                            'XLE US Equity'), # Energy
                          raw)

#### Cluster 4
portfolio4 <- run_cluster('cluster 4',
                          c('INDA US Equity', # India
                            'THD US Equity',  # Thailand
                            'EWM US Equity'), # Malaysia
                          raw)

#### Cluster 5
portfolio5 <- run_cluster('cluster 5',
                          c('SPY US Equity',  # US general
                            'XLK US Equity',  # Tech
                            'XLY US Equity'), # Cons Disc
                          raw)

# Back to the project root before writing the combined output.
if (stringi::stri_detect(getwd(), fixed = 'cluster')) {
  setwd('..')
}

#### resumen: stack the five cluster portfolios, keep one weekday per week
#### (wday(date) == 5 -- Thursday under lubridate's default Sunday week
#### start; TODO confirm this is the intended rebalancing day) and drop
#### the most recent date.
portafolios <- portfolio1 %>%
  mutate(cluster = 1) %>%
  rbind(mutate(portfolio2, cluster = 2)) %>%
  rbind(mutate(portfolio3, cluster = 3)) %>%
  rbind(mutate(portfolio4, cluster = 4)) %>%
  rbind(mutate(portfolio5, cluster = 5)) %>%
  filter(wday(date) == 5) %>%
  filter(date < max(date))
write.csv(portafolios, 'portafolios.csv')
| /Demo/.Rproj.user/BF253200/sources/s-556CC03A/69552EA8-contents | no_license | magralo/proyecto_integrador2_ITPA | R | false | false | 3,623 | library(tidyverse)
library(TTR)
library(janitor)
library(plotly)
library(caret)
library(lubridate)
library(Rtsne)
library(pROC)
# Project helpers; presumably defines get_tec(), get_strategy() and
# get_portfolio() used below -- TODO confirm against funciones.r.
source('funciones.r')
# If a previous run left us inside a "cluster N" sub-directory, climb back
# to the project root before reading the raw data.
if(stringi::stri_detect(getwd(),fixed='cluster')){
  setwd('..')
}
# Raw price data; tri = total-return index net of dividends.
raw=read.csv('all_info.csv',stringsAsFactors = FALSE)%>%
  clean_names()%>%
  mutate(date=as.Date(date))%>%
  rename(tri=tot_return_index_net_dvds)
#### Cluster 1
# Each cluster block below is identical except for the ETF universe and the
# working directory: build every unordered ETF pair, compute technical
# features (get_tec, window k), fit a strategy via caret's "regLogistic"
# method on yot=5 years up to the latest year in the data, and keep the
# resulting portfolio from 2019 onward.
if(stringi::stri_detect(getwd(),fixed='cluster')){
  setwd('../cluster 1')
}else{
  setwd('cluster 1')
}
etfs=c(
  'MCHI US Equity', #china
  'EWH US Equity', #hong kong
  'EWY US Equity' #korea
)
k=5  # window parameter passed to get_tec -- TODO confirm its exact meaning
# All unordered ETF pairs, converted to a list of length-2 character vectors.
all_pairs=data.frame(combn(unique(etfs),2),stringsAsFactors = FALSE)
all_pairs=lapply(all_pairs, c)
all_tecs=lapply(all_pairs,get_tec,raw=raw,k=k)
pair_tecnical=do.call(rbind,all_tecs)
stat=get_strategy(pair_tecnical,y=year(max(raw$date)),yot=5,met='regLogistic')
portfolio1=get_portfolio(stat$prediction)%>%
  filter(date>'2019-01-01')
## Cluster 2
if(stringi::stri_detect(getwd(),fixed='cluster')){
  setwd('../cluster 2')
}else{
  setwd('cluster 2')
}
etfs=c(
  'EWQ US Equity', #Francia
  'EWG US Equity', #Alemania
  'EWU US Equity' #UK
)
k=5
all_pairs=data.frame(combn(unique(etfs),2),stringsAsFactors = FALSE)
all_pairs=lapply(all_pairs, c)
all_tecs=lapply(all_pairs,get_tec,raw=raw,k=k)
pair_tecnical=do.call(rbind,all_tecs)
stat=get_strategy(pair_tecnical,y=year(max(raw$date)),yot=5,met='regLogistic')
portfolio2=get_portfolio(stat$prediction)%>%
  filter(date>'2019-01-01')
## Cluster 3
if(stringi::stri_detect(getwd(),fixed='cluster')){
  setwd('../cluster 3')
}else{
  setwd('cluster 3')
}
etfs=c(
  'XLI US Equity', #Industrial
  'XLB US Equity', #Material
  'XLF US Equity',#Financial
  'XLE US Equity'#Energi
)
k=5
all_pairs=data.frame(combn(unique(etfs),2),stringsAsFactors = FALSE)
all_pairs=lapply(all_pairs, c)
all_tecs=lapply(all_pairs,get_tec,raw=raw,k=k)
pair_tecnical=do.call(rbind,all_tecs)
stat=get_strategy(pair_tecnical,y=year(max(raw$date)),yot=5,met='regLogistic')
portfolio3=get_portfolio(stat$prediction)%>%
  filter(date>'2019-01-01')
## Cluster 4
if(stringi::stri_detect(getwd(),fixed='cluster')){
  setwd('../cluster 4')
}else{
  setwd('cluster 4')
}
etfs=c(
  'INDA US Equity', #India
  'THD US Equity', #Tailandia
  'EWM US Equity'# Malasya
)
k=5
all_pairs=data.frame(combn(unique(etfs),2),stringsAsFactors = FALSE)
all_pairs=lapply(all_pairs, c)
all_tecs=lapply(all_pairs,get_tec,raw=raw,k=k)
pair_tecnical=do.call(rbind,all_tecs)
stat=get_strategy(pair_tecnical,y=year(max(raw$date)),yot=5,met='regLogistic')
portfolio4=get_portfolio(stat$prediction)%>%
  filter(date>'2019-01-01')
## Cluster 5
if(stringi::stri_detect(getwd(),fixed='cluster')){
  setwd('../cluster 5')
}else{
  setwd('cluster 5')
}
etfs=c(
  'SPY US Equity', #US general
  'XLK US Equity', #Tech
  'XLY US Equity'#Cons Disc
)
k=5
all_pairs=data.frame(combn(unique(etfs),2),stringsAsFactors = FALSE)
all_pairs=lapply(all_pairs, c)
all_tecs=lapply(all_pairs,get_tec,raw=raw,k=k)
pair_tecnical=do.call(rbind,all_tecs)
stat=get_strategy(pair_tecnical,y=year(max(raw$date)),yot=5,met='regLogistic')
portfolio5=get_portfolio(stat$prediction)%>%
  filter(date>'2019-01-01')
# Return to the project root so the summary CSV lands next to the data.
if(stringi::stri_detect(getwd(),fixed='cluster')){
  setwd('..')
}
#### resumen
# Stack the five cluster portfolios, tag each row with its cluster id, keep
# only weekday 5 (Thursday under lubridate's default Sunday-start week --
# confirm this is the intended rebalancing day) and drop the last, possibly
# incomplete, date.
portafolios=portfolio1%>%
  mutate(cluster=1)%>%
  rbind(mutate(portfolio2,cluster=2))%>%
  rbind(mutate(portfolio3,cluster=3))%>%
  rbind(mutate(portfolio4,cluster=4))%>%
  rbind(mutate(portfolio5,cluster=5))%>%
  filter(wday(date)==5)%>%
  filter(date<max(date))
write.csv(portafolios,'portafolios.csv')
| |
#' Connect intervals that are close enough
#'
#' Collapses a data frame of time intervals into a (possibly) smaller data
#' frame in which any run of consecutive intervals whose gaps are at most
#' `distance` is merged into a single interval ("bout").
#'
#' @param interval Data frame of intervals ordered in time, with time columns
#'   `from` and `to`.
#' @param distance Maximum gap between two consecutive intervals for them to
#'   be connected into one (a lubridate duration; default 30 minutes).
#' @return A data frame with columns `bout`, `from`, `to`; never more rows
#'   than the original.
#'
#' @export
connectOverDistance=function(interval,distance=dminutes(30)){
  # Express the threshold as plain seconds so it can be compared directly
  # with the difftime (units = "secs") computed below.  BUG FIX: secDistance
  # was previously computed but never used; the comparison was made against
  # the lubridate Duration object instead.
  secDistance=distance/dseconds(1)
  interval=interval %>%
    mutate(lag=difftime(from,lag(to,1),units="secs"),
           lead=difftime(lead(from,1),to,units="secs"),
           # is the gap to the NEXT interval small enough to merge?
           closeEnough=lead<=secDistance)
  interval %>%
    # A new bout starts wherever the PREVIOUS interval was not close enough;
    # the first row has lag(closeEnough) == NA, treated as "no new bout".
    mutate(change=!lag(closeEnough),
           change=ifelse(is.na(change),0,change),
           bout=cumsum(change) ) %>%
    group_by(bout) %>%
    summarise(from=first(from),to=last(to))
}
| /R/connectOverDistance.R | no_license | muschellij2/rbouts | R | false | false | 932 | r | #' Connect intervals that are close enough
#' Generates a smaller data frame of connected intervals from a data frame of given intervals and a distance that allows two of them to be connected
#'
#' @param interval Initial data frame of intervals to be connected
#' @param distance Maximum distance between two intervals that allows the connection of both into just one.
#' @return A data frame of intervals, having fewer or equally many rows than the original
#'
#' @export
connectOverDistance=function(interval,distance=dminutes(30)){
secDistance=distance/dseconds(1)
interval=interval %>%
mutate(lag=difftime(from,lag(to,1),units="secs"),
lead=difftime(lead(from,1),to,units="secs"),
closeEnough=lead<=distance)
interval %>%
mutate(change=!lag(closeEnough),
change=ifelse(is.na(change),0,change),
bout=cumsum(change) ) %>%
group_by(bout) %>%
summarise(from=first(from),to=last(to))
}
|
# GC group analysis (GC presumably = Granger causality -- confirm): compare
# the directed indices IFG -> pSTS and pSTS -> IFG (right hemisphere) across
# all subjects.
library(ggplot2)
library(dplyr)
library(tidyr)
library(tidyverse)
# Hard-coded analysis directory; the GC index file is expected there.
setwd("/data1/2018_ActionDecoding/analysis_fc/Misc/BME295/")
# One row per subject; columns IFG2pSTS and pSTS2IFG hold the two indices.
GC = read.delim('AllSubGCIFGpSTS_RH.txt',header=T)
# Paired within-subject test of the two directions (result only printed).
t.test(GC$IFG2pSTS,GC$pSTS2IFG,paired = T)
# Scatter of one direction against the other with the identity line; the
# reference lines mark fixed values 0.0112 / 0.1363 -- presumably the group
# means of each direction (TODO confirm where these constants come from).
ggplot(GC,aes(x=IFG2pSTS,y=pSTS2IFG)) +
  geom_point()+
  geom_abline(slope =1, intercept = 0) +
  geom_hline(yintercept = 0.0112,linetype="dotted", color = "blue", size=1.5) +
  geom_vline(xintercept = 0.1363,linetype="dashed", color = "red", size=1.5) +
  labs(title = 'All subjects GC index',x='IFG on pSTS',y='pSTS on IFG')
# Save the most recently drawn ggplot into the working directory.
ggsave("pSTS_RH_IFG_RH_GC.png",width = 4,height = 4)
| /GC_GroupAnalysis.R | no_license | zhouxiaojue/GC_DCM | R | false | false | 608 | r | library(ggplot2)
library(dplyr)
library(tidyr)
library(tidyverse)
setwd("/data1/2018_ActionDecoding/analysis_fc/Misc/BME295/")
GC = read.delim('AllSubGCIFGpSTS_RH.txt',header=T)
t.test(GC$IFG2pSTS,GC$pSTS2IFG,paired = T)
ggplot(GC,aes(x=IFG2pSTS,y=pSTS2IFG)) +
geom_point()+
geom_abline(slope =1, intercept = 0) +
geom_hline(yintercept = 0.0112,linetype="dotted", color = "blue", size=1.5) +
geom_vline(xintercept = 0.1363,linetype="dashed", color = "red", size=1.5) +
labs(title = 'All subjects GC index',x='IFG on pSTS',y='pSTS on IFG')
ggsave("pSTS_RH_IFG_RH_GC.png",width = 4,height = 4)
|
findRuns <- function (mst,mst2,nv,nv1){
  # Count the "runs" of the Wald-Wolfowitz MST test: take every edge of the
  # minimal spanning tree `mst` and compare the group labels (vertex colors)
  # of its two endpoints as assigned in `mst2`; each differently-coloured
  # edge would be deleted, leaving (deleted edges + 1) runs.
  weight_matrix <- get.adjacency(mst, type = "lower", attr="weight")
  edge_ends <- which(weight_matrix != 0, arr.ind = TRUE, useNames = FALSE)
  # The -1 shift matches the 0-based vertex indexing used throughout this
  # file (cf. V(gr)[c(0:(nv1-1))] in the callers).
  ends_a <- V(mst2)[edge_ends[,1]-1]$color
  ends_b <- V(mst2)[edge_ends[,2]-1]$color
  1 + sum(ends_a != ends_b)
}
#########################################################################
# #
# This function performs the multivariate Wald-Wolfowitz test #
# #
#########################################################################
# 'data' is a p-by-nv matrix that supply the gene expression profiles to the function
# 'nv' is the total number of samples in both groups
# 'nv1' is the number of samples of the first group. Obviously (nv-nv1) is the number of samples of the second group
# 'p' is the number of genes in the gene set
# The syntax for calling this function is:
# p_value <- MVWWtest(data,nv1,print_decision=TRUE) (print_decision can be either TRUE or FALSE)
MVWWtest <- function (data,nv1,print_decision = FALSE) {
  # Multivariate Wald-Wolfowitz runs test on the minimal spanning tree (MST)
  # of the pooled samples.
  #   data : p-by-nv matrix; columns are samples, the first nv1 columns form
  #          group 1 and the remaining nv-nv1 columns form group 2.
  #   nv1  : number of samples in group 1.
  # Returns a permutation p-value for Ho: both groups share one distribution
  # (few runs in the observed labelling => groups are separated).
  library(igraph)
  library(combinat)
  dimensions <- dim(data)
  p <- dimensions[1]   # number of variables (not used further below)
  nv <- dimensions[2]  # total number of samples
  number_perm <- 1000 # number of random permutations
  # Total count of distinct group relabellings.  NOTE(review): factorial()
  # returns Inf for nv > 170; that still routes to the approximate branch,
  # but the exact count is unreliable for large nv.
  combinations <- factorial(nv) / (factorial(nv-nv1)*factorial(nv1))
  # Transpose so dist() treats samples (now rows) as the points.
  gt <- aperm(data, c(2,1))
  Wmat <- as.matrix(dist(gt, method = "euclidean", diag = TRUE, upper = TRUE, p = 2))
  gr <- graph.adjacency(Wmat, weighted = TRUE, mode = "undirected")
  # Tag vertices with their true group label (0-based vertex ids).
  V(gr)[c(0:(nv1-1))]$color <- "green"
  V(gr)[c(nv1:(nv-1))]$color <- "red"
  mst <- minimum.spanning.tree(gr)
  # Matrix of labelings already used; seeded with the observed labelling so
  # random permutations are drawn without replacement (and never equal the
  # observed one).
  permutations <- matrix(V(mst)$color, nv, 1)
  domain <- V(mst)$color
  #####################################################################################
  # if combinations > 3000, do an approximate permutation test (randomly picked       #
  # combinations with no replica). If nv1=nv/2, the condition is satisfied for nv>=14.#
  # if combinations < 3000, do full permutation test (all possible combinations with  #
  # no replica). Random search for 1000 distinctive samples out of a total number of  #
  # samples which is a little bit > 1000 will result in a prolonged loop execution    #
  #####################################################################################
  if (combinations > 3000) {
    runs <- array(0,c(1,number_perm))
    for (itr in 1:number_perm) {
      randperm <- sample(domain, replace = FALSE)
      mst2 <- mst
      # Re-draw until this permutation differs from every one used so far.
      while (sum(colSums(permutations == randperm) == nv) > 0)
      {
        randperm <- sample(domain, replace = FALSE)
      }
      permutations <- cbind(permutations,randperm)
      V(mst2)$color <- randperm
      runs[itr] <- findRuns(mst,mst2,nv,nv1)
    }
  } else {
    runs <- array(0,c(1,combinations)) # a vector to save results
    # Enumerate every choice of nv1 vertices to be labelled "green".
    co <- combn(c(1:nv), nv1)
    for (itr in 1:combinations) {
      mst2 <- mst
      V(mst2)$color <- "red"
      V(mst2)[co[,itr]-1]$color <- "green"   # -1: 0-based vertex ids
      runs[itr] <- findRuns(mst,mst2,nv,nv1)
    }
  }
  alpha <- 0.05
  # Observed run count (true labelling), then a one-sided permutation
  # p-value with the usual +1 continuity correction.
  runs_a <- findRuns(mst,mst,nv,nv1)
  p_value <- (sum(runs < runs_a) + 1) / (length(runs) + 1)
  if (print_decision == TRUE) {
    if (p_value < alpha) print(paste("p_value = ", p_value, " Reject Ho"), quote=FALSE) else print(paste("p_value = ", p_value, " Fail to reject Ho"), quote=FALSE)
  }
  p_value
}
findTestStatRKS <- function (radial_ranking,randperm,nv,nv1){
  # Kolmogorov-Smirnov-type statistic over the radial ordering of vertices:
  # visit the vertices in `radial_ranking` order and track the gap between
  # the running fraction of "green" labels (out of nv1) and "red" labels
  # (out of nv-nv1); return the maximal absolute gap, scaled by the usual
  # two-sample KS factor sqrt(n1*n2/(n1+n2)).
  visited <- randperm[radial_ranking]
  green_frac <- cumsum(visited == "green") / nv1
  red_frac <- cumsum(visited == "red") / (nv - nv1)
  largest_gap <- max(abs(green_frac - red_frac))
  sqrt((nv1 * (nv-nv1)) / (nv1 + (nv-nv1))) * largest_gap
}
#################################################################################
# #
# This function performs the multivariate radial Kolmogorov-Smirnov test #
# #
#################################################################################
# 'data' is a p-by-nv matrix that supply the gene expression profiles to the function
# 'nv' is the total number of samples in both groups
# 'nv1' is the number of samples of the first group. Obviously (nv-nv1) is the number of samples of the second group
# 'p' is the number of genes in the gene set
# The syntax for calling this function is:
# p_value <- MVKStest(data,nv1,print_decision=TRUE) (print_decision can be either TRUE or FALSE)
MVradialKStest <- function (data,nv1,print_decision = FALSE) {
  # Multivariate radial Kolmogorov-Smirnov test on the minimal spanning tree
  # of the pooled samples.
  #   data : p-by-nv matrix; columns are samples, the first nv1 columns form
  #          group 1 and the remaining nv-nv1 columns form group 2.
  #   nv1  : number of samples in group 1.
  # Vertices are ranked by their tree distance from the MST's center; the
  # KS-type statistic of findTestStatRKS over that ranking is compared
  # against its permutation distribution.
  library(igraph)
  library(combinat)
  dimensions <- dim(data)
  p <- dimensions[1]   # number of variables (unused below)
  nv <- dimensions[2]  # total number of samples
  number_perm <- 1000
  # NOTE(review): factorial() returns Inf for nv > 170 (still selects the
  # approximate branch, but the exact count is unreliable).
  combinations <- factorial(nv) / (factorial(nv-nv1)*factorial(nv1))
  gt <- aperm(data, c(2,1))  # samples as rows for dist()
  Wmat <- as.matrix(dist(gt, method = "euclidean", diag = TRUE, upper = TRUE, p = 2))
  gr <- graph.adjacency(Wmat, weighted = TRUE, mode = "undirected")
  # Tag vertices with their true group label (0-based vertex ids).
  V(gr)[c(0:(nv1-1))]$color <- "green"
  V(gr)[c(nv1:(nv-1))]$color <- "red"
  mst <- minimum.spanning.tree(gr)
  # Center of the tree: the vertex whose greatest distance to any other
  # vertex (eccentricity) is minimal; ties broken by taking the first.
  sp <- apply(shortest.paths(mst), 1, max)
  radius <- min(sp)
  center <- which(sp == radius)
  if (length(center)>1) center <- center[1]
  # Rank all vertices by increasing tree distance from the center.
  ranktree <- sort(shortest.paths(mst)[, center], decreasing = FALSE, index.return = TRUE)
  radial_ranking <- ranktree$ix
  # Labelings already used; seeded with the observed labelling so random
  # permutations are drawn without replacement.
  permutations <- matrix(V(mst)$color, nv, 1)
  domain <- V(mst)$color
  #####################################################################################
  # if combinations > 3000, do an approximate permutation test (randomly picked       #
  # combinations with no replica). If nv1=nv/2, the condition is satisfied for nv>=14.#
  # if combinations < 3000, do full permutation test (all possible combinations with  #
  # no replica). Random search for 1000 distinctive samples out of a total number of  #
  # samples which is a little bit > 1000 will result in a prolonged loop execution    #
  #####################################################################################
  if (combinations > 3000) {
    D <- array(0,c(1,number_perm))
    for (itr in 1:number_perm) {
      randperm <- sample(domain, replace = FALSE)
      # Re-draw until this permutation differs from every one used so far.
      while (sum(colSums(permutations == randperm) == nv) > 0)
      {
        randperm <- sample(domain, replace = FALSE)
      }
      permutations <- cbind(permutations,randperm)
      D[itr] <- findTestStatRKS(radial_ranking,randperm,nv,nv1)
    }
  } else {
    D <- array(0,c(1,combinations))
    # Enumerate every choice of nv1 vertices to be labelled "green".
    co <- combn(c(1:nv), nv1)
    for (itr in 1:combinations) {
      mst2 <- mst
      V(mst2)$color <- "red"
      V(mst2)[co[,itr]-1]$color <- "green"   # -1: 0-based vertex ids
      randperm <- V(mst2)$color
      D[itr] <- findTestStatRKS(radial_ranking,randperm,nv,nv1)
    }
  }
  alpha <- 0.05
  # Observed statistic, then a one-sided permutation p-value with the usual
  # +1 continuity correction (large D => groups differ).
  D_a <- findTestStatRKS(radial_ranking,domain,nv,nv1)
  p_value <- (sum(D > D_a) + 1) / (length(D) + 1)
  if (print_decision == TRUE) {
    if (p_value < alpha) print(paste("p_value = ", p_value, " Reject Ho"), quote=FALSE) else print(paste("p_value = ", p_value, " Fail to reject Ho"), quote=FALSE)
  }
  p_value
}
findTestStatKS <- function (KSranking,randperm,nv,nv1){
  # Same KS-type statistic as findTestStatRKS, but over the HDP ranking.
  # The ranking is shifted by +1 before indexing: HDP.ranking() works with
  # the 0-based vertex ids used elsewhere in this file.
  visited <- randperm[KSranking + 1]
  green_frac <- cumsum(visited == "green") / nv1
  red_frac <- cumsum(visited == "red") / (nv - nv1)
  largest_gap <- max(abs(green_frac - red_frac))
  sqrt((nv1 * (nv-nv1)) / (nv1 + (nv-nv1))) * largest_gap
}
HDP.ranking <- function(mst,nv){
  # Height-directed-preorder (HDP) style ranking of the MST vertices.
  # The tree is rooted at one endpoint of its diameter; root-to-leaf paths
  # are ordered (deepest subtrees last, ties broken by distance from the
  # root) and vertices are emitted in the order they first appear along the
  # reordered paths.  Works with 0-based vertex ids; callers shift by +1.
  # Root: one endpoint of the tree diameter (farthest pair of vertices).
  rr <- farthest.nodes(mst, directed = FALSE, unconnected = TRUE)
  root <- floor(rr[1])
  # Leaves of the tree (degree 1), excluding the root itself.
  terminal_nodes <- which(degree(mst) == 1)
  ltn <- length(terminal_nodes) - 1
  tn <- terminal_nodes - 1   # 0-based leaf ids
  tn <- tn[tn != root]
  # One shortest path (list of vertex ids) from the root to every leaf.
  sp <- get.shortest.paths(mst, root, to = tn)
  path_len <- shortest.paths(mst)
  # Fractional tie-breaker in [0, 1]: leaves farther from the root win ties
  # between subtrees of equal depth.
  break_ties <- path_len[root+1, tn+1] / max(path_len)
  depth <- array(0, c(1,ltn))
  KSranks <- root
  for (k in 1:ltn) {
    depth[k] <- length(sp[[k]])
  }
  md <- max(depth)
  adjusted_depth <- depth + break_ties
  col_nodes <- array(0, c(1,ltn))
  alphabets <- rep("",ltn)
  # Build a sort key ("alphabets") per root-to-leaf path: at each tree level
  # where the paths branch, append a letter so that subtrees of smaller
  # (adjusted) depth sort first.
  for (col in seq(1,md,by=1)) {
    for (row in seq(1,ltn,by=1)) {
      col_nodes[row] <- sp[[row]][col]
    }
    fcn <- factor(col_nodes)
    collevels <- levels(fcn)
    llev <- length(collevels)
    if (llev > 1) {
      # Deepest leaf reachable through each branch at this level.
      mpg <- tapply(adjusted_depth,fcn,max)
      sortmpg <- sort(mpg, decreasing = FALSE, index.return = TRUE)
      smpg <- sortmpg$ix
      sorted_levels <- collevels[smpg]
      for (lind in seq(1,length(smpg),by=1)) {
        alphabets[which(col_nodes==sorted_levels[lind])]<- paste(alphabets[which(col_nodes==sorted_levels[lind])], letters[lind], sep="")
      }
    }
  }
  # Reorder the paths by their accumulated keys, then emit each vertex the
  # first time it is encountered.
  newranks <- sort(alphabets, decreasing = FALSE, index.return = TRUE)
  spm <- as.matrix(sp)
  sp_new <- spm[newranks$ix ,]
  sp_new <- as.matrix(sp_new)
  for (k in 1:ltn) {
    len <- length(sp_new[[k]])
    for (u in 1:len) {
      if (sum(KSranks == sp_new[[k]][u]) == 0){
        KSranks <- c(KSranks,sp_new[[k]][u]) }
    }
  }
  KSranks
}
#########################################################################
# #
# This function performs the multivariate Kolmogorov-Smirnov test #
# #
#########################################################################
# 'data' is a p-by-nv matrix that supply the gene expression profiles to the function
# 'nv' is the total number of samples in both groups
# 'nv1' is the number of samples of the first group. Obviously (nv-nv1) is the number of samples of the second group
# 'p' is the number of genes in the gene set
# The syntax for calling this function is:
# p_value <- MVKStest(data,nv1,print_decision=TRUE) (print_decision can be either TRUE or FALSE)
MVKSHDPtest <- function (data,nv1,print_decision = FALSE) {
library(igraph)
library(combinat)
dimensions <- dim(data)
p <- dimensions[1]
nv <- dimensions[2]
number_perm <- 1000
combinations <- factorial(nv) / (factorial(nv-nv1)*factorial(nv1))
gt <- aperm(data, c(2,1))
Wmat <- as.matrix(dist(gt, method = "euclidean", diag = TRUE, upper = TRUE, p = 2))
gr <- graph.adjacency(Wmat, weighted = TRUE, mode = "undirected")
V(gr)[c(0:(nv1-1))]$color <- "green"
V(gr)[c(nv1:(nv-1))]$color <- "red"
mst <- minimum.spanning.tree(gr)
KSranking <- HDP.ranking(mst,nv)
permutations <- matrix(V(mst)$color, nv, 1)
domain <- V(mst)$color
#####################################################################################
# if combinations > 3000, do an approximate permutation test (randomly picked #
# combinations withno replica). If nv1=nv/2, the condition is satisfied for nv>=14. #
# if combinations < 3000, do full permutation test (all possible combinations with #
# no replica). Random search for 1000 distictive samples out of a total number of #
# samples which is little bit > 1000 will result in a prolonged loop execution #
#####################################################################################
if (combinations > 3000) {
D <- array(0,c(1,number_perm))
for (itr in 1:number_perm) {
randperm <- sample(domain, replace = FALSE)
while (sum(colSums(permutations == randperm) == nv) > 0)
{
randperm <- sample(domain, replace = FALSE)
}
permutations <- cbind(permutations,randperm)
D[itr] <- findTestStatKS(KSranking,randperm,nv,nv1)
}
} else {
D <- array(0,c(1,combinations))
co <- combn(c(1:nv), nv1)
for (itr in 1:combinations) {
mst2 <- mst
V(mst2)$color <- "red"
V(mst2)[co[,itr]-1]$color <- "green"
randperm <- V(mst2)$color
D[itr] <- findTestStatKS(KSranking,randperm,nv,nv1)
}
}
alpha <- 0.05
D_a <- findTestStatKS(KSranking,domain,nv,nv1)
p_value <- (sum(D > D_a) + 1) / (length(D) + 1)
if (print_decision == TRUE) {
if (p_value < alpha) print(paste("p_value = ", p_value, " Reject Ho"), quote=FALSE) else print(paste("p_value = ", p_value, " Fail to reject Ho"), quote=FALSE)
}
p_value
} | /mst_code_from_pub.R | no_license | rmflight/gsa_mst | R | false | false | 11,567 | r | findRuns <- function (mst,mst2,nv,nv1){
mstWM <- get.adjacency(mst, type = "lower", attr="weight")
edgeind <- which(mstWM != 0, arr.ind = TRUE, useNames = FALSE)
run_count <- 1 + sum(V(mst2)[edgeind[,1]-1]$color != V(mst2)[edgeind[,2]-1]$color) # run_count = no. of deleted edges + 1
run_count
}
#########################################################################
# #
# This function performs the multivariate Wald-Wolfowitz test #
# #
#########################################################################
# 'data' is a p-by-nv matrix that supply the gene expression profiles to the function
# 'nv' is the total number of samples in both groups
# 'nv1' is the number of samples of the first group. Obviously (nv-nv1) is the number of samples of the second group
# 'p' is the number of genes in the gene set
# The syntax for calling this function is:
# p_value <- MVWWtest(data,nv1,print_decision=TRUE) (print_decision can be either TRUE or FALSE)
MVWWtest <- function (data,nv1,print_decision = FALSE) {
library(igraph)
library(combinat)
dimensions <- dim(data)
p <- dimensions[1]
nv <- dimensions[2]
number_perm <- 1000 # number of random permutations
combinations <- factorial(nv) / (factorial(nv-nv1)*factorial(nv1))
gt <- aperm(data, c(2,1))
Wmat <- as.matrix(dist(gt, method = "euclidean", diag = TRUE, upper = TRUE, p = 2))
gr <- graph.adjacency(Wmat, weighted = TRUE, mode = "undirected")
V(gr)[c(0:(nv1-1))]$color <- "green"
V(gr)[c(nv1:(nv-1))]$color <- "red"
mst <- minimum.spanning.tree(gr)
permutations <- matrix(V(mst)$color, nv, 1)
domain <- V(mst)$color
#####################################################################################
# if combinations > 3000, do an approximate permutation test (randomly picked #
# combinations withno replica). If nv1=nv/2, the condition is satisfied for nv>=14. #
# if combinations < 3000, do full permutation test (all possible combinations with #
# no replica). Random search for 1000 distictive samples out of a total number of #
# samples which is little bit > 1000 will result in a prolonged loop execution #
#####################################################################################
if (combinations > 3000) {
runs <- array(0,c(1,number_perm))
for (itr in 1:number_perm) {
randperm <- sample(domain, replace = FALSE)
mst2 <- mst
while (sum(colSums(permutations == randperm) == nv) > 0)
{
randperm <- sample(domain, replace = FALSE)
}
permutations <- cbind(permutations,randperm)
V(mst2)$color <- randperm
runs[itr] <- findRuns(mst,mst2,nv,nv1)
}
} else {
runs <- array(0,c(1,combinations)) # a vector to save results
co <- combn(c(1:nv), nv1)
for (itr in 1:combinations) {
mst2 <- mst
V(mst2)$color <- "red"
V(mst2)[co[,itr]-1]$color <- "green"
runs[itr] <- findRuns(mst,mst2,nv,nv1)
}
}
alpha <- 0.05
runs_a <- findRuns(mst,mst,nv,nv1)
p_value <- (sum(runs < runs_a) + 1) / (length(runs) + 1)
if (print_decision == TRUE) {
if (p_value < alpha) print(paste("p_value = ", p_value, " Reject Ho"), quote=FALSE) else print(paste("p_value = ", p_value, " Fail to reject Ho"), quote=FALSE)
}
p_value
}
findTestStatRKS <- function (radial_ranking,randperm,nv,nv1){
ri <- 0
si <- 0
di <- array(0, c(1,nv))
#radial_ranking <- radial_ranking + 1
for (i in 1:nv) {
ri <- sum(randperm[radial_ranking[1:i]] == "green")
si <- sum(randperm[radial_ranking[1:i]] == "red")
di[i] <- (ri/nv1) - (si/(nv-nv1))
}
D <- sqrt((nv1 * (nv-nv1)) / (nv1 + (nv-nv1))) * max(abs(di))
D
}
#################################################################################
# #
# This function performs the multivariate radial Kolmogorov-Smirnov test #
# #
#################################################################################
# 'data' is a p-by-nv matrix that supply the gene expression profiles to the function
# 'nv' is the total number of samples in both groups
# 'nv1' is the number of samples of the first group. Obviously (nv-nv1) is the number of samples of the second group
# 'p' is the number of genes in the gene set
# The syntax for calling this function is:
# p_value <- MVKStest(data,nv1,print_decision=TRUE) (print_decision can be either TRUE or FALSE)
MVradialKStest <- function (data,nv1,print_decision = FALSE) {
library(igraph)
library(combinat)
dimensions <- dim(data)
p <- dimensions[1]
nv <- dimensions[2]
number_perm <- 1000
combinations <- factorial(nv) / (factorial(nv-nv1)*factorial(nv1))
gt <- aperm(data, c(2,1))
Wmat <- as.matrix(dist(gt, method = "euclidean", diag = TRUE, upper = TRUE, p = 2))
gr <- graph.adjacency(Wmat, weighted = TRUE, mode = "undirected")
V(gr)[c(0:(nv1-1))]$color <- "green"
V(gr)[c(nv1:(nv-1))]$color <- "red"
mst <- minimum.spanning.tree(gr)
sp <- apply(shortest.paths(mst), 1, max)
radius <- min(sp)
center <- which(sp == radius)
if (length(center)>1) center <- center[1]
ranktree <- sort(shortest.paths(mst)[, center], decreasing = FALSE, index.return = TRUE)
radial_ranking <- ranktree$ix
permutations <- matrix(V(mst)$color, nv, 1)
domain <- V(mst)$color
#####################################################################################
# if combinations > 3000, do an approximate permutation test (randomly picked #
# combinations withno replica). If nv1=nv/2, the condition is satisfied for nv>=14. #
# if combinations < 3000, do full permutation test (all possible combinations with #
# no replica). Random search for 1000 distictive samples out of a total number of #
# samples which is little bit > 1000 will result in a prolonged loop execution #
#####################################################################################
if (combinations > 3000) {
D <- array(0,c(1,number_perm))
for (itr in 1:number_perm) {
randperm <- sample(domain, replace = FALSE)
while (sum(colSums(permutations == randperm) == nv) > 0)
{
randperm <- sample(domain, replace = FALSE)
}
permutations <- cbind(permutations,randperm)
D[itr] <- findTestStatRKS(radial_ranking,randperm,nv,nv1)
}
} else {
D <- array(0,c(1,combinations))
co <- combn(c(1:nv), nv1)
for (itr in 1:combinations) {
mst2 <- mst
V(mst2)$color <- "red"
V(mst2)[co[,itr]-1]$color <- "green"
randperm <- V(mst2)$color
D[itr] <- findTestStatRKS(radial_ranking,randperm,nv,nv1)
}
}
alpha <- 0.05
D_a <- findTestStatRKS(radial_ranking,domain,nv,nv1)
p_value <- (sum(D > D_a) + 1) / (length(D) + 1)
if (print_decision == TRUE) {
if (p_value < alpha) print(paste("p_value = ", p_value, " Reject Ho"), quote=FALSE) else print(paste("p_value = ", p_value, " Fail to reject Ho"), quote=FALSE)
}
p_value
}
findTestStatKS <- function (KSranking,randperm,nv,nv1){
ri <- 0
si <- 0
di <- array(0, c(1,nv))
KSranking <- KSranking + 1
for (i in 1:nv) {
ri <- sum(randperm[KSranking[1:i]] == "green")
si <- sum(randperm[KSranking[1:i]] == "red")
di[i] <- (ri/nv1) - (si/(nv-nv1))
}
D <- sqrt((nv1 * (nv-nv1)) / (nv1 + (nv-nv1))) * max(abs(di))
D
}
HDP.ranking <- function(mst,nv){
rr <- farthest.nodes(mst, directed = FALSE, unconnected = TRUE)
root <- floor(rr[1])
terminal_nodes <- which(degree(mst) == 1)
ltn <- length(terminal_nodes) - 1
tn <- terminal_nodes - 1
tn <- tn[tn != root]
sp <- get.shortest.paths(mst, root, to = tn)
path_len <- shortest.paths(mst)
break_ties <- path_len[root+1, tn+1] / max(path_len)
depth <- array(0, c(1,ltn))
KSranks <- root
for (k in 1:ltn) {
depth[k] <- length(sp[[k]])
}
md <- max(depth)
adjusted_depth <- depth + break_ties
col_nodes <- array(0, c(1,ltn))
alphabets <- rep("",ltn)
for (col in seq(1,md,by=1)) {
for (row in seq(1,ltn,by=1)) {
col_nodes[row] <- sp[[row]][col]
}
fcn <- factor(col_nodes)
collevels <- levels(fcn)
llev <- length(collevels)
if (llev > 1) {
mpg <- tapply(adjusted_depth,fcn,max)
sortmpg <- sort(mpg, decreasing = FALSE, index.return = TRUE)
smpg <- sortmpg$ix
sorted_levels <- collevels[smpg]
for (lind in seq(1,length(smpg),by=1)) {
alphabets[which(col_nodes==sorted_levels[lind])]<- paste(alphabets[which(col_nodes==sorted_levels[lind])], letters[lind], sep="")
}
}
}
newranks <- sort(alphabets, decreasing = FALSE, index.return = TRUE)
spm <- as.matrix(sp)
sp_new <- spm[newranks$ix ,]
sp_new <- as.matrix(sp_new)
for (k in 1:ltn) {
len <- length(sp_new[[k]])
for (u in 1:len) {
if (sum(KSranks == sp_new[[k]][u]) == 0){
KSranks <- c(KSranks,sp_new[[k]][u]) }
}
}
KSranks
}
#########################################################################
# #
# This function performs the multivariate Kolmogorov-Smirnov test #
# #
#########################################################################
# 'data' is a p-by-nv matrix that supply the gene expression profiles to the function
# 'nv' is the total number of samples in both groups
# 'nv1' is the number of samples of the first group. Obviously (nv-nv1) is the number of samples of the second group
# 'p' is the number of genes in the gene set
# The syntax for calling this function is:
# p_value <- MVKStest(data,nv1,print_decision=TRUE) (print_decision can be either TRUE or FALSE)
MVKSHDPtest <- function (data,nv1,print_decision = FALSE) {
  # Multivariate Kolmogorov-Smirnov test over the HDP ranking of the MST.
  #   data : p-by-nv matrix; columns are samples, the first nv1 columns form
  #          group 1 and the remaining nv-nv1 columns form group 2.
  #   nv1  : number of samples in group 1.
  # Vertices are ordered by HDP.ranking(); findTestStatKS over that ordering
  # is compared against its permutation distribution.
  library(igraph)
  library(combinat)
  dimensions <- dim(data)
  p <- dimensions[1]   # number of variables (unused below)
  nv <- dimensions[2]  # total number of samples
  number_perm <- 1000
  # NOTE(review): factorial() returns Inf for nv > 170 (still selects the
  # approximate branch, but the exact count is unreliable).
  combinations <- factorial(nv) / (factorial(nv-nv1)*factorial(nv1))
  gt <- aperm(data, c(2,1))  # samples as rows for dist()
  Wmat <- as.matrix(dist(gt, method = "euclidean", diag = TRUE, upper = TRUE, p = 2))
  gr <- graph.adjacency(Wmat, weighted = TRUE, mode = "undirected")
  # Tag vertices with their true group label (0-based vertex ids).
  V(gr)[c(0:(nv1-1))]$color <- "green"
  V(gr)[c(nv1:(nv-1))]$color <- "red"
  mst <- minimum.spanning.tree(gr)
  KSranking <- HDP.ranking(mst,nv)
  # Labelings already used; seeded with the observed labelling so random
  # permutations are drawn without replacement.
  permutations <- matrix(V(mst)$color, nv, 1)
  domain <- V(mst)$color
  #####################################################################################
  # if combinations > 3000, do an approximate permutation test (randomly picked       #
  # combinations with no replica). If nv1=nv/2, the condition is satisfied for nv>=14.#
  # if combinations < 3000, do full permutation test (all possible combinations with  #
  # no replica). Random search for 1000 distinctive samples out of a total number of  #
  # samples which is a little bit > 1000 will result in a prolonged loop execution    #
  #####################################################################################
  if (combinations > 3000) {
    D <- array(0,c(1,number_perm))
    for (itr in 1:number_perm) {
      randperm <- sample(domain, replace = FALSE)
      # Re-draw until this permutation differs from every one used so far.
      while (sum(colSums(permutations == randperm) == nv) > 0)
      {
        randperm <- sample(domain, replace = FALSE)
      }
      permutations <- cbind(permutations,randperm)
      D[itr] <- findTestStatKS(KSranking,randperm,nv,nv1)
    }
  } else {
    D <- array(0,c(1,combinations))
    # Enumerate every choice of nv1 vertices to be labelled "green".
    co <- combn(c(1:nv), nv1)
    for (itr in 1:combinations) {
      mst2 <- mst
      V(mst2)$color <- "red"
      V(mst2)[co[,itr]-1]$color <- "green"   # -1: 0-based vertex ids
      randperm <- V(mst2)$color
      D[itr] <- findTestStatKS(KSranking,randperm,nv,nv1)
    }
  }
  alpha <- 0.05
  # Observed statistic, then a one-sided permutation p-value with the usual
  # +1 continuity correction (large D => groups differ).
  D_a <- findTestStatKS(KSranking,domain,nv,nv1)
  p_value <- (sum(D > D_a) + 1) / (length(D) + 1)
  if (print_decision == TRUE) {
    if (p_value < alpha) print(paste("p_value = ", p_value, " Reject Ho"), quote=FALSE) else print(paste("p_value = ", p_value, " Fail to reject Ho"), quote=FALSE)
  }
  p_value
}
## Overall Description of what the Functions do
## - Matrix inversion is usually a costly computation, so
## there may be some benefit to caching the inverse of a matrix
## rather than computing it repeatedly.
## - Below are two functions that cache the inverse of a matrix.
## 1. makeCacheMatrix: creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
  ## Build a "cache-aware matrix": a list of closures that share the matrix
  ## `x` and a cached inverse in this function's environment, so the inverse
  ## can be stored once and reused (see cacheSolve).
  inv <- NULL
  set <- function(y) {
    ## Replacing the matrix invalidates any previously cached inverse.
    x <<- y
    inv <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    inv <<- inverse
  }
  getinverse <- function() {
    inv
  }
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## 2. cacheSolve: computes the inverse of the special "matrix" returned by
## makeCacheMatrix function.
## - If the inverse has already been calculated(and the matrix has not changed),
## then cacheSolve should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
i <- x$getinverse()
if(!is.null(i)) {
message("getting cached data")
return(i)
}
data <- x$get()
i <- solve(data)
x$setinverse(i)
i
} | /cachematrix.R | no_license | stellaseong/ProgrammingAssignment2 | R | false | false | 1,428 | r | ## Overall Description of what the Functions do
## - Matrix inversion is usually a costly computation, so
## there may be some benefit to caching the inverse of a matrix
## rather than computing it repeatedly.
## - Below are two functions that cache the inverse of a matrix.
## 1. makeCacheMatrix: creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
# m <- NULL
i <- NULL
set <- function(y) {
x <<- y
i <<- NULL
}
get <- function() x
# setmean <- function(mean) m <<- mean
setinverse <- function(inverse) i <<-inverse
# getmean <- function() m
getinverse <- function() i
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## 2. cacheSolve: computes the inverse of the special "matrix" returned by
## makeCacheMatrix function.
## - If the inverse has already been calculated(and the matrix has not changed),
## then cacheSolve should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
  ## Return the inverse of the special "matrix" 'x' built by
  ## makeCacheMatrix(), computing it at most once.
  ##
  ## x   : list with get/set/getinverse/setinverse closures
  ## ... : extra arguments forwarded to solve() (e.g. a tolerance)
  i <- x$getinverse()
  if(!is.null(i)) {
    message("getting cached data")
    return(i)
  }
  data <- x$get()
  ## BUG FIX: '...' was accepted by the signature but silently dropped;
  ## forward it to solve() as advertised.
  i <- solve(data, ...)
  x$setinverse(i)
  i
}
# Exploratory Data Analysis -- plot3: energy sub-metering over 2007-02-01/02.
# Reads the UCI "household power consumption" file from a hard-coded
# absolute Windows path -- adjust before running on another machine.
dataFile <- "C:/Users/Ashutosh/Desktop/Education/Johns Hopkins Data Science/Exploratory Data Analysis/Exploratory Data Analysis/ExData_Plotting1/household_power_consumption.txt"
# Semicolon-separated; keep strings as character so the date match below works
powerdata <- read.table(dataFile, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
# Keep only the two target days (dates are stored as d/m/Y strings)
subSetData <- powerdata[powerdata$Date %in% c("1/2/2007","2/2/2007") ,]
# Combine Date and Time columns into POSIXlt timestamps for the x axis
datetime <- strptime(paste(subSetData$Date, subSetData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
# NOTE(review): globalActivePower is computed but never used in this plot
globalActivePower <- as.numeric(subSetData$Global_active_power)
subMetering1 <- as.numeric(subSetData$Sub_metering_1)
subMetering2 <- as.numeric(subSetData$Sub_metering_2)
subMetering3 <- as.numeric(subSetData$Sub_metering_3)
# Render to a 480x480 PNG; the matching dev.off() follows this block
png("plot3.png", width=480, height=480)
plot(datetime, subMetering1, type="l", ylab="Energy Submetering", xlab="")
lines(datetime, subMetering2, type="l", col="red")
lines(datetime, subMetering3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
dev.off() | /plot3.R | no_license | AshuAnshu/ExData_Plotting1 | R | false | false | 1,026 | r | dataFile <- "C:/Users/Ashutosh/Desktop/Education/Johns Hopkins Data Science/Exploratory Data Analysis/Exploratory Data Analysis/ExData_Plotting1/household_power_consumption.txt"
powerdata <- read.table(dataFile, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
subSetData <- powerdata[powerdata$Date %in% c("1/2/2007","2/2/2007") ,]
datetime <- strptime(paste(subSetData$Date, subSetData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
globalActivePower <- as.numeric(subSetData$Global_active_power)
subMetering1 <- as.numeric(subSetData$Sub_metering_1)
subMetering2 <- as.numeric(subSetData$Sub_metering_2)
subMetering3 <- as.numeric(subSetData$Sub_metering_3)
png("plot3.png", width=480, height=480)
plot(datetime, subMetering1, type="l", ylab="Energy Submetering", xlab="")
lines(datetime, subMetering2, type="l", col="red")
lines(datetime, subMetering3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
dev.off() |
# Fit an elastic-net regression (glmnet) on the liver training set and
# append the fitted path summary to liver_051.txt.
library(glmnet)
# Column 1 is the response; feature columns start at column 4.
# NOTE(review): head=T uses the reassignable alias T -- prefer header=TRUE.
mydata = read.table("../../../../TrainingSet/FullSet/AvgRank/liver.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
# Fixed seed so the 10-fold CV split is reproducible
set.seed(123)
# Elastic net with alpha = 0.4, gaussian response, model selection by MAE
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.4,family="gaussian",standardize=TRUE)
# Redirect print output into the log file (append mode), then restore
sink('./liver_051.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
| /Model/EN/AvgRank/liver/liver_051.R | no_license | esbgkannan/QSMART | R | false | false | 343 | r | library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/AvgRank/liver.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.4,family="gaussian",standardize=TRUE)
sink('./liver_051.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
#!/usr/bin/env Rscript
# Here we simply download the main data sources provided by Prof. John Hunter
# (PSU) as found here: http://personal.psu.edu/drh20/code/btmatlab/
library(tidyverse)
# Download the raw NASCAR 2002 datasets (.mat, .txt, .xls) from
# John Hunter's PSU page into `data/nascar/`.
#
# Fixes over the original:
#  - downloads in binary mode (`mode = "wb"`); the default text mode
#    corrupts the binary .mat/.xls files on Windows.
#  - creates the destination directory first, so download.file() does not
#    fail when `data/nascar/` does not exist yet.
#
# Called for its side effects; returns the destination paths invisibly.
get_nascar_2002_dat <- function(){
  fname_pfx <- "nascar2002"
  fname_ext <- c(".mat", ".txt", ".xls")
  fnames <- glue::glue("{fname_pfx}{fname_ext}")
  base_url <- "http://personal.psu.edu/drh20/code/btmatlab/"
  furls <- glue::glue("{base_url}{fnames}")
  dest_dir <- here::here("data", "nascar")
  # Make sure the target folder exists before writing into it
  dir.create(dest_dir, recursive = TRUE, showWarnings = FALSE)
  fpaths <- glue::glue("{dest_dir}/{fnames}")
  # Download all datasets to the specified nascar folder;
  # mode = "wb" keeps binary files byte-identical on all platforms
  purrr::walk2(.x = furls, .y = fpaths,
               .f = ~utils::download.file(url = .x, destfile = .y, mode = "wb"))
  invisible(fpaths)
}
# Download the (raw) NASCAR 2002 data
print("Getting NASCAR 2002 datasets...")
get_nascar_2002_dat()
print(glue::glue("\nDONE!\nPlease see {here::here('data', 'nascar')} for the raw NASCAR 2002 datasets")) | /R/get_nascar_data.R | permissive | shamindras/bttv-aistats2020 | R | false | false | 907 | r | #!/usr/bin/env Rscript
# Here we simply download the main data sources provided by Prof. John Hunter
# (PSU) as found here: http://personal.psu.edu/drh20/code/btmatlab/
library(tidyverse)
# Download the raw NASCAR 2002 datasets (.mat, .txt, .xls) from
# John Hunter's PSU page into `data/nascar/`.
#
# Fixes over the original:
#  - downloads in binary mode (`mode = "wb"`); the default text mode
#    corrupts the binary .mat/.xls files on Windows.
#  - creates the destination directory first, so download.file() does not
#    fail when `data/nascar/` does not exist yet.
#
# Called for its side effects; returns the destination paths invisibly.
get_nascar_2002_dat <- function(){
  fname_pfx <- "nascar2002"
  fname_ext <- c(".mat", ".txt", ".xls")
  fnames <- glue::glue("{fname_pfx}{fname_ext}")
  base_url <- "http://personal.psu.edu/drh20/code/btmatlab/"
  furls <- glue::glue("{base_url}{fnames}")
  dest_dir <- here::here("data", "nascar")
  # Make sure the target folder exists before writing into it
  dir.create(dest_dir, recursive = TRUE, showWarnings = FALSE)
  fpaths <- glue::glue("{dest_dir}/{fnames}")
  # Download all datasets to the specified nascar folder;
  # mode = "wb" keeps binary files byte-identical on all platforms
  purrr::walk2(.x = furls, .y = fpaths,
               .f = ~utils::download.file(url = .x, destfile = .y, mode = "wb"))
  invisible(fpaths)
}
# Download the (raw) NASCAR 2002 data
print("Getting NASCAR 2002 datasets...")
get_nascar_2002_dat()
print(glue::glue("\nDONE!\nPlease see {here::here('data', 'nascar')} for the raw NASCAR 2002 datasets")) |
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 151596
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 151596
c
c Input Parameter (command line, file):
c input filename QBFLIB/Miller-Marin/trafficlight-controller/tlc02-uniform-depth-250.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 57480
c no.of clauses 151596
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 151596
c
c QBFLIB/Miller-Marin/trafficlight-controller/tlc02-uniform-depth-250.qdimacs 57480 151596 E1 [] 0 251 56478 151596 NONE
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Miller-Marin/trafficlight-controller/tlc02-uniform-depth-250/tlc02-uniform-depth-250.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 697 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 151596
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 151596
c
c Input Parameter (command line, file):
c input filename QBFLIB/Miller-Marin/trafficlight-controller/tlc02-uniform-depth-250.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 57480
c no.of clauses 151596
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 151596
c
c QBFLIB/Miller-Marin/trafficlight-controller/tlc02-uniform-depth-250.qdimacs 57480 151596 E1 [] 0 251 56478 151596 NONE
|
#!/usr/bin/env Rscript
# run from terminal:
# Rscript --vanilla vcf2pca.R simulated.vcf.gz pops_sim.txt 5
# ===============================================================
# This script used the package SNPrelate to produce a PCA on a given VCF file.
# The number of PCs considdered can be varied.
# ---------------------------------------------------------------
# The produced output contains:
# - 2 plots
# - 1 combination of "%exp var" ~ "PC" and a grid of all possible "PCy" ~ "PCx"
# - 1 "PC2" ~ "PC1" with the repective 9 SNPs with top loadings for PC1 & PC2 indicated
# - 3 tables
# - % exp var per PC
# - individual scores on PCx - PCz
# - SNPs with top loadings
# ===============================================================
# args <- c('simulated.vcf.gz', 'pops_sim.txt', '5')
args = commandArgs(trailingOnly=FALSE)
args = args[7:10]
print(args)
library(SNPRelate)
library(stringr)
library(tidyverse)
library(hypoimg)
library(hypogen)
library(GenomicOriginsScripts)
# custom functions -------------------
# Take the first n_ev eigenvector columns of `mat` and return them as a
# tibble with columns named EV01, EV02, ... (zero-padded to width 2).
# Assumes `mat` has at least n_ev columns -- TODO confirm at call sites.
get_pca_scores <- function(mat,n_ev){mat[,1:n_ev] %>%
  as_tibble() %>%
  set_names(.,nm=str_c("EV",str_pad(width = 2,pad = '0',1:n_ev)))}
filter_fun <- function(x, y) x >= y
# Extract principal components p and q from the score table `df` and return
# them as an (id, x, y) tibble tagged with facet labels of the form
# "EVxx (NN.NN%)" built from the explained-variance table.
# NOTE(review): the `exp_var` argument is never used -- the labels read the
# global `pc_percent` instead; consider passing it through explicitly.
ct_subset <- function(p,q,df,exp_var){
  # Column 1 of df is the sample id; PC columns start at column 2
  id <- names(df)[1]
  P <- names(df)[p+1]
  Q <- names(df)[q+1]
  P_lab <- str_c(pc_percent[p,2],' (',round(pc_percent[p,3],2),'%)')
  Q_lab <- str_c(pc_percent[q,2],' (',round(pc_percent[q,3],2),'%)')
  df %>%
    select(id,P,Q) %>%
    set_names(.,nm = c('id','x','y')) %>%
    mutate(run_x = P_lab,
           run_y = Q_lab)
}
# loadings related
scale_var <- function(x,exp_var){x * exp_var}
# Build a table of the n_snps SNPs with the strongest loadings, ordered by
# the expression string `EV` (e.g. '-abs(EV01)'), and attach a colour per
# SNP generated by `clr_scheme` (a palette function; `...` forwarded to it).
# NOTE(review): relies on globals `n_ev` and `pc_percent`, and on the
# sibling helper `scale_var` -- confirm they are defined before use.
# NOTE(review): as.tibble() and arrange_() are deprecated; prefer
# as_tibble() and arrange() with tidy evaluation.
loading_set <- function(df,snpid,EV,n_snps,clr_scheme,...){
  clr <- do.call(clr_scheme,list(n = n_snps,...))
  # Transpose so rows are SNPs, columns are eigenvectors EV01..EVnn
  snp_load <- df %>%
    t() %>%
    as.tibble() %>%
    set_names(.,nm=str_c("EV",str_pad(width = 2,pad = '0',1:n_ev))) %>%
    bind_cols(tibble(snp = str_c('snp',nm=snpid),
                     snpnr = snpid),.)%>%
    mutate(total = rowSums(abs(scale_var(x = df,exp_var = pc_percent$exp_var)) %>%
                             t())) %>%
    arrange_(EV) %>%
    mutate(idx = row_number()) %>% ungroup() %>%
    filter(idx <= n_snps) %>%
    mutate(clr = clr)
}
prep_laoding_ev <- function(df,EV){df %>% mutate(ev = EV) %>% select(snpnr,ev,idx) }
# ------------------------------------
# config:
set.seed(1000)
vcf.fn <- as.character(args[1])
vcf_samples <- as.character(args[2])
n_ev <- as.numeric(args[3])
# ------------------------------------
base_name <- str_remove(vcf.fn,'.vcf.gz')
# reformating genotypes
snpgdsVCF2GDS(vcf.fn = vcf.fn, out.fn = str_c(base_name,'.gds'), method = "biallelic.only")
snpgdsSummary(str_c(base_name,'.gds'))
genofile <- snpgdsOpen(str_c(base_name,'.gds'))
# filter genotypes by LD
snpset <- snpgdsLDpruning(genofile, ld.threshold = 0.2, autosome.only = FALSE)
snpset.id <- unlist(snpset)
snpset.tbl <- tibble(CHROM = names(snpset.id) %>%
str_extract(.,'(LG|Contig)[0-9]{2}'),
POS = snpset.id) %>%
mutate(snpid = str_c(CHROM,":",POS))
id_labs <- read_delim(vcf_samples, delim = '\t',col_names = c('id','spec','loc'))
# run pca ------------------------------------------------
pca <- snpgdsPCA(genofile, snp.id = snpset.id, num.thread = 2,
autosome.only = FALSE,eigen.cnt = n_ev)
# extract results ---------------------------------------
pc_percent <- tibble(EV_nr = 1:n_ev,
EV = str_c("EV",str_pad(width = 2,pad = '0',EV_nr)),
exp_var = (pca$varprop*100)[EV_nr])
tab <-bind_cols(id = pca$sample.id, get_pca_scores(pca$eigenvect,n_ev))
ct <- cross_df(tibble(p=1:n_ev, q=1:n_ev),.filter = filter_fun) %>% arrange(p,q)
df_result <- pmap(ct,ct_subset,df = tab, exp_var = pc_percent) %>%
bind_rows() %>%
left_join(.,id_labs)
thm_base <- theme(panel.background = element_blank(),
panel.grid = element_blank(),
axis.line = element_line(color = 'lightgray'),
legend.key = element_blank())
# explained variation per PC
p1 <- ggplot(pc_percent,aes(x=EV_nr,y=exp_var)) +
geom_area(fill='darkgray',alpha =.2) +
geom_point(shape=21,fill = 'red') +
scale_x_continuous(name =NULL,breaks = pc_percent$EV_nr,labels = pc_percent$EV) +
scale_y_continuous(name = 'Explained Variation (%)',
limits = c(0,max(pc_percent$exp_var))) +
thm_base
# plot all combinations of PCs
p2 <- ggplot(df_result,aes(x,y,fill=spec)) +
geom_point(aes(shape = loc)) +
scale_fill_manual("Species", values = clr, labels = sp_labs) +
scale_shape_manual("Location", values = shps, labels = loc_names) +
facet_grid(run_y~run_x, scales = 'free') +
guides(fill = guide_legend(override.aes = list(shape = 21, size = 3)),
shape = guide_legend(override.aes = list(fill = 'black', size = 3))) +
theme(axis.title = element_blank(),
legend.position = c(.85,.75)) +
thm_base +
theme(panel.background = element_rect(color = 'lightgray'))
out_plot <- cowplot::plot_grid(p1,p2,ncol = 1,rel_heights = c(.3,1))
# export results ---------------------------------------
ggsave(filename = str_c(base_name,'.pca.pdf'),out_plot,width = 8,height = 9.5)
write_delim(pc_percent,path = str_c(base_name,'.exp_var.txt'),delim = '\t')
system(str_c("gzip ",str_c(base_name,'.exp_var.txt')))
write_delim(tab,path = str_c(base_name,'.scores.txt'),delim = '\t')
system(str_c("gzip ",str_c(base_name,'.scores.txt')))
# -------------------------------------------------------
# Primary PCA (PC1 ~ PC2)
p3 <- ggplot()+
geom_point(data = tab %>%
left_join(.,id_labs),
aes(EV01, EV02, fill = spec, shape = loc)) +
scale_fill_manual(values = clr, guide = FALSE) +
scale_shape_manual("Location", values = shps, labels = loc_names) +
guides(shape = guide_legend(override.aes = list(fill = 'black', size = 3))) +
labs(x = levels(factor(c(df_result$run_x,df_result$run_y)))[1],
y = levels(factor(c(df_result$run_x,df_result$run_y)))[2]) +
theme(legend.position = 'bottom') +
thm_base
sp_list <- (tab %>% left_join(.,id_labs))$spec %>% factor() %>% levels()
legend_grob_pair <- hypo_legend_single(species = sp_names[sp_list],
color_map = clr[sp_list],
circle_color = 'black',
plot_names = TRUE,
circle_lwd = .5,
ncol = min(length(sp_list),6)) %>%
ggplotGrob()
p_prime <- cowplot::plot_grid(p3,legend_grob_pair,ncol = 1,rel_heights = c(1,.15))
ggsave(p_prime,filename = str_c(base_name,'.prime_pca.pdf'), width = 8,height = 8)
# -------------------------------------------------------
# Investigate loadings
#SnpLoad <- snpgdsPCASNPLoading(pca, genofile)
#snpl <- SnpLoad$snp.id
#
#snpl_table <- tibble(snpl = SnpLoad$snp.id) %>% bind_cols(.,snpset.tbl)
#snp_load_ev1 <- loading_set(df = SnpLoad$snploading,snpid = snpl_table$snpid, EV = '-abs(EV01)',
# n = 9,clr_scheme = RColorBrewer::brewer.pal,name='Oranges')
#snp_load_ev2 <- loading_set(df = SnpLoad$snploading,snpid = snpl_table$snpid,EV = '-abs(EV02)',
# n = 9,clr_scheme = RColorBrewer::brewer.pal,name='Blues')
#cc <- c(RColorBrewer::brewer.pal(5,'Oranges')[5],RColorBrewer::brewer.pal(5,'Blues')[5])
#dist <- 1.1
#(p_loadings <- ggplot()+
# geom_segment(data = snp_load_ev1,aes(x=0,xend=EV01,y=0,yend=EV02,alpha=-idx),col=cc[1],#snp_load_ev1$clr,
# arrow = arrow(length = unit(3,'pt'),type = 'closed'))+
# geom_text(data = snp_load_ev1,aes(x=EV01*dist,y=EV02*dist,label=snpnr),col=cc[1])+
# geom_segment(data = snp_load_ev2,aes(x=0,xend=EV01,y=0,yend=EV02,alpha=-idx),col=cc[2],#snp_load_ev2$clr,
# arrow = arrow(length = unit(3,'pt'),type = 'closed'))+
# geom_text(data = snp_load_ev2,aes(x=EV01*dist,y=EV02*dist,label=snpnr),col=cc[2])+
# geom_point(data = tab %>% left_join(.,id_labs),aes(EV01,EV02,fill=spec),shape=21)+
# scico::scale_color_scico(direction = -1)+
# labs(x='PC01',y='PC02')+
# theme(axis.title.x = element_text(color=cc[1]),
# axis.title.y = element_text(color=cc[2])))
#top_snps <- purrr::map2(list(snp_load_ev1 ,snp_load_ev2),list('EV01','EV02'),prep_laoding_ev) %>% bind_rows()
# export results ---------------------------------------
#ggsave(p_loadings,filename = str_c(base_name,'.snp_loadings.pdf'), width = 8,height = 8)
#write_delim(top_snps,path = str_c(base_name,'.top_snps.txt'),delim = '\t')
#system(str_c("gzip ",str_c(base_name,'.top_snps.txt')))
# close the door ---------------------------------------
closefn.gds(genofile) | /R/vcf2pca.R | no_license | k-hench/hamlet_radiation | R | false | false | 8,739 | r | #!/usr/bin/env Rscript
# run from terminal:
# Rscript --vanilla vcf2pca.R simulated.vcf.gz pops_sim.txt 5
# ===============================================================
# This script used the package SNPrelate to produce a PCA on a given VCF file.
# The number of PCs considdered can be varied.
# ---------------------------------------------------------------
# The produced output contains:
# - 2 plots
# - 1 combination of "%exp var" ~ "PC" and a grid of all possible "PCy" ~ "PCx"
# - 1 "PC2" ~ "PC1" with the repective 9 SNPs with top loadings for PC1 & PC2 indicated
# - 3 tables
# - % exp var per PC
# - individual scores on PCx - PCz
# - SNPs with top loadings
# ===============================================================
# args <- c('simulated.vcf.gz', 'pops_sim.txt', '5')
args = commandArgs(trailingOnly=FALSE)
args = args[7:10]
print(args)
library(SNPRelate)
library(stringr)
library(tidyverse)
library(hypoimg)
library(hypogen)
library(GenomicOriginsScripts)
# custom functions -------------------
get_pca_scores <- function(mat,n_ev){mat[,1:n_ev] %>%
as_tibble() %>%
set_names(.,nm=str_c("EV",str_pad(width = 2,pad = '0',1:n_ev)))}
filter_fun <- function(x, y) x >= y
ct_subset <- function(p,q,df,exp_var){
id <- names(df)[1]
P <- names(df)[p+1]
Q <- names(df)[q+1]
P_lab <- str_c(pc_percent[p,2],' (',round(pc_percent[p,3],2),'%)')
Q_lab <- str_c(pc_percent[q,2],' (',round(pc_percent[q,3],2),'%)')
df %>%
select(id,P,Q) %>%
set_names(.,nm = c('id','x','y')) %>%
mutate(run_x = P_lab,
run_y = Q_lab)
}
# loadings related
scale_var <- function(x,exp_var){x * exp_var}
loading_set <- function(df,snpid,EV,n_snps,clr_scheme,...){
clr <- do.call(clr_scheme,list(n = n_snps,...))
snp_load <- df %>%
t() %>%
as.tibble() %>%
set_names(.,nm=str_c("EV",str_pad(width = 2,pad = '0',1:n_ev))) %>%
bind_cols(tibble(snp = str_c('snp',nm=snpid),
snpnr = snpid),.)%>%
mutate(total = rowSums(abs(scale_var(x = df,exp_var = pc_percent$exp_var)) %>%
t())) %>%
arrange_(EV) %>%
mutate(idx = row_number()) %>% ungroup() %>%
filter(idx <= n_snps) %>%
mutate(clr = clr)
}
prep_laoding_ev <- function(df,EV){df %>% mutate(ev = EV) %>% select(snpnr,ev,idx) }
# ------------------------------------
# config:
set.seed(1000)
vcf.fn <- as.character(args[1])
vcf_samples <- as.character(args[2])
n_ev <- as.numeric(args[3])
# ------------------------------------
base_name <- str_remove(vcf.fn,'.vcf.gz')
# reformating genotypes
snpgdsVCF2GDS(vcf.fn = vcf.fn, out.fn = str_c(base_name,'.gds'), method = "biallelic.only")
snpgdsSummary(str_c(base_name,'.gds'))
genofile <- snpgdsOpen(str_c(base_name,'.gds'))
# filter genotypes by LD
snpset <- snpgdsLDpruning(genofile, ld.threshold = 0.2, autosome.only = FALSE)
snpset.id <- unlist(snpset)
snpset.tbl <- tibble(CHROM = names(snpset.id) %>%
str_extract(.,'(LG|Contig)[0-9]{2}'),
POS = snpset.id) %>%
mutate(snpid = str_c(CHROM,":",POS))
id_labs <- read_delim(vcf_samples, delim = '\t',col_names = c('id','spec','loc'))
# run pca ------------------------------------------------
pca <- snpgdsPCA(genofile, snp.id = snpset.id, num.thread = 2,
autosome.only = FALSE,eigen.cnt = n_ev)
# extract results ---------------------------------------
pc_percent <- tibble(EV_nr = 1:n_ev,
EV = str_c("EV",str_pad(width = 2,pad = '0',EV_nr)),
exp_var = (pca$varprop*100)[EV_nr])
tab <-bind_cols(id = pca$sample.id, get_pca_scores(pca$eigenvect,n_ev))
ct <- cross_df(tibble(p=1:n_ev, q=1:n_ev),.filter = filter_fun) %>% arrange(p,q)
df_result <- pmap(ct,ct_subset,df = tab, exp_var = pc_percent) %>%
bind_rows() %>%
left_join(.,id_labs)
thm_base <- theme(panel.background = element_blank(),
panel.grid = element_blank(),
axis.line = element_line(color = 'lightgray'),
legend.key = element_blank())
# explained variation per PC
p1 <- ggplot(pc_percent,aes(x=EV_nr,y=exp_var)) +
geom_area(fill='darkgray',alpha =.2) +
geom_point(shape=21,fill = 'red') +
scale_x_continuous(name =NULL,breaks = pc_percent$EV_nr,labels = pc_percent$EV) +
scale_y_continuous(name = 'Explained Variation (%)',
limits = c(0,max(pc_percent$exp_var))) +
thm_base
# plot all combinations of PCs
p2 <- ggplot(df_result,aes(x,y,fill=spec)) +
geom_point(aes(shape = loc)) +
scale_fill_manual("Species", values = clr, labels = sp_labs) +
scale_shape_manual("Location", values = shps, labels = loc_names) +
facet_grid(run_y~run_x, scales = 'free') +
guides(fill = guide_legend(override.aes = list(shape = 21, size = 3)),
shape = guide_legend(override.aes = list(fill = 'black', size = 3))) +
theme(axis.title = element_blank(),
legend.position = c(.85,.75)) +
thm_base +
theme(panel.background = element_rect(color = 'lightgray'))
out_plot <- cowplot::plot_grid(p1,p2,ncol = 1,rel_heights = c(.3,1))
# export results ---------------------------------------
ggsave(filename = str_c(base_name,'.pca.pdf'),out_plot,width = 8,height = 9.5)
write_delim(pc_percent,path = str_c(base_name,'.exp_var.txt'),delim = '\t')
system(str_c("gzip ",str_c(base_name,'.exp_var.txt')))
write_delim(tab,path = str_c(base_name,'.scores.txt'),delim = '\t')
system(str_c("gzip ",str_c(base_name,'.scores.txt')))
# -------------------------------------------------------
# Primary PCA (PC1 ~ PC2)
p3 <- ggplot()+
geom_point(data = tab %>%
left_join(.,id_labs),
aes(EV01, EV02, fill = spec, shape = loc)) +
scale_fill_manual(values = clr, guide = FALSE) +
scale_shape_manual("Location", values = shps, labels = loc_names) +
guides(shape = guide_legend(override.aes = list(fill = 'black', size = 3))) +
labs(x = levels(factor(c(df_result$run_x,df_result$run_y)))[1],
y = levels(factor(c(df_result$run_x,df_result$run_y)))[2]) +
theme(legend.position = 'bottom') +
thm_base
sp_list <- (tab %>% left_join(.,id_labs))$spec %>% factor() %>% levels()
legend_grob_pair <- hypo_legend_single(species = sp_names[sp_list],
color_map = clr[sp_list],
circle_color = 'black',
plot_names = TRUE,
circle_lwd = .5,
ncol = min(length(sp_list),6)) %>%
ggplotGrob()
p_prime <- cowplot::plot_grid(p3,legend_grob_pair,ncol = 1,rel_heights = c(1,.15))
ggsave(p_prime,filename = str_c(base_name,'.prime_pca.pdf'), width = 8,height = 8)
# -------------------------------------------------------
# Investigate loadings
#SnpLoad <- snpgdsPCASNPLoading(pca, genofile)
#snpl <- SnpLoad$snp.id
#
#snpl_table <- tibble(snpl = SnpLoad$snp.id) %>% bind_cols(.,snpset.tbl)
#snp_load_ev1 <- loading_set(df = SnpLoad$snploading,snpid = snpl_table$snpid, EV = '-abs(EV01)',
# n = 9,clr_scheme = RColorBrewer::brewer.pal,name='Oranges')
#snp_load_ev2 <- loading_set(df = SnpLoad$snploading,snpid = snpl_table$snpid,EV = '-abs(EV02)',
# n = 9,clr_scheme = RColorBrewer::brewer.pal,name='Blues')
#cc <- c(RColorBrewer::brewer.pal(5,'Oranges')[5],RColorBrewer::brewer.pal(5,'Blues')[5])
#dist <- 1.1
#(p_loadings <- ggplot()+
# geom_segment(data = snp_load_ev1,aes(x=0,xend=EV01,y=0,yend=EV02,alpha=-idx),col=cc[1],#snp_load_ev1$clr,
# arrow = arrow(length = unit(3,'pt'),type = 'closed'))+
# geom_text(data = snp_load_ev1,aes(x=EV01*dist,y=EV02*dist,label=snpnr),col=cc[1])+
# geom_segment(data = snp_load_ev2,aes(x=0,xend=EV01,y=0,yend=EV02,alpha=-idx),col=cc[2],#snp_load_ev2$clr,
# arrow = arrow(length = unit(3,'pt'),type = 'closed'))+
# geom_text(data = snp_load_ev2,aes(x=EV01*dist,y=EV02*dist,label=snpnr),col=cc[2])+
# geom_point(data = tab %>% left_join(.,id_labs),aes(EV01,EV02,fill=spec),shape=21)+
# scico::scale_color_scico(direction = -1)+
# labs(x='PC01',y='PC02')+
# theme(axis.title.x = element_text(color=cc[1]),
# axis.title.y = element_text(color=cc[2])))
#top_snps <- purrr::map2(list(snp_load_ev1 ,snp_load_ev2),list('EV01','EV02'),prep_laoding_ev) %>% bind_rows()
# export results ---------------------------------------
#ggsave(p_loadings,filename = str_c(base_name,'.snp_loadings.pdf'), width = 8,height = 8)
#write_delim(top_snps,path = str_c(base_name,'.top_snps.txt'),delim = '\t')
#system(str_c("gzip ",str_c(base_name,'.top_snps.txt')))
# close the door ---------------------------------------
closefn.gds(genofile) |
# nCr.R
# ::rtemis::
# 2016 Efstathios D. Gennatas egenn.lambdamd.org

#' n Choose r
#'
#' Calculate number of combinations
#'
#' In plain language:
#' You have \code{n} items. How many different combinations of \code{r} items can you make?
#'
#' @param n Integer: Total number of items
#' @param r Integer: Number of items in each combination
#' @return Numeric: Number of combinations (0 when \code{n < r})
#' @author Efstathios D Gennatas
#' @export
nCr <- function(n, r) {
  if (n < r) {
    0
  } else {
    # choose() computes the binomial coefficient directly; unlike the
    # factorial ratio it does not overflow to Inf/NaN for n > 170
    choose(n, r)
  }
} # rtemis::nCr
| /R/nCr.R | no_license | zeta1999/rtemis | R | false | false | 564 | r | # nCr.R
# ::rtemis::
# 2016 Efstathios D. Gennatas egenn.lambdamd.org
#' n Choose r
#'
#' Calculate number of combinations
#'
#' In plain language:
#' You have \code{n} items. How many different cobinations of \code{r} items can you make?
#'
#' @param n Integer: Total number of items
#' @param r Integer: Number of items in each combination
#' @return Integer: Number of combinations
#' @author Efstathios D Gennatas
#' @export
nCr <- function(n, r) {
if (n < r) {
0
} else {
factorial(n) / (factorial(r) * factorial(n - r))
}
} # rtemis::nCr
|
DATA <- read.table('RawCounts_NormedCounts_NormedAllelicExpression.txt2', header = F, sep='\t')
Sum <- apply(DATA[,11:16],1,sum)
DATA <- cbind(DATA, Sum)
colnames(DATA) <- c('Chromosome', 'TSSStart', 'TSSStop', 'GeneName', 'RawCounts_NoDox', 'RawCounts_DoxA', 'RawCounts_DoxB', 'NormedCounts_NoDox', 'NormedCounts_DoxA', 'NormedCounts_DoxB', 'Genome1_NoDox', 'Genome1_DoxA', 'Genome1_DoxB', 'Genome2_NoDox', 'Genome2_DoxA', 'Genome2_DoxB', 'Sum')
x <- c(5,6,8,9) + 6
y <- c(5,7,8,10) + 6
Cmp_data_x <- DATA[,x]
Cmp_data_y <- DATA[,y]
source('CalculateSilencingScoreFunction.R')
Cmp_data_ss_x <- Calculate_Silencing_Score(Cmp_data_x)
Cmp_data_ss_y <- Calculate_Silencing_Score(Cmp_data_y)
transcriptome_ss <- cbind(DATA, Cmp_data_ss_x, Cmp_data_ss_y)
for(chrs in c('chr1','chr2','chr3','chr4','chr5','chr6','chr7','chr8','chr9','chr10','chr11','chr12','chr13','chr14','chr15','chr16','chr17','chr18','chr19')){
print(chrs)
print(median(transcriptome_ss[transcriptome_ss$Chromosome==chrs,]$Cmp_data_ss_x))
print(median(transcriptome_ss[transcriptome_ss$Chromosome==chrs,]$Cmp_data_ss_y))
}
D <- transcriptome_ss[transcriptome_ss$Chromosome =="chr7", ]
D$site = ceiling(D$TSSStart/1000000)
library(ggplot2)
p1<-ggplot(D, aes(x=factor(site), y= Cmp_data_ss_x) ) + geom_boxplot(outlier.shape = 3) + geom_point(position=position_jitter(width=0.25,height=0.001),shape=16, colour="purple", alpha=.55, size=2)
p2<-ggplot(D, aes(x=factor(site), y= Cmp_data_ss_y) ) + geom_boxplot(outlier.shape = 3) + geom_point(position=position_jitter(width=0.25,height=0.001),shape=16, colour="purple", alpha=.55, size=2)
library(gridExtra)
grid.arrange(p1,p2,p1,nrow=3, ncol=1)
for(i in 1:max(D$site)){
print(c(i, median(D[D$site==i,]$Cmp_data_ss_x), median(D[D$site==i,]$Cmp_data_ss_y)), quote=F)
}
# sum(ifelse(D[which(D$site>120 & D$site<131),]$Cmp_data_ss_x>0.75,1,0))
# sum(ifelse(D[which(D$site>120 & D$site<131),]$Cmp_data_ss_x>=0.75,1,0))
# sum(ifelse(D[which(D$site>120 & D$site<131),]$Cmp_data_ss_x>=0.5,1,0))
# sum(ifelse(D[which(D$site>120 & D$site<131),]$Cmp_data_ss_x>=0.25,1,0))
| /CalculateSilencing.R | permissive | guifengwei/XCI | R | false | false | 2,098 | r |
DATA <- read.table('RawCounts_NormedCounts_NormedAllelicExpression.txt2', header = F, sep='\t')
Sum <- apply(DATA[,11:16],1,sum)
DATA <- cbind(DATA, Sum)
colnames(DATA) <- c('Chromosome', 'TSSStart', 'TSSStop', 'GeneName', 'RawCounts_NoDox', 'RawCounts_DoxA', 'RawCounts_DoxB', 'NormedCounts_NoDox', 'NormedCounts_DoxA', 'NormedCounts_DoxB', 'Genome1_NoDox', 'Genome1_DoxA', 'Genome1_DoxB', 'Genome2_NoDox', 'Genome2_DoxA', 'Genome2_DoxB', 'Sum')
x <- c(5,6,8,9) + 6
y <- c(5,7,8,10) + 6
Cmp_data_x <- DATA[,x]
Cmp_data_y <- DATA[,y]
source('CalculateSilencingScoreFunction.R')
Cmp_data_ss_x <- Calculate_Silencing_Score(Cmp_data_x)
Cmp_data_ss_y <- Calculate_Silencing_Score(Cmp_data_y)
transcriptome_ss <- cbind(DATA, Cmp_data_ss_x, Cmp_data_ss_y)
for(chrs in c('chr1','chr2','chr3','chr4','chr5','chr6','chr7','chr8','chr9','chr10','chr11','chr12','chr13','chr14','chr15','chr16','chr17','chr18','chr19')){
print(chrs)
print(median(transcriptome_ss[transcriptome_ss$Chromosome==chrs,]$Cmp_data_ss_x))
print(median(transcriptome_ss[transcriptome_ss$Chromosome==chrs,]$Cmp_data_ss_y))
}
D <- transcriptome_ss[transcriptome_ss$Chromosome =="chr7", ]
D$site = ceiling(D$TSSStart/1000000)
library(ggplot2)
p1<-ggplot(D, aes(x=factor(site), y= Cmp_data_ss_x) ) + geom_boxplot(outlier.shape = 3) + geom_point(position=position_jitter(width=0.25,height=0.001),shape=16, colour="purple", alpha=.55, size=2)
p2<-ggplot(D, aes(x=factor(site), y= Cmp_data_ss_y) ) + geom_boxplot(outlier.shape = 3) + geom_point(position=position_jitter(width=0.25,height=0.001),shape=16, colour="purple", alpha=.55, size=2)
library(gridExtra)
grid.arrange(p1,p2,p1,nrow=3, ncol=1)
for(i in 1:max(D$site)){
print(c(i, median(D[D$site==i,]$Cmp_data_ss_x), median(D[D$site==i,]$Cmp_data_ss_y)), quote=F)
}
# sum(ifelse(D[which(D$site>120 & D$site<131),]$Cmp_data_ss_x>0.75,1,0))
# sum(ifelse(D[which(D$site>120 & D$site<131),]$Cmp_data_ss_x>=0.75,1,0))
# sum(ifelse(D[which(D$site>120 & D$site<131),]$Cmp_data_ss_x>=0.5,1,0))
# sum(ifelse(D[which(D$site>120 & D$site<131),]$Cmp_data_ss_x>=0.25,1,0))
|
library(icews)
library(ggplot2)
query <- "
SELECT event_date, count(*) AS records
FROM events
GROUP BY event_date;"
daily_records <- query_icews(query)
daily_records$event_date <- as.Date(as.character(daily_records$event_date),
format = "%Y%m%d", origin = "1970-01-01")
ggplot(daily_records, aes(x = event_date, y = records)) +
geom_point()
query <- "
SELECT distinct(source_file),
count(*) as records
FROM events
GROUP BY source_file;"
n_by_sf <- query_icews(query)
query <- "
SELECT count(*) AS records,
count(distinct(event_id)) AS ids
FROM events LIMIT 1;"
daily_count <- query_icews(query)
# Verify that events are correctly fetched from DVN
# Verify number of rows in files matches number of ingested records
dir(find_raw())
sf <- "20200927-icews-events.tab"
check <- readr::read_tsv(find_raw(sf))
query <- sprintf("
SELECT count(*) AS records
FROM events
WHERE source_file = '%s'
", sf)
db_n <- query_icews(query)
nrow(check)
db_n[[1]]
# matches
# Check number of events by source file vs time period nominally covered
| /2020-decrease/investigate.R | no_license | andybega/icews-misc | R | false | false | 1,103 | r |
library(icews)
library(ggplot2)
query <- "
SELECT event_date, count(*) AS records
FROM events
GROUP BY event_date;"
daily_records <- query_icews(query)
daily_records$event_date <- as.Date(as.character(daily_records$event_date),
format = "%Y%m%d", origin = "1970-01-01")
ggplot(daily_records, aes(x = event_date, y = records)) +
geom_point()
query <- "
SELECT distinct(source_file),
count(*) as records
FROM events
GROUP BY source_file;"
n_by_sf <- query_icews(query)
query <- "
SELECT count(*) AS records,
count(distinct(event_id)) AS ids
FROM events LIMIT 1;"
daily_count <- query_icews(query)
# Verify that events are correctly fetched from DVN
# Verify number of rows in files matches number of ingested records
dir(find_raw())
sf <- "20200927-icews-events.tab"
check <- readr::read_tsv(find_raw(sf))
query <- sprintf("
SELECT count(*) AS records
FROM events
WHERE source_file = '%s'
", sf)
db_n <- query_icews(query)
nrow(check)
db_n[[1]]
# matches
# Check number of events by source file vs time period nominally covered
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{oneoffVariable}
\alias{oneoffVariable}
\title{One-off Global Variables}
\usage{
oneoffVariable(default = NULL)
}
\arguments{
\item{default}{default value to which the global variable is reset after each
access. Default is \code{NULL}.}
}
\value{
a function with one argument (\code{value}) that provides get/set access
to a global variable.
If called with a value, it assigns this value to the global variable.
If called with no argument, it returns the current value of the global variable and
reset it to its default value -- as defined at its creation.
}
\description{
Defines a function that allow to get/assign a global variable whose value is
ensured to be reset after each access.
}
\examples{
x <- oneoffVariable(0)
# returns default value
x()
# assign a value
x(3)
# get the value
x()
# second call returns default value again
x()
}
| /man/oneoffVariable.Rd | no_license | renozao/pkgmaker | R | false | true | 938 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{oneoffVariable}
\alias{oneoffVariable}
\title{One-off Global Variables}
\usage{
oneoffVariable(default = NULL)
}
\arguments{
\item{default}{default value to which the global variable is reset after each
access. Default is \code{NULL}.}
}
\value{
a function with one argument (\code{value}) that provides get/set access
to a global variable.
If called with a value, it assigns this value to the global variable.
If called with no argument, it returns the current value of the global variable and
reset it to its default value -- as defined at its creation.
}
\description{
Defines a function that allow to get/assign a global variable whose value is
ensured to be reset after each access.
}
\examples{
x <- oneoffVariable(0)
# returns default value
x()
# assign a value
x(3)
# get the value
x()
# second call returns default value again
x()
}
|
#' Conversion Factor for Passenger Vehicles of Heavy Vehicles at Roundabouts
#'
#' Conversion factor for passenger vehicles of heavy vehicles at roundabouts.
#' It follows <Table 11-4> in KHCM(2013) p.500.
#' @param lane Roundabout lane. Choose one from : \code{1}, \code{2}
#' @param hv_pcn Heavy Vehicle Ratio(%). Must be a non-negative number.
#' @keywords conversion factor passenger vehicle roundabout heavy
#' @seealso \code{\link{f_hv_rab}}, \code{\link{V_i_pce_rab}}
#' @return Conversion factor for passenger cars of heavy vehicles, or an
#'   error string when \code{lane} is not 1 or 2 (preserved legacy behavior).
#' @export
#' @examples
#' E_T_rab(lane = 2, hv_pcn = 12.3)
E_T_rab <- function(lane = NULL, hv_pcn = NULL){
  # Validate the heavy-vehicle percentage explicitly; the original fell
  # through to an "object 'et' not found" error for negative/missing values.
  if (is.null(hv_pcn) || !is.numeric(hv_pcn) || hv_pcn < 0) {
    stop("[hv_pcn] must be a non-negative number (heavy vehicle percentage).")
  }
  # Legacy behavior: invalid lane returns an error string rather than stopping
  if (!(lane %in% c(1, 2))) {
    return('Error : [lane] must be one of 1, 2. Please check that.')
  }
  # Table 11-4, KHCM(2013) p.500: factors per lane for the HV-percentage
  # bins [0, 5], (5, 10], (10, 15], (15, Inf)
  et_by_lane <- list(`1` = c(2.4, 2.4, 2.4, 2.5),
                     `2` = c(2.4, 2.5, 2.6, 2.7))
  bin <- if (hv_pcn <= 5) 1 else if (hv_pcn <= 10) 2 else if (hv_pcn <= 15) 3 else 4
  et_by_lane[[as.character(lane)]][bin]
}
| /R/E_T_rab.R | no_license | regenesis90/KHCMinR | R | false | false | 1,119 | r | #' Conversion Factor for Passenger Vehicles of Heavy Vehicles at Roundabouts
#'
#' Conversion factor for passenger vehicles of heavy vehicles at roundabouts.
#' It follows <Table 11-4> in KHCM(2013) p.500.
#' @param lane Roundabout lane. Choose one from : \code{1}, \code{2}
#' @param pcn_hv Heavy Vehicle Ratio(%)
#' @keywords conversion factor passenger vehicle roundabout heavy
#' @seealso \code{\link{f_hv_rab}}, \code{\link{V_i_pce_rab}}
#' @export E_T_rab Conversion factor for passenger cars of heavy vehicles
#' @examples
#' E_T_rab(lane = 2, hv_pcn = 12.3)
E_T_rab <- function(lane = NULL, hv_pcn = NULL){
if (lane == 1){
if (hv_pcn >= 0 & hv_pcn <= 5){et <- 2.4}
else if (hv_pcn > 5 & hv_pcn <= 10){et <- 2.4}
else if (hv_pcn > 10 & hv_pcn <= 15){et <- 2.4}
else if (hv_pcn > 15){et <- 2.5}
}
else if (lane == 2){
if (hv_pcn >= 0 & hv_pcn <= 5){et <- 2.4}
else if (hv_pcn > 5 & hv_pcn <= 10){et <- 2.5}
else if (hv_pcn > 10 & hv_pcn <= 15){et <- 2.6}
else if (hv_pcn > 15){et <- 2.7}
}
else {et <- 'Error : [lane] must be one of 1, 2. Please check that.'}
et
}
|
## Merge Data
# NOTE(review): one-shot analysis script; the absolute paths below are
# machine-specific and assumed to exist -- confirm before rerunning.
library(dplyr)
library(readr)
# Loads `pluto_data` into the global environment (object name assumed
# from its use below; confirm if the .Rdata file changes).
load('/data/Sta323/nyc_parking/pluto_data.Rdata')
nyc = read_csv("/data/Sta323/nyc_parking/NYParkingViolations.csv")
# Make column names syntactically valid (spaces etc. become dots).
names(nyc) = make.names(names(nyc))
# Build a precinct/address table: addresses lower-cased for joining,
# precinct numbers restricted to the 1..34 range kept below.
nyc_addr = nyc %>%
  select(Issuer.Precinct,House.Number,Street.Name) %>%
  transmute(precinct = Issuer.Precinct,
            address = paste(House.Number, Street.Name) %>% tolower()) %>%
  filter(precinct >= 1, precinct <= 34)
pluto = pluto_data %>%
  rename(x=longitude, y=latitude)
# Implicit join on the common column(s) -- presumably `address`; verify.
precincts = inner_join(nyc_addr, pluto)
## Visualizing Precincts
# Precincts retained for the visualization and the model below.
good_precincts = c(1,5,6,7,9,10,13,14,17,18,19,20,22,
                   23,24,25,26,28,30,32,33,34)
d = precincts %>% filter(precinct %in% good_precincts)
table(d$precinct)
# Fixed seed so the color shuffle below is reproducible.
set.seed(123)
precinct_colors = rep(NA,34)
colors = scales::hue_pal(h = c(0, 360) + 15, c = 100, l = 65, h.start = 0, direction = 1)(22)
colors = sample(colors,length(colors))
# Index colors by precinct number; slots not in good_precincts stay NA.
precinct_colors[good_precincts] = colors
plot(d$x, d$y, pch=16, cex=0.5, col=precinct_colors[d$precinct])
legend("topleft",legend=as.character(good_precincts), pch=16, col=colors,
       ncol = 5, cex=1, x.intersp=0.33, y.intersp=0.33)
## Manhattan Boundary
library(rgdal)
library(raster)
# Read the NYC borough shapefile and keep only Manhattan.
bb = readOGR("/data/Sta323/nyc_parking/nybb/","nybb")
manh = bb[bb$BoroName == "Manhattan",]
# Rasterize Manhattan onto a 1000 x 200 grid; cells inside the borough
# get non-zero values and become the prediction locations.
r = raster()
extent(r) = bbox(manh)
dim(r) = c(1000,200)
r = rasterize(manh, r)
pred_cells = which(r[] != 0)
pred_locs = xyFromCell(r,pred_cells) %>% as.data.frame()
## Modeling - xgboost
library(xgboost)
# Features: longitude/latitude. Labels: precinct recoded to 0-based
# consecutive integers, as multi:softmax requires.
xg_data = as.matrix(d[,c("x","y")])
xg_label = as.matrix(d[,"precinct"]) %>%
  as.factor() %>%
  as.numeric() - 1
l = xgboost(data=xg_data, label=xg_label,
            objective="multi:softmax",num_class=length(good_precincts),
            nrounds=20)
# Predict a class index for every Manhattan cell, then map indices back
# to precinct numbers (factor levels sort ascending, matching
# good_precincts' ascending order).
p = predict(l, newdata=as.matrix(pred_locs))
pred_lab = good_precincts[p+1]
pred_xg = r
pred_xg[pred_cells] = pred_lab
plot(pred_xg, asp=0)
## Model output
# Dissolve adjacent cells with the same precinct into polygons and
# export as GeoJSON via the course-provided helper.
poly = rasterToPolygons(pred_xg, dissolve = TRUE)
names(poly) = "Precinct"
source("https://raw.githubusercontent.com/Sta323-Sp16/Homework/master/hw5/write_geojson.R")
write_geojson(poly, "precincts.json")
| /analysis.R | no_license | Sta323-Sp16/Team0_hw5 | R | false | false | 2,138 | r | ## Merge Data
library(dplyr)
library(readr)
load('/data/Sta323/nyc_parking/pluto_data.Rdata')
nyc = read_csv("/data/Sta323/nyc_parking/NYParkingViolations.csv")
names(nyc) = make.names(names(nyc))
nyc_addr = nyc %>%
select(Issuer.Precinct,House.Number,Street.Name) %>%
transmute(precinct = Issuer.Precinct,
address = paste(House.Number, Street.Name) %>% tolower()) %>%
filter(precinct >= 1, precinct <= 34)
pluto = pluto_data %>%
rename(x=longitude, y=latitude)
precincts = inner_join(nyc_addr, pluto)
## Visualizing Precincts
good_precincts = c(1,5,6,7,9,10,13,14,17,18,19,20,22,
23,24,25,26,28,30,32,33,34)
d = precincts %>% filter(precinct %in% good_precincts)
table(d$precinct)
set.seed(123)
precinct_colors = rep(NA,34)
colors = scales::hue_pal(h = c(0, 360) + 15, c = 100, l = 65, h.start = 0, direction = 1)(22)
colors = sample(colors,length(colors))
precinct_colors[good_precincts] = colors
plot(d$x, d$y, pch=16, cex=0.5, col=precinct_colors[d$precinct])
legend("topleft",legend=as.character(good_precincts), pch=16, col=colors,
ncol = 5, cex=1, x.intersp=0.33, y.intersp=0.33)
## Manhattan Bounday
library(rgdal)
library(raster)
bb = readOGR("/data/Sta323/nyc_parking/nybb/","nybb")
manh = bb[bb$BoroName == "Manhattan",]
r = raster()
extent(r) = bbox(manh)
dim(r) = c(1000,200)
r = rasterize(manh, r)
pred_cells = which(r[] != 0)
pred_locs = xyFromCell(r,pred_cells) %>% as.data.frame()
## Modeling - xgboost
library(xgboost)
xg_data = as.matrix(d[,c("x","y")])
xg_label = as.matrix(d[,"precinct"]) %>%
as.factor() %>%
as.numeric() - 1
l = xgboost(data=xg_data, label=xg_label,
objective="multi:softmax",num_class=length(good_precincts),
nrounds=20)
p = predict(l, newdata=as.matrix(pred_locs))
pred_lab = good_precincts[p+1]
pred_xg = r
pred_xg[pred_cells] = pred_lab
plot(pred_xg, asp=0)
## Model output
poly = rasterToPolygons(pred_xg, dissolve = TRUE)
names(poly) = "Precinct"
source("https://raw.githubusercontent.com/Sta323-Sp16/Homework/master/hw5/write_geojson.R")
write_geojson(poly, "precincts.json")
|
# Build a visible-band (RGB) stack from single-band Sentinel-2 GeoTIFFs
# and write it back out as one multi-band file.
library(raster)
library(rgdal)
# Directory holding the input band files (note the trailing slash,
# required because paths are built with paste0 below).
test_dir = "H:/S2/VIS2NIR/Test/"
# Bands 2/3/4 are named B, G, R here -- presumably blue/green/red;
# confirm against the Sentinel-2 band definitions.
B = raster(paste0(test_dir,"S2_2.tif"))
G = raster(paste0(test_dir,"S2_3.tif"))
R = raster(paste0(test_dir,"S2_4.tif"))
# Combine the three layers into one RasterStack.
VIS = stack(B,G,R)
# Write a band-interleaved multi-band GeoTIFF, replacing any old output.
writeRaster(VIS, filename=paste0(test_dir,"VIS.tif"), options="INTERLEAVE=BAND", overwrite=TRUE)
| /R/SentinelTester.R | no_license | aocalderon/ICT2018 | R | false | false | 301 | r | library(raster)
library(rgdal)
test_dir = "H:/S2/VIS2NIR/Test/"
B = raster(paste0(test_dir,"S2_2.tif"))
G = raster(paste0(test_dir,"S2_3.tif"))
R = raster(paste0(test_dir,"S2_4.tif"))
VIS = stack(B,G,R)
writeRaster(VIS, filename=paste0(test_dir,"VIS.tif"), options="INTERLEAVE=BAND", overwrite=TRUE)
|
# Histogram of Global Active Power for 1-2 Feb 2007, saved to plot1.png.
plot1 <- function(){
  ## read and filter data file
  # read.csv.sql() filters while reading: only the two target days
  # (d/m/yyyy format) are ever loaded into memory.
  library(sqldf)
  sqlData <- read.csv.sql("./household_power_consumption.txt",
                          sql = "select * from file where Date in ('1/2/2007',
                          '2/2/2007')",header = TRUE, sep = ";")
  # sqldf keeps a database connection open behind the scenes; release it.
  closeAllConnections()
  ## Export for PNG
  png(file = "plot1.png", height = 480, width = 480)
  ## set boundaries and draw histogram.
  par(mar = c(5,4,3,2), oma = c(1,1,1,1))
  with(sqlData, hist(Global_active_power, col = "red",
                     xlab = "Global Active Power (kilowatts)",
                     main = "Global Active Power")
  )
  # Close the device so the PNG file is flushed to disk.
  dev.off()
} | /plot1.R | no_license | HolyMoonshine/ExData_Plotting1 | R | false | false | 683 | r | plot1 <- function(){
## read and filter data file
library(sqldf)
sqlData <- read.csv.sql("./household_power_consumption.txt",
sql = "select * from file where Date in ('1/2/2007',
'2/2/2007')",header = TRUE, sep = ";")
closeAllConnections()
## Export for PNG
png(file = "plot1.png", height = 480, width = 480)
## set boundaries and draw histogram.
par(mar = c(5,4,3,2), oma = c(1,1,1,1))
with(sqlData, hist(Global_active_power, col = "red",
xlab = "Global Active Power (kilowatts)",
main = "Global Active Power")
)
dev.off()
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compile.collated.dataset.r
\name{compile.collated.dataset}
\alias{compile.collated.dataset}
\title{compile collated dataset function}
\usage{
compile.collated.dataset(dataset_name_prefix, dir_names = NULL,
skip_dirs = NULL, save = TRUE)
}
\arguments{
\item{dataset_name_prefix}{a character string to prefix the datasets with, no default}
\item{dir_names}{a vector of directory names to import records from. default is NULL, in which case records from all directories present in path are imported}
\item{skip_dirs}{a vector of directory names to skip importing from. default is NULL, in which case no directory is skipped}
\item{save}{logical, if TRUE the datasets are saved to disk, defaults to TRUE}
}
\description{
This function compiles the collated dataset from records.
}
| /man/compile.collated.dataset.Rd | no_license | tracits/beehive | R | false | true | 864 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compile.collated.dataset.r
\name{compile.collated.dataset}
\alias{compile.collated.dataset}
\title{compile collated dataset function}
\usage{
compile.collated.dataset(dataset_name_prefix, dir_names = NULL,
skip_dirs = NULL, save = TRUE)
}
\arguments{
\item{dataset_name_prefix}{a character string to prefix the datasets with, no default}
\item{dir_names}{a vector of directory names to import records from. default is NULL, in which case records from all directories present in path are imported}
\item{skip_dirs}{a vector of directory names to skip importing from. default is NULL, in which case no directory is skipped}
\item{save}{logical, if TRUE the datasets are saved to disk, defaults to TRUE}
}
\description{
this function puts compiles the collated dataset from records
}
|
# INSTALL AND LOAD PACKAGES ###################
# Install pacman ("package manager") if needed
# NOTE(review): require() is used only as an "is it installed?" probe;
# pacman does the actual loading below.
if (!require("pacman")) install.packages("pacman")
# Load contributed packages with pacman
pacman::p_load(pacman, party, rio, tidyverse)
# pacman: for loading/unloading packages
# party: for decision trees
# rio: for importing data
# tidyverse: for so many reasons
# LOAD AND PREPARE DATA ###################
# Save data to "df" (for "data frame")
# Rename outcome as "y" (if it helps)
# Specify outcome with df$y
# Import CSV files with readr::read_csv() from tidyverse
# (the surrounding parentheses also print the result)
(df <- read_csv("data/StateData.csv"))
# Import other formats with rio::import() from rio
# (each assignment overwrites df; only the last import below is kept)
(df <- import("data/StateData.xlsx") %>% as_tibble())
# or...
# Final version of df: keep the id, outcome, and measure columns, make
# the outcome a factor, and rename it to the generic "y".
df <- import("data/StateData.xlsx") %>%
  as_tibble() %>%
  select(state_code,
         psychRegions,
         instagram:modernDance) %>%
  mutate(psychRegions = as.factor(psychRegions)) %>%
  rename(y = psychRegions) %>%
  print()
# ANALYZE DATA ###################
# By using standardized object and variable names, the same
# code can be reused for different analyses
# Decision tree using party::ctree
# df[, -1]) excludes the state_code
fit <- ctree(y ~ ., data = df[, -1]) # Create tree
fit %>% plot() # Plot tree
fit %>% # Predicted vs true
  predict() %>%
  table(df$y)
# Hierarchical clustering of states on Euclidean row distances.
# NOTE(review): df still contains non-numeric columns (state_code, y);
# dist() will coerce them with a warning -- confirm this is intended.
hc <- df %>% # Get data
  dist %>% # Compute distance/dissimilarity matrix
  hclust %>% # Compute hierarchical clusters
  plot(labels = df$state_code) # Plot dendrogram
# CLEAN UP ###################
# Clear environment
rm(list = ls())
# Clear packages
p_unload(all) # Remove all add-ons
# Clear plots
dev.off() # But only if there IS a plot
# Clear console
cat("\014") # ctrl+L
# Install pacman ("package manager") if needed
if (!require("pacman")) install.packages("pacman")
# Load contributed packages with pacman
pacman::p_load(pacman, party, rio, tidyverse)
# pacman: for loading/unloading packages
# party: for decision trees
# rio: for importing data
# tidyverse: for so many reasons
# LOAD AND PREPARE DATA ###################
# Save data to "df" (for "data frame")
# Rename outcome as "y" (if it helps)
# Specify outcome with df$y
# Import CSV files with readr::read_csv() from tidyverse
(df <- read_csv("data/StateData.csv"))
# Import other formats with rio::import() from rio
(df <- import("data/StateData.xlsx") %>% as_tibble())
# or...
df <- import("data/StateData.xlsx") %>%
as_tibble() %>%
select(state_code,
psychRegions,
instagram:modernDance) %>%
mutate(psychRegions = as.factor(psychRegions)) %>%
rename(y = psychRegions) %>%
print()
# ANALYZE DATA ###################
# By using standardized object and variable names, the same
# code can be reused for different analyses
# Decision tree using party::ctree
# df[, -1]) excludes the state_code
fit <- ctree(y ~ ., data = df[, -1]) # Create tree
fit %>% plot() # Plot tree
fit %>% # Predicted vs true
predict() %>%
table(df$y)
hc <- df %>% # Get data
dist %>% # Compute distance/dissimilarity matrix
hclust %>% # Compute hierarchical clusters
plot(labels = df$state_code) # Plot dendrogram
# CLEAN UP ###################
# Clear environment
rm(list = ls())
# Clear packages
p_unload(all) # Remove all add-ons
# Clear plots
dev.off() # But only if there IS a plot
# Clear console
cat("\014") # ctrl+L
|
#' Random-search grid for the XGBoost outcome (regression) model.
#'
#' Draws `n_sets` hyper-parameter configurations uniformly at random within
#' fixed boundaries and returns them as one data frame (one row per set).
#'
#' @param n_sets number of random configurations to draw; defaults to 500,
#'   matching the previously hard-coded value, so existing callers are
#'   unaffected.
#' @return data.frame with one row per simulated parameter set.
randomsearch_xgb_oc <- function(n_sets = 500){
  # Preallocate instead of growing the list inside the loop.
  parameters_list <- vector("list", n_sets)
  for (i in seq_len(n_sets)){
    # Randomly pick parameter values within defined boundaries.
    param <- list(booster = "gbtree", # tree booster to capture non-linearities
                  objective = "reg:squarederror",
                  max_depth = sample(1:10, 1),
                  eta = runif(1, 0.01, 0.4),
                  subsample = runif(1, 0.6, 1),
                  colsample_bytree = runif(1, 0.4, 1),
                  min_child_weight = sample(0:10, 1),
                  alpha = sample(0:6, 1),
                  nrounds = 300,
                  eval_metric = "rmse",
                  early_stopping_rounds = 30
    )
    parameters_list[[i]] <- as.data.frame(param)
  }
  # Stack all randomly simulated parameter sets into one data frame.
  do.call(rbind, parameters_list)
}
#' Random-search grid for the XGBoost propensity-score (binary) model.
#'
#' Draws `n_sets` hyper-parameter configurations uniformly at random within
#' fixed boundaries and returns them as one data frame (one row per set).
#'
#' @param n_sets number of random configurations to draw; defaults to 500,
#'   matching the previously hard-coded value, so existing callers are
#'   unaffected.
#' @return data.frame with one row per simulated parameter set.
randomsearch_xgb_ps <- function(n_sets = 500){
  # Preallocate instead of growing the list inside the loop.
  parameters_list <- vector("list", n_sets)
  for (i in seq_len(n_sets)){
    # Randomly pick parameter values within defined boundaries.
    param <- list(booster = "gbtree", # tree booster to capture non-linearities
                  objective = "binary:logistic",
                  max_depth = sample(1:10, 1),
                  eta = runif(1, 0.01, 0.4),
                  subsample = runif(1, 0.6, 1),
                  colsample_bytree = runif(1, 0.4, 1),
                  min_child_weight = sample(0:10, 1),
                  alpha = sample(0:6, 1),
                  nrounds = 300,
                  eval_metric = "error",
                  early_stopping_rounds = 30
    )
    parameters_list[[i]] <- as.data.frame(param)
  }
  # Stack all randomly simulated parameter sets into one data frame.
  do.call(rbind, parameters_list)
}
| /hyperparam_tuning/randomsearch_xgb.R | no_license | marccgrau/dml_est_general | R | false | false | 1,845 | r | randomsearch_xgb_oc = function(){
# randomly pick parameter values within defined boundaries
parameters_list = list()
for (i in 1:500){
param <- list(booster = "gbtree", # to account for non-linearities a tree based estimator is used
objective = "reg:squarederror",
max_depth = sample(1:10, 1),
eta = runif(1, 0.01, 0.4),
subsample = runif(1, 0.6, 1),
colsample_bytree = runif(1, 0.4, 1),
min_child_weight = sample(0:10, 1),
alpha = sample(0:6, 1),
nrounds= 300,
eval_metric = "rmse",
early_stopping_rounds= 30
)
parameters <- as.data.frame(param)
parameters_list[[i]] <- parameters
}
# create dataframe of all randomly simulated parameter sets
params_df = do.call(rbind, parameters_list)
return(params_df)
}
randomsearch_xgb_ps = function(){
# randomly pick parameter values within defined boundaries
parameters_list = list()
for (i in 1:500){
param <- list(booster = "gbtree", # to account for non-linearities a tree based estimator is used
objective = "binary:logistic",
max_depth = sample(1:10, 1),
eta = runif(1, 0.01, 0.4),
subsample = runif(1, 0.6, 1),
colsample_bytree = runif(1, 0.4, 1),
min_child_weight = sample(0:10, 1),
alpha = sample(0:6, 1),
nrounds= 300,
eval_metric = "error",
early_stopping_rounds= 30
)
parameters <- as.data.frame(param)
parameters_list[[i]] <- parameters
}
# create dataframe of all randomly simulated parameter sets
params_df = do.call(rbind, parameters_list)
return(params_df)
}
|
# Auto-generated test case (appears to be AFL fuzzing/valgrind input --
# see the file path): calls the internal eDMA:::PowerSet with a single
# extreme integer argument and prints the structure of the result.
testlist <- list(iK = 486539264L)
result <- do.call(eDMA:::PowerSet,testlist)
str(result)
result <- do.call(eDMA:::PowerSet,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bin.continuous.R
\name{bin.continuous}
\alias{bin.continuous}
\title{Divide continuous feature scores into specified number of bins.}
\usage{
bin.continuous(feature.name, feature.url, nbins)
}
\arguments{
\item{feature.name}{Continuous feature name.}
\item{feature.url}{URL of continuous feature.}
\item{nbins}{Number of bins to divide continuous feature score into.}
}
\value{
Binned continuous feature scores.
}
\description{
Divide continuous feature scores into specified number of bins.
}
| /MutSpot_Rpackage/man/bin.continuous.Rd | no_license | danchubb/hg38MutSpot | R | false | true | 574 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bin.continuous.R
\name{bin.continuous}
\alias{bin.continuous}
\title{Divide continuous feature scores into specified number of bins.}
\usage{
bin.continuous(feature.name, feature.url, nbins)
}
\arguments{
\item{feature.name}{Continuous feature name.}
\item{feature.url}{URL of continuous feature.}
\item{nbins}{Number of bins to divide continuous feature score into.}
}
\value{
Binned continuous feature scores.
}
\description{
Divide continuous feature scores into specified number of bins.
}
|
/R_기초프로그래밍_회귀분석/11_가설검정_2_독립2표본검정(170731).R | permissive | heechulk/Data_science_school_with_R | R | false | false | 2,646 | r | ||
#################################################################################
#### Functions for BML Simulation Study
#### Initialization function.
## Input : size of grid [r and c] and density [p]
## Output : A matrix [m] with entries 0 (no cars) 1 (red cars) or 2 (blue cars)
## that stores the state of the system (i.e. location of red and blue cars)
bml.init <- function(r, c, p){
  # Draw one uniform number per lattice site. Each site is independently
  # a car with probability p (split evenly between red and blue) or an
  # empty space with probability 1 - p.
  u <- runif(r * c)
  # Start from an all-blue (2) grid, then overwrite empties and reds.
  site <- rep(2, r * c)
  site[u <= 1 - p] <- 0                   # empty intersections
  site[u > 1 - p & u < 1 - p / 2] <- 1    # red cars
  # Reshape the flat vector into the r x c lattice and return it.
  array(site, c(r, c))
}
#### Function to move the system one step (east and north)
## Input : a matrix [m] of the same type as the output from bml.init()
## Output : TWO variables, the updated [m] and a logical variable
## [grid.new] which should be TRUE if the system changed, FALSE otherwise.
## NOTE : the function should move the red cars once and the blue cars once,
## you can write extra functions that do just a step north or just a step east.
#' Advance the BML system one step: blue cars move north, then red east.
#'
#' @param m state matrix with entries 0 (empty), 1 (red car), 2 (blue car).
#' @return list of two elements: the updated matrix, and a logical that is
#'   TRUE when at least one car moved (FALSE signals gridlock).
bml.step <- function(m){
  r <- dim(m)[1]
  c <- dim(m)[2]
  # Row/column coordinates of every blue and red car.
  blue <- which(m == 2, arr.ind = TRUE)
  red <- which(m == 1, arr.ind = TRUE)
  # --- Blue cars move north: row - 1, wrapping from row 1 to row r ---
  blue.north <- blue
  blue.north[,1] <- blue[,1] - 1
  blue.north[blue.north[,1] == 0, 1] <- r
  # A blue car moves only when its target cell is empty in the current
  # (pre-move) grid -- a synchronous sub-step.
  blue.move <- (m[blue.north] == 0)
  m[blue.north[blue.move, , drop = FALSE]] <- 2  # occupy the northern cell
  m[blue[blue.move, , drop = FALSE]] <- 0        # vacate the old cell
  # --- Red cars move east: col + 1, wrapping from col c to col 1 ---
  # Red sees the grid *after* the blue sub-step, per BML dynamics.
  red.east <- red
  red.east[,2] <- red[,2] + 1
  red.east[red.east[,2] == (c + 1), 2] <- 1
  red.move <- (m[red.east] == 0)
  m[red.east[red.move, , drop = FALSE]] <- 1  # occupy the eastern cell
  m[red[red.move, , drop = FALSE]] <- 0       # vacate the old cell
  # TRUE when at least one car moved this step.
  grid.new <- (sum(blue.move, red.move) > 0)
  return(list(m, grid.new))
}
#### Function to do a simulation for a given set of input parameters
## Input : size of grid [r and c] and density [p]
## Output : *up to you* (e.g. number of steps taken, did you hit gridlock, ...)
#' Run a BML simulation, animating each step on a new graphics device.
#'
#' @param r,c grid dimensions.
#' @param p initial car density.
#' @return number of steps taken before gridlock or the 2000-step cap.
bml.sim <- function(r, c, p){
  # Initialize the lattice.
  m <- bml.init(r, c, p)
  flag <- 1   # truthy while the previous step moved at least one car
  step <- 0
  # dev.new() opens the platform's default device (quartz on macOS);
  # the original quartz() call failed on non-macOS systems.
  dev.new()
  image(rotate(m), col = c('white', 'red', 'blue'))
  # Stop at gridlock (flag FALSE) or after 2000 steps; && is the correct
  # scalar short-circuit operator for a while-condition.
  while (flag && step <= 2000){
    step <- step + 1
    m.sim <- bml.step(m)
    m <- m.sim[[1]]
    flag <- m.sim[[2]]
    image(rotate(m), col = c('white', 'red', 'blue'))
  }
  return(step)
}
# Rotate a matrix 90 degrees clockwise for display with image():
# reverse each column, then transpose.
rotate <- function(x) {
  flipped <- apply(x, 2, rev)
  t(flipped)
}
| /bml_functions.R | no_license | Maggiezhang94/statistical-analysis-in-R-assignments-and-projects | R | false | false | 2,641 | r | #################################################################################
#### Functions for BML Simulation Study
#### Initialization function.
## Input : size of grid [r and c] and density [p]
## Output : A matrix [m] with entries 0 (no cars) 1 (red cars) or 2 (blue cars)
## that stores the state of the system (i.e. location of red and blue cars)
bml.init <- function(r, c, p){
# initialize a lattice with uniform distributed numbers
m=runif(r*c)
# determine which site is 0(empty), 1(red) and 2(blue)
# Initially, cars are distributed at random: each intersection is independently
# assigned a car with probability p, or an empty space, i.e., 0 with probability 1 - p.
# Each car is independently equally likely to be blue or red.
empty=which(m<=(1-p))
red=which(m>(1-p) & m<(1-p/2))
m=rep(2,r*c)
m[empty]=0
m[red]=1
m=array(m,c(r,c))
return(m)
}
#### Function to move the system one step (east and north)
## Input : a matrix [m] of the same type as the output from bml.init()
## Output : TWO variables, the updated [m] and a logical variable
## [grid.new] which should be TRUE if the system changed, FALSE otherwise.
## NOTE : the function should move the red cars once and the blue cars once,
## you can write extra functions that do just a step north or just a step east.
bml.step <- function(m){
r=dim(m)[1]
c=dim(m)[2]
blue=which(m==2,arr.ind=T)
red=which(m==1,arr.ind=T)
# blue cars move to the north
# the sites above the blue
blue.north=blue
blue.north[,1]=blue[,1]-1
blue.north[blue.north[,1]==0,1]=r
blue.move=(m[blue.north]==0)
m[blue.north[blue.move,,drop=FALSE]]=2 # move up?
m[blue[blue.move,,drop=FALSE]]=0 # make the empty?
# red cars move to the east
#browser()
red.east=red
red.east[,2]=red[,2]+1
red.east[red.east[,2]==(c+1),2]=1
red.move=(m[red.east]==0)
m[red.east[red.move,,drop=FALSE]]=1 # move to right
m[red[red.move,,drop=FALSE]]=0 # make the empty
grid.new=(sum(blue.move,red.move)>0)
return(list(m, grid.new))
}
#### Function to do a simulation for a given set of input parameters
## Input : size of grid [r and c] and density [p]
## Output : *up to you* (e.g. number of steps taken, did you hit gridlock, ...)
bml.sim <- function(r, c, p){
# initilize
m=bml.init(r,c,p)
flag=1
step=0
quartz()
image(rotate(m),col=c('white','red','blue'))
while(flag & step <= 2000){
step = step+1
m.sim=bml.step(m)
m=m.sim[[1]]
flag=m.sim[[2]]
image(rotate(m),col=c('white','red','blue'))
}
return (step)
}
# rotate the image for display
rotate <- function(x) t(apply(x, 2, rev))
|
# import metadata
# NOTE(review): the third positional argument of read.csv is `sep`, so
# "," / "\t" below set the field separator.
metadata <- read.csv("/Users/catherinehogg/Documents/Semester3/Project/InputData/isoforms/firstdata/samplefilters/MetaData.csv",
                     header = TRUE, ",")
# import sequencing data
sequencing <- read.csv("/Users/catherinehogg/Documents/Semester3/Project/InputData/isoforms/firstdata/samplefilters/SequencingMetrics.txt",
                        header = TRUE, "\t")
# add patient column to sequencing data
# Derive Patient.ID from the sample name by stripping the "rimary" /
# "ecurrent" parts of "Primary"/"Recurrent" and then the last two
# characters -- presumably a "_Rn"-style suffix; confirm the sample
# naming scheme before changing this.
sequencing$Patient.ID <- sub("rimary","",sequencing$Sample)
sequencing$Patient.ID <- sub("ecurrent","",sequencing$Patient.ID)
sequencing$Patient.ID <- gsub(".{2}$","",sequencing$Patient.ID)
# remove patients that are not in both sequencing and metadata
metadata.mod <- metadata[metadata$Patient.ID %in% sequencing$Patient.ID,]
sequencing.mod <- sequencing[sequencing$Patient.ID %in% metadata$Patient.ID,]
# Logic for patients that should be removed
# Inital diagnosis of primary GBM
# Same location of primary and recurrent tumour, and not NA or empty
# Radiotherapy and TMZ
# IDH 0
keep <- metadata.mod$Initial.Diagnosis == "Primary GBM" & metadata.mod$Location.Primary == metadata.mod$Location.Recurrence &
        !is.na(metadata.mod$Location.Primary) & metadata.mod$Location.Primary != "TBC" &
        (metadata.mod$Non.Surgical.Treatment == "Radiotherapy and TMZ" | metadata.mod$Non.Surgical.Treatment == "Radiotherapy and TMZ +") &
         metadata.mod$IDH == 0
# Column 3 is selected by position -- presumably Patient.ID; fragile if
# the metadata file's column order changes.
patientskeep.metadata <- metadata.mod[keep,3]
# get list of patients with read below above 30m
keep <- sequencing.mod$Reads < 30000000
# Column 17 is the derived Patient.ID column added above (positional;
# confirm if SequencingMetrics.txt gains/loses columns).
sequencing.mod <- sequencing.mod[keep,17]
patientsremove.below30 <- unique(sequencing.mod) # because if one paired sample below 30m reads then need to remove both
# keep any patients from patientskeep.metadata that have at both paired samples above 30m reads
keep <- !patientskeep.metadata %in% patientsremove.below30
patientskeep <- patientskeep.metadata[keep]
# finally, check that sequencing data exists for both samples for 66 patients in patientskeep
# (each entry of `temp` counts this patient's sequencing rows; 2 expected)
temp <- c()
for (i in 1:length(patientskeep)){
  temp[i] <- sum(sequencing$Patient.ID == patientskeep[i])
}
table(temp)
# write list of patients to keep to file
write.table(patientskeep,
            "/Users/catherinehogg/Documents/Semester3/Project/Results/filtered_data/isoforms/firstdata/patientskeep.txt",
            col.names = F, row.names = F)
| /1-dataprocessing/Sample_Filter_Logic/archive/PatientsKeep.R | no_license | catherinelebek/isoformproject | R | false | false | 2,369 | r | # import metadata
metadata <- read.csv("/Users/catherinehogg/Documents/Semester3/Project/InputData/isoforms/firstdata/samplefilters/MetaData.csv",
header = TRUE, ",")
# import sequencing data
sequencing <- read.csv("/Users/catherinehogg/Documents/Semester3/Project/InputData/isoforms/firstdata/samplefilters/SequencingMetrics.txt",
header = TRUE, "\t")
# add patient column to sequencing data
sequencing$Patient.ID <- sub("rimary","",sequencing$Sample)
sequencing$Patient.ID <- sub("ecurrent","",sequencing$Patient.ID)
sequencing$Patient.ID <- gsub(".{2}$","",sequencing$Patient.ID)
# remove patients that are not in both sequencing and metadata
metadata.mod <- metadata[metadata$Patient.ID %in% sequencing$Patient.ID,]
sequencing.mod <- sequencing[sequencing$Patient.ID %in% metadata$Patient.ID,]
# Logic for patients that should be removed
# Inital diagnosis of primary GBM
# Same location of primary and recurrent tumour, and not NA or empty
# Radiotherapy and TMZ
# IDH 0
keep <- metadata.mod$Initial.Diagnosis == "Primary GBM" & metadata.mod$Location.Primary == metadata.mod$Location.Recurrence &
!is.na(metadata.mod$Location.Primary) & metadata.mod$Location.Primary != "TBC" &
(metadata.mod$Non.Surgical.Treatment == "Radiotherapy and TMZ" | metadata.mod$Non.Surgical.Treatment == "Radiotherapy and TMZ +") &
metadata.mod$IDH == 0
patientskeep.metadata <- metadata.mod[keep,3]
# get list of patients with read below above 30m
keep <- sequencing.mod$Reads < 30000000
sequencing.mod <- sequencing.mod[keep,17]
patientsremove.below30 <- unique(sequencing.mod) # because if one paired sample below 30m reads then need to remove both
# keep any patients from patientskeep.metadata that have at both paired samples above 30m reads
keep <- !patientskeep.metadata %in% patientsremove.below30
patientskeep <- patientskeep.metadata[keep]
# finally, check that sequencing data exists for both samples for 66 patients in patientskeep
temp <- c()
for (i in 1:length(patientskeep)){
temp[i] <- sum(sequencing$Patient.ID == patientskeep[i])
}
table(temp)
# write list of patients to keep to file
write.table(patientskeep,
"/Users/catherinehogg/Documents/Semester3/Project/Results/filtered_data/isoforms/firstdata/patientskeep.txt",
col.names = F, row.names = F)
|
category CUI
GNU Readline によるコマンドライン入力インタフェースを提供するライブラリです。
= module Readline
GNU Readline によるコマンドライン入力インタフェースを提供するモジュールです。
GNU Readline 互換ライブラリのひとつである Edit Line(libedit) もサポートしています。
* [[url:https://directory.fsf.org/wiki/Readline]]
* [[url:https://thrysoee.dk/editline/]]
Readline.readline を使用してユーザからの入力を取得できます。
このとき、 GNU Readline のように入力の補完や
Emacs のようなキー操作などができます。
例: プロンプト"> "を表示して、ユーザからの入力を取得する。
require 'readline'
while buf = Readline.readline("> ", true)
print("-> ", buf, "\n")
end
ユーザが入力した内容を履歴(以下、ヒストリ)として記録することができます。
定数 [[c:Readline::HISTORY]] を使用して入力履歴にアクセスできます。
例えば、[[c:Readline::HISTORY]].to_a により、
全ての入力した内容を文字列の配列として取得できます。
例: ヒストリを配列として取得する。
require 'readline'
while buf = Readline.readline("> ", true)
p Readline::HISTORY.to_a
print("-> ", buf, "\n")
end
== Module Functions
--- readline(prompt = "", add_hist = false) -> String | nil
prompt を出力し、ユーザからのキー入力を待ちます。
エンターキーの押下などでユーザが文字列を入力し終えると、
入力した文字列を返します。
このとき、add_hist が true であれば、入力した文字列を入力履歴に追加します。
何も入力していない状態で EOF(UNIX では ^D) を入力するなどで、
ユーザからの入力がない場合は nil を返します。
本メソッドはスレッドに対応しています。
入力待ち状態のときはスレッドコンテキストの切替えが発生します。
入力時には行内編集が可能で、vi モードと Emacs モードが用意されています。
デフォルトは Emacs モードです。
@param prompt カーソルの前に表示する文字列を指定します。デフォルトは""です。
@param add_hist 真ならば、入力した文字列をヒストリに記録します。デフォルトは偽です。
@raise IOError 標準入力が tty でない、かつ、標準入力をクローズしている
([[man:isatty(2)]] の errno が EBADF である。) 場合に発生します。
例:
require "readline"
input = Readline.readline
(プロンプトなどは表示せずに、入力待ちの状態になります。
ここでは「abc」を入力後、エンターキーを押したと想定します。)
abc
p input # => "abc"
input = Readline.readline("> ")
(">"を表示し、入力待ちの状態になります。
ここでは「ls」を入力後、エンターキーを押したと想定します。)
> ls
p input # => "ls"
input = Readline.readline("> ", true)
(">"を表示し、入力待ちの状態になります。
ここでは「cd」を入力後、エンターキーを押したと想定します。)
> cd
p input # => "cd"
input = Readline.readline("> ", true)
(">"を表示し、入力待ちの状態になります。
ここで、カーソルの上キー、または ^P を押すと、
先ほど入力した「cd」が表示されます。
そして、エンターキーを押したと想定します。)
> cd
p input # => "cd"
本メソッドには注意事項があります。
入力待ちの状態で ^C すると ruby インタプリタが終了し、端末状態を復帰しません。
これを回避するための例を2つ挙げます。
例: ^CによるInterrupt例外を捕捉して、端末状態を復帰する。
require 'readline'
stty_save = `stty -g`.chomp
begin
while buf = Readline.readline
p buf
end
rescue Interrupt
system("stty", stty_save)
exit
end
例: INTシグナルを捕捉して、端末状態を復帰する。
require 'readline'
stty_save = `stty -g`.chomp
trap("INT") { system "stty", stty_save; exit }
while buf = Readline.readline
p buf
end
また、単に ^C を無視する方法もあります。
require 'readline'
trap("INT", "SIG_IGN")
while buf = Readline.readline
p buf
end
入力履歴 Readline::HISTORY を使用して、次のようなこともできます。
例: 空行や直前の入力と同じ内容は入力履歴に残さない。
require 'readline'
while buf = Readline.readline("> ", true)
# p Readline::HISTORY.to_a
Readline::HISTORY.pop if /^\s*$/ =~ buf
begin
if Readline::HISTORY[Readline::HISTORY.length-2] == buf
Readline::HISTORY.pop
end
rescue IndexError
end
# p Readline::HISTORY.to_a
print "-> ", buf, "\n"
end
@see [[m:Readline.vi_editing_mode]]、[[m:Readline.emacs_editing_mode]]、
[[c:Readline::HISTORY]]
== Singleton Methods
#@since 1.9.1
--- input=(input)
readline メソッドで使用する入力用の [[c:File]] オブジェクト input を指定します。
戻り値は指定した [[c:File]] オブジェクト input です。
@param input [[c:File]] オブジェクトを指定します。
--- output=(output)
readline メソッドで使用する出力用の [[c:File]] オブジェクト output を指定します。
戻り値は指定した [[c:File]] オブジェクト output です。
@param output [[c:File]] オブジェクトを指定します。
#@end
--- completion_proc=(proc)
ユーザからの入力を補完する時の候補を取得する [[c:Proc]] オブジェクト
proc を指定します。
proc は、次のものを想定しています。
(1) callメソッドを持つ。callメソッドを持たない場合、例外 ArgumentError を発生します。
(2) 引数にユーザからの入力文字列を取る。
(3) 候補の文字列の配列を返す。
「/var/lib /v」の後で補完を行うと、
デフォルトでは proc の引数に「/v」が渡されます。
このように、ユーザが入力した文字列を
[[m:Readline.completer_word_break_characters]] に含まれる文字で区切ったものを単語とすると、
カーソルがある単語の最初の文字から現在のカーソル位置までの文字列が proc の引数に渡されます。
@param proc ユーザからの入力を補完する時の候補を取得する [[c:Proc]] オブジェクトを指定します。
#@since 1.9.2
nil を指定した場合はデフォルトの動作になります。
#@end
例: foo、foobar、foobazを補完する。
require 'readline'
WORDS = %w(foo foobar foobaz)
Readline.completion_proc = proc {|word|
WORDS.grep(/\A#{Regexp.quote word}/)
}
while buf = Readline.readline("> ")
print "-> ", buf, "\n"
end
@see [[m:Readline.completion_proc]]
--- completion_proc -> Proc
ユーザからの入力を補完する時の候補を取得する [[c:Proc]] オブジェクト
proc を取得します。
@see [[m:Readline.completion_proc=]]
--- completion_case_fold=(bool)
ユーザの入力を補完する際、大文字と小文字を同一視する/しないを指定します。
bool が真ならば同一視します。bool が偽ならば同一視しません。
@param bool 大文字と小文字を同一視する(true)/しない(false)を指定します。
@see [[m:Readline.completion_case_fold]]
--- completion_case_fold -> bool
ユーザの入力を補完する際、大文字と小文字を同一視する/しないを取得します。
bool が真ならば同一視します。bool が偽ならば同一視しません。
なお、Readline.completion_case_fold= メソッドで指定したオブジェクトを
そのまま取得するので、次のような動作をします。
require 'readline'
Readline.completion_case_fold = "This is a String."
p Readline.completion_case_fold # => "This is a String."
@see [[m:Readline.completion_case_fold=]]
--- vi_editing_mode -> nil
編集モードを vi モードにします。
vi モードの詳細は、GNU Readline のマニュアルを参照してください。
* [[url:http://www.gnu.org/directory/readline.html]]
@raise NotImplementedError サポートしていない環境で発生します。
--- emacs_editing_mode -> nil
編集モードを Emacs モードにします。デフォルトは Emacs モードです。
Emacs モードの詳細は、 GNU Readline のマニュアルを参照してください。
* [[url:http://www.gnu.org/directory/readline.html]]
@raise NotImplementedError サポートしていない環境で発生します。
--- completion_append_character=(string)
ユーザの入力の補完が完了した場合に、最後に付加する文字 string を指定します。
@param string 1文字を指定します。
@raise NotImplementedError サポートしていない環境で発生します。
半角スペース「" "」などの単語を区切る文字を指定すれば、
連続して入力する際に便利です。
require 'readline'
Readline.readline("> ", true)
Readline.completion_append_character = " "
> /var/li
ここで補完(TABキーを押す)を行う。
> /var/lib
最後に" "が追加されているため、すぐに「/usr」などを入力できる。
> /var/lib /usr
なお、1文字しか指定することはできないため、
例えば、"string"を指定した場合は最初の文字である"s"だけを使用します。
require 'readline'
Readline.completion_append_character = "string"
p Readline.completion_append_character # => "s"
@see [[m:Readline.completion_append_character]]
--- completion_append_character -> String
ユーザの入力の補完が完了した場合に、最後に付加する文字を取得します。
@raise NotImplementedError サポートしていない環境で発生します。
@see [[m:Readline.completion_append_character=]]
--- basic_word_break_characters=(string)
ユーザの入力の補完を行う際、
単語の区切りを示す複数の文字で構成される文字列 string を指定します。
GNU Readline のデフォルト値は、Bash の補完処理で使用している文字列
" \t\n\"\\'`@$><=;|&{(" (スペースを含む) になっています。
@param string 文字列を指定します。
@raise NotImplementedError サポートしていない環境で発生します。
@see [[m:Readline.basic_word_break_characters]]
--- basic_word_break_characters -> String
ユーザの入力の補完を行う際、
単語の区切りを示す複数の文字で構成される文字列を取得します。
@raise NotImplementedError サポートしていない環境で発生します。
@see [[m:Readline.basic_word_break_characters=]]
--- completer_word_break_characters=(string)
ユーザの入力の補完を行う際、
単語の区切りを示す複数の文字で構成される文字列 string を指定します。
[[m:Readline.basic_word_break_characters=]] との違いは、
GNU Readline の rl_complete_internal 関数で使用されることです。
GNU Readline のデフォルトの値は、
[[m:Readline.basic_word_break_characters]] と同じです。
@param string 文字列を指定します。
@raise NotImplementedError サポートしていない環境で発生します。
@see [[m:Readline.completer_word_break_characters]]
--- completer_word_break_characters -> String
ユーザの入力の補完を行う際、
単語の区切りを示す複数の文字で構成された文字列を取得します。
[[m:Readline.basic_word_break_characters]] との違いは、
GNU Readline の rl_complete_internal 関数で使用されることです。
@raise NotImplementedError サポートしていない環境で発生します。
@see [[m:Readline.completer_word_break_characters=]]
--- basic_quote_characters=(string)
スペースなどの単語の区切りをクオートするための
複数の文字で構成される文字列 string を指定します。
GNU Readline のデフォルト値は、「"'」です。
@param string 文字列を指定します。
@raise NotImplementedError サポートしていない環境で発生します。
@see [[m:Readline.basic_quote_characters]]
--- basic_quote_characters -> String
スペースなどの単語の区切りをクオートするための
複数の文字で構成される文字列を取得します。
@raise NotImplementedError サポートしていない環境で発生します。
@see [[m:Readline.basic_quote_characters=]]
--- completer_quote_characters=(string)
ユーザの入力の補完を行う際、スペースなどの単語の区切りを
クオートするための複数の文字で構成される文字列 string を指定します。
指定した文字の間では、[[m:Readline.completer_word_break_characters=]]
で指定した文字列に含まれる文字も、普通の文字列として扱われます。
@param string 文字列を指定します。
@raise NotImplementedError サポートしていない環境で発生します。
@see [[m:Readline.completer_quote_characters]]
--- completer_quote_characters -> String
ユーザの入力の補完を行う際、スペースなどの単語の区切りを
クオートするための複数の文字で構成される文字列を取得します。
@raise NotImplementedError サポートしていない環境で発生します。
@see [[m:Readline.completer_quote_characters=]]
--- filename_quote_characters=(string)
ユーザの入力時にファイル名の補完を行う際、スペースなどの単語の区切りを
クオートするための複数の文字で構成される文字列 string を指定します。
GNU Readline のデフォルト値は nil(NULL) です。
@param string 文字列を指定します。
@raise NotImplementedError サポートしていない環境で発生します。
@see [[m:Readline.filename_quote_characters]]
--- filename_quote_characters -> String
ユーザの入力時にファイル名の補完を行う際、スペースなどの単語の区切りを
クオートするための複数の文字で構成される文字列を取得します。
@raise NotImplementedError サポートしていない環境で発生します。
@see [[m:Readline.filename_quote_characters=]]
#@since 1.9.2
--- Readline.set_screen_size(rows, columns) -> Readline
端末のサイズを引数 row、columns に設定します。
@param rows 行数を整数で指定します。
@param columns 列数を整数で指定します。
@raise NotImplementedError サポートしていない環境で発生します。
@see GNU Readline ライブラリの rl_set_screen_size 関数
--- Readline.get_screen_size -> [Integer, Integer]
端末のサイズを [rows, columns] で返します。
@raise NotImplementedError サポートしていない環境で発生します。
@see GNU Readline ライブラリの rl_get_screen_size 関数
#@end
== Constants
--- VERSION -> String
Readlineモジュールが使用している GNU Readline や libedit のバージョンを
示す文字列です。
--- FILENAME_COMPLETION_PROC -> Proc
GNU Readline で定義されている関数を使用してファイル名の補完を行うための
[[c:Proc]] オブジェクトです。
[[m:Readline.completion_proc=]] で使用します。
@see [[m:Readline.completion_proc=]]
--- USERNAME_COMPLETION_PROC -> Proc
GNU Readline で定義されている関数を使用してユーザ名の補完を行うための
[[c:Proc]] オブジェクトです。
[[m:Readline.completion_proc=]] で使用します。
@see [[m:Readline.completion_proc=]]
= object Readline::HISTORY
extend Enumerable
Readline::HISTORY を使用してヒストリにアクセスできます。
[[c:Enumerable]] モジュールを extend しており、
[[c:Array]] クラスのように振る舞うことができます。
例えば、HISTORY[4] により 5 番目に入力した内容を取り出すことができます。
--- to_s -> "HISTORY"
文字列"HISTORY"を返します。
例:
require 'readline'
Readline::HISTORY.to_s #=> "HISTORY"
--- [](index) -> String
ヒストリから index で指定したインデックスの内容を取得します。
例えば index に 0 を指定すると最初の入力内容が取得できます。
また、 -1 は最後の入力内容というように、index に負の値を指定することで、
最後から入力内容を取得することもできます。
@param index 取得対象のヒストリのインデックスを整数で指定します。
インデックスは [[c:Array]] のように 0 から指定します。
また、 -1 は最後の入力内容というように、負の数を指定することもできます。
@raise IndexError index で指定したインデックスに該当する入力内容がない場合に発生します。
@raise RangeError index で指定したインデックスが int 型よりも大きな値の場合に発生します。
例:
require "readline"
Readline::HISTORY[0] #=> 最初の入力内容
Readline::HISTORY[4] #=> 5番目の入力内容
Readline::HISTORY[-1] #=> 最後の入力内容
Readline::HISTORY[-5] #=> 最後から5番目の入力内容
例: 1000000 番目の入力内容が存在しない場合、例外 IndexError が発生します。
require "readline"
Readline::HISTORY[1000000] #=> 例外 IndexError が発生します。
例: 32 bit のシステムの場合、例外 RangeError が発生します。
require "readline"
Readline::HISTORY[2 ** 32 + 1] #=> 例外 RangeError が発生します。
例: 64 bit のシステムの場合、例外 RangeError が発生します。
require "readline"
Readline::HISTORY[2 ** 64 + 1] #=> 例外 RangeError が発生します。
--- []=(index, string)
ヒストリの index で指定したインデックスの内容を string で指定した文字列で書き換えます。
例えば index に 0 を指定すると最初の入力内容を書き換えます。
また、 -1 は最後の入力内容というように、index に負の値を指定することで、
最後から入力内容を取得することもできます。
指定した string を返します。
@param index 取得対象のヒストリのインデックスを整数で指定します。
インデックスは [[c:Array]] のように 0 から指定します。
また、 -1 は最後の入力内容というように、負の数を指定することもできます。
@param string 文字列を指定します。この文字列でヒストリを書き換えます。
@raise IndexError index で指定したインデックスに該当する入力内容がない場合に発生します。
@raise RangeError index で指定したインデックスが int 型よりも大きな値の場合に発生します。
@raise NotImplementedError サポートしていない環境で発生します。
--- <<(string) -> self
ヒストリの最後に string で指定した文字列を追加します。
self を返します。
@param string 文字列を指定します。
例: "foo"を追加する。
require "readline"
Readline::HISTORY << "foo"
p Readline::HISTORY[-1] #=> "foo"
例: "foo"、"bar"を追加する。
require "readline"
Readline::HISTORY << "foo" << "bar"
p Readline::HISTORY[-1] #=> "bar"
p Readline::HISTORY[-2] #=> "foo"
@see [[m:Readline::HISTORY.push]]
--- push(*string) -> self
ヒストリの最後に string で指定した文字列を追加します。複数の string を指定できます。
self を返します。
@param string 文字列を指定します。複数指定できます。
例: "foo"を追加する。
require "readline"
Readline::HISTORY.push("foo")
p Readline::HISTORY[-1] #=> "foo"
例: "foo"、"bar"を追加する。
require "readline"
Readline::HISTORY.push("foo", "bar")
p Readline::HISTORY[-1] #=> "bar"
p Readline::HISTORY[-2] #=> "foo"
@see [[m:Readline::HISTORY.<<]]
--- pop -> String
ヒストリの最後の内容を取り出します。
最後の内容は、ヒストリから取り除かれます。
@raise NotImplementedError サポートしていない環境で発生します。
例:
require "readline"
Readline::HISTORY.push("foo", "bar", "baz")
p Readline::HISTORY.pop #=> "baz"
p Readline::HISTORY.pop #=> "bar"
p Readline::HISTORY.pop #=> "foo"
@see [[m:Readline::HISTORY.push]]、[[m:Readline::HISTORY.shift]]、
[[m:Readline::HISTORY.delete_at]]
--- shift -> String
ヒストリの最初の内容を取り出します。
最初の内容は、ヒストリから取り除かれます。
@raise NotImplementedError サポートしていない環境で発生します。
例:
require "readline"
Readline::HISTORY.push("foo", "bar", "baz")
p Readline::HISTORY.shift #=> "foo"
p Readline::HISTORY.shift #=> "bar"
p Readline::HISTORY.shift #=> "baz"
@see [[m:Readline::HISTORY.push]]、[[m:Readline::HISTORY.pop]]、
[[m:Readline::HISTORY.delete_at]]
#@since 1.9.1
--- each -> Enumerator
#@end
--- each {|string| ... }
ヒストリの内容に対してブロックを評価します。
ブロックパラメータにはヒストリの最初から最後までの内容を順番に渡します。
例: ヒストリの内容を最初から順番に出力する。
require "readline"
Readline::HISTORY.push("foo", "bar", "baz")
Readline::HISTORY.each do |s|
p s #=> "foo", "bar", "baz"
end
#@since 1.9.1
例: [[c:Enumerator]] オブジェクトを返す場合。
require "readline"
Readline::HISTORY.push("foo", "bar", "baz")
e = Readline::HISTORY.each
e.each do |s|
p s #=> "foo", "bar", "baz"
end
#@end
--- length -> Integer
--- size -> Integer
ヒストリに格納された内容の数を取得します。
例: ヒストリの内容を最初から順番に出力する。
require "readline"
Readline::HISTORY.push("foo", "bar", "baz")
p Readline::HISTORY.length #=> 3
@see [[m:Readline::HISTORY.empty?]]
--- empty? -> bool
ヒストリに格納された内容の数が 0 の場合は true を、
そうでない場合は false を返します。
例:
require "readline"
p Readline::HISTORY.empty? #=> true
Readline::HISTORY.push("foo", "bar", "baz")
p Readline::HISTORY.empty? #=> false
@see [[m:Readline::HISTORY.length]]
--- delete_at(index) -> String | nil
index で指定したインデックスの内容をヒストリから削除し、その内容を返します。
該当する index の内容がヒストリになければ、 nil を返します。
index に 0 を指定すると [[m:Readline::HISTORY.shift]]
と同様に最初の入力内容を削除します。
また、 -1 は最後の入力内容というように、index に負の値を指定することで、
最後から入力内容を取得することもできます。
index が -1 の場合は [[m:Readline::HISTORY.pop]] と同様に動作します。
@param index 削除対象のヒストリのインデックスを指定します。
@raise NotImplementedError サポートしていない環境で発生します。
例:
require "readline"
Readline::HISTORY.push("foo", "bar", "baz")
Readline::HISTORY.delete_at(1)
p Readline::HISTORY.to_a #=> ["foo", "baz"]
#@since 1.9.1
--- clear -> self
ヒストリの内容をすべて削除して空にします。
@raise NotImplementedError サポートしていない環境で発生します。
#@end
| /refm/api/src/readline.rd | no_license | foomin10/doctree | R | false | false | 23,865 | rd | category CUI
GNU Readline によるコマンドライン入力インタフェースを提供するライブラリです。
= module Readline
GNU Readline によるコマンドライン入力インタフェースを提供するモジュールです。
GNU Readline 互換ライブラリのひとつである Edit Line(libedit) もサポートしています。
* [[url:https://directory.fsf.org/wiki/Readline]]
* [[url:https://thrysoee.dk/editline/]]
Readline.readline を使用してユーザからの入力を取得できます。
このとき、 GNU Readline のように入力の補完や
Emacs のようなキー操作などができます。
例: プロンプト"> "を表示して、ユーザからの入力を取得する。
require 'readline'
while buf = Readline.readline("> ", true)
print("-> ", buf, "\n")
end
ユーザが入力した内容を履歴(以下、ヒストリ)として記録することができます。
定数 [[c:Readline::HISTORY]] を使用して入力履歴にアクセスできます。
例えば、[[c:Readline::HISTORY]].to_a により、
全ての入力した内容を文字列の配列として取得できます。
例: ヒストリを配列として取得する。
require 'readline'
while buf = Readline.readline("> ", true)
p Readline::HISTORY.to_a
print("-> ", buf, "\n")
end
== Module Functions
--- readline(prompt = "", add_hist = false) -> String | nil
prompt を出力し、ユーザからのキー入力を待ちます。
エンターキーの押下などでユーザが文字列を入力し終えると、
入力した文字列を返します。
このとき、add_hist が true であれば、入力した文字列を入力履歴に追加します。
何も入力していない状態で EOF(UNIX では ^D) を入力するなどで、
ユーザからの入力がない場合は nil を返します。
本メソッドはスレッドに対応しています。
入力待ち状態のときはスレッドコンテキストの切替えが発生します。
入力時には行内編集が可能で、vi モードと Emacs モードが用意されています。
デフォルトは Emacs モードです。
@param prompt カーソルの前に表示する文字列を指定します。デフォルトは""です。
@param add_hist 真ならば、入力した文字列をヒストリに記録します。デフォルトは偽です。
@raise IOError 標準入力が tty でない、かつ、標準入力をクローズしている
([[man:isatty(2)]] の errno が EBADF である。) 場合に発生します。
例:
require "readline"
input = Readline.readline
(プロンプトなどは表示せずに、入力待ちの状態になります。
ここでは「abc」を入力後、エンターキーを押したと想定します。)
abc
p input # => "abc"
input = Readline.readline("> ")
(">"を表示し、入力待ちの状態になります。
ここでは「ls」を入力後、エンターキーを押したと想定します。)
> ls
p input # => "ls"
input = Readline.readline("> ", true)
(">"を表示し、入力待ちの状態になります。
ここでは「cd」を入力後、エンターキーを押したと想定します。)
> cd
p input # => "cd"
input = Readline.readline("> ", true)
(">"を表示し、入力待ちの状態になります。
ここで、カーソルの上キー、または ^P を押すと、
先ほど入力した「cd」が表示されます。
そして、エンターキーを押したと想定します。)
> cd
p input # => "cd"
本メソッドには注意事項があります。
入力待ちの状態で ^C すると ruby インタプリタが終了し、端末状態を復帰しません。
これを回避するための例を2つ挙げます。
例: ^CによるInterrupt例外を捕捉して、端末状態を復帰する。
require 'readline'
stty_save = `stty -g`.chomp
begin
while buf = Readline.readline
p buf
end
rescue Interrupt
system("stty", stty_save)
exit
end
例: INTシグナルを捕捉して、端末状態を復帰する。
require 'readline'
stty_save = `stty -g`.chomp
trap("INT") { system "stty", stty_save; exit }
while buf = Readline.readline
p buf
end
また、単に ^C を無視する方法もあります。
require 'readline'
trap("INT", "SIG_IGN")
while buf = Readline.readline
p buf
end
入力履歴 Readline::HISTORY を使用して、次のようなこともできます。
例: 空行や直前の入力と同じ内容は入力履歴に残さない。
require 'readline'
while buf = Readline.readline("> ", true)
# p Readline::HISTORY.to_a
Readline::HISTORY.pop if /^\s*$/ =~ buf
begin
if Readline::HISTORY[Readline::HISTORY.length-2] == buf
Readline::HISTORY.pop
end
rescue IndexError
end
# p Readline::HISTORY.to_a
print "-> ", buf, "\n"
end
@see [[m:Readline.vi_editing_mode]]、[[m:Readline.emacs_editing_mode]]、
[[c:Readline::HISTORY]]
== Singleton Methods
#@since 1.9.1
--- input=(input)
readline メソッドで使用する入力用の [[c:File]] オブジェクト input を指定します。
戻り値は指定した [[c:File]] オブジェクト input です。
@param input [[c:File]] オブジェクトを指定します。
--- output=(output)
readline メソッドで使用する出力用の [[c:File]] オブジェクト output を指定します。
戻り値は指定した [[c:File]] オブジェクト output です。
@param output [[c:File]] オブジェクトを指定します。
#@end
--- completion_proc=(proc)
ユーザからの入力を補完する時の候補を取得する [[c:Proc]] オブジェクト
proc を指定します。
proc は、次のものを想定しています。
(1) callメソッドを持つ。callメソッドを持たない場合、例外 ArgumentError を発生します。
(2) 引数にユーザからの入力文字列を取る。
(3) 候補の文字列の配列を返す。
「/var/lib /v」の後で補完を行うと、
デフォルトでは proc の引数に「/v」が渡されます。
このように、ユーザが入力した文字列を
[[m:Readline.completer_word_break_characters]] に含まれる文字で区切ったものを単語とすると、
カーソルがある単語の最初の文字から現在のカーソル位置までの文字列が proc の引数に渡されます。
@param proc ユーザからの入力を補完する時の候補を取得する [[c:Proc]] オブジェクトを指定します。
#@since 1.9.2
nil を指定した場合はデフォルトの動作になります。
#@end
例: foo、foobar、foobazを補完する。
require 'readline'
WORDS = %w(foo foobar foobaz)
Readline.completion_proc = proc {|word|
WORDS.grep(/\A#{Regexp.quote word}/)
}
while buf = Readline.readline("> ")
print "-> ", buf, "\n"
end
@see [[m:Readline.completion_proc]]
--- completion_proc -> Proc
ユーザからの入力を補完する時の候補を取得する [[c:Proc]] オブジェクト
proc を取得します。
@see [[m:Readline.completion_proc=]]
--- completion_case_fold=(bool)
ユーザの入力を補完する際、大文字と小文字を同一視する/しないを指定します。
bool が真ならば同一視します。bool が偽ならば同一視しません。
@param bool 大文字と小文字を同一視する(true)/しない(false)を指定します。
@see [[m:Readline.completion_case_fold]]
--- completion_case_fold -> bool
ユーザの入力を補完する際、大文字と小文字を同一視する/しないを取得します。
bool が真ならば同一視します。bool が偽ならば同一視しません。
なお、Readline.completion_case_fold= メソッドで指定したオブジェクトを
そのまま取得するので、次のような動作をします。
require 'readline'
Readline.completion_case_fold = "This is a String."
p Readline.completion_case_fold # => "This is a String."
@see [[m:Readline.completion_case_fold=]]
--- vi_editing_mode -> nil
編集モードを vi モードにします。
vi モードの詳細は、GNU Readline のマニュアルを参照してください。
* [[url:http://www.gnu.org/directory/readline.html]]
@raise NotImplementedError サポートしていない環境で発生します。
--- emacs_editing_mode -> nil
編集モードを Emacs モードにします。デフォルトは Emacs モードです。
Emacs モードの詳細は、 GNU Readline のマニュアルを参照してください。
* [[url:http://www.gnu.org/directory/readline.html]]
@raise NotImplementedError サポートしていない環境で発生します。
--- completion_append_character=(string)
ユーザの入力の補完が完了した場合に、最後に付加する文字 string を指定します。
@param string 1文字を指定します。
@raise NotImplementedError サポートしていない環境で発生します。
半角スペース「" "」などの単語を区切る文字を指定すれば、
連続して入力する際に便利です。
require 'readline'
Readline.readline("> ", true)
Readline.completion_append_character = " "
> /var/li
ここで補完(TABキーを押す)を行う。
> /var/lib
最後に" "が追加されているため、すぐに「/usr」などを入力できる。
> /var/lib /usr
なお、1文字しか指定することはできないため、
例えば、"string"を指定した場合は最初の文字である"s"だけを使用します。
require 'readline'
Readline.completion_append_character = "string"
p Readline.completion_append_character # => "s"
@see [[m:Readline.completion_append_character]]
--- completion_append_character -> String
ユーザの入力の補完が完了した場合に、最後に付加する文字を取得します。
@raise NotImplementedError サポートしていない環境で発生します。
@see [[m:Readline.completion_append_character=]]
--- basic_word_break_characters=(string)
ユーザの入力の補完を行う際、
単語の区切りを示す複数の文字で構成される文字列 string を指定します。
GNU Readline のデフォルト値は、Bash の補完処理で使用している文字列
" \t\n\"\\'`@$><=;|&{(" (スペースを含む) になっています。
@param string 文字列を指定します。
@raise NotImplementedError サポートしていない環境で発生します。
@see [[m:Readline.basic_word_break_characters]]
--- basic_word_break_characters -> String
ユーザの入力の補完を行う際、
単語の区切りを示す複数の文字で構成される文字列を取得します。
@raise NotImplementedError サポートしていない環境で発生します。
@see [[m:Readline.basic_word_break_characters=]]
--- completer_word_break_characters=(string)
ユーザの入力の補完を行う際、
単語の区切りを示す複数の文字で構成される文字列 string を指定します。
[[m:Readline.basic_word_break_characters=]] との違いは、
GNU Readline の rl_complete_internal 関数で使用されることです。
GNU Readline のデフォルトの値は、
[[m:Readline.basic_word_break_characters]] と同じです。
@param string 文字列を指定します。
@raise NotImplementedError サポートしていない環境で発生します。
@see [[m:Readline.completer_word_break_characters]]
--- completer_word_break_characters -> String
ユーザの入力の補完を行う際、
単語の区切りを示す複数の文字で構成された文字列を取得します。
[[m:Readline.basic_word_break_characters]] との違いは、
GNU Readline の rl_complete_internal 関数で使用されることです。
@raise NotImplementedError サポートしていない環境で発生します。
@see [[m:Readline.completer_word_break_characters=]]
--- basic_quote_characters=(string)
スペースなどの単語の区切りをクオートするための
複数の文字で構成される文字列 string を指定します。
GNU Readline のデフォルト値は、「"'」です。
@param string 文字列を指定します。
@raise NotImplementedError サポートしていない環境で発生します。
@see [[m:Readline.basic_quote_characters]]
--- basic_quote_characters -> String
スペースなどの単語の区切りをクオートするための
複数の文字で構成される文字列を取得します。
@raise NotImplementedError サポートしていない環境で発生します。
@see [[m:Readline.basic_quote_characters=]]
--- completer_quote_characters=(string)
ユーザの入力の補完を行う際、スペースなどの単語の区切りを
クオートするための複数の文字で構成される文字列 string を指定します。
指定した文字の間では、[[m:Readline.completer_word_break_characters=]]
で指定した文字列に含まれる文字も、普通の文字列として扱われます。
@param string 文字列を指定します。
@raise NotImplementedError サポートしていない環境で発生します。
@see [[m:Readline.completer_quote_characters]]
--- completer_quote_characters -> String
ユーザの入力の補完を行う際、スペースなどの単語の区切りを
クオートするための複数の文字で構成される文字列を取得します。
@raise NotImplementedError サポートしていない環境で発生します。
@see [[m:Readline.completer_quote_characters=]]
--- filename_quote_characters=(string)
ユーザの入力時にファイル名の補完を行う際、スペースなどの単語の区切りを
クオートするための複数の文字で構成される文字列 string を指定します。
GNU Readline のデフォルト値は nil(NULL) です。
@param string 文字列を指定します。
@raise NotImplementedError サポートしていない環境で発生します。
@see [[m:Readline.filename_quote_characters]]
--- filename_quote_characters -> String
ユーザの入力時にファイル名の補完を行う際、スペースなどの単語の区切りを
クオートするための複数の文字で構成される文字列を取得します。
@raise NotImplementedError サポートしていない環境で発生します。
@see [[m:Readline.filename_quote_characters=]]
#@since 1.9.2
--- Readline.set_screen_size(rows, columns) -> Readline
端末のサイズを引数 row、columns に設定します。
@param rows 行数を整数で指定します。
@param columns 列数を整数で指定します。
@raise NotImplementedError サポートしていない環境で発生します。
@see GNU Readline ライブラリの rl_set_screen_size 関数
--- Readline.get_screen_size -> [Integer, Integer]
端末のサイズを [rows, columns] で返します。
@raise NotImplementedError サポートしていない環境で発生します。
@see GNU Readline ライブラリの rl_get_screen_size 関数
#@end
== Constants
--- VERSION -> String
Readlineモジュールが使用している GNU Readline や libedit のバージョンを
示す文字列です。
--- FILENAME_COMPLETION_PROC -> Proc
GNU Readline で定義されている関数を使用してファイル名の補完を行うための
[[c:Proc]] オブジェクトです。
[[m:Readline.completion_proc=]] で使用します。
@see [[m:Readline.completion_proc=]]
--- USERNAME_COMPLETION_PROC -> Proc
GNU Readline で定義されている関数を使用してユーザ名の補完を行うための
[[c:Proc]] オブジェクトです。
[[m:Readline.completion_proc=]] で使用します。
@see [[m:Readline.completion_proc=]]
= object Readline::HISTORY
extend Enumerable
Readline::HISTORY を使用してヒストリにアクセスできます。
[[c:Enumerable]] モジュールを extend しており、
[[c:Array]] クラスのように振る舞うことができます。
例えば、HISTORY[4] により 5 番目に入力した内容を取り出すことができます。
--- to_s -> "HISTORY"
文字列"HISTORY"を返します。
例:
require 'readline'
Readline::HISTORY.to_s #=> "HISTORY"
--- [](index) -> String
ヒストリから index で指定したインデックスの内容を取得します。
例えば index に 0 を指定すると最初の入力内容が取得できます。
また、 -1 は最後の入力内容というように、index に負の値を指定することで、
最後から入力内容を取得することもできます。
@param index 取得対象のヒストリのインデックスを整数で指定します。
インデックスは [[c:Array]] のように 0 から指定します。
また、 -1 は最後の入力内容というように、負の数を指定することもできます。
@raise IndexError index で指定したインデックスに該当する入力内容がない場合に発生します。
@raise RangeError index で指定したインデックスが int 型よりも大きな値の場合に発生します。
例:
require "readline"
Readline::HISTORY[0] #=> 最初の入力内容
Readline::HISTORY[4] #=> 5番目の入力内容
Readline::HISTORY[-1] #=> 最後の入力内容
Readline::HISTORY[-5] #=> 最後から5番目の入力内容
例: 1000000 番目の入力内容が存在しない場合、例外 IndexError が発生します。
require "readline"
Readline::HISTORY[1000000] #=> 例外 IndexError が発生します。
例: 32 bit のシステムの場合、例外 RangeError が発生します。
require "readline"
Readline::HISTORY[2 ** 32 + 1] #=> 例外 RangeError が発生します。
例: 64 bit のシステムの場合、例外 RangeError が発生します。
require "readline"
Readline::HISTORY[2 ** 64 + 1] #=> 例外 RangeError が発生します。
--- []=(index, string)
ヒストリの index で指定したインデックスの内容を string で指定した文字列で書き換えます。
例えば index に 0 を指定すると最初の入力内容を書き換えます。
また、 -1 は最後の入力内容というように、index に負の値を指定することで、
最後から入力内容を取得することもできます。
指定した string を返します。
@param index 取得対象のヒストリのインデックスを整数で指定します。
インデックスは [[c:Array]] のように 0 から指定します。
また、 -1 は最後の入力内容というように、負の数を指定することもできます。
@param string 文字列を指定します。この文字列でヒストリを書き換えます。
@raise IndexError index で指定したインデックスに該当する入力内容がない場合に発生します。
@raise RangeError index で指定したインデックスが int 型よりも大きな値の場合に発生します。
@raise NotImplementedError サポートしていない環境で発生します。
--- <<(string) -> self
ヒストリの最後に string で指定した文字列を追加します。
self を返します。
@param string 文字列を指定します。
例: "foo"を追加する。
require "readline"
Readline::HISTORY << "foo"
p Readline::HISTORY[-1] #=> "foo"
例: "foo"、"bar"を追加する。
require "readline"
Readline::HISTORY << "foo" << "bar"
p Readline::HISTORY[-1] #=> "bar"
p Readline::HISTORY[-2] #=> "foo"
@see [[m:Readline::HISTORY.push]]
--- push(*string) -> self
ヒストリの最後に string で指定した文字列を追加します。複数の string を指定できます。
self を返します。
@param string 文字列を指定します。複数指定できます。
例: "foo"を追加する。
require "readline"
Readline::HISTORY.push("foo")
p Readline::HISTORY[-1] #=> "foo"
例: "foo"、"bar"を追加する。
require "readline"
Readline::HISTORY.push("foo", "bar")
p Readline::HISTORY[-1] #=> "bar"
p Readline::HISTORY[-2] #=> "foo"
@see [[m:Readline::HISTORY.<<]]
--- pop -> String
ヒストリの最後の内容を取り出します。
最後の内容は、ヒストリから取り除かれます。
@raise NotImplementedError サポートしていない環境で発生します。
例:
require "readline"
Readline::HISTORY.push("foo", "bar", "baz")
p Readline::HISTORY.pop #=> "baz"
p Readline::HISTORY.pop #=> "bar"
p Readline::HISTORY.pop #=> "foo"
@see [[m:Readline::HISTORY.push]]、[[m:Readline::HISTORY.shift]]、
[[m:Readline::HISTORY.delete_at]]
--- shift -> String
ヒストリの最初の内容を取り出します。
最初の内容は、ヒストリから取り除かれます。
@raise NotImplementedError サポートしていない環境で発生します。
例:
require "readline"
Readline::HISTORY.push("foo", "bar", "baz")
p Readline::HISTORY.shift #=> "foo"
p Readline::HISTORY.shift #=> "bar"
p Readline::HISTORY.shift #=> "baz"
@see [[m:Readline::HISTORY.push]]、[[m:Readline::HISTORY.pop]]、
[[m:Readline::HISTORY.delete_at]]
#@since 1.9.1
--- each -> Enumerator
#@end
--- each {|string| ... }
ヒストリの内容に対してブロックを評価します。
ブロックパラメータにはヒストリの最初から最後までの内容を順番に渡します。
例: ヒストリの内容を最初から順番に出力する。
require "readline"
Readline::HISTORY.push("foo", "bar", "baz")
Readline::HISTORY.each do |s|
p s #=> "foo", "bar", "baz"
end
#@since 1.9.1
例: [[c:Enumerator]] オブジェクトを返す場合。
require "readline"
Readline::HISTORY.push("foo", "bar", "baz")
e = Readline::HISTORY.each
e.each do |s|
p s #=> "foo", "bar", "baz"
end
#@end
--- length -> Integer
--- size -> Integer
ヒストリに格納された内容の数を取得します。
例: ヒストリの内容を最初から順番に出力する。
require "readline"
Readline::HISTORY.push("foo", "bar", "baz")
p Readline::HISTORY.length #=> 3
@see [[m:Readline::HISTORY.empty?]]
--- empty? -> bool
ヒストリに格納された内容の数が 0 の場合は true を、
そうでない場合は false を返します。
例:
require "readline"
p Readline::HISTORY.empty? #=> true
Readline::HISTORY.push("foo", "bar", "baz")
p Readline::HISTORY.empty? #=> false
@see [[m:Readline::HISTORY.length]]
--- delete_at(index) -> String | nil
index で指定したインデックスの内容をヒストリから削除し、その内容を返します。
該当する index の内容がヒストリになければ、 nil を返します。
index に 0 を指定すると [[m:Readline::HISTORY.shift]]
と同様に最初の入力内容を削除します。
また、 -1 は最後の入力内容というように、index に負の値を指定することで、
最後から入力内容を取得することもできます。
index が -1 の場合は [[m:Readline::HISTORY.pop]] と同様に動作します。
@param index 削除対象のヒストリのインデックスを指定します。
@raise NotImplementedError サポートしていない環境で発生します。
例:
require "readline"
Readline::HISTORY.push("foo", "bar", "baz")
Readline::HISTORY.delete_at(1)
p Readline::HISTORY.to_a #=> ["foo", "baz"]
#@since 1.9.1
--- clear -> self
ヒストリの内容をすべて削除して空にします。
@raise NotImplementedError サポートしていない環境で発生します。
#@end
|
# Copyright 2022 Observational Health Data Sciences and Informatics
#
# This file is part of PheValuator
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Package maintenance script. Run interactively, section by section, from the
# package root. It relies on development-time helpers (styler, OhdsiRTools,
# devtools, rmarkdown, pkgdown, ParallelLogger) that are not runtime
# dependencies of PheValuator itself.

# Format and check code ----
styler::style_pkg()
OhdsiRTools::checkUsagePackage("PheValuator")
OhdsiRTools::updateCopyrightYearFolder()
devtools::spell_check()

# Create manual and vignettes ----
# Delete any stale manual PDF first. unlink() is portable and silently
# succeeds when the file does not exist; the original shell("rm ...") mixed a
# Windows-only wrapper with a Unix command and could not work anywhere.
unlink("extras/PheValuator.pdf")
# NOTE(review): shell() is Windows-specific; on other platforms system()
# would be needed for this step -- confirm the intended build platform.
shell("R CMD Rd2pdf ./ --output=extras/PheValuator.pdf")
# output_file is resolved relative to the input document's directory, so the
# rendered vignette lands in inst/doc/. The pdf_document() format was passed
# positionally in the original; name it explicitly for clarity.
rmarkdown::render("vignettes/EvaluatingPhenotypeAlgorithms.rmd",
                  output_format = rmarkdown::pdf_document(latex_engine = "pdflatex",
                                                          toc = TRUE,
                                                          toc_depth = 3,
                                                          number_sections = TRUE),
                  output_file = "../inst/doc/EvaluatingPhenotypeAlgorithms.pdf")
pkgdown::build_site()
OhdsiRTools::fixHadesLogo()

# Create arg functions ----
# createArgFunction() appends an autogenerated argument-list wrapper for each
# target function to the accumulated lines in rCode; the result is then
# written out and formatted. Arguments supplied at execution time (connection
# details, schemas, etc.) are excluded from the generated functions.
rCode <- c("# This file has been autogenerated. Do not change by hand. ")
rCode <- ParallelLogger::createArgFunction("createEvaluationCohort",
                                           excludeArgs = c("connectionDetails",
                                                           "cdmDatabaseSchema",
                                                           "oracleTempSchema",
                                                           "tempEmulationSchema",
                                                           "workDatabaseSchema",
                                                           "cohortDatabaseSchema",
                                                           "cohortTable",
                                                           "outFolder",
                                                           "evaluationCohortId",
                                                           "cdmVersion"),
                                           rCode = rCode)
rCode <- ParallelLogger::createArgFunction("testPhenotypeAlgorithm",
                                           excludeArgs = c("connectionDetails",
                                                           "cdmDatabaseSchema",
                                                           "cohortDatabaseSchema",
                                                           "cohortTable",
                                                           "outFolder",
                                                           "evaluationCohortId"),
                                           rCode = rCode)
writeLines(rCode, "R/CreateArgFunctions.R")
OhdsiRTools::formatRFile("R/CreateArgFunctions.R")
| /extras/PackageMaintenance.R | permissive | gowthamrao/PheValuator | R | false | false | 3,082 | r | # Copyright 2022 Observational Health Data Sciences and Informatics
#
# This file is part of PheValuator
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Format and check code
styler::style_pkg()
OhdsiRTools::checkUsagePackage("PheValuator")
OhdsiRTools::updateCopyrightYearFolder()
devtools::spell_check()
# Create manual and vignettes
shell("rm extras/PheValuator.pdf")
shell("R CMD Rd2pdf ./ --output=extras/PheValuator.pdf")
rmarkdown::render("vignettes/EvaluatingPhenotypeAlgorithms.rmd",
output_file = "../inst/doc/EvaluatingPhenotypeAlgorithms.pdf",
rmarkdown::pdf_document(latex_engine = "pdflatex",
toc = TRUE,
toc_depth = 3,
number_sections = TRUE))
pkgdown::build_site()
OhdsiRTools::fixHadesLogo()
# Create arg functions:
rCode <- c("# This file has been autogenerated. Do not change by hand. ")
rCode <- ParallelLogger::createArgFunction("createEvaluationCohort",
excludeArgs = c("connectionDetails",
"cdmDatabaseSchema",
"oracleTempSchema",
"tempEmulationSchema",
"workDatabaseSchema",
"cohortDatabaseSchema",
"cohortTable",
"outFolder",
"evaluationCohortId",
"cdmVersion"),
rCode = rCode)
rCode <- ParallelLogger::createArgFunction("testPhenotypeAlgorithm",
excludeArgs = c("connectionDetails",
"cdmDatabaseSchema",
"cohortDatabaseSchema",
"cohortTable",
"outFolder",
"evaluationCohortId"),
rCode = rCode)
writeLines(rCode, "R/CreateArgFunctions.R")
OhdsiRTools::formatRFile("R/CreateArgFunctions.R")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Corr.R
\name{Corr}
\alias{Corr}
\alias{.Corr}
\title{Computes the correlation coefficient between an array of forecasts and their corresponding observations}
\usage{
Corr(
var_exp,
var_obs,
posloop = 1,
poscor = 2,
compROW = NULL,
limits = NULL,
siglev = 0.95,
method = "pearson",
conf = TRUE,
pval = TRUE
)
.Corr(exp, obs, siglev = 0.95, method = "pearson", conf = TRUE, pval = TRUE)
}
\arguments{
\item{var_exp}{Array of experimental data.}
\item{var_obs}{Array of observational data, same dimensions as var_exp
except along posloop dimension, where the length can be nobs instead of nexp.}
\item{posloop}{Dimension nobs and nexp.}
\item{poscor}{Dimension along which correlations are to be computed (the
dimension of the start dates).}
\item{compROW}{Data taken into account only if (compROW)th row is complete.
Default = NULL.}
\item{limits}{Complete between limits[1] & limits[2]. Default = NULL.}
\item{siglev}{Significance level. Default = 0.95.}
\item{method}{Type of correlation: 'pearson', 'spearman' or 'kendall'.
Default='pearson'}
\item{conf}{Whether to compute confidence intervals (default = 'TRUE') or
not (FALSE).}
\item{pval}{Whether to compute statistical significance p-value (default = 'TRUE')
or not (FALSE).}
\item{exp}{N by M matrix of N forecasts from M ensemble members.}
\item{obs}{Vector of the corresponding observations of length N.}
}
\value{
Corr: Array with dimensions :\cr
c(# of datasets along posloop in var_exp, # of datasets along posloop in
var_obs, 4, all other dimensions of var_exp & var_obs except poscor).\cr
The third dimension, of length 4 maximum, contains to the lower limit of
the 95\% confidence interval, the correlation, the upper limit of the 95\%
confidence interval and the 95\% significance level given by a one-sided
T-test. If the p-value is disabled via \code{pval = FALSE}, this dimension
will be of length 3. If the confidence intervals are disabled via
\code{conf = FALSE}, this dimension will be of length 2. If both are
disabled, this will be of length 1. \cr\cr
.Corr:
\itemize{
\item{$corr}{The correlation statistic.}
\item{$p_val}{Corresponds to the p values for the \code{siglev}\%
(only present if \code{pval = TRUE}) for the correlation.}
\item{$conf_low}{Corresponds to the upper limit of the \code{siglev}\%
(only present if \code{conf = TRUE}) for the correlation.}
\item{$conf_high}{Corresponds to the lower limit of the \code{siglev}\%
(only present if \code{conf = TRUE}) for the correlation.}
}
}
\description{
Calculates the correlation coefficient (Pearson, Kendall or Spearman) for
an array of forecasts and observations. The input should be an array with
dimensions c(no. of datasets, no. of start dates, no. of forecast times,
no. of lons, no. of lats.), where the longitude and latitude dimensions are
optional. The correlations are computed along the poscor dimension which
should correspond to the startdate dimension. If compROW is given, the
correlations are computed only if rows along the compROW dimension are
complete between limits[1] and limits[2], i.e. there are no NAs between
limits[1] and limits[2]. This option can be activated if the user wishes to
account only for the forecasts for which observations are available at all
leadtimes. \cr
Default: limits[1] = 1 and limits[2] = length(compROW dimension).\cr
The confidence interval is computed by a Fisher transformation.\cr
The significance level relies on a one-sided student-T distribution.\cr
We can modify the threshold of the test by modifying siglev (default value=0.95).\cr\cr
.Corr calculates the correlation between the ensemble mean and the
observations, using an N by M matrix (exp) of forecasts and a vector of
observations (obs) as input.
}
\examples{
# Load sample data as in Load() example:
example(Load)
clim <- Clim(sampleData$mod, sampleData$obs)
ano_exp <- Ano(sampleData$mod, clim$clim_exp)
ano_obs <- Ano(sampleData$obs, clim$clim_obs)
runmean_months <- 12
dim_to_smooth <- 4
# Smooth along lead-times
smooth_ano_exp <- Smoothing(ano_exp, runmean_months, dim_to_smooth)
smooth_ano_obs <- Smoothing(ano_obs, runmean_months, dim_to_smooth)
dim_to_mean <- 2 # Mean along members
required_complete_row <- 3 # Discard start dates which contain any NA lead-times
leadtimes_per_startdate <- 60
corr <- Corr(Mean1Dim(smooth_ano_exp, dim_to_mean),
Mean1Dim(smooth_ano_obs, dim_to_mean),
compROW = required_complete_row,
limits = c(ceiling((runmean_months + 1) / 2),
leadtimes_per_startdate - floor(runmean_months / 2)))
\donttest{
PlotVsLTime(corr, toptitle = "correlations", ytitle = "correlation",
monini = 11, limits = c(-1, 2), listexp = c('CMIP5 IC3'),
listobs = c('ERSST'), biglab = FALSE, hlines = c(-1, 0, 1),
fileout = 'tos_cor.eps')
}
# The following example uses veriApply combined with .Corr instead of Corr
\dontrun{
require(easyVerification)
Corr2 <- s2dverification:::.Corr
corr2 <- veriApply("Corr2",
smooth_ano_exp,
# see ?veriApply for how to use the 'parallel' option
Mean1Dim(smooth_ano_obs, dim_to_mean),
tdim = 3, ensdim = 2)
}
}
\author{
History:\cr
0.1 - 2011-04 (V. Guemas, \email{vguemas@bsc.es}) - Original code\cr
1.0 - 2013-09 (N. Manubens, \email{nicolau.manubens@bsc.es}) - Formatting to R CRAN\cr
1.1 - 2014-10 (M. Menegoz, \email{martin.menegoz@bsc.es}) - Adding siglev argument\cr
1.2 - 2015-03 (L.P. Caron, \email{louis-philippe.caron@bsc.es}) - Adding method argument\cr
1.3 - 2017-02 (A. Hunter, \email{alasdair.hunter@bsc.es}) - Adapted to veriApply()
}
\keyword{datagen}
| /man/Corr.Rd | no_license | rpkgs/s2dverification | R | false | true | 5,925 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Corr.R
\name{Corr}
\alias{Corr}
\alias{.Corr}
\title{Computes the correlation coefficient between an array of forecasts and their corresponding observations}
\usage{
Corr(
var_exp,
var_obs,
posloop = 1,
poscor = 2,
compROW = NULL,
limits = NULL,
siglev = 0.95,
method = "pearson",
conf = TRUE,
pval = TRUE
)
.Corr(exp, obs, siglev = 0.95, method = "pearson", conf = TRUE, pval = TRUE)
}
\arguments{
\item{var_exp}{Array of experimental data.}
\item{var_obs}{Array of observational data, same dimensions as var_exp
except along posloop dimension, where the length can be nobs instead of nexp.}
\item{posloop}{Dimension nobs and nexp.}
\item{poscor}{Dimension along which correlation are to be computed (the
dimension of the start dates).}
\item{compROW}{Data taken into account only if (compROW)th row is complete.
Default = NULL.}
\item{limits}{Complete between limits[1] & limits[2]. Default = NULL.}
\item{siglev}{Significance level. Default = 0.95.}
\item{method}{Type of correlation: 'pearson', 'spearman' or 'kendall'.
Default='pearson'}
\item{conf}{Whether to compute confidence intervals (default = 'TRUE') or
not (FALSE).}
\item{pval}{Whether to compute statistical significance p-value (default = 'TRUE')
or not (FALSE).}
\item{exp}{N by M matrix of N forecasts from M ensemble members.}
\item{obs}{Vector of the corresponding observations of length N.}
}
\value{
Corr: Array with dimensions :\cr
c(# of datasets along posloop in var_exp, # of datasets along posloop in
var_obs, 4, all other dimensions of var_exp & var_obs except poscor).\cr
The third dimension, of length 4 maximum, contains to the lower limit of
the 95\% confidence interval, the correlation, the upper limit of the 95\%
confidence interval and the 95\% significance level given by a one-sided
T-test. If the p-value is disabled via \code{pval = FALSE}, this dimension
will be of length 3. If the confidence intervals are disabled via
\code{conf = FALSE}, this dimension will be of length 2. If both are
disabled, this will be of length 2. \cr\cr
.Corr:
\itemize{
\item{$corr}{The correlation statistic.}
\item{$p_val}{Corresponds to the p values for the \code{siglev}\%
(only present if \code{pval = TRUE}) for the correlation.}
\item{$conf_low}{Corresponds to the upper limit of the \code{siglev}\%
(only present if \code{conf = TRUE}) for the correlation.}
\item{$conf_high}{Corresponds to the lower limit of the \code{siglev}\%
(only present if \code{conf = TRUE}) for the correlation.}
}
}
\description{
Calculates the correlation coefficient (Pearson, Kendall or Spearman) for
an array of forecasts and observations. The input should be an array with
dimensions c(no. of datasets, no. of start dates, no. of forecast times,
no. of lons, no. of lats.), where the longitude and latitude dimensions are
optional. The correlations are computed along the poscor dimension which
should correspond to the startdate dimension. If compROW is given, the
correlations are computed only if rows along the compROW dimension are
complete between limits[1] and limits[2], i.e. there are no NAs between
limits[1] and limits[2]. This option can be activated if the user wishes to
account only for the forecasts for which observations are available at all
leadtimes. \cr
Default: limits[1] = 1 and limits[2] = length(compROW dimension).\cr
The confidence interval is computed by a Fisher transformation.\cr
The significance level relies on a one-sided student-T distribution.\cr
We can modifiy the treshold of the test modifying siglev (default value=0.95).\cr\cr
.Corr calculates the correlation between the ensemble mean and the
observations, using an N by M matrix (exp) of forecasts and a vector of
observations (obs) as input.
}
\examples{
# Load sample data as in Load() example:
example(Load)
clim <- Clim(sampleData$mod, sampleData$obs)
ano_exp <- Ano(sampleData$mod, clim$clim_exp)
ano_obs <- Ano(sampleData$obs, clim$clim_obs)
runmean_months <- 12
dim_to_smooth <- 4
# Smooth along lead-times
smooth_ano_exp <- Smoothing(ano_exp, runmean_months, dim_to_smooth)
smooth_ano_obs <- Smoothing(ano_obs, runmean_months, dim_to_smooth)
dim_to_mean <- 2 # Mean along members
required_complete_row <- 3 # Discard start dates which contain any NA lead-times
leadtimes_per_startdate <- 60
corr <- Corr(Mean1Dim(smooth_ano_exp, dim_to_mean),
Mean1Dim(smooth_ano_obs, dim_to_mean),
compROW = required_complete_row,
limits = c(ceiling((runmean_months + 1) / 2),
leadtimes_per_startdate - floor(runmean_months / 2)))
\donttest{
PlotVsLTime(corr, toptitle = "correlations", ytitle = "correlation",
monini = 11, limits = c(-1, 2), listexp = c('CMIP5 IC3'),
listobs = c('ERSST'), biglab = FALSE, hlines = c(-1, 0, 1),
fileout = 'tos_cor.eps')
}
# The following example uses veriApply combined with .Corr instead of Corr
\dontrun{
require(easyVerification)
Corr2 <- s2dverification:::.Corr
corr2 <- veriApply("Corr2",
smooth_ano_exp,
# see ?veriApply for how to use the 'parallel' option
Mean1Dim(smooth_ano_obs, dim_to_mean),
tdim = 3, ensdim = 2)
}
}
\author{
History:\cr
0.1 - 2011-04 (V. Guemas, \email{vguemas@bsc.es}) - Original code\cr
1.0 - 2013-09 (N. Manubens, \email{nicolau.manubens@bsc.es}) - Formatting to R CRAN\cr
1.1 - 2014-10 (M. Menegoz, \email{martin.menegoz@bsc.es}) - Adding siglev argument\cr
1.2 - 2015-03 (L.P. Caron, \email{louis-philippe.caron@bsc.es}) - Adding method argument\cr
1.3 - 2017-02 (A. Hunter, \email{alasdair.hunter@bsc.es}) - Adapted to veriApply()
}
\keyword{datagen}
|
library(FinePop)
### Name: FstEnv
### Title: Regression analysis of environmental factors on genetic
### differentiation
### Aliases: FstEnv
### ** Examples
# Example of genotypic and environmental dataset
data(herring)
# Data bootstrapping and Fst estimation
# fstbs <- FstBoot(herring$popdata)
# Effects of environmental factors on genetic differentiation
# fstenv <- FstEnv(fstbs, herring$environment, herring$distance)
# Since these calculations are too heavy, pre-calculated results are included in this dataset.
fstbs <- herring$fst.bootstrap
fstenv <- herring$fst.env
summary(fstenv)
| /data/genthat_extracted_code/FinePop/examples/FstEnv.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 603 | r | library(FinePop)
### Name: FstEnv
### Title: Regression analysis of environmental factors on genetic
### differentiation
### Aliases: FstEnv
### ** Examples
# Example of genotypic and environmental dataset
data(herring)
# Data bootstrapping and Fst estimation
# fstbs <- FstBoot(herring$popdata)
# Effects of environmental factors on genetic differentiation
# fstenv <- FstEnv(fstbs, herring$environment, herring$distance)
# Since these calculations are too heavy, pre-calculated results are included in this dataset.
fstbs <- herring$fst.bootstrap
fstenv <- herring$fst.env
summary(fstenv)
|
testlist <- list(doy = c(-Inf, 0), latitude = c(-6.93132091139805e-107, 1.86807199752012e+112, -Inf, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, NaN, -1.51345790188863e+21, 1.44018888022303e-285, -1.72131968218895e+83, 0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158))
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result) | /meteor/inst/testfiles/ET0_ThornthwaiteWilmott/AFL_ET0_ThornthwaiteWilmott/ET0_ThornthwaiteWilmott_valgrind_files/1615833229-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 421 | r | testlist <- list(doy = c(-Inf, 0), latitude = c(-6.93132091139805e-107, 1.86807199752012e+112, -Inf, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, NaN, -1.51345790188863e+21, 1.44018888022303e-285, -1.72131968218895e+83, 0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158))
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result) |
library(data.table)
# CV1 dataset
DATABASE = "CV1_1"
train.bak <-
fread('../data/train.csv', header=TRUE,
select = c("Semana","Agencia_ID","Canal_ID","Ruta_SAK","Cliente_ID","Producto_ID","Venta_uni_hoy","Venta_hoy","Dev_uni_proxima","Dev_proxima","Demanda_uni_equil"))
trainWeeks = c(7)
testWeeks = c(8)
nrow.train.bak = nrow(train.bak)
clients.train.bak = unique(train.bak$Cliente_ID)
products.train.bak = unique(train.bak$Producto_ID )
agencies.train.bak = unique(train.bak$Agencia_ID)
canals.train.bak = unique(train.bak$Canal_ID)
routes.train.bak = unique(train.bak$Ruta_SAK)
#train.bak$Demanda_uni_equil = log1p(train.bak$Demanda_uni_equil)
trainData <- train.bak[which(train.bak$Semana %in% trainWeeks)]
testData <- train.bak[which(train.bak$Semana %in% testWeeks)]
remove(train.bak)
trainData$id <- 1:nrow(trainData)
testData$id <- 1:nrow(testData)
gc() # garbage collector
train = trainData
test = testData
remove(trainData)
remove(testData)
gc()
VERBOSE = 1
source("wip.002.R")
remove(train)
remove(test)
gc()
| /R/build.data.cv1_1.R | no_license | tudor-m/Kaggle-Bimbo | R | false | false | 1,050 | r | library(data.table)
# CV1 dataset
DATABASE = "CV1_1"
train.bak <-
fread('../data/train.csv', header=TRUE,
select = c("Semana","Agencia_ID","Canal_ID","Ruta_SAK","Cliente_ID","Producto_ID","Venta_uni_hoy","Venta_hoy","Dev_uni_proxima","Dev_proxima","Demanda_uni_equil"))
trainWeeks = c(7)
testWeeks = c(8)
nrow.train.bak = nrow(train.bak)
clients.train.bak = unique(train.bak$Cliente_ID)
products.train.bak = unique(train.bak$Producto_ID )
agencies.train.bak = unique(train.bak$Agencia_ID)
canals.train.bak = unique(train.bak$Canal_ID)
routes.train.bak = unique(train.bak$Ruta_SAK)
#train.bak$Demanda_uni_equil = log1p(train.bak$Demanda_uni_equil)
trainData <- train.bak[which(train.bak$Semana %in% trainWeeks)]
testData <- train.bak[which(train.bak$Semana %in% testWeeks)]
remove(train.bak)
trainData$id <- 1:nrow(trainData)
testData$id <- 1:nrow(testData)
gc() # garbage collector
train = trainData
test = testData
remove(trainData)
remove(testData)
gc()
VERBOSE = 1
source("wip.002.R")
remove(train)
remove(test)
gc()
|
# Incidence plots of recent cases
# https://rviews.rstudio.com/2020/03/05/covid-19-epidemiology-with-r/
rm(list=ls()); # Clear environment
library('jsonlite')
library('incidence')
library('ggplot2')
# Set how many days you wish to see
last <- 21
# Import data from coronavirus API
govapi <- fromJSON("https://api.coronavirus.data.gov.uk/v1/data?filters=areaName=United%2520Kingdom;areaType=overview&structure=%7B%22areaType%22:%22areaType%22,%22areaName%22:%22areaName%22,%22areaCode%22:%22areaCode%22,%22date%22:%22date%22,%22newCasesByPublishDate%22:%22newCasesByPublishDate%22,%22cumCasesByPublishDate%22:%22cumCasesByPublishDate%22%7D&format=json")
# Import case data into an incidence object
i <- as.incidence(govapi$data$newCasesByPublishDate, dates = as.Date(govapi$data$date), interval=1)
# Count how many daily records are in the data set
rows <- length(i$counts)
# Create an exponential fit model for the 'last' however many days
fit.second <- fit(i[(rows-last):(rows)])
# Create label text to go on the graph to show daily growth rates / doubling times etc from the model
label_text <- paste("Last ", last ," days model:", "\n", "Daily growth rate: ", round(fit.second$info$r*100, digits = 1), "%\n",
"Doubling time: ", round(fit.second$info$doubling, digits = 1), " days",
sep="")
# Plot the graph
p <- plot(i[(rows-last):(rows)], fit=fit.second, color="blue", border = "white") +
# Add labels
labs(title=paste("UK Coronavirus Cases"),
subtitle=format(i[rows]$dates,'%A %d %B %Y'),
caption="Graph by @paulmaunders - data from coronavirus.data.gov.uk") +
# Set theme font size
theme_light(base_size = 14) +
# Add top, right, bottom, left padding
theme(plot.margin=unit(c(0.5,1.0,0.5,0.5),"cm")) +
# Add an information box with the label text set previously
geom_label(
label=label_text,
x=i[(rows-last)]$dates,
y=i$counts[(rows)]*.9,
hjust="left",
label.padding = unit(0.55, "lines"), # Rectangle size around label
label.size = 0.35,
size=5,
color = "black",
fill="#ffffff"
)
# Show the output of the model on the console
fit.second
# Print the graph in the plot view
print (p)
# Save the graph as a file on your Desktop in 15x15cm square format - 300 dpi for printing
ggsave(plot=p, filename = paste("~/Desktop/uk-cases-incidence-recent-", i[rows]$dates, ".png", sep=""), device="png", dpi=300, height = 15 , width = 15, units = "cm")
| /analysis/uk-cases-incidence-recent.R | permissive | paulmaunders/coronavirus-graphs | R | false | false | 2,486 | r | # Incidence plots of recent cases
# https://rviews.rstudio.com/2020/03/05/covid-19-epidemiology-with-r/
rm(list=ls()); # Clear environment
library('jsonlite')
library('incidence')
library('ggplot2')
# Set how many days you wish to see
last <- 21
# Import data from coronavirus API
govapi <- fromJSON("https://api.coronavirus.data.gov.uk/v1/data?filters=areaName=United%2520Kingdom;areaType=overview&structure=%7B%22areaType%22:%22areaType%22,%22areaName%22:%22areaName%22,%22areaCode%22:%22areaCode%22,%22date%22:%22date%22,%22newCasesByPublishDate%22:%22newCasesByPublishDate%22,%22cumCasesByPublishDate%22:%22cumCasesByPublishDate%22%7D&format=json")
# Import case data into an incidence object
i <- as.incidence(govapi$data$newCasesByPublishDate, dates = as.Date(govapi$data$date), interval=1)
# Count how many daily records are in the data set
rows <- length(i$counts)
# Create an exponential fit model for the 'last' however many days
fit.second <- fit(i[(rows-last):(rows)])
# Create label text to go on the graph to show daily growth rates / doubling times etc from the model
label_text <- paste("Last ", last ," days model:", "\n", "Daily growth rate: ", round(fit.second$info$r*100, digits = 1), "%\n",
"Doubling time: ", round(fit.second$info$doubling, digits = 1), " days",
sep="")
# Plot the graph
p <- plot(i[(rows-last):(rows)], fit=fit.second, color="blue", border = "white") +
# Add labels
labs(title=paste("UK Coronavirus Cases"),
subtitle=format(i[rows]$dates,'%A %d %B %Y'),
caption="Graph by @paulmaunders - data from coronavirus.data.gov.uk") +
# Set theme font size
theme_light(base_size = 14) +
# Add top, right, bottom, left padding
theme(plot.margin=unit(c(0.5,1.0,0.5,0.5),"cm")) +
# Add an information box with the label text set previously
geom_label(
label=label_text,
x=i[(rows-last)]$dates,
y=i$counts[(rows)]*.9,
hjust="left",
label.padding = unit(0.55, "lines"), # Rectangle size around label
label.size = 0.35,
size=5,
color = "black",
fill="#ffffff"
)
# Show the output of the model on the console
fit.second
# Print the graph in the plot view
print (p)
# Save the graph as a file on your Desktop in 15x15cm square format - 300 dpi for printing
ggsave(plot=p, filename = paste("~/Desktop/uk-cases-incidence-recent-", i[rows]$dates, ".png", sep=""), device="png", dpi=300, height = 15 , width = 15, units = "cm")
|
#install.packages("fasttime")
rm(list=ls())
gc();gc();gc()
library(data.table)
library(RcppRoll)
library(fasttime)
library(Hmisc)
library(readr)
path = '~/avito/data/'
# path = '/Users/dhanley2/Documents/avito/data/'
# Write out the <ip, device, os> level
keepcols = c("parent_category_name", "category_name", "price", 'region', 'city', 'user_type', 'activation_date',
'param_1', 'param_2', 'param_3', 'title', 'user_id')
# Load up train/test and active files and join them all together, keeping the position of train/test
trndf = data.table(read_csv(paste0(path, 'train.csv'), col_types = list('item_id' = col_skip(), 'description' = col_skip())))
trndf = trndf[,keepcols,with=F]
gc(); gc()
gc(); gc()
trnadf = data.table(read_csv(paste0(path, 'train_active.csv'),col_types = list('item_id' = col_skip(), 'description' = col_skip())))
trnadf = trnadf[,keepcols,with=F]
gc(); gc()
gc(); gc()
tstdf = data.table(read_csv(paste0(path, 'test.csv'), col_types = list('item_id' = col_skip(), 'description' = col_skip())))
tstdf = tstdf[,keepcols,with=F]
gc(); gc()
gc(); gc()
tstadf = data.table(read_csv(paste0(path, 'test_active.csv'), col_types = list('item_id' = col_skip(), 'description' = col_skip())))
tstadf = tstadf[,keepcols,with=F]
gc(); gc()
gc(); gc()
alldfsm = rbind(trndf, tstdf)
train_length = nrow(trndf)
rm(trndf, tstdf)
gc(); gc()
alladf = rbind(trnadf, tstadf)
rm(trnadf, tstadf)
gc(); gc()
alldfsm[, index := 1:nrow(alldfsm)]
alladf[, index := -1]
gc(); gc(); gc(); gc(); gc(); gc()
alldf = rbind(alladf, alldfsm)
rm(alladf, alldfsm)
gc(); gc(); gc(); gc(); gc(); gc()
priceRatios = function(df, cols, prior){
df[, `:=`(ct = length(!is.na(price)), meanpr = mean(price, na.rm = T)), by = cols]
df[, tmpcol:= (((price/meanpr)*ct)+(prior))/(ct+prior)]
df[is.na(tmpcol), tmpcol:= 1]
setnames(alldf, "tmpcol", paste0(paste0(cols, collapse = '_'), "_fratio5"))
return(alldf)
}
cols = c("parent_category_name", "category_name", 'param_1', 'param_2', 'param_3', 'region', 'city')
# Write out the <ip, device, os> level
ls = list(list(c("parent_category_name", "category_name", 'param_1', 'param_2', 'param_3', 'region', 'city', 'title'), 10),
list(c("parent_category_name", "category_name", 'param_1', 'param_2', 'param_3', 'region', 'title'), 20),
list(c("parent_category_name", "category_name", 'param_1', 'param_2', 'param_3', 'region'), 20),
list(c("parent_category_name", "category_name", 'param_1', 'param_2', 'user_id'), 20),
list(c("parent_category_name", "category_name", 'param_1', 'user_id', 'city'), 20),
list(c("parent_category_name", "category_name", 'param_1', 'param_2', 'param_3', 'region', 'user_id'), 20))
for (l in ls){
cols = l[[1]]
prior = l[[2]]
print(cols)
alldf = priceRatios(alldf, cols, prior = prior)
alldf[, tmpct := .N, by= setdiff(cols, 'price')]
setnames(alldf, "tmpct", paste0(paste0(cols, collapse = '_'), "_count5"))
}
#Create a second title column
alldf_enc = alldf[index!=-1][order(index)][, grep('_count5|_fratio5', colnames(alldf), value = T), with = F]
View(alldf_enc[1:1000])
sum(is.na(alldf_enc))
# Write out the files
writeme = function(df, name){
write.csv(df,
gzfile(paste0(path, '../features/', name,'.gz')),
row.names = F, quote = F)
}
writeme(alldf_enc, 'pratios_fest_2406')
rm(list=ls())
gc();gc();gc()
| /features/code/pratioFestivitiesR2406.R | no_license | yuanlaihenjiandan/avito-demand | R | false | false | 3,385 | r | #install.packages("fasttime")
rm(list=ls())
gc();gc();gc()
library(data.table)
library(RcppRoll)
library(fasttime)
library(Hmisc)
library(readr)
path = '~/avito/data/'
# path = '/Users/dhanley2/Documents/avito/data/'
# Write out the <ip, device, os> level
keepcols = c("parent_category_name", "category_name", "price", 'region', 'city', 'user_type', 'activation_date',
'param_1', 'param_2', 'param_3', 'title', 'user_id')
# Load up train/test and active files and join them all together, keeping the position of train/test
trndf = data.table(read_csv(paste0(path, 'train.csv'), col_types = list('item_id' = col_skip(), 'description' = col_skip())))
trndf = trndf[,keepcols,with=F]
gc(); gc()
gc(); gc()
trnadf = data.table(read_csv(paste0(path, 'train_active.csv'),col_types = list('item_id' = col_skip(), 'description' = col_skip())))
trnadf = trnadf[,keepcols,with=F]
gc(); gc()
gc(); gc()
tstdf = data.table(read_csv(paste0(path, 'test.csv'), col_types = list('item_id' = col_skip(), 'description' = col_skip())))
tstdf = tstdf[,keepcols,with=F]
gc(); gc()
gc(); gc()
tstadf = data.table(read_csv(paste0(path, 'test_active.csv'), col_types = list('item_id' = col_skip(), 'description' = col_skip())))
tstadf = tstadf[,keepcols,with=F]
gc(); gc()
gc(); gc()
alldfsm = rbind(trndf, tstdf)
train_length = nrow(trndf)
rm(trndf, tstdf)
gc(); gc()
alladf = rbind(trnadf, tstadf)
rm(trnadf, tstadf)
gc(); gc()
alldfsm[, index := 1:nrow(alldfsm)]
alladf[, index := -1]
gc(); gc(); gc(); gc(); gc(); gc()
alldf = rbind(alladf, alldfsm)
rm(alladf, alldfsm)
gc(); gc(); gc(); gc(); gc(); gc()
priceRatios = function(df, cols, prior){
df[, `:=`(ct = length(!is.na(price)), meanpr = mean(price, na.rm = T)), by = cols]
df[, tmpcol:= (((price/meanpr)*ct)+(prior))/(ct+prior)]
df[is.na(tmpcol), tmpcol:= 1]
setnames(alldf, "tmpcol", paste0(paste0(cols, collapse = '_'), "_fratio5"))
return(alldf)
}
cols = c("parent_category_name", "category_name", 'param_1', 'param_2', 'param_3', 'region', 'city')
# Write out the <ip, device, os> level
ls = list(list(c("parent_category_name", "category_name", 'param_1', 'param_2', 'param_3', 'region', 'city', 'title'), 10),
list(c("parent_category_name", "category_name", 'param_1', 'param_2', 'param_3', 'region', 'title'), 20),
list(c("parent_category_name", "category_name", 'param_1', 'param_2', 'param_3', 'region'), 20),
list(c("parent_category_name", "category_name", 'param_1', 'param_2', 'user_id'), 20),
list(c("parent_category_name", "category_name", 'param_1', 'user_id', 'city'), 20),
list(c("parent_category_name", "category_name", 'param_1', 'param_2', 'param_3', 'region', 'user_id'), 20))
for (l in ls){
cols = l[[1]]
prior = l[[2]]
print(cols)
alldf = priceRatios(alldf, cols, prior = prior)
alldf[, tmpct := .N, by= setdiff(cols, 'price')]
setnames(alldf, "tmpct", paste0(paste0(cols, collapse = '_'), "_count5"))
}
#Create a second title column
alldf_enc = alldf[index!=-1][order(index)][, grep('_count5|_fratio5', colnames(alldf), value = T), with = F]
View(alldf_enc[1:1000])
sum(is.na(alldf_enc))
# Write out the files
writeme = function(df, name){
write.csv(df,
gzfile(paste0(path, '../features/', name,'.gz')),
row.names = F, quote = F)
}
writeme(alldf_enc, 'pratios_fest_2406')
rm(list=ls())
gc();gc();gc()
|
#' @export makeBuffer
#'
#' @author Jason Mitchell (jmitchell@@west-inc.com)
#'
#' @seealso \code{\link{gBuffer}}, \code{\link{gIntersects}},
#' \code{\link{gIntersection}}
makeBuffer <- function(shp,cell,radius=2.5,cellDim.m=3218.69,inner=TRUE){
# shp <- allShps[[2]]
# cell <- "CO132952"
# radius <- 2.5
# cellDim.m <- 3218.69
# inner <- TRUE
# ---- Make sure the rownames of attribute data in shp match the polygonal IDs.
rownames(shp@data) <- sapply(shp@polygons,function(x) slot(x,"ID"))
# ---- Make the buffering circle.
center <- gCentroid(shp[shp@data$Grid_ID == cell,])
circle <- gBuffer(center,byid=TRUE,width=radius*cellDim.m)
# ---- Identify set of possible outer safety cells.
safetyOuterVec <- rownames(gIntersects(circle,shp,byid=TRUE))[gIntersects(circle,shp,byid=TRUE) == TRUE]
# ---- Restrict the outer safety cells to their own object.
safetyOuter <- shp[rownames(shp@data) %in% safetyOuterVec,]
# ---- Identify the inner safety cells with cells entirely in circle.
# ---- Do this by seeing that 4 square miles = 10359994 square meters.
preSafetyInnerVec <- gArea(gIntersection(circle,safetyOuter,byid=TRUE),byid=TRUE) >= 10359993
safetyInnerVec <- unlist(strsplit(names(preSafetyInnerVec[preSafetyInnerVec == TRUE])," ",fixed=TRUE))[c(FALSE,TRUE)]
# ---- Restrict the inner safety cells to their own object.
safetyInner <- shp[rownames(shp@data) %in% safetyInnerVec,]
if( inner == TRUE){
ans <- safetyInner
} else {
ans <- safetyOuter
}
ans
# plot(shp)
# plot(shp[shp@data$dblSamp == 1,],add=TRUE,col='red')
# plot(circle,col="blue")
# plot(safetyOuter,add=TRUE,col="green")
# plot(safetyInner,add=TRUE,col="brown")
} | /R/makeBuffer.R | no_license | jasmyace/BTPD | R | false | false | 1,754 | r | #' @export makeBuffer
#'
#' @author Jason Mitchell (jmitchell@@west-inc.com)
#'
#' @seealso \code{\link{gBuffer}}, \code{\link{gIntersects}},
#' \code{\link{gIntersection}}
makeBuffer <- function(shp,cell,radius=2.5,cellDim.m=3218.69,inner=TRUE){
# shp <- allShps[[2]]
# cell <- "CO132952"
# radius <- 2.5
# cellDim.m <- 3218.69
# inner <- TRUE
# ---- Make sure the rownames of attribute data in shp match the polygonal IDs.
rownames(shp@data) <- sapply(shp@polygons,function(x) slot(x,"ID"))
# ---- Make the buffering circle.
center <- gCentroid(shp[shp@data$Grid_ID == cell,])
circle <- gBuffer(center,byid=TRUE,width=radius*cellDim.m)
# ---- Identify set of possible outer safety cells.
safetyOuterVec <- rownames(gIntersects(circle,shp,byid=TRUE))[gIntersects(circle,shp,byid=TRUE) == TRUE]
# ---- Restrict the outer safety cells to their own object.
safetyOuter <- shp[rownames(shp@data) %in% safetyOuterVec,]
# ---- Identify the inner safety cells with cells entirely in circle.
# ---- Do this by seeing that 4 square miles = 10359994 square meters.
preSafetyInnerVec <- gArea(gIntersection(circle,safetyOuter,byid=TRUE),byid=TRUE) >= 10359993
safetyInnerVec <- unlist(strsplit(names(preSafetyInnerVec[preSafetyInnerVec == TRUE])," ",fixed=TRUE))[c(FALSE,TRUE)]
# ---- Restrict the inner safety cells to their own object.
safetyInner <- shp[rownames(shp@data) %in% safetyInnerVec,]
if( inner == TRUE){
ans <- safetyInner
} else {
ans <- safetyOuter
}
ans
# plot(shp)
# plot(shp[shp@data$dblSamp == 1,],add=TRUE,col='red')
# plot(circle,col="blue")
# plot(safetyOuter,add=TRUE,col="green")
# plot(safetyInner,add=TRUE,col="brown")
} |
library(BayesFactor)
data(puzzles)
## neverExclude argument makes sure that participant factor ID
## is in all models
result = generalTestBF(RT ~ shape*color + ID, data = puzzles, whichRandom = "ID",
neverExclude="ID", progress=FALSE)
result
BF.p = lm(as.numeric(participat) ~ Dominance + Conscientiousness + Openness +
Neuroticism + Extraversion + Agreeableness, data=aggPers)
summary(BF.p)
lmBF.1 = lmBF(as.numeric(participat) ~ Dominance + Conscientiousness + Openness +
Neuroticism + Extraversion + Agreeableness, data=aggPers, progress=F)
regrBF.1 = regressionBF(as.numeric(participat) ~ Dominance + Conscientiousness + Openness +
Neuroticism + Extraversion + Agreeableness, data=aggPers)
mod.gm1 <- generalTestBF(as.numeric(Accuracy) ~ Dominance + Conscientiousness + Openness + Neuroticism
+ Agreeableness + Extraversion + as.factor(Chimp),
whichRandom = "Chimp",
#family = binomial,
data=cz_bin_pers
)
| /Budongo cognition/BFactoring.R | no_license | Diapadion/R | R | false | false | 1,057 | r | library(BayesFactor)
data(puzzles)
## neverExclude argument makes sure that participant factor ID
## is in all models
result = generalTestBF(RT ~ shape*color + ID, data = puzzles, whichRandom = "ID",
neverExclude="ID", progress=FALSE)
result
BF.p = lm(as.numeric(participat) ~ Dominance + Conscientiousness + Openness +
Neuroticism + Extraversion + Agreeableness, data=aggPers)
summary(BF.p)
lmBF.1 = lmBF(as.numeric(participat) ~ Dominance + Conscientiousness + Openness +
Neuroticism + Extraversion + Agreeableness, data=aggPers, progress=F)
regrBF.1 = regressionBF(as.numeric(participat) ~ Dominance + Conscientiousness + Openness +
Neuroticism + Extraversion + Agreeableness, data=aggPers)
mod.gm1 <- generalTestBF(as.numeric(Accuracy) ~ Dominance + Conscientiousness + Openness + Neuroticism
+ Agreeableness + Extraversion + as.factor(Chimp),
whichRandom = "Chimp",
#family = binomial,
data=cz_bin_pers
)
|
\name{ggcontrib}
\alias{ggcontrib}
\title{
Genetic group contribution}
\description{
Calculates the genomic contribution each genetic group makes to every individual in a pedigree}
\usage{
ggcontrib(pedigree, ggroups = NULL, fuzz = NULL, output = "matrix")
}
\arguments{
\item{pedigree }{
A pedigree where the columns are ordered ID, Dam, Sire
}
\item{ggroups }{
An optional vector of either: genetic group assignment for every individual or just the unique genetic groups}
\item{fuzz }{
A matrix containing the fuzzy classification of individuals into genetic groups. Not yet implemented
}
\item{output }{
Format for the output
}
}
\details{
This function can handle the specification of genetic groups in three formats:
(1) similar to ASReml's format for specifying genetic groups, the first 'n' rows of the pedigree contain the label for each genetic group in the ID column and indicate missing values for the Dam and Sire columns (denoted by either 'NA', '0', or '*'). All individuals in the pedigree must then have one of the 'n' genetic groups as parent(s) for each unknown parent. Note, a warning message indicating \code{In numPed(pedigree) : Dams appearing as Sires} is expected, since the dam and sire can be the same for all individuals in the pedigree composing the base population of a genetic group.
(2) similar to Jarrod Hadfield's \code{rbv} function arguments in the \code{MCMCglmm} package, for a pedigree of dimension i x 3 (specified by the \code{pedigree} argument), where 'i' is the total number of individuals in the pedigree, a similar vector of length 'i' can be specified by the \code{ggroups} argument. This vector lists either the genetic group to which each individual belongs or NA if the individual is not to be considered part of one of the base populations (genetic groups).
(3) similar to DMU's format for specifying genetic groups, for a pedigree of dimension i x 3 (specified by the \code{pedigree} argument), where 'i' is the total number of individuals in the pedigree, instead of missing values for the parents, the genetic groups are specified. Note, that all individuals with a missing parent should have a genetic group substituted instead of the missing value symbol (i.e., either 'NA', '0', or '*').
}
\value{
Returns i x n genetic contributions to all 'i' individuals from each of the 'n' genetic groups. Default output is a matrix (dense), but this format can be changed (e.g., "dgCMatrix" for a sparse matrix).
}
\references{
Fikse, F. 2009. Fuzzy classification of phantom parent groups in an animal model. Genetics, Selection, Evolution. 41:42.
}
\author{\email{matthewwolak@gmail.com}}
\examples{
#### Below arbitrarily assigns genetic groups
### to the pedigree from Mrode (2005), chapter 2
### (See data(Mrode2).
# format (1) from above
ExPed1.tmp <- Mrode2
ExPed1.tmp[c(1,2,4), 2] <- c("a", "b", "b")
ExPed1.tmp[1:2, 3] <- c("a", "b")
ExPed1 <- data.frame(id = c(letters[1:2], as.character(ExPed1.tmp$id)),
dam = c(NA, NA, as.character(ExPed1.tmp$dam)),
sire = c(NA, NA, as.character(ExPed1.tmp$sire)))
gg1 <- ggcontrib(ExPed1) # note the warning message which is typical
# format (2) from above
ExPed2 <- Mrode2
ggvec.in <- c("a", "b", NA, "b", NA, NA)
gg2 <- ggcontrib(ExPed2, ggroups = ggvec.in)
as(gg2, "matrix") # changes object to a dense matrix
#for easier incorporation into dataframes etc.
str(as(gg2, "matrix"))
# format (3) from above
ExPed3 <- ExPed1.tmp
gg3 <- ggcontrib(ExPed3, ggroups = c("a", "b"))
}
| /man/ggcontrib.Rd | no_license | DuyDN/nadiv | R | false | false | 3,562 | rd | \name{ggcontrib}
\alias{ggcontrib}
\title{
Genetic group contribution}
\description{
Calculates the genomic contribution each genetic group makes to every individual in a pedigree}
\usage{
ggcontrib(pedigree, ggroups = NULL, fuzz = NULL, output = "matrix")
}
\arguments{
\item{pedigree }{
A pedigree where the columns are ordered ID, Dam, Sire
}
\item{ggroups }{
An optional vector of either: genetic group assignment for every individual or just the unique genetic groups}
\item{fuzz }{
A matrix containing the fuzzy classification of individuals into genetic groups. Not yet implemented
}
\item{output }{
Format for the output
}
}
\details{
This function can handle the specification of genetic groups in three formats:
(1) similar to ASReml's format for specifying genetic groups, the first 'n' rows of the pedigree contain the label for each genetic group in the ID column and indicate missing values for the Dam and Sire columns (denoted by either 'NA', '0', or '*'). All individuals in the pedigree must then have one of the 'n' genetic groups as parent(s) for each unknown parent. Note, a warning message indicating \code{In numPed(pedigree) : Dams appearing as Sires} is expected, since the dam and sire can be the same for all individuals in the pedigree composing the base population of a genetic group.
(2) similar to Jarrod Hadfield's \code{rbv} function arguments in the \code{MCMCglmm} package, for a pedigree of dimension i x 3 (specified by the \code{pedigree} argument), where 'i' is the total number of individuals in the pedigree, a similar vector of length 'i' can be specified by the \code{ggroups} argument. This vector lists either the genetic group to which each individual belongs or NA if the individual is not to be considered part of one of the base populations (genetic groups).
(3) similar to DMU's format for specifying genetic groups, for a pedigree of dimension i x 3 (specified by the \code{pedigree} argument), where 'i' is the total number of individuals in the pedigree, instead of missing values for the parents, the genetic groups are specified. Note, that all individuals with a missing parent should have a genetic group substituted instead of the missing value symbol (i.e., either 'NA', '0', or '*').
}
\value{
Returns i x n genetic contributions to all 'i' individuals from each of the 'n' genetic groups. Default output is a matrix (dense), but this format can be changed (e.g., "dgCMatrix" for a sparse matrix).
}
\references{
Fikse, F. 2009. Fuzzy classification of phantom parent groups in an animal model. Genetics, Selection, Evolution. 41:42.
}
\author{\email{matthewwolak@gmail.com}}
\examples{
#### Below arbitrarily assigns genetic groups
### to the pedigree from Mrode (2005), chapter 2
### (See data(Mrode2).
# format (1) from above
ExPed1.tmp <- Mrode2
ExPed1.tmp[c(1,2,4), 2] <- c("a", "b", "b")
ExPed1.tmp[1:2, 3] <- c("a", "b")
ExPed1 <- data.frame(id = c(letters[1:2], as.character(ExPed1.tmp$id)),
dam = c(NA, NA, as.character(ExPed1.tmp$dam)),
sire = c(NA, NA, as.character(ExPed1.tmp$sire)))
gg1 <- ggcontrib(ExPed1) # note the warning message which is typical
# format (2) from above
ExPed2 <- Mrode2
ggvec.in <- c("a", "b", NA, "b", NA, NA)
gg2 <- ggcontrib(ExPed2, ggroups = ggvec.in)
as(gg2, "matrix") # changes object to a dense matrix
#for easier incorporation into dataframes etc.
str(as(gg2, "matrix"))
# format (3) from above
ExPed3 <- ExPed1.tmp
gg3 <- ggcontrib(ExPed3, ggroups = c("a", "b"))
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in Rtmp9wH6ZH/file2ef74cfe6c83
\docType{class}
\name{MCMCsuiteClass-class}
\alias{MCMCsuiteClass}
\alias{MCMCsuiteClass-class}
\title{Class \code{MCMCsuiteClass}}
\description{
Objects of this class create, run, and organize output from a suite of MCMC algorithms, all applied to the same model, data, and initial values.
This can include WinBUGS, OpenBUGS, JAGS and Stan MCMCs, as well as NIMBLE MCMC algorithms.
Trace plots and density plots for the MCMC samples may also be generated and saved.
}
\examples{
\dontrun{
code <- nimbleCode({
mu ~ dnorm(0, 1)
x ~ dnorm(mu, 1)
})
output <- MCMCsuite(code,
data = list(x=3),
inits = list(mu=0),
niter = 10000,
monitors = 'mu',
MCMCs = c('nimble', 'nimble_RW'),
summaryStats = c('mean', 'sd', 'max', 'function(x) max(abs(x))'),
makePlot = FALSE)
}
}
\author{
Daniel Turek
}
\seealso{
\link{MCMCsuite}
}
| /packages/nimble/man/MCMCsuiteClass-class.Rd | no_license | clarkfitzg/nimble | R | false | false | 1,080 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in Rtmp9wH6ZH/file2ef74cfe6c83
\docType{class}
\name{MCMCsuiteClass-class}
\alias{MCMCsuiteClass}
\alias{MCMCsuiteClass-class}
\title{Class \code{MCMCsuiteClass}}
\description{
Objects of this class create, run, and organize output from a suite of MCMC algorithms, all applied to the same model, data, and initial values.
This can include WinBUGS, OpenBUGS, JAGS and Stan MCMCs, as well as NIMBLE MCMC algorithms.
Trace plots and density plots for the MCMC samples may also be generated and saved.
}
\examples{
\dontrun{
code <- nimbleCode({
mu ~ dnorm(0, 1)
x ~ dnorm(mu, 1)
})
output <- MCMCsuite(code,
data = list(x=3),
inits = list(mu=0),
niter = 10000,
monitors = 'mu',
MCMCs = c('nimble', 'nimble_RW'),
summaryStats = c('mean', 'sd', 'max', 'function(x) max(abs(x))'),
makePlot = FALSE)
}
}
\author{
Daniel Turek
}
\seealso{
\link{MCMCsuite}
}
|
# Creates and then calls a function that will plot participation frequency for each representative
plotparticipation<-function(periodnum){
library("rjson")
library(gridExtra)
library(ggplot2)
filename = c("vote_participation_",periodnum,".json")
json_data <- fromJSON(file=paste(filename,collapse=''))
full<-do.call("rbind", json_data)
df <- data.frame(matrix(unlist(json_data), nrow=length(json_data), byrow=T))
#Unlist data
unlistfull<-matrix(, nrow = length(json_data), ncol=4)
for(i in 1:4){
col = unlist(full[,i])
unlistfull[,i]<-col
}
unlistdf<-as.data.frame(unlistfull)
colnames(unlistdf)[2]<-"ParticipationFrequency"
colnames(unlistdf)[1]<-"Party"
#Convert from factor to numeric data type
unlistdf$ParticipationFrequency<-as.numeric(levels(unlistdf$ParticipationFrequency))[unlistdf$ParticipationFrequency]
#unlistdf$ParticipationFrequency<-format(round(unlistdf$ParticipationFrequency*100, 0), nsmall = 0)
unlistdf<-unlistdf[order(unlistdf[,"Party"]),]
unlistdf$Representative<-1:nrow(unlistdf)
rplot<-ggplot(data=unlistdf, aes(x=Representative, y=ParticipationFrequency,fill=Party)) + geom_bar(stat="identity", position=position_dodge())+ggtitle(paste("Period",periodnum))+theme(plot.title=element_text(face="bold", size=20))
return (rplot)
}
plot4<-plotparticipation('4')
plot5<-plotparticipation('5')
plot6<-plotparticipation('6')
plot8<-plotparticipation('8')
source("http://peterhaschke.com/Code/multiplot.R")
#png(filename="PlotRepresentativeParticipationFrequency.png")
#multiplot(plot4,plot5,plot6,plot8,ncol=2)
#dev.off()
grid.arrange(arrangeGrob(plot4,plot5,plot6,plot8, ncol=2, nrow=2), main=textGrob("Representative Participation Frequency",gp=gpar(fontsize=20,font=3)))
ggsave(file="PlotRepresentativeParticipationFrequency.png")
| /code_participationfrequency.R | no_license | slarrain/OpenDataCongress | R | false | false | 1,805 | r | # Creates and then calls a function that will plot participation frequency for each representative
# plotparticipation: build a bar chart of per-representative vote
# participation for one legislative period.
#
# Reads "vote_participation_<periodnum>.json", flattens the list of records
# into a data frame, orders representatives by party and returns a ggplot
# object (one bar per representative, filled by party).
#
# Fixes over the previous version: the unused intermediate `df` (built with
# the unsafe `T` literal) is dropped, and the hard-coded column count 4 is
# generalised to however many fields each JSON record carries.
plotparticipation <- function(periodnum) {
  library("rjson")
  library(gridExtra)
  library(ggplot2)
  json_data <- fromJSON(file = paste0("vote_participation_", periodnum, ".json"))
  # Bind the per-representative records into a matrix of list cells, then
  # flatten each column into a plain vector.
  full <- do.call("rbind", json_data)
  flat <- matrix(NA, nrow = length(json_data), ncol = ncol(full))
  for (i in seq_len(ncol(full))) {
    flat[, i] <- unlist(full[, i])
  }
  unlistdf <- as.data.frame(flat)
  colnames(unlistdf)[1] <- "Party"
  colnames(unlistdf)[2] <- "ParticipationFrequency"
  # The flattened values arrive as a factor of numeric strings (pre-R-4.0
  # stringsAsFactors default); index the levels by the factor codes --
  # as.numeric() on the factor itself would return the codes instead.
  unlistdf$ParticipationFrequency <-
    as.numeric(levels(unlistdf$ParticipationFrequency))[unlistdf$ParticipationFrequency]
  # Group bars of the same party together and number representatives 1..n.
  unlistdf <- unlistdf[order(unlistdf[, "Party"]), ]
  unlistdf$Representative <- seq_len(nrow(unlistdf))
  ggplot(data = unlistdf,
         aes(x = Representative, y = ParticipationFrequency, fill = Party)) +
    geom_bar(stat = "identity", position = position_dodge()) +
    ggtitle(paste("Period", periodnum)) +
    theme(plot.title = element_text(face = "bold", size = 20))
}
# Build one participation chart per available legislative period.
plot4<-plotparticipation('4')
plot5<-plotparticipation('5')
plot6<-plotparticipation('6')
plot8<-plotparticipation('8')
# Helper providing multiplot() (only needed for the commented-out path below).
source("http://peterhaschke.com/Code/multiplot.R")
#png(filename="PlotRepresentativeParticipationFrequency.png")
#multiplot(plot4,plot5,plot6,plot8,ncol=2)
#dev.off()
# Arrange the four period charts in a 2x2 grid under a shared title, then
# save the most recently displayed plot to disk.
grid.arrange(arrangeGrob(plot4,plot5,plot6,plot8, ncol=2, nrow=2), main=textGrob("Representative Participation Frequency",gp=gpar(fontsize=20,font=3)))
ggsave(file="PlotRepresentativeParticipationFrequency.png")
|
best <- function(state, outcome)
{
## Read outcome data
outcome_csv <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
StateFilter = outcome_csv[, "State"] == state
State = outcome_csv[StateFilter,]
Outcomes <- c("heart attack", "heart failure", "pneumonia")
## Check that state and outcome are valid
if(nrow(State) == 0)
stop("invalid state")
if(!outcome %in% Outcomes)
stop("invalid outcome")
if(outcome == "heart attack") {
sc <- 11
}
if(outcome == "heart failure") {
sc <- 17
}
if(outcome == "pneumonia") {
sc <- 23
}
## Return hospital name in that state with the lowest 30-day death
## rate
data <- outcome_csv[outcome_csv$State == state,]
data[, sc] <- sapply(data[, sc], as.numeric)
data <- data[order(data[ , 2]), ]
best <- data[which.min(data[ , sc]), "Hospital.Name"]
best
} | /IntroToR/best.r | no_license | gyurisc/datasciencecoursera | R | false | false | 896 | r | best <- function(state, outcome)
{
## Read outcome data
outcome_csv <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
StateFilter = outcome_csv[, "State"] == state
State = outcome_csv[StateFilter,]
Outcomes <- c("heart attack", "heart failure", "pneumonia")
## Check that state and outcome are valid
if(nrow(State) == 0)
stop("invalid state")
if(!outcome %in% Outcomes)
stop("invalid outcome")
if(outcome == "heart attack") {
sc <- 11
}
if(outcome == "heart failure") {
sc <- 17
}
if(outcome == "pneumonia") {
sc <- 23
}
## Return hospital name in that state with the lowest 30-day death
## rate
data <- outcome_csv[outcome_csv$State == state,]
data[, sc] <- sapply(data[, sc], as.numeric)
data <- data[order(data[ , 2]), ]
best <- data[which.min(data[ , sc]), "Hospital.Name"]
best
} |
#### mTORC1 AND mTORC2 EXPRESSION DATA - ESCA ####
# load expr. and clinical data files
library(TCGA2STAT)
genes <- c("MTOR", "RPTOR", "DEPDC6", "MLST8", "AKT1S1", "RICTOR", "MAPKAP1", "PRR5L")
expr.esca <- log2(expr.esca+1) # log of exp data
expr.genes.esca <- expr.esca[,genes] # to select genes from the matrix
### Visualizing heatmaps:
library(pheatmap)
p.esca <-pheatmap(t(expr.genes.esca), show_colnames = FALSE,
cutree_cols = 3) # to be specified after visualising first
# Extracting cluster assignments for each sample
clusteredSamples <- p.esca$tree_col
assignments.esca <- cutree(clusteredSamples, k=3) # k = cutree_cols
groupAssignments.esca <- data.frame(Group=factor(assignments.esca))
p.esca <- pheatmap(t(expr.genes.esca), show_colnames = FALSE,
cutree_cols = 3, annotation = groupAssignments.esca)
### Merging group assignments
df.expr.esca <- data.frame(expr.genes.esca)
df.expr.esca$Sample <- rownames(df.expr.esca)
groupAssignments.esca$SampleID <- rownames(groupAssignments.esca)
df.merged.esca <- merge(df.expr.esca, groupAssignments.esca,
by.x = "Sample", by.y = "SampleID",
all.x = FALSE, all.y = FALSE)
# merging clinical data
clinical.esca$PatientID <- rownames(clinical.esca)
df.merged.esca$PatientID <- substr(df.merged.esca$Sample, start = 1, stop = 12)
df.merged.esca$PatientAge <- clinical.esca[match(df.merged.esca$PatientID, clinical.esca$PatientID), "yearstobirth"]
df.merged.esca$PatientAge <- as.numeric(df.merged.esca$PatientAge)
# merging ageing signature data (DS and MP)
df.merged.esca$SubID <- substr(df.merged.esca$Sample, start = 1, stop = 19)
S1_corr_data_ESCA_filter$SubID <- substr(S1_corr_data_ESCA_filter$Sample_ID, start = 1, stop = 19)
df.merged.esca <- merge(df.merged.esca, S1_corr_data_ESCA_filter,
by.x = "SubID", by.y = "SubID",
all.x = FALSE, all.y = FALSE)
#### S1 correlation with mTORCs ####
values <- c("highDEPDC6.lowPRR5L", "lowDEPDC6.highPRR5L", "lowDEPDC6.lowPRR5L")
df.merged.esca$Status <- values[df.merged.esca$Group]
## MP ##
library(ggpubr)
violin.esca.mp <- ggviolin(df.merged.esca, x = "Status", y = "MP", fill = "Status", palette = c("#95d108", "#f29b30", "#5dd4f5"), add = "boxplot", add.params = list(fill = "white"))
comparison.esca <- list(c("highDEPDC6.lowPRR5L", "lowDEPDC6.highPRR5L"), c("lowDEPDC6.highPRR5L", "lowDEPDC6.lowPRR5L"), c("highDEPDC6.lowPRR5L", "lowDEPDC6.lowPRR5L"))
p.mp <- violin.esca.mp + stat_compare_means(comparisons = comparison.esca) +
stat_compare_means(label.y = 0.35, size = 5)
p.mp$layers[[2]]$aes_params$textsize <- 5
lab.esca.mp <- ggpar(p.mp,
main = "ESCA",
font.main = c(16, "bold"),
xlab = "Gene expression status",
ylab = "% S1 (MP)",
font.x = c(14, "bold"),
font.y = c(14, "bold"),
font.ytickslab = 14,
font.xtickslab = c(1, "white"),
legend = "none")
mean.ds <- aggregate(df.merged.esca$DS ~ df.merged.esca$Status, df.merged.esca, mean)
## DS ##
violin.esca.ds <- ggviolin(df.merged.esca, x = "Status", y = "DS", fill = "Status", palette = c("#2da10a", "#f27735", "#1155f5"), add = "boxplot", add.params = list(fill = "white"))
p.ds <- violin.esca.ds + stat_compare_means(comparisons = comparison.esca) +
stat_compare_means(label.y = 1.5, size = 5)
p.ds$layers[[2]]$aes_params$textsize <- 5
lab.esca.ds <- ggpar(p.ds,
main = "ESCA",
font.main = c(16, "bold"),
xlab = "Gene expression status",
ylab = "% S1 (MP)",
font.x = c(14, "bold"),
font.y = c(14, "bold"),
font.ytickslab = 14,
font.xtickslab = c(1, "white"),
legend = "none")
mean.ds <- aggregate(df.merged.esca$DS ~ df.merged.esca$Status, df.merged.esca, mean)
save(df.merged.esca, file = "df.merged.esca.RData")
| /MSc Project R/1 R Scripts /4 mTORC1&2_expression/mTORCs_ESCA.R | no_license | Varshini-Suresh/MSc-Project-R-Codes- | R | false | false | 4,092 | r | #### mTORC1 AND mTORC2 EXPRESSION DATA - ESCA ####
# load expr. and clinical data files
library(TCGA2STAT)
genes <- c("MTOR", "RPTOR", "DEPDC6", "MLST8", "AKT1S1", "RICTOR", "MAPKAP1", "PRR5L")
expr.esca <- log2(expr.esca+1) # log of exp data
expr.genes.esca <- expr.esca[,genes] # to select genes from the matrix
### Visualizing heatmaps:
library(pheatmap)
p.esca <-pheatmap(t(expr.genes.esca), show_colnames = FALSE,
cutree_cols = 3) # to be specified after visualising first
# Extracting cluster assignments for each sample
clusteredSamples <- p.esca$tree_col
assignments.esca <- cutree(clusteredSamples, k=3) # k = cutree_cols
groupAssignments.esca <- data.frame(Group=factor(assignments.esca))
p.esca <- pheatmap(t(expr.genes.esca), show_colnames = FALSE,
cutree_cols = 3, annotation = groupAssignments.esca)
### Merging group assignments
df.expr.esca <- data.frame(expr.genes.esca)
df.expr.esca$Sample <- rownames(df.expr.esca)
groupAssignments.esca$SampleID <- rownames(groupAssignments.esca)
df.merged.esca <- merge(df.expr.esca, groupAssignments.esca,
by.x = "Sample", by.y = "SampleID",
all.x = FALSE, all.y = FALSE)
# merging clinical data
clinical.esca$PatientID <- rownames(clinical.esca)
df.merged.esca$PatientID <- substr(df.merged.esca$Sample, start = 1, stop = 12)
df.merged.esca$PatientAge <- clinical.esca[match(df.merged.esca$PatientID, clinical.esca$PatientID), "yearstobirth"]
df.merged.esca$PatientAge <- as.numeric(df.merged.esca$PatientAge)
# merging ageing signature data (DS and MP)
df.merged.esca$SubID <- substr(df.merged.esca$Sample, start = 1, stop = 19)
S1_corr_data_ESCA_filter$SubID <- substr(S1_corr_data_ESCA_filter$Sample_ID, start = 1, stop = 19)
df.merged.esca <- merge(df.merged.esca, S1_corr_data_ESCA_filter,
by.x = "SubID", by.y = "SubID",
all.x = FALSE, all.y = FALSE)
#### S1 correlation with mTORCs ####
values <- c("highDEPDC6.lowPRR5L", "lowDEPDC6.highPRR5L", "lowDEPDC6.lowPRR5L")
df.merged.esca$Status <- values[df.merged.esca$Group]
## MP ##
library(ggpubr)
violin.esca.mp <- ggviolin(df.merged.esca, x = "Status", y = "MP", fill = "Status", palette = c("#95d108", "#f29b30", "#5dd4f5"), add = "boxplot", add.params = list(fill = "white"))
comparison.esca <- list(c("highDEPDC6.lowPRR5L", "lowDEPDC6.highPRR5L"), c("lowDEPDC6.highPRR5L", "lowDEPDC6.lowPRR5L"), c("highDEPDC6.lowPRR5L", "lowDEPDC6.lowPRR5L"))
p.mp <- violin.esca.mp + stat_compare_means(comparisons = comparison.esca) +
stat_compare_means(label.y = 0.35, size = 5)
p.mp$layers[[2]]$aes_params$textsize <- 5
lab.esca.mp <- ggpar(p.mp,
main = "ESCA",
font.main = c(16, "bold"),
xlab = "Gene expression status",
ylab = "% S1 (MP)",
font.x = c(14, "bold"),
font.y = c(14, "bold"),
font.ytickslab = 14,
font.xtickslab = c(1, "white"),
legend = "none")
mean.ds <- aggregate(df.merged.esca$DS ~ df.merged.esca$Status, df.merged.esca, mean)
## DS ##
violin.esca.ds <- ggviolin(df.merged.esca, x = "Status", y = "DS", fill = "Status", palette = c("#2da10a", "#f27735", "#1155f5"), add = "boxplot", add.params = list(fill = "white"))
p.ds <- violin.esca.ds + stat_compare_means(comparisons = comparison.esca) +
stat_compare_means(label.y = 1.5, size = 5)
p.ds$layers[[2]]$aes_params$textsize <- 5
lab.esca.ds <- ggpar(p.ds,
main = "ESCA",
font.main = c(16, "bold"),
xlab = "Gene expression status",
ylab = "% S1 (MP)",
font.x = c(14, "bold"),
font.y = c(14, "bold"),
font.ytickslab = 14,
font.xtickslab = c(1, "white"),
legend = "none")
mean.ds <- aggregate(df.merged.esca$DS ~ df.merged.esca$Status, df.merged.esca, mean)
save(df.merged.esca, file = "df.merged.esca.RData")
|
# CMPT 318 (Fall 2018)
# Group Assignment 1
#
# Authors:
# Aarish Kapila
# Che Jung (Kent) Lee
# Karan Sharma
# Razvan Andrei Cretu
# Yernur Nursultanov
library("DescTools") # For Gmean() and Mode()
fileName <- 'Dataset1.txt'
df <- read.table(fileName, header = TRUE, sep = ",")
feature_A <- df$Global_active_power
feature_B <- df$Global_reactive_power
feature_C <- df$Voltage
feature_D <- df$Global_intensity
# 1. Compute the arithmetic and the geometric mean, the median, the mode and the
# standard deviation for features A and B respectively.
# Print arithmetic mean, geometric mean, median, mode and standard
# deviation of a numeric vector, one "\t<label>\t= <value>" line each,
# formatted to two decimals. NAs are dropped. Gmean() and Mode() come
# from the DescTools package loaded at the top of the file. Called for
# its printed output only.
print_stats <- function(x) {
cat(sprintf("\tArithmetic mean\t= %.02f\n", mean(x, na.rm = TRUE)))
cat(sprintf("\tGeometric mean\t= %.02f\n", Gmean(x, na.rm = TRUE)))
cat(sprintf("\tMedian\t\t= %.02f\n", median(x, na.rm = TRUE)))
cat(sprintf("\tMode\t\t= %.02f\n", Mode(x, na.rm = TRUE)))
cat(sprintf("\tStandard dev.\t= %.02f\n", sd(x, na.rm = TRUE)))
}
cat("\nFeature A statistics:\n")
print_stats(feature_A)
cat("\nFeature B statistics:\n")
print_stats(feature_B)
# 2. Compute the correlation between each of the four features A, B, C and D
# using Pearson’s correlation coefficient
# Print the Pearson correlation between x and y to two decimals,
# using complete observations only (rows with NA in either vector dropped).
print_cor <- function(x, y) {
cat(sprintf("\tCorrelation\t= %.02f\n", cor(x, y, use="complete.obs", method="pearson")))
}
cat("\nFeature A vs Feature B:\n")
print_cor(feature_A, feature_B)
cat("\nFeature A vs Feature C:\n")
print_cor(feature_A, feature_C)
cat("\nFeature A vs Feature D:\n")
print_cor(feature_A, feature_D)
cat("\nFeature B vs Feature C:\n")
print_cor(feature_B, feature_C)
cat("\nFeature B vs Feature D:\n")
print_cor(feature_B, feature_D)
cat("\nFeature C vs Feature D:\n")
print_cor(feature_C, feature_D)
# 3. For features A and B compute the min and max values on weekdays and weekend
# days respectively.
# Converting the format
date <- as.POSIXlt(df$Date, format = "%d/%m/%Y")
# Creating a column
df$day <- weekdays(date)
# Separating the days based on weekday and weekend
weekday <- df[df$day %in% c("Monday","Tuesday","Wednesday","Thursday","Friday"),]
weekend <- df[df$day %in% c("Saturday","Sunday"),]
# Print the min and max of a feature for weekday and weekend subsets,
# one labelled line each, formatted to two decimals; NAs are ignored.
#   weekdays - feature values observed Monday-Friday
#   weekends - feature values observed Saturday-Sunday
print_min_max <- function(weekdays, weekends) {
cat(sprintf("\tMin value on weekdays = %.02f\n", min(weekdays, na.rm = TRUE)))
cat(sprintf("\tMax value on weekdays = %.02f\n", max(weekdays, na.rm = TRUE)))
cat(sprintf("\tMin value on weekends = %.02f\n", min(weekends, na.rm = TRUE)))
cat(sprintf("\tMax value on weekends = %.02f\n", max(weekends, na.rm = TRUE)))
}
cat("\nFeature A:\n")
print_min_max(weekday$Global_active_power, weekend$Global_active_power)
cat("\nFeature B:\n")
print_min_max(weekday$Global_reactive_power, weekend$Global_reactive_power) | /a1/a1.R | no_license | aarishk/critical-infrastructure-analysis | R | false | false | 2,685 | r | # CMPT 318 (Fall 2018)
# Group Assignment 1
#
# Authors:
# Aarish Kapila
# Che Jung (Kent) Lee
# Karan Sharma
# Razvan Andrei Cretu
# Yernur Nursultanov
library("DescTools") # For Gmean() and Mode()
fileName <- 'Dataset1.txt'
df <- read.table(fileName, header = TRUE, sep = ",")
feature_A <- df$Global_active_power
feature_B <- df$Global_reactive_power
feature_C <- df$Voltage
feature_D <- df$Global_intensity
# 1. Compute the arithmetic and the geometric mean, the median, the mode and the
# standard deviation for features A and B respectively.
# Print arithmetic mean, geometric mean, median, mode and standard
# deviation of a numeric vector, one "\t<label>\t= <value>" line each,
# formatted to two decimals. NAs are dropped. Gmean() and Mode() come
# from the DescTools package loaded at the top of the file. Called for
# its printed output only.
print_stats <- function(x) {
cat(sprintf("\tArithmetic mean\t= %.02f\n", mean(x, na.rm = TRUE)))
cat(sprintf("\tGeometric mean\t= %.02f\n", Gmean(x, na.rm = TRUE)))
cat(sprintf("\tMedian\t\t= %.02f\n", median(x, na.rm = TRUE)))
cat(sprintf("\tMode\t\t= %.02f\n", Mode(x, na.rm = TRUE)))
cat(sprintf("\tStandard dev.\t= %.02f\n", sd(x, na.rm = TRUE)))
}
cat("\nFeature A statistics:\n")
print_stats(feature_A)
cat("\nFeature B statistics:\n")
print_stats(feature_B)
# 2. Compute the correlation between each of the four features A, B, C and D
# using Pearson’s correlation coefficient
# Print the Pearson correlation between x and y to two decimals,
# using complete observations only (rows with NA in either vector dropped).
print_cor <- function(x, y) {
  r <- cor(x, y, use = "complete.obs", method = "pearson")
  cat(sprintf("\tCorrelation\t= %.02f\n", r))
}
cat("\nFeature A vs Feature B:\n")
print_cor(feature_A, feature_B)
cat("\nFeature A vs Feature C:\n")
print_cor(feature_A, feature_C)
cat("\nFeature A vs Feature D:\n")
print_cor(feature_A, feature_D)
cat("\nFeature B vs Feature C:\n")
print_cor(feature_B, feature_C)
cat("\nFeature B vs Feature D:\n")
print_cor(feature_B, feature_D)
cat("\nFeature C vs Feature D:\n")
print_cor(feature_C, feature_D)
# 3. For features A and B compute the min and max values on weekdays and weekend
# days respectively.
# Converting the format
date <- as.POSIXlt(df$Date, format = "%d/%m/%Y")
# Creating a column
df$day <- weekdays(date)
# Separating the days based on weekday and weekend
weekday <- df[df$day %in% c("Monday","Tuesday","Wednesday","Thursday","Friday"),]
weekend <- df[df$day %in% c("Saturday","Sunday"),]
# Print the min and max of a feature for weekday and weekend subsets,
# one labelled line each, formatted to two decimals; NAs are ignored.
#   weekdays - feature values observed Monday-Friday
#   weekends - feature values observed Saturday-Sunday
print_min_max <- function(weekdays, weekends) {
  emit <- function(label, value) {
    cat(sprintf("\t%s = %.02f\n", label, value))
  }
  emit("Min value on weekdays", min(weekdays, na.rm = TRUE))
  emit("Max value on weekdays", max(weekdays, na.rm = TRUE))
  emit("Min value on weekends", min(weekends, na.rm = TRUE))
  emit("Max value on weekends", max(weekends, na.rm = TRUE))
}
cat("\nFeature A:\n")
print_min_max(weekday$Global_active_power, weekend$Global_active_power)
cat("\nFeature B:\n")
print_min_max(weekday$Global_reactive_power, weekend$Global_reactive_power) |
# Plot cos(x) sampled at the integers 1..100.
f <- 1:100
y <- cos(f)
# Bug fix: the original called plot(y, a) where `a` was never defined,
# which errors at run time; plot the samples against their x positions.
plot(f, y)
| /test1.R | no_license | marinaspynu/Modeling | R | false | false | 29 | r | f= 1:100
y= cos(f)
plot(y,a)
|
library(SymRC)
library(TauStar)
source("TestHelpers.R")
context("Testing the multivariate tau star measures.")
test_that("Check that multivariate measures agree with t* in 2 dims", {
set.seed(123)
for (i in 1:10) {
X = matrix(rnorm(30), ncol = 1)
Y = matrix(rnorm(30), ncol = 1)
a = tStar(X, Y)
b = partialTauStarFromDef(X, Y)
c = lexTauStarFromDef(X, Y, 0, 0)
d = fullLexTauStarFromDef(X, Y)
e = jointTauStarFromDef(X, Y, 1, 1)
expect_all_equal(a, b, c, d, e)
}
for (i in 1:10) {
X = matrix(rpois(30, lambda = 2), ncol = 1)
Y = matrix(rpois(30, lambda = 2), ncol = 1)
a = tStar(X, Y)
b = partialTauStarFromDef(X, Y)
c = lexTauStarFromDef(X, Y, 0, 0)
d = fullLexTauStarFromDef(X, Y)
e = jointTauStarFromDef(X, Y, 1, 1)
expect_all_equal(a, b, c, d, e)
}
})
test_that("Check that the RangeTree version of partial tau* agrees with naive", {
set.seed(123)
n = 15
nXCols = 1
nYCols = 1
for (i in 1:10) {
X = matrix(rpois(nXCols * n, 1), ncol = nXCols)
Y = matrix(rpois(nYCols * n, 1), ncol = nYCols)
expect_all_equal(partialTauStarFromDef(X, Y),
partialTauStarNaive(X, Y),
partialTauStarRangeTree(X, Y))
}
n = 15
nXCols = 2
nYCols = 1
for (i in 1:10) {
X = matrix(rpois(nXCols * n, 1), ncol = nXCols)
Y = matrix(rpois(nYCols * n, 1), ncol = nYCols)
expect_all_equal(partialTauStarFromDef(X, Y),
partialTauStarNaive(X, Y),
partialTauStarRangeTree(X, Y))
}
n = 15
nXCols = 1
nYCols = 2
for (i in 1:10) {
X = matrix(rpois(nXCols * n, 1), ncol = nXCols)
Y = matrix(rpois(nYCols * n, 1), ncol = nYCols)
expect_all_equal(partialTauStarFromDef(X, Y),
partialTauStarNaive(X, Y),
partialTauStarRangeTree(X, Y))
}
n = 20
nXCols = 2
nYCols = 2
for (i in 1:10) {
X = matrix(rpois(nXCols * n, 1), ncol = nXCols)
Y = matrix(rpois(nYCols * n, 1), ncol = nYCols)
expect_all_equal(partialTauStarFromDef(X, Y),
partialTauStarNaive(X, Y),
partialTauStarRangeTree(X, Y))
}
n = 10
nXCols = 2
nYCols = 2
for (i in 1:10) {
X = matrix(rnorm(nXCols * n), ncol = nXCols)
Y = matrix(rnorm(nYCols * n), ncol = nYCols)
expect_all_equal(partialTauStarFromDef(X, Y),
partialTauStarNaive(X, Y),
partialTauStarRangeTree(X, Y))
}
})
test_that("Check that the RangeTree version of joint tau* agrees with naive", {
set.seed(1234)
n = 15
nXCols = 1
nYCols = 1
for (i in 1:10) {
xOnOffVec = sample(c(1, rbinom(nXCols - 1, size = 1, prob = 1/2)))
yOnOffVec = sample(c(1, rbinom(nYCols - 1, size = 1, prob = 1/2)))
X = matrix(rpois(nXCols * n, 1), ncol = nXCols)
Y = matrix(rpois(nYCols * n, 1), ncol = nYCols)
expect_all_equal(jointTauStarFromDef(X, Y, xOnOffVec = xOnOffVec, yOnOffVec = yOnOffVec),
jointTauStarNaive(X, Y, xOnOffVec = xOnOffVec, yOnOffVec = yOnOffVec),
jointTauStarRangeTree(X, Y, xOnOffVec = xOnOffVec, yOnOffVec = yOnOffVec))
}
n = 15
nXCols = 2
nYCols = 1
for (i in 1:10) {
xOnOffVec = sample(c(1, rbinom(nXCols - 1, size = 1, prob = 1/2)))
yOnOffVec = sample(c(1, rbinom(nYCols - 1, size = 1, prob = 1/2)))
X = matrix(rpois(nXCols * n, 1), ncol = nXCols)
Y = matrix(rpois(nYCols * n, 1), ncol = nYCols)
expect_all_equal(jointTauStarFromDef(X, Y, xOnOffVec = xOnOffVec, yOnOffVec = yOnOffVec),
jointTauStarNaive(X, Y, xOnOffVec = xOnOffVec, yOnOffVec = yOnOffVec),
jointTauStarRangeTree(X, Y, xOnOffVec = xOnOffVec, yOnOffVec = yOnOffVec))
}
n = 15
nXCols = 1
nYCols = 2
for (i in 1:10) {
xOnOffVec = sample(c(1, rbinom(nXCols - 1, size = 1, prob = 1/2)))
yOnOffVec = sample(c(1, rbinom(nYCols - 1, size = 1, prob = 1/2)))
X = matrix(rpois(nXCols * n, 1), ncol = nXCols)
Y = matrix(rpois(nYCols * n, 1), ncol = nYCols)
expect_all_equal(jointTauStarFromDef(X, Y, xOnOffVec = xOnOffVec, yOnOffVec = yOnOffVec),
jointTauStarNaive(X, Y, xOnOffVec = xOnOffVec, yOnOffVec = yOnOffVec),
jointTauStarRangeTree(X, Y, xOnOffVec = xOnOffVec, yOnOffVec = yOnOffVec))
}
n = 20
nXCols = 2
nYCols = 2
for (i in 1:10) {
xOnOffVec = sample(c(1, rbinom(nXCols - 1, size = 1, prob = 1/2)))
yOnOffVec = sample(c(1, rbinom(nYCols - 1, size = 1, prob = 1/2)))
X = matrix(rpois(nXCols * n, 1), ncol = nXCols)
Y = matrix(rpois(nYCols * n, 1), ncol = nYCols)
expect_all_equal(jointTauStarFromDef(X, Y, xOnOffVec = xOnOffVec, yOnOffVec = yOnOffVec),
jointTauStarNaive(X, Y, xOnOffVec = xOnOffVec, yOnOffVec = yOnOffVec),
jointTauStarRangeTree(X, Y, xOnOffVec = xOnOffVec, yOnOffVec = yOnOffVec))
}
n = 15
nXCols = 2
nYCols = 2
for (i in 1:10) {
xOnOffVec = sample(c(1, rbinom(nXCols - 1, size = 1, prob = 1/2)))
yOnOffVec = sample(c(1, rbinom(nYCols - 1, size = 1, prob = 1/2)))
X = matrix(rnorm(nXCols * n), ncol = nXCols)
Y = matrix(rnorm(nYCols * n), ncol = nYCols)
expect_all_equal(jointTauStarFromDef(X, Y, xOnOffVec = xOnOffVec, yOnOffVec = yOnOffVec),
jointTauStarNaive(X, Y, xOnOffVec = xOnOffVec, yOnOffVec = yOnOffVec),
jointTauStarRangeTree(X, Y, xOnOffVec = xOnOffVec, yOnOffVec = yOnOffVec))
}
})
| /tests/testthat/test_MultivariateTauStar.R | no_license | Lucaweihs/SymRC | R | false | false | 5,535 | r | library(SymRC)
library(TauStar)
source("TestHelpers.R")
context("Testing the multivariate tau star measures.")
# When X and Y are both univariate, every multivariate generalisation
# should reduce to TauStar's t* statistic. Checked on continuous (rnorm,
# tie-free) and discrete (rpois, heavily tied) data.
test_that("Check that multivariate measures agree with t* in 2 dims", {
set.seed(123)
# Continuous data: no ties.
for (i in 1:10) {
X = matrix(rnorm(30), ncol = 1)
Y = matrix(rnorm(30), ncol = 1)
a = tStar(X, Y)
b = partialTauStarFromDef(X, Y)
c = lexTauStarFromDef(X, Y, 0, 0)
d = fullLexTauStarFromDef(X, Y)
e = jointTauStarFromDef(X, Y, 1, 1)
# expect_all_equal() comes from TestHelpers.R sourced above.
expect_all_equal(a, b, c, d, e)
}
# Discrete data: exercises tie handling.
for (i in 1:10) {
X = matrix(rpois(30, lambda = 2), ncol = 1)
Y = matrix(rpois(30, lambda = 2), ncol = 1)
a = tStar(X, Y)
b = partialTauStarFromDef(X, Y)
c = lexTauStarFromDef(X, Y, 0, 0)
d = fullLexTauStarFromDef(X, Y)
e = jointTauStarFromDef(X, Y, 1, 1)
expect_all_equal(a, b, c, d, e)
}
})
# Run `reps` random trials at sample size `n` with the given X/Y column
# counts, checking that the three partial tau* implementations agree.
# `rng` draws the raw values; calls mirror the original draw order exactly.
check_partial_tau_star_agreement <- function(n, n_x_cols, n_y_cols, reps, rng) {
  for (i in seq_len(reps)) {
    X <- matrix(rng(n_x_cols * n), ncol = n_x_cols)
    Y <- matrix(rng(n_y_cols * n), ncol = n_y_cols)
    expect_all_equal(partialTauStarFromDef(X, Y),
                     partialTauStarNaive(X, Y),
                     partialTauStarRangeTree(X, Y))
  }
}

# Definition-based, naive and range-tree computations of partial tau*
# must coincide across dimensions and for tied (Poisson) and continuous
# (normal) data.
test_that("Check that the RangeTree version of partial tau* agrees with naive", {
  set.seed(123)
  pois1 <- function(n) rpois(n, 1)
  check_partial_tau_star_agreement(15, 1, 1, 10, pois1)
  check_partial_tau_star_agreement(15, 2, 1, 10, pois1)
  check_partial_tau_star_agreement(15, 1, 2, 10, pois1)
  check_partial_tau_star_agreement(20, 2, 2, 10, pois1)
  check_partial_tau_star_agreement(10, 2, 2, 10, rnorm)
})
# Run `reps` random trials at sample size `n` with the given X/Y column
# counts, checking that the three joint tau* implementations agree under
# random on/off column selections (the first column is always on).
# `rng` draws the raw values; calls mirror the original draw order exactly.
check_joint_tau_star_agreement <- function(n, n_x_cols, n_y_cols, reps, rng) {
  for (i in seq_len(reps)) {
    x_on_off <- sample(c(1, rbinom(n_x_cols - 1, size = 1, prob = 1/2)))
    y_on_off <- sample(c(1, rbinom(n_y_cols - 1, size = 1, prob = 1/2)))
    X <- matrix(rng(n_x_cols * n), ncol = n_x_cols)
    Y <- matrix(rng(n_y_cols * n), ncol = n_y_cols)
    expect_all_equal(
      jointTauStarFromDef(X, Y, xOnOffVec = x_on_off, yOnOffVec = y_on_off),
      jointTauStarNaive(X, Y, xOnOffVec = x_on_off, yOnOffVec = y_on_off),
      jointTauStarRangeTree(X, Y, xOnOffVec = x_on_off, yOnOffVec = y_on_off))
  }
}

# Definition-based, naive and range-tree computations of joint tau* must
# coincide across dimensions and for tied (Poisson) and continuous
# (normal) data.
test_that("Check that the RangeTree version of joint tau* agrees with naive", {
  set.seed(1234)
  pois1 <- function(n) rpois(n, 1)
  check_joint_tau_star_agreement(15, 1, 1, 10, pois1)
  check_joint_tau_star_agreement(15, 2, 1, 10, pois1)
  check_joint_tau_star_agreement(15, 1, 2, 10, pois1)
  check_joint_tau_star_agreement(20, 2, 2, 10, pois1)
  check_joint_tau_star_agreement(15, 2, 2, 10, rnorm)
})
|
#-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
# Reference script for the SystemDS parfor result-merge test: reads matrix V,
# copies its first n-7 columns into a matrix of ones, doubles the result, and
# writes it back in MatrixMarket format.
# args[1]: input directory prefix (expects V.mtx); args[2]: output prefix.
args <- commandArgs(TRUE)
options(digits=22)
library("Matrix")
V1 <- readMM(paste(args[1], "V.mtx", sep=""))
V <- as.matrix(V1);
m <- nrow(V);
n <- ncol(V);
R1 <- matrix(1,m,n);
# Copy the first n-7 columns of V into R1.  seq_len(max(n-7, 0)) is used
# instead of 1:(n-7): when the input has fewer than 8 columns the loop is
# simply skipped, whereas 1:(n-7) would count downwards and index column 0.
for( i in seq_len(max(n - 7, 0)) )
{
   X <- V[,i];
   R1[,i] <- X;
}
R <- R1 + R1;
writeMM(as(R, "CsparseMatrix"), paste(args[2], "Rout", sep=""));
| /src/test/scripts/functions/parfor/parfor_pr_resultmerge1b.R | permissive | apache/systemds | R | false | false | 1,242 | r | #-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
args <- commandArgs(TRUE)
options(digits=22)
library("Matrix")
V1 <- readMM(paste(args[1], "V.mtx", sep=""))
V <- as.matrix(V1);
m <- nrow(V);
n <- ncol(V);
R1 <- matrix(1,m,n);
for( i in 1:(n-7) )
{
X <- V[,i];
R1[,i] <- X;
}
R <- R1 + R1;
writeMM(as(R, "CsparseMatrix"), paste(args[2], "Rout", sep=""));
|
# Collect yearly team totals for one college-football conference from
# cfbdatawarehouse.com and store them in a SQLite database.
# Independents are currently left out.
library(rvest)
library(stringr)
library(dplyr)
library(RSQLite)
setwd("C:\\path\\to\\cfb\\data\\")  # adjust to the local data directory
conf<-"sec" #aac, acc, big10, big12, conferenceusa, mac, mountainwest,
            #pac10, sec, sunbelt
weblink <- paste("http://www.cfbdatawarehouse.com/data/div_ia/", conf, "/index.php", sep="")
webpage <- read_html(weblink)
nodeslinks<- html_nodes(webpage, xpath = "//a")
# Anchors 27-40 on the conference index page are the per-team links.
# NOTE(review): this positional slice is fragile if the page layout changes.
needlinks<-nodeslinks[27:40]
links<-bind_rows(lapply(xml_attrs(needlinks), function(x) data.frame(as.list(x), stringsAsFactors=FALSE)))
# Rewrite relative hrefs to absolute yearly-totals URLs.  fixed = TRUE is
# required: without it "../../" is interpreted as a regular expression in
# which each "." matches ANY character, so unrelated 6-character runs
# ending in "/" could also be replaced.
links$href<-gsub("../../", "http://www.cfbdatawarehouse.com/data/", links$href, fixed = TRUE)
links$href<-gsub("index", "yearly_totals", links$href, fixed = TRUE)
get_yearly_totals<-function(webaddress){
  # Scrape the yearly-totals table for one school and return a cleaned
  # data frame (Year, Coach, W/L/T, Pct, points for/against, Delta, School).
  url <- as.character(webaddress)
  school <- strsplit(url, "/")[[1]][7]  # school slug is the 7th path piece
  print(school)                         # progress indicator
  webaddy <- read_html(url)
  yearlytotals <- html_nodes(webaddy, 'table')
  yt <- html_table(yearlytotals, fill = TRUE)[[15]]  # 15th table holds the totals
  yt.clean<-yt[,c(1:10)]
  colnames(yt.clean) <- c("Year","Coach","Win","Loss","Tie","Pct","PF","PA", "Delta", "School")
  yt.clean$School <- school
  yt.clean <- yt.clean[-nrow(yt.clean),] #last row contains totals
  yt.clean[-c(1:5), ]                    #remove first 5 rows; explicit return value
}
yearly.totals.matrix<- do.call(rbind,lapply(as.character(links$href),get_yearly_totals))
yearly.totals <- data.frame(yearly.totals.matrix)
#import to sqlite database (one table per conference, e.g. "sec_yt")
db<-dbConnect(SQLite(), dbname = "cfbdata.sqlite")
dbWriteTable(conn = db, name = paste(conf, "_yt", sep=""), value = yearly.totals)
#test to make sure it's there
#dbListTables(db)
#dbListFields(db, paste(conf, "_yt", sep=""))
#head(dbReadTable(db, paste(conf, "_yt", sep="")))
dbDisconnect(db)
| /R/coach_by_conf.R | no_license | rnall0/buzzduo | R | false | false | 1,856 | r | #Specify conference and collect yearly totals for each team in that conference and output to a SQLite database.
#Currently leaves out Independents.
library(rvest)
library(stringr)
library(dplyr)
library(RSQLite)
setwd("C:\\path\\to\\cfb\\data\\")
conf<-"sec" #aac, acc, big10, big12, conferenceusa, mac, mountainwest,
#pac10, sec, sunbelt
weblink <- paste("http://www.cfbdatawarehouse.com/data/div_ia/", conf, "/index.php", sep="")
webpage <- read_html(weblink)
nodeslinks<- html_nodes(webpage, xpath = "//a")
needlinks<-nodeslinks[27:40]
links<-bind_rows(lapply(xml_attrs(needlinks), function(x) data.frame(as.list(x), stringsAsFactors=FALSE)))
links$href<-gsub("../../", "http://www.cfbdatawarehouse.com/data/", links$href)
links$href<-gsub("index", "yearly_totals", links$href)
get_yearly_totals<-function(webaddress){
url<-paste(webaddress)
print(strsplit(url, "/")[[1]][7])
webaddy <- read_html(url)
yearlytotals <- html_nodes(webaddy, 'table')
yt <- html_table(yearlytotals, fill = TRUE)[[15]]
yt.clean<-yt[,c(1:10)]
colnames(yt.clean) <- c("Year","Coach","Win","Loss","Tie","Pct","PF","PA", "Delta", "School")
yt.clean$School<-paste(as.character(strsplit(url, "/")[[1]][7]))
yt.clean <- yt.clean[-nrow(yt.clean),] #last row contains totals
yt.clean <- yt.clean[-c(1:5), ] #remove first 5 rows
}
yearly.totals.matrix<- do.call(rbind,lapply(as.character(links$href),get_yearly_totals))
yearly.totals <- data.frame(yearly.totals.matrix)
#import to sqlite database
db<-dbConnect(SQLite(), dbname = "cfbdata.sqlite")
dbWriteTable(conn = db, name = paste(conf, "_yt", sep=""), value = yearly.totals)
#test to make sure it's there
#dbListTables(db)
#dbListFields(db, paste(conf, "_yt", sep=""))
#head(dbReadTable(db, paste(conf, "_yt", sep="")))
dbDisconnect(db)
|
library(data.table)
library(rmeta)
# Input locations: per-annotation .annot.gz files and the matching S-LDSC
# result directories for the LAMP5 module.
annot_cell = "/n/groups/price/kushal/singlecellLDSC/data/ANNOTATIONS/Modules/healthy/celltype_enriched/brain2/LAMP5"
results_cell = "/n/groups/price/kushal/singlecellLDSC/data/LDSC_RESULTS/Modules/healthy/celltype_enriched/brain2/baselineLD_v2.1/LAMP5"
# One sub-directory per annotation; [-1] drops the parent directory itself.
annot_names = list.dirs(results_cell, full.names=F)[-1]
#annot_names = "FS2"
# Column offset of the annotation of interest inside the annot files.
annot_idx = 1
all_traits = c('UKB_460K.body_BMIz','UKB_460K.cov_EDU_YEARS','UKB_460K.lung_FVCzSMOKE','UKB_460K.cov_SMOKING_STATUS',
'UKB_460K.mental_NEUROTICISM','UKB_460K.blood_WHITE_COUNT','PASS_Years_of_Education2','UKB_460K.bp_SYSTOLICadjMEDz',
'UKB_460K.body_HEIGHTz','UKB_460K.other_MORNINGPERSON','UKB_460K.body_WHRadjBMIz','UKB_460K.lung_FEV1FVCzSMOKE',
'UKB_460K.repro_MENARCHE_AGE','UKB_460K.blood_RED_COUNT','UKB_460K.blood_PLATELET_COUNT','UKB_460K.bmd_HEEL_TSCOREz',
'UKB_460K.blood_EOSINOPHIL_COUNT','PASS_Schizophrenia','UKB_460K.blood_RBC_DISTRIB_WIDTH','PASS_Height1','PASS_BMI1',
'UKB_460K.disease_T2D','PASS_AgeFirstBirth','UKB_460K.disease_RESPIRATORY_ENT','UKB_460K.body_BALDING1','UKB_460K.disease_HYPOTHYROIDISM_SELF_REP',
'UKB_460K.disease_ALLERGY_ECZEMA_DIAGNOSED','UKB_460K.disease_HI_CHOL_SELF_REP','UKB_460K.repro_MENOPAUSE_AGE','PASS_HDL','UKB_460K.pigment_SUNBURN',
'PASS_NumberChildrenEverBorn','PASS_Anorexia','PASS_LDL','PASS_Crohns_Disease','PASS_DS','PASS_Ever_Smoked','UKB_460K.pigment_HAIR',
'PASS_Rheumatoid_Arthritis','PASS_Type_2_Diabetes','PASS_Autism','UKB_460K.pigment_TANNING','PASS_Ulcerative_Colitis',
'UKB_460K.disease_DERMATOLOGY','PASS_Coronary_Artery_Disease','UKB_460K.disease_AID_SURE','UKB_460K.pigment_SKIN')
blood_traits = c("UKB_460K.blood_RBC_DISTRIB_WIDTH", "UKB_460K.blood_RED_COUNT", "UKB_460K.blood_WHITE_COUNT",
"UKB_460K.blood_PLATELET_COUNT", "UKB_460K.blood_EOSINOPHIL_COUNT")
autoimmune_traits = c("UKB_460K.disease_AID_SURE", "PASS_Ulcerative_Colitis", "PASS_Crohns_Disease",
"PASS_Rheumatoid_Arthritis", "PASS_Celiac", "PASS_Lupus", "PASS_Type_1_Diabetes",
"PASS_IBD", "PASS_Primary_biliary_cirrhosis")
brain_traits = c("PASS_Ever_Smoked", "UKB_460K.cov_SMOKING_STATUS", "UKB_460K.mental_NEUROTICISM", "UKB_460K.repro_MENARCHE_AGE",
"PASS_Years_of_Education2", "PASS_DS", "PASS_Schizophrenia", "UKB_460K.body_WHRadjBMIz",
"PASS_BMI1", "UKB_460K.body_BMIz")
immune_traits = c("PASS_Celiac", "PASS_Crohns_Disease", "PASS_IBD", "PASS_Lupus",
"PASS_Primary_biliary_cirrhosis", "PASS_Rheumatoid_Arthritis",
"PASS_Type_1_Diabetes", "PASS_Ulcerative_Colitis",
"UKB_460K.disease_ASTHMA_DIAGNOSED", "UKB_460K.disease_ALLERGY_ECZEMA_DIAGNOSED",
"PASS_Multiple_sclerosis", "UKB_460K.body_BMIz", "UKB_460K.disease_HYPERTENSION_DIAGNOSED",
"PASS_Triglycerides", "PASS_LDL", "PASS_HDL",
"UKB_460K.bp_DIASTOLICadjMEDz", "UKB_460K.bp_SYSTOLICadjMEDz",
"PASS_Alzheimer", "PASS_Anorexia", "PASS_Bipolar_Disorder",
"PASS_Schizophrenia", "UKB_460K.mental_NEUROTICISM", "UKB_460K.disease_AID_SURE",
"PASS_Type_2_Diabetes")
# Estimate the per-SNP standard deviation of one annotation column, pooled
# across all per-chromosome .annot.gz files in `cell_path`.
#
# cell_path:   directory holding the annotation's .annot.gz files.
# annot_index: which annotation column to use (offset from column 4).
# flag:        0 = return the cached value in sd_annot_<idx>.rda if present;
#              any other value forces recomputation (the cache is refreshed).
#
# Returns the pooled standard deviation (scalar).  Side effect: writes the
# cache file sd_annot_<annot_index>.rda into cell_path.
get_sd_annot = function(cell_path, annot_index = 1, flag=0){
  cache_file <- paste0(cell_path, "/", "sd_annot_", annot_index, ".rda")
  if(flag == 0 && file.exists(cache_file)){
    # get(load(...)) returns the (single) object stored in the .rda,
    # whatever its name.
    return(get(load(cache_file)))
  }
  # Pool the variance across files, weighting each by its df (n - 1).
  num = 0
  den = 0
  ll <- list.files(cell_path, pattern = ".annot.gz")
  # seq_along() (not 1:length(ll)) so an empty directory yields sqrt(0/0)
  # = NaN instead of an obscure indexing error from the 1:0 sequence.
  for(m in seq_along(ll)){
    dat <- data.frame(fread(paste0("zcat ", cell_path, "/", ll[m])))
    num = num + (nrow(dat)-1) * var(dat[,4+annot_index])
    den = den + (nrow(dat)-1)
    rm(dat)  # free the per-chromosome table before reading the next one
  }
  estd_sd_annot = sqrt(num/den)
  save(estd_sd_annot, file = cache_file)
  return(estd_sd_annot)
}
# Compute the standardized effect size (tau*) of each annotation,
# random-effects meta-analyzed across traits.
#
# annot_cell:       directory with one sub-directory of .annot.gz files per
#                   annotation (used to estimate the annotation SD).
# results_cell:     directory with the S-LDSC outputs per annotation
#                   (<trait>.sumstats.part_delete and <trait>.sumstats.log).
# annotations:      character vector of annotation names.
# traits:           character vector of trait names.
# index_in_results: annotation column inside the annot files.
# base_index:       column of the jackknife file holding the coefficient;
#                   defaults to index_in_results.
# flag:             passed through to get_sd_annot (0 = use cached SD).
# Mref:             number of reference SNPs used to standardize tau;
#                   default 5961159 is the value previously hard-coded.
#
# Returns a matrix (one row per annotation) of tau*, se(tau*), and the
# two-sided normal p-value.
run_single_tau_analysis = function(annot_cell,
                                   results_cell,
                                   annotations,
                                   traits,
                                   index_in_results=1,
                                   base_index = NULL,
                                   flag = 1,
                                   Mref = 5961159){
  if(is.null(base_index)){base_index = index_in_results}
  tau_star_table = matrix(0, length(annotations), 3)
  for(annot_id in seq_along(annotations)){
    cell_path = paste0(annot_cell, "/", annotations[annot_id])
    sd_annot1=get_sd_annot(cell_path, annot_index=index_in_results, flag = flag)
    df = c()
    for(trait_id in seq_along(traits)){
      result.file=paste0(results_cell, "/", annotations[annot_id], "/",
                         traits[trait_id], ".sumstats.part_delete")
      new_table=read.table(result.file,header=F)
      logfile = paste(results_cell, "/", annotations[annot_id], "/",
                      traits[trait_id],".sumstats.log", sep="")
      log = read.table(logfile,h=F,fill=T)
      # Total observed-scale h2 reported on the "h2:" line of the log.
      h2g = as.numeric(as.character(log[which(log$V4=="h2:"),5]))
      # Standardization factor: tau* = tau * sd(annot) * M / h2g.
      coef1=sd_annot1*Mref/h2g
      # Vectorized replacement of the former per-row c()-growing loop:
      # scale every delete-one jackknife coefficient at once.
      sc = as.numeric(new_table[, base_index]) * coef1
      mean_sc=mean(sc)
      # Delete-one-block jackknife SE; the block count was previously
      # hard-coded to 200 (the standard S-LDSC setting) but is now taken
      # from the file itself.
      nblk = length(sc)
      se_sc=sqrt((nblk - 1)^2/nblk*var(sc))
      df = rbind(df, c(mean_sc,se_sc))
    }
    # Random-effects meta-analysis of tau* over traits.
    test_tauj=meta.summaries(df[,1],df[,2],method="random")
    tau=test_tauj$summary
    tau_se=test_tauj$se.summary
    z=tau/tau_se
    cat("Printing results for annotation:", annotations[annot_id], "\n")
    cat(tau, " ", tau_se, " ", 2*pnorm(-abs(z)), "\n")
    tau_star_table[annot_id, ] = c(tau, tau_se, 2*pnorm(-abs(z)))
  }
  rownames(tau_star_table) = annotations
  return(tau_star_table)
}
# Driver: tau* meta-analyzed over different trait groups (flag = 0 reuses
# cached annotation SDs).
out1 = run_single_tau_analysis(annot_cell, results_cell, annotations = annot_names, traits = all_traits,
index_in_results = annot_idx, flag = 0)
out2 = run_single_tau_analysis(annot_cell, results_cell, annotations = annot_names, traits = brain_traits,
index_in_results = annot_idx, flag = 0)
out3 = run_single_tau_analysis(annot_cell, results_cell, annotations = annot_names, traits = c(blood_traits, autoimmune_traits),
index_in_results = annot_idx, flag = 0)
# NOTE(review): out4 is computed but never stored in `ll` below -- confirm
# whether an ll[["Autoimmune"]] entry was intended.
out4 = run_single_tau_analysis(annot_cell, results_cell, annotations = annot_names, traits = c(autoimmune_traits),
index_in_results = annot_idx, flag = 0)
ll <- list()
ll[["All"]] = out1
ll[["Brain"]] = out2
ll[["Blood"]] = out3
# Single-trait rerun on the 9th annotation only; flag = 1 forces the
# annotation SD to be recomputed instead of read from cache.
temp_trait = c("PASS_Ulcerative_Colitis")
# NOTE(review): this overwrites out3.  ll[["Blood"]] keeps the earlier value
# (R copies on assignment), but the name reuse is confusing.
out3 = run_single_tau_analysis(annot_cell, results_cell, annotations = annot_names[9],
traits = temp_trait,
index_in_results = annot_idx, flag = 1)
| /code/single_tau_star.R | no_license | kkdey/singlecellLDSC | R | false | false | 7,231 | r | library(data.table)
library(rmeta)
annot_cell = "/n/groups/price/kushal/singlecellLDSC/data/ANNOTATIONS/Modules/healthy/celltype_enriched/brain2/LAMP5"
results_cell = "/n/groups/price/kushal/singlecellLDSC/data/LDSC_RESULTS/Modules/healthy/celltype_enriched/brain2/baselineLD_v2.1/LAMP5"
annot_names = list.dirs(results_cell, full.names=F)[-1]
#annot_names = "FS2"
annot_idx = 1
all_traits = c('UKB_460K.body_BMIz','UKB_460K.cov_EDU_YEARS','UKB_460K.lung_FVCzSMOKE','UKB_460K.cov_SMOKING_STATUS',
'UKB_460K.mental_NEUROTICISM','UKB_460K.blood_WHITE_COUNT','PASS_Years_of_Education2','UKB_460K.bp_SYSTOLICadjMEDz',
'UKB_460K.body_HEIGHTz','UKB_460K.other_MORNINGPERSON','UKB_460K.body_WHRadjBMIz','UKB_460K.lung_FEV1FVCzSMOKE',
'UKB_460K.repro_MENARCHE_AGE','UKB_460K.blood_RED_COUNT','UKB_460K.blood_PLATELET_COUNT','UKB_460K.bmd_HEEL_TSCOREz',
'UKB_460K.blood_EOSINOPHIL_COUNT','PASS_Schizophrenia','UKB_460K.blood_RBC_DISTRIB_WIDTH','PASS_Height1','PASS_BMI1',
'UKB_460K.disease_T2D','PASS_AgeFirstBirth','UKB_460K.disease_RESPIRATORY_ENT','UKB_460K.body_BALDING1','UKB_460K.disease_HYPOTHYROIDISM_SELF_REP',
'UKB_460K.disease_ALLERGY_ECZEMA_DIAGNOSED','UKB_460K.disease_HI_CHOL_SELF_REP','UKB_460K.repro_MENOPAUSE_AGE','PASS_HDL','UKB_460K.pigment_SUNBURN',
'PASS_NumberChildrenEverBorn','PASS_Anorexia','PASS_LDL','PASS_Crohns_Disease','PASS_DS','PASS_Ever_Smoked','UKB_460K.pigment_HAIR',
'PASS_Rheumatoid_Arthritis','PASS_Type_2_Diabetes','PASS_Autism','UKB_460K.pigment_TANNING','PASS_Ulcerative_Colitis',
'UKB_460K.disease_DERMATOLOGY','PASS_Coronary_Artery_Disease','UKB_460K.disease_AID_SURE','UKB_460K.pigment_SKIN')
blood_traits = c("UKB_460K.blood_RBC_DISTRIB_WIDTH", "UKB_460K.blood_RED_COUNT", "UKB_460K.blood_WHITE_COUNT",
"UKB_460K.blood_PLATELET_COUNT", "UKB_460K.blood_EOSINOPHIL_COUNT")
autoimmune_traits = c("UKB_460K.disease_AID_SURE", "PASS_Ulcerative_Colitis", "PASS_Crohns_Disease",
"PASS_Rheumatoid_Arthritis", "PASS_Celiac", "PASS_Lupus", "PASS_Type_1_Diabetes",
"PASS_IBD", "PASS_Primary_biliary_cirrhosis")
brain_traits = c("PASS_Ever_Smoked", "UKB_460K.cov_SMOKING_STATUS", "UKB_460K.mental_NEUROTICISM", "UKB_460K.repro_MENARCHE_AGE",
"PASS_Years_of_Education2", "PASS_DS", "PASS_Schizophrenia", "UKB_460K.body_WHRadjBMIz",
"PASS_BMI1", "UKB_460K.body_BMIz")
immune_traits = c("PASS_Celiac", "PASS_Crohns_Disease", "PASS_IBD", "PASS_Lupus",
"PASS_Primary_biliary_cirrhosis", "PASS_Rheumatoid_Arthritis",
"PASS_Type_1_Diabetes", "PASS_Ulcerative_Colitis",
"UKB_460K.disease_ASTHMA_DIAGNOSED", "UKB_460K.disease_ALLERGY_ECZEMA_DIAGNOSED",
"PASS_Multiple_sclerosis", "UKB_460K.body_BMIz", "UKB_460K.disease_HYPERTENSION_DIAGNOSED",
"PASS_Triglycerides", "PASS_LDL", "PASS_HDL",
"UKB_460K.bp_DIASTOLICadjMEDz", "UKB_460K.bp_SYSTOLICadjMEDz",
"PASS_Alzheimer", "PASS_Anorexia", "PASS_Bipolar_Disorder",
"PASS_Schizophrenia", "UKB_460K.mental_NEUROTICISM", "UKB_460K.disease_AID_SURE",
"PASS_Type_2_Diabetes")
get_sd_annot = function(cell_path, annot_index = 1, flag=0){
if(flag == 0){
if(file.exists(paste0(cell_path, "/", "sd_annot_", annot_index, ".rda"))){
sd_annot = get(load(paste0(cell_path, "/", "sd_annot_", annot_index, ".rda")))
return(sd_annot)
}else{
flag = 1
}}
if(flag == 1){
num = 0
den = 0
ll <- list.files(cell_path, pattern = ".annot.gz")
for(m in 1:length(ll)){
dat <- data.frame(fread(paste0("zcat ", cell_path, "/", ll[m])))
num = num + (nrow(dat)-1) * var(dat[,4+annot_index])
den = den + (nrow(dat)-1)
rm(dat)
}
}
estd_sd_annot = sqrt(num/den)
save(estd_sd_annot, file = paste0(cell_path, "/", "sd_annot_", annot_index, ".rda"))
return(estd_sd_annot)
}
run_single_tau_analysis = function(annot_cell,
results_cell,
annotations,
traits,
index_in_results=1,
base_index = NULL,
flag = 1){
if(is.null(base_index)){base_index = index_in_results}
tau_star_table = matrix(0, length(annotations), 3)
for(annot_id in 1:length(annotations)){
cell_path = paste0(annot_cell, "/", annotations[annot_id])
sd_annot1=get_sd_annot(cell_path, annot_index=index_in_results, flag = flag)
Mref = 5961159
df = c()
for(trait_id in 1:length(traits)){
result.file=paste0(results_cell, "/", annotations[annot_id], "/", traits[trait_id], ".sumstats.part_delete")
new_table=read.table(result.file,header=F)
sc=c()
logfile = paste(results_cell, "/", annotations[annot_id], "/", traits[trait_id],".sumstats.log", sep="")
log = read.table(logfile,h=F,fill=T)
h2g = as.numeric(as.character(log[which(log$V4=="h2:"),5]))
coef1=sd_annot1*Mref/h2g
for(i in 1:dim(new_table)[1]){
tau1=as.numeric(new_table[i,base_index])
taus1=tau1*coef1
sc=c(sc,taus1)
#cat("Block ", i, "\n")
}
mean_sc=mean(sc)
se_sc=sqrt(199**2/200*var(sc))
df = rbind(df, c(mean_sc,se_sc))
}
test_tauj=meta.summaries(df[,1],df[,2],method="random")
tau=test_tauj$summary
tau_se=test_tauj$se.summary
z=tau/tau_se
cat("Printing results for annotation:", annotations[annot_id], "\n")
cat(tau, " ", tau_se, " ", 2*pnorm(-abs(z)), "\n")
tau_star_table[annot_id, ] = c(tau, tau_se, 2*pnorm(-abs(z)))
}
rownames(tau_star_table) = annotations
return(tau_star_table)
}
out1 = run_single_tau_analysis(annot_cell, results_cell, annotations = annot_names, traits = all_traits,
index_in_results = annot_idx, flag = 0)
out2 = run_single_tau_analysis(annot_cell, results_cell, annotations = annot_names, traits = brain_traits,
index_in_results = annot_idx, flag = 0)
out3 = run_single_tau_analysis(annot_cell, results_cell, annotations = annot_names, traits = c(blood_traits, autoimmune_traits),
index_in_results = annot_idx, flag = 0)
out4 = run_single_tau_analysis(annot_cell, results_cell, annotations = annot_names, traits = c(autoimmune_traits),
index_in_results = annot_idx, flag = 0)
ll <- list()
ll[["All"]] = out1
ll[["Brain"]] = out2
ll[["Blood"]] = out3
temp_trait = c("PASS_Ulcerative_Colitis")
out3 = run_single_tau_analysis(annot_cell, results_cell, annotations = annot_names[9],
traits = temp_trait,
index_in_results = annot_idx, flag = 1)
|
ssblup <-
function(Zty, ZtX, ZtZ, coefs, rdm, tau) {
  # BLUPs of the random effects from smoothing-spline cross-products:
  # shrinks the fixed-effect residual Z'(y - X coefs) by the variance
  # component(s) in `tau`.  `rdm` gives the number of coefficients per
  # variance component when `tau` is a vector; `ZtZ` may be a full matrix
  # or just its diagonal.
  resid <- Zty - ZtX %*% coefs
  if (length(tau) == 1L) {
    # Single variance component.
    if (is.matrix(ZtZ)) {
      shrink <- diag(nrow(ZtZ)) - ZtZ %*% pinvsm(diag(rep(1/tau, nrow(ZtZ))) + ZtZ)
      bhat <- as.numeric(tau * (shrink %*% resid))
    } else {
      bhat <- as.numeric(tau * (1 - ZtZ / ((1/tau) + ZtZ)) * resid)
    }
  } else {
    # One variance component per block of rdm coefficients.
    taus <- rep(tau, rdm)
    if (is.matrix(ZtZ)) {
      shrink <- diag(nrow(ZtZ)) - ZtZ %*% pinvsm(diag(rep(1/tau, rdm)) + ZtZ)
      bhat <- as.numeric(taus * (shrink %*% resid))
    } else {
      bhat <- as.numeric(taus * (1 - ZtZ / (rep(1/tau, rdm) + ZtZ)) * resid)
    }
  }
  names(bhat) <- names(Zty)
  bhat
}
function(Zty,ZtX,ZtZ,coefs,rdm,tau){
Zte <- Zty-ZtX%*%coefs
if(length(tau)==1L){
if(is.matrix(ZtZ)){
bhat <- as.numeric(tau*(diag(nrow(ZtZ))-ZtZ%*%pinvsm(diag(rep(1/tau,nrow(ZtZ)))+ZtZ))%*%Zte)
} else {
bhat <- as.numeric(tau*(1-ZtZ/((1/tau)+ZtZ))*Zte)
}
} else {
if(is.matrix(ZtZ)){
bhat <- as.numeric(rep(tau,rdm)*(diag(nrow(ZtZ))-ZtZ%*%pinvsm(diag(rep(1/tau,rdm))+ZtZ))%*%Zte)
} else {
bhat <- as.numeric(rep(tau,rdm)*(1-ZtZ/((rep(1/tau,rdm))+ZtZ))*Zte)
}
}
names(bhat) <- names(Zty)
return(bhat)
} |
# Auto-generated fuzzing reproduction case (AFL/valgrind harness): call the
# internal CNull sampler with an extreme-valued 10x3 input matrix and zero
# repetitions, to check it does not crash or leak.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31635986806604e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(10L, 3L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) | /CNull/inst/testfiles/communities_individual_based_sampling_alpha/AFL_communities_individual_based_sampling_alpha/communities_individual_based_sampling_alpha_valgrind_files/1615779708-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 348 | r | testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31635986806604e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(10L, 3L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) |
#Caching the Inverse of a Matrix using the functions makeCacheMatrix and cacheSolve
#first function: makeCacheMatrix creates a special
# "matrix" object with setters and getters for the matrix and its inverse,
# allowing the inverse to be cached
makeCacheMatrix <- function(x = matrix()) {
  # Wrap a matrix in a list of accessors so its inverse can be cached.
  # The cached inverse is NULL until setInverse() stores one, and it is
  # invalidated whenever the wrapped matrix is replaced via set().
  inv <- NULL
  set <- function(y) {
    # "<<-" writes into this closure's environment, not the caller's.
    x <<- y
    inv <<- NULL
  }
  get <- function() x
  setInverse <- function(inverse) inv <<- inverse
  getInverse <- function() inv
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
#second function: cacheSolve
#computes the inverse of the special "matrix" object returned by makeCacheMatrix
#and decides whether to calculate the inverse or retrieve it from the cache
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x' (a makeCacheMatrix object),
  ## reading it from the cache when one has already been computed and
  ## storing it there otherwise.  Extra arguments are forwarded to solve().
  cached <- x$getInverse()
  if (!is.null(cached)) {
    message("getting cache data")
    return(cached)
  }
  inv <- solve(x$get(), ...)
  x$setInverse(inv)
  inv
}
| /cachematrix.R | no_license | AGAMFIRE/ProgrammingAssignment2 | R | false | false | 931 | r | #Caching the Inverse of a Matrix using the functions makeCacheMatrix and cacheSolve
#first function makeCacheMatrix it creates a special
# matrix to set and get the value of matrix and inverse
# Basically to cache matrix's inverse
makeCacheMatrix<- function( x = matrix()){
j<-NULL
set<-function(y){
x<<-y #operator "<<-" assigns the value of this object to a different environment
j<<-NULL
}
get<- function()x
setInverse<-function(inverse)j<<-inverse
getInverse<-function()j
list(set=set,get=get,
setInverse=setInverse,
getInverse=getInverse)
}
#second function cacheSolve
#computes the inverse of the special matrix returned by the above function
#decides to either calculate or retrive the cache
cacheSolve <- function(x,...){
## Return a matrix that is the inverse of 'x'
j<-x$getInverse()
if(!is.null(j)){
message("getting cache data")
return(j)
}
mat<-x$get()
j<-solve(mat,...)
x$setInverse(j)
j
}
|
# Tests for the correlation- and cross-correlation-based distances:
# each C++-backed implementation is checked against the reference
# implementation in TSclust/dtwclust on random series, plus corner cases.
context("Correlation-based distance")
# corDist_fast must match TSclust::diss.COR for both a given beta and the
# beta = NULL reference form (here requested via beta = 0).
test_that("corDist_fast equivalent to TSclust::diss.COR (apart from some special cases)", {
for (i in 1:50) {
# not too short series, or precision difference between R implementation in
# diss.COR and our own C++ implementation becomes too relevant for some
# correlations (close to -1 and 1)
n = floor(runif(n = 1, min = 10, max = 200))
ts1 <- rnorm(n)
ts2 <- rnorm(n)
beta <- abs(rnorm(1))
correlation <- cor(ts1, ts2)
if (!is.na(correlation)) { # one series might be constant
expect_equal(corDist_fast(ts1, ts2, beta),
TSclust::diss.COR(ts1 ,ts2, beta))
expect_equal(corDist_fast(ts1, ts2, 0),
TSclust::diss.COR(ts1 ,ts2, NULL))
}
}
})
# Normalized cross-correlation must match the dtwclust reference.
test_that("crossCorNormalized same as dtwclust::NCCc", {
for (i in 1:50) {
n = floor(runif(n = 1, min = 200, max = 1000))
ts1 <- rnorm(n)
ts2 <- rnorm(n)
expect_equal(crossCorNormalized(ts1, ts2), dtwclust::NCCc(ts1 ,ts2))
}
})
# Degenerate inputs: an all-zero series yields a zero cross-correlation
# vector of length 2n - 1, and constant series behave like zero series.
test_that("crossCorNormalized corner cases work", {
for (i in 1:10) {
n <- floor(runif(n = 1, min = 2, max = 200))
ts1 <- rnorm(n)
if (!all(ts1 == 0)) {
expect_equal(crossCorNormalized(ts1, rep(0, n)), rep(0, 2*n - 1))
expect_equal(crossCorNormalized(rep(0, n), ts1), rep(0, 2*n - 1))
n2 <- floor(runif(n = 1, min = 2, max = 200))
expect_equal(crossCorNormalized(rep(0, n), rep(0, n2)),
crossCorNormalized(rep(ts1[1], n), rep(ts1[1], n2)))
}
}
})
# Univariate shape-based distance must match dtwclust::SBD.
test_that("shapeBasedDistance (uni-variate) same as dtwclust::SBD", {
for (i in 1:50) {
n = floor(runif(n = 1, min = 2, max = 200))
ts1 <- rnorm(n)
ts2 <- rnorm(n)
expect_equal(shapeBasedDistance(ts1,ts2), dtwclust::SBD(ts1,ts2)$dist)
}
})
# SBD is bounded in [0, 2] even when one series is constant.
test_that("shapeBasedDistance (uni-variate) for constant series valid", {
for (i in 1:10) {
n = floor(runif(n = 1, min = 2, max = 200))
ts1 <- rnorm(n)
ts2 <- rep(rnorm(1), times = n)
ccDist <- shapeBasedDistance(ts1,ts2)
expect_true(ccDist >= 0 & ccDist <= 2)
}
})
# Duplicating a univariate series across attributes must not change SBD.
test_that("shapeBasedDistance (multi-variate) for identical attributes same as univariate", {
for (i in 1:10) {
n = floor(runif(n = 1, min = 2, max = 200))
tsDim <- floor(runif(1, min = 2, max = 5))
ts1 <- rnorm(n)
ts2 <- rnorm(n)
tsMult1 <- matrix(rep(ts1, times = tsDim), ncol = tsDim)
tsMult2 <- matrix(rep(ts2, times = tsDim), ncol = tsDim)
expect_equal(shapeBasedDistance(ts1, ts2), shapeBasedDistance(tsMult1, tsMult2))
}
})
# Multivariate SBD stays within its theoretical [0, 2] range.
test_that("shapeBasedDistance (multi-variate) in expected range", {
for (i in 1:50) {
tsLength <- floor(runif(1, min = 2, max = 50))
tsDim <- floor(runif(1, min = 2, max = 5))
ts1 <- matrix(rnorm(n = tsLength * tsDim), nrow = tsLength)
ts2 <- matrix(rnorm(n = tsLength * tsDim), nrow = tsLength)
ccDist <- shapeBasedDistance(ts1,ts2)
expect_true(ccDist >= 0 & ccDist <= 2)
}
})
# ... and remains valid when some attributes of one series are constant.
test_that("shapeBasedDistance (multi-variate) for constant series valid", {
for (i in 1:10) {
tsLength <- floor(runif(1, min = 2, max = 50))
tsDim <- floor(runif(1, min = 2, max = 5))
ts1 <- matrix(rnorm(n = tsLength * tsDim), nrow = tsLength)
ts2 <- matrix(rnorm(n = tsLength * tsDim), nrow = tsLength)
# Set random columns to a constant
ts2[, sample(seq_len(tsDim), size = floor(runif(1, min = 1, max = tsDim + 1)))] <-
rep(rnorm(1), times = tsLength)
ccDist <- shapeBasedDistance(ts1,ts2)
expect_true(ccDist >= 0 & ccDist <= 2)
}
})
| /tests/testthat/testCOR.R | permissive | vishalbelsare/FastTSDistances | R | false | false | 3,534 | r | context("Correlation-based distance")
test_that("corDist_fast equivalent to TSclust::diss.COR (apart from some special cases)", {
for (i in 1:50) {
# not too short series, or precision difference between R implementation in
# diss.COR and our own C++ implementation becomes too relevant for some
# correlations (close to -1 and 1)
n = floor(runif(n = 1, min = 10, max = 200))
ts1 <- rnorm(n)
ts2 <- rnorm(n)
beta <- abs(rnorm(1))
correlation <- cor(ts1, ts2)
if (!is.na(correlation)) { # one series might be constant
expect_equal(corDist_fast(ts1, ts2, beta),
TSclust::diss.COR(ts1 ,ts2, beta))
expect_equal(corDist_fast(ts1, ts2, 0),
TSclust::diss.COR(ts1 ,ts2, NULL))
}
}
})
test_that("crossCorNormalized same as dtwclust::NCCc", {
for (i in 1:50) {
n = floor(runif(n = 1, min = 200, max = 1000))
ts1 <- rnorm(n)
ts2 <- rnorm(n)
expect_equal(crossCorNormalized(ts1, ts2), dtwclust::NCCc(ts1 ,ts2))
}
})
test_that("crossCorNormalized corner cases work", {
for (i in 1:10) {
n <- floor(runif(n = 1, min = 2, max = 200))
ts1 <- rnorm(n)
if (!all(ts1 == 0)) {
expect_equal(crossCorNormalized(ts1, rep(0, n)), rep(0, 2*n - 1))
expect_equal(crossCorNormalized(rep(0, n), ts1), rep(0, 2*n - 1))
n2 <- floor(runif(n = 1, min = 2, max = 200))
expect_equal(crossCorNormalized(rep(0, n), rep(0, n2)),
crossCorNormalized(rep(ts1[1], n), rep(ts1[1], n2)))
}
}
})
test_that("shapeBasedDistance (uni-variate) same as dtwclust::SBD", {
for (i in 1:50) {
n = floor(runif(n = 1, min = 2, max = 200))
ts1 <- rnorm(n)
ts2 <- rnorm(n)
expect_equal(shapeBasedDistance(ts1,ts2), dtwclust::SBD(ts1,ts2)$dist)
}
})
test_that("shapeBasedDistance (uni-variate) for constant series valid", {
for (i in 1:10) {
n = floor(runif(n = 1, min = 2, max = 200))
ts1 <- rnorm(n)
ts2 <- rep(rnorm(1), times = n)
ccDist <- shapeBasedDistance(ts1,ts2)
expect_true(ccDist >= 0 & ccDist <= 2)
}
})
test_that("shapeBasedDistance (multi-variate) for identical attributes same as univariate", {
for (i in 1:10) {
n = floor(runif(n = 1, min = 2, max = 200))
tsDim <- floor(runif(1, min = 2, max = 5))
ts1 <- rnorm(n)
ts2 <- rnorm(n)
tsMult1 <- matrix(rep(ts1, times = tsDim), ncol = tsDim)
tsMult2 <- matrix(rep(ts2, times = tsDim), ncol = tsDim)
expect_equal(shapeBasedDistance(ts1, ts2), shapeBasedDistance(tsMult1, tsMult2))
}
})
test_that("shapeBasedDistance (multi-variate) in expected range", {
for (i in 1:50) {
tsLength <- floor(runif(1, min = 2, max = 50))
tsDim <- floor(runif(1, min = 2, max = 5))
ts1 <- matrix(rnorm(n = tsLength * tsDim), nrow = tsLength)
ts2 <- matrix(rnorm(n = tsLength * tsDim), nrow = tsLength)
ccDist <- shapeBasedDistance(ts1,ts2)
expect_true(ccDist >= 0 & ccDist <= 2)
}
})
test_that("shapeBasedDistance (multi-variate) for constant series valid", {
for (i in 1:10) {
tsLength <- floor(runif(1, min = 2, max = 50))
tsDim <- floor(runif(1, min = 2, max = 5))
ts1 <- matrix(rnorm(n = tsLength * tsDim), nrow = tsLength)
ts2 <- matrix(rnorm(n = tsLength * tsDim), nrow = tsLength)
# Set random columns to a constant
ts2[, sample(seq_len(tsDim), size = floor(runif(1, min = 1, max = tsDim + 1)))] <-
rep(rnorm(1), times = tsLength)
ccDist <- shapeBasedDistance(ts1,ts2)
expect_true(ccDist >= 0 & ccDist <= 2)
}
})
|
#
# This test file has been generated by kwb.test::create_test_files()
#
# Calling leafValues() with no arguments must raise an error, because the
# required argument "x" has no default.
test_that("leafValues() works", {
expect_error(
kwb.vs2dh:::leafValues()
# argument "x" is missing, with no default
)
})
| /tests/testthat/test-function-leafValues.R | permissive | KWB-R/kwb.vs2dh | R | false | false | 210 | r | #
# This test file has been generated by kwb.test::create_test_files()
#
test_that("leafValues() works", {
expect_error(
kwb.vs2dh:::leafValues()
# argument "x" is missing, with no default
)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/step1_choosePortfolio_module.R
\name{step1_choosePortfolioUI}
\alias{step1_choosePortfolioUI}
\alias{step1_choosePortfolio}
\title{step1_choosePortfolioUI}
\usage{
step1_choosePortfolioUI(id)
step1_choosePortfolio(input, output, session, active = reactive(TRUE),
currstep = reactive(-1), portfolioID = reactive(""))
}
\arguments{
\item{id}{Namespace ID for the module.}
\item{input, output, session}{Shiny input, output, and session objects.}
\item{active}{reactive expression whether the module state should be updated.}
\item{currstep}{current selected step.}
\item{portfolioID}{selected portfolio ID.}
}
\value{
List of tags.
The returned list also includes reactive navigation state
constructed via \code{\link[=outputNavigation]{outputNavigation()}}.
portfolioID Id of selected portfolioID
tbl_portfoliosData POData model association table.
newstep navigation step
}
\description{
UI/View for the step1_choosePortfolio.
Server logic to step1_choosePortfolio
}
| /BFE_RShiny/oasisui/man/step1_choosePortfolio.Rd | permissive | smacintyreR/OasisUI | R | false | true | 1,055 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/step1_choosePortfolio_module.R
\name{step1_choosePortfolioUI}
\alias{step1_choosePortfolioUI}
\alias{step1_choosePortfolio}
\title{step1_choosePortfolioUI}
\usage{
step1_choosePortfolioUI(id)
step1_choosePortfolio(input, output, session, active = reactive(TRUE),
currstep = reactive(-1), portfolioID = reactive(""))
}
\arguments{
\item{id}{Namespace ID for the module.}
\item{input, output, session}{Shiny input, output, and session objects.}
\item{active}{reactive expression whether the module state should be updated.}
\item{currstep}{current selected step.}
\item{portfolioID}{selected portfolio ID.}
}
\value{
List of tags.
The returned list also includes reactive navigation state
constructed via \code{\link[=outputNavigation]{outputNavigation()}}.
portfolioID Id of selected portfolioID
tbl_portfoliosData POData model association table.
newstep navigation step
}
\description{
UI/View for the step1_choosePortfolio.
Server logic to step1_choosePortfolio
}
|
# climate
library(httr)
library(jsonlite)
library(tidyverse)
library(magrittr)
library(beepr)
source(here::here("api-key.R"))
source(here::here("functions", "regulations-gov-API-search.R"))
search_keyword_page <- search_keyword_page4
# climate NPRMs
# test with first 10k
climatePR <- map_dfr(.x = c(1:20),
.f = search_keyword_page4,
documenttype = "Proposed Rule",
keyword = "climate change",
lastModifiedDate = Sys.time())
climatePR %>%
filter(!is.na(postedDate)) %>%
arrange(postedDate) %>%
head() %>%
select(postedDate, lastpage)
# # up to 100k
climatePR100 <- map_dfr(.x = c(11:100),
.f = search_keyword_page,
documenttype = "Proposed Rule",
keyword = "climate change")
# # inspect
climatePR100 %>%
filter(!is.na(postedDate)) %>%
arrange(postedDate) %>%
head() %>%
select(postedDate, page)
climatePR %>%
mutate(year = str_sub(postedDate, 1,4) %>% as.numeric()) %>%
ggplot() +
aes(x = year, fill = documentType) +
geom_bar()
#
climatePR %<>% full_join(climatePR100)
save(climatePR, file = here::here("data",
str_c("climatePR",
Sys.Date(),
".Rdata")))
##################################
# climate Rules
# test with 1 batch of 250
climateFR <- search_keyword_page4(keyword = "climate change",
documenttype = "Rule",
lastModifiedDate = Sys.time()) #NOT SYS DATE!!
# test with first 10k
# First 10k Rules (20 pages).
# BUG FIX: the original wrote `lastModifiedDate <- Sys.time()` inside the call.
# In R, `f(a <- b)` assigns `a` in the calling environment and passes the value
# *positionally* (into `...`), not as the named `lastModifiedDate` argument.
# Use `=` so the argument is actually passed by name.
climateFR <- map_dfr(.x = c(1:20),
                     .f = search_keyword_page,
                     documenttype = "Rule",
                     keyword = "climate change",
                     lastModifiedDate = Sys.time())
# if(content$meta$lastPage){
# lastModifiedDate <-- content$data$attributes$lastModifiedDate %>% tail(1)
# #lastModifiedDate <-- Sys.time() %>% str_remove(" [A-Z]")
# }
# up to 100k
climateFR2 <- map_dfr(.x = c(1:20),
.f = search_keyword_page,
documenttype = "Rule",
keyword = "climate change")
# inspect
climateFR2 %>%
filter(!is.na(postedDate)) %>%
arrange(postedDate) %>%
head() %>%
select(postedDate, page)
climateFR2 %>% count(documentType)
climateFR %>%
mutate(year = str_sub(postedDate, 1,4) %>% as.numeric()) %>%
ggplot() +
aes(x = year, fill = documentType) +
geom_bar()
climateFR %<>% full_join(climateFR2)
climateFR %>% filter(is.na(postedDate)) %>% count(docketId, lastpage, sort= T)
save(climateFR, file = here::here("data",
str_c("climateFR",
Sys.Date(),
".Rdata")))
###################
# climate COMMENTS
# initialize
climatecomments1 <- map_dfr(.x = c(1),
.f = search_keyword_page4,
documenttype = "Public Submission",
keyword = "climate change",
lastModifiedDate = Sys.time() %>% str_remove(" [A-Z]"))
# first 5k
climatecomments <- map_dfr(.x = c(1:20),
.f = possibly(search_keyword_page4, otherwise = climatecomments1),
documenttype = "Public Submission",
keyword = "climate change",
lastModifiedDate = Sys.time() %>% str_remove(" [A-Z]"))
unique(climatecomments$lastpage)
## begin loop (as of 2021-02-06, there were ~XXX climate comments, so repeat 5+ times)
date <- climatecomments$lastModifiedDate %>% min()
# over 5k climate comments before 1 AM
# date <- "2018-04-26T10:58:55Z"
while(climatecomments$lastModifiedDate %>% min() > as.Date("1993-01-01")){
# next 5k
climate2 <- map_dfr(.x = c(1:20),
.f = possibly(search_keyword_page4, otherwise = climatecomments1),
documenttype = "Public Submission",
keyword = "climate change",
# starting at the last modified date (the function arranges by last modified date)
lastModifiedDate = date
#"2020-05-14T23:47:37Z"# there are more than 5k ej comenst on 2020-5-15
)
# if we get some, add them
if(nrow(climate2) > 0){
print(nrow(climate2))
climatecomments$lastModifiedDate %>% min() %>% paste(" = old date") %>% print()
climate2$lastModifiedDate %>% min() %>% paste(" = current date") %>% print()
# inspect
climate2 %>%
ggplot() +
aes(x = as.Date(postedDate), fill = documentType) +
geom_bar()
############
# JOIN #
climatecomments %<>% full_join(climate2)
climatecomments %>%
ggplot() +
aes(x = as.Date(postedDate), fill = documentType) +
geom_bar()
# Repeat above
# TODO make while loop in function
file = here::here("data",
str_c("climatecomments",
Sys.Date(),
".Rdata"))
save(climatecomments, file = file)
# if we are getting stuck on the same date (i.e. if there are >5000 comments on a date)
if(climatecomments$lastModifiedDate %>%
min() == date ){
beep()
# date rounded down to the nearist 20:00 hrs
date <- climatecomments$lastModifiedDate %>% min() %>% str_replace("T2.", "T20")
if(climatecomments$lastModifiedDate %>%
min() == date){
# date rounded down to the nearist 1X:00 hrs
date <- climatecomments$lastModifiedDate %>% min() %>% str_replace("T2", "T1")
}
if(climatecomments$lastModifiedDate %>%
min() == date){
# date rounded down to the nearist 10:00 hrs
date <- climatecomments$lastModifiedDate %>% min() %>% str_replace("T1.", "T10")
}
if(climatecomments$lastModifiedDate %>%
min() == date){
# date rounded down to the nearist 0X:00 hrs
date <- climatecomments$lastModifiedDate %>% min() %>% str_replace("T1", "T0")
}
if(climatecomments$lastModifiedDate %>%
min() == date){
# date rounded down to the nearist 0:00 hrs
date <- climatecomments$lastModifiedDate %>% min() %>% str_replace("T0.", "T00")
}
} else {
# otherwise, the new date is the min
date <- climatecomments$lastModifiedDate %>% min()
beep(sound = 2)
}
date %>% paste(" = new date (should be current date unless the old date didn't change)") %>% print()
} else{
beep()
print(nrow(climate2))
}
Sys.sleep(50)
}
# # up to .5m if needed (but as of 2020, n = 41,591k)
# climate500 <- map_dfr(.x = c(101:500),
# .f = search_keyword_page,
# documenttype = "PS",
# keyword = "climate change")
#
# climatecomments %<>% full_join(climate500)
#
# save(climatecomments, file = here::here("data", "climatecomments.Rdata"))
| /code/regs-dot-gov-API4-bulk-CC.R | no_license | zoeang/rulemaking | R | false | false | 6,738 | r | # climate
library(httr)
library(jsonlite)
library(tidyverse)
library(magrittr)
library(beepr)
source(here::here("api-key.R"))
source(here::here("functions", "regulations-gov-API-search.R"))
search_keyword_page <- search_keyword_page4
# climate NPRMs
# test with first 10k
climatePR <- map_dfr(.x = c(1:20),
.f = search_keyword_page4,
documenttype = "Proposed Rule",
keyword = "climate change",
lastModifiedDate = Sys.time())
climatePR %>%
filter(!is.na(postedDate)) %>%
arrange(postedDate) %>%
head() %>%
select(postedDate, lastpage)
# # up to 100k
climatePR100 <- map_dfr(.x = c(11:100),
.f = search_keyword_page,
documenttype = "Proposed Rule",
keyword = "climate change")
# # inspect
climatePR100 %>%
filter(!is.na(postedDate)) %>%
arrange(postedDate) %>%
head() %>%
select(postedDate, page)
climatePR %>%
mutate(year = str_sub(postedDate, 1,4) %>% as.numeric()) %>%
ggplot() +
aes(x = year, fill = documentType) +
geom_bar()
#
climatePR %<>% full_join(climatePR100)
save(climatePR, file = here::here("data",
str_c("climatePR",
Sys.Date(),
".Rdata")))
##################################
# climate Rules
# test with 1 batch of 250
climateFR <- search_keyword_page4(keyword = "climate change",
documenttype = "Rule",
lastModifiedDate = Sys.time()) #NOT SYS DATE!!
# test with first 10k
# First 10k Rules (20 pages).
# BUG FIX: the original wrote `lastModifiedDate <- Sys.time()` inside the call.
# In R, `f(a <- b)` assigns `a` in the calling environment and passes the value
# *positionally* (into `...`), not as the named `lastModifiedDate` argument.
# Use `=` so the argument is actually passed by name.
climateFR <- map_dfr(.x = c(1:20),
                     .f = search_keyword_page,
                     documenttype = "Rule",
                     keyword = "climate change",
                     lastModifiedDate = Sys.time())
# if(content$meta$lastPage){
# lastModifiedDate <-- content$data$attributes$lastModifiedDate %>% tail(1)
# #lastModifiedDate <-- Sys.time() %>% str_remove(" [A-Z]")
# }
# up to 100k
climateFR2 <- map_dfr(.x = c(1:20),
.f = search_keyword_page,
documenttype = "Rule",
keyword = "climate change")
# inspect
climateFR2 %>%
filter(!is.na(postedDate)) %>%
arrange(postedDate) %>%
head() %>%
select(postedDate, page)
climateFR2 %>% count(documentType)
climateFR %>%
mutate(year = str_sub(postedDate, 1,4) %>% as.numeric()) %>%
ggplot() +
aes(x = year, fill = documentType) +
geom_bar()
climateFR %<>% full_join(climateFR2)
climateFR %>% filter(is.na(postedDate)) %>% count(docketId, lastpage, sort= T)
save(climateFR, file = here::here("data",
str_c("climateFR",
Sys.Date(),
".Rdata")))
###################
# climate COMMENTS
# initialize
climatecomments1 <- map_dfr(.x = c(1),
.f = search_keyword_page4,
documenttype = "Public Submission",
keyword = "climate change",
lastModifiedDate = Sys.time() %>% str_remove(" [A-Z]"))
# first 5k
climatecomments <- map_dfr(.x = c(1:20),
.f = possibly(search_keyword_page4, otherwise = climatecomments1),
documenttype = "Public Submission",
keyword = "climate change",
lastModifiedDate = Sys.time() %>% str_remove(" [A-Z]"))
unique(climatecomments$lastpage)
## begin loop (as of 2021-02-06, there were ~XXX climate comments, so repeat 5+ times)
date <- climatecomments$lastModifiedDate %>% min()
# over 5k climate comments before 1 AM
# date <- "2018-04-26T10:58:55Z"
while(climatecomments$lastModifiedDate %>% min() > as.Date("1993-01-01")){
# next 5k
climate2 <- map_dfr(.x = c(1:20),
.f = possibly(search_keyword_page4, otherwise = climatecomments1),
documenttype = "Public Submission",
keyword = "climate change",
# starting at the last modified date (the function arranges by last modified date)
lastModifiedDate = date
#"2020-05-14T23:47:37Z"# there are more than 5k ej comenst on 2020-5-15
)
# if we get some, add them
if(nrow(climate2) > 0){
print(nrow(climate2))
climatecomments$lastModifiedDate %>% min() %>% paste(" = old date") %>% print()
climate2$lastModifiedDate %>% min() %>% paste(" = current date") %>% print()
# inspect
climate2 %>%
ggplot() +
aes(x = as.Date(postedDate), fill = documentType) +
geom_bar()
############
# JOIN #
climatecomments %<>% full_join(climate2)
climatecomments %>%
ggplot() +
aes(x = as.Date(postedDate), fill = documentType) +
geom_bar()
# Repeat above
# TODO make while loop in function
file = here::here("data",
str_c("climatecomments",
Sys.Date(),
".Rdata"))
save(climatecomments, file = file)
# if we are getting stuck on the same date (i.e. if there are >5000 comments on a date)
if(climatecomments$lastModifiedDate %>%
min() == date ){
beep()
# date rounded down to the nearist 20:00 hrs
date <- climatecomments$lastModifiedDate %>% min() %>% str_replace("T2.", "T20")
if(climatecomments$lastModifiedDate %>%
min() == date){
# date rounded down to the nearist 1X:00 hrs
date <- climatecomments$lastModifiedDate %>% min() %>% str_replace("T2", "T1")
}
if(climatecomments$lastModifiedDate %>%
min() == date){
# date rounded down to the nearist 10:00 hrs
date <- climatecomments$lastModifiedDate %>% min() %>% str_replace("T1.", "T10")
}
if(climatecomments$lastModifiedDate %>%
min() == date){
# date rounded down to the nearist 0X:00 hrs
date <- climatecomments$lastModifiedDate %>% min() %>% str_replace("T1", "T0")
}
if(climatecomments$lastModifiedDate %>%
min() == date){
# date rounded down to the nearist 0:00 hrs
date <- climatecomments$lastModifiedDate %>% min() %>% str_replace("T0.", "T00")
}
} else {
# otherwise, the new date is the min
date <- climatecomments$lastModifiedDate %>% min()
beep(sound = 2)
}
date %>% paste(" = new date (should be current date unless the old date didn't change)") %>% print()
} else{
beep()
print(nrow(climate2))
}
Sys.sleep(50)
}
# # up to .5m if needed (but as of 2020, n = 41,591k)
# climate500 <- map_dfr(.x = c(101:500),
# .f = search_keyword_page,
# documenttype = "PS",
# keyword = "climate change")
#
# climatecomments %<>% full_join(climate500)
#
# save(climatecomments, file = here::here("data", "climatecomments.Rdata"))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stemming.R
\name{GetStemNames}
\alias{GetStemNames}
\title{\code{GetStemNames}}
\usage{
GetStemNames(tokens, counts)
}
\arguments{
\item{tokens}{A character vector containing the tokens to be mapped.}
\item{counts}{A numeric vector representing the counts of each word.}
}
\value{
A character vector with the same length as \code{tokens} that contains the
new mapped tokens.
}
\description{
Stem a set of tokens and map each token to the most frequent
token in the set which also has the same stem.
}
\details{
On its own, stemming can often produce text which is not readable
because word stems are often not real words.
This function uses the Snowball stemming algorithm from the package
\code{SnowballC} and some additional heuristics to map each token
to the token in the set which has the same stem and the highest count.
For example, if the tokens \code{search} and \code{searched} have
counts of 4 and 5 in the text, then both will be mapped to \code{searched}
as it appears more frequently.
}
\examples{
GetStemNames(c("string", "strings", "stringing", "hello"), c(10, 2, 2, 10))
}
| /man/GetStemNames.Rd | no_license | PoLabs/flipTextAnalysis | R | false | true | 1,256 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stemming.R
\name{GetStemNames}
\alias{GetStemNames}
\title{\code{GetStemNames}}
\usage{
GetStemNames(tokens, counts)
}
\arguments{
\item{tokens}{A character vector containing the tokens to be mapped.}
\item{counts}{A numeric vector representing the counts of each word.}
}
\value{
A character vector with the same length as \code{tokens} that contains the
new mapped tokens.
}
\description{
Stem a set of tokens and map each token to the most frequent
token in the set which also has the same stem.
}
\details{
On its own, stemming can often produce text which is not readable
because word stems are often not real words.
This function uses the Snowball stemming algorithm from the package
\code{SnowballC} and some additional heuristics to map each token
to the token in the set which has the same stem and the highest count.
For example, if the tokens \code{search} and \code{searched} have
counts of 4 and 5 in the text, then both will be mapped to \code{searched}
as it appears more frequently.
}
\examples{
GetStemNames(c("string", "strings", "stringing", "hello"), c(10, 2, 2, 10))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGlobals.R
\docType{data}
\name{basejumpTestsURL}
\alias{basejumpTestsURL}
\title{basejump test data URL}
\format{An object of class \code{character} of length 1.}
\usage{
basejumpTestsURL
}
\description{
basejump test data URL
}
\note{
Updated 2019-08-21.
}
\examples{
basejumpTestsURL
}
\keyword{internal}
| /man/basejumpTestsURL.Rd | permissive | trichelab/basejump | R | false | true | 388 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGlobals.R
\docType{data}
\name{basejumpTestsURL}
\alias{basejumpTestsURL}
\title{basejump test data URL}
\format{An object of class \code{character} of length 1.}
\usage{
basejumpTestsURL
}
\description{
basejump test data URL
}
\note{
Updated 2019-08-21.
}
\examples{
basejumpTestsURL
}
\keyword{internal}
|
# Entry point for the Shiny application: source the UI and server
# definitions, then launch the app.
library(shiny)
source("app_ui.R")      # provides `ui` used below
source("app_server.R")  # provides `server` used below
shinyApp(ui = ui, server = server)
source("app_ui.R")
source("app_server.R")
shinyApp(ui = ui, server = server) |
path = 'F:\\Kartheek\\Data\\ml-competitions\\ML - Funding Successful Projects'
setwd(path)
# # read input files # #
data = fread('input-data\\train.csv', stringsAsFactors = TRUE)
ftest = fread('input-data\\test.csv', stringsAsFactors = TRUE)
# # load functions # #
source('https://raw.githubusercontent.com/kartheekpnsn/machine-learning-codes/master/R/functions.R')
# # get document term matrix from text # #
# # get document term matrix from text # #
# Build a document-term matrix (DTM) from raw keyword strings and split it
# back into train and test parts.
#   keywords       : character vector of keyword strings, train rows first,
#                    then test rows
#   project_id     : document ids, same length as `keywords`
#   n_train        : number of leading rows belonging to the training set;
#                    default 108129 reproduces the previously hard-coded split
#   term_count_min : minimum corpus frequency for a term to be kept (was a
#                    hard-coded 150)
# Returns list(dtm_test = <data.table>, dtm_train = <data.table>).
text_dtm = function(keywords, project_id, n_train = 108129, term_count_min = 150) {
	library(tm)
	library(SnowballC) # for stemming
	library(text2vec) # for DTM
	print("==> replacing '-' with space")
	keywords = gsub('-', ' ', as.character(keywords))
	print("==> removing stop words")
	keywords = removeWords(keywords, stopwords("english"))
	print("==> removing numbers")
	keywords = removeNumbers(keywords)
	print("==> removing double space")
	keywords = gsub('\\s+', ' ', keywords)
	# print("==> Stemming words")
	# text_data[, keywords = wordStem(keywords)]
	print("==> Splitting keywords")
	keywords = strsplit(keywords, ' ')
	print("==> Dropping words with length <= 2")
	keywords = lapply(keywords, function(x) x[nchar(x) > 2])
	print("==> Creating DTM")
	vec_train = itoken(keywords, tokenizer = word_tokenizer, ids = project_id)
	vocab = create_vocabulary(vec_train)
	# keep only terms occurring `term_count_min` or more times
	pruned_vocab = prune_vocabulary(vocab, term_count_min = term_count_min)
	vocab1 = vocab_vectorizer(pruned_vocab)
	dtm_text = create_dtm(vec_train, vocab1)
	print("Done <==")
	print("==> Returning output as dtm_train and dtm_test")
	dtm_text1 = as.data.table(as.matrix(dtm_text))
	# GENERALIZED: split by `n_train` and the actual row count instead of the
	# previous hard-coded 1:108129 / 108130:171594, so the function works for
	# any train/test sizes (the default reproduces the old behaviour exactly)
	dtm_train = dtm_text1[seq_len(n_train)]
	dtm_test = dtm_text1[(n_train + 1):nrow(dtm_text1)]
	print("Done <==")
	return(list(dtm_test = dtm_test, dtm_train = dtm_train))
}
# # word count and text length (flag = 1) # #
# # word count and text length # #
# Per-element text statistics for a character vector.
#   flag = 1 : number of characters (after removing punctuation, lowercasing)
#   flag = 2 : number of *unique* words (stopwords and numbers removed)
#   otherwise: number of duplicated words (total words - unique words)
# FIXES over the original:
#  * the duplicate-word branch used stringr::str_count() without ever loading
#    stringr in this function; replaced with base R regmatches/gregexpr
#  * leading/trailing whitespace left by removeWords() made strsplit() emit an
#    empty-string token that inflated the unique-word count; trimws() fixes it
text_word_counts = function(vector, flag = 1) {
	library(tm)
	# shared pre-processing: '-' to space, strip punctuation, lowercase
	vector = gsub('-', ' ', vector)
	vector = tolower(removePunctuation(as.character(vector)))
	if(flag == 1) {
		cat("\t ==> Getting Number of characters in the text\n")
		char_count = nchar(vector)
		cat("\t Done <==\n")
		return(char_count)
	}
	# flags 2 and 3 share the same cleaning pipeline (done once here,
	# where the original duplicated it in both branches)
	vector = removeWords(vector, stopwords("english"))
	vector = removeNumbers(vector)
	vector = trimws(gsub('\\s+', ' ', vector))
	unique_wc = unlist(lapply(strsplit(vector, '\\s'), function(x) length(unique(x))))
	if(flag == 2) {
		cat('\t ==> Get Number of words in the text\n')
		cat("\t Done <==\n")
		return(unique_wc)
	}
	cat('\t ==> Get Number of duplicate words in the text\n')
	# total word occurrences, counted with base R instead of stringr
	total_wc = lengths(regmatches(vector, gregexpr("\\w+", vector)))
	cat("\t Done <==\n")
	return(total_wc - unique_wc)
}
# # engineer features # #
# # engineer features # #
# Feature engineering for the Kickstarter funding data. Note: `data` is a
# data.table and is modified *by reference* via `:=`, then also returned.
#   data      : data.table with the raw train/test columns
#   log_apply : if TRUE, add log1p-transformed copies of day/goal features
#   test      : TRUE for the test set (skips the final_status column reorder)
# Returns the same data.table with engineered columns added and the raw
# date/text/id columns dropped.
engineerFeatures = function(data, log_apply = TRUE, test = FALSE) {
	library(text2vec)
	library(stringr)
	# disable_communication: TRUE/FALSE factor -> 0/1 factor
	data[, disable_communication := factor(as.numeric(disable_communication) - 1)]
	# # process time stamps # #
	# the four timestamp columns hold unix epoch seconds; tag them as POSIXct
	print("==> Processing dates")
	unix_timestamps = c('deadline','state_changed_at','created_at','launched_at')
	data[, c(unix_timestamps) := lapply(.SD, function(x) structure(x, class=c('POSIXct'))),
			.SDcols = unix_timestamps]
	print("Done <==")
	# # country currency processing # #
	# collapse IE/NL/DE into a single level, then convert goal to USD with
	# hard-coded FX rates (NOTE(review): rates presumably as of scrape time
	# -- confirm before reuse)
	print("==> Processing Country and Currency")
	data[, country := as.character(country)]
	data[country %in% c('IE', 'NL', 'DE'), country := 'IE']
	data[, country := as.factor(country)]
	setnames(data, 'country', 'country_name')
	data[, amount_usd := ifelse(currency == 'SEK', 0.1149 * goal,
						ifelse(currency == 'AUD', 0.761 * goal,
						ifelse(currency == 'CAD', 0.7544 * goal,
						ifelse(currency == 'DKK', 0.1509 * goal,
						ifelse(currency == 'EUR', 1.1222 * goal,
						ifelse(currency == 'GBP', 1.2746 * goal,
						ifelse(currency == 'NOK', 0.1180 * goal,
						ifelse(currency == 'NZD', 0.7220 * goal, goal))))))))]
	print("Done <==")
	# # date values # #
	# elapsed-day features between the project lifecycle timestamps
	print("==> Getting days out from dates")
	data[, launch_time_days := as.numeric(difftime(launched_at, created_at, units = 'days'))]
	data[, status_change_days := as.numeric(difftime(state_changed_at, launched_at, units = 'days'))]
	data[, time_given_days := as.numeric(difftime(deadline, launched_at, units = 'days'))]
	data[, time_to_change_days := as.numeric(difftime(state_changed_at, deadline, units = 'days'))]
	print("Done <==")
	if(log_apply) {
		# log1p copies to tame the skew of the day/goal features
		print("==> Applying log on days and goal amounts")
		data[, launch_time_days_log := log1p(launch_time_days)]
		data[, status_change_days_log := log1p(status_change_days)]
		data[, time_given_days_log := log1p(time_given_days)]
		data[, time_to_change_days_log := log1p(time_to_change_days)]
		data[, goal_log := log1p(goal)]
		data[, amount_usd_log := log1p(amount_usd)]
		print("Done <==")
	}
	# # extract month of the project # #
	# abbreviated month name ('%b') of launch/create/state-change dates
	print("==> Extracting month of the projects")
	month_cols = c('launched_at', 'created_at', 'state_changed_at')
	month_cols_create = c('launch_month', 'create_month', 'sc_month')
	data[, c(month_cols_create) := lapply(.SD, function(x) { factor(format(x, '%b')) }), .SDcols = month_cols]
	print("Done <==")
	# # clean and extract keywords, description word count (wc), letter count (lc) # #
	# delegate to text_word_counts(): flag 1 = chars, 2 = unique words, 3 = dups
	print("==> Getting Word count and Character count")
	len_cols = c('name_lc', 'desc_lc', 'keywords_lc')
	count_cols = c('name_wc', 'desc_wc', 'keywords_wc')
	count_dup_cols = c('name_dups', 'desc_dups', 'keywords_dups')
	cols = c('name', 'desc', 'keywords')
	data[, c(len_cols) := lapply(.SD, function(x) text_word_counts(x, flag = 1)), .SDcols = cols]
	data[, c(count_cols) := lapply(.SD, function(x) text_word_counts(x, flag = 2)), .SDcols = cols]
	data[, c(count_dup_cols) := lapply(.SD, function(x) text_word_counts(x, flag = 3)), .SDcols = cols]
	print("Done <==")
	# 1 if state_changed_at < deadline
	# 2 if state_changed_at is on same day as deadline
	# 0 otherwise (state_changed_at > deadline)
	print("==> Some more features")
	data[, s_change_before_deadline := factor(ifelse(state_changed_at < deadline, 1,
			ifelse(round(as.numeric(state_changed_at - deadline)) == 0, 2, 0)))]
	print("Done <==")
	# # drop dates # #
	print("==> Dropping columns")
	data[, launched_at := NULL]
	data[, state_changed_at := NULL]
	data[, created_at := NULL]
	data[, deadline := NULL]
	if(!test){
		# move final_status to the last column (train set only)
		data = subset(data, select = c(setdiff(colnames(data), 'final_status'), 'final_status'))
	}
	# # drop columns # #
	# ids and raw text are no longer needed once features are extracted;
	# backers_count is dropped because it leaks the outcome
	data[, backers_count := NULL]
	data[, project_id := NULL]
	data[, name := NULL]
	data[, desc := NULL]
	data[, keywords := NULL]
	print("Done <==")
	print("==> Returning output")
	print("Done <==")
	return(data)
}
# ---- assemble modelling data ----
# keep the test-set ids before engineerFeatures() drops project_id
ftest_id = ftest[, project_id]
# build ONE DTM over train + test keywords so both share the same vocabulary
text_data = text_dtm(keywords = c(as.character(data$keywords), as.character(ftest$keywords)),
			project_id = c(data$project_id, ftest$project_id))
data = engineerFeatures(copy(data), log_apply = TRUE)
ftest = engineerFeatures(copy(ftest), test = TRUE, log_apply = TRUE)
# append the DTM term columns to the engineered features
data = cbind(data, text_data$dtm_train)
ftest = cbind(ftest, text_data$dtm_test)
# # ML Model # #
# drop the target plus high-cardinality categoricals
# (NOTE(review): 'next' is presumably a DTM term column -- verify)
drop_cols = c('final_status', 'country', 'currency', "launch_month", 'next')
	# "create_month", "sc_month", "s_change_before_deadline", "disable_communication")
X = data[, !drop_cols, with = FALSE]
Y = data$final_status
# importantFeatures/dataSplit/xgb_train/... come from the sourced functions.R
imp = importantFeatures(X, Y)
XX = copy(X)
# XX = X[, !imp$anova[significant == F, feature], with = F]
index = dataSplit(Y, split_ratio = 0.7)
# ---- XGBoost: 70/30 holdout, then predict on the submission test set ----
xgb_fit = xgb_train(X = XX[index$train, ] , Y = Y[index$train],
		X_test = XX[-index$train, ], Y_test = Y[-index$train],
		hyper_params = list(nrounds = 141, eta = 0.05), cv = TRUE, eval_metric = 'error')
xgb_pred = xgb_predict(xgb_fit$fit, XX[-index$train])
performance_measure(predicted = xgb_pred, actual = Y[-index$train], optimal_threshold = F)
xgb_pred = xgb_predict(xgb_fit$fit, ftest[, !drop_cols, with = FALSE])
write.csv(data.table(project_id = ftest_id, final_status = round(xgb_pred)), 'xgb_submission_3.csv', row.names = F)
# ---- h2o random forest on the same split ----
library(h2o)
h2o.init(nthreads = -1, max_mem_size = '8G')
drop_cols = c('country', 'currency', "launch_month", 'next')
h2o_train = as.h2o(data[index$train, !drop_cols, with = FALSE])
h2o_test = as.h2o(data[-index$train, !drop_cols, with = FALSE])
h2o_ftest = as.h2o(ftest)
x = colnames(X)
y = 'final_status'
rf_fit = h2o.randomForest(x = x, y = y, training_frame = h2o_train, ntrees = 500, validation_frame = h2o_test, seed = 294056)
h2o_pred = as.numeric(as.vector(h2o.predict(rf_fit, h2o_ftest)$predict))
write.csv(data.table(project_id = ftest_id, final_status = round(h2o_pred)), 'rf_h2o_submission.csv', row.names = F)
| /Machine Learning Challenge/2-Funding Successful Projects/code.R | no_license | kartheekpnsn/ml-competitions | R | false | false | 8,730 | r | path = 'F:\\Kartheek\\Data\\ml-competitions\\ML - Funding Successful Projects'
setwd(path)
# # read input files # #
data = fread('input-data\\train.csv', stringsAsFactors = TRUE)
ftest = fread('input-data\\test.csv', stringsAsFactors = TRUE)
# # load functions # #
source('https://raw.githubusercontent.com/kartheekpnsn/machine-learning-codes/master/R/functions.R')
# # get document term matrix from text # #
text_dtm = function(keywords, project_id) {
library(tm)
library(SnowballC) # for stemming
library(text2vec) # for DTM
print("==> replacing '-' with space")
keywords = gsub('-', ' ', as.character(keywords))
print("==> removing stop words")
keywords = removeWords(keywords, stopwords("english"))
print("==> removing numbers")
keywords = removeNumbers(keywords)
print("==> removing double space")
keywords = gsub('\\s+', ' ', keywords)
# print("==> Stemming words")
# text_data[, keywords = wordStem(keywords)]
print("==> Splitting keywords")
keywords = strsplit(keywords, ' ')
print("==> Dropping words with length <= 2")
keywords = lapply(keywords, function(x) x[nchar(x) > 2])
print("==> Creating DTM")
vec_train = itoken(keywords, tokenizer = word_tokenizer, ids = project_id)
vocab = create_vocabulary(vec_train)
pruned_vocab = prune_vocabulary(vocab, term_count_min = 150) # words occuring 150 or more times
vocab1 = vocab_vectorizer(pruned_vocab)
dtm_text = create_dtm(vec_train, vocab1)
print("Done <==")
print("==> Returning output as dtm_train and dtm_test")
dtm_text1 = as.data.table(as.matrix(dtm_text))
dtm_train = dtm_text1[1:108129]
dtm_test = dtm_text1[108130:171594]
print("Done <==")
return(list(dtm_test = dtm_test, dtm_train = dtm_train))
}
# # word count and text length (flag = 1) # #
text_word_counts = function(vector, flag = 1) {
library(tm)
vector = gsub('-', ' ', vector)
vector = tolower(removePunctuation(as.character(vector)))
if(flag == 1) {
cat("\t ==> Getting Number of characters in the text\n")
vector_lc = nchar(vector)
cat("\t Done <==\n")
return(vector_lc)
} else if(flag == 2) {
cat('\t ==> Get Number of words in the text\n')
vector = removeWords(vector, stopwords("english"))
vector = removeNumbers(vector)
# vector = stemDocument(vector)
vector = gsub('\\s+', ' ', vector)
vector_wc = unlist(lapply(strsplit(vector, '\\s'), function(x) length(unique(x))))
cat("\t Done <==\n")
return(vector_wc)
} else {
cat('\t ==> Get Number of duplicate words in the text\n')
vector = removeWords(vector, stopwords("english"))
vector = removeNumbers(vector)
# vector = stemDocument(vector)
vector = gsub('\\s+', ' ', vector)
vector_wc = unlist(lapply(strsplit(vector, '\\s'), function(x) length(unique(x))))
vector_dup = (str_count(vector, "\\w+") - vector_wc)
cat("\t Done <==\n")
return(vector_dup)
}
}
# # engineer features # #
engineerFeatures = function(data, log_apply = TRUE, test = FALSE) {
library(text2vec)
library(stringr)
data[, disable_communication := factor(as.numeric(disable_communication) - 1)]
# # process time stamps # #
print("==> Processing dates")
unix_timestamps = c('deadline','state_changed_at','created_at','launched_at')
data[, c(unix_timestamps) := lapply(.SD, function(x) structure(x, class=c('POSIXct'))),
.SDcols = unix_timestamps]
print("Done <==")
# # country currency processing # #
print("==> Processing Country and Currency")
data[, country := as.character(country)]
data[country %in% c('IE', 'NL', 'DE'), country := 'IE']
data[, country := as.factor(country)]
setnames(data, 'country', 'country_name')
data[, amount_usd := ifelse(currency == 'SEK', 0.1149 * goal,
ifelse(currency == 'AUD', 0.761 * goal,
ifelse(currency == 'CAD', 0.7544 * goal,
ifelse(currency == 'DKK', 0.1509 * goal,
ifelse(currency == 'EUR', 1.1222 * goal,
ifelse(currency == 'GBP', 1.2746 * goal,
ifelse(currency == 'NOK', 0.1180 * goal,
ifelse(currency == 'NZD', 0.7220 * goal, goal))))))))]
print("Done <==")
# # date values # #
print("==> Getting days out from dates")
data[, launch_time_days := as.numeric(difftime(launched_at, created_at, units = 'days'))]
data[, status_change_days := as.numeric(difftime(state_changed_at, launched_at, units = 'days'))]
data[, time_given_days := as.numeric(difftime(deadline, launched_at, units = 'days'))]
data[, time_to_change_days := as.numeric(difftime(state_changed_at, deadline, units = 'days'))]
print("Done <==")
if(log_apply) {
print("==> Applying log on days and goal amounts")
data[, launch_time_days_log := log1p(launch_time_days)]
data[, status_change_days_log := log1p(status_change_days)]
data[, time_given_days_log := log1p(time_given_days)]
data[, time_to_change_days_log := log1p(time_to_change_days)]
data[, goal_log := log1p(goal)]
data[, amount_usd_log := log1p(amount_usd)]
print("Done <==")
}
# # extract month of the project # #
print("==> Extracting month of the projects")
month_cols = c('launched_at', 'created_at', 'state_changed_at')
month_cols_create = c('launch_month', 'create_month', 'sc_month')
data[, c(month_cols_create) := lapply(.SD, function(x) { factor(format(x, '%b')) }), .SDcols = month_cols]
print("Done <==")
# # clean and extract keywords, description word count (wc), letter count (lc) # #
print("==> Getting Word count and Character count")
len_cols = c('name_lc', 'desc_lc', 'keywords_lc')
count_cols = c('name_wc', 'desc_wc', 'keywords_wc')
count_dup_cols = c('name_dups', 'desc_dups', 'keywords_dups')
cols = c('name', 'desc', 'keywords')
data[, c(len_cols) := lapply(.SD, function(x) text_word_counts(x, flag = 1)), .SDcols = cols]
data[, c(count_cols) := lapply(.SD, function(x) text_word_counts(x, flag = 2)), .SDcols = cols]
data[, c(count_dup_cols) := lapply(.SD, function(x) text_word_counts(x, flag = 3)), .SDcols = cols]
print("Done <==")
# 1 if state_changed_at < deadline
# 2 if state_changed_at is on same day as deadline
# 3 if state_changed_at is > deadline
print("==> Some more features")
data[, s_change_before_deadline := factor(ifelse(state_changed_at < deadline, 1,
ifelse(round(as.numeric(state_changed_at - deadline)) == 0, 2, 0)))]
print("Done <==")
# # drop dates # #
print("==> Dropping columns")
data[, launched_at := NULL]
data[, state_changed_at := NULL]
data[, created_at := NULL]
data[, deadline := NULL]
if(!test){
data = subset(data, select = c(setdiff(colnames(data), 'final_status'), 'final_status'))
}
# # drop columns # #
data[, backers_count := NULL]
data[, project_id := NULL]
data[, name := NULL]
data[, desc := NULL]
data[, keywords := NULL]
print("Done <==")
print("==> Returning output")
print("Done <==")
return(data)
}
# --- Kickstarter model fitting (script section) ----------------------------
# Builds a shared document-term matrix over keywords, engineers features for
# the train (data) and holdout (ftest) sets, fits an xgboost model and an
# h2o random forest, and writes one submission CSV per model.
# NOTE(review): text_dtm, engineerFeatures, importantFeatures, dataSplit,
# xgb_train, xgb_predict and performance_measure are project functions
# defined elsewhere in this file.
# keep the holdout project ids before feature engineering drops the column
ftest_id = ftest[, project_id]
# one DTM over the combined corpus so train and test share a vocabulary
text_data = text_dtm(keywords = c(as.character(data$keywords), as.character(ftest$keywords)),
project_id = c(data$project_id, ftest$project_id))
# copy() so engineerFeatures' by-reference data.table edits do not mutate
# the caller's tables
data = engineerFeatures(copy(data), log_apply = TRUE)
ftest = engineerFeatures(copy(ftest), test = TRUE, log_apply = TRUE)
# append the text features to the engineered features
data = cbind(data, text_data$dtm_train)
ftest = cbind(ftest, text_data$dtm_test)
# # ML Model # #
drop_cols = c('final_status', 'country', 'currency', "launch_month", 'next')
# "create_month", "sc_month", "s_change_before_deadline", "disable_communication")
X = data[, !drop_cols, with = FALSE]
Y = data$final_status
imp = importantFeatures(X, Y)
XX = copy(X)
# XX = X[, !imp$anova[significant == F, feature], with = F]
# 70/30 train/validation split on the response
index = dataSplit(Y, split_ratio = 0.7)
xgb_fit = xgb_train(X = XX[index$train, ] , Y = Y[index$train],
X_test = XX[-index$train, ], Y_test = Y[-index$train],
hyper_params = list(nrounds = 141, eta = 0.05), cv = TRUE, eval_metric = 'error')
# validation-set performance
xgb_pred = xgb_predict(xgb_fit$fit, XX[-index$train])
performance_measure(predicted = xgb_pred, actual = Y[-index$train], optimal_threshold = F)
# score the holdout set and write the xgboost submission
# NOTE(review): drop_cols includes 'final_status', which the holdout set may
# not contain - confirm this subset does not error on the missing column
xgb_pred = xgb_predict(xgb_fit$fit, ftest[, !drop_cols, with = FALSE])
write.csv(data.table(project_id = ftest_id, final_status = round(xgb_pred)), 'xgb_submission_3.csv', row.names = F)
# --- h2o random forest ------------------------------------------------------
library(h2o)
h2o.init(nthreads = -1, max_mem_size = '8G')
# 'final_status' is intentionally kept here: it is the response column
drop_cols = c('country', 'currency', "launch_month", 'next')
h2o_train = as.h2o(data[index$train, !drop_cols, with = FALSE])
h2o_test = as.h2o(data[-index$train, !drop_cols, with = FALSE])
h2o_ftest = as.h2o(ftest)
x = colnames(X)
y = 'final_status'
rf_fit = h2o.randomForest(x = x, y = y, training_frame = h2o_train, ntrees = 500, validation_frame = h2o_test, seed = 294056)
h2o_pred = as.numeric(as.vector(h2o.predict(rf_fit, h2o_ftest)$predict))
write.csv(data.table(project_id = ftest_id, final_status = round(h2o_pred)), 'rf_h2o_submission.csv', row.names = F)
|
dispersion<-function (x,y,ulim,llim=ulim,intervals=TRUE,
 arrow.cap=0.01,arrow.gap=NA,type="a",fill=NA,lty=NA,pch=NA,
 border=NA,col=par("fg"),display.na=TRUE,...) {
 # Display dispersion limits (e.g. error bars or confidence bands) around a
 # set of central values on an existing plot.
 #
 # x, y       coordinates of the central values.  If x is a list whose first
 #            two components have equal length it is used as list(x=, y=);
 #            if y is missing, x supplies the values and the x positions
 #            become 1..length(x).
 # ulim, llim upper and lower dispersion limits, interpreted as offsets from
 #            y when intervals=TRUE or as absolute values otherwise.
 # arrow.cap  width of the cap on each bar as a proportion of plot width.
 # arrow.gap  gap between each point and the start of its bar, in user
 #            units; defaults to 2/3 of the height of the letter "O".
 # type       "a" draws capped bars (arrows); "l" draws lines and/or a
 #            filled polygon through the limits.
 # fill, lty, pch, border  appearance options used when type="l".
 # col        per-point color(s), recycled to the number of points.
 # display.na if TRUE, an NA limit is drawn as a line running to the plot
 #            edge so the missing limit remains visible.
 # ...        further arguments passed to segments()/arrows()/lines().
 #
 # Called for its side effects on the current device; a plot must already
 # be open (par("usr"), par("pin") and strheight() are consulted).
 if(is.list(x) && length(x[[1]]) == length(x[[2]])) {
  y<-x$y
  x<-x$x
 }
 # if no x values, just use integers from 1 to length(x)
 # (seq_along is safe for zero-length input, unlike 1:length(x))
 if(missing(y) && !missing(x)) {
  y<-x
  x<-seq_along(x)
 }
 # if offsets are passed, convert them to absolute values
 if(intervals) {
  llim<-y-llim
  ulim<-y+ulim
 }
 plotlim<-par("usr")
 npoints<-length(x)
 if(is.na(arrow.gap)) arrow.gap<-strheight("O")/1.5
 # this hack for matrices may not work in all cases
 if(length(col) < npoints) {
  if(is.matrix(x) && length(col) == dim(x)[2]) col<-rep(col,each=dim(x)[1])
  else col<-rep(col,npoints)
 }
 # the bar/line choice is constant over points - test it once, not per point
 arrow_bars<-toupper(type) == "A"
 # seq_len() avoids iterating over c(1, 0) when there are no points
 for(i in seq_len(npoints)) {
  if(arrow_bars) {
   if(!is.na(llim[i])) {
    # display the lower dispersion limit
    if(arrow.gap >= (y[i]-llim[i]) * 0.9) {
     # avoid the zero length arrow problem - draw a bare cap instead
     # NOTE(review): this cap half-width is in user units while the
     # arrows() cap below is in inches; confirm the asymmetry is intended
     caplen<-arrow.cap * diff(par("usr")[1:2])
     x0<-x[i]-caplen
     x1<-x[i]+caplen
     y0<-y1<-llim[i]
     segments(x0,y0,x1,y1,col=col[i],...)
    }
    else {
     caplen<-arrow.cap*par("pin")[1]
     x0<-x1<-x[i]
     y0<-y[i]-arrow.gap
     y1<-llim[i]
     arrows(x0,y0,x1,y1,length=caplen,angle=90,col=col[i],...)
    }
   }
   else {
    # lower limit is NA - optionally drop a line to the bottom plot edge
    if(display.na) {
     x0<-x1<-x[i]
     y0<-y[i]-arrow.gap
     y1<-plotlim[3]
     segments(x0,y0,x1,y1,col=col[i],...)
    }
   }
   if(!is.na(ulim[i])) {
    # display the upper dispersion limit
    if(arrow.gap >= (ulim[i]-y[i]) * 0.9) {
     caplen<-arrow.cap * diff(par("usr")[1:2])
     x0<-x[i]-caplen
     x1<-x[i]+caplen
     y0<-y1<-ulim[i]
     segments(x0,y0,x1,y1,col=col[i],...)
    }
    else {
     caplen<-arrow.cap*par("pin")[1]
     x0<-x1<-x[i]
     y0<-y[i]+arrow.gap
     y1<-ulim[i]
     arrows(x0,y0,x1,y1,length=caplen,angle=90,col=col[i],...)
    }
   }
   else {
    # upper limit is NA - optionally run a line to the top plot edge
    if(display.na) {
     x0<-x1<-x[i]
     y0<-y[i]+arrow.gap
     y1<-plotlim[4]
     segments(x0,y0,x1,y1,col=col[i],...)
    }
   }
  }
 }
 if(toupper(type) == "L") {
  # line/polygon rendering of the limits
  if(!is.na(fill)) {
   polygon(c(x,rev(x)),c(ulim,rev(llim)),col=fill,border=NA)
   if(!is.na(pch)) {
    if(is.na(lty)) points(x,y,pch=pch)
    else lines(x,y,lty=lty,pch=pch,type="b")
   }
   else {
    if(!is.na(lty)) lines(x,y,lty=lty)
   }
  }
  if(!is.na(border)) {
   lines(x,ulim,lty=border,...)
   lines(x,llim,lty=border,...)
  }
 }
}
| /plotrix/R/dispersion.R | no_license | ingted/R-Examples | R | false | false | 2,415 | r | dispersion<-function (x,y,ulim,llim=ulim,intervals=TRUE,
arrow.cap=0.01,arrow.gap=NA,type="a",fill=NA,lty=NA,pch=NA,
border=NA,col=par("fg"),display.na=TRUE,...) {
if(is.list(x) && length(x[[1]]) == length(x[[2]])) {
y<-x$y
x<-x$x
}
# if no x values, just use integers from 1 to length(x)
if(missing(y) && !missing(x)) {
y<-x
x<-1:length(x)
}
# if offsets are passed, convert them to absolute values
if(intervals) {
llim<-y-llim
ulim<-y+ulim
}
plotlim<-par("usr")
npoints<-length(x)
if(is.na(arrow.gap)) arrow.gap<-strheight("O")/1.5
# this hack for matrices may not work in all cases
if(length(col) < npoints) {
if(is.matrix(x) && length(col) == dim(x)[2]) col<-rep(col,each=dim(x)[1])
else col<-rep(col,npoints)
}
for(i in 1:npoints) {
if(toupper(type) == "A") {
if(!is.na(llim[i])) {
# display the lower dispersion limit
if(arrow.gap >= (y[i]-llim[i]) * 0.9) {
# avoid the zero length arrow problem
caplen<-arrow.cap * diff(par("usr")[1:2])
x0<-x[i]-caplen
x1<-x[i]+caplen
y0<-y1<-llim[i]
segments(x0,y0,x1,y1,col=col[i],...)
}
else {
caplen<-arrow.cap*par("pin")[1]
x0<-x1<-x[i]
y0<-y[i]-arrow.gap
y1<-llim[i]
arrows(x0,y0,x1,y1,length=caplen,angle=90,col=col[i],...)
}
}
else {
if(display.na) {
x0<-x1<-x[i]
y0<-y[i]-arrow.gap
y1<-plotlim[3]
segments(x0,y0,x1,y1,col=col[i],...)
}
}
if(!is.na(ulim[i])) {
# display the upper dispersion limit
if(arrow.gap >= (ulim[i]-y[i]) * 0.9) {
caplen<-arrow.cap * diff(par("usr")[1:2])
x0<-x[i]-caplen
x1<-x[i]+caplen
y0<-y1<-ulim[i]
segments(x0,y0,x1,y1,col=col[i],...)
}
else {
caplen<-arrow.cap*par("pin")[1]
x0<-x1<-x[i]
y0<-y[i]+arrow.gap
y1<-ulim[i]
arrows(x0,y0,x1,y1,length=caplen,angle=90,col=col[i],...)
}
}
else {
if(display.na) {
x0<-x1<-x[i]
y0<-y[i]+arrow.gap
y1<-plotlim[4]
segments(x0,y0,x1,y1,col=col[i],...)
}
}
}
}
if(toupper(type) == "L") {
if(!is.na(fill)) {
polygon(c(x,rev(x)),c(ulim,rev(llim)),col=fill,border=NA)
if(!is.na(pch)) {
if(is.na(lty)) points(x,y,pch=pch)
else lines(x,y,lty=lty,pch=pch,type="b")
}
else {
if(!is.na(lty)) lines(x,y,lty=lty)
}
}
if(!is.na(border)) {
lines(x,ulim,lty=border,...)
lines(x,llim,lty=border,...)
}
}
}
|
lslr<-function(x, dist="weibull", npar=2, reg_method="XonY") {
 # Least-squares (rank regression) fit of a life distribution to the
 # plotting positions produced by getPPP().
 #
 # x          data frame with columns "time" and "ppp" (output of getPPP).
 # dist       "weibull"/"weibull2p"/"weibull3p", "lnorm"/"lognormal"/
 #            "lognormal2p"/"lognormal3p", or experimental "gumbel".
 # npar       2 or 3 parameters; a "...3p" dist overrides this to 3.
 # reg_method "XonY" (default) or "YonX" regression direction.
 #
 # Returns a named vector of fitted parameters plus Rsqr (and AbPval for
 # 2-parameter weibull/lognormal fits).  A "warning" attribute is attached
 # when the 3rd-parameter optimization does not converge.
 ## a convergence limit is fixed here for 3rd parameter convergence
 ## no longer an argument for the R function, but still an argument to C++ functions
 limit<-1e-5
 ## input validation
 if(is.vector(x)) {
  stop("use MRR functions for casual fitting, or pre-process with getPPP")
 }
 ## bug fix: previously an unrecognized input format fell through to the
 ## length test on a NULL x$ppp and was misreported as "insufficient
 ## failure points"; check the format first, then the number of points
 if(!(names(x)[1]=="time" && names(x)[2]=="ppp")) {
  stop("input format not recognized")
 }
 if(length(x$ppp)<3) {
  stop("insufficient failure points")
 }
 ## resolve the fitted distribution family
 if(tolower(dist) %in% c("weibull","weibull2p","weibull3p")) {
  fit_dist<-"weibull"
 }else{
  if(tolower(dist) %in% c("lnorm", "lognormal","lognormal2p", "lognormal3p")) {
   fit_dist<-"lnorm"
  }else{
   if(dist=="gumbel") {
    ## Note: lslr contains experimental support for "gumbel"
    ## bug fix: fit_dist was previously left unset for "gumbel", making the
    ## fit_dist comparison below fail with "object 'fit_dist' not found"
    fit_dist<-"gumbel"
   }else{
    stop(paste0("dist argument ", dist, " is not recognized for distribution fitting"))
   }
  }
 }
 ## introducing 3p in dist argument will override any npar (or its default)
 if(tolower(dist) %in% c("weibull3p", "lognormal3p")) {
  npar<-3
 }
 ## encode the fitting options as one case number for the C++ routine:
 ## +1 Y-on-X regression, +2 three-parameter fit, +4 lognormal, +8 gumbel
 casenum<-0
 if(reg_method=="YonX") casenum<-casenum+1
 if(npar==3) casenum<-casenum+2
 if(fit_dist=="lnorm") casenum<-casenum+4
 if(dist=="gumbel") casenum<-casenum+8
 ## NOTE(review): .Call's package argument is spelled PACKAGE (matching is
 ## case-sensitive), so the lowercase "package" here is actually forwarded
 ## to the native routine as an extra argument.  Left unchanged because the
 ## registered routine may depend on the current argument list - confirm
 ## against the C++ entry point before altering.
 resultVec<-.Call("LSLR", x$time, x$ppp, limit, casenum , package="WeibullR")
 if(casenum < 4) {
  ## weibull result
  if(length(resultVec)==3) {
   ## 2-parameter fit: attach the goodness-of-fit p-value
   ## (nrow(x) used consistently; was dim(x)[1] here, length(x[,1]) below)
   prr<-AbPval(nrow(x), resultVec[3])
   outVec<-c(Eta=resultVec[1],Beta=resultVec[2],Rsqr=resultVec[3], AbPval=prr[1])
  }else{
   ## 3-parameter fit
   outVec<-c(Eta=resultVec[1],Beta=resultVec[2], t0=resultVec[3],Rsqr=resultVec[4])
   if(resultVec[5]==1) {
    warn<-"3p optimization did not converge"
    attr(outVec,"warning")<-warn
   }
  }
 }else{
  if(casenum < 8) {
   ## lognormal result
   if(length(resultVec)==3) {
    prr<-AbPval(nrow(x), resultVec[3],"lnorm")
    outVec<-c(Mulog=resultVec[1],Sigmalog=resultVec[2],Rsqr=resultVec[3], AbPval=prr[1])
   }else{
    outVec<-c(Mulog=resultVec[1],Sigmalog=resultVec[2], t0=resultVec[3],Rsqr=resultVec[4])
    if(resultVec[5]==1) {
     warn<-"3p optimization did not converge"
     attr(outVec,"warning")<-warn
    }
   }
  }else{
   ## gumbel result (no AbPval available for this family)
   if(length(resultVec)==3) {
    outVec<-c(Etalog=resultVec[1],Betalog=resultVec[2],Rsqr=resultVec[3])
   }else{
    outVec<-c(Etalog=resultVec[1],Betalog=resultVec[2], t0=resultVec[3],Rsqr=resultVec[4])
    if(resultVec[5]==1) {
     warn<-"3p optimization did not converge"
     attr(outVec,"warning")<-warn
    }
   }
  }
 }
 return(outVec)
}
| /R/lslr.r | no_license | CarlesCG/WeibullR | R | false | false | 2,671 | r | lslr<-function(x, dist="weibull", npar=2, reg_method="XonY") {
## a convergence limit is fixed here for 3rd parameter convergence
## no longer an argument for the R function, but still an argument to C++ functions
limit<-1e-5
if(is.vector(x)) {
stop("use MRR functions for casual fitting, or pre-process with getPPP")
}else{
if(names(x)[1]=="time"&&names(x)[2]=="ppp") {
## will handle the output from getPPP
}else{
if(length(x$ppp)<3) {
stop("insufficient failure points")
}else{
stop("input format not recognized")
}
}
}
## It turns out that this code is general to all fitting methods:
if(tolower(dist) %in% c("weibull","weibull2p","weibull3p")){
fit_dist<-"weibull"
}else{
if(tolower(dist) %in% c("lnorm", "lognormal","lognormal2p", "lognormal3p")){
fit_dist<-"lnorm"
}else{
if(!dist=="gumbel") {
## Note: lslr contains experimental support for "gumbel"
stop(paste0("dist argument ", dist, "is not recognized for distribution fitting"))
}
}
}
## npar<-2 ## introducing 3p in dist argument will override any npar (or its default)
if(tolower(dist) %in% c("weibull3p", "lognormal3p")){
npar<-3
}
casenum<-0
if(reg_method=="YonX") casenum=casenum+1
if(npar==3) casenum=casenum+2
if(fit_dist=="lnorm")casenum=casenum+4
if(dist=="gumbel") casenum=casenum+8
resultVec<-.Call("LSLR", x$time, x$ppp, limit, casenum , package="WeibullR")
if(casenum < 4) {
if(length(resultVec)==3) {
prr<-AbPval(dim(x)[1], resultVec[3])
outVec<-c(Eta=resultVec[1],Beta=resultVec[2],Rsqr=resultVec[3], AbPval=prr[1])
}else{
outVec<-c(Eta=resultVec[1],Beta=resultVec[2], t0=resultVec[3],Rsqr=resultVec[4])
if(resultVec[5]==1) {
warn="3p optimization did not converge"
attr(outVec,"warning")<-warn
}
}
}else{
if(casenum < 8) {
if(length(resultVec)==3) {
prr<-AbPval(length(x[,1]), resultVec[3],"lnorm")
outVec<-c(Mulog=resultVec[1],Sigmalog=resultVec[2],Rsqr=resultVec[3], AbPval=prr[1])
}else{
outVec<-c(Mulog=resultVec[1],Sigmalog=resultVec[2], t0=resultVec[3],Rsqr=resultVec[4])
if(resultVec[5]==1) {
warn="3p optimization did not converge"
attr(outVec,"warning")<-warn
}
}
}else{
if(length(resultVec)==3) {
outVec<-c(Etalog=resultVec[1],Betalog=resultVec[2],Rsqr=resultVec[3])
}else{
outVec<-c(Etalog=resultVec[1],Betalog=resultVec[2], t0=resultVec[3],Rsqr=resultVec[4])
if(resultVec[5]==1) {
warn="3p optimization did not converge"
attr(outVec,"warning")<-warn
}
}
}
}
return(outVec)
}
|
# Package dependencies.  library() is used instead of require(): require()
# merely returns FALSE when a package is missing, letting the script run on
# and fail later with a confusing error, whereas library() stops at once.
library(spatialEco)
library(sp)
library(usedist)
library(rgeos)
library(raster)
library(spatstat)
library(igraph)
library(sf)
library(rgdal)
library(gdistance)
library(otuSummary)
library(gdata)
library(maptools)
library(tidyverse)
library(reshape2)
library(data.table)
#setwd("E:/LCP sensitivity test/HPC_all_inputs")
#Create master map of all habitat created by a conservation strategy
# Run parameters, hard-coded for this HPC job:
#   i         - time step used in the LANDIS output file names (years)
#   u         - maximum link distance in meters (pairs farther apart are
#               dropped from the connectivity network below)
#   model     - scenario / input folder name
#   species   - species code embedded in the habitat raster file names
#   replicate - model replicate number
i=20
u=5000
model="rand"
species="s"
replicate=5
# Stack every per-run habitat raster for this species and sum them into a
# single occurrence-count raster (cell value = number of runs with habitat).
# NOTE(review): paste0() has no sep argument - sep="" is concatenated as an
# (empty) extra string; harmless here but misleading.
XO<- list.files(paste0(model,"/"), pattern =paste0(species,".tif$",sep=""))
ManyRunsStack<-raster::stack(paste0(model,"/",XO))
SumStack<-sum(ManyRunsStack)
# free memory - only the summed raster is needed from here on
rm(ManyRunsStack)
#Bring in ecoregion map to use for crs and extent template
Ecoregion <- raster(paste0(model,"/","Ecoregion100f.tif"))
#Read in roads file
roads <- raster(paste0(model,"/","road.tif",sep=""))
#Create empty vectors for connectivity indices
# (appended to once per analysis; written out as a table at the end)
nodes <- vector()
links <- vector()
avgnode <- vector()
totnode <- vector()
avgLCP <- vector()
avgENN <- vector()
density <- vector()
transitivity <- vector()
#Time steps
TimestepList <- as.character(seq(from=0, to=80, by=10))
#Connectivity analysis
# Species-code groups used to classify community types from the per-species
# biomass rasters written by the simulation.
Longleaf<-"PinuPalu"
Loblolly<-"PinuTaed"
Pine<- c("PinuEchi","PinuTaed","PinuVirg")
Hardwood<-c("QuerAlba","AcerRubr","LiriTuli","LiquStyr","OxydArbo","CornFlor")
# All output images for this scenario/replicate; filtered by name below.
Year0<-list.files(paste0(model,"/",model,replicate),pattern=(".img$"))
#paste0("inputs/", model, "/",model,replicate,"/")
# Per-group biomass rasters for time step i.
Longleaf_Stack<-raster(paste0(model,"/",model,replicate,"/",Year0[Year0 %in% paste0("bio-", Longleaf,"-",i,".img")]))
Loblolly_Stack<-raster(paste0(model,"/",model,replicate,"/",Year0[Year0 %in% paste0("bio-", Loblolly,"-",i,".img")]))
Pine_Stack<-raster::stack(paste0(model,"/",model,replicate,"/",Year0[Year0 %in% paste0("bio-", Pine,"-",i,".img")]))
Hardwood_Stack<-raster::stack(paste0(model,"/",model,replicate,"/",Year0[Year0 %in% paste0("bio-", Hardwood,"-",i,".img")]))
Total<-raster(paste0(model,"/",model,replicate,"/",Year0[Year0 %in% paste0("bio-TotalBiomass-", i, ".img")]))
###Reclassification of biomass into community types
### Each rule marks qualifying cells with a small class code and all other
### cells with 999 so that min() across the stack picks the first matching
### rule per cell (lower rule number wins).
###Rule 1
Longleaf_Stack[Longleaf_Stack> 0.25*(Total),]<-1
Longleaf_Stack[!Longleaf_Stack==1]<-999
### Rule 2
Loblolly_Stack[Loblolly_Stack> 0.9*(Total),]<-2
Loblolly_Stack[!Loblolly_Stack==2]<-999
### Rule 3
Pine_Stack[Pine_Stack> 0.65*(Total),]<-3
Pine_Stack[!Pine_Stack==3]<-999
### Rule 4
Hardwood_Stack[Hardwood_Stack>0.5*(Total),]<-4
Hardwood_Stack[!Hardwood_Stack==4]<-999
### Rule 5 - any remaining vegetated cell
Total[Total >0,]<-5
bigstack<-stack(Longleaf_Stack, Loblolly_Stack, Pine_Stack, Hardwood_Stack, Total)
# community-type raster: per-cell minimum = first matching rule
test_stack<-min(bigstack)
crs(test_stack) <- "+proj=utm +zone=17 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
extent(test_stack)<-raster::extent(Ecoregion)
# median stand age raster for time step i (used to grade resistance values)
median0 <- raster(paste0(model,"/",model,replicate,"/",Year0[Year0 %in% paste0("AGE-MED-",i,".img")]))
crs(median0) <- "+proj=utm +zone=17 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
extent(median0)<-raster::extent(Ecoregion)
#use to incorporate land use change
#LU0 <- raster(paste("C:/Users/tgmozele/Desktop/LCP sensitivity test/geo2noLUC/land-use-", i, ".tif",sep=""))
#use to not incorporate land use change, but establish BAU land use types
LU0 <- raster(paste0(model,"/","NLCD100.tif"))
#Create a raster that will become resistance raster
test_raster <- test_stack
#Assign projection and reformat to ecoregion extent for the resistance raster
crs(test_raster) <- "+proj=utm +zone=17 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
extent(test_raster)<-raster::extent(Ecoregion)
#Assign values to resistance raster
# Cell values assigned below are graded by community type (test_stack class)
# and median stand age (median0).  Values like 1/90 appear to be
# conductances (1 = easiest movement) as consumed by gdistance::transition()
# further down - confirm against the original resistance table.
#longleaf community comp
test_raster[test_stack == 1 & median0 %in% c(0:1),] <- (1/90)
test_raster[test_stack == 1 & median0 %in% c(2:5),] <- (1/80)
test_raster[test_stack == 1 & median0 %in% c(6:7),] <- (1/70)
test_raster[test_stack == 1 & median0 %in% c(8:9),] <- (1/40)
test_raster[test_stack == 1 & median0 %in% c(10:20),] <- (1/10)
test_raster[test_stack == 1 & median0 %in% c(21:34),] <- 1
test_raster[test_stack == 1 & median0 >= 35,] <- 1
#test_raster[test_stack == 3 & median0 %in% c(0:5),] <- (1/95)
#test_raster[test_stack == 3 & median0 %in% c(6:10),] <- (1/80)
#test_raster[test_stack == 3 & median0 %in% c(11:20),] <- (1/25)
#test_raster[test_stack == 3 & median0 %in% c(21:34),] <- (1/5)
#test_raster[test_stack == 3 & median0 >= 35,] <- 1
#pine plantation community type (was pine mix)
test_raster[test_stack == 2 & median0 %in% c(0:5),] <- (1/90)
test_raster[test_stack == 2 & median0 %in% c(6:10),] <- (1/70)
test_raster[test_stack == 2 & median0 %in% c(11:20),] <- (1/60)
test_raster[test_stack == 2 & median0 %in% c(21:30),] <- (1/50)
test_raster[test_stack == 2 & median0 >= 31,] <- (1/40)
#pine mix community type (was lob_)
test_raster[test_stack == 3 & median0 %in% c(0:5),] <- (1/90)
test_raster[test_stack == 3 & median0 %in% c(6:10),] <- (1/70)
test_raster[test_stack == 3 & median0 %in% c(11:20),] <- (1/40)
test_raster[test_stack == 3 & median0 %in% c(21:34),] <- (1/30)
test_raster[test_stack == 3 & median0 >= 35,] <- (1/20)
#test_raster[test_stack == 3 & median0 %in% c(0:5),] <- (1/95)
#test_raster[test_stack == 3 & median0 %in% c(6:10),] <- (1/80)
#test_raster[test_stack == 3 & median0 %in% c(11:20),] <- (1/25)
#test_raster[test_stack == 3 & median0 %in% c(21:34),] <- (1/5)
#test_raster[test_stack == 3 & median0 >= 35,] <- 1
#hardwood community type (was mix)
test_raster[test_stack == 4 & median0 %in% c(0:10),] <- (1/90)
test_raster[test_stack == 4 & median0 %in% c(11:20),] <- (1/80)
test_raster[test_stack == 4 & median0 %in% c(21:30),] <- (1/70)
test_raster[test_stack == 4 & median0 >= 31,] <- (1/60)
#mixed forest community type (was hardwood)
test_raster[test_stack == 5 & median0 %in% c(0:10),] <- (1/90)
test_raster[test_stack == 5 & median0 %in% c(11:20),] <- (1/70)
test_raster[test_stack == 5 & median0 %in% c(21:30),] <- (1/60)
test_raster[test_stack == 5 & median0 >= 31,] <- (1/50)
#test_raster[test_stack == 5 & median0 %in% c(0:10),] <- (1/100)
#test_raster[test_stack == 5 & median0 %in% c(11:20),] <- (1/85)
#test_raster[test_stack == 5 & median0 %in% c(21:30),] <- (1/70)
#test_raster[test_stack == 5 & median0 >= 31,] <- (1/60)
test_raster2 <- test_raster
test_raster2[test_raster ==0] <- NA
#land use types
test_raster2[LU0 == 82] <- (1/90) #cropland
test_raster2[LU0 == 81] <- (1/90) #hay/pasture
test_raster2[LU0 == 11] <- (1/100) #water
test_raster2[LU0 == 24] <- (1/100) #developed, high intensity
test_raster2[LU0 == 23] <- (1/90) #developed, med intensity
test_raster2[LU0 == 22] <- (1/80) #developed, low intensity
test_raster2[LU0 == 31] <- (1/90) #barren land
#test_raster2[LU0 == 6] <- (1/100) #mining
test_raster2[test_raster2 ==0] <- (1/90)
#roads
test_raster2[roads %in% c(1:2)] <- (1/100)
test_raster2[roads %in% c(3:4)] <- (1/100)
test_raster2[roads %in% c(5:89)] <- (1/90)
# Binary habitat mask: 1 where movement value exceeds 0.1, else 0
test_raster3 <- test_raster2
test_raster3[test_raster3 >0.1] <- 1
test_raster3[test_raster3 < 1] <- 0
# Habitat occurrence raster restricted to the binary mask
# (cell value = occurrence count from SumStack where the mask is 1)
habitat_raster <- overlay(test_raster3, SumStack, fun=function(x,y){(x*y)} )
#Cluster habitat cells into habitat nodes using quintiles of occurrence
# Each stanza below keeps one occurrence-count band of habitat_raster
# (counts 37-45, 28-36, 19-27, 10-18, 1-9), polygonizes the remaining
# cells, buffers by 1 m so touching cells merge, dissolves the buffers and
# splits the result into disjoint patches (a..e), combined further below.
# NOTE(review): pol8$ID<-seq(1,length(pol8[1])) relies on length() of a
# one-column spatial subset equaling the feature count - confirm.
LikelyHabitat8<-habitat_raster
LikelyHabitat8[LikelyHabitat8%in%c(0:36),]<-NA
pol8 <- rasterToPolygons(LikelyHabitat8)
proj4string(pol8) = "+proj=utm +zone=17 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
pol8$ID<-seq(1,length(pol8[1]))
polbuf <- gBuffer(pol8, byid=TRUE, id=pol8$ID, width=1.0, quadsegs=5, capStyle="ROUND",
joinStyle="ROUND", mitreLimit=1.0)
polbufdis <- gUnaryUnion(polbuf, id = NULL, checkValidity=NULL)
a<-raster::disaggregate(polbufdis)
# band: occurrence counts 28-36
LikelyHabitat5<-habitat_raster
LikelyHabitat5[LikelyHabitat5%in%c(0:27,37:45),]<-NA
pol5 <- rasterToPolygons(LikelyHabitat5)
proj4string(pol5) = "+proj=utm +zone=17 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
pol5$ID<-seq(1,length(pol5[1]))
polbuf <- gBuffer(pol5, byid=TRUE, id=pol5$ID, width=1.0, quadsegs=5, capStyle="ROUND",
joinStyle="ROUND", mitreLimit=1.0)
polbufdis <- gUnaryUnion(polbuf, id = NULL, checkValidity=NULL)
b<-raster::disaggregate(polbufdis)
# band: occurrence counts 19-27
LikelyHabitat3<-habitat_raster
LikelyHabitat3[LikelyHabitat3%in%c(0:18,28:45),]<-NA
pol3 <- rasterToPolygons(LikelyHabitat3)
proj4string(pol3) = "+proj=utm +zone=17 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
pol3$ID<-seq(1,length(pol3[1]))
polbuf <- gBuffer(pol3, byid=TRUE, id=pol3$ID, width=1.0, quadsegs=5, capStyle="ROUND",
joinStyle="ROUND", mitreLimit=1.0)
polbufdis <- gUnaryUnion(polbuf, id = NULL, checkValidity=NULL)
c<-raster::disaggregate(polbufdis)
# band: occurrence counts 10-18
LikelyHabitat1<-habitat_raster
LikelyHabitat1[LikelyHabitat1%in%c(0:9,19:45),]<-NA
pol1 <- rasterToPolygons(LikelyHabitat1)
proj4string(pol1) = "+proj=utm +zone=17 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
pol1$ID<-seq(1,length(pol1[1]))
polbuf <- gBuffer(pol1, byid=TRUE, id=pol1$ID, width=1.0, quadsegs=5, capStyle="ROUND",
joinStyle="ROUND", mitreLimit=1.0)
polbufdis <- gUnaryUnion(polbuf, id = NULL, checkValidity=NULL)
d<-raster::disaggregate(polbufdis)
# band: occurrence counts 1-9 (lowest)
LikelyHabitat<-habitat_raster
LikelyHabitat[LikelyHabitat%in%c(0,10:45),]<-NA
pol <- rasterToPolygons(LikelyHabitat)
proj4string(pol) = "+proj=utm +zone=17 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
pol$ID<-seq(1,length(pol[1]))
polbuf <- gBuffer(pol, byid=TRUE, id=pol$ID, width=1.0, quadsegs=5, capStyle="ROUND",
joinStyle="ROUND", mitreLimit=1.0)
polbufdis <- gUnaryUnion(polbuf, id = NULL, checkValidity=NULL)
e<-raster::disaggregate(polbufdis)
#Bring quintile-based habitat nodes together into one SpatialPolygonsDataFrame, find area of nodes, and assign numbers
polys <- bind(a,b,c,d,e)
# NOTE(review): this clobbers any earlier object named `data` in the session
data<-data.frame(ID=seq(1,length(polys)))
pol1_dis<-SpatialPolygonsDataFrame(polys,data)
# patch area in hectares (area() returns m^2 for this projected CRS)
pol1_dis$area_ha <- raster::area(pol1_dis)/10000
pol1_dis$num1 <- seq(from = 1, to= length(pol1_dis), by=1)
pol1_dis$num2 <- seq(from = 1, to= length(pol1_dis), by=1)
#Assign weight to habitat by type and area to be used in Conefor
pol1_dis$weight <- NA
pol1_dis$weight <- pol1_dis$area_ha
#Restrict habitat patches to those 2 hectares and larger, reassign ID's
pol1_dis <- pol1_dis[pol1_dis$area_ha >= 2,]
pol1_dis$ID<-seq(from = 1, to= length(pol1_dis), by=1)
#Make habitat nodes file to be used for Conefor
# (tab-separated ID / weight pairs, no header)
maketext <- cbind(pol1_dis$ID, pol1_dis$weight)
write.table(maketext, file=paste0(model,"/",model,replicate,"/","Output_",model,replicate,species,u,"/nodes_",u,model,species,"yr",i,"Rep_",replicate,".txt"), sep = "\t", row.names = FALSE, col.names = FALSE)
#use to find #nodes, avg node size, and total habitat area (to be used for ECA:Area)
nodes[length(nodes)+1] <- length(pol1_dis$ID)
avgnode[length(avgnode)+1] <- mean(pol1_dis$area_ha)
totnode[length(totnode)+1] <- sum(pol1_dis$area_ha)
###create transition matrix from resistance raster, which is required by gdistance package to calculate resistance
###distance and least cost path
test_tr <- transition(test_raster2, transitionFunction=mean, directions=8)
#find polygon centroid
trueCentroids <- gCentroid(pol1_dis, byid=TRUE, id = pol1_dis$ID)
#clear memory - the intermediate rasters and polygon objects are no longer
#needed once the patch layer and transition matrix exist
rm(Longleaf_Stack)
rm(Loblolly_Stack)
rm(Pine_Stack)
rm(Hardwood_Stack)
rm(Total)
rm(bigstack)
rm(pol8)
rm(pol5)
rm(pol3)
rm(pol1)
rm(pol)
rm(a)
rm(b)
rm(c)
rm(d)
rm(e)
rm(polbuf)
rm(polys)
rm(LikelyHabitat8)
rm(LikelyHabitat5)
rm(LikelyHabitat3)
rm(LikelyHabitat1)
rm(LikelyHabitat)
#get coordinates from trueCentroids
cent_coords <- geom(trueCentroids)
#find euclidean distance nearest neighbor
EUnn <- nndist(cent_coords)
avgENN[length(avgENN)+1] <- mean(EUnn)
#Euclidean distance between points- if euclidean distance is greater than 2000 meters, remove that pair- STILL NEED TO DO!!
#1500 meters for small songbird (Minor and Urban 2008, Sutherland et al. 2000)
#timber rattlesnake (generalist) ~1200 meters (USFS FEIS)
#~500 (449) for eastern spadefoot toad (Baumberger et al. 2019- Movement and habtiat selecton of western spadefoot)
#10,000 biggest median disersal distance for birds found by Sutherland et al.
#create matrix of euclidean distance between polygon centroids
EUpts <- spDists(x= trueCentroids, y = trueCentroids, longlat = FALSE, segments = FALSE, diagonal = FALSE)
#condense matrix into table and remove duplicate pairs
EUnew <- subset(reshape2::melt(EUpts), value!=0)
# drop pairs farther apart than the dispersal threshold u (meters)
EU5000<-EUnew[!(EUnew$value > u),]
# keep one row per unordered pair (pmin/pmax canonicalizes the pair order)
EU5000_nodups <- EU5000[!duplicated(data.frame(list(do.call(pmin,EU5000),do.call(pmax,EU5000)))),]
rm(EUpts)
#merge the matrix row/column indices back to patch IDs
colnames(EU5000_nodups) <- c("num1", "num2", "EUD")
lookup <- cbind(pol1_dis$ID, pol1_dis$num1, pol1_dis$num2)
colnames(lookup) <- c("ID", "num1", "num2")
EU_test <- merge(x = EU5000_nodups, y = lookup, by = "num1", all.x = TRUE)
colnames(EU_test) <- c("num1", "num2", "EUD", "ID", "num2.y")
EU_test2 <- merge(x = EU_test, y = lookup, by = "num2", all.x = TRUE)
EU_fin <- cbind(EU_test2$ID.x, EU_test2$ID.y)
EU_fin_df <- data.frame(EU_fin)
#clear more memory
rm(EU_test)
rm(EU_test2)
rm(EU_fin)
#
print("#####################################Entering Cost Distance#############################")
#calculate least cost path
test_trC <- geoCorrection(test_tr, type="c") #geocorrection for least cost path
rm(test_tr)
costDist <- costDistance(test_trC, trueCentroids) #LCP
rm(trueCentroids)
# flatten the distance matrix to a long (pair, resistance) table
costmatrix <- matrixConvert(costDist, colname = c("X1", "X2", "resistance"))
colnames(costmatrix) <- c("X1", "X2", "resistance")
EU_fin_df$costdis <- NULL
# restrict cost distances to the pairs that passed the Euclidean filter
costdist5000 <- merge(EU_fin_df, costmatrix, by.x= c("X2", "X1"), by.y = c("X1", "X2"))
costdist5000df <- data.frame(costdist5000)
# drop unreachable pairs (infinite cost distance)
costcomplete <- costdist5000df[!is.infinite(rowSums(costdist5000df)),]
write.table(costcomplete, file=paste0(model,"/",model,replicate,"/","Output_",model,replicate,species,u,"/distance_",u,model,species,"yr",i,"Rep_",replicate,".txt"), sep = "\t", row.names = FALSE, col.names = FALSE)
#write.csv(costcomplete, file=paste0("Outputs/distance_",u,model,"yr",i,"Rep_",replicate,".csv"), row.names=F)
print("#####################################Finished Cost distance#############################")
links[length(links)+1] <- nrow(costcomplete)
avgLCP[length(avgLCP)+1] <- mean(costcomplete$resistance)
#get adjacency matrix to build igraph
cost_col<- cbind(costcomplete$X2, costcomplete$X1)
adj <- get.adjacency(graph.edgelist(as.matrix(cost_col), directed=FALSE))
network <- graph_from_adjacency_matrix(adj)
gdensity <- edge_density(network, loops = FALSE)
# NOTE(review): `density` and `transitivity` shadow stats::density and
# igraph::transitivity; the igraph call above still resolves because it is
# invoked before the local assignment on each line, but renaming the
# accumulators would be safer.
density[length(density)+1] <- gdensity
trans <- transitivity(network, type="global")
transitivity[length(transitivity)+1] <- trans
# one row of summary metrics for this run, written alongside the Conefor files
results <- data.frame(nodes, links, avgnode, totnode, avgLCP, avgENN, density, transitivity)
write.table(results, file=paste0(model,"/",model,replicate,"/","Output_",model,replicate,species,u,"/Metrics_",u,model,species,"yr",i,"Rep_",replicate,".txt"), sep = "\t", row.names = TRUE, col.names = TRUE)
| /Code/HPC_5000randspsyr20Rep5.R | no_license | ZacharyRobbins/TransferHPC | R | false | false | 14,796 | r | require(spatialEco)
require(sp)
require(usedist)
require(rgeos)
require(raster)
require(spatstat)
require(igraph)
require(sf)
require(rgdal)
require(gdistance)
require(otuSummary)
require(gdata)
require(maptools)
require(tidyverse)
require(reshape2)
require(data.table)
#setwd("E:/LCP sensitivity test/HPC_all_inputs")
#Create master map of all habitat created by a conservation strategy
i=20
u=5000
model="rand"
species="s"
replicate=5
XO<- list.files(paste0(model,"/"), pattern =paste0(species,".tif$",sep=""))
ManyRunsStack<-raster::stack(paste0(model,"/",XO))
SumStack<-sum(ManyRunsStack)
rm(ManyRunsStack)
#Bring in ecoregion map to use for crs and extent template
Ecoregion <- raster(paste0(model,"/","Ecoregion100f.tif"))
#Read in roads file
roads <- raster(paste0(model,"/","road.tif",sep=""))
#Create empty vectors for connectivity indices
nodes <- vector()
links <- vector()
avgnode <- vector()
totnode <- vector()
avgLCP <- vector()
avgENN <- vector()
density <- vector()
transitivity <- vector()
#Time steps
TimestepList <- as.character(seq(from=0, to=80, by=10))
#Connectivity analysis
Longleaf<-"PinuPalu"
Loblolly<-"PinuTaed"
Pine<- c("PinuEchi","PinuTaed","PinuVirg")
Hardwood<-c("QuerAlba","AcerRubr","LiriTuli","LiquStyr","OxydArbo","CornFlor")
Year0<-list.files(paste0(model,"/",model,replicate),pattern=(".img$"))
#paste0("inputs/", model, "/",model,replicate,"/")
Longleaf_Stack<-raster(paste0(model,"/",model,replicate,"/",Year0[Year0 %in% paste0("bio-", Longleaf,"-",i,".img")]))
Loblolly_Stack<-raster(paste0(model,"/",model,replicate,"/",Year0[Year0 %in% paste0("bio-", Loblolly,"-",i,".img")]))
Pine_Stack<-raster::stack(paste0(model,"/",model,replicate,"/",Year0[Year0 %in% paste0("bio-", Pine,"-",i,".img")]))
Hardwood_Stack<-raster::stack(paste0(model,"/",model,replicate,"/",Year0[Year0 %in% paste0("bio-", Hardwood,"-",i,".img")]))
Total<-raster(paste0(model,"/",model,replicate,"/",Year0[Year0 %in% paste0("bio-TotalBiomass-", i, ".img")]))
###Reclassification of biomass into community types
###Rule 1
Longleaf_Stack[Longleaf_Stack> 0.25*(Total),]<-1
Longleaf_Stack[!Longleaf_Stack==1]<-999
### Rule 2
Loblolly_Stack[Loblolly_Stack> 0.9*(Total),]<-2
Loblolly_Stack[!Loblolly_Stack==2]<-999
### Rule 3
Pine_Stack[Pine_Stack> 0.65*(Total),]<-3
Pine_Stack[!Pine_Stack==3]<-999
### Rule 4
Hardwood_Stack[Hardwood_Stack>0.5*(Total),]<-4
Hardwood_Stack[!Hardwood_Stack==4]<-999
### Rule 5
Total[Total >0,]<-5
bigstack<-stack(Longleaf_Stack, Loblolly_Stack, Pine_Stack, Hardwood_Stack, Total)
test_stack<-min(bigstack)
crs(test_stack) <- "+proj=utm +zone=17 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
extent(test_stack)<-raster::extent(Ecoregion)
median0 <- raster(paste0(model,"/",model,replicate,"/",Year0[Year0 %in% paste0("AGE-MED-",i,".img")]))
crs(median0) <- "+proj=utm +zone=17 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
extent(median0)<-raster::extent(Ecoregion)
#use to incorporate land use change
#LU0 <- raster(paste("C:/Users/tgmozele/Desktop/LCP sensitivity test/geo2noLUC/land-use-", i, ".tif",sep=""))
#use to not incorporate land use change, but establish BAU land use types
LU0 <- raster(paste0(model,"/","NLCD100.tif"))
#Create a raster that will become resistance raster
test_raster <- test_stack
#Assign projection and reformat to ecoregion extent for the resistance raster
crs(test_raster) <- "+proj=utm +zone=17 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
extent(test_raster)<-raster::extent(Ecoregion)
#Assign values to resistance raster
#longleaf community comp
test_raster[test_stack == 1 & median0 %in% c(0:1),] <- (1/90)
test_raster[test_stack == 1 & median0 %in% c(2:5),] <- (1/80)
test_raster[test_stack == 1 & median0 %in% c(6:7),] <- (1/70)
test_raster[test_stack == 1 & median0 %in% c(8:9),] <- (1/40)
test_raster[test_stack == 1 & median0 %in% c(10:20),] <- (1/10)
test_raster[test_stack == 1 & median0 %in% c(21:34),] <- 1
test_raster[test_stack == 1 & median0 >= 35,] <- 1
#test_raster[test_stack == 3 & median0 %in% c(0:5),] <- (1/95)
#test_raster[test_stack == 3 & median0 %in% c(6:10),] <- (1/80)
#test_raster[test_stack == 3 & median0 %in% c(11:20),] <- (1/25)
#test_raster[test_stack == 3 & median0 %in% c(21:34),] <- (1/5)
#test_raster[test_stack == 3 & median0 >= 35,] <- 1
#pine plantation community type (was pine mix)
test_raster[test_stack == 2 & median0 %in% c(0:5),] <- (1/90)
test_raster[test_stack == 2 & median0 %in% c(6:10),] <- (1/70)
test_raster[test_stack == 2 & median0 %in% c(11:20),] <- (1/60)
test_raster[test_stack == 2 & median0 %in% c(21:30),] <- (1/50)
test_raster[test_stack == 2 & median0 >= 31,] <- (1/40)
#pine mix community type (was lob_)
test_raster[test_stack == 3 & median0 %in% c(0:5),] <- (1/90)
test_raster[test_stack == 3 & median0 %in% c(6:10),] <- (1/70)
test_raster[test_stack == 3 & median0 %in% c(11:20),] <- (1/40)
test_raster[test_stack == 3 & median0 %in% c(21:34),] <- (1/30)
test_raster[test_stack == 3 & median0 >= 35,] <- (1/20)
#test_raster[test_stack == 3 & median0 %in% c(0:5),] <- (1/95)
#test_raster[test_stack == 3 & median0 %in% c(6:10),] <- (1/80)
#test_raster[test_stack == 3 & median0 %in% c(11:20),] <- (1/25)
#test_raster[test_stack == 3 & median0 %in% c(21:34),] <- (1/5)
#test_raster[test_stack == 3 & median0 >= 35,] <- 1
#hardwood community type (was mix)
test_raster[test_stack == 4 & median0 %in% c(0:10),] <- (1/90)
test_raster[test_stack == 4 & median0 %in% c(11:20),] <- (1/80)
test_raster[test_stack == 4 & median0 %in% c(21:30),] <- (1/70)
test_raster[test_stack == 4 & median0 >= 31,] <- (1/60)
#mixed forest community type (was hardwood)
test_raster[test_stack == 5 & median0 %in% c(0:10),] <- (1/90)
test_raster[test_stack == 5 & median0 %in% c(11:20),] <- (1/70)
test_raster[test_stack == 5 & median0 %in% c(21:30),] <- (1/60)
test_raster[test_stack == 5 & median0 >= 31,] <- (1/50)
#test_raster[test_stack == 5 & median0 %in% c(0:10),] <- (1/100)
#test_raster[test_stack == 5 & median0 %in% c(11:20),] <- (1/85)
#test_raster[test_stack == 5 & median0 %in% c(21:30),] <- (1/70)
#test_raster[test_stack == 5 & median0 >= 31,] <- (1/60)
test_raster2 <- test_raster
test_raster2[test_raster ==0] <- NA
#land use types
test_raster2[LU0 == 82] <- (1/90) #cropland
test_raster2[LU0 == 81] <- (1/90) #hay/pasture
test_raster2[LU0 == 11] <- (1/100) #water
test_raster2[LU0 == 24] <- (1/100) #developed, high intensity
test_raster2[LU0 == 23] <- (1/90) #developed, med intensity
test_raster2[LU0 == 22] <- (1/80) #developed, low intensity
test_raster2[LU0 == 31] <- (1/90) #barren land
#test_raster2[LU0 == 6] <- (1/100) #mining
test_raster2[test_raster2 ==0] <- (1/90)
#roads
test_raster2[roads %in% c(1:2)] <- (1/100)
test_raster2[roads %in% c(3:4)] <- (1/100)
test_raster2[roads %in% c(5:89)] <- (1/90)
test_raster3 <- test_raster2
test_raster3[test_raster3 >0.1] <- 1
test_raster3[test_raster3 < 1] <- 0
habitat_raster <- overlay(test_raster3, SumStack, fun=function(x,y){(x*y)} )
#Cluster habitat cells into habitat nodes using quintiles of occurrence
## For each occurrence band: keep only cells in the band, polygonize, buffer
## by 1 m so adjacent cells merge, dissolve, then split into separate patches.
## Band kept here: occurrence >= 37 (top band).
LikelyHabitat8<-habitat_raster
LikelyHabitat8[LikelyHabitat8%in%c(0:36),]<-NA
pol8 <- rasterToPolygons(LikelyHabitat8)
proj4string(pol8) = "+proj=utm +zone=17 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
pol8$ID<-seq(1,length(pol8[1]))
polbuf <- gBuffer(pol8, byid=TRUE, id=pol8$ID, width=1.0, quadsegs=5, capStyle="ROUND",
joinStyle="ROUND", mitreLimit=1.0)
polbufdis <- gUnaryUnion(polbuf, id = NULL, checkValidity=NULL)
a<-raster::disaggregate(polbufdis)
## Band kept: occurrence 28-36.
LikelyHabitat5<-habitat_raster
LikelyHabitat5[LikelyHabitat5%in%c(0:27,37:45),]<-NA
pol5 <- rasterToPolygons(LikelyHabitat5)
proj4string(pol5) = "+proj=utm +zone=17 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
pol5$ID<-seq(1,length(pol5[1]))
polbuf <- gBuffer(pol5, byid=TRUE, id=pol5$ID, width=1.0, quadsegs=5, capStyle="ROUND",
joinStyle="ROUND", mitreLimit=1.0)
polbufdis <- gUnaryUnion(polbuf, id = NULL, checkValidity=NULL)
b<-raster::disaggregate(polbufdis)
## Band kept: occurrence 19-27.
LikelyHabitat3<-habitat_raster
LikelyHabitat3[LikelyHabitat3%in%c(0:18,28:45),]<-NA
pol3 <- rasterToPolygons(LikelyHabitat3)
proj4string(pol3) = "+proj=utm +zone=17 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
pol3$ID<-seq(1,length(pol3[1]))
polbuf <- gBuffer(pol3, byid=TRUE, id=pol3$ID, width=1.0, quadsegs=5, capStyle="ROUND",
joinStyle="ROUND", mitreLimit=1.0)
polbufdis <- gUnaryUnion(polbuf, id = NULL, checkValidity=NULL)
c<-raster::disaggregate(polbufdis)
## Band kept: occurrence 10-18.
LikelyHabitat1<-habitat_raster
LikelyHabitat1[LikelyHabitat1%in%c(0:9,19:45),]<-NA
pol1 <- rasterToPolygons(LikelyHabitat1)
proj4string(pol1) = "+proj=utm +zone=17 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
pol1$ID<-seq(1,length(pol1[1]))
polbuf <- gBuffer(pol1, byid=TRUE, id=pol1$ID, width=1.0, quadsegs=5, capStyle="ROUND",
joinStyle="ROUND", mitreLimit=1.0)
polbufdis <- gUnaryUnion(polbuf, id = NULL, checkValidity=NULL)
d<-raster::disaggregate(polbufdis)
## Band kept: occurrence 1-9 (lowest non-zero band).
LikelyHabitat<-habitat_raster
LikelyHabitat[LikelyHabitat%in%c(0,10:45),]<-NA
pol <- rasterToPolygons(LikelyHabitat)
proj4string(pol) = "+proj=utm +zone=17 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
pol$ID<-seq(1,length(pol[1]))
polbuf <- gBuffer(pol, byid=TRUE, id=pol$ID, width=1.0, quadsegs=5, capStyle="ROUND",
joinStyle="ROUND", mitreLimit=1.0)
polbufdis <- gUnaryUnion(polbuf, id = NULL, checkValidity=NULL)
e<-raster::disaggregate(polbufdis)
#Bring quintile-based habitat nodes together into one SpatialPolygonsDataFrame, find area of nodes, and assign numbers
polys <- bind(a,b,c,d,e)
data<-data.frame(ID=seq(1,length(polys)))
pol1_dis<-SpatialPolygonsDataFrame(polys,data)
## area() returns m^2 in this metric (UTM) CRS; /10000 converts to hectares
pol1_dis$area_ha <- raster::area(pol1_dis)/10000
## num1/num2 duplicate the row index; they serve as join keys for the
## pairwise-distance tables built later
pol1_dis$num1 <- seq(from = 1, to= length(pol1_dis), by=1)
pol1_dis$num2 <- seq(from = 1, to= length(pol1_dis), by=1)
#Assign weight to habitat by type and area to be used in Conefor
pol1_dis$weight <- NA
pol1_dis$weight <- pol1_dis$area_ha
#Restrict habitat patches to those 2 hectares and larger, reassign ID's
pol1_dis <- pol1_dis[pol1_dis$area_ha >= 2,]
pol1_dis$ID<-seq(from = 1, to= length(pol1_dis), by=1)
#Make habitat nodes file to be used for Conefor
maketext <- cbind(pol1_dis$ID, pol1_dis$weight)
write.table(maketext, file=paste0(model,"/",model,replicate,"/","Output_",model,replicate,species,u,"/nodes_",u,model,species,"yr",i,"Rep_",replicate,".txt"), sep = "\t", row.names = FALSE, col.names = FALSE)
#use to find #nodes, avg node size, and total habitat area (to be used for ECA:Area)
nodes[length(nodes)+1] <- length(pol1_dis$ID)
avgnode[length(avgnode)+1] <- mean(pol1_dis$area_ha)
totnode[length(totnode)+1] <- sum(pol1_dis$area_ha)
###create transition matrix from resistance raster, which is required by gdistance package to calculate resistance
###distance and least cost path
## 8-neighbor transition; conductance between cells is the mean permeability
test_tr <- transition(test_raster2, transitionFunction=mean, directions=8)
#find polygon centroid
trueCentroids <- gCentroid(pol1_dis, byid=TRUE, id = pol1_dis$ID)
#clear memory: drop large intermediate stacks and polygon objects before the
#expensive distance computations below
rm(Longleaf_Stack)
rm(Loblolly_Stack)
rm(Pine_Stack)
rm(Hardwood_Stack)
rm(Total)
rm(bigstack)
rm(pol8)
rm(pol5)
rm(pol3)
rm(pol1)
rm(pol)
rm(a)
rm(b)
rm(c)
rm(d)
rm(e)
rm(polbuf)
rm(polys)
rm(LikelyHabitat8)
rm(LikelyHabitat5)
rm(LikelyHabitat3)
rm(LikelyHabitat1)
rm(LikelyHabitat)
#get coordinates from trueCentroids
cent_coords <- geom(trueCentroids)
#find euclidean distance nearest neighbor
## NOTE(review): nndist comes from spatstat -- distance from each centroid to
## its nearest neighboring centroid
EUnn <- nndist(cent_coords)
avgENN[length(avgENN)+1] <- mean(EUnn)
#Euclidean distance between points- if euclidean distance is greater than 2000 meters, remove that pair- STILL NEED TO DO!!
#1500 meters for small songbird (Minor and Urban 2008, Sutherland et al. 2000)
#timber rattlesnake (generalist) ~1200 meters (USFS FEIS)
#~500 (449) for eastern spadefoot toad (Baumberger et al. 2019- Movement and habitat selection of western spadefoot)
#10,000 biggest median dispersal distance for birds found by Sutherland et al.
#create matrix of euclidean distance between polygon centroids
EUpts <- spDists(x= trueCentroids, y = trueCentroids, longlat = FALSE, segments = FALSE, diagonal = FALSE)
#condense matrix into table and remove duplicate pairs
## melt() gives one row per (i, j) pair; value!=0 drops self-pairs
EUnew <- subset(reshape2::melt(EUpts), value!=0)
## keep only pairs within the dispersal threshold u (meters)
EU5000<-EUnew[!(EUnew$value > u),]
## drop (j, i) mirror of every (i, j) pair via the sorted-pair trick
EU5000_nodups <- EU5000[!duplicated(data.frame(list(do.call(pmin,EU5000),do.call(pmax,EU5000)))),]
rm(EUpts)
#merge: translate the row numbers (num1/num2) back to patch IDs
colnames(EU5000_nodups) <- c("num1", "num2", "EUD")
lookup <- cbind(pol1_dis$ID, pol1_dis$num1, pol1_dis$num2)
colnames(lookup) <- c("ID", "num1", "num2")
EU_test <- merge(x = EU5000_nodups, y = lookup, by = "num1", all.x = TRUE)
colnames(EU_test) <- c("num1", "num2", "EUD", "ID", "num2.y")
EU_test2 <- merge(x = EU_test, y = lookup, by = "num2", all.x = TRUE)
EU_fin <- cbind(EU_test2$ID.x, EU_test2$ID.y)
EU_fin_df <- data.frame(EU_fin)
#clear more memory
rm(EU_test)
rm(EU_test2)
rm(EU_fin)
#
print("#####################################Entering Cost Distance#############################")
#calculate least cost path
test_trC <- geoCorrection(test_tr, type="c") #geocorrection for least cost path
rm(test_tr)
costDist <- costDistance(test_trC, trueCentroids) #LCP
rm(trueCentroids)
## flatten the dist object to a 3-column pair table
costmatrix <- matrixConvert(costDist, colname = c("X1", "X2", "resistance"))
colnames(costmatrix) <- c("X1", "X2", "resistance")
EU_fin_df$costdis <- NULL
## keep only cost distances for pairs within the Euclidean threshold
costdist5000 <- merge(EU_fin_df, costmatrix, by.x= c("X2", "X1"), by.y = c("X1", "X2"))
costdist5000df <- data.frame(costdist5000)
## Inf cost distance means no path exists between the patches; drop those rows
costcomplete <- costdist5000df[!is.infinite(rowSums(costdist5000df)),]
write.table(costcomplete, file=paste0(model,"/",model,replicate,"/","Output_",model,replicate,species,u,"/distance_",u,model,species,"yr",i,"Rep_",replicate,".txt"), sep = "\t", row.names = FALSE, col.names = FALSE)
#write.csv(costcomplete, file=paste0("Outputs/distance_",u,model,"yr",i,"Rep_",replicate,".csv"), row.names=F)
print("#####################################Finished Cost distance#############################")
links[length(links)+1] <- nrow(costcomplete)
avgLCP[length(avgLCP)+1] <- mean(costcomplete$resistance)
#get adjacency matrix to build igraph
cost_col<- cbind(costcomplete$X2, costcomplete$X1)
adj <- get.adjacency(graph.edgelist(as.matrix(cost_col), directed=FALSE))
network <- graph_from_adjacency_matrix(adj)
## NOTE(review): 'density' and 'transitivity' shadow base/igraph function
## names -- they are accumulator vectors defined earlier in the script
gdensity <- edge_density(network, loops = FALSE)
density[length(density)+1] <- gdensity
trans <- transitivity(network, type="global")
transitivity[length(transitivity)+1] <- trans
results <- data.frame(nodes, links, avgnode, totnode, avgLCP, avgENN, density, transitivity)
write.table(results, file=paste0(model,"/",model,replicate,"/","Output_",model,replicate,species,u,"/Metrics_",u,model,species,"yr",i,"Rep_",replicate,".txt"), sep = "\t", row.names = TRUE, col.names = TRUE)
|
##--------------------------------------------------
## plot the loess correction for gc content
##
plotGcLoess <- function(med,gc,reads,fitted,fixed,fixedfit,rdo,libNum,readLength,libName){
  ## Write a two-panel PDF of mean read depth vs. GC content: raw data (top)
  ## and post-LOESS-corrected data (bottom). Green points are the LOESS fit;
  ## the red line marks the median depth 'med'.
  params <- rdo@params
  ## assemble the output file name, honoring an optional prefix
  fname <- paste("gccontent.lib", libNum, ".readLength", readLength, ".pdf", sep = "")
  if (!is.null(params$prefix)) {
    fname <- paste(params$prefix, ".", fname, sep = "")
  }
  pdf(paste(params$outputDirectory, "/plots/", fname, sep = ""), width = 8, height = 11)
  par(mfcol = c(2, 1))
  ymax <- 3 * med
  ## one panel: depth scatter, LOESS fit in green, median reference in red
  drawPanel <- function(depth, fit, title) {
    plot(gc, depth, ylim = c(0, ymax), ylab = "mean # reads", main = title,
         xlab = "GC content %")
    points(gc, fit, col = "green", pch = 20)
    abline(h = med, col = "red")
  }
  drawPanel(reads, fitted, "GC content bias - raw Data")
  mtext(paste("lib:", names(rdo@chrs[[1]])[libNum], " readLength:", readLength, sep = ""))
  drawPanel(fixed, fixedfit, "GC content bias - post LOESS correction")
  dev.off()
}
##--------------------------------------------------
## plot the loess correction for mapability content
##
plotMapLoess <- function(med,map,reads,fitted,fixed,fixedfit,rdo,libNum,readLength,libName){
  ## Write a two-panel PDF of mean read depth vs. mapability: raw data (top)
  ## and post-LOESS-corrected data (bottom). Green points are the LOESS fit;
  ## the red line marks the median depth 'med'.
  params <- rdo@params
  ## assemble the output file name, honoring an optional prefix
  fname <- paste("mapability.lib", libNum, ".readLength", readLength, ".pdf", sep = "")
  if (!is.null(params$prefix)) {
    fname <- paste(params$prefix, ".", fname, sep = "")
  }
  pdf(paste(params$outputDirectory, "/plots/", fname, sep = ""), width = 8, height = 11)
  par(mfcol = c(2, 1))
  ymax <- 3 * med
  ## one panel: depth scatter, LOESS fit in green, median reference in red
  drawPanel <- function(depth, fit, title) {
    plot(map, depth, ylim = c(0, ymax), ylab = "mean # reads", main = title,
         xlab = "Mapability content %")
    points(map, fit, col = "green", pch = 20)
    abline(h = med, col = "red")
  }
  drawPanel(reads, fitted, "Mapability bias - raw Data")
  mtext(paste("lib:", names(rdo@chrs[[1]])[libNum], " readLength:", readLength, sep = ""))
  drawPanel(fixed, fixedfit, "Mapability bias - post LOESS correction")
  dev.off()
}
##----------------------------------------------
## given a set of parameters, plot the peaks
## and thresholds
##
plotWindows <- function(windowSize, genomeSize, divGain, divLoss, fdr, numReads, oDisp, ploidyPerc, med){
  ## Visualize the simulated read-count peaks (1N/2N/3N) for a candidate
  ## window size, with expected peak centers (blue) and the gain/loss
  ## divergence thresholds (green).
  numWinds <- genomeSize / windowSize
  ## simulate overdispersed-Poisson counts; keep the original draw order
  ## (diploid, triploid, haploid) so RNG consumption is unchanged
  diploid  <- rpois.od(numWinds * ploidyPerc$diploidPerc, med, oDisp)
  triploid <- rpois.od(numWinds * ploidyPerc$triploidPerc, (med * 1.5), oDisp)
  haploid  <- rpois.od(numWinds * ploidyPerc$haploidPerc, (med * 0.5), oDisp)
  bins <- seq(-100000, 100000, 20)
  ## x-range centered on the diploid peak
  hrange <- 10 * sqrt(med * oDisp)
  hist(diploid, breaks = bins, xlim = c(med - hrange, med + hrange),
       main = paste("Window Size: ", windowSize, sep = ""))
  mtext(paste("FDR: ", round(fdr, 5), sep = ""))
  hist(triploid, breaks = bins, add = TRUE, col = "red")
  hist(haploid, breaks = bins, add = TRUE, col = "red")
  ## expected peak centers, then divergence thresholds
  for (center in c(med, med * 0.5, med * 1.5)) {
    abline(v = center, col = "blue")
  }
  for (thresh in c(divGain, divLoss)) {
    abline(v = thresh, col = "green")
  }
}
##---------------------------------------------------------
## Internal function to wrap plotting
##
doSegPlot <- function(rdo,segs,chr){
## Draw one chromosome's binned read depths (grey points), the median depth
## and gain/loss thresholds (blue/green lines), and the segment calls (red),
## with a secondary copy-number axis on the right. Plots to the currently
## open graphics device; callers (e.g. plotSegs) open/close the device.
st = 1
## chromosome length from the entrypoints table
sp = rdo@entrypoints[which(rdo@entrypoints$chr == chr),]$length
ymax=3 #multiplied by the median to get height
med = rdo@binParams$med
winds = rdo@chrs[[chr]]$rd
binSize = rdo@binParams$binSize
## bin midpoints; the final (partial) bin is pinned to the chromosome end
pos = seq(binSize/2,(((length(winds)-1)*binSize)-binSize/2),binSize)
pos = append(pos,sp)
par(mar=c(5, 4, 4, 4) + 0.1)
plot(pos,winds,ylab="number of reads", xlab="position (bp)", ylim=c(0,(median(winds,na.rm=T)*ymax)), pch=18, col=rgb(0,0,0,0.5), main=paste("Chr",chr,sep=""))
abline(h=med,col="blue")
## thresholds are stored on the copy-number scale; *med/2 converts to reads
abline(h=(rdo@binParams$gainThresh/2)*med,col=rgb(0,0.5,0,0.5))
abline(h=(rdo@binParams$lossThresh/2)*med,col=rgb(0,0.5,0,0.5))
asegs = segs[which(segs$chrom == chr),]
## columns: 2 = start, 3 = stop, 5 = copy number (converted to read scale)
for(i in 1:length(asegs[,1])){
segments(asegs[i,2],(asegs[i,5]*(med/2)),asegs[i,3],(asegs[i,5]*(med/2)), col="red",lwd=3)
}
## overlay an empty plot to draw the right-hand copy-number axis and grids
par(new=T)
plot(-10000,-10000, ylim=c(0,(median(winds,na.rm=T)*(ymax*2))/med), xlim=c(1,sp), axes=F,xlab="", ylab="")
axis(4, ylim=c(0,(median(winds,na.rm=T)*(ymax*2))/med), col="red",col.axis="red")
abline(h=seq(0,100,1),col="grey50",lty=3)
abline(v=seq(1,sp,2000000),col="grey50",lty=2)
mtext("Copy Number",side=4,col="red",line=2.5)
}
##----------------------------------------------
## plot the segments for a given chromosome
##
plotSegs <- function(rdo,segs,chr){
  ## Render the per-chromosome read-depth/segment plot to a PDF.
  ##
  ## The plotting body here was a verbatim copy of doSegPlot(); delegate to
  ## it so the two implementations cannot drift apart. Output file name and
  ## plot contents are unchanged.
  ##
  ## rdo  - readDepth object (params, entrypoints, binned depths)
  ## segs - segment table (chrom, start, stop, ..., copy number in col 5)
  ## chr  - chromosome identifier
  pdf(paste(rdo@params$outputDirectory,"/plots/points.",chr,".pdf",sep=""),width=12,height=4)
  ## ensure the device is closed even if plotting errors partway through
  on.exit(dev.off(), add = TRUE)
  doSegPlot(rdo, segs, chr)
}
##----------------------------------------------
## plot the segments for a given chromosome
##
plotSegsPairedLog <- function(rdo.ref, rdo.test, segs,chr){
  ## Paired (test vs. reference) copy-number plot for one chromosome.
  ## Scores from makeDfLogPaired() are log2 ratios; they are back-transformed
  ## to absolute copy number (diploid = 2) before plotting.
  pdf(paste(rdo.ref@params$outputDirectory,"/plots/points.",chr,".paired.log.pdf",sep=""),width=12,height=4)
  ## ensure the device is closed even if plotting errors partway through
  on.exit(dev.off(), add = TRUE)
  sp = rdo.ref@entrypoints[which(rdo.ref@entrypoints$chr == chr),]$length
  ## BUG FIX: 'ymax=' was left dangling, so it parsed as
  ## 'ymax = df = makeDfLogPaired(...)' and ymax silently became a data
  ## frame, corrupting ylim. Use the same copy-number-scale limit as
  ## plotSegsPaired.
  ymax = 3
  df = makeDfLogPaired(rdo.ref,rdo.test)
  df = df[df$chr==chr,]
  df$score = (2^df$score)*2  # log2 ratio -> absolute copy number
  par(mar=c(5, 4, 4, 4) + 0.1)
  ## BUG FIX: 'plot.first' is not an argument of plot(), so the grid abline()
  ## was never evaluated; 'panel.first' draws it after axes are set up and
  ## before the points.
  plot(df$pos, df$score, ylab="log2 ratio", xlab="position (bp)", ylim=c(0,ymax),
       pch=18, col=rgb(0,0,0,0.5), main=paste("Chr",chr,sep=""),
       panel.first=abline(h=seq(0,100,1),col="grey50",lty=3))
  abline(h=2,col="blue")                              # diploid reference
  abline(h=rdo.ref@binParams$gainThresh,col="green")  # gain threshold
  abline(h=rdo.ref@binParams$lossThresh,col="green")  # loss threshold
  ## overlay segment calls (start = col 2, stop = col 3, value = col 5)
  asegs = segs[which(segs$chrom == chr),]
  for(i in 1:length(asegs[,1])){
    segments(asegs[i,2], asegs[i,5], asegs[i,3], asegs[i,5], col="red", lwd=3)
  }
  abline(v=seq(1,sp,2000000),col="grey50",lty=2)      # 2 Mb position grid
}
##----------------------------------------------
## plot the segments for a given chromosome
##
plotSegsPaired <- function(rdo.ref, rdo.test, segs,chr){
  ## Paired (test vs. reference) copy-number plot for one chromosome.
  ## Scores from makeDfLogPaired() are log2 ratios; they are back-transformed
  ## to absolute copy number (diploid = 2) before plotting.
  pdf(paste(rdo.ref@params$outputDirectory,"/plots/points.",chr,".paired.pdf",sep=""),width=12,height=8)
  ## ensure the device is closed even if plotting errors partway through
  on.exit(dev.off(), add = TRUE)
  sp = rdo.ref@entrypoints[which(rdo.ref@entrypoints$chr == chr),]$length
  ymax = 3  # y-axis upper limit on the copy-number scale
  df = makeDfLogPaired(rdo.ref,rdo.test)
  df = df[df$chr==chr,]
  df$score = (2^df$score)*2  # log2 ratio -> absolute copy number
  par(mar=c(5, 4, 4, 4) + 0.1)
  ## BUG FIX: 'plot.first' is not an argument of plot(), so the grid abline()
  ## was never evaluated; 'panel.first' draws it after axes are set up and
  ## before the points.
  plot(df$pos, df$score, ylab="log2 ratio", xlab="position (bp)", ylim=c(0,ymax),
       pch=18, col=rgb(0,0,0,0.5), main=paste("Chr",chr,sep=""),
       panel.first=abline(h=seq(0,100,1),col="grey50",lty=3))
  abline(h=2,col="blue")                              # diploid reference
  abline(h=rdo.ref@binParams$gainThresh,col="green")  # gain threshold
  abline(h=rdo.ref@binParams$lossThresh,col="green")  # loss threshold
  ## overlay segment calls (start = col 2, stop = col 3, value = col 5)
  asegs = segs[which(segs$chrom == chr),]
  for(i in 1:length(asegs[,1])){
    segments(asegs[i,2], asegs[i,5], asegs[i,3], asegs[i,5], col="red", lwd=3)
  }
  abline(v=seq(1,sp,2000000),col="grey50",lty=2)      # 2 Mb position grid
}
| /R/plotting.R | permissive | chrisamiller/copyCat | R | false | false | 7,980 | r | ##--------------------------------------------------
## plot the loess correction for gc content
##
plotGcLoess <- function(med,gc,reads,fitted,fixed,fixedfit,rdo,libNum,readLength,libName){
## Two-panel PDF: mean read depth vs. GC content before (top) and after
## (bottom) the LOESS correction. 'fitted'/'fixedfit' are the LOESS fits
## (green points); 'med' is the median depth (red line).
params=rdo@params
## choose output name, honoring an optional file-name prefix
if(is.null(params$prefix)){
pdf(paste(params$outputDirectory,"/plots/gccontent.lib",libNum,".readLength",readLength,".pdf",sep=""),width=8,height=11)
} else {
pdf(paste(params$outputDirectory,"/plots/",params$prefix,".gccontent.lib",libNum,".readLength",readLength,".pdf",sep=""),width=8,height=11)
}
par(mfcol=c(2,1))
ymax <- med*3
##plot original
plot(gc,reads,ylim=c(0,ymax),ylab="mean # reads", main="GC content bias - raw Data", xlab="GC content %")
mtext(paste("lib:",names(rdo@chrs[[1]])[libNum]," readLength:",readLength,sep=""))
points(gc,fitted,col="green",pch=20)
abline(h=med,col="red")
## plot post-adjustment
plot(gc,fixed,ylim=c(0,ymax),ylab="mean # reads", main="GC content bias - post LOESS correction", xlab="GC content %")
points(gc,fixedfit,col="green",pch=20)
abline(h=med,col="red")
dev.off()
}
##--------------------------------------------------
## plot the loess correction for mapability content
##
plotMapLoess <- function(med,map,reads,fitted,fixed,fixedfit,rdo,libNum,readLength,libName){
## Two-panel PDF: mean read depth vs. mapability before (top) and after
## (bottom) the LOESS correction. 'fitted'/'fixedfit' are the LOESS fits
## (green points); 'med' is the median depth (red line).
params=rdo@params
## choose output name, honoring an optional file-name prefix
if(is.null(params$prefix)){
pdf(paste(params$outputDirectory,"/plots/mapability.lib",libNum,".readLength",readLength,".pdf",sep=""),width=8,height=11)
} else {
pdf(paste(params$outputDirectory,"/plots/",params$prefix,".mapability.lib",libNum,".readLength",readLength,".pdf",sep=""),width=8,height=11)
}
par(mfcol=c(2,1))
ymax <- med*3
##plot original
plot(map,reads,ylim=c(0,ymax),ylab="mean # reads", main="Mapability bias - raw Data", xlab="Mapability content %")
mtext(paste("lib:",names(rdo@chrs[[1]])[libNum]," readLength:",readLength,sep=""))
points(map,fitted,col="green",pch=20)
abline(h=med,col="red")
## plot post-adjustment
plot(map,fixed,ylim=c(0,ymax),ylab="mean # reads", main="Mapability bias - post LOESS correction", xlab="Mapability content %")
points(map,fixedfit,col="green",pch=20)
abline(h=med,col="red")
dev.off()
}
##----------------------------------------------
## given a set of parameters, plot the peaks
## and thresholds
##
plotWindows <- function(windowSize, genomeSize, divGain, divLoss, fdr, numReads, oDisp, ploidyPerc, med){
## Simulate and plot the expected 1N/2N/3N read-count peaks for a candidate
## window size: histograms of overdispersed-Poisson draws, expected peak
## centers (blue) and the gain/loss divergence thresholds (green).
numWinds <- genomeSize/windowSize
##generate distributions (draw order d2, d3, d1 fixes RNG consumption)
d2 <- rpois.od(numWinds*ploidyPerc$diploidPerc,med,oDisp)
d3 <- rpois.od(numWinds*ploidyPerc$triploidPerc,(med*1.5),oDisp)
d1 <- rpois.od(numWinds*ploidyPerc$haploidPerc,(med*0.5),oDisp)
## x-range centered on the diploid peak
hrange=10*sqrt(med*oDisp)
hist(d2,breaks=seq(-100000,100000,20),xlim=c(med-hrange,med+hrange),main=paste("Window Size: ",windowSize,sep=""))
mtext(paste("FDR: ",round(fdr,5),sep=""))
hist(d3,breaks=seq(-100000,100000,20),add=T,col="red")
hist(d1,breaks=seq(-100000,100000,20),add=T,col="red")
abline(v=med,col="blue")
abline(v=med*(0.5),col="blue")
abline(v=med*(1.5),col="blue")
abline(v=divGain,col="green")
abline(v=divLoss,col="green")
}
##---------------------------------------------------------
## Internal function to wrap plotting
##
doSegPlot <- function(rdo,segs,chr){
## Draw one chromosome's binned read depths (grey points), the median depth
## and gain/loss thresholds (blue/green lines), and the segment calls (red),
## with a secondary copy-number axis on the right. Plots to the currently
## open graphics device; callers (e.g. plotSegs) open/close the device.
st = 1
## chromosome length from the entrypoints table
sp = rdo@entrypoints[which(rdo@entrypoints$chr == chr),]$length
ymax=3 #multiplied by the median to get height
med = rdo@binParams$med
winds = rdo@chrs[[chr]]$rd
binSize = rdo@binParams$binSize
## bin midpoints; the final (partial) bin is pinned to the chromosome end
pos = seq(binSize/2,(((length(winds)-1)*binSize)-binSize/2),binSize)
pos = append(pos,sp)
par(mar=c(5, 4, 4, 4) + 0.1)
plot(pos,winds,ylab="number of reads", xlab="position (bp)", ylim=c(0,(median(winds,na.rm=T)*ymax)), pch=18, col=rgb(0,0,0,0.5), main=paste("Chr",chr,sep=""))
abline(h=med,col="blue")
## thresholds are stored on the copy-number scale; *med/2 converts to reads
abline(h=(rdo@binParams$gainThresh/2)*med,col=rgb(0,0.5,0,0.5))
abline(h=(rdo@binParams$lossThresh/2)*med,col=rgb(0,0.5,0,0.5))
asegs = segs[which(segs$chrom == chr),]
## columns: 2 = start, 3 = stop, 5 = copy number (converted to read scale)
for(i in 1:length(asegs[,1])){
segments(asegs[i,2],(asegs[i,5]*(med/2)),asegs[i,3],(asegs[i,5]*(med/2)), col="red",lwd=3)
}
## overlay an empty plot to draw the right-hand copy-number axis and grids
par(new=T)
plot(-10000,-10000, ylim=c(0,(median(winds,na.rm=T)*(ymax*2))/med), xlim=c(1,sp), axes=F,xlab="", ylab="")
axis(4, ylim=c(0,(median(winds,na.rm=T)*(ymax*2))/med), col="red",col.axis="red")
abline(h=seq(0,100,1),col="grey50",lty=3)
abline(v=seq(1,sp,2000000),col="grey50",lty=2)
mtext("Copy Number",side=4,col="red",line=2.5)
}
##----------------------------------------------
## plot the segments for a given chromosome
##
plotSegs <- function(rdo,segs,chr){
  ## Render the per-chromosome read-depth/segment plot to a PDF.
  ##
  ## The plotting body here was a verbatim copy of doSegPlot(); delegate to
  ## it so the two implementations cannot drift apart. Output file name and
  ## plot contents are unchanged.
  ##
  ## rdo  - readDepth object (params, entrypoints, binned depths)
  ## segs - segment table (chrom, start, stop, ..., copy number in col 5)
  ## chr  - chromosome identifier
  pdf(paste(rdo@params$outputDirectory,"/plots/points.",chr,".pdf",sep=""),width=12,height=4)
  ## ensure the device is closed even if plotting errors partway through
  on.exit(dev.off(), add = TRUE)
  doSegPlot(rdo, segs, chr)
}
##----------------------------------------------
## plot the segments for a given chromosome
##
plotSegsPairedLog <- function(rdo.ref, rdo.test, segs,chr){
  ## Paired (test vs. reference) copy-number plot for one chromosome.
  ## Scores from makeDfLogPaired() are log2 ratios; they are back-transformed
  ## to absolute copy number (diploid = 2) before plotting.
  pdf(paste(rdo.ref@params$outputDirectory,"/plots/points.",chr,".paired.log.pdf",sep=""),width=12,height=4)
  ## ensure the device is closed even if plotting errors partway through
  on.exit(dev.off(), add = TRUE)
  sp = rdo.ref@entrypoints[which(rdo.ref@entrypoints$chr == chr),]$length
  ## BUG FIX: 'ymax=' was left dangling, so it parsed as
  ## 'ymax = df = makeDfLogPaired(...)' and ymax silently became a data
  ## frame, corrupting ylim. Use the same copy-number-scale limit as
  ## plotSegsPaired.
  ymax = 3
  df = makeDfLogPaired(rdo.ref,rdo.test)
  df = df[df$chr==chr,]
  df$score = (2^df$score)*2  # log2 ratio -> absolute copy number
  par(mar=c(5, 4, 4, 4) + 0.1)
  ## BUG FIX: 'plot.first' is not an argument of plot(), so the grid abline()
  ## was never evaluated; 'panel.first' draws it after axes are set up and
  ## before the points.
  plot(df$pos, df$score, ylab="log2 ratio", xlab="position (bp)", ylim=c(0,ymax),
       pch=18, col=rgb(0,0,0,0.5), main=paste("Chr",chr,sep=""),
       panel.first=abline(h=seq(0,100,1),col="grey50",lty=3))
  abline(h=2,col="blue")                              # diploid reference
  abline(h=rdo.ref@binParams$gainThresh,col="green")  # gain threshold
  abline(h=rdo.ref@binParams$lossThresh,col="green")  # loss threshold
  ## overlay segment calls (start = col 2, stop = col 3, value = col 5)
  asegs = segs[which(segs$chrom == chr),]
  for(i in 1:length(asegs[,1])){
    segments(asegs[i,2], asegs[i,5], asegs[i,3], asegs[i,5], col="red", lwd=3)
  }
  abline(v=seq(1,sp,2000000),col="grey50",lty=2)      # 2 Mb position grid
}
##----------------------------------------------
## plot the segments for a given chromosome
##
plotSegsPaired <- function(rdo.ref, rdo.test, segs,chr){
  ## Paired (test vs. reference) copy-number plot for one chromosome.
  ## Scores from makeDfLogPaired() are log2 ratios; they are back-transformed
  ## to absolute copy number (diploid = 2) before plotting.
  pdf(paste(rdo.ref@params$outputDirectory,"/plots/points.",chr,".paired.pdf",sep=""),width=12,height=8)
  ## ensure the device is closed even if plotting errors partway through
  on.exit(dev.off(), add = TRUE)
  sp = rdo.ref@entrypoints[which(rdo.ref@entrypoints$chr == chr),]$length
  ymax = 3  # y-axis upper limit on the copy-number scale
  df = makeDfLogPaired(rdo.ref,rdo.test)
  df = df[df$chr==chr,]
  df$score = (2^df$score)*2  # log2 ratio -> absolute copy number
  par(mar=c(5, 4, 4, 4) + 0.1)
  ## BUG FIX: 'plot.first' is not an argument of plot(), so the grid abline()
  ## was never evaluated; 'panel.first' draws it after axes are set up and
  ## before the points.
  plot(df$pos, df$score, ylab="log2 ratio", xlab="position (bp)", ylim=c(0,ymax),
       pch=18, col=rgb(0,0,0,0.5), main=paste("Chr",chr,sep=""),
       panel.first=abline(h=seq(0,100,1),col="grey50",lty=3))
  abline(h=2,col="blue")                              # diploid reference
  abline(h=rdo.ref@binParams$gainThresh,col="green")  # gain threshold
  abline(h=rdo.ref@binParams$lossThresh,col="green")  # loss threshold
  ## overlay segment calls (start = col 2, stop = col 3, value = col 5)
  asegs = segs[which(segs$chrom == chr),]
  for(i in 1:length(asegs[,1])){
    segments(asegs[i,2], asegs[i,5], asegs[i,3], asegs[i,5], col="red", lwd=3)
  }
  abline(v=seq(1,sp,2000000),col="grey50",lty=2)      # 2 Mb position grid
}
|
#' Initialize Parameter
#'
#' @param genome An object of type Genome necessary for the initialization of the Parameter object.
#' The default value is NULL.
#'
#' @param sphi Initial values for sphi. Expected is a vector of length numMixtures.
#' The default value is NULL.
#'
#' @param num.mixtures The number of mixtures elements for the underlying mixture distribution (numMixtures > 0).
#' The default value is 1.
#'
#' @param gene.assignment A vector holding the initial mixture assignment for each gene.
#' The vector length has to equal the number of genes in the genome.
#' Valid values for the vector range from 1 to numMixtures.
#' It is possible but not advised to leave a mixture element empty.
#' The default Value is NULL.
#'
#' @param initial.expression.values (Optional) A vector with intial phi values.
#' The length of the vector has to equal the number of genes in the Genome object.
#' The default value is NULL.
#'
#' @param model Specifies the model used. Valid options are "ROC", "PA", "PANSE", or "FONSE".
#' The default model is "ROC".
#' ROC is described in Gilchrist et al. 2015.
#' PA, PANSE and FONSE are currently unpublished.
#'
#' @param split.serine Whether serine should be considered as
#' one or two amino acids when running the model.
#' TRUE and FALSE are the only valid values.
#' The default value for split.serine is TRUE.
#'
#' @param mixture.definition A string describing how each mixture should
#' be treated with respect to mutation and selection.
#' Valid values consist of "allUnique", "mutationShared", and "selectionShared".
#' The default value for mixture.definition is "allUnique".
#' See details for more information.
#'
#' @param mixture.definition.matrix A matrix representation of how
#' the mutation and selection categories correspond to the mixtures.
#' The default value for mixture.definition.matrix is NULL.
#' If provided, the model will use the matrix to initialize the mutation and selection
#' categories instead of the definition listed directly above.
#' See details for more information.
#'
#' @param init.with.restart.file File name containing information to reinitialize a
#' previous Parameter object.
#' If given, all other arguments will be ignored.
#' The default value for init.with.restart.file is NULL.
#'
#' @param mutation.prior.sd Controlling the standard deviation of the normal
#' prior on the mutation parameters
#'
#' @param init.csp.variance specifies the initial proposal width for codon specific parameter (default is 0.0025).
#' The proposal width adapts during the runtime to reach a target acceptance rate of ~0.25.
#'
#' @param init.sepsilon specifies the initial value for sepsilon. default is 0.1
#'
#' @param init.w.obs.phi TRUE: initialize phi values with observed phi values
#' (data from RNAseq, mass spectrometry, ribosome footprinting) Default is FALSE.
#' If multiple observed phi values exist for a gene, the geometric mean of these values is used as initial phi.
#' When using this function, one should remove any genes with
#' missing phi values, as these genes will not have an initial phi value.
#'
#' @return parameter Returns an initialized Parameter object.
#'
#' @description \code{initializeParameterObject} initializes a new parameter object or reconstructs one from a restart file
#'
#' @details \code{initializeParameterObject} checks the values of the arguments
#' given to insure the values are valid.
#'
#' The mixture definition and mixture definition matrix describes how the mutation
#' and selection categories are set up with respect to the number of mixtures. For
#' example, if mixture.definition = "allUnique" and numMixtures = 3, a matrix
#' representation would be \code{matrix(c(1,2,3,1,2,3), ncol=2)}
#' where each row represents a mixture, the first column represents the mutation
#' category, and the second column represents the selection category.
#' Another example would be mixture.definition = "selectionShared" and numMixtures = 4 (
#' \code{matrix(c(1,2,3,4,1,1,1,1), ncol=2)}).
#' In this case, the selection category is the same for every mixture. If a matrix
#' is given, and it is valid, then the mutation/selection relationship will be
#' defined by the given matrix and the keyword will be ignored. A matrix should only
#' be given in cases where the keywords would not create the desired matrix.
#'
#' @examples
#'
#' genome_file <- system.file("extdata", "genome.fasta", package = "AnaCoDa")
#' restart_file <- system.file("extdata", "restart_file.rst", package = "AnaCoDa")
#'
#' genome <- initializeGenomeObject(file = genome_file)
#'
#' ## initialize a new parameter object
#' sphi_init <- 1
#' numMixtures <- 1
#' geneAssignment <- rep(1, length(genome))
#' parameter <- initializeParameterObject(genome = genome, sphi = sphi_init,
#' num.mixtures = numMixtures,
#' gene.assignment = geneAssignment,
#' mixture.definition = "allUnique")
#'
#' ## re-initialize a parameter object from a restart file. Useful for checkpointing
#' parameter <- initializeParameterObject(init.with.restart.file = restart_file)
#'
#' ## initialize a parameter object with a custom mixture definition matrix
#' def.matrix <- matrix(c(1,1,1,2), ncol=2)
#' geneAssignment <- sample(1:2, length(genome), replace = TRUE) # random assignment to mixtures
#' parameter <- initializeParameterObject(genome = genome, sphi = c(0.5, 2), num.mixtures = 2,
#' gene.assignment = geneAssignment,
#' mixture.definition.matrix = def.matrix)
#'
initializeParameterObject <- function(genome = NULL, sphi = NULL, num.mixtures = 1,
                                      gene.assignment = NULL, initial.expression.values = NULL,
                                      model = "ROC", split.serine = TRUE,
                                      mixture.definition = "allUnique",
                                      mixture.definition.matrix = NULL,
                                      init.with.restart.file = NULL, mutation.prior.sd = 0.35,
                                      init.csp.variance = 0.0025, init.sepsilon = 0.1,
                                      init.w.obs.phi=FALSE){
  ## --- input validation (skipped entirely when restarting from a file) ---
  if(is.null(init.with.restart.file)){
    if(length(sphi) != num.mixtures){
      stop("Not all mixtures have an Sphi value assigned!\n")
    }
    if(length(genome) != length(gene.assignment)){
      stop("Not all Genes have a mixture assignment!\n")
    }
    if(max(gene.assignment) > num.mixtures){
      stop("Gene is assigned to non existing mixture!\n")
    }
    if(num.mixtures < 1){
      stop("num. mixture has to be a positive non-zero value!\n")
    }
    ## (a second length(sphi) != num.mixtures check was removed here: it was
    ## unreachable, since the first check above already enforces it)
    if (!is.null(initial.expression.values)) {
      if (length(initial.expression.values) != length.Rcpp_Genome(genome)) {
        stop("initial.expression.values must have length equal to the number of genes in the Genome object\n")
      }
    }
    if (!identical(split.serine, TRUE) && !identical(split.serine, FALSE)) {
      stop("split.serine must be a boolean value\n")
    }
    if (mixture.definition != "allUnique" && mixture.definition != "mutationShared" &&
        mixture.definition != "selectionShared") {
      stop("mixture.definition must be \"allUnique\", \"mutationShared\", or \"selectionShared\". Default is \"allUnique\"\n")
    }
    if (mutation.prior.sd < 0) {
      stop("mutation.prior.sd should be positive\n")
    }
    if (init.csp.variance < 0) {
      stop("init.csp.variance should be positive\n")
    }
    if (init.sepsilon < 0) {
      stop("init.sepsilon should be positive\n")
    }
  } else {
    if (!file.exists(init.with.restart.file)) {
      stop("init.with.restart.file provided does not exist\n")
    }
  }
  ## --- dispatch to the model-specific initializer, or reconstruct the C++
  ## --- parameter object directly from the restart file
  if(model == "ROC"){
    if(is.null(init.with.restart.file)){
      parameter <- initializeROCParameterObject(genome, sphi, num.mixtures,
                      gene.assignment, initial.expression.values, split.serine,
                      mixture.definition, mixture.definition.matrix,
                      mutation.prior.sd, init.csp.variance, init.sepsilon,init.w.obs.phi)
    }else{
      parameter <- new(ROCParameter, init.with.restart.file)
    }
  }else if(model == "FONSE"){
    if(is.null(init.with.restart.file)){
      parameter <- initializeFONSEParameterObject(genome, sphi, num.mixtures,
                      gene.assignment, initial.expression.values, split.serine,
                      mixture.definition, mixture.definition.matrix, init.csp.variance,init.w.obs.phi)
    }else{
      parameter <- new(FONSEParameter, init.with.restart.file)
    }
  }else if(model == "PA"){
    if(is.null(init.with.restart.file)){
      parameter <- initializePAParameterObject(genome, sphi, num.mixtures,
                      gene.assignment, initial.expression.values, split.serine,
                      mixture.definition, mixture.definition.matrix, init.csp.variance,init.w.obs.phi)
    }else{
      parameter <- new(PAParameter, init.with.restart.file)
    }
  }else if(model == "PANSE"){
    if(is.null(init.with.restart.file)){
      parameter <- initializePANSEParameterObject(genome, sphi, num.mixtures,
                      gene.assignment, initial.expression.values, split.serine,
                      mixture.definition, mixture.definition.matrix, init.csp.variance,init.w.obs.phi)
    }else{
      parameter <- new(PANSEParameter, init.with.restart.file)
    }
  }else{
    stop("Unknown model.")
  }
  return(parameter)
}
#Called from initializeParameterObject.
initializeROCParameterObject <- function(genome, sphi, numMixtures, geneAssignment,
                                         expressionValues = NULL, split.serine = TRUE,
                                         mixture.definition = "allUnique",
                                         mixture.definition.matrix = NULL,
                                         mutation_prior_sd = 0.35,
                                         init.csp.variance = 0.0025,
                                         init.sepsilon = 0.1,
                                         init.w.obs.phi = FALSE) {
  # Construct and initialize an Rcpp ROCParameter object: mixture structure,
  # gene-specific synthesis rates (phi), observed-phi bookkeeping, and the
  # codon-specific parameters with their proposal covariance matrices.
  if (is.null(mixture.definition.matrix)) {
    # Keyword constructor: mixture structure given by a keyword ("allUnique", ...).
    parameter <- new(ROCParameter, as.vector(sphi), numMixtures, geneAssignment,
                     split.serine, mixture.definition)
  } else {
    # Matrix constructor: flatten the two-column definition matrix into one vector.
    mixture.definition <- c(mixture.definition.matrix[, 1],
                            mixture.definition.matrix[, 2])
    parameter <- new(ROCParameter, as.vector(sphi), geneAssignment,
                     mixture.definition, split.serine)
  }
  # Initialize phi from exactly one of three mutually exclusive sources:
  # the genome, the observed phi values attached to the genome, or a
  # user-supplied list of expression values.
  if (is.null(expressionValues) && !init.w.obs.phi) {
    parameter$initializeSynthesisRateByGenome(genome, mean(sphi))
  } else if (is.null(expressionValues) && init.w.obs.phi) {
    observed.phi <- getObservedSynthesisRateSet(genome)
    if (ncol(observed.phi) - 1 > 1) {
      # Multiple observed phi sets (first column holds ids): combine them
      # with a row-wise geometric mean.
      observed.phi <- apply(observed.phi[, 2:ncol(observed.phi)], MARGIN = 1, FUN = geom_mean)
    } else {
      observed.phi <- observed.phi[, 2]
    }
    parameter$initializeSynthesisRateByList(observed.phi)
  } else if (!is.null(expressionValues) && !init.w.obs.phi) {
    parameter$initializeSynthesisRateByList(expressionValues)
  } else {
    stop("expressionValues is not NULL and init.w.obs.phi == TRUE. Please choose only one of these options.")
  }
  # Number of observed synthesis rate sets = columns minus the id column.
  n.obs.phi.sets <- ncol(getObservedSynthesisRateSet(genome)) - 1
  parameter$setNumObservedSynthesisRateSets(n.obs.phi.sets)
  parameter$mutation_prior_sd <- mutation_prior_sd
  if (n.obs.phi.sets != 0) {
    parameter$setInitialValuesForSepsilon(as.vector(init.sepsilon))
  }
  # Also initializes the mutation and selection parameters (see
  # initializeCovarianceMatrices).
  parameter <- initializeCovarianceMatrices(parameter, genome, numMixtures,
                                            geneAssignment, init.csp.variance)
  return(parameter)
}
#Called from initializeParameterObject.
initializePAParameterObject <- function(genome, sphi, numMixtures, geneAssignment,
                                        expressionValues = NULL, split.serine = TRUE,
                                        mixture.definition = "allUnique",
                                        mixture.definition.matrix = NULL,
                                        init.csp.variance, init.w.obs.phi = FALSE) {
  # Construct and initialize an Rcpp PAParameter object: mixture structure
  # and gene-specific synthesis rates (phi).
  if (is.null(mixture.definition.matrix)) {
    # Keyword constructor: mixture structure given by a keyword ("allUnique", ...).
    parameter <- new(PAParameter, as.vector(sphi), numMixtures, geneAssignment,
                     split.serine, mixture.definition)
  } else {
    # Matrix constructor: flatten the two-column definition matrix into one vector.
    mixture.definition <- c(mixture.definition.matrix[, 1],
                            mixture.definition.matrix[, 2])
    parameter <- new(PAParameter, as.vector(sphi), geneAssignment,
                     mixture.definition, split.serine)
  }
  # Initialize phi from exactly one of three mutually exclusive sources:
  # the genome, the observed phi values attached to the genome, or a
  # user-supplied list of expression values.
  if (is.null(expressionValues) && !init.w.obs.phi) {
    parameter$initializeSynthesisRateByGenome(genome, mean(sphi))
  } else if (is.null(expressionValues) && init.w.obs.phi) {
    observed.phi <- getObservedSynthesisRateSet(genome)
    if (ncol(observed.phi) - 1 > 1) {
      # Multiple observed phi sets (first column holds ids): combine them
      # with a row-wise geometric mean.
      observed.phi <- apply(observed.phi[, 2:ncol(observed.phi)], MARGIN = 1, FUN = geom_mean)
    } else {
      observed.phi <- observed.phi[, 2]
    }
    parameter$initializeSynthesisRateByList(observed.phi)
  } else if (!is.null(expressionValues) && !init.w.obs.phi) {
    parameter$initializeSynthesisRateByList(expressionValues)
  } else {
    stop("expressionValues is not NULL and init.w.obs.phi == TRUE. Please choose only one of these options.")
  }
  ## TODO (Cedric): use init.csp.variance to set initial proposal width for CSP parameters
  return(parameter)
}
#Called from initializeParameterObject.
initializePANSEParameterObject <- function(genome, sphi, numMixtures, geneAssignment,
                                           expressionValues = NULL, split.serine = TRUE,
                                           mixture.definition = "allUnique",
                                           mixture.definition.matrix = NULL,
                                           init.csp.variance, init.w.obs.phi = FALSE) {
  # Construct and initialize an Rcpp PANSEParameter object: mixture structure
  # and gene-specific synthesis rates (phi).
  if (is.null(mixture.definition.matrix)) {
    # Keyword constructor: mixture structure given by a keyword ("allUnique", ...).
    parameter <- new(PANSEParameter, as.vector(sphi), numMixtures, geneAssignment,
                     split.serine, mixture.definition)
  } else {
    # Matrix constructor: flatten the two-column definition matrix into one vector.
    mixture.definition <- c(mixture.definition.matrix[, 1],
                            mixture.definition.matrix[, 2])
    parameter <- new(PANSEParameter, as.vector(sphi), geneAssignment,
                     mixture.definition, split.serine)
  }
  # Initialize phi from exactly one of three mutually exclusive sources:
  # the genome, the observed phi values attached to the genome, or a
  # user-supplied list of expression values.
  if (is.null(expressionValues) && !init.w.obs.phi) {
    parameter$initializeSynthesisRateByGenome(genome, mean(sphi))
  } else if (is.null(expressionValues) && init.w.obs.phi) {
    observed.phi <- getObservedSynthesisRateSet(genome)
    if (ncol(observed.phi) - 1 > 1) {
      # Multiple observed phi sets (first column holds ids): combine them
      # with a row-wise geometric mean.
      observed.phi <- apply(observed.phi[, 2:ncol(observed.phi)], MARGIN = 1, FUN = geom_mean)
    } else {
      observed.phi <- observed.phi[, 2]
    }
    parameter$initializeSynthesisRateByList(observed.phi)
  } else if (!is.null(expressionValues) && !init.w.obs.phi) {
    parameter$initializeSynthesisRateByList(expressionValues)
  } else {
    stop("expressionValues is not NULL and init.w.obs.phi == TRUE. Please choose only one of these options.")
  }
  return(parameter)
}
#Called from initializeParameterObject.
initializeFONSEParameterObject <- function(genome, sphi, numMixtures,
                                           geneAssignment, expressionValues = NULL,
                                           split.serine = TRUE,
                                           mixture.definition = "allUnique",
                                           mixture.definition.matrix = NULL,
                                           init.csp.variance, init.w.obs.phi = FALSE) {
  # Construct and initialize an Rcpp FONSEParameter object: mixture structure,
  # gene-specific synthesis rates (phi), and the codon-specific parameters
  # with their proposal covariance matrices.
  if (is.null(mixture.definition.matrix)) {
    # Keyword constructor: mixture structure given by a keyword ("allUnique", ...).
    parameter <- new(FONSEParameter, as.vector(sphi), numMixtures, geneAssignment,
                     split.serine, mixture.definition)
  } else {
    # Matrix constructor: flatten the two-column definition matrix into one vector.
    mixture.definition <- c(mixture.definition.matrix[, 1],
                            mixture.definition.matrix[, 2])
    parameter <- new(FONSEParameter, as.vector(sphi), geneAssignment,
                     mixture.definition, split.serine)
  }
  # Initialize phi from exactly one of three mutually exclusive sources:
  # the genome, the observed phi values attached to the genome, or a
  # user-supplied list of expression values.
  if (is.null(expressionValues) && !init.w.obs.phi) {
    parameter$initializeSynthesisRateByGenome(genome, mean(sphi))
  } else if (is.null(expressionValues) && init.w.obs.phi) {
    observed.phi <- getObservedSynthesisRateSet(genome)
    if (ncol(observed.phi) - 1 > 1) {
      # Multiple observed phi sets (first column holds ids): combine them
      # with a row-wise geometric mean.
      observed.phi <- apply(observed.phi[, 2:ncol(observed.phi)], MARGIN = 1, FUN = geom_mean)
    } else {
      observed.phi <- observed.phi[, 2]
    }
    parameter$initializeSynthesisRateByList(observed.phi)
  } else if (!is.null(expressionValues) && !init.w.obs.phi) {
    parameter$initializeSynthesisRateByList(expressionValues)
  } else {
    stop("expressionValues is not NULL and init.w.obs.phi == TRUE. Please choose only one of these options.")
  }
  # Also initializes the mutation and selection parameters (see
  # initializeCovarianceMatrices).
  parameter <- initializeCovarianceMatrices(parameter, genome, numMixtures,
                                            geneAssignment, init.csp.variance)
  return(parameter)
}
#' Return Codon Specific Parameter estimates as data.frame (or write to csv)
#'
#' @param parameter parameter an object created by \code{initializeParameterObject}.
#'
#' @param filename Posterior estimates will be written to file instead of returned if specified (format: csv).
#'
#' @param CSP which type of codon specific parameter should be returned (mutation (default) or selection)
#'
#' @param mixture estimates for which mixture should be returned
#'
#' @param samples The number of samples used for the posterior estimates.
#'
#' @return returns a data.frame with the posterior estimates of the models
#' codon specific parameters or writes it directly to a csv file if \code{filename} is specified
#'
#' @description \code{getCSPEstimates} returns the codon specific
#' parameter estimates for a given parameter and mixture or write it to a csv file.
#'
#' @examples
#' genome_file <- system.file("extdata", "genome.fasta", package = "AnaCoDa")
#'
#' genome <- initializeGenomeObject(file = genome_file)
#' sphi_init <- c(1,1)
#' numMixtures <- 2
#' geneAssignment <- sample(1:2, length(genome), replace = TRUE) # random assignment to mixtures
#' parameter <- initializeParameterObject(genome = genome, sphi = sphi_init,
#' num.mixtures = numMixtures,
#' gene.assignment = geneAssignment,
#' mixture.definition = "allUnique")
#' model <- initializeModelObject(parameter = parameter, model = "ROC")
#' samples <- 2500
#' thinning <- 50
#' adaptiveWidth <- 25
#' mcmc <- initializeMCMCObject(samples = samples, thinning = thinning,
#' adaptive.width=adaptiveWidth, est.expression=TRUE,
#' est.csp=TRUE, est.hyper=TRUE, est.mix = TRUE)
#' divergence.iteration <- 10
#' \dontrun{
#' runMCMC(mcmc = mcmc, genome = genome, model = model,
#' ncores = 4, divergence.iteration = divergence.iteration)
#'
#' ## return estimates for codon specific parameters
#' csp_mat <- getCSPEstimates(parameter, CSP="Mutation")
#'
#' # write the result directly to the filesystem as a csv file. No values are returned
#' getCSPEstimates(parameter, , filename=file.path(tempdir(), "csp_out.csv"), CSP="Mutation")
#'
#' }
#'
getCSPEstimates <- function(parameter, filename=NULL, CSP="Mutation", mixture = 1, samples = 10){
  # Collect posterior mean and 2.5%/97.5% quantiles of the requested codon
  # specific parameter for every codon, then return (or write) a data.frame.
  Amino_Acid <- c()
  Value <- c()
  Codon <- c()
  quantile_list <- vector("list")
  if (inherits(parameter, "Rcpp_ROCParameter") || inherits(parameter, "Rcpp_FONSEParameter")){
    # ROC/FONSE expose mutation (index 0) and selection (index 1) estimates,
    # parameterized relative to a reference codon (hence AAToCodon(aa, TRUE)).
    paramType <- switch(CSP,
                        "Mutation" = 0,
                        "Selection" = 1,
                        stop("Unknown parameter type given with argument: CSP"))
    names.aa <- aminoAcids()
    for(aa in names.aa){
      # Single-codon amino acids (M, W) and stop (X) carry no CSP estimates.
      if(aa == "M" || aa == "W" || aa == "X") next
      codons <- AAToCodon(aa, TRUE)
      for(i in seq_along(codons)){
        Amino_Acid <- c(Amino_Acid, aa)
        Codon <- c(Codon, codons[i])
        Value <- c(Value, parameter$getCodonSpecificPosteriorMean(mixture, samples, codons[i], paramType, TRUE))
        quantile_list <- c(quantile_list, parameter$getCodonSpecificQuantile(mixture, samples, codons[i], paramType, c(0.025, 0.975), TRUE))
      }
    }
  }
  else if (inherits(parameter, "Rcpp_PAParameter") || inherits(parameter, "Rcpp_PANSEParameter")){
    # PA/PANSE expose alpha (index 0) and lambda prime (index 1) per codon.
    # (The two branches were identical in the original and are merged here.)
    paramType <- switch(CSP,
                        "Alpha" = 0,
                        "Lambda Prime" = 1,
                        stop("Unknown parameter type given with argument: CSP"))
    groupList <- parameter$getGroupList()
    for(i in seq_along(groupList)){
      aa <- codonToAA(groupList[i])
      Codon <- c(Codon, groupList[i])
      Amino_Acid <- c(Amino_Acid, aa)
      # BUG FIX: the original indexed an undefined `codons` vector here;
      # the codon for this iteration is groupList[i].
      Value <- c(Value, parameter$getCodonSpecificPosteriorMean(mixture, samples, groupList[i], paramType, FALSE))
      quantile_list <- c(quantile_list, parameter$getCodonSpecificQuantile(mixture, samples, groupList[i], paramType, c(0.025, 0.975), FALSE))
    }
  }
  else{
    stop("Unknown object provided with argument: parameter")
  }
  # Quantiles come back as a flat list; fold into a 2-row matrix
  # (row 1 = lower bound, row 2 = upper bound).
  quantile_list <- matrix(unlist(quantile_list), nrow = 2)
  data <- data.frame(Amino_Acid, Codon, Value, Lower=quantile_list[1,], Upper=quantile_list[2,])
  colnames(data) <- c("AA", "Codon", "Posterior", "0.025%", "0.975%")
  if(is.null(filename))
  {
    return(data)
  }else {
    # Write to disk instead of returning when a filename is given.
    write.csv(data, file = filename, row.names = FALSE, quote=FALSE)
  }
}
#' Calculate Selection coefficients
#'
#' \code{getSelectionCoefficients} calculates the selection coefficient of each codon in each gene.
#'
#' @param genome A genome object initialized with
#' \code{\link{initializeGenomeObject}} to add observed expression data.
#'
#' @param parameter an object created by \code{initializeParameterObject}.
#'
#' @param samples The number of samples used for the posterior estimates.
#'
#' @return A matrix with selection coefficients.
#'
#' @examples
#' genome_file <- system.file("extdata", "genome.fasta", package = "AnaCoDa")
#'
#' genome <- initializeGenomeObject(file = genome_file)
#' sphi_init <- 1
#' numMixtures <- 1
#' geneAssignment <- rep(1, length(genome))
#' parameter <- initializeParameterObject(genome = genome, sphi = sphi_init,
#' num.mixtures = numMixtures,
#' gene.assignment = geneAssignment,
#' mixture.definition = "allUnique")
#' model <- initializeModelObject(parameter = parameter, model = "ROC")
#' samples <- 2500
#' thinning <- 50
#' adaptiveWidth <- 25
#' mcmc <- initializeMCMCObject(samples = samples, thinning = thinning,
#' adaptive.width=adaptiveWidth, est.expression=TRUE,
#' est.csp=TRUE, est.hyper=TRUE, est.mix = TRUE)
#' divergence.iteration <- 10
#' \dontrun{
#' runMCMC(mcmc = mcmc, genome = genome, model = model,
#' ncores = 4, divergence.iteration = divergence.iteration)
#'
#' ## return estimates for selection coefficients s for each codon in each gene
#' selection.coefficients <- getSelectionCoefficients(genome = genome,
#' parameter = parameter, samples = 1000)
#' }
#'
getSelectionCoefficients <- function(genome, parameter, samples = 100)
{
  # Per-gene, per-codon selection coefficients computed by the C++ object,
  # labeled with codon names (columns) and gene ids (rows).
  sel.coef <- parameter$calculateSelectionCoefficients(samples)
  grouplist <- parameter$getGroupList()
  codon.names <- NULL
  if (inherits(parameter, "Rcpp_ROCParameter") || inherits(parameter, "Rcpp_FONSEParameter"))
  {
    # ROC/FONSE group by amino acid: expand each to its codons.
    for (aa in grouplist)
      codon.names <- c(codon.names, AAToCodon(aa))
    # The matrix is too large as it could store M and W, which are not used here.
    sel.coef <- sel.coef[, -c(60, 61)]
  } else {
    # PA/PANSE group directly by codon.
    codon.names <- grouplist
  }
  gene.names <- getNames(genome)
  colnames(sel.coef) <- codon.names
  rownames(sel.coef) <- gene.names
  return(sel.coef)
}
# Uses a multinomial logistic regression to estimate the codon specific parameters for every category.
# Delta M is the intercept - and Delta eta is the slope of the regression.
# The package VGAM is used to perform the regression.
getCSPbyLogit <- function(codonCounts, phi, coefstart = NULL, x.arg = FALSE,
                          y.arg = FALSE, qr.arg = FALSE){
  # Drop genes with a zero total count for this amino acid; they carry no
  # information about codon usage and would break the multinomial fit.
  keep <- rowSums(codonCounts) != 0
  # Multinomial logistic regression of codon counts on expression (phi).
  # The fit returns Delta M (intercepts) and Delta t (slopes), plus internals
  # not used by the callers.
  fit <- VGAM::vglm(codonCounts[keep, ] ~ phi[keep],
                    VGAM::multinomial, coefstart = coefstart,
                    x.arg = x.arg, y.arg = y.arg, qr.arg = qr.arg)
  # Negate to convert delta.t to delta.eta.
  est <- -fit@coefficients
  list(coefficients = est,
       coef.mat = matrix(est, nrow = 2, byrow = TRUE),
       R = fit@R)
}
#TODO: Need comments explaining what is going on
subMatrices <- function(M, r, c){
  # Assign every element of M to an r-by-c tile. Tiles are numbered 1..N in
  # row-major order over the tile grid; the result is a matrix of tile
  # indices with the same dimensions as M.
  block.row <- (row(M) - 1) %/% r + 1
  block.col <- (col(M) - 1) %/% c + 1
  (block.row - 1) * max(block.col) + block.col
}
#TODO: Need comments explaining what is going on
splitMatrix <- function(M, r, c){
  # Split M into its r-by-c tiles (numbered by subMatrices) and return them
  # as a list of r-row matrices, ordered row-major over the tile grid.
  tile.id <- subMatrices(M, r, c)
  n.tiles <- prod(dim(M)) / r / c
  # Elements of each tile are pulled out in column-major order, which is
  # exactly the order matrix() expects when rebuilding the tile.
  tile.values <- lapply(seq_len(n.tiles), function(k) M[tile.id == k])
  lapply(seq_len(n.tiles), function(k) matrix(tile.values[[k]], nrow = r))
}
#' extracts an object of traces from a parameter object.
#'
#' @param parameter A Parameter object that corresponds to one of the model types.
#'
#' @return trace Returns an object of type Trace extracted from the given parameter object
#'
#' @examples
#' genome_file <- system.file("extdata", "genome.fasta", package = "AnaCoDa")
#'
#' genome <- initializeGenomeObject(file = genome_file)
#' sphi_init <- c(1,1)
#' numMixtures <- 2
#' geneAssignment <- sample(1:2, length(genome), replace = TRUE) # random assignment to mixtures
#' parameter <- initializeParameterObject(genome = genome, sphi = sphi_init,
#' num.mixtures = numMixtures,
#' gene.assignment = geneAssignment,
#' mixture.definition = "allUnique")
#'
#' trace <- getTrace(parameter) # empty trace object since no MCMC was perfomed
#'
getTrace <- function(parameter){
  # Simple accessor: delegate to the underlying C++ parameter object.
  parameter$getTraceObject()
}
#######
### CURRENTLY NOT EXPOSED
#######
#' Initialize Covariance Matrices
#'
#' @param parameter A Parameter object that corresponds to one of the model types.
#' Valid values are "ROC", "PA", and "FONSE".
#'
#' @param genome An object of type Genome necessary for the initialization of the Parameter object.
#'
#' @param numMixtures The number of mixture elements for the underlying mixture distribution (numMixtures > 0).
#'
#' @param geneAssignment A vector holding the initial mixture assignment for each gene.
#' The vector length has to equal the number of genes in the genome.
#' Valid values for the vector range from 1 to numMixtures.
#' It is possible but not advised to leave a mixture element empty.
#'
#' @param init.csp.variance initial proposal variance for codon specific parameter, default is 0.0025.
#'
#' @return parameter Returns the Parameter argument, now modified with initialized mutation, selection, and covariance matrices.
#'
# Also initializes the mutation and selection parameters
initializeCovarianceMatrices <- function(parameter, genome, numMixtures, geneAssignment, init.csp.variance = 0.0025) {
  # For every multi-codon amino acid: initialize mutation and selection
  # parameters per mixture via a multinomial logit fit, and install a
  # diagonal proposal covariance matrix scaled by init.csp.variance.
  # (A large block of dead, commented-out covariance-assembly code that was
  # overwritten by the diagonal matrix anyway has been removed; see VCS
  # history if the full covariance construction is ever revived.)
  numMutationCategory <- parameter$numMutationCategories
  numSelectionCategory <- parameter$numSelectionCategories
  # phi values are identical across mixtures at initialization, so mixture 1 suffices.
  phi <- parameter$getCurrentSynthesisRateForMixture(1)
  names.aa <- aminoAcids()
  for (aa in names.aa) {
    # Single-codon amino acids (M, W) and stop (X) have no codon-specific parameters.
    if (aa == "M" || aa == "W" || aa == "X") next
    codonCounts <- getCodonCountsForAA(aa, genome)
    numCodons <- dim(codonCounts)[2] - 1  # ignore column with gene ids
    #-----------------------------------------
    # TODO WORKS CURRENTLY ONLY FOR ALLUNIQUE!
    #-----------------------------------------
    for (mixElement in 1:numMixtures) {
      # Fit a multinomial logit of codon counts on phi for the genes assigned
      # to this mixture; row 1 of coef.mat (intercepts) initializes mutation,
      # row 2 (slopes) initializes selection (see getCSPbyLogit).
      idx <- geneAssignment == mixElement
      csp <- getCSPbyLogit(codonCounts[idx, ], phi[idx])
      parameter$initMutation(csp$coef.mat[1, ], mixElement, aa)
      parameter$initSelection(csp$coef.mat[2, ], mixElement, aa)
    }
    # One covariance matrix per amino acid, shared by all mixtures.
    # Currently only variances are used (diagonal matrix).
    compl.covMat <- diag((numMutationCategory + numSelectionCategory) * numCodons) * init.csp.variance
    parameter$initCovarianceMatrix(compl.covMat, aa)
  }
  return(parameter)
}
#' Returns mixture assignment estimates for each gene
#'
#' @param parameter an object created by \code{initializeParameterObject}
#'
#' @param gene.index an integer or vector of integers representing the gene(s) of interest.
#'
#' @param samples number of samples for the posterior estimate
#'
#' @return returns a vector with the mixture assignment of each gene corresponding to \code{gene.index} in the same order as the genome.
#'
#' @description Posterior estimates for the mixture assignment of specified genes
#'
#' @details The returned vector is unnamed as gene ids are only stored in the \code{genome} object,
#' but the \code{gene.index} vector can be used to match the assignment to the genome.
#'
#' @examples
#' genome_file <- system.file("extdata", "genome.fasta", package = "AnaCoDa")
#'
#' genome <- initializeGenomeObject(file = genome_file)
#' sphi_init <- c(1,1)
#' numMixtures <- 2
#' geneAssignment <- sample(1:2, length(genome), replace = TRUE) # random assignment to mixtures
#' parameter <- initializeParameterObject(genome = genome, sphi = sphi_init,
#' num.mixtures = numMixtures,
#' gene.assignment = geneAssignment,
#' mixture.definition = "allUnique")
#' model <- initializeModelObject(parameter = parameter, model = "ROC")
#' samples <- 2500
#' thinning <- 50
#' adaptiveWidth <- 25
#' mcmc <- initializeMCMCObject(samples = samples, thinning = thinning, adaptive.width=adaptiveWidth,
#' est.expression=TRUE, est.csp=TRUE, est.hyper=TRUE, est.mix = TRUE)
#' divergence.iteration <- 10
#' \dontrun{
#' runMCMC(mcmc = mcmc, genome = genome, model = model,
#' ncores = 4, divergence.iteration = divergence.iteration)
#'
#' # get the mixture assignment for all genes
#' mixAssign <- getMixtureAssignmentEstimate(parameter = parameter,
#' gene.index = 1:length(genome), samples = 1000)
#'
#' # get the mixture assignment for a subsample
#' mixAssign <- getMixtureAssignmentEstimate(parameter = parameter,
#' gene.index = 5:100, samples = 1000)
#' # or
#' mixAssign <- getMixtureAssignmentEstimate(parameter = parameter,
#' gene.index = c(10, 30:50, 3, 90), samples = 1000)
#' }
#'
getMixtureAssignmentEstimate <- function(parameter, gene.index, samples)
{
  # Query the posterior mixture assignment gene by gene, then flatten the
  # per-gene results into an unnamed vector (order follows gene.index).
  estimates <- lapply(gene.index, function(g) {
    parameter$getEstimatedMixtureAssignmentForGene(samples, g)
  })
  return(unlist(estimates))
}
#' Returns the estimated phi posterior for a gene
#'
#' @param parameter an object created by \code{initializeParameterObject}.
#'
#' @param gene.index an integer or vector of integers representing the gene(s) of interest.
#'
#' @param samples number of samples for the posterior estimate
#'
#' @param quantiles vector of quantiles, (default: c(0.025, 0.975))
#'
#' @return returns a matrix with the posterior mean expression estimates (linear and log10 scale), their standard errors, and the requested quantiles for each gene corresponding to \code{gene.index}, in the same order as the genome.
#'
#' @description Posterior estimates for the phi value of specified genes
#'
#' @details The returned vector is unnamed as gene ids are only stored in the \code{genome} object,
#' but the \code{gene.index} vector can be used to match the assignment to the genome.
#'
#' @examples
#' genome_file <- system.file("extdata", "genome.fasta", package = "AnaCoDa")
#'
#' genome <- initializeGenomeObject(file = genome_file)
#' sphi_init <- c(1,1)
#' numMixtures <- 2
#' geneAssignment <- sample(1:2, length(genome), replace = TRUE) # random assignment to mixtures
#' parameter <- initializeParameterObject(genome = genome, sphi = sphi_init,
#' num.mixtures = numMixtures,
#' gene.assignment = geneAssignment,
#' mixture.definition = "allUnique")
#' model <- initializeModelObject(parameter = parameter, model = "ROC")
#' samples <- 2500
#' thinning <- 50
#' adaptiveWidth <- 25
#' mcmc <- initializeMCMCObject(samples = samples, thinning = thinning,
#' adaptive.width=adaptiveWidth, est.expression=TRUE,
#' est.csp=TRUE, est.hyper=TRUE, est.mix = TRUE)
#' divergence.iteration <- 10
#' \dontrun{
#' runMCMC(mcmc = mcmc, genome = genome, model = model,
#' ncores = 4, divergence.iteration = divergence.iteration)
#'
#' # get the estimated expression values for all genes based on the mixture
#' # they are assigned to at each step
#' estimatedExpression <- getExpressionEstimates(parameter, 1:length(genome), 1000)
#' }
#'
getExpressionEstimates <- function(parameter, gene.index, samples, quantiles=c(0.025, 0.975))
{
  # Build a matrix of posterior expression summaries per gene: means, standard
  # errors, and quantiles, each on the linear and the log10 scale.
  # Local helpers iterate over gene.index; `log10scale` toggles the scale.
  posterior.mean <- function(log10scale) {
    unlist(lapply(gene.index, function(g) {
      parameter$getSynthesisRatePosteriorMeanForGene(samples, g, log10scale)
    }))
  }
  std.error <- function(log10scale) {
    variances <- unlist(lapply(gene.index, function(g) {
      parameter$getSynthesisRateVarianceForGene(samples, g, TRUE, log10scale)
    }))
    sqrt(variances) / samples
  }
  quantile.rows <- function(log10scale) {
    rows <- lapply(gene.index, function(g) {
      parameter$getExpressionQuantile(samples, g, quantiles, log10scale)
    })
    do.call(rbind, rows)
  }
  # Column order matches the original implementation exactly.
  expr.mat <- cbind(posterior.mean(FALSE), posterior.mean(TRUE),
                    std.error(FALSE), std.error(TRUE),
                    quantile.rows(FALSE), quantile.rows(TRUE))
  colnames(expr.mat) <- c("PHI", "log10.PHI", "Std.Error", "log10.Std.Error",
                          quantiles, paste("log10.", quantiles, sep=""))
  return(expr.mat)
}
#' Write Parameter Object to a File
#'
#' @param parameter an object created by \code{initializeParameterObject}.
#'
#' @param file A filename that where the data will be stored.
#'
#' @return This function has no return value.
#'
#' @description \code{writeParameterObject} will write the parameter object as binary to the filesystem
#'
#' @details As Rcpp object are not serializable with the default R \code{save} function,
#' therefore this custom save function is provided (see \link{loadParameterObject}).
#'
#' @examples
#' \dontrun{
#'
#' genome_file <- system.file("extdata", "genome.fasta", package = "AnaCoDa")
#'
#' genome <- initializeGenomeObject(file = genome_file)
#' sphi_init <- c(1,1)
#' numMixtures <- 2
#' geneAssignment <- sample(1:2, length(genome), replace = TRUE) # random assignment to mixtures
#' parameter <- initializeParameterObject(genome = genome, sphi = sphi_init,
#' num.mixtures = numMixtures,
#' gene.assignment = geneAssignment,
#' mixture.definition = "allUnique")
#'
#' ## writing an empty parameter object as the runMCMC routine was not called yet
#' writeParameterObject(parameter = parameter, file = file.path(tempdir(), "file.Rda"))
#'
#' }
#'
writeParameterObject <- function(parameter, file)
{
  # S3 generic: dispatch on the concrete Rcpp parameter class
  # (ROC, PA, PANSE, or FONSE) to serialize the object to `file`.
  UseMethod("writeParameterObject", parameter)
}
# extracts traces and parameter information from the base class Parameter
extractBaseInfo <- function(parameter){
  # Gather every trace kept by the base Parameter class plus the bookkeeping
  # values needed to restore the object later (used by writeParameterObject.*).
  trace <- parameter$getTraceObject()
  list(
    stdDevSynthesisRateTraces         = trace$getStdDevSynthesisRateTraces(),
    stdDevSynthesisRateAcceptRatTrace = trace$getStdDevSynthesisRateAcceptanceRateTrace(),
    synthRateTrace                    = trace$getSynthesisRateTrace(),
    synthAcceptRatTrace               = trace$getSynthesisRateAcceptanceRateTrace(),
    mixAssignTrace                    = trace$getMixtureAssignmentTrace(),
    mixProbTrace                      = trace$getMixtureProbabilitiesTrace(),
    codonSpecificAcceptRatTrace       = trace$getCodonSpecificAcceptanceRateTrace(),
    numMix                            = parameter$numMixtures,
    numMut                            = parameter$numMutationCategories,
    numSel                            = parameter$numSelectionCategories,
    categories                        = parameter$getCategories(),
    curMixAssignment                  = parameter$getMixtureAssignment(),
    lastIteration                     = parameter$getLastIteration(),
    grouplist                         = parameter$getGroupList()
  )
}
#called from "writeParameterObject."
writeParameterObject.Rcpp_ROCParameter <- function(parameter, file){
  # Serialize a ROC parameter object: base info plus ROC-specific mutation
  # and selection parameters, their traces, and the observed-phi traces.
  paramBase <- extractBaseInfo(parameter)
  currentMutation <- parameter$currentMutationParameter
  currentSelection <- parameter$currentSelectionParameter
  proposedMutation <- parameter$proposedMutationParameter
  proposedSelection <- parameter$proposedSelectionParameter
  model <- "ROC"
  mutationPrior <- parameter$getMutationPriorStandardDeviation()
  trace <- parameter$getTraceObject()
  mutationTrace <- trace$getCodonSpecificParameterTrace(0)
  selectionTrace <- trace$getCodonSpecificParameterTrace(1)
  synthesisOffsetAcceptRatTrace <- trace$getSynthesisOffsetAcceptanceRateTrace()
  synthesisOffsetTrace <- trace$getSynthesisOffsetTrace()
  observedSynthesisNoiseTrace <- trace$getObservedSynthesisNoiseTrace()
  # An empty offset trace means the run was performed without observed phi.
  withPhi <- length(synthesisOffsetTrace) != 0
  save(list = c("paramBase", "currentMutation", "currentSelection",
                "proposedMutation", "proposedSelection", "model",
                "mutationPrior", "mutationTrace", "selectionTrace",
                "synthesisOffsetAcceptRatTrace", "synthesisOffsetTrace",
                "observedSynthesisNoiseTrace", "withPhi"),
       file=file)
}
#called from "writeParameterObject."
writeParameterObject.Rcpp_PAParameter <- function(parameter, file){
  # Serialize a PA parameter object: base info plus PA-specific alpha and
  # lambda prime parameters and their traces.
  paramBase <- extractBaseInfo(parameter)
  currentAlpha <- parameter$currentAlphaParameter
  currentLambdaPrime <- parameter$currentLambdaPrimeParameter
  proposedAlpha <- parameter$proposedAlphaParameter
  proposedLambdaPrime <- parameter$proposedLambdaPrimeParameter
  model <- "PA"
  trace <- parameter$getTraceObject()
  alphaTrace <- trace$getCodonSpecificParameterTrace(0)
  lambdaPrimeTrace <- trace$getCodonSpecificParameterTrace(1)
  save(list = c("paramBase", "currentAlpha", "currentLambdaPrime", "proposedAlpha",
                "proposedLambdaPrime", "model", "alphaTrace", "lambdaPrimeTrace"),
       file=file)
}
#called from "writeParameterObject."
writeParameterObject.Rcpp_PANSEParameter <- function(parameter, file){
  # Serialize a PANSE parameter object: base info plus PANSE-specific alpha
  # and lambda prime parameters and their traces.
  paramBase <- extractBaseInfo(parameter)
  currentAlpha <- parameter$currentAlphaParameter
  currentLambdaPrime <- parameter$currentLambdaPrimeParameter
  proposedAlpha <- parameter$proposedAlphaParameter
  proposedLambdaPrime <- parameter$proposedLambdaPrimeParameter
  model <- "PANSE"
  trace <- parameter$getTraceObject()
  alphaTrace <- trace$getCodonSpecificParameterTrace(0)
  lambdaPrimeTrace <- trace$getCodonSpecificParameterTrace(1)
  save(list = c("paramBase", "currentAlpha", "currentLambdaPrime", "proposedAlpha",
                "proposedLambdaPrime", "model", "alphaTrace", "lambdaPrimeTrace"),
       file=file)
}
#called from "writeParameterObject."
writeParameterObject.Rcpp_FONSEParameter <- function(parameter, file)
{
  # Serialize a FONSE parameter object: base info plus FONSE-specific
  # mutation and selection parameters and their traces.
  paramBase <- extractBaseInfo(parameter)
  currentMutation <- parameter$currentMutationParameter
  currentSelection <- parameter$currentSelectionParameter
  model <- "FONSE"
  mutationPrior <- parameter$getMutationPriorStandardDeviation()
  trace <- parameter$getTraceObject()
  mutationTrace <- trace$getCodonSpecificParameterTrace(0)
  selectionTrace <- trace$getCodonSpecificParameterTrace(1)
  save(list = c("paramBase", "currentMutation", "currentSelection",
                "model", "mutationPrior", "mutationTrace", "selectionTrace"),
       file=file)
}
#' Load Parameter Object
#'
#' @param files A list of parameter filenames to be loaded. If multiple files are given,
#' the parameter objects will be concatenated in the order provided
#'
#' @return Returns an initialized Parameter object.
#'
#' @description \code{loadParameterObject} will load a parameter object from the filesystem
#'
#' @details The function loads one or multiple files. In the case of multiple file, e.g. due to the use of check pointing, the files will
#' be concatenated to one parameter object. See \link{writeParameterObject} for the writing of parameter objects
#'
#' @examples
#' \dontrun{
#' # load a single parameter object
#' parameter <- loadParameterObject("parameter.Rda")
#'
#' # load and concatenate multiple parameter object
#' parameter <- loadParameterObject(c("parameter1.Rda", "parameter2.Rda"))
#' }
#'
loadParameterObject <- function(files)
{
  # First pass: verify that every file was written for the same model type.
  firstModel <- "Invalid model"
  for (i in seq_along(files)) {
    # Load into a scratch environment so the file's variables do not clobber
    # anything in this function's scope.
    tempEnv <- new.env()
    load(file = files[i], envir = tempEnv)
    if (i == 1) {
      firstModel <- tempEnv$model
    } else if (firstModel != tempEnv$model) {
      stop("The models do not match between files")
    }
  }
  # Construct an empty parameter object of the matching class and fill it
  # from the file contents.
  if (firstModel == "ROC") {
    parameter <- new(ROCParameter)
    parameter <- loadROCParameterObject(parameter, files)
  } else if (firstModel == "PA") {
    parameter <- new(PAParameter)
    parameter <- loadPAParameterObject(parameter, files)
  } else if (firstModel == "PANSE") {
    parameter <- new(PANSEParameter)
    parameter <- loadPANSEParameterObject(parameter, files)
  } else if (firstModel == "FONSE") {
    parameter <- new(FONSEParameter)
    parameter <- loadFONSEParameterObject(parameter, files)
  } else {
    stop("File data corrupted")
  }
  return(parameter)
}
#Sets all the common variables in the Parameter objects.
#Loads every save file in `files`, validates that the runs are compatible
#(same categories, mixture counts, genome size, and group list), concatenates
#the traces shared by all parameter types, and writes the combined state back
#into `parameter`. Returns the updated parameter object.
setBaseInfo <- function(parameter, files)
{
  for (i in 1:length(files)) {
    #Each file is loaded into a scratch environment so its saved objects do
    #not clobber this function's locals.
    tempEnv <- new.env();
    load(file = files[i], envir = tempEnv)
    if (i == 1) {
      #First file: initialize every accumulator from its saved state.
      categories <- tempEnv$paramBase$categories
      categories.matrix <- do.call("rbind", tempEnv$paramBase$categories)
      numMixtures <- tempEnv$paramBase$numMix
      numMutationCategories <- tempEnv$paramBase$numMut
      numSelectionCategories <- tempEnv$paramBase$numSel
      mixtureAssignment <- tempEnv$paramBase$curMixAssignment
      lastIteration <- tempEnv$paramBase$lastIteration
      #Traces hold lastIteration + 1 entries (initial state plus samples).
      max <- tempEnv$paramBase$lastIteration + 1
      grouplist <- tempEnv$paramBase$grouplist
      stdDevSynthesisRateTraces <- vector("list", length = numSelectionCategories)
      for (j in 1:numSelectionCategories) {
        stdDevSynthesisRateTraces[[j]] <- tempEnv$paramBase$stdDevSynthesisRateTraces[[j]][1:max]
      }
      stdDevSynthesisRateAcceptanceRateTrace <- tempEnv$paramBase$stdDevSynthesisRateAcceptRatTrace
      synthesisRateTrace <- vector("list", length = numSelectionCategories)
      for (j in 1:numSelectionCategories) {
        for (k in 1:length(tempEnv$paramBase$synthRateTrace[[j]])){
          synthesisRateTrace[[j]][[k]] <- tempEnv$paramBase$synthRateTrace[[j]][[k]][1:max]
        }
      }
      synthesisRateAcceptanceRateTrace <- tempEnv$paramBase$synthAcceptRatTrace
      mixtureAssignmentTrace <- vector("list", length = length(tempEnv$paramBase$mixAssignTrace))
      for (j in 1:length(tempEnv$paramBase$mixAssignTrace)){
        mixtureAssignmentTrace[[j]] <- tempEnv$paramBase$mixAssignTrace[[j]][1:max]
      }
      mixtureProbabilitiesTrace <- c()
      for (j in 1:numMixtures) {
        mixtureProbabilitiesTrace[[j]] <- tempEnv$paramBase$mixProbTrace[[j]][1:max]
      }
      codonSpecificAcceptanceRateTrace <- tempEnv$paramBase$codonSpecificAcceptRatTrace
    } else {
      #Subsequent files: verify compatibility with the first file before
      #appending their traces.
      if (sum(categories.matrix != do.call("rbind", tempEnv$paramBase$categories)) != 0){
        stop("categories is not the same between all files")
      }#end of error check
      if (numMixtures != tempEnv$paramBase$numMix){
        stop("The number of mixtures is not the same between files")
      }
      if (numMutationCategories != tempEnv$paramBase$numMut){
        stop("The number of mutation categories is not the same between files")
      }
      if (numSelectionCategories != tempEnv$paramBase$numSel){
        stop("The number of selection categories is not the same between files")
      }
      if (length(mixtureAssignment) != length(tempEnv$paramBase$curMixAssignment)){
        stop("The length of the mixture assignment is not the same between files.
Make sure the same genome is used on each run.")
      }
      if(length(grouplist) != length(tempEnv$paramBase$grouplist)){
        stop("Number of Amino Acids/Codons is not the same between files.")
      }
      curStdDevSynthesisRateTraces <- tempEnv$paramBase$stdDevSynthesisRateTraces
      curStdDevSynthesisRateAcceptanceRateTrace <- tempEnv$paramBase$stdDevSynthesisRateAcceptRatTrace
      curSynthesisRateTrace <- tempEnv$paramBase$synthRateTrace
      curSynthesisRateAcceptanceRateTrace <- tempEnv$paramBase$synthAcceptRatTrace
      curMixtureAssignmentTrace <- tempEnv$paramBase$mixAssignTrace
      curMixtureProbabilitiesTrace <- tempEnv$paramBase$mixProbTrace
      curCodonSpecificAcceptanceRateTrace <- tempEnv$paramBase$codonSpecificAcceptRatTrace
      lastIteration <- lastIteration + tempEnv$paramBase$lastIteration
      #assuming all checks have passed, time to concatenate traces
      max <- tempEnv$paramBase$lastIteration + 1
      #NOTE(review): combineTwoDimensionalTrace/combineThreeDimensionalTrace
      #modify copies of their arguments (R is copy-on-modify) and their result
      #is not assigned back here, so these calls look like no-ops for the
      #local traces -- confirm whether multi-file concatenation takes effect.
      combineTwoDimensionalTrace(stdDevSynthesisRateTraces, curStdDevSynthesisRateTraces, max)
      size <- length(curStdDevSynthesisRateAcceptanceRateTrace)
      stdDevSynthesisRateAcceptanceRateTrace <- c(stdDevSynthesisRateAcceptanceRateTrace,
                                                  curStdDevSynthesisRateAcceptanceRateTrace[2:size])
      combineThreeDimensionalTrace(synthesisRateTrace, curSynthesisRateTrace, max)
      size <- length(curSynthesisRateAcceptanceRateTrace)
      combineThreeDimensionalTrace(synthesisRateAcceptanceRateTrace, curSynthesisRateAcceptanceRateTrace, size)
      combineTwoDimensionalTrace(mixtureAssignmentTrace, curMixtureAssignmentTrace, max)
      combineTwoDimensionalTrace(mixtureProbabilitiesTrace, curMixtureProbabilitiesTrace, max)
      size <- length(curCodonSpecificAcceptanceRateTrace)
      combineTwoDimensionalTrace(codonSpecificAcceptanceRateTrace, curCodonSpecificAcceptanceRateTrace, size)
    }
  }
  #Push the combined values into the parameter object and its trace object.
  parameter$setCategories(categories)
  parameter$setCategoriesForTrace()
  parameter$numMixtures <- numMixtures
  parameter$numMutationCategories <- numMutationCategories
  parameter$numSelectionCategories <- numSelectionCategories
  #Relies on tempEnv still holding the last loaded file after the loop ends.
  parameter$setMixtureAssignment(tempEnv$paramBase$curMixAssignment) #want the last in the file sequence
  parameter$setLastIteration(lastIteration)
  parameter$setGroupList(grouplist)
  trace <- parameter$getTraceObject()
  trace$setStdDevSynthesisRateTraces(stdDevSynthesisRateTraces)
  trace$setStdDevSynthesisRateAcceptanceRateTrace(stdDevSynthesisRateAcceptanceRateTrace)
  trace$setSynthesisRateTrace(synthesisRateTrace)
  trace$setSynthesisRateAcceptanceRateTrace(synthesisRateAcceptanceRateTrace)
  trace$setMixtureAssignmentTrace(mixtureAssignmentTrace)
  trace$setMixtureProbabilitiesTrace(mixtureProbabilitiesTrace)
  trace$setCodonSpecificAcceptanceRateTrace(codonSpecificAcceptanceRateTrace)
  parameter$setTraceObject(trace)
  return(parameter)
}
#Called from "loadParameterObject."
#Restores ROC-specific state from one or more save files into `parameter`:
#base info (via setBaseInfo), the optional with-phi traces, the mutation and
#selection codon-specific parameter traces, and the current/proposed mutation
#and selection values taken from the last file loaded.
loadROCParameterObject <- function(parameter, files)
{
  parameter <- setBaseInfo(parameter, files)
  for (i in 1:length(files)){
    tempEnv <- new.env();
    load(file = files[i], envir = tempEnv)
    numMutationCategories <- tempEnv$paramBase$numMut
    numSelectionCategories <- tempEnv$paramBase$numSel
    #Traces hold lastIteration + 1 entries (initial state plus samples).
    max <- tempEnv$paramBase$lastIteration + 1
    if (i == 1){
      #First file: initialize the trace accumulators. withPhi is taken from
      #the first file and must agree across all files (checked below).
      withPhi <- tempEnv$withPhi
      if (withPhi){
        phiGroups <- length(tempEnv$synthesisOffsetTrace)
        synthesisOffsetTrace <- c()
        for (j in 1:phiGroups) {
          synthesisOffsetTrace[[j]] <- tempEnv$synthesisOffsetTrace[[j]][1:max]
        }
        synthesisOffsetAcceptanceRateTrace <- tempEnv$synthesisOffsetAcceptRatTrace
        observedSynthesisNoiseTrace <- c()
        for (j in 1:phiGroups) {
          observedSynthesisNoiseTrace[[j]] <- tempEnv$observedSynthesisNoiseTrace[[j]][1:max]
        }
        #need number of phi groups, not the number of mixtures apparently.
      }else {
        #Without observed phi data these traces stay empty.
        synthesisOffsetTrace <- c()
        synthesisOffsetAcceptanceRateTrace <- c()
        observedSynthesisNoiseTrace <- c()
      }
      codonSpecificParameterTraceMut <- vector("list", length=numMutationCategories)
      for (j in 1:numMutationCategories) {
        codonSpecificParameterTraceMut[[j]] <- vector("list", length=length(tempEnv$mutationTrace[[j]]))
        for (k in 1:length(tempEnv$mutationTrace[[j]])){
          codonSpecificParameterTraceMut[[j]][[k]] <- tempEnv$mutationTrace[[j]][[k]][1:max]
          #codonSpecificParameterTraceSel[[j]][[k]] <- tempEnv$selectionTrace[[j]][[k]][1:max]
        }
      }
      codonSpecificParameterTraceSel <- vector("list", length=numSelectionCategories)
      for (j in 1:numSelectionCategories) {
        codonSpecificParameterTraceSel[[j]] <- vector("list", length=length(tempEnv$selectionTrace[[j]]))
        for (k in 1:length(tempEnv$selectionTrace[[j]])){
          #codonSpecificParameterTraceMut[[j]][[k]] <- tempEnv$mutationTrace[[j]][[k]][1:max]
          codonSpecificParameterTraceSel[[j]][[k]] <- tempEnv$selectionTrace[[j]][[k]][1:max]
        }
      }
    }else{
      #Subsequent files: append their traces to the accumulators.
      curSynthesisOffsetTrace <- tempEnv$synthesisOffsetTrace
      curSynthesisOffsetAcceptanceRateTrace <- tempEnv$synthesisOffsetAcceptRatTrace
      curObservedSynthesisNoiseTrace <- tempEnv$observedSynthesisNoiseTrace
      curCodonSpecificParameterTraceMut <- tempEnv$mutationTrace
      curCodonSpecificParameterTraceSel <- tempEnv$selectionTrace
      if (withPhi != tempEnv$withPhi){
        stop("Runs do not match in concern in with.phi")
      }
      #NOTE(review): the combine* helpers modify copies of their arguments
      #(R is copy-on-modify) and the result is not assigned back, so these
      #calls appear to be no-ops -- confirm multi-file concatenation works.
      if (withPhi){
        combineTwoDimensionalTrace(synthesisOffsetTrace, curSynthesisOffsetTrace, max)
        size <- length(curSynthesisOffsetAcceptanceRateTrace)
        combineTwoDimensionalTrace(synthesisOffsetAcceptanceRateTrace, curSynthesisOffsetAcceptanceRateTrace, size)
        combineTwoDimensionalTrace(observedSynthesisNoiseTrace, curObservedSynthesisNoiseTrace, max)
      }
      combineThreeDimensionalTrace(codonSpecificParameterTraceMut, curCodonSpecificParameterTraceMut, max)
      combineThreeDimensionalTrace(codonSpecificParameterTraceSel, curCodonSpecificParameterTraceSel, max)
    }#end of if-else
  }#end of for loop (files)
  trace <- parameter$getTraceObject()
  trace$setSynthesisOffsetTrace(synthesisOffsetTrace)
  trace$setSynthesisOffsetAcceptanceRateTrace(synthesisOffsetAcceptanceRateTrace)
  trace$setObservedSynthesisNoiseTrace(observedSynthesisNoiseTrace)
  trace$setCodonSpecificParameterTrace(codonSpecificParameterTraceMut, 0)
  trace$setCodonSpecificParameterTrace(codonSpecificParameterTraceSel, 1)
  #Current/proposed CSP values come from the last file loaded (tempEnv
  #persists after the loop).
  parameter$currentMutationParameter <- tempEnv$currentMutation
  parameter$currentSelectionParameter <- tempEnv$currentSelection
  parameter$proposedMutationParameter <- tempEnv$proposedMutation
  parameter$proposedSelectionParameter <- tempEnv$proposedSelection
  parameter$setTraceObject(trace)
  return(parameter)
}
#Called from "loadParameterObject."
#Restores PA-specific state from one or more save files into `parameter`:
#base info (via setBaseInfo), the alpha and lambda prime codon-specific
#parameter traces, and the current/proposed alpha and lambda prime values
#taken from the last file loaded.
loadPAParameterObject <- function(parameter, files)
{
  parameter <- setBaseInfo(parameter, files)
  for (i in 1:length(files)){
    tempEnv <- new.env();
    load(file = files[i], envir = tempEnv)
    #Traces hold lastIteration + 1 entries (initial state plus samples).
    max <- tempEnv$paramBase$lastIteration + 1
    numMixtures <- tempEnv$paramBase$numMix
    numMutationCategories <- tempEnv$paramBase$numMut
    numSelectionCategories <- tempEnv$paramBase$numSel
    if (i == 1){
      #for future use: This may break if PA is ran with more than
      #one mixture, in this case just follow the format of the
      #ROC CSP parameters.
      alphaTrace <- vector("list", length=numMutationCategories)
      for (j in 1:numMutationCategories) {
        for (k in 1:length(tempEnv$alphaTrace[[j]])){
          alphaTrace[[j]][[k]] <- tempEnv$alphaTrace[[j]][[k]][1:max]
        }
      }
      lambdaPrimeTrace <- vector("list", length=numSelectionCategories)
      for (j in 1:numSelectionCategories) {
        for (k in 1:length(tempEnv$lambdaPrimeTrace[[j]])){
          lambdaPrimeTrace[[j]][[k]] <- tempEnv$lambdaPrimeTrace[[j]][[k]][1:max]
        }
      }
    }else{
      #NOTE(review): combineThreeDimensionalTrace modifies a copy of its
      #argument (R is copy-on-modify) and the result is not assigned back,
      #so these calls appear to be no-ops -- confirm multi-file
      #concatenation works.
      curAlphaTrace <- tempEnv$alphaTrace
      curLambdaPrimeTrace <- tempEnv$lambdaPrimeTrace
      combineThreeDimensionalTrace(alphaTrace, curAlphaTrace, max)
      combineThreeDimensionalTrace(lambdaPrimeTrace, curLambdaPrimeTrace, max)
    }
  }#end of for loop (files)
  #Current/proposed values come from the last file loaded (tempEnv persists
  #after the loop).
  parameter$currentAlphaParameter <- tempEnv$currentAlpha
  parameter$proposedAlphaParameter <- tempEnv$proposedAlpha
  parameter$currentLambdaPrimeParameter <- tempEnv$currentLambdaPrime
  parameter$proposedLambdaPrimeParameter <- tempEnv$proposedLambdaPrime
  trace <- parameter$getTraceObject()
  trace$setCodonSpecificParameterTrace(alphaTrace, 0)
  trace$setCodonSpecificParameterTrace(lambdaPrimeTrace, 1)
  parameter$setTraceObject(trace)
  return(parameter)
}
#Called from "loadParameterObject."
#Restores PANSE-specific state from one or more save files into `parameter`:
#base info (via setBaseInfo), the alpha and lambda prime codon-specific
#parameter traces, and the current/proposed alpha and lambda prime values
#taken from the last file loaded.
loadPANSEParameterObject <- function(parameter, files)
{
  parameter <- setBaseInfo(parameter, files)
  for (i in 1:length(files)){
    tempEnv <- new.env();
    load(file = files[i], envir = tempEnv)
    #Traces hold lastIteration + 1 entries (initial state plus samples).
    max <- tempEnv$paramBase$lastIteration + 1
    numMixtures <- tempEnv$paramBase$numMix
    numMutationCategories <- tempEnv$paramBase$numMut
    numSelectionCategories <- tempEnv$paramBase$numSel
    if (i == 1){
      #for future use: This may break if PANSE is ran with more than
      #one mixture, in this case just follow the format of the
      #ROC CSP parameters.
      alphaTrace <- vector("list", length=numMutationCategories)
      for (j in 1:numMutationCategories) {
        for (k in 1:length(tempEnv$alphaTrace[[j]])){
          alphaTrace[[j]][[k]] <- tempEnv$alphaTrace[[j]][[k]][1:max]
        }
      }
      lambdaPrimeTrace <- vector("list", length=numSelectionCategories)
      for (j in 1:numSelectionCategories) {
        for (k in 1:length(tempEnv$lambdaPrimeTrace[[j]])){
          lambdaPrimeTrace[[j]][[k]] <- tempEnv$lambdaPrimeTrace[[j]][[k]][1:max]
        }
      }
    }else{
      #NOTE(review): combineThreeDimensionalTrace modifies a copy of its
      #argument (R is copy-on-modify) and the result is not assigned back,
      #so these calls appear to be no-ops -- confirm multi-file
      #concatenation works.
      curAlphaTrace <- tempEnv$alphaTrace
      curLambdaPrimeTrace <- tempEnv$lambdaPrimeTrace
      combineThreeDimensionalTrace(alphaTrace, curAlphaTrace, max)
      combineThreeDimensionalTrace(lambdaPrimeTrace, curLambdaPrimeTrace, max)
    }
  }#end of for loop (files)
  #Current/proposed values come from the last file loaded (tempEnv persists
  #after the loop).
  parameter$currentAlphaParameter <- tempEnv$currentAlpha
  parameter$proposedAlphaParameter <- tempEnv$proposedAlpha
  parameter$currentLambdaPrimeParameter <- tempEnv$currentLambdaPrime
  parameter$proposedLambdaPrimeParameter <- tempEnv$proposedLambdaPrime
  trace <- parameter$getTraceObject()
  trace$setCodonSpecificParameterTrace(alphaTrace, 0)
  trace$setCodonSpecificParameterTrace(lambdaPrimeTrace, 1)
  parameter$setTraceObject(trace)
  return(parameter)
}
#Called from "loadParameterObject."
#Restores FONSE-specific state from one or more save files into `parameter`:
#base info (via setBaseInfo), the mutation and selection codon-specific
#parameter traces, and the current mutation and selection values taken from
#the last file loaded.
loadFONSEParameterObject <- function(parameter, files)
{
  parameter <- setBaseInfo(parameter, files)
  for (i in 1:length(files)){
    tempEnv <- new.env();
    load(file = files[i], envir = tempEnv)
    numMutationCategories <- tempEnv$paramBase$numMut
    numSelectionCategories <- tempEnv$paramBase$numSel
    #Traces hold lastIteration + 1 entries (initial state plus samples).
    max <- tempEnv$paramBase$lastIteration + 1
    if (i == 1){
      #First file: initialize the trace accumulators.
      codonSpecificParameterTraceMut <- vector("list", length=numMutationCategories)
      for (j in 1:numMutationCategories) {
        codonSpecificParameterTraceMut[[j]] <- vector("list", length=length(tempEnv$mutationTrace[[j]]))
        for (k in 1:length(tempEnv$mutationTrace[[j]])){
          codonSpecificParameterTraceMut[[j]][[k]] <- tempEnv$mutationTrace[[j]][[k]][1:max]
          #codonSpecificParameterTraceSel[[j]][[k]] <- tempEnv$selectionTrace[[j]][[k]][1:max]
        }
      }
      codonSpecificParameterTraceSel <- vector("list", length=numSelectionCategories)
      for (j in 1:numSelectionCategories) {
        codonSpecificParameterTraceSel[[j]] <- vector("list", length=length(tempEnv$selectionTrace[[j]]))
        for (k in 1:length(tempEnv$selectionTrace[[j]])){
          #codonSpecificParameterTraceMut[[j]][[k]] <- tempEnv$mutationTrace[[j]][[k]][1:max]
          codonSpecificParameterTraceSel[[j]][[k]] <- tempEnv$selectionTrace[[j]][[k]][1:max]
        }
      }
    }else{
      #NOTE(review): combineThreeDimensionalTrace modifies a copy of its
      #argument (R is copy-on-modify) and the result is not assigned back,
      #so these calls appear to be no-ops -- confirm multi-file
      #concatenation works.
      curCodonSpecificParameterTraceMut <- tempEnv$mutationTrace
      curCodonSpecificParameterTraceSel <- tempEnv$selectionTrace
      combineThreeDimensionalTrace(codonSpecificParameterTraceMut, curCodonSpecificParameterTraceMut, max)
      combineThreeDimensionalTrace(codonSpecificParameterTraceSel, curCodonSpecificParameterTraceSel, max)
    }#end of if-else
  }#end of for loop (files)
  trace <- parameter$getTraceObject()
  trace$setCodonSpecificParameterTrace(codonSpecificParameterTraceMut, 0)
  trace$setCodonSpecificParameterTrace(codonSpecificParameterTraceSel, 1)
  #Current values come from the last file loaded (tempEnv persists after the
  #loop).
  parameter$currentMutationParameter <- tempEnv$currentMutation
  parameter$currentSelectionParameter <- tempEnv$currentSelection
  parameter$setTraceObject(trace)
  return(parameter)
}
#' Take the geometric mean of a vector
#'
#' @param x A vector of numerical values.
#'
#' @param rm.invalid Boolean value for handling 0, negative, or NA values in the vector. Default is TRUE and will not
#' include these values in the calculation. If FALSE, these values will be replaced by the value given to \code{default} and will
#' be included in the calculation.
#'
#' @param default Numerical value that serves as the value to replace 0, negative, or NA values in the calculation when rm.invalid is FALSE.
#' Default is 1e-5.
#'
#' @return Returns the geometric mean of a vector.
#'
#' @description \code{geom_mean} will calculate the geometric mean of a list of numerical values.
#'
#' @details This function is a special version of the geometric mean specifically for AnaCoda.
#' Most models in Anacoda assume a log normal distribution for phi values, thus all values in \code{x} are expected to be positive.
#' geom_mean returns the geometric mean of a vector and can handle 0, negative, or NA values.
#'
#' @examples
#' x <- c(1, 2, 3, 4)
#' geom_mean(x)
#'
#' y <- c(1, NA, 3, 4, 0, -1)
#' # Only take the mean of non-NA values greater than 0
#' geom_mean(y)
#'
#' # Replace values <= 0 or NAs with a default value 0.001 and then take the mean
#' geom_mean(y, rm.invalid = FALSE, default = 0.001)
#'
geom_mean <- function(x, rm.invalid = TRUE, default = 1e-5)
{
  if(!rm.invalid)
  {
    #Substitute invalid entries so each contributes `default` to the mean.
    x[x <= 0 | is.na(x)] <- default
  } else{
    #Drop invalid entries entirely (the mask is never NA: NA & FALSE == FALSE).
    x <- x[x > 0 & !is.na(x)]
  }
  #Empty input: the previous prod(x)^(1/length(x)) formulation evaluated to
  #1^Inf == 1, so return 1 here for backward compatibility.
  if (length(x) == 0) {
    return(1)
  }
  #exp(mean(log(x))) is algebraically identical to prod(x)^(1/length(x)) but
  #does not overflow/underflow for long vectors of large/small phi values.
  total <- exp(mean(log(x)))
  return(total)
}
#Intended to combine 2D traces (vector of vectors) read in from C++. The first
#element of the second trace is omitted since it should be the same as the
#last value of the first trace.
#NOTE: R arguments are copy-on-modify, so mutating trace1 inside this function
#cannot affect the caller; the combined trace is therefore returned and the
#caller must assign the result, e.g.
#  trace1 <- combineTwoDimensionalTrace(trace1, trace2, max)
combineTwoDimensionalTrace <- function(trace1, trace2, max){
  for (size in seq_along(trace1))
  {
    trace1[[size]] <- c(trace1[[size]], trace2[[size]][2:max])
  }
  return(trace1)
}
#Intended to combine 3D traces (vector of vectors of vectors) read in from C++.
#The first element of the second trace is omitted since it should be the same
#as the last value of the first trace.
#NOTE: R arguments are copy-on-modify, so mutating trace1 inside this function
#cannot affect the caller; the combined trace is therefore returned and the
#caller must assign the result, e.g.
#  trace1 <- combineThreeDimensionalTrace(trace1, trace2, max)
combineThreeDimensionalTrace <- function(trace1, trace2, max){
  for (size in seq_along(trace1)){
    for (sizeTwo in seq_along(trace1[[size]])){
      trace1[[size]][[sizeTwo]] <- c(trace1[[size]][[sizeTwo]],
                                     trace2[[size]][[sizeTwo]][2:max])
    }
  }
  return(trace1)
}
| /output/sources/authors/7618/AnaCoDa/parameterObject.R | no_license | Irbis3/crantasticScrapper | R | false | false | 65,682 | r | #' Initialize Parameter
#'
#' @param genome An object of type Genome necessary for the initialization of the Parameter object.
#' The default value is NULL.
#'
#' @param sphi Initial values for sphi. Expected is a vector of length numMixtures.
#' The default value is NULL.
#'
#' @param num.mixtures The number of mixtures elements for the underlying mixture distribution (numMixtures > 0).
#' The default value is 1.
#'
#' @param gene.assignment A vector holding the initial mixture assignment for each gene.
#' The vector length has to equal the number of genes in the genome.
#' Valid values for the vector range from 1 to numMixtures.
#' It is possible but not advised to leave a mixture element empty.
#' The default Value is NULL.
#'
#' @param initial.expression.values (Optional) A vector with initial phi values.
#' The length of the vector has to equal the number of genes in the Genome object.
#' The default value is NULL.
#'
#' @param model Specifies the model used. Valid options are "ROC", "PA", "PANSE", or "FONSE".
#' The default model is "ROC".
#' ROC is described in Gilchrist et al. 2015.
#' PA, PANSE and FONSE are currently unpublished.
#'
#' @param split.serine Whether serine should be considered as
#' one or two amino acids when running the model.
#' TRUE and FALSE are the only valid values.
#' The default value for split.serine is TRUE.
#'
#' @param mixture.definition A string describing how each mixture should
#' be treated with respect to mutation and selection.
#' Valid values consist of "allUnique", "mutationShared", and "selectionShared".
#' The default value for mixture.definition is "allUnique".
#' See details for more information.
#'
#' @param mixture.definition.matrix A matrix representation of how
#' the mutation and selection categories correspond to the mixtures.
#' The default value for mixture.definition.matrix is NULL.
#' If provided, the model will use the matrix to initialize the mutation and selection
#' categories instead of the definition listed directly above.
#' See details for more information.
#'
#' @param init.with.restart.file File name containing information to reinitialize a
#' previous Parameter object.
#' If given, all other arguments will be ignored.
#' The default value for init.with.restart.file is NULL.
#'
#' @param mutation.prior.sd Controlling the standard deviation of the normal
#' prior on the mutation parameters
#'
#' @param init.csp.variance specifies the initial proposal width for codon specific parameter (default is 0.0025).
#' The proposal width adapts during the runtime to reach a target acceptance rate of ~0.25
#'
#' @param init.sepsilon specifies the initial value for sepsilon. default is 0.1
#'
#' @param init.w.obs.phi TRUE: initialize phi values with observed phi values
#' (data from RNAseq, mass spectrometry, ribosome footprinting) Default is FALSE.
#' If multiple observed phi values exist for a gene, the geometric mean of these values is used as initial phi.
#' When using this function, one should remove any genes with
#' missing phi values, as these genes will not have an initial phi value.
#'
#' @return parameter Returns an initialized Parameter object.
#'
#' @description \code{initializeParameterObject} initializes a new parameter object or reconstructs one from a restart file
#'
#' @details \code{initializeParameterObject} checks the values of the arguments
#' given to insure the values are valid.
#'
#' The mixture definition and mixture definition matrix describes how the mutation
#' and selection categories are set up with respect to the number of mixtures. For
#' example, if mixture.definition = "allUnique" and numMixtures = 3, a matrix
#' representation would be \code{matrix(c(1,2,3,1,2,3), ncol=2)}
#' where each row represents a mixture, the first column represents the mutation
#' category, and the second column represents the selection category.
#' Another example would be mixture.definition = "selectionShared" and numMixtures = 4 (
#' \code{matrix(c(1,2,3,4,1,1,1,1), ncol=2)}).
#' In this case, the selection category is the same for every mixture. If a matrix
#' is given, and it is valid, then the mutation/selection relationship will be
#' defined by the given matrix and the keyword will be ignored. A matrix should only
#' be given in cases where the keywords would not create the desired matrix.
#'
#' @examples
#'
#' genome_file <- system.file("extdata", "genome.fasta", package = "AnaCoDa")
#' restart_file <- system.file("extdata", "restart_file.rst", package = "AnaCoDa")
#'
#' genome <- initializeGenomeObject(file = genome_file)
#'
#' ## initialize a new parameter object
#' sphi_init <- 1
#' numMixtures <- 1
#' geneAssignment <- rep(1, length(genome))
#' parameter <- initializeParameterObject(genome = genome, sphi = sphi_init,
#' num.mixtures = numMixtures,
#' gene.assignment = geneAssignment,
#' mixture.definition = "allUnique")
#'
#' ## re-initialize a parameter object from a restart file. Useful for checkpointing
#' parameter <- initializeParameterObject(init.with.restart.file = restart_file)
#'
#' ## initialize a parameter object with a custom mixture definition matrix
#' def.matrix <- matrix(c(1,1,1,2), ncol=2)
#' geneAssignment <- sample(1:2, length(genome), replace = TRUE) # random assignment to mixtures
#' parameter <- initializeParameterObject(genome = genome, sphi = c(0.5, 2), num.mixtures = 2,
#' gene.assignment = geneAssignment,
#' mixture.definition.matrix = def.matrix)
#'
initializeParameterObject <- function(genome = NULL, sphi = NULL, num.mixtures = 1,
                                      gene.assignment = NULL, initial.expression.values = NULL,
                                      model = "ROC", split.serine = TRUE,
                                      mixture.definition = "allUnique",
                                      mixture.definition.matrix = NULL,
                                      init.with.restart.file = NULL, mutation.prior.sd = 0.35,
                                      init.csp.variance = 0.0025, init.sepsilon = 0.1,
                                      init.w.obs.phi=FALSE){
  # check input integrity; skipped entirely when a restart file is given,
  # since all state is then taken from the restart file.
  if(is.null(init.with.restart.file)){
    if(length(sphi) != num.mixtures){
      stop("Not all mixtures have an Sphi value assigned!\n")
    }
    if(length(genome) != length(gene.assignment)){
      stop("Not all Genes have a mixture assignment!\n")
    }
    if(max(gene.assignment) > num.mixtures){
      stop("Gene is assigned to non existing mixture!\n")
    }
    if(num.mixtures < 1){
      stop("num. mixture has to be a positive non-zero value!\n")
    }
    #NOTE(review): a second `length(sphi) != num.mixtures` check used to
    #follow here; it was unreachable because the first check above already
    #enforces the same condition, so it has been removed.
    if (!is.null(initial.expression.values)) {
      if (length(initial.expression.values) != length.Rcpp_Genome(genome)) {
        stop("initial.expression.values must have length equal to the number of genes in the Genome object\n")
      }
    }
    if (!identical(split.serine, TRUE) && !identical(split.serine, FALSE)) {
      stop("split.serine must be a boolean value\n")
    }
    if (mixture.definition != "allUnique" && mixture.definition != "mutationShared" &&
        mixture.definition != "selectionShared") {
      stop("mixture.definition must be \"allUnique\", \"mutationShared\", or \"selectionShared\". Default is \"allUnique\"\n")
    }
    if (mutation.prior.sd < 0) {
      stop("mutation.prior.sd should be positive\n")
    }
    if (init.csp.variance < 0) {
      stop("init.csp.variance should be positive\n")
    }
    if (init.sepsilon < 0) {
      stop("init.sepsilon should be positive\n")
    }
  } else {
    if (!file.exists(init.with.restart.file)) {
      stop("init.with.restart.file provided does not exist\n")
    }
  }
  #Dispatch to the model-specific initializer; for every model a restart file
  #takes priority over a fresh initialization.
  if(model == "ROC"){
    if(is.null(init.with.restart.file)){
      parameter <- initializeROCParameterObject(genome, sphi, num.mixtures,
                                                gene.assignment, initial.expression.values, split.serine,
                                                mixture.definition, mixture.definition.matrix,
                                                mutation.prior.sd, init.csp.variance, init.sepsilon, init.w.obs.phi)
    }else{
      parameter <- new(ROCParameter, init.with.restart.file)
    }
  }else if(model == "FONSE"){
    if(is.null(init.with.restart.file)){
      parameter <- initializeFONSEParameterObject(genome, sphi, num.mixtures,
                                                  gene.assignment, initial.expression.values, split.serine,
                                                  mixture.definition, mixture.definition.matrix, init.csp.variance, init.w.obs.phi)
    }else{
      parameter <- new(FONSEParameter, init.with.restart.file)
    }
  }else if(model == "PA"){
    if(is.null(init.with.restart.file)){
      parameter <- initializePAParameterObject(genome, sphi, num.mixtures,
                                               gene.assignment, initial.expression.values, split.serine,
                                               mixture.definition, mixture.definition.matrix, init.csp.variance, init.w.obs.phi)
    }else{
      parameter <- new(PAParameter, init.with.restart.file)
    }
  }else if(model == "PANSE"){
    if(is.null(init.with.restart.file)){
      parameter <- initializePANSEParameterObject(genome, sphi, num.mixtures,
                                                  gene.assignment, initial.expression.values, split.serine,
                                                  mixture.definition, mixture.definition.matrix, init.csp.variance, init.w.obs.phi)
    }else{
      parameter <- new(PANSEParameter, init.with.restart.file)
    }
  }else{
    stop("Unknown model.")
  }
  return(parameter)
}
#Called from initializeParameterObject.
#Constructs and initializes a ROCParameter object: builds the C++ parameter
#(keyword or matrix mixture definition), initializes per-gene synthesis rates
#(from the genome, from observed phi values, or from a user supplied list),
#then records the number of observed phi sets, the mutation prior sd, initial
#sepsilon values, and the CSP covariance matrices.
initializeROCParameterObject <- function(genome, sphi, numMixtures, geneAssignment,
                                         expressionValues = NULL, split.serine = TRUE,
                                         mixture.definition = "allUnique",
                                         mixture.definition.matrix = NULL, mutation_prior_sd = 0.35, init.csp.variance = 0.0025, init.sepsilon = 0.1, init.w.obs.phi=FALSE){
  if(is.null(mixture.definition.matrix)){
    # keyword constructor
    parameter <- new(ROCParameter, as.vector(sphi), numMixtures, geneAssignment,
                     split.serine, mixture.definition)
  }else{
    # matrix constructor; flatten the two matrix columns into the single
    # vector form expected by the C++ constructor
    mixture.definition <- c(mixture.definition.matrix[, 1],
                            mixture.definition.matrix[, 2])
    parameter <- new(ROCParameter, as.vector(sphi), geneAssignment,
                     mixture.definition, split.serine)
  }
  # initialize expression values -- exactly one of the three sources may be
  # used (was `init.w.obs.phi == F`/`== T`; compare logicals idiomatically)
  if(is.null(expressionValues) && !init.w.obs.phi)
  {
    # draw initial phi values from the genome using the mean sphi
    parameter$initializeSynthesisRateByGenome(genome, mean(sphi))
  }
  else if(init.w.obs.phi && is.null(expressionValues))
  {
    # use observed phi values; with multiple observation sets, take the
    # per-gene geometric mean across sets (first column is the gene id)
    observed.phi <- getObservedSynthesisRateSet(genome)
    if (ncol(observed.phi) - 1 > 1)
    {
      observed.phi <- apply(observed.phi[, 2:ncol(observed.phi)], MARGIN = 1, FUN = geom_mean)
    }
    else
    {
      observed.phi <- observed.phi[, 2]
    }
    parameter$initializeSynthesisRateByList(observed.phi)
  }
  else if (!is.null(expressionValues) && !init.w.obs.phi)
  {
    parameter$initializeSynthesisRateByList(expressionValues)
  }
  else
  {
    stop("expressionValues is not NULL and init.w.obs.phi == TRUE. Please choose only one of these options.")
  }
  # observed phi sets come after the gene-id column
  n.obs.phi.sets <- ncol(getObservedSynthesisRateSet(genome)) - 1
  parameter$setNumObservedSynthesisRateSets(n.obs.phi.sets)
  parameter$mutation_prior_sd <- mutation_prior_sd
  if (n.obs.phi.sets != 0){
    parameter$setInitialValuesForSepsilon(as.vector(init.sepsilon))
  }
  parameter <- initializeCovarianceMatrices(parameter, genome, numMixtures, geneAssignment, init.csp.variance)
  return(parameter)
}
#Called from initializeParameterObject.
#Constructs and initializes a PAParameter object: builds the C++ parameter
#(keyword or matrix mixture definition) and initializes per-gene synthesis
#rates from the genome, from observed phi values, or from a user supplied
#list.
initializePAParameterObject <- function(genome, sphi, numMixtures, geneAssignment,
                                        expressionValues = NULL, split.serine = TRUE,
                                        mixture.definition = "allUnique",
                                        mixture.definition.matrix = NULL, init.csp.variance, init.w.obs.phi=FALSE){
  if(is.null(mixture.definition.matrix))
  { # keyword constructor
    parameter <- new(PAParameter, as.vector(sphi), numMixtures, geneAssignment,
                     split.serine, mixture.definition)
  }else{
    # matrix constructor; flatten the two matrix columns into the single
    # vector form expected by the C++ constructor
    mixture.definition <- c(mixture.definition.matrix[, 1],
                            mixture.definition.matrix[, 2])
    parameter <- new(PAParameter, as.vector(sphi), geneAssignment,
                     mixture.definition, split.serine)
  }
  # initialize expression values -- exactly one of the three sources may be
  # used (was `init.w.obs.phi == F`/`== T`; compare logicals idiomatically)
  if(is.null(expressionValues) && !init.w.obs.phi)
  {
    # draw initial phi values from the genome using the mean sphi
    parameter$initializeSynthesisRateByGenome(genome, mean(sphi))
  }
  else if(init.w.obs.phi && is.null(expressionValues))
  {
    # use observed phi values; with multiple observation sets, take the
    # per-gene geometric mean across sets (first column is the gene id)
    observed.phi <- getObservedSynthesisRateSet(genome)
    if (ncol(observed.phi) - 1 > 1)
    {
      observed.phi <- apply(observed.phi[, 2:ncol(observed.phi)], MARGIN = 1, FUN = geom_mean)
    }
    else
    {
      observed.phi <- observed.phi[, 2]
    }
    parameter$initializeSynthesisRateByList(observed.phi)
  }
  else if (!is.null(expressionValues) && !init.w.obs.phi)
  {
    parameter$initializeSynthesisRateByList(expressionValues)
  }
  else
  {
    stop("expressionValues is not NULL and init.w.obs.phi == TRUE. Please choose only one of these options.")
  }
  ## TODO (Cedric): use init.csp.variance to set initial proposal width for CSP parameters
  return (parameter)
}
#Called from initializeParameterObject.
#Constructs and initializes a PANSEParameter object: builds the C++ parameter
#(keyword or matrix mixture definition) and initializes per-gene synthesis
#rates from the genome, from observed phi values, or from a user supplied
#list.
initializePANSEParameterObject <- function(genome, sphi, numMixtures, geneAssignment,
                                           expressionValues = NULL, split.serine = TRUE,
                                           mixture.definition = "allUnique",
                                           mixture.definition.matrix = NULL, init.csp.variance, init.w.obs.phi=FALSE){
  if(is.null(mixture.definition.matrix))
  { # keyword constructor
    parameter <- new(PANSEParameter, as.vector(sphi), numMixtures, geneAssignment,
                     split.serine, mixture.definition)
  }else{
    # matrix constructor; flatten the two matrix columns into the single
    # vector form expected by the C++ constructor
    mixture.definition <- c(mixture.definition.matrix[, 1],
                            mixture.definition.matrix[, 2])
    parameter <- new(PANSEParameter, as.vector(sphi), geneAssignment,
                     mixture.definition, split.serine)
  }
  # initialize expression values -- exactly one of the three sources may be
  # used (was `init.w.obs.phi == F`/`== T`; compare logicals idiomatically)
  if(is.null(expressionValues) && !init.w.obs.phi)
  {
    # draw initial phi values from the genome using the mean sphi
    parameter$initializeSynthesisRateByGenome(genome, mean(sphi))
  }
  else if(init.w.obs.phi && is.null(expressionValues))
  {
    # use observed phi values; with multiple observation sets, take the
    # per-gene geometric mean across sets (first column is the gene id)
    observed.phi <- getObservedSynthesisRateSet(genome)
    if (ncol(observed.phi) - 1 > 1)
    {
      observed.phi <- apply(observed.phi[, 2:ncol(observed.phi)], MARGIN = 1, FUN = geom_mean)
    }
    else
    {
      observed.phi <- observed.phi[, 2]
    }
    parameter$initializeSynthesisRateByList(observed.phi)
  }
  else if (!is.null(expressionValues) && !init.w.obs.phi)
  {
    parameter$initializeSynthesisRateByList(expressionValues)
  }
  else
  {
    stop("expressionValues is not NULL and init.w.obs.phi == TRUE. Please choose only one of these options.")
  }
  return (parameter)
}
#Called from initializeParameterObject.
#Constructs and initializes a FONSEParameter object: builds the C++ parameter
#(keyword or matrix mixture definition), initializes per-gene synthesis rates
#from the genome, from observed phi values, or from a user supplied list, and
#sets up the CSP covariance matrices.
initializeFONSEParameterObject <- function(genome, sphi, numMixtures,
                                           geneAssignment, expressionValues = NULL, split.serine = TRUE,
                                           mixture.definition = "allUnique",
                                           mixture.definition.matrix = NULL, init.csp.variance, init.w.obs.phi=FALSE){
  # create Parameter object
  if(is.null(mixture.definition.matrix))
  { # keyword constructor
    parameter <- new(FONSEParameter, as.vector(sphi), numMixtures, geneAssignment,
                     split.serine, mixture.definition)
  }else{
    # matrix constructor; flatten the two matrix columns into the single
    # vector form expected by the C++ constructor
    mixture.definition <- c(mixture.definition.matrix[, 1],
                            mixture.definition.matrix[, 2])
    parameter <- new(FONSEParameter, as.vector(sphi), geneAssignment,
                     mixture.definition, split.serine)
  }
  # initialize expression values -- exactly one of the three sources may be
  # used (was `init.w.obs.phi == F`/`== T`; compare logicals idiomatically)
  if(is.null(expressionValues) && !init.w.obs.phi)
  {
    # draw initial phi values from the genome using the mean sphi
    parameter$initializeSynthesisRateByGenome(genome, mean(sphi))
  }
  else if(init.w.obs.phi && is.null(expressionValues))
  {
    # use observed phi values; with multiple observation sets, take the
    # per-gene geometric mean across sets (first column is the gene id)
    observed.phi <- getObservedSynthesisRateSet(genome)
    if (ncol(observed.phi) - 1 > 1)
    {
      observed.phi <- apply(observed.phi[, 2:ncol(observed.phi)], MARGIN = 1, FUN = geom_mean)
    }
    else
    {
      observed.phi <- observed.phi[, 2]
    }
    parameter$initializeSynthesisRateByList(observed.phi)
  }
  else if (!is.null(expressionValues) && !init.w.obs.phi)
  {
    parameter$initializeSynthesisRateByList(expressionValues)
  }
  else
  {
    stop("expressionValues is not NULL and init.w.obs.phi == TRUE. Please choose only one of these options.")
  }
  parameter <- initializeCovarianceMatrices(parameter, genome, numMixtures, geneAssignment, init.csp.variance)
  return(parameter)
}
#' Return Codon Specific Paramters (or write to csv) estimates as data.frame
#'
#' @param parameter parameter an object created by \code{initializeParameterObject}.
#'
#' @param filename Posterior estimates will be written to file instead of returned if specified (format: csv).
#'
#' @param CSP which type of codon specific parameter should be returned (mutation (default) or selection)
#'
#' @param mixture estimates for which mixture should be returned
#'
#' @param samples The number of samples used for the posterior estimates.
#'
#' @return returns a data.frame with the posterior estimates of the models
#' codon specific parameters or writes it directly to a csv file if \code{filename} is specified
#'
#' @description \code{getCSPEstimates} returns the codon specific
#' parameter estimates for a given parameter and mixture or write it to a csv file.
#'
#' @examples
#' genome_file <- system.file("extdata", "genome.fasta", package = "AnaCoDa")
#'
#' genome <- initializeGenomeObject(file = genome_file)
#' sphi_init <- c(1,1)
#' numMixtures <- 2
#' geneAssignment <- sample(1:2, length(genome), replace = TRUE) # random assignment to mixtures
#' parameter <- initializeParameterObject(genome = genome, sphi = sphi_init,
#' num.mixtures = numMixtures,
#' gene.assignment = geneAssignment,
#' mixture.definition = "allUnique")
#' model <- initializeModelObject(parameter = parameter, model = "ROC")
#' samples <- 2500
#' thinning <- 50
#' adaptiveWidth <- 25
#' mcmc <- initializeMCMCObject(samples = samples, thinning = thinning,
#' adaptive.width=adaptiveWidth, est.expression=TRUE,
#' est.csp=TRUE, est.hyper=TRUE, est.mix = TRUE)
#' divergence.iteration <- 10
#' \dontrun{
#' runMCMC(mcmc = mcmc, genome = genome, model = model,
#' ncores = 4, divergence.iteration = divergence.iteration)
#'
#' ## return estimates for codon specific parameters
#' csp_mat <- getCSPEstimates(parameter, CSP="Mutation")
#'
#' # write the result directly to the filesystem as a csv file. No values are returned
#' getCSPEstimates(parameter, filename=file.path(tempdir(), "csp_out.csv"), CSP="Mutation")
#'
#' }
#'
getCSPEstimates <- function(parameter, filename=NULL, CSP="Mutation", mixture = 1, samples = 10){
  # Accumulators for the output data.frame; one entry per codon.
  Amino_Acid <- c()
  Value <- c()
  Codon <- c()
  quantile_list <- vector("list")
  if (inherits(parameter, "Rcpp_ROCParameter") || inherits(parameter, "Rcpp_FONSEParameter")){
    # ROC/FONSE parameters are organized by amino acid; iterate amino acids
    # and expand each to its codons (reference codon excluded).
    names.aa <- aminoAcids()
    for(aa in names.aa){
      # M, W (single codon) and X (stop) carry no codon specific parameters
      if(aa == "M" || aa == "W" || aa == "X") next
      codons <- AAToCodon(aa, TRUE)
      for(i in 1:length(codons)){
        Amino_Acid <- c(Amino_Acid, aa)
        Codon <- c(Codon, codons[i])
        if(CSP == "Mutation"){
          Value <- c(Value, parameter$getCodonSpecificPosteriorMean(mixture, samples, codons[i], 0, TRUE))
          quantile_list <- c(quantile_list, parameter$getCodonSpecificQuantile(mixture, samples, codons[i], 0, c(0.025, 0.975), TRUE))
        }
        else if(CSP == "Selection"){
          Value <- c(Value, parameter$getCodonSpecificPosteriorMean(mixture, samples, codons[i], 1, TRUE))
          quantile_list <- c(quantile_list, parameter$getCodonSpecificQuantile(mixture, samples, codons[i], 1, c(0.025, 0.975), TRUE))
        }
        else {
          stop("Unknown parameter type given with argument: CSP")
        }
      }
    }
  }
  else if (inherits(parameter, "Rcpp_PAParameter") || inherits(parameter, "Rcpp_PANSEParameter")){
    # PA/PANSE parameters are organized directly by codon (group list holds
    # codons). Both classes share identical accessors, so one branch suffices.
    groupList <- parameter$getGroupList()
    for(i in seq_along(groupList)){
      # BUG FIX: previously this branch indexed an undefined `codons` vector;
      # the codon must be taken from groupList for PA/PANSE objects.
      codon <- groupList[i]
      Codon <- c(Codon, codon)
      Amino_Acid <- c(Amino_Acid, codonToAA(codon))
      if(CSP == "Alpha"){
        Value <- c(Value, parameter$getCodonSpecificPosteriorMean(mixture, samples, codon, 0, FALSE))
        quantile_list <- c(quantile_list, parameter$getCodonSpecificQuantile(mixture, samples, codon, 0, c(0.025, 0.975), FALSE))
      }
      else if(CSP == "Lambda Prime"){
        Value <- c(Value, parameter$getCodonSpecificPosteriorMean(mixture, samples, codon, 1, FALSE))
        quantile_list <- c(quantile_list, parameter$getCodonSpecificQuantile(mixture, samples, codon, 1, c(0.025, 0.975), FALSE))
      }
      else {
        stop("Unknown parameter type given with argument: CSP")
      }
    }
  }
  else{
    stop("Unknown object provided with argument: parameter")
  }
  # Each quantile call contributed a (lower, upper) pair; reshape to 2 rows.
  quantile_list <- matrix(unlist(quantile_list), nrow = 2)
  data <- data.frame(Amino_Acid, Codon, Value, Lower=quantile_list[1,], Upper=quantile_list[2,])
  colnames(data) <- c("AA", "Codon", "Posterior", "0.025%", "0.975%")
  if(is.null(filename))
  {
    return(data)
  }else {
    # Write to csv instead of returning when a filename is given.
    write.csv(data, file = filename, row.names = FALSE, quote=FALSE)
  }
}
#' Calculate Selection coefficients
#'
#' \code{getSelectionCoefficients} calculates the selection coefficient of each codon in each gene.
#'
#' @param genome A genome object initialized with
#' \code{\link{initializeGenomeObject}} to add observed expression data.
#'
#' @param parameter an object created by \code{initializeParameterObject}.
#'
#' @param samples The number of samples used for the posterior estimates.
#'
#' @return A matrix with selection coefficients.
#'
#' @examples
#' genome_file <- system.file("extdata", "genome.fasta", package = "AnaCoDa")
#'
#' genome <- initializeGenomeObject(file = genome_file)
#' sphi_init <- 1
#' numMixtures <- 1
#' geneAssignment <- rep(1, length(genome))
#' parameter <- initializeParameterObject(genome = genome, sphi = sphi_init,
#' num.mixtures = numMixtures,
#' gene.assignment = geneAssignment,
#' mixture.definition = "allUnique")
#' model <- initializeModelObject(parameter = parameter, model = "ROC")
#' samples <- 2500
#' thinning <- 50
#' adaptiveWidth <- 25
#' mcmc <- initializeMCMCObject(samples = samples, thinning = thinning,
#' adaptive.width=adaptiveWidth, est.expression=TRUE,
#' est.csp=TRUE, est.hyper=TRUE, est.mix = TRUE)
#' divergence.iteration <- 10
#' \dontrun{
#' runMCMC(mcmc = mcmc, genome = genome, model = model,
#' ncores = 4, divergence.iteration = divergence.iteration)
#'
#' ## return estimates for selection coefficients s for each codon in each gene
#' selection.coefficients <- getSelectionCoefficients(genome = genome,
#' parameter = parameter, samples = 1000)
#' }
#'
getSelectionCoefficients <- function(genome, parameter, samples = 100)
{
  # One row per gene, one column per codon.
  sel.coef <- parameter$calculateSelectionCoefficients(samples)
  grouplist <- parameter$getGroupList()
  codon.names <- NULL
  if (inherits(parameter, "Rcpp_ROCParameter") || inherits(parameter, "Rcpp_FONSEParameter"))
  {
    # grouplist holds amino acids for ROC/FONSE; expand each to its codons
    for(aa in grouplist)
      codon.names <- c(codon.names, AAToCodon(aa))
    # The matrix is too large as it reserves columns for M and W,
    # which are not used here; drop them.
    sel.coef <- sel.coef[, -c(60, 61)]
  }else{
    # PA/PANSE group lists already contain codons
    codon.names <- grouplist
  }
  gene.names <- getNames(genome)
  colnames(sel.coef) <- codon.names
  rownames(sel.coef) <- gene.names
  return(sel.coef)
}
# Estimates codon specific parameters for one amino acid via a multinomial
# logistic regression (VGAM::vglm): Delta M is the intercept and Delta eta the
# slope of the regression against phi.
getCSPbyLogit <- function(codonCounts, phi, coefstart = NULL, x.arg = FALSE,
                          y.arg = FALSE, qr.arg = FALSE){
  # drop genes carrying no codon of this amino acid at all
  has.counts <- rowSums(codonCounts) != 0
  # fit the regression; the returned object also carries information not used here
  fit <- VGAM::vglm(codonCounts[has.counts, ] ~ phi[has.counts],
                    VGAM::multinomial, coefstart = coefstart,
                    x.arg = x.arg, y.arg = y.arg, qr.arg = qr.arg)
  # negate to convert delta.t into delta.eta
  neg.coef <- -fit@coefficients
  list(coefficients = neg.coef,
       coef.mat = matrix(neg.coef, nrow = 2, byrow = TRUE),
       R = fit@R)
}
# Assigns a block index to every element of matrix M when M is tiled into
# r-by-c submatrices. Blocks are numbered row-major: the block in block-row i
# and block-column j gets index (i - 1) * nBlockCols + j. Returns a matrix of
# the same dimension as M holding each element's block index.
subMatrices <- function(M, r, c){
  block.row <- (row(M) - 1) %/% r + 1
  block.col <- (col(M) - 1) %/% c + 1
  (block.row - 1) * max(block.col) + block.col
}
# Splits matrix M into its r-by-c submatrices (tiles). Tiles are returned as a
# list of r-row matrices in row-major block order, matching the block indices
# produced by subMatrices().
splitMatrix <- function(M, r, c){
  block.idx <- subMatrices(M, r, c)
  n.blocks <- prod(dim(M)) / r / c
  lapply(seq_len(n.blocks), function(b) matrix(M[block.idx == b], nrow = r))
}
#' extracts an object of traces from a parameter object.
#'
#' @param parameter A Parameter object that corresponds to one of the model types.
#'
#' @return trace Returns an object of type Trace extracted from the given parameter object
#'
#' @examples
#' genome_file <- system.file("extdata", "genome.fasta", package = "AnaCoDa")
#'
#' genome <- initializeGenomeObject(file = genome_file)
#' sphi_init <- c(1,1)
#' numMixtures <- 2
#' geneAssignment <- sample(1:2, length(genome), replace = TRUE) # random assignment to mixtures
#' parameter <- initializeParameterObject(genome = genome, sphi = sphi_init,
#' num.mixtures = numMixtures,
#' gene.assignment = geneAssignment,
#' mixture.definition = "allUnique")
#'
#' trace <- getTrace(parameter) # empty trace object since no MCMC was perfomed
#'
getTrace <- function(parameter){
  # delegate directly to the parameter's accessor
  parameter$getTraceObject()
}
#######
### CURRENTLY NOT EXPOSED
#######
#' Initialize Covariance Matrices
#'
#' @param parameter A Parameter object that corresponds to one of the model types.
#' Valid values are "ROC", "PA", and "FONSE".
#'
#' @param genome An object of type Genome necessary for the initialization of the Parameter object.
#'
#' @param numMixtures The number of mixture elements for the underlying mixture distribution (numMixtures > 0).
#'
#' @param geneAssignment A vector holding the initial mixture assignment for each gene.
#' The vector length has to equal the number of genes in the genome.
#' Valid values for the vector range from 1 to numMixtures.
#' It is possible but not advised to leave a mixture element empty.
#'
#' @param init.csp.variance initial proposal variance for codon specific parameter, default is 0.0025.
#'
#' @return parameter Returns the Parameter argument, now modified with initialized mutation, selection, and covariance matrices.
#'
# Also initializes the mutation and selection parameters.
# For every multi-codon amino acid this fits a multinomial logistic regression
# (getCSPbyLogit) per mixture to seed mutation/selection parameters, then sets
# one shared diagonal covariance (proposal) matrix per amino acid. An earlier,
# commented-out variant assembled the full covariance matrix from the
# regression's R factor; only the diagonal initialization is active.
initializeCovarianceMatrices <- function(parameter, genome, numMixtures, geneAssignment, init.csp.variance = 0.0025) {
  n.mut.cat <- parameter$numMutationCategories
  n.sel.cat <- parameter$numSelectionCategories
  # phi values are identical across mixtures at initialization time
  phi <- parameter$getCurrentSynthesisRateForMixture(1)
  for (aa in aminoAcids()) {
    # M, W (single codon) and X (stop) carry no codon specific parameters
    if (aa == "M" || aa == "W" || aa == "X") next
    codonCounts <- getCodonCountsForAA(aa, genome)
    numCodons <- dim(codonCounts)[2] - 1  # first column holds gene ids
    #-----------------------------------------
    # TODO WORKS CURRENTLY ONLY FOR ALLUNIQUE!
    #-----------------------------------------
    for (mixElement in 1:numMixtures) {
      in.mixture <- geneAssignment == mixElement
      csp.fit <- getCSPbyLogit(codonCounts[in.mixture, ], phi[in.mixture])
      parameter$initMutation(csp.fit$coef.mat[1, ], mixElement, aa)
      parameter$initSelection(csp.fit$coef.mat[2, ], mixElement, aa)
    }
    # One covariance matrix for all mixtures; currently only variances used.
    compl.covMat <- diag((n.mut.cat + n.sel.cat) * numCodons) * init.csp.variance
    parameter$initCovarianceMatrix(compl.covMat, aa)
  }
  parameter
}
#' Returns mixture assignment estimates for each gene
#'
#' @param parameter an object created by \code{initializeParameterObject}
#'
#' @param gene.index an integer or vector of integers representing the gene(s) of interest.
#'
#' @param samples number of samples for the posterior estimate
#'
#' @return returns a vector with the mixture assignment of each gene corresponding to \code{gene.index} in the same order as the genome.
#'
#' @description Posterior estimates for the mixture assignment of specified genes
#'
#' @details The returned vector is unnamed as gene ids are only stored in the \code{genome} object,
#' but the \code{gene.index} vector can be used to match the assignment to the genome.
#'
#' @examples
#' genome_file <- system.file("extdata", "genome.fasta", package = "AnaCoDa")
#'
#' genome <- initializeGenomeObject(file = genome_file)
#' sphi_init <- c(1,1)
#' numMixtures <- 2
#' geneAssignment <- sample(1:2, length(genome), replace = TRUE) # random assignment to mixtures
#' parameter <- initializeParameterObject(genome = genome, sphi = sphi_init,
#' num.mixtures = numMixtures,
#' gene.assignment = geneAssignment,
#' mixture.definition = "allUnique")
#' model <- initializeModelObject(parameter = parameter, model = "ROC")
#' samples <- 2500
#' thinning <- 50
#' adaptiveWidth <- 25
#' mcmc <- initializeMCMCObject(samples = samples, thinning = thinning, adaptive.width=adaptiveWidth,
#' est.expression=TRUE, est.csp=TRUE, est.hyper=TRUE, est.mix = TRUE)
#' divergence.iteration <- 10
#' \dontrun{
#' runMCMC(mcmc = mcmc, genome = genome, model = model,
#' ncores = 4, divergence.iteration = divergence.iteration)
#'
#' # get the mixture assignment for all genes
#' mixAssign <- getMixtureAssignmentEstimate(parameter = parameter,
#' gene.index = 1:length(genome), samples = 1000)
#'
#' # get the mixture assignment for a subsample
#' mixAssign <- getMixtureAssignmentEstimate(parameter = parameter,
#' gene.index = 5:100, samples = 1000)
#' # or
#' mixAssign <- getMixtureAssignmentEstimate(parameter = parameter,
#' gene.index = c(10, 30:50, 3, 90), samples = 1000)
#' }
#'
getMixtureAssignmentEstimate <- function(parameter, gene.index, samples)
{
  # query the posterior mixture assignment gene by gene, then flatten
  assignments <- lapply(gene.index, function(g) {
    parameter$getEstimatedMixtureAssignmentForGene(samples, g)
  })
  unlist(assignments)
}
#' Returns the estimated phi posterior for a gene
#'
#' @param parameter an object created by \code{initializeParameterObject}.
#'
#' @param gene.index an integer or vector of integers representing the gene(s) of interest.
#'
#' @param samples number of samples for the posterior estimate
#'
#' @param quantiles vector of quantiles, (default: c(0.025, 0.975))
#'
#' @return returns a matrix with the posterior estimates of the synthesis rate (phi) for each gene corresponding to \code{gene.index}, on natural and log10 scale, together with standard errors and the requested quantiles.
#'
#' @description Posterior estimates for the phi value of specified genes
#'
#' @details The returned vector is unnamed as gene ids are only stored in the \code{genome} object,
#' but the \code{gene.index} vector can be used to match the assignment to the genome.
#'
#' @examples
#' genome_file <- system.file("extdata", "genome.fasta", package = "AnaCoDa")
#'
#' genome <- initializeGenomeObject(file = genome_file)
#' sphi_init <- c(1,1)
#' numMixtures <- 2
#' geneAssignment <- sample(1:2, length(genome), replace = TRUE) # random assignment to mixtures
#' parameter <- initializeParameterObject(genome = genome, sphi = sphi_init,
#' num.mixtures = numMixtures,
#' gene.assignment = geneAssignment,
#' mixture.definition = "allUnique")
#' model <- initializeModelObject(parameter = parameter, model = "ROC")
#' samples <- 2500
#' thinning <- 50
#' adaptiveWidth <- 25
#' mcmc <- initializeMCMCObject(samples = samples, thinning = thinning,
#' adaptive.width=adaptiveWidth, est.expression=TRUE,
#' est.csp=TRUE, est.hyper=TRUE, est.mix = TRUE)
#' divergence.iteration <- 10
#' \dontrun{
#' runMCMC(mcmc = mcmc, genome = genome, model = model,
#' ncores = 4, divergence.iteration = divergence.iteration)
#'
#' # get the estimated expression values for all genes based on the mixture
#' # they are assigned to at each step
#' estimatedExpression <- getExpressionEstimates(parameter, 1:length(genome), 1000)
#' }
#'
getExpressionEstimates <- function(parameter, gene.index, samples, quantiles=c(0.025, 0.975))
{
  # posterior mean phi per gene, on natural (log.scale = FALSE) or log10 scale
  posterior.mean <- function(log.scale) {
    unlist(lapply(gene.index, function(g) {
      parameter$getSynthesisRatePosteriorMeanForGene(samples, g, log.scale)
    }))
  }
  # standard error: sqrt(posterior variance) divided by the sample count
  std.error <- function(log.scale) {
    sqrt(unlist(lapply(gene.index, function(g) {
      parameter$getSynthesisRateVarianceForGene(samples, g, TRUE, log.scale)
    }))) / samples
  }
  # posterior quantiles, one row per gene
  quantile.rows <- function(log.scale) {
    do.call(rbind, lapply(gene.index, function(g) {
      parameter$getExpressionQuantile(samples, g, quantiles, log.scale)
    }))
  }
  expr.mat <- cbind(posterior.mean(FALSE), posterior.mean(TRUE),
                    std.error(FALSE), std.error(TRUE),
                    quantile.rows(FALSE), quantile.rows(TRUE))
  colnames(expr.mat) <- c("PHI", "log10.PHI", "Std.Error", "log10.Std.Error",
                          quantiles, paste0("log10.", quantiles))
  expr.mat
}
#' Write Parameter Object to a File
#'
#' @param parameter an object created by \code{initializeParameterObject}.
#'
#' @param file A filename that where the data will be stored.
#'
#' @return This function has no return value.
#'
#' @description \code{writeParameterObject} will write the parameter object as binary to the filesystem
#'
#' @details As Rcpp object are not serializable with the default R \code{save} function,
#' therefore this custom save function is provided (see \link{loadParameterObject}).
#'
#' @examples
#' \dontrun{
#'
#' genome_file <- system.file("extdata", "genome.fasta", package = "AnaCoDa")
#'
#' genome <- initializeGenomeObject(file = genome_file)
#' sphi_init <- c(1,1)
#' numMixtures <- 2
#' geneAssignment <- sample(1:2, length(genome), replace = TRUE) # random assignment to mixtures
#' parameter <- initializeParameterObject(genome = genome, sphi = sphi_init,
#' num.mixtures = numMixtures,
#' gene.assignment = geneAssignment,
#' mixture.definition = "allUnique")
#'
#' ## writing an empty parameter object as the runMCMC routine was not called yet
#' writeParameterObject(parameter = parameter, file = file.path(tempdir(), "file.Rda"))
#'
#' }
#'
writeParameterObject <- function(parameter, file)
{
  # S3 generic; dispatches on the Rcpp parameter class
  # (Rcpp_ROCParameter, Rcpp_PAParameter, Rcpp_PANSEParameter, Rcpp_FONSEParameter).
  UseMethod("writeParameterObject", parameter)
}
# Collects the traces and configuration shared by all Parameter subclasses
# (the base class Parameter) into a plain named list, for serialization by the
# writeParameterObject methods.
extractBaseInfo <- function(parameter){
  trace <- parameter$getTraceObject()
  list(stdDevSynthesisRateTraces = trace$getStdDevSynthesisRateTraces(),
       stdDevSynthesisRateAcceptRatTrace = trace$getStdDevSynthesisRateAcceptanceRateTrace(),
       synthRateTrace = trace$getSynthesisRateTrace(),
       synthAcceptRatTrace = trace$getSynthesisRateAcceptanceRateTrace(),
       mixAssignTrace = trace$getMixtureAssignmentTrace(),
       mixProbTrace = trace$getMixtureProbabilitiesTrace(),
       codonSpecificAcceptRatTrace = trace$getCodonSpecificAcceptanceRateTrace(),
       numMix = parameter$numMixtures,
       numMut = parameter$numMutationCategories,
       numSel = parameter$numSelectionCategories,
       categories = parameter$getCategories(),
       curMixAssignment = parameter$getMixtureAssignment(),
       lastIteration = parameter$getLastIteration(),
       grouplist = parameter$getGroupList())
}
# Called from "writeParameterObject."
# Serializes a ROC parameter object (base info, current/proposed codon specific
# parameters, their traces, and the synthesis offset/noise traces produced when
# fitting with observed phi) to `file` via save().
writeParameterObject.Rcpp_ROCParameter <- function(parameter, file){
  paramBase <- extractBaseInfo(parameter)
  currentMutation <- parameter$currentMutationParameter
  currentSelection <- parameter$currentSelectionParameter
  proposedMutation <- parameter$proposedMutationParameter
  proposedSelection <- parameter$proposedSelectionParameter
  model <- "ROC"
  mutationPrior <- parameter$getMutationPriorStandardDeviation()
  trace <- parameter$getTraceObject()
  mutationTrace <- trace$getCodonSpecificParameterTrace(0)
  selectionTrace <- trace$getCodonSpecificParameterTrace(1)
  synthesisOffsetAcceptRatTrace <- trace$getSynthesisOffsetAcceptanceRateTrace()
  synthesisOffsetTrace <- trace$getSynthesisOffsetTrace()
  observedSynthesisNoiseTrace <- trace$getObservedSynthesisNoiseTrace()
  # An empty offset trace indicates the run was performed without observed phi.
  withPhi <- length(synthesisOffsetTrace) > 0
  save(list = c("paramBase", "currentMutation", "currentSelection",
                "proposedMutation", "proposedSelection", "model",
                "mutationPrior", "mutationTrace", "selectionTrace",
                "synthesisOffsetAcceptRatTrace", "synthesisOffsetTrace",
                "observedSynthesisNoiseTrace", "withPhi"),
       file = file)
}
# Called from "writeParameterObject."
# Serializes a PA parameter object (base info, current/proposed alpha and
# lambda prime parameters, and their traces) to `file` via save().
writeParameterObject.Rcpp_PAParameter <- function(parameter, file){
  paramBase <- extractBaseInfo(parameter)
  currentAlpha <- parameter$currentAlphaParameter
  currentLambdaPrime <- parameter$currentLambdaPrimeParameter
  proposedAlpha <- parameter$proposedAlphaParameter
  proposedLambdaPrime <- parameter$proposedLambdaPrimeParameter
  model <- "PA"
  trace <- parameter$getTraceObject()
  alphaTrace <- trace$getCodonSpecificParameterTrace(0)
  lambdaPrimeTrace <- trace$getCodonSpecificParameterTrace(1)
  save(list = c("paramBase", "currentAlpha", "currentLambdaPrime", "proposedAlpha",
                "proposedLambdaPrime", "model", "alphaTrace", "lambdaPrimeTrace"),
       file = file)
}
# Called from "writeParameterObject."
# Serializes a PANSE parameter object (base info, current/proposed alpha and
# lambda prime parameters, and their traces) to `file` via save().
writeParameterObject.Rcpp_PANSEParameter <- function(parameter, file){
  paramBase <- extractBaseInfo(parameter)
  currentAlpha <- parameter$currentAlphaParameter
  currentLambdaPrime <- parameter$currentLambdaPrimeParameter
  proposedAlpha <- parameter$proposedAlphaParameter
  proposedLambdaPrime <- parameter$proposedLambdaPrimeParameter
  model <- "PANSE"
  trace <- parameter$getTraceObject()
  alphaTrace <- trace$getCodonSpecificParameterTrace(0)
  lambdaPrimeTrace <- trace$getCodonSpecificParameterTrace(1)
  save(list = c("paramBase", "currentAlpha", "currentLambdaPrime", "proposedAlpha",
                "proposedLambdaPrime", "model", "alphaTrace", "lambdaPrimeTrace"),
       file = file)
}
# Called from "writeParameterObject."
# Serializes a FONSE parameter object (base info, current mutation/selection
# parameters, mutation prior, and the codon specific traces) to `file` via save().
writeParameterObject.Rcpp_FONSEParameter <- function(parameter, file)
{
  paramBase <- extractBaseInfo(parameter)
  currentMutation <- parameter$currentMutationParameter
  currentSelection <- parameter$currentSelectionParameter
  model <- "FONSE"
  mutationPrior <- parameter$getMutationPriorStandardDeviation()
  trace <- parameter$getTraceObject()
  mutationTrace <- trace$getCodonSpecificParameterTrace(0)
  selectionTrace <- trace$getCodonSpecificParameterTrace(1)
  save(list = c("paramBase", "currentMutation", "currentSelection",
                "model", "mutationPrior", "mutationTrace", "selectionTrace"),
       file = file)
}
#' Load Parameter Object
#'
#' @param files A list of parameter filenames to be loaded. If multiple files are given,
#' the parameter objects will be concatenated in the order provided
#'
#' @return Returns an initialized Parameter object.
#'
#' @description \code{loadParameterObject} will load a parameter object from the filesystem
#'
#' @details The function loads one or multiple files. In the case of multiple file, e.g. due to the use of check pointing, the files will
#' be concatenated to one parameter object. See \link{writeParameterObject} for the writing of parameter objects
#'
#' @examples
#' \dontrun{
#' # load a single parameter object
#' parameter <- loadParameterObject("parameter.Rda")
#'
#' # load and concatenate multiple parameter object
#' parameter <- loadParameterObject(c("parameter1.Rda", "parameter2.Rda"))
#' }
#'
loadParameterObject <- function(files)
{
  # First pass: verify that all files were produced by the same model before
  # constructing anything. Each file is loaded into a scratch environment so
  # its variables do not clobber this function's locals.
  firstModel <- "Invalid model"
  for (i in seq_along(files)){
    tempEnv <- new.env()
    load(file = files[i], envir = tempEnv)
    if (i == 1){
      firstModel <- tempEnv$model
    }else{
      if (firstModel != tempEnv$model){
        stop("The models do not match between files")
      }
    }
  }
  # Second pass: construct the matching parameter object and fill it from the
  # files (the load*ParameterObject helpers also concatenate traces).
  if (firstModel == "ROC"){
    parameter <- new(ROCParameter)
    parameter <- loadROCParameterObject(parameter, files)
  }else if (firstModel == "PA") {
    parameter <- new(PAParameter)
    parameter <- loadPAParameterObject(parameter, files)
  }else if (firstModel == "PANSE") {
    parameter <- new(PANSEParameter)
    parameter <- loadPANSEParameterObject(parameter, files)
  }else if (firstModel == "FONSE") {
    parameter <- new(FONSEParameter)
    parameter <- loadFONSEParameterObject(parameter, files)
  }else{
    stop("File data corrupted")
  }
  return(parameter)
}
# Sets all the common (base class Parameter) variables in a Parameter object
# from one or more saved parameter files, concatenating traces across files
# (e.g. check-pointed runs). Errors if the files disagree on the
# mixture/category structure or genome size.
setBaseInfo <- function(parameter, files)
{
  for (i in 1:length(files)) {
    # load each file into a scratch environment so its variables do not
    # clobber this function's locals
    tempEnv <- new.env();
    load(file = files[i], envir = tempEnv)
    if (i == 1) {
      # First file: take structure and traces as the baseline. Traces are
      # truncated to lastIteration + 1 entries (the +1 accounts for the
      # initial state recorded before the first iteration).
      categories <- tempEnv$paramBase$categories
      categories.matrix <- do.call("rbind", tempEnv$paramBase$categories)
      numMixtures <- tempEnv$paramBase$numMix
      numMutationCategories <- tempEnv$paramBase$numMut
      numSelectionCategories <- tempEnv$paramBase$numSel
      mixtureAssignment <- tempEnv$paramBase$curMixAssignment
      lastIteration <- tempEnv$paramBase$lastIteration
      max <- tempEnv$paramBase$lastIteration + 1
      grouplist <- tempEnv$paramBase$grouplist
      stdDevSynthesisRateTraces <- vector("list", length = numSelectionCategories)
      for (j in 1:numSelectionCategories) {
        stdDevSynthesisRateTraces[[j]] <- tempEnv$paramBase$stdDevSynthesisRateTraces[[j]][1:max]
      }
      stdDevSynthesisRateAcceptanceRateTrace <- tempEnv$paramBase$stdDevSynthesisRateAcceptRatTrace
      # synthesis rate trace: one list per selection category, one vector per gene
      synthesisRateTrace <- vector("list", length = numSelectionCategories)
      for (j in 1:numSelectionCategories) {
        for (k in 1:length(tempEnv$paramBase$synthRateTrace[[j]])){
          synthesisRateTrace[[j]][[k]] <- tempEnv$paramBase$synthRateTrace[[j]][[k]][1:max]
        }
      }
      synthesisRateAcceptanceRateTrace <- tempEnv$paramBase$synthAcceptRatTrace
      mixtureAssignmentTrace <- vector("list", length = length(tempEnv$paramBase$mixAssignTrace))
      for (j in 1:length(tempEnv$paramBase$mixAssignTrace)){
        mixtureAssignmentTrace[[j]] <- tempEnv$paramBase$mixAssignTrace[[j]][1:max]
      }
      mixtureProbabilitiesTrace <- c()
      for (j in 1:numMixtures) {
        mixtureProbabilitiesTrace[[j]] <- tempEnv$paramBase$mixProbTrace[[j]][1:max]
      }
      codonSpecificAcceptanceRateTrace <- tempEnv$paramBase$codonSpecificAcceptRatTrace
    } else {
      # Subsequent files: validate structural consistency with the first file
      # before concatenating their traces onto the baseline.
      if (sum(categories.matrix != do.call("rbind", tempEnv$paramBase$categories)) != 0){
        stop("categories is not the same between all files")
      }#end of error check
      if (numMixtures != tempEnv$paramBase$numMix){
        stop("The number of mixtures is not the same between files")
      }
      if (numMutationCategories != tempEnv$paramBase$numMut){
        stop("The number of mutation categories is not the same between files")
      }
      if (numSelectionCategories != tempEnv$paramBase$numSel){
        stop("The number of selection categories is not the same between files")
      }
      if (length(mixtureAssignment) != length(tempEnv$paramBase$curMixAssignment)){
        stop("The length of the mixture assignment is not the same between files.
             Make sure the same genome is used on each run.")
      }
      if(length(grouplist) != length(tempEnv$paramBase$grouplist)){
        stop("Number of Amino Acids/Codons is not the same between files.")
      }
      curStdDevSynthesisRateTraces <- tempEnv$paramBase$stdDevSynthesisRateTraces
      curStdDevSynthesisRateAcceptanceRateTrace <- tempEnv$paramBase$stdDevSynthesisRateAcceptRatTrace
      curSynthesisRateTrace <- tempEnv$paramBase$synthRateTrace
      curSynthesisRateAcceptanceRateTrace <- tempEnv$paramBase$synthAcceptRatTrace
      curMixtureAssignmentTrace <- tempEnv$paramBase$mixAssignTrace
      curMixtureProbabilitiesTrace <- tempEnv$paramBase$mixProbTrace
      curCodonSpecificAcceptanceRateTrace <- tempEnv$paramBase$codonSpecificAcceptRatTrace
      lastIteration <- lastIteration + tempEnv$paramBase$lastIteration
      #assuming all checks have passed, time to concatenate traces
      # NOTE(review): combineTwoDimensionalTrace/combineThreeDimensionalTrace
      # appear to rely on updating their first argument in the caller's frame;
      # a plain R call would not modify these locals - confirm against their
      # definitions elsewhere in this file.
      max <- tempEnv$paramBase$lastIteration + 1
      combineTwoDimensionalTrace(stdDevSynthesisRateTraces, curStdDevSynthesisRateTraces, max)
      # acceptance-rate traces: skip element 1 to avoid duplicating the state
      # shared between consecutive checkpoint files
      size <- length(curStdDevSynthesisRateAcceptanceRateTrace)
      stdDevSynthesisRateAcceptanceRateTrace <- c(stdDevSynthesisRateAcceptanceRateTrace,
                                                  curStdDevSynthesisRateAcceptanceRateTrace[2:size])
      combineThreeDimensionalTrace(synthesisRateTrace, curSynthesisRateTrace, max)
      size <- length(curSynthesisRateAcceptanceRateTrace)
      combineThreeDimensionalTrace(synthesisRateAcceptanceRateTrace, curSynthesisRateAcceptanceRateTrace, size)
      combineTwoDimensionalTrace(mixtureAssignmentTrace, curMixtureAssignmentTrace, max)
      combineTwoDimensionalTrace(mixtureProbabilitiesTrace, curMixtureProbabilitiesTrace, max)
      size <- length(curCodonSpecificAcceptanceRateTrace)
      combineTwoDimensionalTrace(codonSpecificAcceptanceRateTrace, curCodonSpecificAcceptanceRateTrace, size)
    }
  }
  # Push the merged configuration and traces into the parameter object.
  parameter$setCategories(categories)
  parameter$setCategoriesForTrace()
  parameter$numMixtures <- numMixtures
  parameter$numMutationCategories <- numMutationCategories
  parameter$numSelectionCategories <- numSelectionCategories
  # tempEnv still holds the LAST file's contents here; the final mixture
  # assignment is deliberately taken from the last file in the sequence.
  parameter$setMixtureAssignment(tempEnv$paramBase$curMixAssignment) #want the last in the file sequence
  parameter$setLastIteration(lastIteration)
  parameter$setGroupList(grouplist)
  trace <- parameter$getTraceObject()
  trace$setStdDevSynthesisRateTraces(stdDevSynthesisRateTraces)
  trace$setStdDevSynthesisRateAcceptanceRateTrace(stdDevSynthesisRateAcceptanceRateTrace)
  trace$setSynthesisRateTrace(synthesisRateTrace)
  trace$setSynthesisRateAcceptanceRateTrace(synthesisRateAcceptanceRateTrace)
  trace$setMixtureAssignmentTrace(mixtureAssignmentTrace)
  trace$setMixtureProbabilitiesTrace(mixtureProbabilitiesTrace)
  trace$setCodonSpecificAcceptanceRateTrace(codonSpecificAcceptanceRateTrace)
  parameter$setTraceObject(trace)
  return(parameter)
}
#Called from "loadParameterObject."
#Rebuild a ROC parameter object from one or more saved run files.  The first
#file initializes every trace; each later file is appended onto the existing
#traces (its first stored sample is dropped because it duplicates the last
#sample of the previous file).  current/proposed CSP values are taken from
#the last file in the sequence.
#BUG FIX(review): the file-level combine*DimensionalTrace() helpers cannot
#modify their arguments (R passes arguments by value), so the original calls
#were no-ops and every file after the first was silently discarded.  Local
#helpers that RETURN the merged trace are used instead.
loadROCParameterObject <- function(parameter, files)
{
  #append trace2 (minus its duplicated first sample) onto trace1; returns trace1
  appendTrace2D <- function(trace1, trace2, max){
    for (s in 1:length(trace1)){
      trace1[[s]] <- c(trace1[[s]], trace2[[s]][2:max])
    }
    trace1
  }
  #same as appendTrace2D, one nesting level deeper ([[category]][[codon]])
  appendTrace3D <- function(trace1, trace2, max){
    for (s in 1:length(trace1)){
      for (s2 in 1:length(trace1[[s]])){
        trace1[[s]][[s2]] <- c(trace1[[s]][[s2]], trace2[[s]][[s2]][2:max])
      }
    }
    trace1
  }
  parameter <- setBaseInfo(parameter, files)
  for (i in 1:length(files)){
    tempEnv <- new.env();
    load(file = files[i], envir = tempEnv)
    numMutationCategories <- tempEnv$paramBase$numMut
    numSelectionCategories <- tempEnv$paramBase$numSel
    #number of stored samples in this file (iteration 0 is included)
    max <- tempEnv$paramBase$lastIteration + 1
    if (i == 1){
      withPhi <- tempEnv$withPhi
      if (withPhi){
        #need number of phi groups, not the number of mixtures apparently.
        phiGroups <- length(tempEnv$synthesisOffsetTrace)
        synthesisOffsetTrace <- c()
        for (j in 1:phiGroups) {
          synthesisOffsetTrace[[j]] <- tempEnv$synthesisOffsetTrace[[j]][1:max]
        }
        synthesisOffsetAcceptanceRateTrace <- tempEnv$synthesisOffsetAcceptRatTrace
        observedSynthesisNoiseTrace <- c()
        for (j in 1:phiGroups) {
          observedSynthesisNoiseTrace[[j]] <- tempEnv$observedSynthesisNoiseTrace[[j]][1:max]
        }
      }else {
        synthesisOffsetTrace <- c()
        synthesisOffsetAcceptanceRateTrace <- c()
        observedSynthesisNoiseTrace <- c()
      }
      #codon-specific traces are indexed [[category]][[codon]] -> numeric vector
      codonSpecificParameterTraceMut <- vector("list", length=numMutationCategories)
      for (j in 1:numMutationCategories) {
        codonSpecificParameterTraceMut[[j]] <- vector("list", length=length(tempEnv$mutationTrace[[j]]))
        for (k in 1:length(tempEnv$mutationTrace[[j]])){
          codonSpecificParameterTraceMut[[j]][[k]] <- tempEnv$mutationTrace[[j]][[k]][1:max]
        }
      }
      codonSpecificParameterTraceSel <- vector("list", length=numSelectionCategories)
      for (j in 1:numSelectionCategories) {
        codonSpecificParameterTraceSel[[j]] <- vector("list", length=length(tempEnv$selectionTrace[[j]]))
        for (k in 1:length(tempEnv$selectionTrace[[j]])){
          codonSpecificParameterTraceSel[[j]][[k]] <- tempEnv$selectionTrace[[j]][[k]][1:max]
        }
      }
    }else{
      if (withPhi != tempEnv$withPhi){
        stop("Runs do not match in concern in with.phi")
      }
      if (withPhi){
        synthesisOffsetTrace <- appendTrace2D(synthesisOffsetTrace,
                                              tempEnv$synthesisOffsetTrace, max)
        #acceptance-rate traces are recorded on their own (shorter) schedule,
        #so their length -- not `max` -- bounds the slice
        size <- length(tempEnv$synthesisOffsetAcceptRatTrace)
        synthesisOffsetAcceptanceRateTrace <- appendTrace2D(synthesisOffsetAcceptanceRateTrace,
                                                            tempEnv$synthesisOffsetAcceptRatTrace, size)
        observedSynthesisNoiseTrace <- appendTrace2D(observedSynthesisNoiseTrace,
                                                     tempEnv$observedSynthesisNoiseTrace, max)
      }
      codonSpecificParameterTraceMut <- appendTrace3D(codonSpecificParameterTraceMut,
                                                      tempEnv$mutationTrace, max)
      codonSpecificParameterTraceSel <- appendTrace3D(codonSpecificParameterTraceSel,
                                                      tempEnv$selectionTrace, max)
    }#end of if-else
  }#end of for loop (files)
  #push the merged traces and the LAST file's current/proposed CSP values
  #into the parameter object
  trace <- parameter$getTraceObject()
  trace$setSynthesisOffsetTrace(synthesisOffsetTrace)
  trace$setSynthesisOffsetAcceptanceRateTrace(synthesisOffsetAcceptanceRateTrace)
  trace$setObservedSynthesisNoiseTrace(observedSynthesisNoiseTrace)
  trace$setCodonSpecificParameterTrace(codonSpecificParameterTraceMut, 0)
  trace$setCodonSpecificParameterTrace(codonSpecificParameterTraceSel, 1)
  parameter$currentMutationParameter <- tempEnv$currentMutation
  parameter$currentSelectionParameter <- tempEnv$currentSelection
  parameter$proposedMutationParameter <- tempEnv$proposedMutation
  parameter$proposedSelectionParameter <- tempEnv$proposedSelection
  parameter$setTraceObject(trace)
  return(parameter)
}
#Called from "loadParameterObject."
#Rebuild a PA parameter object from one or more saved run files.  The first
#file initializes the alpha and lambda-prime traces; later files are appended
#(dropping each file's duplicated first sample).  current/proposed CSP values
#are taken from the last file in the sequence.
#BUG FIX(review): combineThreeDimensionalTrace() cannot modify its arguments
#(R passes by value), so the original concatenation calls were no-ops and
#files after the first were silently discarded; the merge now uses a local
#helper that returns the result.
loadPAParameterObject <- function(parameter, files)
{
  #append trace2 (minus its duplicated first sample) onto trace1; returns trace1
  appendTrace3D <- function(trace1, trace2, max){
    for (s in 1:length(trace1)){
      for (s2 in 1:length(trace1[[s]])){
        trace1[[s]][[s2]] <- c(trace1[[s]][[s2]], trace2[[s]][[s2]][2:max])
      }
    }
    trace1
  }
  parameter <- setBaseInfo(parameter, files)
  for (i in 1:length(files)){
    tempEnv <- new.env();
    load(file = files[i], envir = tempEnv)
    #number of stored samples in this file (iteration 0 is included)
    max <- tempEnv$paramBase$lastIteration + 1
    numMixtures <- tempEnv$paramBase$numMix
    numMutationCategories <- tempEnv$paramBase$numMut
    numSelectionCategories <- tempEnv$paramBase$numSel
    if (i == 1){
      #for future use: This may break if PA is ran with more than
      #one mixture, in this case just follow the format of the
      #ROC CSP parameters.
      alphaTrace <- vector("list", length=numMutationCategories)
      for (j in 1:numMutationCategories) {
        for (k in 1:length(tempEnv$alphaTrace[[j]])){
          alphaTrace[[j]][[k]] <- tempEnv$alphaTrace[[j]][[k]][1:max]
        }
      }
      lambdaPrimeTrace <- vector("list", length=numSelectionCategories)
      for (j in 1:numSelectionCategories) {
        for (k in 1:length(tempEnv$lambdaPrimeTrace[[j]])){
          lambdaPrimeTrace[[j]][[k]] <- tempEnv$lambdaPrimeTrace[[j]][[k]][1:max]
        }
      }
    }else{
      alphaTrace <- appendTrace3D(alphaTrace, tempEnv$alphaTrace, max)
      lambdaPrimeTrace <- appendTrace3D(lambdaPrimeTrace, tempEnv$lambdaPrimeTrace, max)
    }
  }#end of for loop (files)
  #current/proposed CSP values come from the last file loaded
  parameter$currentAlphaParameter <- tempEnv$currentAlpha
  parameter$proposedAlphaParameter <- tempEnv$proposedAlpha
  parameter$currentLambdaPrimeParameter <- tempEnv$currentLambdaPrime
  parameter$proposedLambdaPrimeParameter <- tempEnv$proposedLambdaPrime
  trace <- parameter$getTraceObject()
  trace$setCodonSpecificParameterTrace(alphaTrace, 0)
  trace$setCodonSpecificParameterTrace(lambdaPrimeTrace, 1)
  parameter$setTraceObject(trace)
  return(parameter)
}
#Called from "loadParameterObject."
#Rebuild a PANSE parameter object from one or more saved run files.  Mirrors
#loadPAParameterObject: the first file initializes the alpha and lambda-prime
#traces, later files are appended (dropping each file's duplicated first
#sample), and current/proposed CSP values come from the last file.
#BUG FIX(review): combineThreeDimensionalTrace() cannot modify its arguments
#(R passes by value), so the original concatenation calls were no-ops and
#files after the first were silently discarded; the merge now uses a local
#helper that returns the result.
loadPANSEParameterObject <- function(parameter, files)
{
  #append trace2 (minus its duplicated first sample) onto trace1; returns trace1
  appendTrace3D <- function(trace1, trace2, max){
    for (s in 1:length(trace1)){
      for (s2 in 1:length(trace1[[s]])){
        trace1[[s]][[s2]] <- c(trace1[[s]][[s2]], trace2[[s]][[s2]][2:max])
      }
    }
    trace1
  }
  parameter <- setBaseInfo(parameter, files)
  for (i in 1:length(files)){
    tempEnv <- new.env();
    load(file = files[i], envir = tempEnv)
    #number of stored samples in this file (iteration 0 is included)
    max <- tempEnv$paramBase$lastIteration + 1
    numMixtures <- tempEnv$paramBase$numMix
    numMutationCategories <- tempEnv$paramBase$numMut
    numSelectionCategories <- tempEnv$paramBase$numSel
    if (i == 1){
      #for future use: This may break if PANSE is ran with more than
      #one mixture, in this case just follow the format of the
      #ROC CSP parameters.
      alphaTrace <- vector("list", length=numMutationCategories)
      for (j in 1:numMutationCategories) {
        for (k in 1:length(tempEnv$alphaTrace[[j]])){
          alphaTrace[[j]][[k]] <- tempEnv$alphaTrace[[j]][[k]][1:max]
        }
      }
      lambdaPrimeTrace <- vector("list", length=numSelectionCategories)
      for (j in 1:numSelectionCategories) {
        for (k in 1:length(tempEnv$lambdaPrimeTrace[[j]])){
          lambdaPrimeTrace[[j]][[k]] <- tempEnv$lambdaPrimeTrace[[j]][[k]][1:max]
        }
      }
    }else{
      alphaTrace <- appendTrace3D(alphaTrace, tempEnv$alphaTrace, max)
      lambdaPrimeTrace <- appendTrace3D(lambdaPrimeTrace, tempEnv$lambdaPrimeTrace, max)
    }
  }#end of for loop (files)
  #current/proposed CSP values come from the last file loaded
  parameter$currentAlphaParameter <- tempEnv$currentAlpha
  parameter$proposedAlphaParameter <- tempEnv$proposedAlpha
  parameter$currentLambdaPrimeParameter <- tempEnv$currentLambdaPrime
  parameter$proposedLambdaPrimeParameter <- tempEnv$proposedLambdaPrime
  trace <- parameter$getTraceObject()
  trace$setCodonSpecificParameterTrace(alphaTrace, 0)
  trace$setCodonSpecificParameterTrace(lambdaPrimeTrace, 1)
  parameter$setTraceObject(trace)
  return(parameter)
}
#Called from "loadParameterObject."
#Rebuild a FONSE parameter object from one or more saved run files.  The
#first file initializes the mutation/selection traces; later files are
#appended (dropping each file's duplicated first sample).  current CSP
#values are taken from the last file in the sequence.
#BUG FIX(review): combineThreeDimensionalTrace() cannot modify its arguments
#(R passes by value), so the original concatenation calls were no-ops and
#files after the first were silently discarded; the merge now uses a local
#helper that returns the result.
loadFONSEParameterObject <- function(parameter, files)
{
  #append trace2 (minus its duplicated first sample) onto trace1; returns trace1
  appendTrace3D <- function(trace1, trace2, max){
    for (s in 1:length(trace1)){
      for (s2 in 1:length(trace1[[s]])){
        trace1[[s]][[s2]] <- c(trace1[[s]][[s2]], trace2[[s]][[s2]][2:max])
      }
    }
    trace1
  }
  parameter <- setBaseInfo(parameter, files)
  for (i in 1:length(files)){
    tempEnv <- new.env();
    load(file = files[i], envir = tempEnv)
    numMutationCategories <- tempEnv$paramBase$numMut
    numSelectionCategories <- tempEnv$paramBase$numSel
    #number of stored samples in this file (iteration 0 is included)
    max <- tempEnv$paramBase$lastIteration + 1
    if (i == 1){
      #codon-specific traces are indexed [[category]][[codon]] -> numeric vector
      codonSpecificParameterTraceMut <- vector("list", length=numMutationCategories)
      for (j in 1:numMutationCategories) {
        codonSpecificParameterTraceMut[[j]] <- vector("list", length=length(tempEnv$mutationTrace[[j]]))
        for (k in 1:length(tempEnv$mutationTrace[[j]])){
          codonSpecificParameterTraceMut[[j]][[k]] <- tempEnv$mutationTrace[[j]][[k]][1:max]
        }
      }
      codonSpecificParameterTraceSel <- vector("list", length=numSelectionCategories)
      for (j in 1:numSelectionCategories) {
        codonSpecificParameterTraceSel[[j]] <- vector("list", length=length(tempEnv$selectionTrace[[j]]))
        for (k in 1:length(tempEnv$selectionTrace[[j]])){
          codonSpecificParameterTraceSel[[j]][[k]] <- tempEnv$selectionTrace[[j]][[k]][1:max]
        }
      }
    }else{
      codonSpecificParameterTraceMut <- appendTrace3D(codonSpecificParameterTraceMut,
                                                      tempEnv$mutationTrace, max)
      codonSpecificParameterTraceSel <- appendTrace3D(codonSpecificParameterTraceSel,
                                                      tempEnv$selectionTrace, max)
    }#end of if-else
  }#end of for loop (files)
  #push the merged traces and the LAST file's current CSP values into the
  #parameter object
  trace <- parameter$getTraceObject()
  trace$setCodonSpecificParameterTrace(codonSpecificParameterTraceMut, 0)
  trace$setCodonSpecificParameterTrace(codonSpecificParameterTraceSel, 1)
  parameter$currentMutationParameter <- tempEnv$currentMutation
  parameter$currentSelectionParameter <- tempEnv$currentSelection
  parameter$setTraceObject(trace)
  return(parameter)
}
#' Take the geometric mean of a vector
#'
#' @param x A vector of numerical values.
#'
#' @param rm.invalid Boolean value for handling 0, negative, or NA values in the vector. Default is TRUE and will not
#' include these values in the calculation. If FALSE, these values will be replaced by the value given to \code{default} and will
#' be included in the calculation.
#'
#' @param default Numerical value that serves as the value to replace 0, negative, or NA values in the calculation when rm.invalid is FALSE.
#' Default is 1e-5.
#'
#' @return Returns the geometric mean of a vector.
#'
#' @description \code{geom_mean} will calculate the geometric mean of a list of numerical values.
#'
#' @details This function is a special version of the geometric mean specifically for AnaCoda.
#' Most models in Anacoda assume a log normal distribution for phi values, thus all values in \code{x} are expected to be positive.
#' geom_mean returns the geometric mean of a vector and can handle 0, negative, or NA values.
#'
#' @examples
#' x <- c(1, 2, 3, 4)
#' geom_mean(x)
#'
#' y<- c(1, NA, 3, 4, 0, -1)
#' # Only take the mean of non-NA values greater than 0
#' geom_mean(y)
#'
#' # Replace values <= 0 or NAs with a default value 0.001 and then take the mean
#' geom_mean(y, rm.invalid = FALSE, default = 0.001)
#'
geom_mean <- function(x, rm.invalid = TRUE, default = 1e-5)
{
  if(!rm.invalid)
  {
    x[x <= 0 | is.na(x)] <- default
  } else{
    x <- x[which(x > 0 & !is.na(x))]
  }
  #compute in log space when possible: prod(x) over/underflows to Inf/0 for
  #long vectors or extreme values, while exp(mean(log(x))) is numerically
  #stable and mathematically equivalent for positive x
  if (length(x) > 0 && all(x > 0)) {
    total <- exp(mean(log(x)))
  } else {
    #fall back for empty input (yields 1, as before) or non-positive values
    #(only reachable when rm.invalid = FALSE with a non-positive default)
    total <- prod(x) ^ (1 / length(x))
  }
  return(total)
}
#Intended to combine 2D traces (vector of vectors) read in from C++. The first
#element of the second trace is omitted since it should be the same as the
#last value of the first trace.
#Returns the combined trace.  NOTE: R passes arguments by value, so mutating
#trace1 inside the function cannot affect the caller's copy; callers MUST
#assign the returned value, e.g. x <- combineTwoDimensionalTrace(x, y, max).
combineTwoDimensionalTrace <- function(trace1, trace2, max){
  for (size in 1:length(trace1))
  {
    trace1[[size]] <- c(trace1[[size]], trace2[[size]][2:max])
  }
  return(trace1)
}
#Intended to combine 3D traces (vector of vectors of vectors) read in from C++. The first
#element of the second trace is omitted since it should be the same as the
#last value of the first trace.
#Returns the combined trace.  NOTE: R passes arguments by value, so mutating
#trace1 inside the function cannot affect the caller's copy; callers MUST
#assign the returned value, e.g. x <- combineThreeDimensionalTrace(x, y, max).
combineThreeDimensionalTrace <- function(trace1, trace2, max){
  for (size in 1:length(trace1)){
    for (sizeTwo in 1:length(trace1[[size]])){
      trace1[[size]][[sizeTwo]] <- c(trace1[[size]][[sizeTwo]],
                                     trace2[[size]][[sizeTwo]][2:max])
    }
  }
  return(trace1)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{clust}
\alias{clust}
\title{Clustering fragment contributions}
\usage{
clust(data, molids = NULL)
}
\arguments{
\item{data}{vector of fragment contributions.}
\item{molids}{vector of molecule IDs corresponding to fragment contributions}
}
\value{
Mclust model object or NULL if data has a single unique observation value (see details).
}
\description{
Clustering fragment contributions
}
\details{
Mclust model is a Gaussian mixture model based on the integrated complete-
data likelihood optimization criterion. If all values in data are equal
or only a single value is provided, then NULL is returned.
}
\examples{
file_name <- system.file("extdata", "BBB_frag_contributions.txt", package = "rspci")
df <- load_data(file_name)
dx <- dplyr::filter(df, FragID == "OH (aliphatic)", Model == "consensus", Property == "overall")
m <- clust(dx$Contribution, dx$MolID)
}
| /man/clust.Rd | no_license | DrrDom/rspci | R | false | true | 953 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{clust}
\alias{clust}
\title{Clustering fragment contributions}
\usage{
clust(data, molids = NULL)
}
\arguments{
\item{data}{vector of fragment contributions.}
\item{molids}{vector of molecule IDs corresponding to fragment contributions}
}
\value{
Mclust model object or NULL if data has a single unique observation value (see details).
}
\description{
Clustering fragment contributions
}
\details{
Mclust model is a Gaussian mixture model based on the integrated complete-
data likelihood optimization criterion. If all values in data are equal
or only a single value is provided, then NULL is returned.
}
\examples{
file_name <- system.file("extdata", "BBB_frag_contributions.txt", package = "rspci")
df <- load_data(file_name)
dx <- dplyr::filter(df, FragID == "OH (aliphatic)", Model == "consensus", Property == "overall")
m <- clust(dx$Contribution, dx$MolID)
}
|
rm(list = ls())
library(MASS)
library(dplyr)
library(nnet)
library(generalhoslem)
library(VGAM)
library(pROC)
library(data.table)
# Set working directory and read data
setwd("E:/Purdue/2020fall/STAT526/project")
psodata <- read.csv(file = "final.csv")
# Preprocess: recode the binary indicators from (1, 2) to (1, 0), then
# convert all categorical predictors to factors
for (v in c("alcohol", "HBP", "diabetes", "smoke")) {
  psodata[[v]][psodata[[v]] == 2] <- 0
}
for (v in c("gender", "ethnicity", "countrybirth",
            "alcohol", "HBP", "diabetes", "smoke")) {
  psodata[[v]] <- as.factor(psodata[[v]])
}
# Collapse psoriasis severity into a binary outcome: 0 = none, 1 = any
psodata$psoriasis[psodata$psoriasis == 4] <- 3
psodata$psoriasis[psodata$psoriasis > 0] <- 1
psodata$psoriasis <- as.factor(psodata$psoriasis)
head(psodata)
# Logistic model: full model with goodness-of-fit, then stepwise selection
mod1 <- glm(psoriasis ~ ., family = binomial, data = psodata)
summary(mod1)
logitgof(psodata$psoriasis, fitted(mod1))
mod2 <- step(mod1, trace = 0)
summary(mod2)
logitgof(psodata$psoriasis, fitted(mod2))
confint(mod2)
predictor_vars <- psodata[c(-1, -5, -6, -10)]
probs <- mod2$fitted.values
# (An accuracy-based threshold search over seq(0, 1, 0.01) was previously
# prototyped here; the F1-based search below superseded it.)
# Find the classification threshold that maximizes the F1 score
thresholds <- seq(0, 1, 0.01)
best_t <- 0
f1score <- 0
best_cm <- list()
truth <- psodata$psoriasis
for (t in thresholds) {
  prediction <- ifelse(probs > t, 1, 0)
  tp <- length(which(prediction == 1 & truth == 1))
  fp <- length(which(prediction == 1 & truth == 0))
  fn <- length(which(prediction == 0 & truth == 1))
  tn <- length(which(prediction == 0 & truth == 0))
  # Confusion matrix laid out as the original: col "1" = predicted positive
  # (TP over FP), col "0" = predicted negative (FN over TN)
  cm <- cbind("1" = c(tp, fp), "0" = c(fn, tn))
  if (tp == 0) {
    f1 <- 0
  } else {
    # NOTE: these two labels are swapped relative to the usual definitions
    # (prec is actually recall and vice versa), but F1 is symmetric in its
    # two arguments, so the resulting score is unchanged
    prec <- tp / (tp + fn)
    recall <- tp / (tp + fp)
    f1 <- 2 * prec * recall / (recall + prec)
  }
  if (f1 > f1score) {
    best_t <- t
    f1score <- f1
    best_cm <- cm
  }
}
print(best_t)
print(f1score)
print(best_cm)
| /codes/logistic.R | no_license | daihui-lu/statisticalAnalysisOfFactorsOnPsoriasis | R | false | false | 2,401 | r | rm(list=ls())
library(MASS)
library(dplyr)
library(nnet)
library(generalhoslem)
library(VGAM)
library(pROC)
library(data.table)
#set working directory and read data
setwd("E:/Purdue/2020fall/STAT526/project")
psodata = read.csv(file = "final.csv")
#preprocess data
index = (psodata$alcohol==2)
psodata$alcohol[index] = 0
index = (psodata$HBP==2)
psodata$HBP[index] = 0
index = (psodata$diabetes==2)
psodata$diabetes[index] = 0
index = (psodata$smoke==2)
psodata$smoke[index] = 0
psodata$gender = as.factor(psodata$gender)
psodata$ethnicity = as.factor(psodata$ethnicity)
psodata$countrybirth = as.factor(psodata$countrybirth)
psodata$alcohol = as.factor(psodata$alcohol)
psodata$HBP = as.factor(psodata$HBP)
psodata$diabetes = as.factor(psodata$diabetes)
psodata$smoke = as.factor(psodata$smoke)
index = (psodata$psoriasis==4)
psodata$psoriasis[index] = 3
index = (psodata$psoriasis>0)
psodata$psoriasis[index] = 1
psodata$psoriasis = as.factor(psodata$psoriasis)
head(psodata)
#logistic model
mod1 = glm(psoriasis ~ ., family=binomial, data=psodata)
summary(mod1)
logitgof(psodata$psoriasis, fitted(mod1))
mod2 = step(mod1, trace=0)
summary(mod2)
logitgof(psodata$psoriasis, fitted(mod2))
confint(mod2)
predictor_vars = psodata[c(-1,-5,-6,-10)]
probs<- mod2$fitted.values
# #find threshold based on accuracy
# T = seq(0,1,0.01)
# best_t = 0
# meanvalue = 0
# for(t in T){
# predicted.classes <- ifelse(probs > t, 1, 0)
# m = mean(predicted.classes == psodata$psoriasis)
# if(m>meanvalue){
# meanvalue = m
# best_t = t
# }
# }
# print(best_t)
# print(meanvalue)
#find threshold based on f1_score
T = seq(0,1,0.01)
best_t = 0
f1score = 0
best_cm = list()
for(t in T){
prediction <- ifelse(probs > t, 1, 0)
dt = data.table('1'=c(length(which(prediction==1 & psodata$psoriasis==1)), length(which(prediction==1 & psodata$psoriasis==0))),
'0'=c(length(which(prediction==0 & psodata$psoriasis==1)), length(which(prediction==0 & psodata$psoriasis==0))))
cm = as.matrix(dt)
if(cm[1,1]==0){
f1 = 0
}
else{
prec = cm[1,1]/(cm[1,1]+cm[1,2])
recall = cm[1,1]/(cm[1,1]+cm[2,1])
f1 = 2*prec*recall/(recall+prec)
}
if(f1>f1score){
best_t = t
f1score=f1
best_cm = cm
}
}
print(best_t)
print(f1score)
print(best_cm)
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/plug-and-play.R
\name{add_meas_SS_param}
\alias{add_meas_SS_param}
\title{add parameters for a SS measurement slice among cases (conditional independence)}
\usage{
add_meas_SS_param(nslice, Mobs, prior, cause_list)
}
\arguments{
\item{nslice}{the total number of SS measurement slices}
\item{Mobs}{see \code{data_nplcm} described in \code{\link{nplcm}}}
\item{prior}{see \code{model_options} described in \code{\link{nplcm}}}
\item{cause_list}{the list of causes in \code{model_options} described in \code{\link{nplcm}}}
}
\value{
a list of two elements: the first is \code{plug}, the .bug code; the second is \code{parameters}
that stores model parameters introduced by this plugged measurement slice
}
\description{
add parameters for a SS measurement slice among cases (conditional independence)
}
| /man/add_meas_SS_param.Rd | permissive | gitter-badger/baker | R | false | false | 916 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/plug-and-play.R
\name{add_meas_SS_param}
\alias{add_meas_SS_param}
\title{add parameters for a SS measurement slice among cases (conditional independence)}
\usage{
add_meas_SS_param(nslice, Mobs, prior, cause_list)
}
\arguments{
\item{nslice}{the total number of SS measurement slices}
\item{Mobs}{see \code{data_nplcm} described in \code{\link{nplcm}}}
\item{prior}{see \code{model_options} described in \code{\link{nplcm}}}
\item{cause_list}{the list of causes in \code{model_options} described in \code{\link{nplcm}}}
}
\value{
a list of two elements: the first is \code{plug}, the .bug code; the second is \code{parameters}
that stores model parameters introduced by this plugged measurement slice
}
\description{
add parameters for a SS measurement slice among cases (conditional independence)
}
|
library(shiny)
library(shinydashboard)
library(leaflet)
library(aws.s3)
s3BucketName <- scan("bucketname.txt", what = "txt")
s3File <- scan("filepath.txt", what = "txt")
#list of buckets on S3
bucketlist(key = Sys.getenv("AWS_ACCESS_KEY_ID"),
secret = Sys.getenv("AWS_SECRET_ACCESS_KEY"),
region = Sys.getenv("AWS_DEFAULT_REGION"))
# files in the bucket
file_names <- get_bucket_df(s3BucketName)
myMap <- s3readRDS(object = file_names[file_names$Key == s3File, "Key"], bucket = s3BucketName)
ui <- dashboardPage(
dashboardHeader(),
dashboardSidebar(),
dashboardBody(
leafletOutput("mymap", height = "92vh") #this text is an ID that must match `output$var_name` below***
)
)
server <- function(input, output, session) {
output$mymap <- renderLeaflet({ #****
myMap
})
}
shinyApp(ui, server) | /app.R | no_license | FreyGeospatial/RShiny_S3 | R | false | false | 861 | r | library(shiny)
library(shinydashboard)
library(leaflet)
library(aws.s3)
s3BucketName <- scan("bucketname.txt", what = "txt")
s3File <- scan("filepath.txt", what = "txt")
#list of buckets on S3
bucketlist(key = Sys.getenv("AWS_ACCESS_KEY_ID"),
secret = Sys.getenv("AWS_SECRET_ACCESS_KEY"),
region = Sys.getenv("AWS_DEFAULT_REGION"))
# files in the bucket
file_names <- get_bucket_df(s3BucketName)
myMap <- s3readRDS(object = file_names[file_names$Key == s3File, "Key"], bucket = s3BucketName)
ui <- dashboardPage(
dashboardHeader(),
dashboardSidebar(),
dashboardBody(
leafletOutput("mymap", height = "92vh") #this text is an ID that must match `output$var_name` below***
)
)
server <- function(input, output, session) {
output$mymap <- renderLeaflet({ #****
myMap
})
}
shinyApp(ui, server) |
#load libraries
library(quantreg)
library(glmnet)
library(magrittr)
library(purrr)
#load data
#data.full <- readRDS()
debug.data <- readRDS("/Users/Matt Multach/Desktop/Dissertation/Dissertation_Git/Data_Generation/Data_Storage/debug_data_091720.RData")
#LAD lasso function with two-way CV for selecting both lambda and nu/gamma
#Fits an adaptive LAD lasso to one simulated dataset.  `data` is a list with
#elements X (predictor matrix), Y (response), conditions (n, p, eta.x, eta.y,
#g, h, plus a data seed), and seeds (a row of named seed columns).  Ridge
#coefficients provide the adaptive weights; for every candidate nu/gamma the
#BIC-minimizing lambda is selected, and the nu/gamma whose best model
#minimizes MPE is returned along with per-nu diagnostics and all seeds used.
#NOTE(review): the fpr/fnr computations assume the first 4 columns of X are
#the true signal variables -- confirm against the data-generation code.
ladlasso.sim.fnct <- function(data) {
     #create simulation tracker
     tracker <- as.vector(unlist(data$conditions))
     #print tracker of status
     cat("n = " , tracker[1] , " , p = " , tracker[2] ,
         " , eta.x = " , tracker[3] , " , eta.y = " , tracker[4] ,
         " , g = " , tracker[5] , " , h = " , tracker[6] ,
         ";\n")
     #load X, Y, p, n
     X <- data$X
     Y <- data$Y
     p <- data$conditions$p
     n <- length(Y)
     #set seed for generating ridge coefficients for weighting
     seed.ridge <- data$seeds[ , "seed.4"]
     set.seed(seed.ridge)
     #set possible lambda and nu/gamma values (both on a log-spaced grid)
     lambda.try <- seq(log(0.01) , log(1400) , length.out = 100)
     lambda.try <- exp(lambda.try)
     nu.try <- exp(seq(log(0.01) , log(10) , length.out = 100))
     #set seed for generating nu/gamma for weighting
     seed.pre.nu <- data$seeds[ , "seed.5"]
     set.seed(seed.pre.nu)
     #NOTE(review): rnorm(n = 1e9) materializes ~8 GB of doubles just to
     #draw 100 seeds -- consider sampling integers directly instead
     seed.nu <- sample(rnorm(n = 1000000000) , size = length(nu.try) , replace = FALSE)
     #find ridge coefs for adaptive weighting
     #NOTE(review): lambda.try was already computed above; this assignment
     #recreates the identical grid and is redundant
     lambda.try <- exp(seq(log(0.01) , log(1400) , length.out = 100))
     ridge.model <- cv.glmnet(x = X , y = Y , lambda = lambda.try , alpha = 0)
     lambda.ridge.opt <- ridge.model$lambda.min
     #drop the intercept; only slopes are used for adaptive weights
     best.ridge.coefs <- predict(ridge.model , type = "coefficients" ,
                                 s = lambda.ridge.opt)[-1]
     ##initialize list of best ladlasso results from each nu/gamma
     ladlasso.nu.cv <- list()
     #loop to generate results from each nu/gamma
     for(i in 1:length(nu.try)) {
          #set seed for random process
          seed <- seed.nu[i]
          set.seed(seed)
          #set adaptive weights (larger ridge coefficient => smaller penalty)
          weights <- 1 / (abs(best.ridge.coefs)^nu.try[i])
          #create empty vector for BIC values for each possible lambda value
          BIC <- rep(0 , 100)
          #generate model for each possible lambda value
          for (k in 1:100){
               rqfit <- rq.fit.lasso(X , Y , lambda = lambda.try[k] * weights)
               betalad_tmp <- rqfit$coef
               #zero out numerically-negligible coefficients
               betalad_tmp <- betalad_tmp * (betalad_tmp > 1e-8)
               #NOTE(review): despite the name, this is the mean ABSOLUTE
               #residual (the LAD loss), not a mean squared error
               mse <- mean(abs(rqfit$resi))
               mdsize <- length(which(betalad_tmp != 0))
               BIC[k] <- log(mse) + mdsize * log(n) / n
          }
          #indicator for BIC-minimizing lambda/model
          step <- which.min(BIC)
          #generate LAD lasso coefficients for minimizing lambda/model
          betalad <- rq.fit.lasso(X , Y , lambda = lambda.try[step] * weights)$coef
          ladlasso <- betalad * (betalad > 1e-8)
          #store best lambda for given nu/gamma
          lambda.ladlasso.opt <- lambda.try[step]
          #store coefficients
          coeff2.lad <- ladlasso # get rid of intercept
          #generate y-hats for each observation
          pred.lad <- X %*% coeff2.lad #+ coeff.lad[1]
          #store number of nonzero coefs
          st.lad <- sum(coeff2.lad != 0) # number nonzero
          #generate MSE and sd(MSE) for model (squared-error based, reported
          #below as mpe/mpe.sd; denominator adjusts for model size)
          mse.lad <- sum((Y - pred.lad) ^ 2) / (n - st.lad - 1)
          sd.mse.lad <- sd((Y - pred.lad) ^ 2 / (n - st.lad - 1))
          #save list of all info from best model
          ladlasso.nu.cv[[i]] <- list(other.info = list(fit = pred.lad ,
                                                        st = st.lad) ,
                                      metrics_and_info = list(BIC.min = min(BIC) ,
                                                              which.BIC.min = step ,
                                                              model.seed.ridge = seed.ridge ,
                                                              model.seed.prenu = seed.pre.nu ,
                                                              model.seed.nu = seed ,
                                                              ridge.coefs = best.ridge.coefs ,
                                                              weights = weights ,
                                                              nu = nu.try[i] ,
                                                              lambda = lambda.try[step] ,
                                                              coefs = coeff2.lad ,
                                                              mpe = mse.lad ,
                                                              mpe.sd = sd.mse.lad ,
                                                              fpr = length(which(coeff2.lad[c(5:p)] != 0)) / length(coeff2.lad[c(5:p)]) ,
                                                              fnr = length(which(coeff2.lad[c(1:4)] == 0)) / length(coeff2.lad[1:4])))
     }
     #find/store minimizing nu/gamma, seeds, minimized BIC/step
     ladlasso.nu.cv.mpe <- numeric()
     ladlasso.seeds.ridge <- numeric()
     ladlasso.seeds.prenu <- numeric()
     ladlasso.seeds.nu <- numeric()
     ladlasso.BIC.mins <- numeric()
     ladlasso.which.BIC.mins <- numeric()
     for(i in 1:length(ladlasso.nu.cv)) {
          ladlasso.nu.cv.mpe[i] <- ladlasso.nu.cv[[i]]$metrics_and_info$mpe
          ladlasso.seeds.ridge[i] <- ladlasso.nu.cv[[i]]$metrics_and_info$model.seed.ridge
          ladlasso.seeds.nu[i] <- ladlasso.nu.cv[[i]]$metrics_and_info$model.seed.nu
          ladlasso.seeds.prenu[i] <- ladlasso.nu.cv[[i]]$metrics_and_info$model.seed.prenu
          ladlasso.BIC.mins[i] <- ladlasso.nu.cv[[i]]$metrics_and_info$BIC.min
          ladlasso.which.BIC.mins[i] <- ladlasso.nu.cv[[i]]$metrics_and_info$which.BIC.min
     }
     #store BEST ladlasso result plus all seeds
     ###below is used to check that seeds are regenerated properly and not uniform
     return(list(BICs = ladlasso.BIC.mins ,
                 which.BICs = ladlasso.which.BIC.mins ,
                 mpes = ladlasso.nu.cv.mpe ,
                 seeds.ridge = ladlasso.seeds.ridge ,
                 seeds.prenu = ladlasso.seeds.prenu ,
                 seeds.nu = ladlasso.seeds.nu ,
                 model = ladlasso.nu.cv[[which.min(ladlasso.nu.cv.mpe)]] ,
                 important = list(diagnostics = data.frame(cbind(data.seed = tracker[7] ,
                                                                 model.seed.ridge = ladlasso.nu.cv[[which.min(ladlasso.nu.cv.mpe)]]$metrics_and_info$model.seed.ridge ,
                                                                 model.seed.prenu = ladlasso.nu.cv[[which.min(ladlasso.nu.cv.mpe)]]$metrics_and_info$model.seed.prenu ,
                                                                 model.seed.nu = ladlasso.nu.cv[[which.min(ladlasso.nu.cv.mpe)]]$metrics_and_info$model.seed.nu)) ,
                                  coefs = ladlasso.nu.cv[[which.min(ladlasso.nu.cv.mpe)]]$metrics_and_info$coefs ,
                                  weights = ladlasso.nu.cv[[which.min(ladlasso.nu.cv.mpe)]]$metrics_and_info$weights ,
                                  info = data.frame(cbind(n = tracker[1] ,
                                                          p = tracker[2] ,
                                                          eta.x = tracker[3] ,
                                                          eta.y = tracker[4] ,
                                                          g = tracker[5] ,
                                                          h = tracker[6] ,
                                                          data.seed = tracker[7] ,
                                                          model.seed.ridge = ladlasso.nu.cv[[which.min(ladlasso.nu.cv.mpe)]]$metrics_and_info$model.seed.ridge ,
                                                          model.seed.prenu = ladlasso.nu.cv[[which.min(ladlasso.nu.cv.mpe)]]$metrics_and_info$model.seed.prenu ,
                                                          model.seed.nu = ladlasso.nu.cv[[which.min(ladlasso.nu.cv.mpe)]]$metrics_and_info$model.seed.nu) ,
                                                    lambda = ladlasso.nu.cv[[which.min(ladlasso.nu.cv.mpe)]]$metrics_and_info$lambda ,
                                                    nu = ladlasso.nu.cv[[which.min(ladlasso.nu.cv.mpe)]]$metrics_and_info$nu ,
                                                    mpe = ladlasso.nu.cv[[which.min(ladlasso.nu.cv.mpe)]]$metrics_and_info$mpe ,
                                                    mpe.sd = ladlasso.nu.cv[[which.min(ladlasso.nu.cv.mpe)]]$metrics_and_info$mpe.sd ,
                                                    fpr = ladlasso.nu.cv[[which.min(ladlasso.nu.cv.mpe)]]$metrics_and_info$fpr ,
                                                    fnr = ladlasso.nu.cv[[which.min(ladlasso.nu.cv.mpe)]]$metrics_and_info$fnr)
                 )
            )
     )
}
#run across full dataset
ladlasso.debug <- debug.data %>%
map(safely(ladlasso.sim.fnct))
saveRDS(ladlasso.debug , "/Users/Matt Multach/Dropbox/USC_Grad2/Courses/Dissertation/Dissertation_Git/Data_Storage/Error_Storage/ladlasso_debug.RData")
{
#dealing with error/result from map(safely())
#create empty lists for error + result
##ladlasso.error <- list()
##ladlasso.result <- list()
##ladlasso.final <- list()
#split data into separate error and result lists
##for(i in 1:length(ladlasso.debug)) {
#iteration tracker
## cat("i = " , i , "\n")
#fill error list
## ladlasso.error[[i]] <- list(error = ladlasso.debug[[i]]$error ,
## condition = as.data.frame(unlist(debug.data[[i]]$condition) ,
## n = n , p = p ,
## eta.x = eta.x , eta.y = eta.y ,
## g = g , h = h , seed = seed))
#fill in results if results aren't NULL from safely()
## ladlasso.result[[i]] <- ladlasso.debug[[i]]$result
#fill final list
## if(!is.null(ladlasso.debug[[i]]$result)) {
## ladlasso.final[[i]] <- ladlasso.debug[[i]]$result$important
## } else {
## ladlasso.final[[i]] <- ladlasso.error[[i]]
## }
##}
#combine diagnostics
##diagnostics <- data.frame(matrix(ncol = 4 , nrow = length(debug.data)))
##colnames(diagnostics) <- c("data.seed" , "model.seed.ridge" , "model.seed.prenu" , "model.seed.nu")
##for(i in 1:length(ladlasso.final)) {
## diagnostics[i , "data.seed"] <- ladlasso.final[[i]]$diagnostics$data.seed
## diagnostics[i , "model.seed.ridge"] <- ladlasso.final[[i]]$diagnostics$model.seed.ridge
## diagnostics[i , "model.seed.prenu"] <- ladlasso.final[[i]]$diagnostics$model.seed.prenu
## diagnostics[i , "model.seed.nu"] <- ladlasso.final[[i]]$diagnostics$model.seed.nu
##}
#save files
##saveRDS(ladlasso.result , "/Users/Matt Multach/Dropbox/USC_Grad2/Courses/Dissertation/Dissertation_Git/Data_Storage/Model_Storage/ladlasso_result_DEBUG.RData")
##saveRDS(ladlasso.error , "/Users/Matt Multach/Dropbox/USC_Grad2/Courses/Dissertation/Dissertation_Git/Data_Storage/Error_Storage/ladlasso_error_DEBUG.RData")
##saveRDS(ladlasso.final , "/Users/Matt Multach/Dropbox/USC_Grad2/Courses/Dissertation/Dissertation_Git/Data_Storage/MainResults_Storage/ladlasso_resultmain_DEBUG.RData")
##saveRDS(diagnostics , "/Users/Matt Multach/Dropbox/USC_Grad2/Courses/Dissertation/Dissertation_Git/Data_Storage/Diagnostics_Storage/ladlasso_diagnostics_DEBUG.RData")
} | /Model_Application/Multiple_Dataset/LADLasso2.R | no_license | multach87/Dissertation | R | false | false | 12,484 | r | #load libraries
library(quantreg)
library(glmnet)
library(magrittr)
library(purrr)
#load data
#data.full <- readRDS()
debug.data <- readRDS("/Users/Matt Multach/Desktop/Dissertation/Dissertation_Git/Data_Generation/Data_Storage/debug_data_091720.RData")
#LAD lasso function with two-way CV for selecting both lambda and nu/gamma
#' Fit an adaptive LAD-lasso to one simulated dataset, cross-validating both
#' the penalty parameter lambda and the adaptive-weight exponent nu/gamma.
#'
#' `data` is one element of the simulation list and must provide:
#'   $X, $Y       - design matrix and response vector
#'   $conditions  - simulation condition list (n, p, eta.x, eta.y, g, h, seed)
#'   $seeds       - pre-drawn seeds (columns "seed.4" and "seed.5" are used)
#'
#' Ridge coefficients (glmnet::cv.glmnet) supply the adaptive weights; for
#' each candidate nu the LAD-lasso (quantreg::rq.fit.lasso) is tuned over a
#' lambda grid by BIC, and the nu minimizing the df-adjusted MSE wins.
#'
#' Returns a list of per-nu summary vectors (BICs, mpes, seeds, ...), the
#' winning fit under $model, and a compact results summary under $important.
ladlasso.sim.fnct <- function(data) {
  #print a tracker of the simulation condition being processed
  tracker <- as.vector(unlist(data$conditions))
  cat("n = " , tracker[1] , " , p = " , tracker[2] ,
      " , eta.x = " , tracker[3] , " , eta.y = " , tracker[4] ,
      " , g = " , tracker[5] , " , h = " , tracker[6] ,
      ";\n")
  #load X, Y, p, n
  X <- data$X
  Y <- data$Y
  p <- data$conditions$p
  n <- length(Y)
  #seed for the ridge fit used to build the adaptive weights
  seed.ridge <- data$seeds[ , "seed.4"]
  set.seed(seed.ridge)
  #log-spaced candidate grids for lambda and nu/gamma
  #(the original computed lambda.try twice, identically; once is enough)
  lambda.try <- exp(seq(log(0.01) , log(1400) , length.out = 100))
  nu.try <- exp(seq(log(0.01) , log(10) , length.out = 100))
  #draw one recorded seed per candidate nu/gamma
  seed.pre.nu <- data$seeds[ , "seed.5"]
  set.seed(seed.pre.nu)
  # NOTE(review): rnorm(n = 1e9) materializes ~8 GB just to pick 100 values;
  # kept verbatim so the recorded seed values (and the RNG state seen by
  # cv.glmnet below) stay reproducible, but a smaller pool would suffice.
  seed.nu <- sample(rnorm(n = 1000000000) , size = length(nu.try) , replace = FALSE)
  #ridge coefficients (intercept dropped) for adaptive weighting
  ridge.model <- cv.glmnet(x = X , y = Y , lambda = lambda.try , alpha = 0)
  lambda.ridge.opt <- ridge.model$lambda.min
  best.ridge.coefs <- predict(ridge.model , type = "coefficients" ,
                              s = lambda.ridge.opt)[-1]
  ##best LAD-lasso result for each candidate nu/gamma (preallocated)
  ladlasso.nu.cv <- vector("list" , length(nu.try))
  for(i in seq_along(nu.try)) {
    #seed recorded with this fit (the fitting below is deterministic)
    seed <- seed.nu[i]
    set.seed(seed)
    #adaptive weights: small ridge coefficients receive large penalties
    weights <- 1 / (abs(best.ridge.coefs)^nu.try[i])
    #BIC for every candidate lambda
    BIC <- rep(0 , length(lambda.try))
    for (k in seq_along(lambda.try)) {
      rqfit <- rq.fit.lasso(X , Y , lambda = lambda.try[k] * weights)
      #zero out numerically-null coefficients before counting model size
      betalad_tmp <- rqfit$coef
      betalad_tmp <- betalad_tmp * (betalad_tmp > 1e-8)
      mse <- mean(abs(rqfit$resi))
      mdsize <- length(which(betalad_tmp != 0))
      BIC[k] <- log(mse) + mdsize * log(n) / n
    }
    #index of the BIC-minimizing lambda
    step <- which.min(BIC)
    #refit at the chosen lambda and threshold small coefficients
    betalad <- rq.fit.lasso(X , Y , lambda = lambda.try[step] * weights)$coef
    ladlasso <- betalad * (betalad > 1e-8)
    coeff2.lad <- ladlasso
    #fitted values (the model carries no intercept term)
    pred.lad <- X %*% coeff2.lad
    #number of nonzero coefficients
    st.lad <- sum(coeff2.lad != 0)
    #df-adjusted MSE and its sd
    mse.lad <- sum((Y - pred.lad) ^ 2) / (n - st.lad - 1)
    sd.mse.lad <- sd((Y - pred.lad) ^ 2 / (n - st.lad - 1))
    #save everything needed downstream for this nu/gamma
    #(predictors 1:4 are treated as true signal and 5:p as noise --
    # hence the fpr/fnr definitions below)
    ladlasso.nu.cv[[i]] <-
      list(other.info = list(fit = pred.lad ,
                             st = st.lad) ,
           metrics_and_info = list(BIC.min = min(BIC) ,
                                   which.BIC.min = step ,
                                   model.seed.ridge = seed.ridge ,
                                   model.seed.prenu = seed.pre.nu ,
                                   model.seed.nu = seed ,
                                   ridge.coefs = best.ridge.coefs ,
                                   weights = weights ,
                                   nu = nu.try[i] ,
                                   lambda = lambda.try[step] ,
                                   coefs = coeff2.lad ,
                                   mpe = mse.lad ,
                                   mpe.sd = sd.mse.lad ,
                                   fpr = length(which(coeff2.lad[c(5:p)] != 0)) / length(coeff2.lad[c(5:p)]) ,
                                   fnr = length(which(coeff2.lad[c(1:4)] == 0)) / length(coeff2.lad[1:4])))
  }
  #collect per-nu summary vectors (preallocated instead of grown)
  n.fits <- length(ladlasso.nu.cv)
  ladlasso.nu.cv.mpe <- numeric(n.fits)
  ladlasso.seeds.ridge <- numeric(n.fits)
  ladlasso.seeds.prenu <- numeric(n.fits)
  ladlasso.seeds.nu <- numeric(n.fits)
  ladlasso.BIC.mins <- numeric(n.fits)
  ladlasso.which.BIC.mins <- numeric(n.fits)
  for(i in seq_len(n.fits)) {
    fit.info <- ladlasso.nu.cv[[i]]$metrics_and_info
    ladlasso.nu.cv.mpe[i] <- fit.info$mpe
    ladlasso.seeds.ridge[i] <- fit.info$model.seed.ridge
    ladlasso.seeds.prenu[i] <- fit.info$model.seed.prenu
    ladlasso.seeds.nu[i] <- fit.info$model.seed.nu
    ladlasso.BIC.mins[i] <- fit.info$BIC.min
    ladlasso.which.BIC.mins[i] <- fit.info$which.BIC.min
  }
  #the nu/gamma with the smallest df-adjusted MSE wins
  #(hoisted here instead of re-running which.min() for every field below)
  best <- ladlasso.nu.cv[[which.min(ladlasso.nu.cv.mpe)]]
  best.info <- best$metrics_and_info
  return(list(BICs = ladlasso.BIC.mins ,
              which.BICs = ladlasso.which.BIC.mins ,
              mpes = ladlasso.nu.cv.mpe ,
              seeds.ridge = ladlasso.seeds.ridge ,
              seeds.prenu = ladlasso.seeds.prenu ,
              seeds.nu = ladlasso.seeds.nu ,
              model = best ,
              important = list(diagnostics = data.frame(cbind(data.seed = tracker[7] ,
                                                              model.seed.ridge = best.info$model.seed.ridge ,
                                                              model.seed.prenu = best.info$model.seed.prenu ,
                                                              model.seed.nu = best.info$model.seed.nu)) ,
                               coefs = best.info$coefs ,
                               weights = best.info$weights ,
                               info = data.frame(cbind(n = tracker[1] ,
                                                       p = tracker[2] ,
                                                       eta.x = tracker[3] ,
                                                       eta.y = tracker[4] ,
                                                       g = tracker[5] ,
                                                       h = tracker[6] ,
                                                       data.seed = tracker[7] ,
                                                       model.seed.ridge = best.info$model.seed.ridge ,
                                                       model.seed.prenu = best.info$model.seed.prenu ,
                                                       model.seed.nu = best.info$model.seed.nu) ,
                                                 lambda = best.info$lambda ,
                                                 nu = best.info$nu ,
                                                 mpe = best.info$mpe ,
                                                 mpe.sd = best.info$mpe.sd ,
                                                 fpr = best.info$fpr ,
                                                 fnr = best.info$fnr))))
}
#run across full dataset
# safely() wraps each fit so a failing dataset yields a $error entry
# instead of aborting the whole purrr::map() run
ladlasso.debug <- debug.data %>%
        map(safely(ladlasso.sim.fnct))
# persist the raw result/error pairs for later post-processing
saveRDS(ladlasso.debug , "/Users/Matt Multach/Dropbox/USC_Grad2/Courses/Dissertation/Dissertation_Git/Data_Storage/Error_Storage/ladlasso_debug.RData")
{
#dealing with error/result from map(safely())
#create empty lists for error + result
##ladlasso.error <- list()
##ladlasso.result <- list()
##ladlasso.final <- list()
#split data into separate error and result lists
##for(i in 1:length(ladlasso.debug)) {
#iteration tracker
## cat("i = " , i , "\n")
#fill error list
## ladlasso.error[[i]] <- list(error = ladlasso.debug[[i]]$error ,
## condition = as.data.frame(unlist(debug.data[[i]]$condition) ,
## n = n , p = p ,
## eta.x = eta.x , eta.y = eta.y ,
## g = g , h = h , seed = seed))
#fill in results if results aren't NULL from safely()
## ladlasso.result[[i]] <- ladlasso.debug[[i]]$result
#fill final list
## if(!is.null(ladlasso.debug[[i]]$result)) {
## ladlasso.final[[i]] <- ladlasso.debug[[i]]$result$important
## } else {
## ladlasso.final[[i]] <- ladlasso.error[[i]]
## }
##}
#combine diagnostics
##diagnostics <- data.frame(matrix(ncol = 4 , nrow = length(debug.data)))
##colnames(diagnostics) <- c("data.seed" , "model.seed.ridge" , "model.seed.prenu" , "model.seed.nu")
##for(i in 1:length(ladlasso.final)) {
## diagnostics[i , "data.seed"] <- ladlasso.final[[i]]$diagnostics$data.seed
## diagnostics[i , "model.seed.ridge"] <- ladlasso.final[[i]]$diagnostics$model.seed.ridge
## diagnostics[i , "model.seed.prenu"] <- ladlasso.final[[i]]$diagnostics$model.seed.prenu
## diagnostics[i , "model.seed.nu"] <- ladlasso.final[[i]]$diagnostics$model.seed.nu
##}
#save files
##saveRDS(ladlasso.result , "/Users/Matt Multach/Dropbox/USC_Grad2/Courses/Dissertation/Dissertation_Git/Data_Storage/Model_Storage/ladlasso_result_DEBUG.RData")
##saveRDS(ladlasso.error , "/Users/Matt Multach/Dropbox/USC_Grad2/Courses/Dissertation/Dissertation_Git/Data_Storage/Error_Storage/ladlasso_error_DEBUG.RData")
##saveRDS(ladlasso.final , "/Users/Matt Multach/Dropbox/USC_Grad2/Courses/Dissertation/Dissertation_Git/Data_Storage/MainResults_Storage/ladlasso_resultmain_DEBUG.RData")
##saveRDS(diagnostics , "/Users/Matt Multach/Dropbox/USC_Grad2/Courses/Dissertation/Dissertation_Git/Data_Storage/Diagnostics_Storage/ladlasso_diagnostics_DEBUG.RData")
} |
\name{Tosls}
\alias{Tosls}
\title{method}
\usage{
Tosls(x, ...)
}
\arguments{
\item{x}{a numeric design matrix for the model.}
\item{...}{not used}
}
\description{
method
}
\author{
Zaghdoudi Taha
}
| /man/tosls.Rd | no_license | cran/tosls | R | false | false | 223 | rd | \name{Tosls}
\alias{Tosls}
\title{method}
\usage{
Tosls(x, ...)
}
\arguments{
\item{x}{a numeric design matrix for the model.}
\item{...}{not used}
}
\description{
method
}
\author{
Zaghdoudi Taha
}
|
# Plot 3 (Exploratory Data Analysis course): energy sub-metering over
# 1-2 Feb 2007 from the UCI household power consumption data set.
#
# Read only the two target dates straight from the text file via an SQL
# filter (read.csv.sql comes from the sqldf package).
# NOTE(review): '||' is string concatenation in SQLite, not OR; the filter
# appears to work only because the concatenated '01'/'10' strings are
# numerically truthy -- confirm, and consider an explicit OR.
hc <- read.csv.sql("household_power_consumption.txt",
                           sql = "select * from file where (Date like '1/2/2007')|| (Date like '2/2/2007')", sep=";", eol = "\n")
# read.csv.sql leaves a database connection open; close it
closeAllConnections()
# combine the separate Date and Time columns into one parsed timestamp
# (strptime returns POSIXlt)
hc$DateTime <- paste(hc$Date, hc$Time, sep=" ")
hc$DateTime = strptime(hc$DateTime, "%d/%m/%Y %H:%M:%S")
# draw the three sub-metering series on a single set of axes
with(hc, {
  plot(DateTime,Sub_metering_1, type="l", ylab="Energy sub metering",
       xlab="", col="black")
  lines(DateTime, Sub_metering_2, col="red")
  lines(DateTime, Sub_metering_3, col="blue")
})
legend("topright", col=c("black","red","blue"), lty=c(1,1,1),
       legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
# copy the screen device to a 480x480 PNG (dev.off() follows below)
dev.copy(png, file="Plot3.png", width =480, height =480)
dev.off() | /Plot3.R | no_license | sammok123/ExData_Plotting1 | R | false | false | 700 | r | hc <- read.csv.sql("household_power_consumption.txt",
sql = "select * from file where (Date like '1/2/2007')|| (Date like '2/2/2007')", sep=";", eol = "\n")
closeAllConnections()
hc$DateTime <- paste(hc$Date, hc$Time, sep=" ")
hc$DateTime = strptime(hc$DateTime, "%d/%m/%Y %H:%M:%S")
with(hc, {
plot(DateTime,Sub_metering_1, type="l", ylab="Energy sub metering",
xlab="", col="black")
lines(DateTime, Sub_metering_2, col="red")
lines(DateTime, Sub_metering_3, col="blue")
})
legend("topright", col=c("black","red","blue"), lty=c(1,1,1),
legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
dev.copy(png, file="Plot3.png", width =480, height =480)
dev.off() |
## makeCacheMatrix() and cacheSolve() work together to avoid recomputing
## a matrix inverse: the first wraps a matrix in an object that can cache
## its inverse, and the second computes the inverse once and serves the
## cached copy on subsequent calls.

## makeCacheMatrix: create a list of closures that store a matrix and a
## cached copy of its inverse.
## Build a matrix wrapper that can cache its inverse.
##
## Returns a list of four accessor closures sharing the enclosing
## environment:
##   set(y)          -- replace the stored matrix and drop the cache
##   get()           -- return the stored matrix
##   setinverse(inv) -- store a computed inverse in the cache
##   getinverse()    -- return the cached inverse, or NULL if unset
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # a new matrix invalidates the cache
    },
    get = function() x,
    setinverse = function(inverse) cached_inverse <<- inverse,
    getinverse = function() cached_inverse
  )
}
## cacheSolve: return the inverse of the matrix held in a makeCacheMatrix()
## object, computing and caching it on first use.
## Return the inverse of the matrix held in a makeCacheMatrix() wrapper.
##
## If an inverse is already cached it is returned immediately (after the
## "getting cached data" message); otherwise the inverse is computed via
## solve(), stored back into the wrapper's cache, and returned.
##
## x   : a cache object created by makeCacheMatrix()
## ... : further arguments forwarded to solve()
cacheSolve <- function(x, ...) {
  inv <- x$getinverse()
  if (is.null(inv)) {
    inv <- solve(x$get(), ...)
    x$setinverse(inv)
  } else {
    message("getting cached data")
  }
  inv
}
| /cachematrix.R | no_license | prayasjain/ProgrammingAssignment2 | R | false | false | 885 | r | ## Put comments here that give an overall description of what your
## functions do
## makeCacheMatrix: create a list of closures that store a matrix and a
## cached copy of its inverse.
## Create a special "matrix" object able to cache its inverse.
##
## x: the matrix to wrap (defaults to a 1x1 NA matrix).
## Returns a list of four closures sharing this call's environment:
## set/get for the stored matrix, setinverse/getinverse for the cache.
makeCacheMatrix <- function(x = matrix()) {
  # m caches the inverse; NULL means "not computed yet"
  m <- NULL
  set <- function(y) {
    x <<- y
    m <<- NULL  # a new matrix invalidates any cached inverse
  }
  get <- function() x
  setinverse <- function(inverse) m <<- inverse
  getinverse <- function() m
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## cacheSolve: return the inverse of the matrix held in a makeCacheMatrix()
## object, computing and caching it on first use.
## Return the inverse of the matrix held in a makeCacheMatrix() object.
##
## x   : cache object created by makeCacheMatrix()
## ... : extra arguments forwarded to solve()
##
## On a cache hit the stored inverse is returned immediately (after a
## message); otherwise the inverse is computed, cached, and returned.
cacheSolve <- function(x, ...) {
  m <- x$getinverse()
  if(!is.null(m)) {
    # cache hit -- no recomputation needed
    message("getting cached data")
    return(m)
  }
  data <- x$get()
  m <- solve(data, ...)
  x$setinverse(m)  # store for subsequent calls
  m
  ## the final expression m -- the inverse of the stored matrix -- is returned
}
|
#!/bin/env Rscript
# Author: Maddalena Cella mc2820@ic.ac.uk
# Script: DataWrangTidy.R
# Description: Data wrangling using tidyverse
# Input: Rscript DataWrangTidy.R
# Output: some tibbles
# Arguments: 0
# Date: October 2020
library(tidyverse)
# tidyverse already attaches tidyr and dplyr, so the next two calls are
# redundant (harmless) -- kept for explicitness
library(tidyr)
library(dplyr)
# NOTE(review): rm(list = ls()) wipes the whole workspace; acceptable in a
# standalone script, but avoid if this file is ever source()'d
rm(list=ls())
# read the raw data as a character matrix (the file has no header row)
MyData <- as.matrix(read.csv("../Data/PoundHillData.csv",header = FALSE))
class(MyData)
#loading the metadata (semicolon-separated)
MyMetaData <- read.csv("../Data/PoundHillMetaData.csv",header = TRUE, sep=";")
class(MyMetaData)
tibble::as_tibble(MyData) #corresponds to head()
#All blank cells in the data are true absences,
#in the sense that species was actually not present in that quadrat. So we can replace those blanks with zero
MyData[MyData == ""] = 0
#transpose the matrix (rows and columns are swapped before reshaping)
MyData <- t(MyData)
head(MyData)
#first create a temporary dataframe with just the data, without the column names
TempData <- as.data.frame(MyData[-1,],stringsAsFactors = F)
tibble::as_tibble(TempData)
colnames(TempData) <- MyData[1,] # assign column names from original data
tibble::as_tibble(TempData)
rownames(TempData) <- NULL
tibble::as_tibble(TempData)
# wide -> long: columns 5 onwards hold the per-species counts
# NOTE(review): gather() is superseded; pivot_longer() is the modern form
MyWrangledData <- gather(TempData, "Species", "Count", 5:dim(TempData)[2]) #dim() gives number of columns and number of rows
tibble::as_tibble(MyWrangledData)
tibble::as_tibble(MyWrangledData[(dim(MyWrangledData)[1]-5):dim(MyWrangledData)[1],]) #like tail()
# fix the column types: the four identifier columns become factors,
# Count becomes integer
MyWrangledData[, "Cultivation"] <- as.factor(MyWrangledData[, "Cultivation"])
MyWrangledData[, "Block"] <- as.factor(MyWrangledData[, "Block"])
MyWrangledData[, "Plot"] <- as.factor(MyWrangledData[, "Plot"])
MyWrangledData[, "Quadrat"] <- as.factor(MyWrangledData[, "Quadrat"])
MyWrangledData[, "Count"] <- as.integer(MyWrangledData[, "Count"])
str(MyWrangledData)
tidyverse_packages(include_self = TRUE) # the include_self = TRUE means list "tidyverse" as well
# NOTE(review): require() is meant for conditional loading; library() is
# preferred (and tidyverse is already attached at the top)
require(tidyverse)
#let's convert the data frame to a "tibble"
tibble::as_tibble(MyWrangledData)
dplyr::glimpse(MyWrangledData) #like str(), but nicer!
dplyr::filter(MyWrangledData, Count>100) #like subset(), but nicer!
dplyr::slice(MyWrangledData, 10:15) # Look at an arbitrary set of data rows
| /Week3/Code/DataWrangTidy.R | permissive | MaddalenaCella/CMEECourseWork | R | false | false | 2,160 | r | #!/bin/env Rscript
# Author: Maddalena Cella mc2820@ic.ac.uk
# Script: DataWrangTidy.R
# Description: Data wrangling using tidyverse
# Input: Rscript DataWrangTidy.R
# Output: some tibbles
# Arguments: 0
# Date: October 2020
library(tidyverse)
library(tidyr)
library(dplyr)
rm(list=ls())
MyData <- as.matrix(read.csv("../Data/PoundHillData.csv",header = FALSE))
class(MyData)
#loading the metadata
MyMetaData <- read.csv("../Data/PoundHillMetaData.csv",header = TRUE, sep=";")
class(MyMetaData)
tibble::as_tibble(MyData) #corresponds to head()
#All blank cells in the data are true absences,
#in the sense that species was actually not present in that quadrat. So we can replace those blanks with zero
MyData[MyData == ""] = 0
#convert long to wide format
MyData <- t(MyData)
head(MyData)
#first create a temporary dataframe with just the data, without the column names
TempData <- as.data.frame(MyData[-1,],stringsAsFactors = F)
tibble::as_tibble(TempData)
colnames(TempData) <- MyData[1,] # assign column names from original data
tibble::as_tibble(TempData)
rownames(TempData) <- NULL
tibble::as_tibble(TempData)
MyWrangledData <- gather(TempData, "Species", "Count", 5:dim(TempData)[2]) #dim() gives number of columns and number of rows
tibble::as_tibble(MyWrangledData)
tibble::as_tibble(MyWrangledData[(dim(MyWrangledData)[1]-5):dim(MyWrangledData)[1],]) #like tail()
MyWrangledData[, "Cultivation"] <- as.factor(MyWrangledData[, "Cultivation"])
MyWrangledData[, "Block"] <- as.factor(MyWrangledData[, "Block"])
MyWrangledData[, "Plot"] <- as.factor(MyWrangledData[, "Plot"])
MyWrangledData[, "Quadrat"] <- as.factor(MyWrangledData[, "Quadrat"])
MyWrangledData[, "Count"] <- as.integer(MyWrangledData[, "Count"])
str(MyWrangledData)
tidyverse_packages(include_self = TRUE) # the include_self = TRUE means list "tidyverse" as well
require(tidyverse)
#let’s convert the dataframe to a “tibble
tibble::as_tibble(MyWrangledData)
dplyr::glimpse(MyWrangledData) #like str(), but nicer!
dplyr::filter(MyWrangledData, Count>100) #like subset(), but nicer!
dplyr::slice(MyWrangledData, 10:15) # Look at an arbitrary set of data rows
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/str-split-twice.R
\name{str_split_twice}
\alias{str_split_twice}
\title{Extract numeric values from string.}
\usage{
str_split_twice(char, min_only = TRUE)
}
\arguments{
\item{char}{Character string.}
\item{min_only}{Logical specifying if only the first numeric value (\code{TRUE}) or
all numeric values (\code{FALSE}) should be returned. Default is \code{TRUE}.}
}
\value{
numeric values inside \code{char} string.
}
\description{
The function splits any character string at each tab and space and returns
all (min_only = FALSE) or only the first (min_only = TRUE) numeric value found in the string.
}
\examples{
str_split_twice(char = "Hello 15")
str_split_twice(char = "flag1 15 16\t15", min_only = FALSE)
}
| /man/str_split_twice.Rd | no_license | hollyannperryman/atlantistools | R | false | true | 790 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/str-split-twice.R
\name{str_split_twice}
\alias{str_split_twice}
\title{Extract numeric values from string.}
\usage{
str_split_twice(char, min_only = TRUE)
}
\arguments{
\item{char}{Character string.}
\item{min_only}{Logical specifying if only the first numeric value (\code{TRUE}) or
all numeric values (\code{FALSE}) should be returned. Default is \code{TRUE}.}
}
\value{
numeric values inside \code{char} string.
}
\description{
The function splits any character string at each tab and space and returns
all (min_only = FALSE) or only the first (min_only = TRUE) numeric value found in the string.
}
\examples{
str_split_twice(char = "Hello 15")
str_split_twice(char = "flag1 15 16\t15", min_only = FALSE)
}
|
# Jest.S
#
# Usual invocation to compute J function
# if F and G are not required
#
# $Revision: 4.26 $ $Date: 2022/01/04 05:30:06 $
#
#
#
# Estimate the J-function of a point pattern X:
#     J(r) = (1 - G(r)) / (1 - F(r))
# where F is the empty space function (Fest) and G the nearest-neighbour
# distance distribution (Gest). One J estimate is produced for each edge
# correction available in both the F and G results, and everything is
# returned as a single "fv" (function value table) object.
Jest <- function(X, ..., eps=NULL, r=NULL, breaks=NULL, correction=NULL) {
  X <- as.ppp(X)
  W <- Window(X)
  # determine the common vector of r values at which F, G and J are computed
  brks <- handle.r.b.args(r, breaks, window=W, pixeps=eps,
                          rmaxdefault=rmax.rule("J", W, intensity(X)))
  # optional undocumented argument in ...: checkspacing is forwarded to
  # Fest (treated as TRUE unless explicitly FALSE)
  checkspacing <- !isFALSE(list(...)$checkspacing)
  #' compute F and G
  FF <- Fest(X, eps, breaks=brks, correction=correction,
             checkspacing=checkspacing)
  G <- Gest(X, breaks=brks, correction=correction)
  # initialise fv object; the theoretical Poisson value is J(r) = 1
  rvals <- FF$r
  rmax <- max(rvals)
  Z <- fv(data.frame(r=rvals, theo=1),
          "r", substitute(J(r), NULL),
          "theo",
          . ~ r,
          c(0,rmax),
          c("r", "%s[pois](r)"),
          c("distance argument r", "theoretical Poisson %s"),
          fname="J")
  # compute J function estimates
  # this has to be done manually because of the mismatch between names
  # of the corrections used by Fest and Gest
  Fnames <- names(FF)
  Gnames <- names(G)
  bothnames <- intersect(Fnames, Gnames)
  # For each correction present in both results, form (1-G)/(1-F) via
  # ratiotweak (a package-internal division helper -- presumably it guards
  # the 0/0 case; confirm in the package source). Each branch also resets
  # "alim", the recommended plotting range, to the r values where F <= 0.9
  # so the default plot stays away from a near-zero denominator.
  if("raw" %in% bothnames) {
    Jun <- ratiotweak(1-G$raw, 1-FF$raw)
    Z <- bind.fv(Z, data.frame(un=Jun), "hat(%s)[un](r)",
                 "uncorrected estimate of %s", "un")
    attr(Z, "alim") <- range(rvals[FF$raw <= 0.9])
  }
  if("rs" %in% bothnames) {
    Jrs <- ratiotweak(1-G$rs, 1-FF$rs)
    Z <- bind.fv(Z, data.frame(rs=Jrs), "hat(%s)[rs](r)",
                 "border corrected estimate of %s", "rs")
    attr(Z, "alim") <- range(rvals[FF$rs <= 0.9])
  }
  if("han" %in% Gnames && "cs" %in% Fnames) {
    # the Hanisch-corrected G is paired with F's "cs" estimate
    Jhan <- ratiotweak(1-G$han, 1-FF$cs)
    Z <- bind.fv(Z, data.frame(han=Jhan), "hat(%s)[han](r)",
                 "Hanisch-style estimate of %s", "han")
    attr(Z, "alim") <- range(rvals[FF$cs <= 0.9])
  }
  if("km" %in% bothnames) {
    Jkm <- ratiotweak(1-G$km, 1-FF$km)
    Z <- bind.fv(Z, data.frame(km=Jkm), "hat(%s)[km](r)",
                 "Kaplan-Meier estimate of %s", "km")
    attr(Z, "alim") <- range(rvals[FF$km <= 0.9])
  }
  if("hazard" %in% bothnames) {
    # difference of hazard rates, i.e. the derivative of log(J)
    Jhaz <- G$hazard - FF$hazard
    Z <- bind.fv(Z, data.frame(hazard=Jhaz), "hazard(r)",
                 "Kaplan-Meier estimate of derivative of log(%s)")
  }
  # set default plotting values and order
  nama <- names(Z)
  fvnames(Z, ".") <- rev(nama[!(nama %in% c("r", "hazard"))])
  # add more info: keep the underlying F and G estimates as attributes
  attr(Z, "F") <- FF
  attr(Z, "G") <- G
  attr(Z, "conserve") <- attr(FF, "conserve")
  unitname(Z) <- unitname(X)
  return(Z)
}
| /R/Jest.R | no_license | spatstat/spatstat.core | R | false | false | 2,540 | r | # Jest.S
#
# Usual invocation to compute J function
# if F and G are not required
#
# $Revision: 4.26 $ $Date: 2022/01/04 05:30:06 $
#
#
#
Jest <- function(X, ..., eps=NULL, r=NULL, breaks=NULL, correction=NULL) {
X <- as.ppp(X)
W <- Window(X)
brks <- handle.r.b.args(r, breaks, window=W, pixeps=eps,
rmaxdefault=rmax.rule("J", W, intensity(X)))
checkspacing <- !isFALSE(list(...)$checkspacing)
#' compute F and G
FF <- Fest(X, eps, breaks=brks, correction=correction,
checkspacing=checkspacing)
G <- Gest(X, breaks=brks, correction=correction)
# initialise fv object
rvals <- FF$r
rmax <- max(rvals)
Z <- fv(data.frame(r=rvals, theo=1),
"r", substitute(J(r), NULL),
"theo",
. ~ r,
c(0,rmax),
c("r", "%s[pois](r)"),
c("distance argument r", "theoretical Poisson %s"),
fname="J")
# compute J function estimates
# this has to be done manually because of the mismatch between names
Fnames <- names(FF)
Gnames <- names(G)
bothnames <- intersect(Fnames, Gnames)
if("raw" %in% bothnames) {
Jun <- ratiotweak(1-G$raw, 1-FF$raw)
Z <- bind.fv(Z, data.frame(un=Jun), "hat(%s)[un](r)",
"uncorrected estimate of %s", "un")
attr(Z, "alim") <- range(rvals[FF$raw <= 0.9])
}
if("rs" %in% bothnames) {
Jrs <- ratiotweak(1-G$rs, 1-FF$rs)
Z <- bind.fv(Z, data.frame(rs=Jrs), "hat(%s)[rs](r)",
"border corrected estimate of %s", "rs")
attr(Z, "alim") <- range(rvals[FF$rs <= 0.9])
}
if("han" %in% Gnames && "cs" %in% Fnames) {
Jhan <- ratiotweak(1-G$han, 1-FF$cs)
Z <- bind.fv(Z, data.frame(han=Jhan), "hat(%s)[han](r)",
"Hanisch-style estimate of %s", "han")
attr(Z, "alim") <- range(rvals[FF$cs <= 0.9])
}
if("km" %in% bothnames) {
Jkm <- ratiotweak(1-G$km, 1-FF$km)
Z <- bind.fv(Z, data.frame(km=Jkm), "hat(%s)[km](r)",
"Kaplan-Meier estimate of %s", "km")
attr(Z, "alim") <- range(rvals[FF$km <= 0.9])
}
if("hazard" %in% bothnames) {
Jhaz <- G$hazard - FF$hazard
Z <- bind.fv(Z, data.frame(hazard=Jhaz), "hazard(r)",
"Kaplan-Meier estimate of derivative of log(%s)")
}
# set default plotting values and order
nama <- names(Z)
fvnames(Z, ".") <- rev(nama[!(nama %in% c("r", "hazard"))])
# add more info
attr(Z, "F") <- FF
attr(Z, "G") <- G
attr(Z, "conserve") <- attr(FF, "conserve")
unitname(Z) <- unitname(X)
return(Z)
}
|
library('tidyverse')
conflicted::conflict_prefer('lag','dplyr')
interactive <- readxl::read_xlsx('results/4-2021/fim-4-2021.xlsx') %>%
fim::prepare_interactive() %>% filter(year > 2018)
contributions <- readxl::read_xlsx('results/4-2021/fim-4-2021.xlsx') %>%
select(date, ends_with('cont')) %>%
mutate(date = yearquarter(lubridate::as_date(date))) %>%
filter(date >= yearquarter('2019 Q1'))
summary <- contributions %>%
transmute(date,
federal_purchases = federal_nom_cont,
state_purchases = state_local_nom_cont,
grants_cont = federal_cgrants_cont + federal_igrants_cont,
federal_purchases_after_grants_cont = federal_nom_cont + grants_cont,
state_purchases_after_grants_cont = state_local_nom_cont - grants_cont,
total_transfers_cont = federal_transfers_cont + state_transfers_cont,
federal_transfers_cont,
federal_social_benefits_without_ui_or_rebate_contribution = federal_social_benefits_cont,
rebate_checks_cont,
federal_unemployment_insurance_cont,
federal_health_outlays_cont,
federal_subsidies_cont,
state_transfers_cont,
state_social_benefits_without_ui_contribution = state_social_benefits_cont,
state_unemployment_insurance_cont,
state_health_outlays_cont,
state_subsidies_cont,
taxes_cont = federal_taxes_cont + state_taxes_cont,
federal_taxes_cont,
state_taxes_cont) %>%
rename_with(.fn = ~snakecase::to_title_case(.),
.cols = everything()) %>%
mutate(across(where(is.numeric),
~ . / 100))
# ---- Write the summary table to a formatted Excel workbook (openxlsx) ----
library(openxlsx)
wb <- createWorkbook()
# package-level defaults for table border colour/style
options("openxlsx.borderColour" = "#4F80BD")
options("openxlsx.borderStyle" = "thin")
modifyBaseFont(wb, fontSize = 14)
addWorksheet(wb, sheetName = "Summary of contributions", gridLines = FALSE)
freezePane(wb, sheet = 1, firstRow = TRUE, firstCol = TRUE) ## freeze first row and column
# write `summary` (built above) as a styled Excel table
writeDataTable(wb, sheet = 1, x = summary,
               colNames = TRUE, rowNames = FALSE,
               tableStyle = "TableStyleLight9")
# widen column A (the date column)
setColWidths(wb, sheet = 1, cols = "A", widths = 18)
saveWorkbook(wb, "results/4-2021/summary.xlsx", overwrite = TRUE) #
readxl::read_xlsx('results/4-2021/fim-4-2021.xlsx') %>%
select( -ends_with('cont'), federal_nom_pi) %>%
mutate(date = yearquarter(lubridate::as_date(date))) %>%
filter(date >= yearquarter('2021 Q1')) %>%
select(federal_nom)
contributions %>%
transmute(date,
federal_purchases = federal_nom_cont - non_health_grants_cont,
state_purchases = state_local_nom_cont,
grants_cont = federal_cgrants_cont + federal_igrants_cont + non_health_grants_cont,
federal_purchases_after_grants_cont = federal_nom_cont + grants_cont,
state_purchases_after_grants_cont = state_local_nom_cont - grants_cont,
total_transfers_cont = federal_transfers_cont + state_transfers_cont,
federal_transfers_cont,
federal_social_benefits_without_ui_or_rebate_contribution = federal_social_benefits_cont,
rebate_checks_cont,
federal_unemployment_insurance_cont,
federal_health_outlays_cont,
federal_subsidies_cont,
state_transfers_cont,
state_social_benefits_without_ui_contribution = state_social_benefits_cont,
state_unemployment_insurance_cont,
state_health_outlays_cont,
state_subsidies_cont,
taxes_cont = federal_taxes_cont + state_taxes_cont,
federal_taxes_cont,
state_taxes_cont) %>%
rename_with(.fn = ~snakecase::to_title_case(.),
.cols = everything()) %>%
mutate(across(where(is.numeric),
~ . / 100))
# Grants with ARPA ------------------------------------------------------
#
fim <- readxl::read_xlsx('results/4-2021/fim-4-2021.xlsx')
projections %>% mutate(date = as_date(date)) %>% select(date, gf, gf_g) %>% filter(date > '2020-12-31') %>% View()
federal_purchases_breakdown <-
fim %>%
filter(date > '2020-12-31') %>%
summarise(date, federal_nom_pi, gdppoth,gdp,
federal_purchases = federal_nom - non_health_grants - add_federal_purchases,
grants_cont = federal_cgrants_cont + federal_igrants_cont + non_health_grants_cont,
non_health_grants,
non_health_grants_cont,
add_federal_purchases,
federal_nom)
federal_purchases_breakdown %>%
mutate(federal_nom_cont = 400 * (federal_purchases - lag(federal_purchases) * (1 + federal_nom_pi + gdppoth)) / lag(gdp), .keep = 'used',
non_health_grants_cont,
grants_cont)
fim %>%
filter(date> '2020-12-31') %>%
select(date, fiscal_impact)
| /reports/contributions_table.R | no_license | Hutchins-Center/Fiscal-Impact-Measure | R | false | false | 4,860 | r | library('tidyverse')
conflicted::conflict_prefer('lag','dplyr')
interactive <- readxl::read_xlsx('results/4-2021/fim-4-2021.xlsx') %>%
fim::prepare_interactive() %>% filter(year > 2018)
contributions <- readxl::read_xlsx('results/4-2021/fim-4-2021.xlsx') %>%
select(date, ends_with('cont')) %>%
mutate(date = yearquarter(lubridate::as_date(date))) %>%
filter(date >= yearquarter('2019 Q1'))
summary <- contributions %>%
transmute(date,
federal_purchases = federal_nom_cont,
state_purchases = state_local_nom_cont,
grants_cont = federal_cgrants_cont + federal_igrants_cont,
federal_purchases_after_grants_cont = federal_nom_cont + grants_cont,
state_purchases_after_grants_cont = state_local_nom_cont - grants_cont,
total_transfers_cont = federal_transfers_cont + state_transfers_cont,
federal_transfers_cont,
federal_social_benefits_without_ui_or_rebate_contribution = federal_social_benefits_cont,
rebate_checks_cont,
federal_unemployment_insurance_cont,
federal_health_outlays_cont,
federal_subsidies_cont,
state_transfers_cont,
state_social_benefits_without_ui_contribution = state_social_benefits_cont,
state_unemployment_insurance_cont,
state_health_outlays_cont,
state_subsidies_cont,
taxes_cont = federal_taxes_cont + state_taxes_cont,
federal_taxes_cont,
state_taxes_cont) %>%
rename_with(.fn = ~snakecase::to_title_case(.),
.cols = everything()) %>%
mutate(across(where(is.numeric),
~ . / 100))
library(openxlsx)
wb <- createWorkbook()
options("openxlsx.borderColour" = "#4F80BD")
options("openxlsx.borderStyle" = "thin")
modifyBaseFont(wb, fontSize = 14)
addWorksheet(wb, sheetName = "Summary of contributions", gridLines = FALSE)
freezePane(wb, sheet = 1, firstRow = TRUE, firstCol = TRUE) ## freeze first row and column
writeDataTable(wb, sheet = 1, x = summary,
colNames = TRUE, rowNames = FALSE,
tableStyle = "TableStyleLight9")
setColWidths(wb, sheet = 1, cols = "A", widths = 18)
saveWorkbook(wb, "results/4-2021/summary.xlsx", overwrite = TRUE) #
readxl::read_xlsx('results/4-2021/fim-4-2021.xlsx') %>%
select( -ends_with('cont'), federal_nom_pi) %>%
mutate(date = yearquarter(lubridate::as_date(date))) %>%
filter(date >= yearquarter('2021 Q1')) %>%
select(federal_nom)
contributions %>%
transmute(date,
federal_purchases = federal_nom_cont - non_health_grants_cont,
state_purchases = state_local_nom_cont,
grants_cont = federal_cgrants_cont + federal_igrants_cont + non_health_grants_cont,
federal_purchases_after_grants_cont = federal_nom_cont + grants_cont,
state_purchases_after_grants_cont = state_local_nom_cont - grants_cont,
total_transfers_cont = federal_transfers_cont + state_transfers_cont,
federal_transfers_cont,
federal_social_benefits_without_ui_or_rebate_contribution = federal_social_benefits_cont,
rebate_checks_cont,
federal_unemployment_insurance_cont,
federal_health_outlays_cont,
federal_subsidies_cont,
state_transfers_cont,
state_social_benefits_without_ui_contribution = state_social_benefits_cont,
state_unemployment_insurance_cont,
state_health_outlays_cont,
state_subsidies_cont,
taxes_cont = federal_taxes_cont + state_taxes_cont,
federal_taxes_cont,
state_taxes_cont) %>%
rename_with(.fn = ~snakecase::to_title_case(.),
.cols = everything()) %>%
mutate(across(where(is.numeric),
~ . / 100))
# Grants with ARPA ------------------------------------------------------
#
fim <- readxl::read_xlsx('results/4-2021/fim-4-2021.xlsx')
projections %>% mutate(date = as_date(date)) %>% select(date, gf, gf_g) %>% filter(date > '2020-12-31') %>% View()
federal_purchases_breakdown <-
fim %>%
filter(date > '2020-12-31') %>%
summarise(date, federal_nom_pi, gdppoth,gdp,
federal_purchases = federal_nom - non_health_grants - add_federal_purchases,
grants_cont = federal_cgrants_cont + federal_igrants_cont + non_health_grants_cont,
non_health_grants,
non_health_grants_cont,
add_federal_purchases,
federal_nom)
federal_purchases_breakdown %>%
mutate(federal_nom_cont = 400 * (federal_purchases - lag(federal_purchases) * (1 + federal_nom_pi + gdppoth)) / lag(gdp), .keep = 'used',
non_health_grants_cont,
grants_cont)
fim %>%
filter(date> '2020-12-31') %>%
select(date, fiscal_impact)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kdtools.R
\name{kd_sort}
\alias{kd_sort}
\alias{kd_order}
\alias{kd_is_sorted}
\title{Sort multidimensional data}
\usage{
kd_sort(x, ...)
kd_order(x, ...)
kd_is_sorted(x)
}
\arguments{
\item{x}{a matrix or arrayvec object}
\item{...}{other arguments}
}
\description{
Sort multidimensional data
}
\details{
The algorithm used is a divide-and-conquer quicksort variant that
recursively partitions a range of tuples using the median of each successive
dimension. Ties are resolved by cycling over successive dimensions. The
result is an ordering of tuples matching their order if they were inserted
into a kd-tree.
\code{kd_order} returns permutation vector that will order
the rows of the original matrix, exactly as \code{\link{order}}.
}
\note{
The matrix version will be slower because of data structure
conversions.
}
\examples{
x = matrix(runif(200), 100)
y = kd_sort(x)
kd_is_sorted(y)
kd_order(x)
plot(y, type = "o", pch = 19, col = "steelblue", asp = 1)
}
\seealso{
\code{\link{arrayvec}}
}
| /fuzzedpackages/kdtools/man/kdsort.Rd | no_license | akhikolla/testpackages | R | false | true | 1,093 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kdtools.R
\name{kd_sort}
\alias{kd_sort}
\alias{kd_order}
\alias{kd_is_sorted}
\title{Sort multidimensional data}
\usage{
kd_sort(x, ...)
kd_order(x, ...)
kd_is_sorted(x)
}
\arguments{
\item{x}{a matrix or arrayvec object}
\item{...}{other arguments}
}
\description{
Sort multidimensional data
}
\details{
The algorithm used is a divide-and-conquer quicksort variant that
recursively partitions a range of tuples using the median of each successive
dimension. Ties are resolved by cycling over successive dimensions. The
result is an ordering of tuples matching their order if they were inserted
into a kd-tree.
\code{kd_order} returns permutation vector that will order
the rows of the original matrix, exactly as \code{\link{order}}.
}
\note{
The matrix version will be slower because of data structure
conversions.
}
\examples{
x = matrix(runif(200), 100)
y = kd_sort(x)
kd_is_sorted(y)
kd_order(x)
plot(y, type = "o", pch = 19, col = "steelblue", asp = 1)
}
\seealso{
\code{\link{arrayvec}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get.R
\name{getItems}
\alias{getItems}
\title{Retrieve multiple data items by name}
\usage{
getItems(dgeObj, itemNames)
}
\arguments{
\item{dgeObj}{A class DGEobj created by function initDGEobj()}
\item{itemNames}{A character string, character vector, or list of names to retrieve}
}
\value{
A list
}
\description{
Retrieve multiple data items by name
}
\examples{
# example DGEobj
exObj <- readRDS(system.file("exampleObj.RDS", package = "DGEobj"))
myList <- getItems(exObj, list("counts", "geneData"))
names(myList)
}
| /man/getItems.Rd | no_license | agenius-navyasri/DGEobj | R | false | true | 614 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get.R
\name{getItems}
\alias{getItems}
\title{Retrieve multiple data items by name}
\usage{
getItems(dgeObj, itemNames)
}
\arguments{
\item{dgeObj}{A class DGEobj created by function initDGEobj()}
\item{itemNames}{A character string, character vector, or list of names to retrieve}
}
\value{
A list
}
\description{
Retrieve multiple data items by name
}
\examples{
# example DGEobj
exObj <- readRDS(system.file("exampleObj.RDS", package = "DGEobj"))
myList <- getItems(exObj, list("counts", "geneData"))
names(myList)
}
|
# SKAT comparison Test
# Command-line harness: reads genotype/covariate/phenotype/weight files,
# runs SKAT, and writes the Q statistic and p-value for comparison against
# another implementation.
suppressPackageStartupMessages(library(SKAT))
# get temp directory file locations
# args: [1] genotype matrix, [2] covariate matrix, [3] phenotype vector,
#       [4] weight vector, [5] output path, [6] outcome type passed to
#       SKAT_Null_Model's out_type -- inferred from usage; confirm caller
args <- commandArgs(trailingOnly = TRUE)
G <- read.table(args[1],header =FALSE)
cols = length(G[1,])
G = matrix(unlist(G),ncol = cols,byrow = FALSE)
Cov <- read.table(args[2],header =FALSE)
cols = length(Cov[1,])
Cov = matrix(unlist(Cov),ncol = cols,byrow = FALSE)
#Read in and convert to vectors
pheno <- suppressWarnings(read.table(args[3]))
W <- suppressWarnings(read.table(args[4]))
#This algo takes in sqrt(W) not just simply W
W = unlist(sqrt(W))
pheno = unlist(pheno)
#run SKAT
# null model regresses the phenotype on covariates only (no intercept term)
start.time = Sys.time()
obj <-SKAT_Null_Model(pheno ~ Cov - 1, out_type = args[6], Adjustment=FALSE)
end.time = Sys.time()
nullModelTime = end.time - start.time
#print(sprintf("Null Model time: %f",nullModelTime))
#print(G)
#print(G %*% diag(sqrt(W)))
#start.time = Sys.time()
sol <- suppressWarnings(SKAT(G,obj,kernel = "linear.weighted",weights = W))
#skatStatTime = Sys.time() - start.time
#print(sprintf("SKAT time: %f",skatStatTime))
#print(sprintf("VCS: %f",sol$Q[1]))
#print(sprintf("p value: %f",sol[1]))
# output: variance-component score statistic Q and its p-value
output = c(sol$Q[1],sol[1]$p.value)
#print(output)
#save(list = ls(all.names = TRUE),file = sprintf("/Users/charlesc/Documents/Software/R/workspaces/skatTest%f.RData",runif(1,2.0,5.0)))
write(output,args[5],ncolumns = 2,sep = " ") | /hail/src/test/resources/skatTest.R | permissive | hail-is/hail | R | false | false | 1,350 | r | # SKAT comparison Test
suppressPackageStartupMessages(library(SKAT))
# get temp directory file locations
args <- commandArgs(trailingOnly = TRUE)
G <- read.table(args[1],header =FALSE)
cols = length(G[1,])
G = matrix(unlist(G),ncol = cols,byrow = FALSE)
Cov <- read.table(args[2],header =FALSE)
cols = length(Cov[1,])
Cov = matrix(unlist(Cov),ncol = cols,byrow = FALSE)
#Read in and convert to vectors
pheno <- suppressWarnings(read.table(args[3]))
W <- suppressWarnings(read.table(args[4]))
#This algo takes in sqrt(W) not just simply W
W = unlist(sqrt(W))
pheno = unlist(pheno)
#run SKAT
start.time = Sys.time()
obj <-SKAT_Null_Model(pheno ~ Cov - 1, out_type = args[6], Adjustment=FALSE)
end.time = Sys.time()
nullModelTime = end.time - start.time
#print(sprintf("Null Model time: %f",nullModelTime))
#print(G)
#print(G %*% diag(sqrt(W)))
#start.time = Sys.time()
sol <- suppressWarnings(SKAT(G,obj,kernel = "linear.weighted",weights = W))
#skatStatTime = Sys.time() - start.time
#print(sprintf("SKAT time: %f",skatStatTime))
#print(sprintf("VCS: %f",sol$Q[1]))
#print(sprintf("p value: %f",sol[1]))
output = c(sol$Q[1],sol[1]$p.value)
#print(output)
#save(list = ls(all.names = TRUE),file = sprintf("/Users/charlesc/Documents/Software/R/workspaces/skatTest%f.RData",runif(1,2.0,5.0)))
write(output,args[5],ncolumns = 2,sep = " ") |
# Predict flower species (classification) on the iris data set,
# comparing C5.0 and CART (rpart) decision trees.
iris
head(iris)
dim(iris)
names(iris)
str(iris)
table(iris$Species)

# Train/test split: 100 rows for training, the remaining 50 for testing.
set.seed(1234)
s <- sample(150, 100)
iris_train <- iris[s, ]
iris_test <- iris[-s, ]
dim(iris_train)
names(iris_train)
dim(iris_test)
#################
# C5.0 is a good classification technique but does not
# provide good plots. For that rpart is good.
library(C50)
# Fit on the four predictor columns; column 5 is the Species label.
Model_C50 <- C5.0(iris_train[, -5], iris_train[, 5])
Model_C50
summary(Model_C50)
plot(Model_C50)
# Predicting on train: confusion matrix of actual vs predicted.
P1_train <- predict(Model_C50, iris_train); P1_train
table(iris_train[, 5], Predicted = P1_train)
# Predicting on test.
P1_test <- predict(Model_C50, iris_test); P1_test
table(iris_test[, 5], Predicted = P1_test)
#################
# CART via rpart.
library(rpart)
library(rpart.plot)
#install.packages("party")
library(party)
Model_rpart <- rpart(Species ~ ., data = iris_train, method = "class")
####### Plot the classification tree using base graphics.
plot(Model_rpart, main = "Classification Tree for iris Species",
     margin = 0.15, uniform = TRUE)
text(Model_rpart, use.n = TRUE)
#######
# Another visualization, via party::ctree.
plot(ctree(Species ~ ., data = iris_train))
Model_rpart
summary(Model_rpart)
# plot(Model_rpart)
# rpart.plot(Model_rpart,type=3)
# rpart.plot(Model_rpart)
# Predicting on train.
P1_train_rpart <- predict(Model_rpart, iris_train, type = "class")
table(iris_train$Species, predicted = P1_train_rpart)
# Predicting on test.
P1_test_rpart <- predict(Model_rpart, iris_test, type = "class")
# Missing-value handling demo: blank out some Sepal.Width values and
# re-predict with both models (C5.0 imputes; CART uses surrogate splits).
iris_test$Sepal.Width[iris_test$Sepal.Width == 3.7] <- NA
# Fixed: the fitted C5.0 model is Model_C50; `dtC50` was never defined.
dtc50_test <- predict(Model_C50, newdata = iris_test, type = "class")
cart_test <- predict(Model_rpart, newdata = iris_test, type = "class")
| /Week 5 (MACHINE LEARNING PART 1)/R Codes/Send/Del_Decision_Tree/iris.R | no_license | amitksingh1697/dataAnalyticsUsing_R_Programming | R | false | false | 1,783 | r | #Predict flower species(classify)
iris
head(iris)
dim(iris)
names(iris)
str(iris)
table(iris$Species)
#split train and test
set.seed(1234)
s = sample(150,100)
iris_train = iris[s,]
iris_test = iris[-s,]
dim(iris_train)
names(iris_train)
dim(iris_test)
#################
#C5.0 is a good classification technique but does not
#provide good plots. For that rpart is good
library(C50)
Model_C50 <-C5.0(iris_train[,-5],iris_train[,5])
Model_C50
summary(Model_C50)
plot(Model_C50)
#Predicting on Train
P1_train=predict(Model_C50,iris_train);P1_train
table(iris_train[,5],Predicted=P1_train)
#Predicting on Test
P1_test = predict(Model_C50,iris_test);P1_test
table(iris_test[,5],Predicted=P1_test)
#################
#rpart
library(rpart)
library(rpart.plot)
#install.packages("party")
library(party)
Model_rpart= rpart(Species~.,data=iris_train, method="class")
#######plotting rpart using ctree of party rpart(Species~.,data=iris_train, method="class")library
plot(Model_rpart,main="Classifcation Tree for Amount",
margin=0.15,uniform=TRUE)
text(Model_rpart,use.n=T)
#######
# Another visualization
plot(ctree(Species~.,data=iris_train))
Model_rpart
summary(Model_rpart)
# plot(Model_rpart)
# rpart.plot(Model_rpart,type=3)
# rpart.plot(Model_rpart)
#Predicting on Train
P1_train_rpart=predict(Model_rpart,iris_train,type="class")
table(iris_train$Species,predicted=P1_train_rpart)
#Predicting on Test
P1_test_rpart=predict(Model_rpart,iris_test,type="class")
# missing value handing in R using C50 and CART
iris_test$Sepal.Width[iris_test$Sepal.Width==3.7] <- NA
dtc50_test <- predict(dtC50,newdata=iris_test, type="class")
cart_test <- predict(Model_rpart,newdata=iris_test, type="class")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ec2_operations.R
\name{ec2_describe_aggregate_id_format}
\alias{ec2_describe_aggregate_id_format}
\title{Describes the longer ID format settings for all resource types in a
specific Region}
\usage{
ec2_describe_aggregate_id_format(DryRun)
}
\arguments{
\item{DryRun}{Checks whether you have the required permissions for the action, without
actually making the request, and provides an error response. If you have
the required permissions, the error response is \code{DryRunOperation}.
Otherwise, it is \code{UnauthorizedOperation}.}
}
\value{
A list with the following syntax:\preformatted{list(
UseLongIdsAggregated = TRUE|FALSE,
Statuses = list(
list(
Deadline = as.POSIXct(
"2015-01-01"
),
Resource = "string",
UseLongIds = TRUE|FALSE
)
)
)
}
}
\description{
Describes the longer ID format settings for all resource types in a
specific Region. This request is useful for performing a quick audit to
determine whether a specific Region is fully opted in for longer IDs
(17-character IDs).
This request only returns information about resource types that support
longer IDs.
The following resource types support longer IDs: \code{bundle} |
\code{conversion-task} | \code{customer-gateway} | \code{dhcp-options} |
\code{elastic-ip-allocation} | \code{elastic-ip-association} | \code{export-task} |
\code{flow-log} | \code{image} | \code{import-task} | \code{instance} | \code{internet-gateway} |
\code{network-acl} | \code{network-acl-association} | \code{network-interface} |
\code{network-interface-attachment} | \code{prefix-list} | \code{reservation} |
\code{route-table} | \code{route-table-association} | \code{security-group} |
\code{snapshot} | \code{subnet} | \code{subnet-cidr-block-association} | \code{volume} |
\code{vpc} | \code{vpc-cidr-block-association} | \code{vpc-endpoint} |
\code{vpc-peering-connection} | \code{vpn-connection} | \code{vpn-gateway}.
}
\section{Request syntax}{
\preformatted{svc$describe_aggregate_id_format(
DryRun = TRUE|FALSE
)
}
}
\keyword{internal}
| /cran/paws.compute/man/ec2_describe_aggregate_id_format.Rd | permissive | TWarczak/paws | R | false | true | 2,120 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ec2_operations.R
\name{ec2_describe_aggregate_id_format}
\alias{ec2_describe_aggregate_id_format}
\title{Describes the longer ID format settings for all resource types in a
specific Region}
\usage{
ec2_describe_aggregate_id_format(DryRun)
}
\arguments{
\item{DryRun}{Checks whether you have the required permissions for the action, without
actually making the request, and provides an error response. If you have
the required permissions, the error response is \code{DryRunOperation}.
Otherwise, it is \code{UnauthorizedOperation}.}
}
\value{
A list with the following syntax:\preformatted{list(
UseLongIdsAggregated = TRUE|FALSE,
Statuses = list(
list(
Deadline = as.POSIXct(
"2015-01-01"
),
Resource = "string",
UseLongIds = TRUE|FALSE
)
)
)
}
}
\description{
Describes the longer ID format settings for all resource types in a
specific Region. This request is useful for performing a quick audit to
determine whether a specific Region is fully opted in for longer IDs
(17-character IDs).
This request only returns information about resource types that support
longer IDs.
The following resource types support longer IDs: \code{bundle} |
\code{conversion-task} | \code{customer-gateway} | \code{dhcp-options} |
\code{elastic-ip-allocation} | \code{elastic-ip-association} | \code{export-task} |
\code{flow-log} | \code{image} | \code{import-task} | \code{instance} | \code{internet-gateway} |
\code{network-acl} | \code{network-acl-association} | \code{network-interface} |
\code{network-interface-attachment} | \code{prefix-list} | \code{reservation} |
\code{route-table} | \code{route-table-association} | \code{security-group} |
\code{snapshot} | \code{subnet} | \code{subnet-cidr-block-association} | \code{volume} |
\code{vpc} | \code{vpc-cidr-block-association} | \code{vpc-endpoint} |
\code{vpc-peering-connection} | \code{vpn-connection} | \code{vpn-gateway}.
}
\section{Request syntax}{
\preformatted{svc$describe_aggregate_id_format(
DryRun = TRUE|FALSE
)
}
}
\keyword{internal}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.