content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
## Read the national (Scotland) mid-year 2019 population estimates.
## NOTE(review): read_excel is presumably readxl::read_excel, attached elsewhere -- confirm.
scotpop <- read_excel("./Scotland_midyearpop_est2019.xlsx")
# load("casefreqs.RData") ## national cumulative cases and deaths by sex and one year age group
####### incidence and mortality using national population estimates ######################
## case.freqs / death.freqs are assumed to already exist in the workspace
## (presumably loaded from casefreqs.RData above): rownames are one-year ages,
## column 1 is taken as Females and column 2 as Males.
case.freqs <- data.frame(Age=as.integer(rownames(case.freqs)),
Females=as.integer(case.freqs[, 1]),
Males=as.integer(case.freqs[, 2]))
## reshape to long format: one row per (Age, Sex) with the case count.
case.long <- reshape2::melt(case.freqs, id="Age")
colnames(case.long) <- c("Age", "Sex", "Cases")
death.freqs <- data.frame(Age=as.integer(rownames(death.freqs)),
Females=as.integer(death.freqs[, 1]),
Males=as.integer(death.freqs[, 2]))
death.long <- reshape2::melt(death.freqs, id="Age")
colnames(death.long) <- c("Age", "Sex", "Deaths")
## population estimates to long format; column 2 of scotpop is dropped
## (presumably an all-persons total column -- confirm against the spreadsheet).
scotpop.long <- reshape2::melt(scotpop[, -2], id="Age")
colnames(scotpop.long) <- c("Age", "Sex", "Population")
## left-join cases and deaths onto the population table; (Age, Sex) cells with
## no observed events come through as NA and are zero-filled below.
discrim <- merge(scotpop.long, case.long, by=c("Age", "Sex"), all.x=TRUE)
discrim <- merge(discrim, death.long, by=c("Age", "Sex"), all.x=TRUE)
discrim$Cases[is.na(discrim$Cases)] <- 0
discrim$Deaths[is.na(discrim$Deaths)] <- 0
discrim$Sex <- as.factor(discrim$Sex)
## binomial outcome matrices: (events, non-events) per Age x Sex stratum.
discrim$Noncases <- discrim$Population - discrim$Cases
y.cases <- cbind(as.integer(discrim$Cases), as.integer(discrim$Noncases))
discrim$Survivors <- discrim$Population - discrim$Deaths
y.deaths <- cbind(as.integer(discrim$Deaths), as.integer(discrim$Survivors))
## logistic regressions of incidence and mortality on sex and (linear) age.
cases.model <- glm(formula=y.cases ~ Sex + Age, family="binomial", data=discrim)
deaths.model <- glm(formula=y.deaths ~ Sex + Age, family="binomial", data=discrim)
cases.model.coeffs <- summary(cases.model)$coefficients
deaths.model.coeffs <- summary(deaths.model)$coefficients
## first column of the coefficient table is the point estimate.
logistic.coeffs <- data.frame(severecase=cases.model.coeffs[, 1],
death=deaths.model.coeffs[, 1])
## sex-specific GAMs with a smooth age term, fitted separately for each sex.
male <- discrim$Sex=="Males"
female <- discrim$Sex=="Females"
gam.model.MaleDeaths <- gam::gam(formula=y.deaths[male, ] ~ s(Age), family=binomial("logit"),
data=discrim[male, ])
gam.model.FemaleDeaths <- gam::gam(formula=y.deaths[female, ] ~ s(Age), family=binomial("logit"),
data=discrim[female, ])
gam.model.MaleCases<- gam::gam(formula=y.cases[male, ] ~ s(Age), family=binomial("logit"),
data=discrim[male, ])
gam.model.FemaleCases <- gam::gam(formula=y.cases[female, ] ~ s(Age), family=binomial("logit"),
data=discrim[female, ])
## collect fitted values back on the log-odds scale, in long format (e.g. for plotting).
gam.male <- data.frame(Cases=car::logit(gam.model.MaleCases$fitted.values),
Deaths=car::logit(gam.model.MaleDeaths$fitted.values),
Age=discrim$Age[male])
gam.male.long <- reshape2::melt(data=gam.male, id="Age")
colnames(gam.male.long)[2] <- "Status"
gam.male.long$Sex <- "Males"
gam.female <- data.frame(Cases=car::logit(gam.model.FemaleCases$fitted.values),
Deaths=car::logit(gam.model.FemaleDeaths$fitted.values),
Age=discrim$Age[female])
gam.female.long <- reshape2::melt(data=gam.female, id="Age")
colnames(gam.female.long)[2] <- "Status"
gam.female.long$Sex <- "Females"
## NB: this masks the package name `gam`; the fits above use gam::gam so it is safe here.
gam <- rbind(gam.male.long, gam.female.long)
###############################################################
## Summarize predictive information of the age+sex case model:
## W is the log (base 2) ratio of posterior to prior odds per stratum;
## lambda1/lambda0 average W (with appropriate sign) over events/non-events,
## and Lambda is the mean of the two.
logodds.posterior <- predict(object=cases.model, newdata=discrim, type="link")
logodds.prior <- log(sum(discrim$Cases) / sum(discrim$Noncases))
log.likratio <- logodds.posterior - logodds.prior
discrim$W <- log.likratio / log(2)
lambda1 <- sum(discrim$W * discrim$Cases) / sum(discrim$Cases)
lambda0 <- sum(-discrim$W * discrim$Noncases) / sum(discrim$Noncases)
cases.Lambda.agesex <- 0.5 * (lambda0 + lambda1)
## identical Lambda computation for the mortality model.
logodds.posterior <- predict(object=deaths.model, newdata=discrim, type="link")
logodds.prior <- log(sum(discrim$Deaths) / sum(discrim$Survivors))
log.likratio <- logodds.posterior - logodds.prior
discrim$W <- log.likratio / log(2)
lambda1 <- sum(discrim$W * discrim$Deaths) / sum(discrim$Deaths)
lambda0 <- sum(-discrim$W * discrim$Survivors) / sum(discrim$Survivors)
deaths.Lambda.agesex <- 0.5 * (lambda0 + lambda1)
| /incidencemortality.R | no_license | pmckeigue/covid-scotland_public | R | false | false | 4,168 | r |
## Read the national (Scotland) mid-year 2019 population estimates.
## NOTE(review): read_excel is presumably readxl::read_excel, attached elsewhere -- confirm.
scotpop <- read_excel("./Scotland_midyearpop_est2019.xlsx")
# load("casefreqs.RData") ## national cumulative cases and deaths by sex and one year age group
####### incidence and mortality using national population estimates ######################
## case.freqs / death.freqs are assumed to already exist in the workspace
## (presumably loaded from casefreqs.RData above): rownames are one-year ages,
## column 1 is taken as Females and column 2 as Males.
case.freqs <- data.frame(Age=as.integer(rownames(case.freqs)),
Females=as.integer(case.freqs[, 1]),
Males=as.integer(case.freqs[, 2]))
## reshape to long format: one row per (Age, Sex) with the case count.
case.long <- reshape2::melt(case.freqs, id="Age")
colnames(case.long) <- c("Age", "Sex", "Cases")
death.freqs <- data.frame(Age=as.integer(rownames(death.freqs)),
Females=as.integer(death.freqs[, 1]),
Males=as.integer(death.freqs[, 2]))
death.long <- reshape2::melt(death.freqs, id="Age")
colnames(death.long) <- c("Age", "Sex", "Deaths")
## population estimates to long format; column 2 of scotpop is dropped
## (presumably an all-persons total column -- confirm against the spreadsheet).
scotpop.long <- reshape2::melt(scotpop[, -2], id="Age")
colnames(scotpop.long) <- c("Age", "Sex", "Population")
## left-join cases and deaths onto the population table; (Age, Sex) cells with
## no observed events come through as NA and are zero-filled below.
discrim <- merge(scotpop.long, case.long, by=c("Age", "Sex"), all.x=TRUE)
discrim <- merge(discrim, death.long, by=c("Age", "Sex"), all.x=TRUE)
discrim$Cases[is.na(discrim$Cases)] <- 0
discrim$Deaths[is.na(discrim$Deaths)] <- 0
discrim$Sex <- as.factor(discrim$Sex)
## binomial outcome matrices: (events, non-events) per Age x Sex stratum.
discrim$Noncases <- discrim$Population - discrim$Cases
y.cases <- cbind(as.integer(discrim$Cases), as.integer(discrim$Noncases))
discrim$Survivors <- discrim$Population - discrim$Deaths
y.deaths <- cbind(as.integer(discrim$Deaths), as.integer(discrim$Survivors))
## logistic regressions of incidence and mortality on sex and (linear) age.
cases.model <- glm(formula=y.cases ~ Sex + Age, family="binomial", data=discrim)
deaths.model <- glm(formula=y.deaths ~ Sex + Age, family="binomial", data=discrim)
cases.model.coeffs <- summary(cases.model)$coefficients
deaths.model.coeffs <- summary(deaths.model)$coefficients
## first column of the coefficient table is the point estimate.
logistic.coeffs <- data.frame(severecase=cases.model.coeffs[, 1],
death=deaths.model.coeffs[, 1])
## sex-specific GAMs with a smooth age term, fitted separately for each sex.
male <- discrim$Sex=="Males"
female <- discrim$Sex=="Females"
gam.model.MaleDeaths <- gam::gam(formula=y.deaths[male, ] ~ s(Age), family=binomial("logit"),
data=discrim[male, ])
gam.model.FemaleDeaths <- gam::gam(formula=y.deaths[female, ] ~ s(Age), family=binomial("logit"),
data=discrim[female, ])
gam.model.MaleCases<- gam::gam(formula=y.cases[male, ] ~ s(Age), family=binomial("logit"),
data=discrim[male, ])
gam.model.FemaleCases <- gam::gam(formula=y.cases[female, ] ~ s(Age), family=binomial("logit"),
data=discrim[female, ])
## collect fitted values back on the log-odds scale, in long format (e.g. for plotting).
gam.male <- data.frame(Cases=car::logit(gam.model.MaleCases$fitted.values),
Deaths=car::logit(gam.model.MaleDeaths$fitted.values),
Age=discrim$Age[male])
gam.male.long <- reshape2::melt(data=gam.male, id="Age")
colnames(gam.male.long)[2] <- "Status"
gam.male.long$Sex <- "Males"
gam.female <- data.frame(Cases=car::logit(gam.model.FemaleCases$fitted.values),
Deaths=car::logit(gam.model.FemaleDeaths$fitted.values),
Age=discrim$Age[female])
gam.female.long <- reshape2::melt(data=gam.female, id="Age")
colnames(gam.female.long)[2] <- "Status"
gam.female.long$Sex <- "Females"
## NB: this masks the package name `gam`; the fits above use gam::gam so it is safe here.
gam <- rbind(gam.male.long, gam.female.long)
###############################################################
## Summarize predictive information of the age+sex case model:
## W is the log (base 2) ratio of posterior to prior odds per stratum;
## lambda1/lambda0 average W (with appropriate sign) over events/non-events,
## and Lambda is the mean of the two.
logodds.posterior <- predict(object=cases.model, newdata=discrim, type="link")
logodds.prior <- log(sum(discrim$Cases) / sum(discrim$Noncases))
log.likratio <- logodds.posterior - logodds.prior
discrim$W <- log.likratio / log(2)
lambda1 <- sum(discrim$W * discrim$Cases) / sum(discrim$Cases)
lambda0 <- sum(-discrim$W * discrim$Noncases) / sum(discrim$Noncases)
cases.Lambda.agesex <- 0.5 * (lambda0 + lambda1)
## identical Lambda computation for the mortality model.
logodds.posterior <- predict(object=deaths.model, newdata=discrim, type="link")
logodds.prior <- log(sum(discrim$Deaths) / sum(discrim$Survivors))
log.likratio <- logodds.posterior - logodds.prior
discrim$W <- log.likratio / log(2)
lambda1 <- sum(discrim$W * discrim$Deaths) / sum(discrim$Deaths)
lambda0 <- sum(-discrim$W * discrim$Survivors) / sum(discrim$Survivors)
deaths.Lambda.agesex <- 0.5 * (lambda0 + lambda1)
|
# Fuzzer regression fixture: a degenerate (lims, points) pair for
# palm:::pbc_distances, preserved exactly as minimized by the fuzzer.
lims <- structure(c(2.63555450669983e-82, Inf), .Dim = 1:2)
points <- structure(1.29549941127325e-318, .Dim = c(1L, 1L))
testlist <- list(lims = lims, points = points)
result <- do.call(palm:::pbc_distances, testlist)
str(result)
result <- do.call(palm:::pbc_distances,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{replace_confidential}
\alias{replace_confidential}
\title{Replace confidential values}
\usage{
replace_confidential(.data, replacement_value = NA_integer_)
}
\arguments{
\item{.data}{The data set to have its confidential values removed.}
\item{replacement_value}{The value with which to replace the confidential ones. Defaults to \code{NA_integer_}.}
}
\description{
The NZ census commonly uses the notation '..C' when values fall below a
certain threshold, such that reporting them might reveal private details of
individuals. However, an analysis often requires these values to be replaced
and the column converted to an integer (columns containing '..C' must be
stored as character).
}
\details{
This function takes the data set, and a replacement value and replaces all of the ..C values.
Mainly for use within the transform_census function.
}
| /man/replace_confidential.Rd | permissive | phildonovan/nzcensr | R | false | true | 910 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{replace_confidential}
\alias{replace_confidential}
\title{Replace confidential values}
\usage{
replace_confidential(.data, replacement_value = NA_integer_)
}
\arguments{
\item{.data}{The data set to have its confidential values removed.}
\item{replacement_value}{The value to replace the confidential ones with. Defaults to NA_integer.}
}
\description{
The NZ census commonly uses the notation of '..C' when values are below a
certain threshold that they may reveal private details of certain individuals.
However, it is often required in an analysis to replace these values and
convert the column to an integer (to include ..C they need to be character).
}
\details{
This function takes the data set, and a replacement value and replaces all of the ..C values.
Mainly for use within the transform_census function.
}
|
# Copyright 2016 Steven E. Pav. All Rights Reserved.
# Author: Steven E. Pav
# This file is part of fromo.
#
# fromo is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# fromo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with fromo. If not, see <http://www.gnu.org/licenses/>.
# env var:
# nb:
# see also:
# todo:
# changelog:
#
# Created: 2016.03.28
# Copyright: Steven E. Pav, 2016-2016
# Author: Steven E. Pav
# Comments: Steven E. Pav
# helpers#FOLDUP
# Seed the RNG deterministically from a character string, by converting the
# string's raw bytes to integers and handing them to set.seed.
set.char.seed <- function(str) {
  seed.bytes <- as.integer(charToRaw(str))
  set.seed(seed.bytes)
}
THOROUGHNESS <- getOption('test.thoroughness',1.0)
# slow version of t_running; requires a helper function.
# Reference ("slow but obvious") driver for the t_running_* operations.
#
# For each lookback time, selects the observations whose timestamp falls in the
# half-open window (t - window, t] (or since the previous lookback time when
# variable_win), and applies `func` to them, forwarding weights when given.
# Empty windows yield `missfill` replicated to `outsize`.
#
# @param v        vector of observations.
# @param func     function applied to each window; called as func(vsub, ...)
#                 or func(vsub, wts=subwts, ...) when weights are supplied.
# @param outsize  length of func's return value (sizes the missfill vector).
# @param missfill value returned for windows with no valid observations.
# @param time     observation timestamps; if NULL, reconstructed as the
#                 cumulative sum of time_deltas (or of wts when wts_as_delta).
# @param time_deltas increments between observations, used when time is NULL.
# @param window   lookback window width; Inf means 'since the beginning'.
# @param wts      optional observation weights.
# @param lb_time  times at which to compute the statistic; defaults to time.
# @param na_rm    drop NA observations (and NA weights) from each window?
# @param min_df   accepted for interface compatibility; not used here.
# @param lookahead shift applied to the lookback times.
# @param variable_win if TRUE, each window runs from the previous lookback time.
# @param wts_as_delta if TRUE and no time info is given, treat wts as deltas.
# @param ...      further arguments forwarded to func.
# @return sapply-simplified collection of per-window results.
slow_op <- function(v,func,outsize=1,missfill=NA,
    time=NULL,time_deltas=NULL,window=Inf,wts=NULL,lb_time=NULL,
    na_rm=FALSE,min_df=0,lookahead=0,variable_win=FALSE,wts_as_delta=TRUE,...) {
  if (is.null(time)) {
    # BUG FIX: an explicitly supplied time_deltas (with NULL time) used to hit
    # stop('bad input'); now it is honored, falling back to wts only when
    # time_deltas is absent.
    if (is.null(time_deltas)) {
      if (!is.null(wts) && wts_as_delta) {
        time_deltas <- wts
      } else {
        stop('bad input')
      }
    }
    time <- cumsum(time_deltas)
  }
  if (is.null(lb_time)) {
    lb_time <- time
  }
  lb_time <- lb_time + lookahead
  if (variable_win) {
    # each window starts where the previous lookback time ended.
    tprev <- c(-Inf,lb_time[1:(length(lb_time)-1)])
  } else {
    tprev <- lb_time - window
  }
  sapply(seq_along(lb_time),
    function(idx) {
      tf <- lb_time[idx]
      t0 <- tprev[idx]
      # half-open window (t0, tf].
      takeus <- (t0 < time) & (time <= tf)
      if (na_rm) {
        takeus <- takeus & !is.na(v)
        if (!is.null(wts)) {
          takeus <- takeus & !is.na(wts)
        }
      }
      if (any(takeus)) {
        vsub <- v[takeus]
        if (is.null(wts)) {
          retv <- func(vsub,...)
        } else {
          subwts <- wts[takeus]
          retv <- func(vsub,wts=subwts,...)
        }
      } else {
        retv <- rep(missfill,outsize)
      }
      retv
    })
}
# Reference implementation of the time-based running sum.
slow_t_running_sum <- function(v,...) {
  # weighted windows sum directly; unweighted windows recover the sum
  # as mean * count from the sd3 moment triple.
  summer <- function(v, wts=NULL, ...) {
    if (!is.null(wts)) {
      return(sum(v * wts))
    }
    momstat <- sd3(v, ...)
    momstat[2] * momstat[3]
  }
  as.numeric(slow_op(v=v, func=summer, missfill=0, ...))
}
# Reference implementation of the time-based running mean.
slow_t_running_mean <- function(v,...) {
  # element 2 of the sd3 triple is the mean.
  mean.only <- function(v,...) sd3(v,...)[2]
  as.numeric(slow_op(v=v, func=mean.only, ...))
}
# Reference implementation of the time-based running standard deviation.
slow_t_running_sd <- function(v,...) {
  # element 1 of the sd3 triple is the standard deviation.
  sd.only <- function(v,...) sd3(v,...)[1]
  matrix(slow_op(v=v, func=sd.only, ...), ncol=1)
}
# Reference implementation of the time-based running skewness.
slow_t_running_skew <- function(v,...) {
  # element 1 of the skew4 vector is the skewness.
  skew.only <- function(v,...) skew4(v,...)[1]
  matrix(slow_op(v=v, func=skew.only, ...), ncol=1)
}
# Reference implementation of the time-based running (excess) kurtosis.
slow_t_running_kurt <- function(v,...) {
  # element 1 of the kurt5 vector is the kurtosis.
  kurt.only <- function(v,...) kurt5(v,...)[1]
  matrix(slow_op(v=v, func=kurt.only, ...), ncol=1)
}
# Reference implementation of the running sd3 triple; one row per lookback time.
slow_t_running_sd3 <- function(v,...) {
  t(slow_op(v=v, func=function(v,...) sd3(v,...), outsize=3, ...))
}
# Reference implementation of the running skew4 vector; windows with too few
# observations for a skew get an NA in that slot, padded from the sd3 triple.
slow_t_running_skew4 <- function(v,...) {
  padded <- function(v,...) {
    if (length(v) <= 2) {
      return(c(NA, sd3(v,...)))
    }
    skew4(v,...)
  }
  t(slow_op(v=v, func=padded, outsize=4, ...))
}
# Reference implementation of the running kurt5 vector; windows too small for
# the higher moments get NA slots, padded from the lower-order statistics.
slow_t_running_kurt5 <- function(v,...) {
  padded <- function(v,...) {
    nel <- length(v)
    if (nel > 3) {
      kurt5(v,...)
    } else if (nel > 2) {
      c(NA, skew4(v,...))
    } else {
      c(NA, NA, sd3(v,...))
    }
  }
  t(slow_op(v=v, func=padded, outsize=5, ...))
}
# Plain-R reference standard deviation, optionally weighted.
#
# @param x        numeric observations.
# @param wts      optional weights; when na_rm, negative weights are dropped too.
# @param na_rm    drop NA observations (and NA or negative weights)?
# @param normalize_wts when TRUE, the used_df correction is scaled by the mean weight.
# @param min_df   minimum number of (surviving) observations; fewer gives NA.
# @param used_df  degrees of freedom consumed by the mean (1 for the usual sd).
# @return the (weighted) standard deviation, or NA below min_df.
reference_sd <- function(x,wts=NULL,na_rm=FALSE,normalize_wts=FALSE,min_df=0,used_df=1) {
  if (na_rm) {
    keep <- !is.na(x)
    if (!is.null(wts)) {
      keep <- keep & !is.na(wts) & (wts >= 0)
    }
    x <- x[keep]
    if (!is.null(wts)) {
      wts <- wts[keep]
    }
  }
  if (length(x) < min_df) {
    return(NA)
  }
  if (is.null(wts)) {
    nel <- length(x)
    ctr <- sum(x) / nel
    vv <- sum((x - ctr)^2) / (nel - used_df)
  } else {
    total.wt <- sum(wts)
    ctr <- sum(x * wts) / total.wt
    denom <- total.wt - used_df * (if (normalize_wts) total.wt / length(x) else 1)
    vv <- sum(wts * (x - ctr)^2) / denom
  }
  sqrt(vv)
}
# not quite the same as slow_t_running_sd above
# Running reference_sd over time windows; note this uses reference_sd rather
# than sd3, so it is not quite the same as slow_t_running_sd above.
reference_t_running_sd <- function(v,...) {
  vals <- slow_op(v=v, func=reference_sd, ...)
  matrix(vals, ncol=1)
}
#UNFOLD
context("first moments")#FOLDUP
## check fromo's sd3/skew4/kurt5 against base R and the moments/PDQutils/e1071
## packages on a fixed random sample.
test_that("sd, skew, kurt are correct",{#FOLDUP
set.char.seed("c4007dba-2010-481e-abe5-f07d3ce94eb4")
x <- rnorm(1000)
## expect_error(..., NA) asserts the call raises no error.
expect_error(sid <- sd3(x),NA)
expect_error(ske <- skew4(x),NA)
expect_error(krt <- kurt5(x),NA)
expect_equal(length(sid),3)
expect_equal(length(ske),4)
expect_equal(length(krt),5)
# compare computations to gold standard
# length
expect_equal(sid[3],length(x))
expect_equal(sid[3],ske[4])
expect_equal(sid[3],krt[5])
# mean
expect_equal(sid[2],mean(x),tolerance=1e-9)
expect_equal(sid[2],ske[3],tolerance=1e-9)
expect_equal(sid[2],krt[4],tolerance=1e-9)
# standard dev
expect_equal(sid[1],ske[2],tolerance=1e-9)
expect_equal(sid[1],krt[3],tolerance=1e-9)
expect_equal(sid[1],sd(x),tolerance=1e-9)
# skew
expect_equal(ske[1],krt[2],tolerance=1e-9)
## cross-check against the moments package when it is available.
if (require(moments)) {
na_rm <- TRUE
dumb_count <- sum(sign(abs(x)+1),na.rm=na_rm)
dumb_mean <- mean(x,na.rm=na_rm)
dumb_sd <- sd(x,na.rm=na_rm)
dumb_skew <- moments::skewness(x,na.rm=na_rm)
dumb_exkurt <- moments::kurtosis(x,na.rm=na_rm) - 3.0
dumb_cmom2 <- moments::moment(x,central=TRUE,na.rm=na_rm,order=2)
dumb_cmom3 <- moments::moment(x,central=TRUE,na.rm=na_rm,order=3)
dumb_cmom4 <- moments::moment(x,central=TRUE,na.rm=na_rm,order=4)
dumb_cmom5 <- moments::moment(x,central=TRUE,na.rm=na_rm,order=5)
dumb_cmom6 <- moments::moment(x,central=TRUE,na.rm=na_rm,order=6)
# skew
expect_equal(ske[1],dumb_skew,tolerance=1e-9)
# kurtosis
expect_equal(krt[1],dumb_exkurt,tolerance=1e-9)
# oops. problems with centered moments in terms of the used_df; need a
# better test...
## cent_moments returns (highest moment first, then mean and count).
cmoms <- cent_moments(x,max_order=6,used_df=0)
dumbv <- c(dumb_cmom6,dumb_cmom5,dumb_cmom4,dumb_cmom3,dumb_cmom2,dumb_mean,dumb_count)
expect_equal(max(abs(cmoms-dumbv)),0,tolerance=1e-9)
## convert raw moments to cumulants via PDQutils and compare to cent_cumulants.
if (require(PDQutils)) {
cumuls <- cent_cumulants(x,max_order=length(cmoms)-1)
dumbv0 <- c(dumb_cmom6,dumb_cmom5,dumb_cmom4,dumb_cmom3,dumb_cmom2,dumb_mean,dumb_count)
dumbv1 <- PDQutils::moment2cumulant(c(0,rev(dumbv0)[3:length(dumbv0)]))
dumbv <- c(rev(dumbv1[2:length(dumbv1)]),dumb_mean,dumb_count)
expect_equal(max(abs(cumuls-dumbv)),0,tolerance=1e-12)
}
}
## e1071's type-3 skewness differs from fromo's by a known n-dependent factor.
if (require(e1071)) {
dumb_skew <- e1071::skewness(x,type=3)
equiv_skew <- ske[1] * ((ske[4]-1)/(ske[4]))^(3/2)
expect_equal(dumb_skew,equiv_skew,tolerance=1e-12)
}
# 2FIX: add cent_moments and std_moments
# 2FIX: check NA
# sentinel
expect_true(TRUE)
})#UNFOLD
## all-ones weights must reproduce the unweighted statistics exactly,
## regardless of the normalize_wts setting.
test_that("unit weighted sd, skew, kurt are correct",{#FOLDUP
set.char.seed("b652ccd2-478b-44d4-90e2-2ca2bad99d25")
x <- rnorm(1000)
ones <- rep(1,length(x))
expect_equal(sd3(x),sd3(x,wts=ones),tolerance=1e-9)
expect_equal(skew4(x),skew4(x,wts=ones),tolerance=1e-9)
expect_equal(kurt5(x),kurt5(x,wts=ones),tolerance=1e-9)
# 2FIX: probably normalize_wts=FALSE should be the default???? for speed?
expect_equal(running_sd(x),running_sd(x,wts=ones,normalize_wts=TRUE),tolerance=1e-9)
expect_equal(running_skew(x),running_skew(x,wts=ones,normalize_wts=TRUE),tolerance=1e-9)
expect_equal(running_kurt(x),running_kurt(x,wts=ones,normalize_wts=TRUE),tolerance=1e-9)
expect_equal(running_sd(x),running_sd(x,wts=ones,normalize_wts=FALSE),tolerance=1e-9)
expect_equal(running_skew(x),running_skew(x,wts=ones,normalize_wts=FALSE),tolerance=1e-9)
expect_equal(running_kurt(x),running_kurt(x,wts=ones,normalize_wts=FALSE),tolerance=1e-9)
# 2FIX: add more.
# sentinel
expect_true(TRUE)
})#UNFOLD
## with normalize_wts=TRUE, results must be invariant to rescaling the
## weight vector (here: wts vs 2*wts) for every weighted entry point.
test_that("normalize weights works",{#FOLDUP
set.char.seed("2694ae87-62d4-4154-9c32-864f9a6e648d")
x <- rnorm(25)
wts <- runif(length(x))
expect_equal(sd3(x,wts=wts,normalize_wts=TRUE),
sd3(x,wts=2*wts,normalize_wts=TRUE),tolerance=1e-9)
expect_equal(skew4(x,wts=wts,normalize_wts=TRUE),
skew4(x,wts=2*wts,normalize_wts=TRUE),tolerance=1e-9)
expect_equal(kurt5(x,wts=wts,normalize_wts=TRUE),
kurt5(x,wts=2*wts,normalize_wts=TRUE),tolerance=1e-9)
expect_equal(running_sd(x,wts=wts,normalize_wts=TRUE),
running_sd(x,wts=2*wts,normalize_wts=TRUE),tolerance=1e-9)
expect_equal(running_skew(x,wts=wts,normalize_wts=TRUE),
running_skew(x,wts=2*wts,normalize_wts=TRUE),tolerance=1e-9)
expect_equal(running_kurt(x,wts=wts,normalize_wts=TRUE),
running_kurt(x,wts=2*wts,normalize_wts=TRUE),tolerance=1e-9)
expect_equal(running_sd3(x,wts=wts,normalize_wts=TRUE),
running_sd3(x,wts=2*wts,normalize_wts=TRUE),tolerance=1e-9)
expect_equal(running_skew4(x,wts=wts,normalize_wts=TRUE),
running_skew4(x,wts=2*wts,normalize_wts=TRUE),tolerance=1e-9)
expect_equal(running_kurt5(x,wts=wts,normalize_wts=TRUE),
running_kurt5(x,wts=2*wts,normalize_wts=TRUE),tolerance=1e-9)
expect_equal(running_sharpe(x,wts=wts,normalize_wts=TRUE),
running_sharpe(x,wts=2*wts,normalize_wts=TRUE),tolerance=1e-9)
expect_equal(running_sharpe(x,wts=wts,normalize_wts=TRUE,compute_se=TRUE),
running_sharpe(x,wts=2*wts,normalize_wts=TRUE,compute_se=TRUE),tolerance=1e-9)
expect_equal(running_centered(x,wts=wts,normalize_wts=TRUE),
running_centered(x,wts=2*wts,normalize_wts=TRUE),tolerance=1e-9)
expect_equal(running_apx_median(x,wts=wts,normalize_wts=TRUE),
running_apx_median(x,wts=2*wts,normalize_wts=TRUE),tolerance=1e-9)
expect_equal(running_tstat(x,wts=wts,normalize_wts=TRUE),
running_tstat(x,wts=2*wts,normalize_wts=TRUE),tolerance=1e-9)
expect_equal(running_zscored(x,wts=wts,normalize_wts=TRUE),
running_zscored(x,wts=2*wts,normalize_wts=TRUE),tolerance=1e-9)
expect_equal(running_scaled(x,wts=wts,normalize_wts=TRUE),
running_scaled(x,wts=2*wts,normalize_wts=TRUE),tolerance=1e-9)
ptiles <- c(0.1,0.25,0.5,0.75,0.9)
expect_equal(running_apx_quantiles(x,p=ptiles,wts=wts,normalize_wts=TRUE),
running_apx_quantiles(x,p=ptiles,wts=2*wts,normalize_wts=TRUE),tolerance=1e-9)
expect_equal(running_cent_moments(x,wts=wts,normalize_wts=TRUE),
running_cent_moments(x,wts=2*wts,normalize_wts=TRUE),tolerance=1e-9)
expect_equal(running_std_moments(x,wts=wts,normalize_wts=TRUE),
running_std_moments(x,wts=2*wts,normalize_wts=TRUE),tolerance=1e-9)
expect_equal(running_cumulants(x,wts=wts,normalize_wts=TRUE),
running_cumulants(x,wts=2*wts,normalize_wts=TRUE),tolerance=1e-9)
})#UNFOLD
## with normalize_wts=FALSE, doubling the weights must leave the moment
## statistics unchanged except the trailing (pseudo-)count, which doubles.
test_that("weight scaling what you expect",{#FOLDUP
set.char.seed("efaa75ac-bb9e-4e4a-a375-7028f099366e")
x <- rnorm(50)
wts <- runif(length(x))
expect_error(sid_1 <- sd3(x,wts=wts,normalize_wts=FALSE,sg_df=0),NA)
expect_error(ske_1 <- skew4(x,wts=wts,normalize_wts=FALSE,sg_df=0),NA)
expect_error(krt_1 <- kurt5(x,wts=wts,normalize_wts=FALSE,sg_df=0),NA)
expect_error(sid_2 <- sd3(x,wts=2*wts,normalize_wts=FALSE,sg_df=0),NA)
expect_error(ske_2 <- skew4(x,wts=2*wts,normalize_wts=FALSE,sg_df=0),NA)
expect_error(krt_2 <- kurt5(x,wts=2*wts,normalize_wts=FALSE,sg_df=0),NA)
expect_equal(sid_1 * c(1,1,2),sid_2,tolerance=1e-9)
expect_equal(ske_1 * c(1,1,1,2),ske_2,tolerance=1e-9)
expect_equal(krt_1 * c(1,1,1,1,2),krt_2,tolerance=1e-9)
})#UNFOLD
## check weighted sd3/skew4/kurt5 against direct computations that normalize
## the weights to unit mean.
test_that("weighted sd, skew, kurt are correct",{#FOLDUP
  set.char.seed("4e17d837-69c1-41d1-906f-c82224d7ce41")
  x <- rnorm(1000)
  wts <- runif(length(x))
  expect_error(sid <- sd3(x,wts=wts,normalize_wts=TRUE),NA)
  expect_error(ske <- skew4(x,wts=wts,normalize_wts=TRUE),NA)
  expect_error(krt <- kurt5(x,wts=wts,normalize_wts=TRUE),NA)
  # 2FIX: add more here to check correctness ...
  expect_equal(length(sid),3)
  expect_equal(length(ske),4)
  expect_equal(length(krt),5)
  # compare computations to gold standard
  # length
  expect_equal(sid[3],length(x))
  expect_equal(sid[3],ske[4])
  expect_equal(sid[3],krt[5])
  # mean
  expect_equal(sid[2],weighted.mean(x,w=wts),tolerance=1e-9)
  expect_equal(sid[2],ske[3],tolerance=1e-9)
  expect_equal(sid[2],krt[4],tolerance=1e-9)
  # standard dev
  expect_equal(sid[1],ske[2],tolerance=1e-9)
  expect_equal(sid[1],krt[3],tolerance=1e-9)
  wsd <- sqrt(sum(((x - weighted.mean(x,w=wts))^2) * (wts / mean(wts))) / (length(x) - 1))
  # 2FIX!!!
  expect_equal(sid[1],wsd,tolerance=1e-9)
  # skew
  expect_equal(ske[1],krt[2],tolerance=1e-9)
  na_rm <- TRUE
  dumb_count <- length(x)
  dumb_mean <- weighted.mean(x,w=wts)
  dumb_sd <- sqrt(sum(((x - weighted.mean(x,w=wts))^2) * (wts / mean(wts))) / (length(x) - 1))
  # weighted central moment of order ord, with weights rescaled to unit mean.
  # BUG FIX: this helper previously referenced the enclosing `x` instead of its
  # `vec` argument; results here are unchanged since it was only called on x.
  wcmom <- function(vec,wts,ord) {
    wz <- wts / mean(wts)
    mean(wz * ((vec - weighted.mean(vec,w=wz))^ord))
  }
  dumb_wcmom2 <- wcmom(x,wts,2)
  dumb_wcmom3 <- wcmom(x,wts,3)
  dumb_wcmom4 <- wcmom(x,wts,4)
  dumb_wcmom5 <- wcmom(x,wts,5)
  dumb_wcmom6 <- wcmom(x,wts,6)
  ## cent_moments returns (highest moment first, then mean and count).
  cmoms <- cent_moments(x,wts=wts,max_order=6,used_df=0,normalize_wts=TRUE)
  dumbv <- c(dumb_wcmom6,dumb_wcmom5,dumb_wcmom4,dumb_wcmom3,dumb_wcmom2,dumb_mean,dumb_count)
  expect_equal(cmoms,dumbv,tolerance=1e-9)
  dumb_skew <- dumb_wcmom3 / (dumb_wcmom2^(3/2))
  dumb_exkurt <- (dumb_wcmom4 / (dumb_wcmom2^(2))) - 3
  # skew
  expect_equal(ske[1],dumb_skew,tolerance=1e-9)
  # kurtosis
  expect_equal(krt[1],dumb_exkurt,tolerance=1e-9)
})#UNFOLD
#UNFOLD
tomat <- function(cbound) {
  # coerce to a bare matrix, stripping dimnames so that comparisons
  # against the fast implementations ignore column labels
  m <- as.matrix(cbound)
  dimnames(m) <- NULL
  m
}
context("running ops are correct")
# Compare the fast running_* implementations against 'dumb' per-window
# recomputations using base R and the moments package.
test_that("running ops are correct",{#FOLDUP
skip_on_cran()
ptiles <- c(0.1,0.25,0.5,0.75,0.9)
set.char.seed("7ffe0035-2d0c-4586-a1a5-6321c7cf8694")
for (xlen in c(20,100)) {
for (xmu in c(1e3,1e6)) {
# a large mean stresses numerical cancellation; loosen tolerance with it
toler <- xmu ^ (1/3)
x <- rnorm(xlen,mean=xmu)
for (window in c(15,50,Inf)) {
for (restart_period in c(20,1000)) {
for (na_rm in c(FALSE,TRUE)) {
# sum(sign(abs(.)+1)) contributes 1 per observation, so this counts
# the elements in the trailing window while honoring na.rm
dumb_count <- sapply(seq_along(x),function(iii) { sum(sign(abs(x[max(1,iii-window+1):iii])+1),na.rm=na_rm) },simplify=TRUE)
dumb_sum <- sapply(seq_along(x),function(iii) { sum(x[max(1,iii-window+1):iii],na.rm=na_rm) },simplify=TRUE)
dumb_mean <- sapply(seq_along(x),function(iii) { mean(x[max(1,iii-window+1):iii],na.rm=na_rm) },simplify=TRUE)
dumb_sd <- sapply(seq_along(x),function(iii) { sd(x[max(1,iii-window+1):iii],na.rm=na_rm) },simplify=TRUE)
dumb_skew <- sapply(seq_along(x),function(iii) { moments::skewness(x[max(1,iii-window+1):iii],na.rm=na_rm) },simplify=TRUE)
dumb_exkurt <- sapply(seq_along(x),function(iii) { moments::kurtosis(x[max(1,iii-window+1):iii],na.rm=na_rm) - 3.0 },simplify=TRUE)
dumb_cmom2 <- sapply(seq_along(x),function(iii) { moments::moment(x[max(1,iii-window+1):iii],central=TRUE,na.rm=na_rm,order=2) },simplify=TRUE)
dumb_cmom3 <- sapply(seq_along(x),function(iii) { moments::moment(x[max(1,iii-window+1):iii],central=TRUE,na.rm=na_rm,order=3) },simplify=TRUE)
dumb_cmom4 <- sapply(seq_along(x),function(iii) { moments::moment(x[max(1,iii-window+1):iii],central=TRUE,na.rm=na_rm,order=4) },simplify=TRUE)
dumb_cmom5 <- sapply(seq_along(x),function(iii) { moments::moment(x[max(1,iii-window+1):iii],central=TRUE,na.rm=na_rm,order=5) },simplify=TRUE)
dumb_cmom6 <- sapply(seq_along(x),function(iii) { moments::moment(x[max(1,iii-window+1):iii],central=TRUE,na.rm=na_rm,order=6) },simplify=TRUE)
# SD
expect_error(fastv <- running_sd(x,window=window,restart_period=restart_period,na_rm=na_rm),NA)
dumbv <- tomat(dumb_sd)
# early rows are skipped: too few observations to define the statistic
expect_equal(dumbv[2:xlen],fastv[2:xlen],tolerance=1e-7 * toler)
expect_error(fastv <- running_sd3(x,window=window,restart_period=restart_period,na_rm=na_rm),NA)
dumbv <- tomat(cbind(dumb_sd,dumb_mean,dumb_count))
expect_equal(dumbv[2:xlen,],fastv[2:xlen,],tolerance=1e-7 * toler)
# skew
expect_error(fastv <- running_skew(x,window=window,restart_period=restart_period,na_rm=na_rm),NA)
dumbv <- tomat(dumb_skew)
expect_equal(dumbv[3:xlen],fastv[3:xlen],tolerance=1e-6 * toler)
expect_error(fastv <- running_skew4(x,window=window,restart_period=restart_period,na_rm=na_rm),NA)
dumbv <- tomat(cbind(dumb_skew,dumb_sd,dumb_mean,dumb_count))
expect_equal(dumbv[3:xlen,],fastv[3:xlen,],tolerance=1e-7 * toler)
# excess kurtosis
expect_error(fastv <- running_kurt(x,window=window,restart_period=restart_period,na_rm=na_rm),NA)
dumbv <- tomat(dumb_exkurt)
expect_equal(dumbv[4:xlen],fastv[4:xlen],tolerance=1e-6 * toler)
expect_error(fastv <- running_kurt5(x,window=window,restart_period=restart_period,na_rm=na_rm),NA)
dumbv <- tomat(cbind(dumb_exkurt,dumb_skew,dumb_sd,dumb_mean,dumb_count))
expect_equal(dumbv[4:xlen,],fastv[4:xlen,],tolerance=1e-6 * toler)
# higher order moments
expect_error(fastv <- running_cent_moments(x,window=window,max_order=6L,used_df=0L,restart_period=restart_period,na_rm=na_rm),NA)
dumbv <- tomat(cbind(dumb_cmom6,dumb_cmom5,dumb_cmom4,dumb_cmom3,dumb_cmom2,dumb_mean,dumb_count))
expect_equal(dumbv[6:xlen,],fastv[6:xlen,],tolerance=1e-6 * toler)
expect_error(fastv <- running_cent_moments(x,window=window,max_order=6L,max_order_only=TRUE,used_df=0L,restart_period=restart_period,na_rm=na_rm),NA)
dumbv <- tomat(dumb_cmom6)
expect_equal(dumbv[6:xlen,],fastv[6:xlen,],tolerance=1e-7 * toler)
expect_error(fastv <- running_std_moments(x,window=window,max_order=6L,used_df=0L,restart_period=restart_period,na_rm=na_rm),NA)
# standardized moments: central moments scaled by the matching power of sd
dumbv <- tomat(cbind(dumb_cmom6 / (dumb_cmom2^3),dumb_cmom5 / (dumb_cmom2^2.5),dumb_cmom4 / (dumb_cmom2^2.0),dumb_cmom3 / (dumb_cmom2^1.5),sqrt(dumb_cmom2),dumb_mean,dumb_count))
expect_equal(dumbv[6:xlen,],fastv[6:xlen,],tolerance=1e-7 * toler)
# running sum and mean
expect_error(fastv <- running_sum(x,window=window,restart_period=restart_period,na_rm=na_rm),NA)
dumbv <- dumb_sum
expect_equal(dumbv[2:xlen],fastv[2:xlen],tolerance=1e-7 * toler)
expect_error(fastv <- running_mean(x,window=window,restart_period=restart_period,na_rm=na_rm),NA)
dumbv <- dumb_mean
expect_equal(dumbv[2:xlen],fastv[2:xlen],tolerance=1e-7 * toler)
if (require(PDQutils)) {
# cumulants
expect_error(fastv <- running_cumulants(x,window=window,max_order=6L,used_df=0L,restart_period=restart_period,na_rm=na_rm),NA)
pre_dumbv <- cbind(dumb_cmom6,dumb_cmom5,dumb_cmom4,dumb_cmom3,dumb_cmom2,dumb_mean,dumb_count)
dumbv <- t(sapply(seq_along(x),function(iii) {
# convert each window's central moments into cumulants via PDQutils
rv <- rev(PDQutils::moment2cumulant(c(0,rev(pre_dumbv[iii,1:(ncol(pre_dumbv)-2)]))))
rv <- rv[-length(rv)]
c(rv,pre_dumbv[iii,ncol(pre_dumbv) + (-1:0)])
},simplify='matrix'))
expect_equal(max(abs(dumbv[6:xlen,] - fastv[6:xlen,])),0,tolerance=1e-8 * toler)
# quantiles
expect_error(fastv <- running_apx_quantiles(x,ptiles,max_order=ncol(dumbv)-1,used_df=0L,window=window,restart_period=restart_period,na_rm=na_rm),NA)
dumbq <- t(sapply(seq_along(x),function(iii) {
PDQutils::qapx_cf(ptiles,raw.cumulants=rev(dumbv[iii,1:(ncol(dumbv)-1)]))
}, simplify=TRUE))
expect_equal(max(abs(dumbq[8:xlen,] - fastv[8:xlen,])),0,tolerance=1e-8 * toler)
}
}
}
}
}
}
})#UNFOLD
# running_centered / scaled / zscored / sharpe / tstat against direct
# per-window recomputation of mean, sd, and count.
test_that("running adjustments are correct",{#FOLDUP
skip_on_cran()
set.char.seed("967d2149-fbff-4d82-b227-ca3e1034bddb")
for (xlen in c(20,100)) {
x <- rnorm(xlen)
for (window in c(5,50,Inf)) {
for (restart_period in c(10,1000)) {
for (na_rm in c(FALSE,TRUE)) {
# per-window count, mean, sd computed the slow way
dumb_count <- sapply(seq_along(x),function(iii) { sum(sign(abs(x[max(1,iii-window+1):iii])+1),na.rm=na_rm) },simplify=TRUE)
dumb_mean <- sapply(seq_along(x),function(iii) { mean(x[max(1,iii-window+1):iii],na.rm=na_rm) },simplify=TRUE)
dumb_sd <- sapply(seq_along(x),function(iii) { sd(x[max(1,iii-window+1):iii],na.rm=na_rm) },simplify=TRUE)
expect_error(fastv <- running_centered(x,window=window,restart_period=restart_period,na_rm=na_rm),NA)
# the dumb value:
dumbv <- x - dumb_mean;
expect_equal(max(abs(dumbv - fastv)),0,tolerance=1e-12)
expect_error(fastv <- running_scaled(x,window=window,restart_period=restart_period,na_rm=na_rm),NA)
# the dumb value:
dumbv <- x / dumb_sd
expect_equal(max(abs(dumbv[2:length(x)] - fastv[2:length(x)])),0,tolerance=1e-12)
expect_error(fastv <- running_zscored(x,window=window,restart_period=restart_period,na_rm=na_rm),NA)
# the dumb value:
dumbv <- (x - dumb_mean) / dumb_sd
expect_equal(max(abs(dumbv[2:length(x)] - fastv[2:length(x)])),0,tolerance=1e-12)
expect_error(fastv <- running_sharpe(x,window=window,restart_period=restart_period,na_rm=na_rm),NA)
# the dumb value:
dumbv <- dumb_mean / dumb_sd
expect_equal(max(abs(dumbv[2:length(x)] - fastv[2:length(x)])),0,tolerance=1e-12)
expect_error(fastv <- running_tstat(x,window=window,restart_period=restart_period,na_rm=na_rm),NA)
# the dumb value:
dumbv <- (dumb_mean * sqrt(dumb_count)) / dumb_sd
expect_equal(max(abs(dumbv[2:length(x)] - fastv[2:length(x)])),0,tolerance=1e-12)
expect_error(fastv <- running_sharpe(x,window=window,restart_period=restart_period,na_rm=na_rm,compute_se=TRUE),NA)
# the dumb value:
dumb_sr <- dumb_mean / dumb_sd
expect_equal(max(abs(dumb_sr[2:length(x)] - fastv[2:length(x),1])),0,tolerance=1e-12)
if (require(moments)) {
dumb_skew <- sapply(seq_along(x),function(iii) { moments::skewness(x[max(1,iii-window+1):iii],na.rm=na_rm) },simplify=TRUE)
dumb_exkurt <- sapply(seq_along(x),function(iii) { moments::kurtosis(x[max(1,iii-window+1):iii],na.rm=na_rm) - 3.0 },simplify=TRUE)
# presumably the Mertens-form standard error of the Sharpe ratio -- confirm
dumb_merse <- sqrt((1 + 0.25 * (2+dumb_exkurt) * dumb_sr^2 - dumb_skew * dumb_sr) / dumb_count)
expect_equal(max(abs(dumb_merse[5:length(x)] - fastv[5:length(x),2])),0,tolerance=1e-9)
}
}
}
}
}
})#UNFOLD
context("weighted running ops are correct")
# Weighted running ops against hand-rolled weighted window computations.
# `nw` toggles normalize_wts, which changes the effective degrees of
# freedom: normalized uses the observation count, unnormalized the total
# weight.
test_that("running weights work correctly",{#FOLDUP
skip_on_cran()
set.char.seed("b82d252c-681b-4b98-9bb3-ffd17feeb4a1")
na_rm <- FALSE
restart_period <- 1000
for (xlen in c(20,50)) {
x <- rnorm(xlen)
for (wts in list(rep(1L,xlen), runif(xlen,min=2,max=7))) {
for (window in c(5,30,Inf)) { # FOLDUP
# 2FIX: add to this!
slow_count <- sapply(seq_along(x),function(iii) { sum(sign(abs(x[max(1,iii-window+1):iii])+1),na.rm=na_rm) },simplify=TRUE)
slow_sumwt <- sapply(seq_along(x),function(iii) { sum(wts[max(1,iii-window+1):iii],na.rm=na_rm) },simplify=TRUE)
slow_mean <- sapply(seq_along(x),function(iii) {
mydx <- max(1,iii-window+1):iii
mywts <- wts[mydx]
sum(mywts * x[mydx],na.rm=na_rm) / slow_sumwt[iii]
},simplify=TRUE)
slow_var <- sapply(seq_along(x),function(iii) {
mydx <- max(1,iii-window+1):iii
mywts <- wts[mydx]
sum(mywts * (x[mydx] - slow_mean[iii])^2,na.rm=na_rm) / (slow_sumwt[iii] - 1)
},simplify=TRUE)
slow_sd <- sqrt(slow_var)
# the normalize version;
slow_nvar <- sapply(seq_along(x),function(iii) {
mydx <- max(1,iii-window+1):iii
mywts <- wts[mydx]
(slow_count[iii]/slow_sumwt[iii]) * sum(mywts * (x[mydx] - slow_mean[iii])^2,na.rm=na_rm) / (slow_count[iii] - 1)
},simplify=TRUE)
slow_nsd <- sqrt(slow_nvar)
slow_cent3 <- sapply(seq_along(x),function(iii) {
mydx <- max(1,iii-window+1):iii
mywts <- wts[mydx]
sum(mywts * (x[mydx] - slow_mean[iii])^3,na.rm=na_rm) / (slow_sumwt[iii])
},simplify=TRUE)
slow_cent4 <- sapply(seq_along(x),function(iii) {
mydx <- max(1,iii-window+1):iii
mywts <- wts[mydx]
sum(mywts * (x[mydx] - slow_mean[iii])^4,na.rm=na_rm) / (slow_sumwt[iii])
},simplify=TRUE)
expect_error(fastv <- running_mean(x,wts=wts,min_df=0,window=window,na_rm=na_rm),NA)
expect_equal(fastv,slow_mean,tolerance=1e-8)
expect_error(fastv <- running_centered(x,wts=wts,window=window,restart_period=restart_period,na_rm=na_rm),NA)
slowv <- x - slow_mean;
expect_equal(as.numeric(fastv),slowv,tolerance=1e-8)
for (nw in c(TRUE,FALSE)) {
# pick the reference sd and df matching the normalize_wts setting
if (nw) {
use_sd <- slow_nsd
use_df <- slow_count
} else {
use_sd <- slow_sd
use_df <- slow_sumwt
}
expect_error(fastv <- running_sd(x,wts=wts,window=window,na_rm=na_rm,normalize_wts=nw),NA)
expect_equal(as.numeric(fastv),use_sd,tolerance=1e-8)
expect_error(fastv <- running_sd3(x,wts=wts,window=window,na_rm=na_rm,normalize_wts=nw),NA)
expect_equal(as.numeric(fastv[,1]),use_sd,tolerance=1e-8)
expect_equal(as.numeric(fastv[,2]),slow_mean,tolerance=1e-8)
expect_equal(as.numeric(fastv[,3]),use_df,tolerance=1e-8)
expect_error(fastv <- running_scaled(x,wts=wts,window=window,restart_period=restart_period,na_rm=na_rm,normalize_wts=nw),NA)
slowv <- x / use_sd
expect_equal(as.numeric(fastv[2:length(x)]),slowv[2:length(x)],tolerance=1e-8)
expect_error(fastv <- running_zscored(x,wts=wts,window=window,restart_period=restart_period,na_rm=na_rm,normalize_wts=nw),NA)
slowv <- (x - slow_mean) / use_sd
expect_equal(slowv[2:length(x)],fastv[2:length(x)],tolerance=1e-12)
expect_error(fastv <- running_sharpe(x,wts=wts,window=window,restart_period=restart_period,na_rm=na_rm,normalize_wts=nw),NA)
slowv <- slow_mean / use_sd
expect_equal(slowv[2:length(x)],fastv[2:length(x)],tolerance=1e-12)
expect_error(fastv <- running_tstat(x,wts=wts,window=window,restart_period=restart_period,na_rm=na_rm,normalize_wts=nw),NA)
slowv <- (slow_mean * sqrt(use_df)) / use_sd
expect_equal(slowv[2:length(x)],fastv[2:length(x)],tolerance=1e-12)
expect_error(fastv <- running_cent_moments(x,wts=wts,window=window,max_order=3L,max_order_only=TRUE,restart_period=restart_period,na_rm=na_rm,normalize_wts=nw),NA)
slowv <- slow_cent3
expect_equal(slowv[3:length(x)],fastv[3:length(x)],tolerance=1e-12)
expect_error(fastv <- running_cent_moments(x,wts=wts,window=window,max_order=4L,max_order_only=TRUE,restart_period=restart_period,na_rm=na_rm,normalize_wts=nw),NA)
slowv <- slow_cent4
expect_equal(slowv[4:length(x)],fastv[4:length(x)],tolerance=1e-12)
}
}# UNFOLD
}
}
})#UNFOLD
context("t_running for trivial case")
# With integer times 1..n and a time window just under the count window,
# each t_running_* op must reproduce its count-based running_* twin.
test_that("vs running ops",{#FOLDUP
skip_on_cran()
set.char.seed("712463ec-f266-4de7-89d2-ce3c824327b0")
na_rm <- FALSE
ptiles <- c(0.1,0.25,0.5,0.75,0.9)
for (xlen in c(20,50)) {
x <- rnorm(xlen)
times <- seq_along(x)
for (wts in list(NULL,rep(1L,xlen), runif(xlen,min=1.2,max=3.5))) {
# 2FIX? Inf window?
for (window in c(5,30,Inf)) { # FOLDUP
# to avoid roundoff issues on double times.
t_window <- window - 0.1
expect_error(box <- running_sum(x,wts=wts,window=window,na_rm=na_rm),NA)
expect_error(tbox <- t_running_sum(x,time=times,wts=wts,window=t_window,na_rm=na_rm),NA)
expect_equal(box,tbox,tolerance=1e-8)
expect_error(box <- running_mean(x,wts=wts,min_df=0,window=window,na_rm=na_rm),NA)
expect_error(tbox <- t_running_mean(x,time=times,wts=wts,min_df=0,window=t_window,na_rm=na_rm),NA)
expect_equal(box,tbox,tolerance=1e-8)
for (nw in c(TRUE,FALSE)) {
expect_error(box <- running_sd(x,wts=wts,window=window,na_rm=na_rm,normalize_wts=nw),NA)
expect_error(tbox <- t_running_sd(x,time=times,wts=wts,window=t_window,na_rm=na_rm,normalize_wts=nw),NA)
expect_equal(box,tbox,tolerance=1e-8)
expect_error(box <- running_skew(x,wts=wts,window=window,na_rm=na_rm,normalize_wts=nw),NA)
# the 0.1 is to avoid roundoff issues on the double times.
expect_error(tbox <- t_running_skew(x,time=times,wts=wts,window=t_window,na_rm=na_rm,normalize_wts=nw),NA)
expect_equal(box,tbox,tolerance=1e-8)
expect_error(box <- running_kurt(x,wts=wts,window=window,na_rm=na_rm,normalize_wts=nw),NA)
# the 0.1 is to avoid roundoff issues on the double times.
expect_error(tbox <- t_running_kurt(x,time=times,wts=wts,window=t_window,na_rm=na_rm,normalize_wts=nw),NA)
expect_equal(box,tbox,tolerance=1e-8)
expect_error(box <- running_sd3(x,wts=wts,window=window,na_rm=na_rm,normalize_wts=nw),NA)
# the 0.1 is to avoid roundoff issues on the double times.
expect_error(tbox <- t_running_sd3(x,time=times,wts=wts,window=t_window,na_rm=na_rm,normalize_wts=nw),NA)
expect_equal(box,tbox,tolerance=1e-8)
expect_error(box <- running_skew4(x,wts=wts,window=window,na_rm=na_rm,normalize_wts=nw),NA)
# the 0.1 is to avoid roundoff issues on the double times.
expect_error(tbox <- t_running_skew4(x,time=times,wts=wts,window=t_window,na_rm=na_rm,normalize_wts=nw),NA)
expect_equal(box,tbox,tolerance=1e-8)
expect_error(box <- running_kurt5(x,wts=wts,window=window,na_rm=na_rm,normalize_wts=nw),NA)
# the 0.1 is to avoid roundoff issues on the double times.
expect_error(tbox <- t_running_kurt5(x,time=times,wts=wts,window=t_window,na_rm=na_rm,normalize_wts=nw),NA)
expect_equal(box,tbox,tolerance=1e-8)
expect_error(box <- running_centered(x,wts=wts,window=window,na_rm=na_rm,normalize_wts=nw),NA)
# the 0.1 is to avoid roundoff issues on the double times.
expect_error(tbox <- t_running_centered(x,time=times,wts=wts,window=t_window,na_rm=na_rm,normalize_wts=nw),NA)
expect_equal(box,tbox,tolerance=1e-8)
expect_error(box <- running_scaled(x,wts=wts,window=window,na_rm=na_rm,normalize_wts=nw),NA)
# the 0.1 is to avoid roundoff issues on the double times.
expect_error(tbox <- t_running_scaled(x,time=times,wts=wts,window=t_window,na_rm=na_rm,normalize_wts=nw),NA)
expect_equal(box,tbox,tolerance=1e-8)
expect_error(box <- running_zscored(x,wts=wts,window=window,na_rm=na_rm,normalize_wts=nw),NA)
# the 0.1 is to avoid roundoff issues on the double times.
expect_error(tbox <- t_running_zscored(x,time=times,wts=wts,window=t_window,na_rm=na_rm,normalize_wts=nw),NA)
expect_equal(box,tbox,tolerance=1e-8)
expect_error(box <- running_tstat(x,wts=wts,window=window,na_rm=na_rm,normalize_wts=nw),NA)
# the 0.1 is to avoid roundoff issues on the double times.
expect_error(tbox <- t_running_tstat(x,time=times,wts=wts,window=t_window,na_rm=na_rm,normalize_wts=nw),NA)
expect_equal(box,tbox,tolerance=1e-8)
for (cse in c(TRUE,FALSE)) {
expect_error(box <- running_sharpe(x,wts=wts,window=window,na_rm=na_rm,compute_se=cse,normalize_wts=nw),NA)
# the 0.1 is to avoid roundoff issues on the double times.
expect_error(tbox <- t_running_sharpe(x,time=times,wts=wts,window=t_window,na_rm=na_rm,compute_se=cse,normalize_wts=nw),NA)
expect_equal(box,tbox,tolerance=1e-8)
}
expect_error(box <- running_apx_median(x,wts=wts,window=window,na_rm=na_rm,normalize_wts=nw),NA)
# the 0.1 is to avoid roundoff issues on the double times.
expect_error(tbox <- t_running_apx_median(x,time=times,wts=wts,window=t_window,na_rm=na_rm,normalize_wts=nw),NA)
expect_equal(box,tbox,tolerance=1e-8)
expect_error(box <- running_apx_quantiles(x,ptiles,max_order=3,wts=wts,window=window,na_rm=na_rm,normalize_wts=nw),NA)
# the 0.1 is to avoid roundoff issues on the double times.
expect_error(tbox <- t_running_apx_quantiles(x,ptiles,max_order=3,time=times,wts=wts,window=t_window,na_rm=na_rm,normalize_wts=nw),NA)
expect_equal(box,tbox,tolerance=1e-8)
}
}# UNFOLD
}
}
})#UNFOLD
context("t_running vs slow version")
# Fast t_running_* implementations against the slow_t_running_* reference
# implementations defined above, over irregular times, weights, and
# explicit lookback times.
test_that("check em",{#FOLDUP
skip_on_cran()
set.char.seed("91b0bd37-0b8e-49d6-8333-039a7d7f7dd5")
na_rm <- FALSE
for (xlen in c(40,90)) {# FOLDUP
x <- rnorm(xlen)
for (times in list(NULL,cumsum(runif(length(x),min=0.2,max=0.4)))) {
for (wts in list(NULL,rep(1L,xlen),runif(xlen,min=1.1,max=2.1))) {
# when times are absent, the weights are interpreted as time deltas
wts_as_delta <- is.null(times) & !is.null(wts)
if (!is.null(times) || (wts_as_delta && !is.null(wts))) {
for (window in c(11.5,20.5,Inf)) { # FOLDUP
for (lb_time in list(NULL,3+cumsum(runif(10,min=0.4,max=1.1)))) {
slow <- slow_t_running_sum(x,time=times,wts=wts,window=window,lb_time=lb_time,na_rm=na_rm,wts_as_delta=wts_as_delta)
expect_error(fast <- t_running_sum(x,time=times,wts=wts,window=window,lb_time=lb_time,na_rm=na_rm,wts_as_delta=wts_as_delta),NA)
expect_equal(fast,slow,tolerance=1e-8)
slow <- slow_t_running_mean(x,time=times,wts=wts,window=window,lb_time=lb_time,na_rm=na_rm,wts_as_delta=wts_as_delta)
expect_error(fast <- t_running_mean(x,time=times,wts=wts,window=window,lb_time=lb_time,na_rm=na_rm,wts_as_delta=wts_as_delta),NA)
expect_equal(fast,slow,tolerance=1e-8)
for (nw in c(TRUE,FALSE)) {
slow <- slow_t_running_sd(x,time=times,wts=wts,window=window,lb_time=lb_time,na_rm=na_rm,wts_as_delta=wts_as_delta,normalize_wts=nw)
expect_error(fast <- t_running_sd(x,time=times,wts=wts,window=window,lb_time=lb_time,min_df=1,na_rm=na_rm,wts_as_delta=wts_as_delta,normalize_wts=nw),NA)
expect_equal(fast,slow,tolerance=1e-8)
slow <- slow_t_running_skew(x,time=times,wts=wts,window=window,lb_time=lb_time,na_rm=na_rm,wts_as_delta=wts_as_delta,normalize_wts=nw)
expect_error(fast <- t_running_skew(x,time=times,wts=wts,window=window,lb_time=lb_time,na_rm=na_rm,wts_as_delta=wts_as_delta,normalize_wts=nw),NA)
expect_equal(fast,slow,tolerance=1e-8)
slow <- slow_t_running_kurt(x,time=times,wts=wts,window=window,lb_time=lb_time,na_rm=na_rm,wts_as_delta=wts_as_delta,normalize_wts=nw)
expect_error(fast <- t_running_kurt(x,time=times,wts=wts,window=window,lb_time=lb_time,na_rm=na_rm,wts_as_delta=wts_as_delta,normalize_wts=nw),NA)
expect_equal(fast,slow,tolerance=1e-8)
slow <- slow_t_running_sd3(x,time=times,wts=wts,window=window,lb_time=lb_time,na_rm=na_rm,wts_as_delta=wts_as_delta,normalize_wts=nw)
expect_error(fast <- t_running_sd3(x,time=times,wts=wts,window=window,lb_time=lb_time,na_rm=na_rm,wts_as_delta=wts_as_delta,normalize_wts=nw),NA)
# ignore the df computation in slow when empty
slow[fast[,3]==0,3] <- 0
slow[is.na(fast[,1]),1] <- NA
expect_equal(fast,slow,tolerance=1e-8)
slow <- slow_t_running_skew4(x,time=times,wts=wts,window=window,lb_time=lb_time,na_rm=na_rm,normalize_wts=nw)
expect_error(fast <- t_running_skew4(x,time=times,wts=wts,window=window,lb_time=lb_time,na_rm=na_rm,normalize_wts=nw),NA)
# ignore the df computation in slow when empty
okrow <- !is.na(fast[,4]) & fast[,4] > 3 & row(fast)[,4] > 3
expect_equal(fast[okrow,],slow[okrow,],tolerance=1e-8)
slow <- slow_t_running_kurt5(x,time=times,wts=wts,window=window,lb_time=lb_time,na_rm=na_rm,normalize_wts=nw)
expect_error(fast <- t_running_kurt5(x,time=times,wts=wts,window=window,lb_time=lb_time,na_rm=na_rm,normalize_wts=nw),NA)
# only compare rows with enough observations to define kurtosis
okrow <- !is.na(fast[,5]) & fast[,5] > 4 & row(fast)[,5] > 4
expect_equal(fast[okrow,],slow[okrow,],tolerance=1e-8)
}
}
}# UNFOLD
}
}
}
}# UNFOLD
})#UNFOLD
context("t_running_sd")
# t_running_sd is a bellwether for the other methods
# as it goes, so goes the other Welford based functions
test_that("check it",{#FOLDUP
skip_on_cran()
set.char.seed("79f60eda-7799-46e6-9096-6817b2d4473b")
na_rm <- FALSE
for (xlen in c(20,50)) {# FOLDUP
x <- rnorm(xlen)
for (times in list(NULL,cumsum(runif(length(x),min=0.2,max=0.4)))) {
for (wts in list(NULL,rep(1L,xlen),runif(xlen,min=1.2,max=2.1))) {
# when no times are given, treat the weights as time deltas
wts_as_delta <- is.null(times) & !is.null(wts)
if (!is.null(times) || (wts_as_delta && !is.null(wts))) {
for (window in c(11.5,20.5,Inf)) { # FOLDUP
for (lb_time in list(NULL,cumsum(runif(20,min=0.2,max=1)))) {
for (nw in c(TRUE,FALSE)) {
# compare against the direct reference_sd applied per window
expect_error(slow <- reference_t_running_sd(x,time=times,wts=wts,wts_as_delta=TRUE,window=window,lb_time=lb_time,na_rm=na_rm,min_df=1,normalize_wts=nw),NA)
expect_error(fast <- t_running_sd(x,time=times,wts=wts,wts_as_delta=TRUE,used_df=1,window=window,lb_time=lb_time,min_df=1,na_rm=na_rm,normalize_wts=nw),NA)
expect_equal(fast,slow,tolerance=1e-7)
}
}
}# UNFOLD
}
}
}
}# UNFOLD
})#UNFOLD
#for vim modeline: (do not edit)
# vim:ts=2:sw=2:tw=79:fdm=marker:fmr=FOLDUP,UNFOLD:cms=#%s:syn=r:ft=r:ai:si:cin:nu:fo=croql:cino=p0t0c5(0:
| /tests/testthat/test-correctness.r | no_license | shabbychef/fromo | R | false | false | 36,785 | r | # Copyright 2016 Steven E. Pav. All Rights Reserved.
# Author: Steven E. Pav
# This file is part of fromo.
#
# fromo is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# fromo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with fromo. If not, see <http://www.gnu.org/licenses/>.
# env var:
# nb:
# see also:
# todo:
# changelog:
#
# Created: 2016.03.28
# Copyright: Steven E. Pav, 2016-2016
# Author: Steven E. Pav
# Comments: Steven E. Pav
# helpers#FOLDUP
set.char.seed <- function(str) {
  # derive a reproducible RNG seed from the raw bytes of a string,
  # so tests can be seeded with human-readable UUIDs
  seed_bytes <- charToRaw(str)
  set.seed(as.integer(seed_bytes))
}
THOROUGHNESS <- getOption('test.thoroughness',1.0)
# slow version of t_running; requires a helper function.
# Reference implementation of the time-based running operations: for each
# lookback time, gather the observations whose timestamps fall in the
# trailing (t0, tf] window and apply `func` to them.
slow_op <- function(v,func,outsize=1,missfill=NA,
time=NULL,time_deltas=NULL,window=Inf,wts=NULL,lb_time=NULL,
na_rm=FALSE,min_df=0,lookahead=0,variable_win=FALSE,wts_as_delta=TRUE,...) {
# reconstruct observation times from deltas (possibly taken from the weights)
if (is.null(time)) {
if (is.null(time_deltas) && !is.null(wts) && wts_as_delta) {
time_deltas <- wts
} else {
stop('bad input')
}
time <- cumsum(time_deltas)
}
if (is.null(lb_time)) {
lb_time <- time
}
lb_time <- lb_time + lookahead
if (variable_win) {
# variable windows reach back to the previous lookback time
tprev <- c(-Inf,lb_time[1:(length(lb_time)-1)])
} else {
tprev <- lb_time - window
}
# fix weights.
# NOTE(review): min_df is accepted but never used in this body -- TODO confirm
sapply(seq_along(lb_time),
function(idx) {
tf <- lb_time[idx]
t0 <- tprev[idx]
# half-open window: strictly after t0, up to and including tf
takeus <- (t0 < time) & (time <= tf)
if (na_rm) {
takeus <- takeus & !is.na(v)
if (!is.null(wts)) {
takeus <- takeus & !is.na(wts)
}
}
if (any(takeus)) {
vsub <- v[takeus]
if (is.null(wts)) {
retv <- func(vsub,...)
} else {
subwts <- wts[takeus]
retv <- func(vsub,wts=subwts,...)
}
} else {
# an empty window yields the fill value, repeated to the output size
retv <- rep(missfill,outsize)
}
retv
})
}
slow_t_running_sum <- function(v,...) {
  # windowed sum; the unweighted case recovers the sum as mean times
  # count from the sd3 summary, empty windows fill with zero.
  summer <- function(v,wts=NULL,...) {
    if (is.null(wts)) {
      stats <- sd3(v,...)
      stats[2] * stats[3]
    } else {
      sum(v*wts)
    }
  }
  as.numeric(slow_op(v=v,func=summer,missfill=0,...))
}
slow_t_running_mean <- function(v,...) {
  # the mean is the second entry of the sd3 summary vector
  as.numeric(slow_op(v=v,func=function(v,...) sd3(v,...)[2],...))
}
slow_t_running_sd <- function(v,...) {
  # the standard deviation is the first entry of the sd3 summary
  sd_of <- function(v,...) sd3(v,...)[1]
  matrix(slow_op(v=v,func=sd_of,...),ncol=1)
}
slow_t_running_skew <- function(v,...) {
  # the skewness is the first entry of the skew4 summary
  skew_of <- function(v,...) skew4(v,...)[1]
  matrix(slow_op(v=v,func=skew_of,...),ncol=1)
}
slow_t_running_kurt <- function(v,...) {
  # the excess kurtosis is the first entry of the kurt5 summary
  kurt_of <- function(v,...) kurt5(v,...)[1]
  matrix(slow_op(v=v,func=kurt_of,...),ncol=1)
}
slow_t_running_sd3 <- function(v,...) {
  # full (sd, mean, count) summary per window; transpose so each row
  # corresponds to one lookback time
  t(slow_op(v=v,func=function(v,...) sd3(v,...),outsize=3,...))
}
slow_t_running_skew4 <- function(v,...) {
  # skew is undefined for windows of 2 or fewer observations; pad the
  # sd3 summary with a leading NA in that case
  padded <- function(v,...) {
    if (length(v) > 2) {
      skew4(v,...)
    } else {
      c(NA,sd3(v,...))
    }
  }
  t(slow_op(v=v,func=padded,outsize=4,...))
}
slow_t_running_kurt5 <- function(v,...) {
  # pad with NA entries for windows too small to define kurtosis (<= 3
  # observations) or skew (<= 2 observations)
  padded <- function(v,...) {
    nobs <- length(v)
    if (nobs > 3) {
      kurt5(v,...)
    } else if (nobs > 2) {
      c(NA,skew4(v,...))
    } else {
      c(NA,NA,sd3(v,...))
    }
  }
  t(slow_op(v=v,func=padded,outsize=5,...))
}
# Gold-standard (direct, slow) weighted standard deviation.
#
# @param x numeric vector of observations.
# @param wts optional weights, same length as x.
# @param na_rm drop NA observations (and NA or negative weights)?
# @param normalize_wts scale the used_df correction by the mean weight?
# @param min_df minimum number of (retained) observations; fewer gives NA.
# @param used_df degrees of freedom consumed by the mean estimate.
# @return a single numeric, the (weighted) standard deviation.
reference_sd <- function(x,wts=NULL,na_rm=FALSE,normalize_wts=FALSE,min_df=0,used_df=1) {
  if (na_rm) {
    keep <- !is.na(x)
    if (!is.null(wts)) {
      keep <- keep & !is.na(wts) & wts >= 0
    }
    x <- x[keep]
    if (!is.null(wts)) {
      wts <- wts[keep]
    }
  }
  if (length(x) < min_df) {
    return(NA)
  }
  if (!is.null(wts)) {
    wsum <- sum(wts)
    mu <- sum(x*wts) / wsum
    # when normalizing, the df correction is expressed in units of the
    # mean weight rather than raw observations
    # (idiom fix: was a scalar ifelse(); plain if/else is clearer)
    adj <- if (normalize_wts) { wsum / length(x) } else { 1 }
    vv <- sum(wts * (x - mu)^2) / (wsum - used_df * adj)
  } else {
    wsum <- length(x)
    mu <- sum(x) / wsum
    vv <- sum((x - mu)^2) / (wsum - used_df)
  }
  sqrt(vv)
}
# not quite the same as slow_t_running_sd above
reference_t_running_sd <- function(v,...) {
  # apply the direct reference_sd over each lookback window, returned
  # as a one-column matrix
  vals <- slow_op(v=v,func=reference_sd,...)
  matrix(vals,ncol=1)
}
#UNFOLD
context("first moments")#FOLDUP
# Unweighted sd3/skew4/kurt5 against base R and the moments, PDQutils,
# and e1071 packages.
test_that("sd, skew, kurt are correct",{#FOLDUP
set.char.seed("c4007dba-2010-481e-abe5-f07d3ce94eb4")
x <- rnorm(1000)
expect_error(sid <- sd3(x),NA)
expect_error(ske <- skew4(x),NA)
expect_error(krt <- kurt5(x),NA)
expect_equal(length(sid),3)
expect_equal(length(ske),4)
expect_equal(length(krt),5)
# compare computations to gold standard
# length
expect_equal(sid[3],length(x))
expect_equal(sid[3],ske[4])
expect_equal(sid[3],krt[5])
# mean
expect_equal(sid[2],mean(x),tolerance=1e-9)
expect_equal(sid[2],ske[3],tolerance=1e-9)
expect_equal(sid[2],krt[4],tolerance=1e-9)
# standard dev
expect_equal(sid[1],ske[2],tolerance=1e-9)
expect_equal(sid[1],krt[3],tolerance=1e-9)
expect_equal(sid[1],sd(x),tolerance=1e-9)
# skew
expect_equal(ske[1],krt[2],tolerance=1e-9)
if (require(moments)) {
na_rm <- TRUE
# sum(sign(abs(.)+1)) counts observations, honoring na.rm
dumb_count <- sum(sign(abs(x)+1),na.rm=na_rm)
dumb_mean <- mean(x,na.rm=na_rm)
dumb_sd <- sd(x,na.rm=na_rm)
dumb_skew <- moments::skewness(x,na.rm=na_rm)
dumb_exkurt <- moments::kurtosis(x,na.rm=na_rm) - 3.0
dumb_cmom2 <- moments::moment(x,central=TRUE,na.rm=na_rm,order=2)
dumb_cmom3 <- moments::moment(x,central=TRUE,na.rm=na_rm,order=3)
dumb_cmom4 <- moments::moment(x,central=TRUE,na.rm=na_rm,order=4)
dumb_cmom5 <- moments::moment(x,central=TRUE,na.rm=na_rm,order=5)
dumb_cmom6 <- moments::moment(x,central=TRUE,na.rm=na_rm,order=6)
# skew
expect_equal(ske[1],dumb_skew,tolerance=1e-9)
# kurtosis
expect_equal(krt[1],dumb_exkurt,tolerance=1e-9)
# oops. problems with centered moments in terms of the used_df; need a
# better test...
cmoms <- cent_moments(x,max_order=6,used_df=0)
dumbv <- c(dumb_cmom6,dumb_cmom5,dumb_cmom4,dumb_cmom3,dumb_cmom2,dumb_mean,dumb_count)
expect_equal(max(abs(cmoms-dumbv)),0,tolerance=1e-9)
if (require(PDQutils)) {
cumuls <- cent_cumulants(x,max_order=length(cmoms)-1)
# convert the moments-based reference into cumulants for comparison
dumbv0 <- c(dumb_cmom6,dumb_cmom5,dumb_cmom4,dumb_cmom3,dumb_cmom2,dumb_mean,dumb_count)
dumbv1 <- PDQutils::moment2cumulant(c(0,rev(dumbv0)[3:length(dumbv0)]))
dumbv <- c(rev(dumbv1[2:length(dumbv1)]),dumb_mean,dumb_count)
expect_equal(max(abs(cumuls-dumbv)),0,tolerance=1e-12)
}
}
if (require(e1071)) {
# e1071 type-3 skewness differs from ours by a ((n-1)/n)^(3/2) factor
dumb_skew <- e1071::skewness(x,type=3)
equiv_skew <- ske[1] * ((ske[4]-1)/(ske[4]))^(3/2)
expect_equal(dumb_skew,equiv_skew,tolerance=1e-12)
}
# 2FIX: add cent_moments and std_moments
# 2FIX: check NA
# sentinel
expect_true(TRUE)
})#UNFOLD
# Unit weights must reproduce the unweighted computations exactly,
# regardless of the normalize_wts setting.
test_that("unit weighted sd, skew, kurt are correct",{#FOLDUP
set.char.seed("b652ccd2-478b-44d4-90e2-2ca2bad99d25")
x <- rnorm(1000)
ones <- rep(1,length(x))
expect_equal(sd3(x),sd3(x,wts=ones),tolerance=1e-9)
expect_equal(skew4(x),skew4(x,wts=ones),tolerance=1e-9)
expect_equal(kurt5(x),kurt5(x,wts=ones),tolerance=1e-9)
# 2FIX: probably normalize_wts=FALSE should be the default???? for speed?
expect_equal(running_sd(x),running_sd(x,wts=ones,normalize_wts=TRUE),tolerance=1e-9)
expect_equal(running_skew(x),running_skew(x,wts=ones,normalize_wts=TRUE),tolerance=1e-9)
expect_equal(running_kurt(x),running_kurt(x,wts=ones,normalize_wts=TRUE),tolerance=1e-9)
expect_equal(running_sd(x),running_sd(x,wts=ones,normalize_wts=FALSE),tolerance=1e-9)
expect_equal(running_skew(x),running_skew(x,wts=ones,normalize_wts=FALSE),tolerance=1e-9)
expect_equal(running_kurt(x),running_kurt(x,wts=ones,normalize_wts=FALSE),tolerance=1e-9)
# 2FIX: add more.
# sentinel
expect_true(TRUE)
})#UNFOLD
# With normalize_wts=TRUE, every statistic must be invariant to a global
# rescaling of the weights (here, doubling).
test_that("normalize weights works",{#FOLDUP
set.char.seed("2694ae87-62d4-4154-9c32-864f9a6e648d")
x <- rnorm(25)
wts <- runif(length(x))
expect_equal(sd3(x,wts=wts,normalize_wts=TRUE),
sd3(x,wts=2*wts,normalize_wts=TRUE),tolerance=1e-9)
expect_equal(skew4(x,wts=wts,normalize_wts=TRUE),
skew4(x,wts=2*wts,normalize_wts=TRUE),tolerance=1e-9)
expect_equal(kurt5(x,wts=wts,normalize_wts=TRUE),
kurt5(x,wts=2*wts,normalize_wts=TRUE),tolerance=1e-9)
expect_equal(running_sd(x,wts=wts,normalize_wts=TRUE),
running_sd(x,wts=2*wts,normalize_wts=TRUE),tolerance=1e-9)
expect_equal(running_skew(x,wts=wts,normalize_wts=TRUE),
running_skew(x,wts=2*wts,normalize_wts=TRUE),tolerance=1e-9)
expect_equal(running_kurt(x,wts=wts,normalize_wts=TRUE),
running_kurt(x,wts=2*wts,normalize_wts=TRUE),tolerance=1e-9)
expect_equal(running_sd3(x,wts=wts,normalize_wts=TRUE),
running_sd3(x,wts=2*wts,normalize_wts=TRUE),tolerance=1e-9)
expect_equal(running_skew4(x,wts=wts,normalize_wts=TRUE),
running_skew4(x,wts=2*wts,normalize_wts=TRUE),tolerance=1e-9)
expect_equal(running_kurt5(x,wts=wts,normalize_wts=TRUE),
running_kurt5(x,wts=2*wts,normalize_wts=TRUE),tolerance=1e-9)
expect_equal(running_sharpe(x,wts=wts,normalize_wts=TRUE),
running_sharpe(x,wts=2*wts,normalize_wts=TRUE),tolerance=1e-9)
expect_equal(running_sharpe(x,wts=wts,normalize_wts=TRUE,compute_se=TRUE),
running_sharpe(x,wts=2*wts,normalize_wts=TRUE,compute_se=TRUE),tolerance=1e-9)
expect_equal(running_centered(x,wts=wts,normalize_wts=TRUE),
running_centered(x,wts=2*wts,normalize_wts=TRUE),tolerance=1e-9)
expect_equal(running_apx_median(x,wts=wts,normalize_wts=TRUE),
running_apx_median(x,wts=2*wts,normalize_wts=TRUE),tolerance=1e-9)
expect_equal(running_tstat(x,wts=wts,normalize_wts=TRUE),
running_tstat(x,wts=2*wts,normalize_wts=TRUE),tolerance=1e-9)
expect_equal(running_zscored(x,wts=wts,normalize_wts=TRUE),
running_zscored(x,wts=2*wts,normalize_wts=TRUE),tolerance=1e-9)
expect_equal(running_scaled(x,wts=wts,normalize_wts=TRUE),
running_scaled(x,wts=2*wts,normalize_wts=TRUE),tolerance=1e-9)
ptiles <- c(0.1,0.25,0.5,0.75,0.9)
expect_equal(running_apx_quantiles(x,p=ptiles,wts=wts,normalize_wts=TRUE),
running_apx_quantiles(x,p=ptiles,wts=2*wts,normalize_wts=TRUE),tolerance=1e-9)
expect_equal(running_cent_moments(x,wts=wts,normalize_wts=TRUE),
running_cent_moments(x,wts=2*wts,normalize_wts=TRUE),tolerance=1e-9)
expect_equal(running_std_moments(x,wts=wts,normalize_wts=TRUE),
running_std_moments(x,wts=2*wts,normalize_wts=TRUE),tolerance=1e-9)
expect_equal(running_cumulants(x,wts=wts,normalize_wts=TRUE),
running_cumulants(x,wts=2*wts,normalize_wts=TRUE),tolerance=1e-9)
})#UNFOLD
test_that("weight scaling what you expect",{#FOLDUP
set.char.seed("efaa75ac-bb9e-4e4a-a375-7028f099366e")
x <- rnorm(50)
wts <- runif(length(x))
# with unnormalized weights, doubling all weights should double only the
# effective observation count (the trailing entry of each summary),
# leaving the moment estimates themselves unchanged.
expect_error(sd_base <- sd3(x,wts=wts,normalize_wts=FALSE,sg_df=0),NA)
expect_error(sk_base <- skew4(x,wts=wts,normalize_wts=FALSE,sg_df=0),NA)
expect_error(ku_base <- kurt5(x,wts=wts,normalize_wts=FALSE,sg_df=0),NA)
expect_error(sd_dbl <- sd3(x,wts=2*wts,normalize_wts=FALSE,sg_df=0),NA)
expect_error(sk_dbl <- skew4(x,wts=2*wts,normalize_wts=FALSE,sg_df=0),NA)
expect_error(ku_dbl <- kurt5(x,wts=2*wts,normalize_wts=FALSE,sg_df=0),NA)
expect_equal(sd_base * c(1,1,2),sd_dbl,tolerance=1e-9)
expect_equal(sk_base * c(1,1,1,2),sk_dbl,tolerance=1e-9)
expect_equal(ku_base * c(1,1,1,1,2),ku_dbl,tolerance=1e-9)
})#UNFOLD
# Weighted sd3/skew4/kurt5 against hand-computed weighted moments.
test_that("weighted sd, skew, kurt are correct",{#FOLDUP
set.char.seed("4e17d837-69c1-41d1-906f-c82224d7ce41")
x <- rnorm(1000)
wts <- runif(length(x))
expect_error(sid <- sd3(x,wts=wts,normalize_wts=TRUE),NA)
expect_error(ske <- skew4(x,wts=wts,normalize_wts=TRUE),NA)
expect_error(krt <- kurt5(x,wts=wts,normalize_wts=TRUE),NA)
# 2FIX: add more here to check correctness ...
expect_equal(length(sid),3)
expect_equal(length(ske),4)
expect_equal(length(krt),5)
# compare computations to gold standard
# length
expect_equal(sid[3],length(x))
expect_equal(sid[3],ske[4])
expect_equal(sid[3],krt[5])
# mean
expect_equal(sid[2],weighted.mean(x,w=wts),tolerance=1e-9)
expect_equal(sid[2],ske[3],tolerance=1e-9)
expect_equal(sid[2],krt[4],tolerance=1e-9)
# standard dev
expect_equal(sid[1],ske[2],tolerance=1e-9)
expect_equal(sid[1],krt[3],tolerance=1e-9)
wsd <- sqrt(sum(((x - weighted.mean(x,w=wts))^2) * (wts / mean(wts))) / (length(x) - 1))
# 2FIX!!!
expect_equal(sid[1],wsd,tolerance=1e-9)
# skew
expect_equal(ske[1],krt[2],tolerance=1e-9)
dumb_count <- length(x)
dumb_mean <- weighted.mean(x,w=wts)
# weighted central moment of the given order; weights normalized to mean
# one so results are comparable to unweighted moments.
# (bugfix: this helper previously used the global `x` instead of its
# `vec` argument; callers passed `x` so results are unchanged.)
wcmom <- function(vec,wts,ord) {
wz <- wts / mean(wts)
mean(wz * ((vec - weighted.mean(vec,w=wz))^ord))
}
dumb_wcmom2 <- wcmom(x,wts,2)
dumb_wcmom3 <- wcmom(x,wts,3)
dumb_wcmom4 <- wcmom(x,wts,4)
dumb_wcmom5 <- wcmom(x,wts,5)
dumb_wcmom6 <- wcmom(x,wts,6)
cmoms <- cent_moments(x,wts=wts,max_order=6,used_df=0,normalize_wts=TRUE)
dumbv <- c(dumb_wcmom6,dumb_wcmom5,dumb_wcmom4,dumb_wcmom3,dumb_wcmom2,dumb_mean,dumb_count)
expect_equal(cmoms,dumbv,tolerance=1e-9)
dumb_skew <- dumb_wcmom3 / (dumb_wcmom2^(3/2))
dumb_exkurt <- (dumb_wcmom4 / (dumb_wcmom2^(2))) - 3
# skew
expect_equal(ske[1],dumb_skew,tolerance=1e-9)
# kurtosis
expect_equal(krt[1],dumb_exkurt,tolerance=1e-9)
})#UNFOLD
#UNFOLD
# Coerce the input to a plain matrix, discarding any dimnames, so that
# expect_equal comparisons against the fast implementations are not
# confounded by row/column labels.
tomat <- function(cbound) {
  unname(as.matrix(cbound))
}
context("running ops are correct")
test_that("running ops are correct",{#FOLDUP
skip_on_cran()
# Strategy: for every index i, recompute each statistic from scratch over the
# trailing window x[max(1,i-window+1):i] (the 'dumb' values) and compare them
# to the single-pass running_* implementations.
ptiles <- c(0.1,0.25,0.5,0.75,0.9)
set.char.seed("7ffe0035-2d0c-4586-a1a5-6321c7cf8694")
for (xlen in c(20,100)) {
for (xmu in c(1e3,1e6)) {
# a large mean stresses numerical cancellation in the running computations,
# so the comparison tolerance is scaled up with the mean
toler <- xmu ^ (1/3)
x <- rnorm(xlen,mean=xmu)
for (window in c(15,50,Inf)) {
for (restart_period in c(20,1000)) {
for (na_rm in c(FALSE,TRUE)) {
# sign(abs(.)+1) is 1 for every non-NA element, so this sum counts the
# observations in each trailing window
dumb_count <- sapply(seq_along(x),function(iii) { sum(sign(abs(x[max(1,iii-window+1):iii])+1),na.rm=na_rm) },simplify=TRUE)
dumb_sum <- sapply(seq_along(x),function(iii) { sum(x[max(1,iii-window+1):iii],na.rm=na_rm) },simplify=TRUE)
dumb_mean <- sapply(seq_along(x),function(iii) { mean(x[max(1,iii-window+1):iii],na.rm=na_rm) },simplify=TRUE)
dumb_sd <- sapply(seq_along(x),function(iii) { sd(x[max(1,iii-window+1):iii],na.rm=na_rm) },simplify=TRUE)
dumb_skew <- sapply(seq_along(x),function(iii) { moments::skewness(x[max(1,iii-window+1):iii],na.rm=na_rm) },simplify=TRUE)
dumb_exkurt <- sapply(seq_along(x),function(iii) { moments::kurtosis(x[max(1,iii-window+1):iii],na.rm=na_rm) - 3.0 },simplify=TRUE)
dumb_cmom2 <- sapply(seq_along(x),function(iii) { moments::moment(x[max(1,iii-window+1):iii],central=TRUE,na.rm=na_rm,order=2) },simplify=TRUE)
dumb_cmom3 <- sapply(seq_along(x),function(iii) { moments::moment(x[max(1,iii-window+1):iii],central=TRUE,na.rm=na_rm,order=3) },simplify=TRUE)
dumb_cmom4 <- sapply(seq_along(x),function(iii) { moments::moment(x[max(1,iii-window+1):iii],central=TRUE,na.rm=na_rm,order=4) },simplify=TRUE)
dumb_cmom5 <- sapply(seq_along(x),function(iii) { moments::moment(x[max(1,iii-window+1):iii],central=TRUE,na.rm=na_rm,order=5) },simplify=TRUE)
dumb_cmom6 <- sapply(seq_along(x),function(iii) { moments::moment(x[max(1,iii-window+1):iii],central=TRUE,na.rm=na_rm,order=6) },simplify=TRUE)
# SD
# early rows are skipped in the comparisons below because the k-th moment
# is not well defined until the window holds enough observations
expect_error(fastv <- running_sd(x,window=window,restart_period=restart_period,na_rm=na_rm),NA)
dumbv <- tomat(dumb_sd)
expect_equal(dumbv[2:xlen],fastv[2:xlen],tolerance=1e-7 * toler)
expect_error(fastv <- running_sd3(x,window=window,restart_period=restart_period,na_rm=na_rm),NA)
dumbv <- tomat(cbind(dumb_sd,dumb_mean,dumb_count))
expect_equal(dumbv[2:xlen,],fastv[2:xlen,],tolerance=1e-7 * toler)
# skew
expect_error(fastv <- running_skew(x,window=window,restart_period=restart_period,na_rm=na_rm),NA)
dumbv <- tomat(dumb_skew)
expect_equal(dumbv[3:xlen],fastv[3:xlen],tolerance=1e-6 * toler)
expect_error(fastv <- running_skew4(x,window=window,restart_period=restart_period,na_rm=na_rm),NA)
dumbv <- tomat(cbind(dumb_skew,dumb_sd,dumb_mean,dumb_count))
expect_equal(dumbv[3:xlen,],fastv[3:xlen,],tolerance=1e-7 * toler)
# excess kurtosis
expect_error(fastv <- running_kurt(x,window=window,restart_period=restart_period,na_rm=na_rm),NA)
dumbv <- tomat(dumb_exkurt)
expect_equal(dumbv[4:xlen],fastv[4:xlen],tolerance=1e-6 * toler)
expect_error(fastv <- running_kurt5(x,window=window,restart_period=restart_period,na_rm=na_rm),NA)
dumbv <- tomat(cbind(dumb_exkurt,dumb_skew,dumb_sd,dumb_mean,dumb_count))
expect_equal(dumbv[4:xlen,],fastv[4:xlen,],tolerance=1e-6 * toler)
# higher order moments
expect_error(fastv <- running_cent_moments(x,window=window,max_order=6L,used_df=0L,restart_period=restart_period,na_rm=na_rm),NA)
dumbv <- tomat(cbind(dumb_cmom6,dumb_cmom5,dumb_cmom4,dumb_cmom3,dumb_cmom2,dumb_mean,dumb_count))
expect_equal(dumbv[6:xlen,],fastv[6:xlen,],tolerance=1e-6 * toler)
expect_error(fastv <- running_cent_moments(x,window=window,max_order=6L,max_order_only=TRUE,used_df=0L,restart_period=restart_period,na_rm=na_rm),NA)
dumbv <- tomat(dumb_cmom6)
expect_equal(dumbv[6:xlen,],fastv[6:xlen,],tolerance=1e-7 * toler)
# standardized moments: k-th central moment divided by sd^k
expect_error(fastv <- running_std_moments(x,window=window,max_order=6L,used_df=0L,restart_period=restart_period,na_rm=na_rm),NA)
dumbv <- tomat(cbind(dumb_cmom6 / (dumb_cmom2^3),dumb_cmom5 / (dumb_cmom2^2.5),dumb_cmom4 / (dumb_cmom2^2.0),dumb_cmom3 / (dumb_cmom2^1.5),sqrt(dumb_cmom2),dumb_mean,dumb_count))
expect_equal(dumbv[6:xlen,],fastv[6:xlen,],tolerance=1e-7 * toler)
# running sum and mean
expect_error(fastv <- running_sum(x,window=window,restart_period=restart_period,na_rm=na_rm),NA)
dumbv <- dumb_sum
expect_equal(dumbv[2:xlen],fastv[2:xlen],tolerance=1e-7 * toler)
expect_error(fastv <- running_mean(x,window=window,restart_period=restart_period,na_rm=na_rm),NA)
dumbv <- dumb_mean
expect_equal(dumbv[2:xlen],fastv[2:xlen],tolerance=1e-7 * toler)
# cumulant and approximate-quantile checks need PDQutils; skipped if absent
if (require(PDQutils)) {
# cumulants
expect_error(fastv <- running_cumulants(x,window=window,max_order=6L,used_df=0L,restart_period=restart_period,na_rm=na_rm),NA)
pre_dumbv <- cbind(dumb_cmom6,dumb_cmom5,dumb_cmom4,dumb_cmom3,dumb_cmom2,dumb_mean,dumb_count)
# convert central moments to cumulants per row; the prepended 0 is the
# first central moment, and the trailing two columns (mean, count) are
# carried through unchanged
dumbv <- t(sapply(seq_along(x),function(iii) {
rv <- rev(PDQutils::moment2cumulant(c(0,rev(pre_dumbv[iii,1:(ncol(pre_dumbv)-2)]))))
rv <- rv[-length(rv)]
c(rv,pre_dumbv[iii,ncol(pre_dumbv) + (-1:0)])
},simplify='matrix'))
expect_equal(max(abs(dumbv[6:xlen,] - fastv[6:xlen,])),0,tolerance=1e-8 * toler)
# quantiles
expect_error(fastv <- running_apx_quantiles(x,ptiles,max_order=ncol(dumbv)-1,used_df=0L,window=window,restart_period=restart_period,na_rm=na_rm),NA)
dumbq <- t(sapply(seq_along(x),function(iii) {
PDQutils::qapx_cf(ptiles,raw.cumulants=rev(dumbv[iii,1:(ncol(dumbv)-1)]))
}, simplify=TRUE))
expect_equal(max(abs(dumbq[8:xlen,] - fastv[8:xlen,])),0,tolerance=1e-8 * toler)
}
}
}
}
}
}
})#UNFOLD
test_that("running adjustments are correct",{#FOLDUP
skip_on_cran()
# Checks the 'adjustment' operations (centering, scaling, z-scoring, Sharpe,
# t-stat) against naive recomputation of mean/sd over each trailing window.
set.char.seed("967d2149-fbff-4d82-b227-ca3e1034bddb")
for (xlen in c(20,100)) {
x <- rnorm(xlen)
for (window in c(5,50,Inf)) {
for (restart_period in c(10,1000)) {
for (na_rm in c(FALSE,TRUE)) {
# sign(abs(.)+1) is 1 per non-NA element: counts observations per window
dumb_count <- sapply(seq_along(x),function(iii) { sum(sign(abs(x[max(1,iii-window+1):iii])+1),na.rm=na_rm) },simplify=TRUE)
dumb_mean <- sapply(seq_along(x),function(iii) { mean(x[max(1,iii-window+1):iii],na.rm=na_rm) },simplify=TRUE)
dumb_sd <- sapply(seq_along(x),function(iii) { sd(x[max(1,iii-window+1):iii],na.rm=na_rm) },simplify=TRUE)
expect_error(fastv <- running_centered(x,window=window,restart_period=restart_period,na_rm=na_rm),NA)
# the dumb value:
dumbv <- x - dumb_mean;
expect_equal(max(abs(dumbv - fastv)),0,tolerance=1e-12)
expect_error(fastv <- running_scaled(x,window=window,restart_period=restart_period,na_rm=na_rm),NA)
# the dumb value (first element skipped: sd is undefined for one point):
dumbv <- x / dumb_sd
expect_equal(max(abs(dumbv[2:length(x)] - fastv[2:length(x)])),0,tolerance=1e-12)
expect_error(fastv <- running_zscored(x,window=window,restart_period=restart_period,na_rm=na_rm),NA)
# the dumb value:
dumbv <- (x - dumb_mean) / dumb_sd
expect_equal(max(abs(dumbv[2:length(x)] - fastv[2:length(x)])),0,tolerance=1e-12)
expect_error(fastv <- running_sharpe(x,window=window,restart_period=restart_period,na_rm=na_rm),NA)
# the dumb value:
dumbv <- dumb_mean / dumb_sd
expect_equal(max(abs(dumbv[2:length(x)] - fastv[2:length(x)])),0,tolerance=1e-12)
expect_error(fastv <- running_tstat(x,window=window,restart_period=restart_period,na_rm=na_rm),NA)
# the dumb value:
dumbv <- (dumb_mean * sqrt(dumb_count)) / dumb_sd
expect_equal(max(abs(dumbv[2:length(x)] - fastv[2:length(x)])),0,tolerance=1e-12)
# with compute_se=TRUE the result gains a second column of standard errors
expect_error(fastv <- running_sharpe(x,window=window,restart_period=restart_period,na_rm=na_rm,compute_se=TRUE),NA)
# the dumb value:
dumb_sr <- dumb_mean / dumb_sd
expect_equal(max(abs(dumb_sr[2:length(x)] - fastv[2:length(x),1])),0,tolerance=1e-12)
if (require(moments)) {
dumb_skew <- sapply(seq_along(x),function(iii) { moments::skewness(x[max(1,iii-window+1):iii],na.rm=na_rm) },simplify=TRUE)
dumb_exkurt <- sapply(seq_along(x),function(iii) { moments::kurtosis(x[max(1,iii-window+1):iii],na.rm=na_rm) - 3.0 },simplify=TRUE)
# reference standard error of the Sharpe ratio, adjusting for skew and
# excess kurtosis (cf. Mertens-type SE formula)
dumb_merse <- sqrt((1 + 0.25 * (2+dumb_exkurt) * dumb_sr^2 - dumb_skew * dumb_sr) / dumb_count)
expect_equal(max(abs(dumb_merse[5:length(x)] - fastv[5:length(x),2])),0,tolerance=1e-9)
}
}
}
}
}
})#UNFOLD
context("weighted running ops are correct")
test_that("running weights work correctly",{#FOLDUP
skip_on_cran()
# Checks the weighted running operations against naive weighted recomputation
# over each trailing window (the 'slow_*' reference values).
set.char.seed("b82d252c-681b-4b98-9bb3-ffd17feeb4a1")
na_rm <- FALSE
restart_period <- 1000
for (xlen in c(20,50)) {
x <- rnorm(xlen)
# unit weights should reduce to the unweighted case; the second set is generic
for (wts in list(rep(1L,xlen), runif(xlen,min=2,max=7))) {
for (window in c(5,30,Inf)) { # FOLDUP
# 2FIX: add to this!
# count of (non-NA) observations per trailing window
slow_count <- sapply(seq_along(x),function(iii) { sum(sign(abs(x[max(1,iii-window+1):iii])+1),na.rm=na_rm) },simplify=TRUE)
slow_sumwt <- sapply(seq_along(x),function(iii) { sum(wts[max(1,iii-window+1):iii],na.rm=na_rm) },simplify=TRUE)
slow_mean <- sapply(seq_along(x),function(iii) {
mydx <- max(1,iii-window+1):iii
mywts <- wts[mydx]
sum(mywts * x[mydx],na.rm=na_rm) / slow_sumwt[iii]
},simplify=TRUE)
# un-normalized variance: degrees of freedom are (sum of weights) - 1
slow_var <- sapply(seq_along(x),function(iii) {
mydx <- max(1,iii-window+1):iii
mywts <- wts[mydx]
sum(mywts * (x[mydx] - slow_mean[iii])^2,na.rm=na_rm) / (slow_sumwt[iii] - 1)
},simplify=TRUE)
slow_sd <- sqrt(slow_var)
# the normalize version;
# weights rescaled to mean one, so degrees of freedom are count - 1
slow_nvar <- sapply(seq_along(x),function(iii) {
mydx <- max(1,iii-window+1):iii
mywts <- wts[mydx]
(slow_count[iii]/slow_sumwt[iii]) * sum(mywts * (x[mydx] - slow_mean[iii])^2,na.rm=na_rm) / (slow_count[iii] - 1)
},simplify=TRUE)
slow_nsd <- sqrt(slow_nvar)
slow_cent3 <- sapply(seq_along(x),function(iii) {
mydx <- max(1,iii-window+1):iii
mywts <- wts[mydx]
sum(mywts * (x[mydx] - slow_mean[iii])^3,na.rm=na_rm) / (slow_sumwt[iii])
},simplify=TRUE)
slow_cent4 <- sapply(seq_along(x),function(iii) {
mydx <- max(1,iii-window+1):iii
mywts <- wts[mydx]
sum(mywts * (x[mydx] - slow_mean[iii])^4,na.rm=na_rm) / (slow_sumwt[iii])
},simplify=TRUE)
expect_error(fastv <- running_mean(x,wts=wts,min_df=0,window=window,na_rm=na_rm),NA)
expect_equal(fastv,slow_mean,tolerance=1e-8)
expect_error(fastv <- running_centered(x,wts=wts,window=window,restart_period=restart_period,na_rm=na_rm),NA)
slowv <- x - slow_mean;
expect_equal(as.numeric(fastv),slowv,tolerance=1e-8)
# normalize_wts=TRUE uses the mean-one-normalized sd and the observation
# count as degrees of freedom; FALSE uses the raw weighted sd and the sum
# of weights
for (nw in c(TRUE,FALSE)) {
if (nw) {
use_sd <- slow_nsd
use_df <- slow_count
} else {
use_sd <- slow_sd
use_df <- slow_sumwt
}
expect_error(fastv <- running_sd(x,wts=wts,window=window,na_rm=na_rm,normalize_wts=nw),NA)
expect_equal(as.numeric(fastv),use_sd,tolerance=1e-8)
expect_error(fastv <- running_sd3(x,wts=wts,window=window,na_rm=na_rm,normalize_wts=nw),NA)
expect_equal(as.numeric(fastv[,1]),use_sd,tolerance=1e-8)
expect_equal(as.numeric(fastv[,2]),slow_mean,tolerance=1e-8)
expect_equal(as.numeric(fastv[,3]),use_df,tolerance=1e-8)
expect_error(fastv <- running_scaled(x,wts=wts,window=window,restart_period=restart_period,na_rm=na_rm,normalize_wts=nw),NA)
slowv <- x / use_sd
expect_equal(as.numeric(fastv[2:length(x)]),slowv[2:length(x)],tolerance=1e-8)
expect_error(fastv <- running_zscored(x,wts=wts,window=window,restart_period=restart_period,na_rm=na_rm,normalize_wts=nw),NA)
slowv <- (x - slow_mean) / use_sd
expect_equal(slowv[2:length(x)],fastv[2:length(x)],tolerance=1e-12)
expect_error(fastv <- running_sharpe(x,wts=wts,window=window,restart_period=restart_period,na_rm=na_rm,normalize_wts=nw),NA)
slowv <- slow_mean / use_sd
expect_equal(slowv[2:length(x)],fastv[2:length(x)],tolerance=1e-12)
expect_error(fastv <- running_tstat(x,wts=wts,window=window,restart_period=restart_period,na_rm=na_rm,normalize_wts=nw),NA)
slowv <- (slow_mean * sqrt(use_df)) / use_sd
expect_equal(slowv[2:length(x)],fastv[2:length(x)],tolerance=1e-12)
expect_error(fastv <- running_cent_moments(x,wts=wts,window=window,max_order=3L,max_order_only=TRUE,restart_period=restart_period,na_rm=na_rm,normalize_wts=nw),NA)
slowv <- slow_cent3
expect_equal(slowv[3:length(x)],fastv[3:length(x)],tolerance=1e-12)
expect_error(fastv <- running_cent_moments(x,wts=wts,window=window,max_order=4L,max_order_only=TRUE,restart_period=restart_period,na_rm=na_rm,normalize_wts=nw),NA)
slowv <- slow_cent4
expect_equal(slowv[4:length(x)],fastv[4:length(x)],tolerance=1e-12)
}
}# UNFOLD
}
}
})#UNFOLD
context("t_running for trivial case")
test_that("vs running ops",{#FOLDUP
skip_on_cran()
# When time is simply 1..n, each time-based t_running_* should reproduce the
# count-based running_* result exactly; the time window is shrunk by 0.1 to
# avoid tie/roundoff ambiguity at the window edge with double times.
set.char.seed("712463ec-f266-4de7-89d2-ce3c824327b0")
na_rm <- FALSE
ptiles <- c(0.1,0.25,0.5,0.75,0.9)
for (xlen in c(20,50)) {
x <- rnorm(xlen)
times <- seq_along(x)
for (wts in list(NULL,rep(1L,xlen), runif(xlen,min=1.2,max=3.5))) {
# 2FIX? Inf window?
for (window in c(5,30,Inf)) { # FOLDUP
# to avoid roundoff issues on the double times.
t_window <- window - 0.1
expect_error(box <- running_sum(x,wts=wts,window=window,na_rm=na_rm),NA)
expect_error(tbox <- t_running_sum(x,time=times,wts=wts,window=t_window,na_rm=na_rm),NA)
expect_equal(box,tbox,tolerance=1e-8)
expect_error(box <- running_mean(x,wts=wts,min_df=0,window=window,na_rm=na_rm),NA)
expect_error(tbox <- t_running_mean(x,time=times,wts=wts,min_df=0,window=t_window,na_rm=na_rm),NA)
expect_equal(box,tbox,tolerance=1e-8)
# the remaining ops also take normalize_wts; check both settings
for (nw in c(TRUE,FALSE)) {
expect_error(box <- running_sd(x,wts=wts,window=window,na_rm=na_rm,normalize_wts=nw),NA)
expect_error(tbox <- t_running_sd(x,time=times,wts=wts,window=t_window,na_rm=na_rm,normalize_wts=nw),NA)
expect_equal(box,tbox,tolerance=1e-8)
expect_error(box <- running_skew(x,wts=wts,window=window,na_rm=na_rm,normalize_wts=nw),NA)
# the 0.1 is to avoid roundoff issues on the double times.
expect_error(tbox <- t_running_skew(x,time=times,wts=wts,window=t_window,na_rm=na_rm,normalize_wts=nw),NA)
expect_equal(box,tbox,tolerance=1e-8)
expect_error(box <- running_kurt(x,wts=wts,window=window,na_rm=na_rm,normalize_wts=nw),NA)
# the 0.1 is to avoid roundoff issues on the double times.
expect_error(tbox <- t_running_kurt(x,time=times,wts=wts,window=t_window,na_rm=na_rm,normalize_wts=nw),NA)
expect_equal(box,tbox,tolerance=1e-8)
expect_error(box <- running_sd3(x,wts=wts,window=window,na_rm=na_rm,normalize_wts=nw),NA)
# the 0.1 is to avoid roundoff issues on the double times.
expect_error(tbox <- t_running_sd3(x,time=times,wts=wts,window=t_window,na_rm=na_rm,normalize_wts=nw),NA)
expect_equal(box,tbox,tolerance=1e-8)
expect_error(box <- running_skew4(x,wts=wts,window=window,na_rm=na_rm,normalize_wts=nw),NA)
# the 0.1 is to avoid roundoff issues on the double times.
expect_error(tbox <- t_running_skew4(x,time=times,wts=wts,window=t_window,na_rm=na_rm,normalize_wts=nw),NA)
expect_equal(box,tbox,tolerance=1e-8)
expect_error(box <- running_kurt5(x,wts=wts,window=window,na_rm=na_rm,normalize_wts=nw),NA)
# the 0.1 is to avoid roundoff issues on the double times.
expect_error(tbox <- t_running_kurt5(x,time=times,wts=wts,window=t_window,na_rm=na_rm,normalize_wts=nw),NA)
expect_equal(box,tbox,tolerance=1e-8)
expect_error(box <- running_centered(x,wts=wts,window=window,na_rm=na_rm,normalize_wts=nw),NA)
# the 0.1 is to avoid roundoff issues on the double times.
expect_error(tbox <- t_running_centered(x,time=times,wts=wts,window=t_window,na_rm=na_rm,normalize_wts=nw),NA)
expect_equal(box,tbox,tolerance=1e-8)
expect_error(box <- running_scaled(x,wts=wts,window=window,na_rm=na_rm,normalize_wts=nw),NA)
# the 0.1 is to avoid roundoff issues on the double times.
expect_error(tbox <- t_running_scaled(x,time=times,wts=wts,window=t_window,na_rm=na_rm,normalize_wts=nw),NA)
expect_equal(box,tbox,tolerance=1e-8)
expect_error(box <- running_zscored(x,wts=wts,window=window,na_rm=na_rm,normalize_wts=nw),NA)
# the 0.1 is to avoid roundoff issues on the double times.
expect_error(tbox <- t_running_zscored(x,time=times,wts=wts,window=t_window,na_rm=na_rm,normalize_wts=nw),NA)
expect_equal(box,tbox,tolerance=1e-8)
expect_error(box <- running_tstat(x,wts=wts,window=window,na_rm=na_rm,normalize_wts=nw),NA)
# the 0.1 is to avoid roundoff issues on the double times.
expect_error(tbox <- t_running_tstat(x,time=times,wts=wts,window=t_window,na_rm=na_rm,normalize_wts=nw),NA)
expect_equal(box,tbox,tolerance=1e-8)
for (cse in c(TRUE,FALSE)) {
expect_error(box <- running_sharpe(x,wts=wts,window=window,na_rm=na_rm,compute_se=cse,normalize_wts=nw),NA)
# the 0.1 is to avoid roundoff issues on the double times.
expect_error(tbox <- t_running_sharpe(x,time=times,wts=wts,window=t_window,na_rm=na_rm,compute_se=cse,normalize_wts=nw),NA)
expect_equal(box,tbox,tolerance=1e-8)
}
expect_error(box <- running_apx_median(x,wts=wts,window=window,na_rm=na_rm,normalize_wts=nw),NA)
# the 0.1 is to avoid roundoff issues on the double times.
expect_error(tbox <- t_running_apx_median(x,time=times,wts=wts,window=t_window,na_rm=na_rm,normalize_wts=nw),NA)
expect_equal(box,tbox,tolerance=1e-8)
expect_error(box <- running_apx_quantiles(x,ptiles,max_order=3,wts=wts,window=window,na_rm=na_rm,normalize_wts=nw),NA)
# the 0.1 is to avoid roundoff issues on the double times.
expect_error(tbox <- t_running_apx_quantiles(x,ptiles,max_order=3,time=times,wts=wts,window=t_window,na_rm=na_rm,normalize_wts=nw),NA)
expect_equal(box,tbox,tolerance=1e-8)
}
}# UNFOLD
}
}
})#UNFOLD
context("t_running vs slow version")
test_that("check em",{#FOLDUP
skip_on_cran()
# Compares the fast time-based t_running_* implementations against their
# slow reference implementations (slow_t_running_*), over random times,
# weights, windows, and lookback times.
set.char.seed("91b0bd37-0b8e-49d6-8333-039a7d7f7dd5")
na_rm <- FALSE
for (xlen in c(40,90)) {# FOLDUP
x <- rnorm(xlen)
for (times in list(NULL,cumsum(runif(length(x),min=0.2,max=0.4)))) {
for (wts in list(NULL,rep(1L,xlen),runif(xlen,min=1.1,max=2.1))) {
# with no explicit times, the weights may serve as time increments
wts_as_delta <- is.null(times) & !is.null(wts)
if (!is.null(times) || (wts_as_delta && !is.null(wts))) {
for (window in c(11.5,20.5,Inf)) { # FOLDUP
for (lb_time in list(NULL,3+cumsum(runif(10,min=0.4,max=1.1)))) {
slow <- slow_t_running_sum(x,time=times,wts=wts,window=window,lb_time=lb_time,na_rm=na_rm,wts_as_delta=wts_as_delta)
expect_error(fast <- t_running_sum(x,time=times,wts=wts,window=window,lb_time=lb_time,na_rm=na_rm,wts_as_delta=wts_as_delta),NA)
expect_equal(fast,slow,tolerance=1e-8)
slow <- slow_t_running_mean(x,time=times,wts=wts,window=window,lb_time=lb_time,na_rm=na_rm,wts_as_delta=wts_as_delta)
expect_error(fast <- t_running_mean(x,time=times,wts=wts,window=window,lb_time=lb_time,na_rm=na_rm,wts_as_delta=wts_as_delta),NA)
expect_equal(fast,slow,tolerance=1e-8)
for (nw in c(TRUE,FALSE)) {
slow <- slow_t_running_sd(x,time=times,wts=wts,window=window,lb_time=lb_time,na_rm=na_rm,wts_as_delta=wts_as_delta,normalize_wts=nw)
expect_error(fast <- t_running_sd(x,time=times,wts=wts,window=window,lb_time=lb_time,min_df=1,na_rm=na_rm,wts_as_delta=wts_as_delta,normalize_wts=nw),NA)
expect_equal(fast,slow,tolerance=1e-8)
slow <- slow_t_running_skew(x,time=times,wts=wts,window=window,lb_time=lb_time,na_rm=na_rm,wts_as_delta=wts_as_delta,normalize_wts=nw)
expect_error(fast <- t_running_skew(x,time=times,wts=wts,window=window,lb_time=lb_time,na_rm=na_rm,wts_as_delta=wts_as_delta,normalize_wts=nw),NA)
expect_equal(fast,slow,tolerance=1e-8)
slow <- slow_t_running_kurt(x,time=times,wts=wts,window=window,lb_time=lb_time,na_rm=na_rm,wts_as_delta=wts_as_delta,normalize_wts=nw)
expect_error(fast <- t_running_kurt(x,time=times,wts=wts,window=window,lb_time=lb_time,na_rm=na_rm,wts_as_delta=wts_as_delta,normalize_wts=nw),NA)
expect_equal(fast,slow,tolerance=1e-8)
slow <- slow_t_running_sd3(x,time=times,wts=wts,window=window,lb_time=lb_time,na_rm=na_rm,wts_as_delta=wts_as_delta,normalize_wts=nw)
expect_error(fast <- t_running_sd3(x,time=times,wts=wts,window=window,lb_time=lb_time,na_rm=na_rm,wts_as_delta=wts_as_delta,normalize_wts=nw),NA)
# ignore the df computation in slow when empty
slow[fast[,3]==0,3] <- 0
slow[is.na(fast[,1]),1] <- NA
expect_equal(fast,slow,tolerance=1e-8)
slow <- slow_t_running_skew4(x,time=times,wts=wts,window=window,lb_time=lb_time,na_rm=na_rm,normalize_wts=nw)
expect_error(fast <- t_running_skew4(x,time=times,wts=wts,window=window,lb_time=lb_time,na_rm=na_rm,normalize_wts=nw),NA)
# ignore the df computation in slow when empty
# only compare rows with enough observations for skew to be defined
okrow <- !is.na(fast[,4]) & fast[,4] > 3 & row(fast)[,4] > 3
expect_equal(fast[okrow,],slow[okrow,],tolerance=1e-8)
slow <- slow_t_running_kurt5(x,time=times,wts=wts,window=window,lb_time=lb_time,na_rm=na_rm,normalize_wts=nw)
expect_error(fast <- t_running_kurt5(x,time=times,wts=wts,window=window,lb_time=lb_time,na_rm=na_rm,normalize_wts=nw),NA)
# only compare rows with enough observations for kurtosis to be defined
okrow <- !is.na(fast[,5]) & fast[,5] > 4 & row(fast)[,5] > 4
expect_equal(fast[okrow,],slow[okrow,],tolerance=1e-8)
}
}
}# UNFOLD
}
}
}
}# UNFOLD
})#UNFOLD
context("t_running_sd")
# t_running_sd serves as a canary for the rest of the time-based
# Welford-style methods: as it goes, so go the others.
test_that("check it",{#FOLDUP
skip_on_cran()
set.char.seed("79f60eda-7799-46e6-9096-6817b2d4473b")
na_rm <- FALSE
for (nobs in c(20,50)) {# FOLDUP
xvec <- rnorm(nobs)
for (time_vec in list(NULL,cumsum(runif(length(xvec),min=0.2,max=0.4)))) {
for (weights in list(NULL,rep(1L,nobs),runif(nobs,min=1.2,max=2.1))) {
wts_as_delta <- is.null(time_vec) & !is.null(weights)
# need either explicit times, or weights interpretable as time deltas
if (is.null(time_vec) && !(wts_as_delta && !is.null(weights))) next
for (win in c(11.5,20.5,Inf)) { # FOLDUP
for (lookback in list(NULL,cumsum(runif(20,min=0.2,max=1)))) {
for (do_norm in c(TRUE,FALSE)) {
expect_error(slow <- reference_t_running_sd(xvec,time=time_vec,wts=weights,wts_as_delta=TRUE,window=win,lb_time=lookback,na_rm=na_rm,min_df=1,normalize_wts=do_norm),NA)
expect_error(fast <- t_running_sd(xvec,time=time_vec,wts=weights,wts_as_delta=TRUE,used_df=1,window=win,lb_time=lookback,min_df=1,na_rm=na_rm,normalize_wts=do_norm),NA)
expect_equal(fast,slow,tolerance=1e-7)
}
}
}# UNFOLD
}
}
}# UNFOLD
})#UNFOLD
#for vim modeline: (do not edit)
# vim:ts=2:sw=2:tw=79:fdm=marker:fmr=FOLDUP,UNFOLD:cms=#%s:syn=r:ft=r:ai:si:cin:nu:fo=croql:cino=p0t0c5(0:
|
Example: Multiplication Table
# R Program to find the multiplication table (from 1 to 10)
# take input from the user
num = as.integer(readline(prompt = "Enter a number: "))
# use for loop to iterate 10 times
for(i in 1:10) {
print(paste(num,'x', i, '=', num*i))
}
# Output
# Enter a number: 7
# [1] "7 x 1 = 7"
# [1] "7 x 2 = 14"
# [1] "7 x 3 = 21"
# [1] "7 x 4 = 28"
# [1] "7 x 5 = 35"
# [1] "7 x 6 = 42"
# [1] "7 x 7 = 49"
# [1] "7 x 8 = 56"
# [1] "7 x 9 = 63"
# [1] "7 x 10 = 70"
| /R_Programming/MultiplicationTable.R | no_license | Yaseen549/r-programming-snippets | R | false | false | 466 | r | Example: Multiplication Table
# R Program to find the multiplicationtable (from 1 to 10)
# take input from the user
num = as.integer(readline(prompt = "Enter a number: "))
# use for loop to iterate 10 times
for(i in 1:10) {
print(paste(num,'x', i, '=', num*i))
}
/*
Output
Enter a number: 7
[1] "7 x 1 = 7"
[1] "7 x 2 = 14"
[1] "7 x 3 = 21"
[1] "7 x 4 = 28"
[1] "7 x 5 = 35"
[1] "7 x 6 = 42"
[1] "7 x 7 = 49"
[1] "7 x 8 = 56"
[1] "7 x 9 = 63"
[1] "7 x 10 = 70"
*/
|
# ---- New Strain Collection ----
#' Create new strain collection
#'
#' A convenience function to create a new strain collection keyfile
#'
#' @param id ID of new library. This ID needs to be unique in the database.
#' @param nplates Number of plates in the library.
#' @param format Size of plates. Defaults to 96.
#' @param dim Aspect ratio of rows to columns. Defaults to \code{c(2, 3)}.
#'
#' @return Called for its side effect: writes \code{<id>.csv} to the working
#'   directory, one row per well with columns strain_collection_id, strain_id,
#'   plate, row, column, plate_control and strain_collection_notes. The return
#'   value is that of \code{write.csv} and is not meaningful.
#'
#' @export
new_strain_collection <- function(id, nplates, format = 96, dim = c(2, 3)) {
# derive plate dimensions from the well count and aspect ratio; e.g.
# format = 96 with dim = c(2, 3) gives 8 rows x 12 columns
nrow <- dim[1] * sqrt(format / prod(dim))
ncol <- dim[2] * sqrt(format / prod(dim))
# deliberately shadows base LETTERS locally, extending the labels past 'Z'
# when a plate has more than 26 rows
LETTERS <- expand_letters(nrow, LETTERS)
data_frame(
strain_collection_id = id,
strain_id = '',
# wells are emitted plate by plate, row-major within each plate
plate = (1:nplates) %>% rep(each = format),
row = (LETTERS[1:(format / ncol)]) %>% rep(times = nplates, each = ncol),
column = (1:(format / nrow)) %>% rep(length.out = format * nplates),
plate_control = FALSE,
strain_collection_notes = '') %>%
write.csv(file = paste0(id, '.csv'), row.names = FALSE)
}
| /R/new-templates.R | no_license | EricBryantPhD/screenmill | R | false | false | 1,011 | r | # ---- New Strain Collection ----
#' Create new strain collection
#'
#' A convenience function to create a new strain collection keyfile
#'
#' @param id ID of new library. This ID needs to be unique in the database.
#' @param nplates Number of plates in the library.
#' @param format Size of plates. Defaults to 96.
#' @param dim Aspect ratio of rows to columns. Defaults to \code{c(2, 3)}.
#'
#' @export
new_strain_collection <- function(id, nplates, format = 96, dim = c(2, 3)) {
nrow <- dim[1] * sqrt(format / prod(dim))
ncol <- dim[2] * sqrt(format / prod(dim))
LETTERS <- expand_letters(nrow, LETTERS)
data_frame(
strain_collection_id = id,
strain_id = '',
plate = (1:nplates) %>% rep(each = format),
row = (LETTERS[1:(format / ncol)]) %>% rep(times = nplates, each = ncol),
column = (1:(format / nrow)) %>% rep(length.out = format * nplates),
plate_control = FALSE,
strain_collection_notes = '') %>%
write.csv(file = paste0(id, '.csv'), row.names = FALSE)
}
|
shinyUI(
bootstrapPage(
verbatimTextOutput("queryText"),
p("On this page you can find your personal research data. How would you interpret this?"),
plotOutput("plot")
)
) | /interface/ui.R | no_license | KarelVerbrugge/loopedlogging | R | false | false | 243 | r | shinyUI(
bootstrapPage(
verbatimTextOutput("queryText"),
p("On this page you can find your personal research data. How would you interpret this?"),
plotOutput("plot")
)
) |
predict.mpr <-
function(object, newdata, type=c("survivor", "hazard", "percentile"), tvec, prob=0.5, ...){
# Predict method for fitted 'mpr' models.
#
# object: a fitted mpr model (provides $model$family, $formula, $xvars,
# $xlevels and coefficients beta/alpha/tau via coef())
# newdata: data frame of covariates to predict for
# type: "survivor"/"hazard" evaluate the curve at the times in 'tvec';
# "percentile" evaluates famlist$sim at 1-prob (presumably the time
# at which the survivor function equals 1-prob -- prob = 0.5 gives
# the median survival time; TODO confirm against famlist$sim)
# returns: a matrix with one row per row of 'newdata'
family <- match.arg(object$model$family, names(mprdists))
famlist <- mprdists[family][[1]]
ncomp <- famlist$ncomp
# NOTE(review): ncomp is looked up but not used below
est <- coef(object)
beta <- est$beta
alpha <- est$alpha
tau <- est$tau
# the model formula has a three-part right-hand side, one sub-formula per
# distributional component (beta, alpha, tau respectively)
formula <- object$formula
rhs <- eval(formula[[3]])
formb <- rhs[[1]]
forma <- rhs[[2]]
formt <- rhs[[3]]
# verify that every variable used at fit time is present in newdata
xvars <- object$xvars
xlevels <- object$xlevels
xfac <- names(object$xlevels)
newnam <- colnames(newdata)
mvars <- match(xvars, newnam)
vna <- is.na(mvars)
if(any(vna)){
errmess <- paste("The following variables not found:",
paste(xvars[vna], collapse=", ") )
stop(errmess)
}
# coerce newdata columns to the types seen at fit time: factor for variables
# with stored levels, numeric for everything else
nums <- match(setdiff(xvars,xfac), newnam)
facs <- match(xfac, newnam)
if(length(nums) > 0){
for(i in 1:length(nums)){
newdata[,nums[i]] <- as.numeric(newdata[,nums[i]])
}
}
if(length(facs) > 0){
for(i in 1:length(facs)){
newdata[,facs[i]] <- as.factor(newdata[,facs[i]])
}
}
# per-component factor levels, so model.matrix builds contrasts consistent
# with those used during fitting
blevels <- xlevels[match(attr(terms(formb), "term.labels"), xfac)]
alevels <- xlevels[match(attr(terms(forma), "term.labels"), xfac)]
tlevels <- xlevels[match(attr(terms(formt), "term.labels"), xfac)]
if(!is.null(blevels)){ blevels <- blevels[!is.na(names(blevels))] }
if(!is.null(alevels)){ alevels <- alevels[!is.na(names(alevels))] }
if(!is.null(tlevels)){ tlevels <- tlevels[!is.na(names(tlevels))] }
Xb <- model.matrix(terms(formb), data=newdata, xlev=blevels)
Xa <- model.matrix(terms(forma), data=newdata, xlev=alevels)
Xt <- model.matrix(terms(formt), data=newdata, xlev=tlevels)
# one row per observation: the three linear predictors for that observation
parmat <- cbind(Xb%*%beta, Xa%*%alpha, Xt%*%tau)
type <- match.arg(type)
switch(type,
survivor = {
# vectorize the family's survivor function over the requested times
mprsurv <- Vectorize(famlist$surv, vectorize.args="ti")
out <- mprsurv(parmat, tvec)
},
hazard = {
mprhaz <- Vectorize(famlist$haz, vectorize.args="ti")
out <- mprhaz(parmat, tvec)
},
percentile = {
out <- as.matrix(famlist$sim(parmat, 1-prob))
}, )
out
}
| /R/predict.mpr.R | no_license | cran/mpr | R | false | false | 2,272 | r | predict.mpr <-
function(object, newdata, type=c("survivor", "hazard", "percentile"), tvec, prob=0.5, ...){
family <- match.arg(object$model$family, names(mprdists))
famlist <- mprdists[family][[1]]
ncomp <- famlist$ncomp
est <- coef(object)
beta <- est$beta
alpha <- est$alpha
tau <- est$tau
formula <- object$formula
rhs <- eval(formula[[3]])
formb <- rhs[[1]]
forma <- rhs[[2]]
formt <- rhs[[3]]
xvars <- object$xvars
xlevels <- object$xlevels
xfac <- names(object$xlevels)
newnam <- colnames(newdata)
mvars <- match(xvars, newnam)
vna <- is.na(mvars)
if(any(vna)){
errmess <- paste("The following variables not found:",
paste(xvars[vna], collapse=", ") )
stop(errmess)
}
nums <- match(setdiff(xvars,xfac), newnam)
facs <- match(xfac, newnam)
if(length(nums) > 0){
for(i in 1:length(nums)){
newdata[,nums[i]] <- as.numeric(newdata[,nums[i]])
}
}
if(length(facs) > 0){
for(i in 1:length(facs)){
newdata[,facs[i]] <- as.factor(newdata[,facs[i]])
}
}
blevels <- xlevels[match(attr(terms(formb), "term.labels"), xfac)]
alevels <- xlevels[match(attr(terms(forma), "term.labels"), xfac)]
tlevels <- xlevels[match(attr(terms(formt), "term.labels"), xfac)]
if(!is.null(blevels)){ blevels <- blevels[!is.na(names(blevels))] }
if(!is.null(alevels)){ alevels <- alevels[!is.na(names(alevels))] }
if(!is.null(tlevels)){ tlevels <- tlevels[!is.na(names(tlevels))] }
Xb <- model.matrix(terms(formb), data=newdata, xlev=blevels)
Xa <- model.matrix(terms(forma), data=newdata, xlev=alevels)
Xt <- model.matrix(terms(formt), data=newdata, xlev=tlevels)
parmat <- cbind(Xb%*%beta, Xa%*%alpha, Xt%*%tau)
type <- match.arg(type)
switch(type,
survivor = {
mprsurv <- Vectorize(famlist$surv, vectorize.args="ti")
out <- mprsurv(parmat, tvec)
},
hazard = {
mprhaz <- Vectorize(famlist$haz, vectorize.args="ti")
out <- mprhaz(parmat, tvec)
},
percentile = {
out <- as.matrix(famlist$sim(parmat, 1-prob))
}, )
out
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cogmapr-indic.R
\name{ConceptCentrality}
\alias{ConceptCentrality}
\title{Centralities of concepts}
\usage{
ConceptCentrality(project, filters = NULL, units = "all", weighted.icm = FALSE)
}
\arguments{
\item{project}{A QDA project, a list as generated by the ProjectCMap function.}
\item{filters}{A list of named strings that will filter the relationships showed in the SCM. e.g. =list(coding_class = "A_coding_class", document_part = "A_document_part")=. To date, these filters are linked to the nature of relationships.}
\item{units}{A string vector giving the names of the units (i.e. classes linked to documents) that will be included in the SCM. It is a second type of filter.}
\item{weighted.icm}{A boolean. If FALSE, the weight of the relationships in the ICM will be fixed to 1.}
}
\value{
A data frame with the value of the centrality (n) of vertices.
}
\description{
Compute the centrality of concepts
}
\details{
Compute the centrality of concepts
}
\examples{
project_name <- "a_new_project"
main_path <- paste0(system.file("testdata", package = "cogmapr"), '/')
my.project <- ProjectCMap(main_path, project_name)
ConceptCentrality(my.project)
}
| /man/ConceptCentrality.Rd | no_license | cran/cogmapr | R | false | true | 1,240 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cogmapr-indic.R
\name{ConceptCentrality}
\alias{ConceptCentrality}
\title{Centralities of concepts}
\usage{
ConceptCentrality(project, filters = NULL, units = "all", weighted.icm = FALSE)
}
\arguments{
\item{project}{A QDA project, a list as generated by the ProjectCMap function.}
\item{filters}{A list of named strings that will filter the relationships showed in the SCM. e.g. =list(coding_class = "A_coding_class", document_part = "A_document_part")=. To date, these filters are linked to the nature of relationships.}
\item{units}{A string vector giving the names of the units (i.e. classes linked to documents) that will be include in the SCM. It is a second type of filter.}
\item{weighted.icm}{A boolean. If FALSE, the weight of the relationships in the ICM will be fixed to 1.}
}
\value{
A data frame with the value of the centrality (n) of vertices.
}
\description{
Compute the centrality of concepts
}
\details{
Compute the centrality of concepts
}
\examples{
project_name <- "a_new_project"
main_path <- paste0(system.file("testdata", package = "cogmapr"), '/')
my.project <- ProjectCMap(main_path, project_name)
ConceptCentrality(my.project)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/logit.R
\name{logit}
\alias{logit}
\title{logit function}
\usage{
logit(p)
}
\arguments{
\item{p}{a proportion}
}
\value{
the logit of a proportion
}
\description{
logit function
}
\examples{
logit(.5)
}
| /man/logit.Rd | no_license | AurMad/STOCfree | R | false | true | 283 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/logit.R
\name{logit}
\alias{logit}
\title{logit function}
\usage{
logit(p)
}
\arguments{
\item{p}{a proportion}
}
\value{
the logit of a proportion
}
\description{
logit function
}
\examples{
logit(.5)
}
|
context("test-utils.R")
test_that("make_atac_cds makes a valid cds object", {
#skip_on_bioc()
data("cicero_data")
#### make_atac_cds ####
# default (non-binarized) construction from the packaged example data
test_cds <- make_atac_cds(cicero_data)
expect_is(test_cds, "CellDataSet")
expect_equal(nrow(exprs(test_cds)), 6146)
expect_equal(ncol(exprs(test_cds)), 200)
expect_match(row.names(test_cds)[1], "chr18_10025_10225")
expect_match(colnames(test_cds)[1], "AGCGATAGAACGAATTCGGCGCAATGACCCTATCCT")
expect_is(exprs(test_cds), "dgCMatrix")
# binarize=TRUE should not change dimensions, names, or sparse storage
test_cds <-make_atac_cds(cicero_data, binarize=TRUE)
expect_is(test_cds, "CellDataSet")
expect_equal(nrow(exprs(test_cds)), 6146)
expect_equal(ncol(exprs(test_cds)), 200)
expect_match(row.names(test_cds)[1], "chr18_10025_10225")
expect_match(colnames(test_cds)[1], "AGCGATAGAACGAATTCGGCGCAATGACCCTATCCT")
expect_is(exprs(test_cds), "dgCMatrix")
# invalid input type should raise an informative error
expect_error(test_cds <- make_atac_cds(3),
"Input must be file path, matrix, or data.frame")
# construction from a file path (fixture relative to the test directory)
test_cds <-make_atac_cds("../cicero_data_sub.txt", binarize=TRUE)
expect_is(test_cds, "CellDataSet")
expect_equal(nrow(exprs(test_cds)), 2148)
expect_equal(ncol(exprs(test_cds)), 7)
expect_match(row.names(test_cds)[1], "chr18_10025_10225")
expect_match(colnames(test_cds)[1], "AGCGATAGGCGCTATGGTGGAATTCAGTCAGGACGT")
expect_is(exprs(test_cds), "dgCMatrix")
})
#### ranges_for_coords ####
test_that("ranges_for_coords works", {
#skip_on_bioc()
wn <- ranges_for_coords("chr1:2039-30239", with_names = TRUE)
wmd <- ranges_for_coords(c("chr1:2049-203902", "chrX:489249-1389389"),
meta_data_df = data.frame(dat = c("1", "X")))
wmdn <- ranges_for_coords(c("chr1:2049-203902", "chrX:489249-1389389"),
with_names = TRUE,
meta_data_df = data.frame(dat = c("1", "X"),
stringsAsFactors = FALSE))
expect_is(ranges_for_coords("chr1_2039_30239"), "GRanges")
expect_is(ranges_for_coords("chr1:2039:30239"), "GRanges")
expect_is(ranges_for_coords("chr1-2039-30239"), "GRanges")
expect_is(ranges_for_coords("chr1:2,039-30,239"), "GRanges")
expect_is(ranges_for_coords(c("chr1:2,039-30,239", "chrX:28884:101293")),
"GRanges")
expect_is(ranges_for_coords(c("chr1:2,039-30,239", "chrX:28884:101293"),
with_names = TRUE), "GRanges")
expect_is(wn, "GRanges")
expect_is(wmd, "GRanges")
expect_match(wn$coord_string, "chr1:2039-30239")
expect_match(as.character(wmd$dat[2]), "X")
expect_match(wmdn$coord_string[1], "chr1:2049-203902")
expect_match(as.character(wmdn$dat[2]), "X")
})
#### df_for_coords ####
test_that("df_for_coords works", {
#skip_on_bioc()
expect_is(df_for_coords(c("chr1:2,039-30,239", "chrX:28884:101293")),
"data.frame")
expect_equal(df_for_coords(c("chr1:2,039-30,239",
"chrX:28884:101293"))$bp2[1], 30239)
})
#### annotate_cds_by_site ####
test_that("annotate_cds_by_site works", {
#skip_on_bioc()
data("cicero_data")
#### make_atac_cds ####
test_cds <- make_atac_cds(cicero_data)
feat <- data.frame(chr = c("chr18", "chr18", "chr18", "chr18"),
bp1 = c(10000, 10800, 50000, 100000),
bp2 = c(10700, 11000, 60000, 110000),
type = c("Acetylated", "Methylated",
"Acetylated", "Methylated"),
stringsAsFactors = FALSE)
test_cds2 <- annotate_cds_by_site(test_cds, feat, verbose = TRUE)
test_cds3 <- annotate_cds_by_site(test_cds, feat, all=TRUE, verbose = TRUE)
expect_is(test_cds2, "CellDataSet")
expect_is(test_cds3, "CellDataSet")
expect_equal(nrow(fData(test_cds2)), nrow(fData(test_cds)))
expect_equal(nrow(fData(test_cds3)), nrow(fData(test_cds)))
expect_equal(ncol(fData(test_cds2)), ncol(fData(test_cds)) + 2)
expect_equal(ncol(fData(test_cds3)), ncol(fData(test_cds)) + 2)
expect_equal(fData(test_cds2)$overlap[2], 201)
expect_equal(fData(test_cds3)$overlap[2], "98,201")
expect_equal(fData(test_cds2)$type[2], "Methylated")
expect_equal(fData(test_cds3)$type[2], "Acetylated,Methylated")
expect_true(is.na(fData(test_cds2)$overlap[3]))
expect_true(is.na(fData(test_cds3)$overlap[3]))
expect_true(is.na(fData(test_cds2)$type[3]))
expect_true(is.na(fData(test_cds3)$type[3]))
test_cds2 <- annotate_cds_by_site(test_cds, feat)
test_cds3 <- annotate_cds_by_site(test_cds, feat, all=TRUE)
expect_is(test_cds2, "CellDataSet")
expect_is(test_cds3, "CellDataSet")
expect_equal(nrow(fData(test_cds2)), nrow(fData(test_cds)))
expect_equal(nrow(fData(test_cds3)), nrow(fData(test_cds)))
expect_equal(ncol(fData(test_cds2)), ncol(fData(test_cds)) + 2)
expect_equal(ncol(fData(test_cds3)), ncol(fData(test_cds)) + 2)
expect_equal(fData(test_cds2)$overlap[2], 201)
expect_equal(fData(test_cds3)$overlap[2], "98,201")
expect_equal(fData(test_cds2)$type[2], "Methylated")
expect_equal(fData(test_cds3)$type[2], "Acetylated,Methylated")
expect_true(is.na(fData(test_cds2)$overlap[3]))
expect_true(is.na(fData(test_cds3)$overlap[3]))
expect_true(is.na(fData(test_cds2)$type[3]))
expect_true(is.na(fData(test_cds3)$type[3]))
test_cds2 <- annotate_cds_by_site(test_cds, "../feat.txt", verbose =TRUE)
test_cds3 <- annotate_cds_by_site(test_cds, "../feat.txt", all=TRUE)
expect_is(test_cds2, "CellDataSet")
expect_is(test_cds3, "CellDataSet")
expect_equal(nrow(fData(test_cds2)), nrow(fData(test_cds)))
expect_equal(nrow(fData(test_cds3)), nrow(fData(test_cds)))
expect_equal(ncol(fData(test_cds2)), ncol(fData(test_cds)) + 2)
expect_equal(ncol(fData(test_cds3)), ncol(fData(test_cds)) + 2)
expect_equal(fData(test_cds2)$overlap[2], 201)
expect_equal(fData(test_cds3)$overlap[2], "98,201")
expect_equal(fData(test_cds2)$V4[2], "Methylated")
expect_equal(fData(test_cds3)$V4[2], "Acetylated,Methylated")
expect_true(is.na(fData(test_cds2)$overlap[3]))
expect_true(is.na(fData(test_cds3)$overlap[3]))
expect_true(is.na(fData(test_cds2)$V4[3]))
expect_true(is.na(fData(test_cds3)$V4[3]))
test_cds2 <- annotate_cds_by_site(test_cds, "../feat_head.txt",
header = TRUE)
test_cds3 <- annotate_cds_by_site(test_cds, "../feat_head.txt",
header = TRUE, all=TRUE)
expect_is(test_cds2, "CellDataSet")
expect_is(test_cds3, "CellDataSet")
expect_equal(nrow(fData(test_cds2)), nrow(fData(test_cds)))
expect_equal(nrow(fData(test_cds3)), nrow(fData(test_cds)))
expect_equal(ncol(fData(test_cds2)), ncol(fData(test_cds)) + 2)
expect_equal(ncol(fData(test_cds3)), ncol(fData(test_cds)) + 2)
expect_equal(fData(test_cds2)$overlap[2], 201)
expect_equal(fData(test_cds3)$overlap[2], "98,201")
expect_equal(fData(test_cds2)$type[2], "Methylated")
expect_equal(fData(test_cds3)$type[2], "Acetylated,Methylated")
expect_true(is.na(fData(test_cds2)$overlap[3]))
expect_true(is.na(fData(test_cds3)$overlap[3]))
expect_true(is.na(fData(test_cds2)$type[3]))
expect_true(is.na(fData(test_cds3)$type[3]))
# check tie
feat2 <- data.frame(chr = c("chr18", "chr18", "chr18", "chr18"),
bp1 = c(10125, 10125, 50000, 100000),
bp2 = c(10703, 10703, 60000, 110000),
type = c("Acetylated", "Methylated",
"Acetylated", "Methylated"),
stringsAsFactors = FALSE)
test_cds2 <- annotate_cds_by_site(test_cds, feat2, all=FALSE)
expect_equal(fData(test_cds2)$type[2], "Acetylated")
test_cds2 <- annotate_cds_by_site(test_cds, feat2, all=FALSE, maxgap = 901)
expect_equal(fData(test_cds2)$type[3], "Acetylated")
# check maxgap = "nearest"
test_cds2 <- annotate_cds_by_site(test_cds, feat2, all=FALSE, maxgap = "nearest")
expect_equal(sum(is.na(fData(test_cds2)$type)), 0)
})
#### make_sparse_matrix ####
test_that("make_sparse_matrix works", {
#skip_on_bioc()
df <- data.frame(icol = c("chr18_30209631_30210783",
"chr18_45820294_45821666",
"chr18_32820116_32820994"),
jcol = c("chr18_41888433_41890138",
"chr18_33038287_33039444",
"chr18_25533921_25534483"),
xcol = c(1,2,3))
sm <- make_sparse_matrix(df, "icol", "jcol", "xcol")
expect_equal(sm["chr18_30209631_30210783", "chr18_41888433_41890138"], 1)
expect_equal(sm["chr18_45820294_45821666", "chr18_33038287_33039444"], 2)
expect_equal(sm["chr18_25533921_25534483", "chr18_32820116_32820994"], 3)
expect_equal(sm["chr18_25533921_25534483", "chr18_30209631_30210783"], 0)
expect_error(make_sparse_matrix(df, "icol", "xcol", "jcol"),
"x.name column must be numeric")
expect_error(make_sparse_matrix(df, "icol", "hannah", "jcol"),
"i.name, j.name, and x.name must be columns in data")
})
#### compare_connections ####
# IN test-runCicero.R
#### find_overlapping_coordinates ####
test_that("find_overlapping_coordinates works", {
#skip_on_bioc()
test_coords <- c("chr18_10025_10225", "chr18_10603_11103", "chr18_11604_13986",
"chr18_157883_158536", "chr18_217477_218555",
"chr18_245734_246234")
expect_equal(length(find_overlapping_coordinates(test_coords,
"chr18:10,100-1246234")), 6)
expect_equal(length(find_overlapping_coordinates(test_coords,
"chr18_10227_10601")), 0)
expect_equal(length(find_overlapping_coordinates(test_coords,
"chr18_10227_10601",
maxgap = 1)), 2)
expect_equal(length(find_overlapping_coordinates(test_coords,
c("chr18_10227_10602",
"chr18:11604-246234"))), 5)
expect_equal(length(find_overlapping_coordinates(test_coords,
c("chr18_10226_10602",
"chr18:11604-246234"),
maxgap = 1)), 6)
expect(all(is.na(find_overlapping_coordinates(test_coords,
c("chr19_10226_10602",
"chr19:11604-246234"),
maxgap = 1))))
expect(all(is.na(find_overlapping_coordinates(test_coords,
c("chr18_1022600_1060200",
"chr18:1160400-24623400"),
maxgap = 1))))
})
| /tests/testthat/test-utils.R | permissive | shamoni/cicero-release | R | false | false | 10,875 | r | context("test-utils.R")
# Duplicate copy of the cicero test-utils.R test suite (the context() call
# of this copy is fused into the preceding dataset metadata row).
# NOTE(review): fixture paths such as "../cicero_data_sub.txt" are
# presumably relative to tests/testthat/ -- confirm before running manually.
test_that("make_atac_cds makes a valid cds object", {
#skip_on_bioc()
data("cicero_data")
#### make_atac_cds ####
# Default construction from the bundled example data.
test_cds <- make_atac_cds(cicero_data)
expect_is(test_cds, "CellDataSet")
expect_equal(nrow(exprs(test_cds)), 6146)
expect_equal(ncol(exprs(test_cds)), 200)
expect_match(row.names(test_cds)[1], "chr18_10025_10225")
expect_match(colnames(test_cds)[1], "AGCGATAGAACGAATTCGGCGCAATGACCCTATCCT")
expect_is(exprs(test_cds), "dgCMatrix")
# Binarized construction should preserve dimensions and names.
test_cds <-make_atac_cds(cicero_data, binarize=TRUE)
expect_is(test_cds, "CellDataSet")
expect_equal(nrow(exprs(test_cds)), 6146)
expect_equal(ncol(exprs(test_cds)), 200)
expect_match(row.names(test_cds)[1], "chr18_10025_10225")
expect_match(colnames(test_cds)[1], "AGCGATAGAACGAATTCGGCGCAATGACCCTATCCT")
expect_is(exprs(test_cds), "dgCMatrix")
expect_error(test_cds <- make_atac_cds(3),
"Input must be file path, matrix, or data.frame")
# Construction straight from a file path.
test_cds <-make_atac_cds("../cicero_data_sub.txt", binarize=TRUE)
expect_is(test_cds, "CellDataSet")
expect_equal(nrow(exprs(test_cds)), 2148)
expect_equal(ncol(exprs(test_cds)), 7)
expect_match(row.names(test_cds)[1], "chr18_10025_10225")
expect_match(colnames(test_cds)[1], "AGCGATAGGCGCTATGGTGGAATTCAGTCAGGACGT")
expect_is(exprs(test_cds), "dgCMatrix")
})
#### ranges_for_coords ####
test_that("ranges_for_coords works", {
#skip_on_bioc()
wn <- ranges_for_coords("chr1:2039-30239", with_names = TRUE)
wmd <- ranges_for_coords(c("chr1:2049-203902", "chrX:489249-1389389"),
meta_data_df = data.frame(dat = c("1", "X")))
wmdn <- ranges_for_coords(c("chr1:2049-203902", "chrX:489249-1389389"),
with_names = TRUE,
meta_data_df = data.frame(dat = c("1", "X"),
stringsAsFactors = FALSE))
# Accepts "_", ":", "-" separators and comma-grouped digits.
expect_is(ranges_for_coords("chr1_2039_30239"), "GRanges")
expect_is(ranges_for_coords("chr1:2039:30239"), "GRanges")
expect_is(ranges_for_coords("chr1-2039-30239"), "GRanges")
expect_is(ranges_for_coords("chr1:2,039-30,239"), "GRanges")
expect_is(ranges_for_coords(c("chr1:2,039-30,239", "chrX:28884:101293")),
"GRanges")
expect_is(ranges_for_coords(c("chr1:2,039-30,239", "chrX:28884:101293"),
with_names = TRUE), "GRanges")
expect_is(wn, "GRanges")
expect_is(wmd, "GRanges")
expect_match(wn$coord_string, "chr1:2039-30239")
expect_match(as.character(wmd$dat[2]), "X")
expect_match(wmdn$coord_string[1], "chr1:2049-203902")
expect_match(as.character(wmdn$dat[2]), "X")
})
#### df_for_coords ####
test_that("df_for_coords works", {
#skip_on_bioc()
expect_is(df_for_coords(c("chr1:2,039-30,239", "chrX:28884:101293")),
"data.frame")
expect_equal(df_for_coords(c("chr1:2,039-30,239",
"chrX:28884:101293"))$bp2[1], 30239)
})
#### annotate_cds_by_site ####
test_that("annotate_cds_by_site works", {
#skip_on_bioc()
data("cicero_data")
#### make_atac_cds ####
test_cds <- make_atac_cds(cicero_data)
feat <- data.frame(chr = c("chr18", "chr18", "chr18", "chr18"),
bp1 = c(10000, 10800, 50000, 100000),
bp2 = c(10700, 11000, 60000, 110000),
type = c("Acetylated", "Methylated",
"Acetylated", "Methylated"),
stringsAsFactors = FALSE)
# With all=FALSE only the best-overlapping feature is kept; all=TRUE
# collapses every overlap into comma-separated strings.
test_cds2 <- annotate_cds_by_site(test_cds, feat, verbose = TRUE)
test_cds3 <- annotate_cds_by_site(test_cds, feat, all=TRUE, verbose = TRUE)
expect_is(test_cds2, "CellDataSet")
expect_is(test_cds3, "CellDataSet")
expect_equal(nrow(fData(test_cds2)), nrow(fData(test_cds)))
expect_equal(nrow(fData(test_cds3)), nrow(fData(test_cds)))
expect_equal(ncol(fData(test_cds2)), ncol(fData(test_cds)) + 2)
expect_equal(ncol(fData(test_cds3)), ncol(fData(test_cds)) + 2)
expect_equal(fData(test_cds2)$overlap[2], 201)
expect_equal(fData(test_cds3)$overlap[2], "98,201")
expect_equal(fData(test_cds2)$type[2], "Methylated")
expect_equal(fData(test_cds3)$type[2], "Acetylated,Methylated")
expect_true(is.na(fData(test_cds2)$overlap[3]))
expect_true(is.na(fData(test_cds3)$overlap[3]))
expect_true(is.na(fData(test_cds2)$type[3]))
expect_true(is.na(fData(test_cds3)$type[3]))
# Same expectations without verbose output.
test_cds2 <- annotate_cds_by_site(test_cds, feat)
test_cds3 <- annotate_cds_by_site(test_cds, feat, all=TRUE)
expect_is(test_cds2, "CellDataSet")
expect_is(test_cds3, "CellDataSet")
expect_equal(nrow(fData(test_cds2)), nrow(fData(test_cds)))
expect_equal(nrow(fData(test_cds3)), nrow(fData(test_cds)))
expect_equal(ncol(fData(test_cds2)), ncol(fData(test_cds)) + 2)
expect_equal(ncol(fData(test_cds3)), ncol(fData(test_cds)) + 2)
expect_equal(fData(test_cds2)$overlap[2], 201)
expect_equal(fData(test_cds3)$overlap[2], "98,201")
expect_equal(fData(test_cds2)$type[2], "Methylated")
expect_equal(fData(test_cds3)$type[2], "Acetylated,Methylated")
expect_true(is.na(fData(test_cds2)$overlap[3]))
expect_true(is.na(fData(test_cds3)$overlap[3]))
expect_true(is.na(fData(test_cds2)$type[3]))
expect_true(is.na(fData(test_cds3)$type[3]))
# Headerless feature file: columns get default V1..V4 names.
test_cds2 <- annotate_cds_by_site(test_cds, "../feat.txt", verbose =TRUE)
test_cds3 <- annotate_cds_by_site(test_cds, "../feat.txt", all=TRUE)
expect_is(test_cds2, "CellDataSet")
expect_is(test_cds3, "CellDataSet")
expect_equal(nrow(fData(test_cds2)), nrow(fData(test_cds)))
expect_equal(nrow(fData(test_cds3)), nrow(fData(test_cds)))
expect_equal(ncol(fData(test_cds2)), ncol(fData(test_cds)) + 2)
expect_equal(ncol(fData(test_cds3)), ncol(fData(test_cds)) + 2)
expect_equal(fData(test_cds2)$overlap[2], 201)
expect_equal(fData(test_cds3)$overlap[2], "98,201")
expect_equal(fData(test_cds2)$V4[2], "Methylated")
expect_equal(fData(test_cds3)$V4[2], "Acetylated,Methylated")
expect_true(is.na(fData(test_cds2)$overlap[3]))
expect_true(is.na(fData(test_cds3)$overlap[3]))
expect_true(is.na(fData(test_cds2)$V4[3]))
expect_true(is.na(fData(test_cds3)$V4[3]))
# Feature file with a header row restores the named "type" column.
test_cds2 <- annotate_cds_by_site(test_cds, "../feat_head.txt",
header = TRUE)
test_cds3 <- annotate_cds_by_site(test_cds, "../feat_head.txt",
header = TRUE, all=TRUE)
expect_is(test_cds2, "CellDataSet")
expect_is(test_cds3, "CellDataSet")
expect_equal(nrow(fData(test_cds2)), nrow(fData(test_cds)))
expect_equal(nrow(fData(test_cds3)), nrow(fData(test_cds)))
expect_equal(ncol(fData(test_cds2)), ncol(fData(test_cds)) + 2)
expect_equal(ncol(fData(test_cds3)), ncol(fData(test_cds)) + 2)
expect_equal(fData(test_cds2)$overlap[2], 201)
expect_equal(fData(test_cds3)$overlap[2], "98,201")
expect_equal(fData(test_cds2)$type[2], "Methylated")
expect_equal(fData(test_cds3)$type[2], "Acetylated,Methylated")
expect_true(is.na(fData(test_cds2)$overlap[3]))
expect_true(is.na(fData(test_cds3)$overlap[3]))
expect_true(is.na(fData(test_cds2)$type[3]))
expect_true(is.na(fData(test_cds3)$type[3]))
# check tie
# Two features with identical coordinates: the first row wins the tie.
feat2 <- data.frame(chr = c("chr18", "chr18", "chr18", "chr18"),
bp1 = c(10125, 10125, 50000, 100000),
bp2 = c(10703, 10703, 60000, 110000),
type = c("Acetylated", "Methylated",
"Acetylated", "Methylated"),
stringsAsFactors = FALSE)
test_cds2 <- annotate_cds_by_site(test_cds, feat2, all=FALSE)
expect_equal(fData(test_cds2)$type[2], "Acetylated")
test_cds2 <- annotate_cds_by_site(test_cds, feat2, all=FALSE, maxgap = 901)
expect_equal(fData(test_cds2)$type[3], "Acetylated")
# check maxgap = "nearest"
test_cds2 <- annotate_cds_by_site(test_cds, feat2, all=FALSE, maxgap = "nearest")
expect_equal(sum(is.na(fData(test_cds2)$type)), 0)
})
#### make_sparse_matrix ####
test_that("make_sparse_matrix works", {
#skip_on_bioc()
df <- data.frame(icol = c("chr18_30209631_30210783",
"chr18_45820294_45821666",
"chr18_32820116_32820994"),
jcol = c("chr18_41888433_41890138",
"chr18_33038287_33039444",
"chr18_25533921_25534483"),
xcol = c(1,2,3))
sm <- make_sparse_matrix(df, "icol", "jcol", "xcol")
# The matrix is symmetric in lookup direction; unlisted pairs are 0.
expect_equal(sm["chr18_30209631_30210783", "chr18_41888433_41890138"], 1)
expect_equal(sm["chr18_45820294_45821666", "chr18_33038287_33039444"], 2)
expect_equal(sm["chr18_25533921_25534483", "chr18_32820116_32820994"], 3)
expect_equal(sm["chr18_25533921_25534483", "chr18_30209631_30210783"], 0)
expect_error(make_sparse_matrix(df, "icol", "xcol", "jcol"),
"x.name column must be numeric")
expect_error(make_sparse_matrix(df, "icol", "hannah", "jcol"),
"i.name, j.name, and x.name must be columns in data")
})
#### compare_connections ####
# IN test-runCicero.R
#### find_overlapping_coordinates ####
test_that("find_overlapping_coordinates works", {
#skip_on_bioc()
test_coords <- c("chr18_10025_10225", "chr18_10603_11103", "chr18_11604_13986",
"chr18_157883_158536", "chr18_217477_218555",
"chr18_245734_246234")
expect_equal(length(find_overlapping_coordinates(test_coords,
"chr18:10,100-1246234")), 6)
expect_equal(length(find_overlapping_coordinates(test_coords,
"chr18_10227_10601")), 0)
expect_equal(length(find_overlapping_coordinates(test_coords,
"chr18_10227_10601",
maxgap = 1)), 2)
expect_equal(length(find_overlapping_coordinates(test_coords,
c("chr18_10227_10602",
"chr18:11604-246234"))), 5)
expect_equal(length(find_overlapping_coordinates(test_coords,
c("chr18_10226_10602",
"chr18:11604-246234"),
maxgap = 1)), 6)
# Non-matching chromosomes or far-away windows yield only NA results.
expect(all(is.na(find_overlapping_coordinates(test_coords,
c("chr19_10226_10602",
"chr19:11604-246234"),
maxgap = 1))))
expect(all(is.na(find_overlapping_coordinates(test_coords,
c("chr18_1022600_1060200",
"chr18:1160400-24623400"),
maxgap = 1))))
})
|
library(dplyr)
# Build an Anki import file: one semicolon-separated record per question.
# Each of "questions", "choices", and "images" supplies one value per line
# and the three files are expected to have matching line counts.
questions <- readLines("questions")
choices <- readLines("choices")
# Zero-padded note ids, e.g. "ch05_0001". seq_along() is used instead of
# 1:length(questions) so an empty input file yields zero ids rather than
# the bogus sequence c(1, 0).
ids <- sprintf("ch05_%04d", seq_along(questions))
title1 <- "ch05"
title2 <- "Get it right"
title3 <- "Complete the sentences"
images <- readLines("images")
# Audio fields are intentionally blank for this deck.
audios <- ""
audiotexts <- ""
tags <- "joke"
# Assemble the records (scalar fields are recycled across rows) and write
# the import file.
t0 <- paste(ids, title1, title2, title3, questions, choices, images, audios, audiotexts, tags, sep = ";")
writeLines(t0, "anki.txt")
print("OUTPUT: anki.txt")
| /scripts/paste_files.R | permissive | mertnuhoglu/anki_english | R | false | false | 435 | r | library(dplyr)
# Build an Anki import file: one semicolon-separated record per question.
# Each of "questions", "choices", and "images" supplies one value per line
# and the three files are expected to have matching line counts.
questions <- readLines("questions")
choices <- readLines("choices")
# Zero-padded note ids, e.g. "ch05_0001". seq_along() is used instead of
# 1:length(questions) so an empty input file yields zero ids rather than
# the bogus sequence c(1, 0).
ids <- sprintf("ch05_%04d", seq_along(questions))
title1 <- "ch05"
title2 <- "Get it right"
title3 <- "Complete the sentences"
images <- readLines("images")
# Audio fields are intentionally blank for this deck.
audios <- ""
audiotexts <- ""
tags <- "joke"
# Assemble the records (scalar fields are recycled across rows) and write
# the import file.
t0 <- paste(ids, title1, title2, title3, questions, choices, images, audios, audiotexts, tags, sep = ";")
writeLines(t0, "anki.txt")
print("OUTPUT: anki.txt")
|
#' Modify column headers in gtsummary tables
#'
#' Column labels can be modified to include calculated statistics;
#' e.g. the N can be dynamically included by wrapping it in curly brackets
#' (following [glue::glue] syntax).
#'
#' @param x gtsummary object, e.g. `tbl_summary` or `tbl_regression`
#' @param stat_by String specifying text to include above the summary statistics
#' stratified by a variable. Only use with stratified `tbl_summary` objects.
#' The following fields are available for use in the
#' headers:
#' * `{n}` number of observations in each group,
#' * `{N}` total number of observations,
#' * `{p}` percentage in each group,
#' * `{level}` the 'by' variable level,
#' * `"fisher.test"` for a Fisher's exact test,
#'
#' Syntax follows [glue::glue],
#' e.g. `stat_by = "**{level}**, N = {n} ({style_percent(p)\%})"`.
#' The `by` argument from the parent `tbl_summary()` cannot be `NULL`.
#' @param ... Specifies column label of any other column in `.$table_body`.
#' Argument is the column name, and the value is the new column header
#' (e.g. `p.value = "Model P-values"`). Use
#' `print(x$table_body)` to see columns available.
#' @param text_interpret indicates whether text will be interpreted as markdown (`"md"`)
#' or HTML (`"html"`). The text is interpreted with the {gt} package's `md()` or
#' `html()` functions. The default is `"md"`, and is ignored when the print engine
#' is not {gt}.
#' @family tbl_summary tools
#' @family tbl_regression tools
#' @family tbl_uvregression tools
#' @family tbl_survival tools
#' @author Daniel D. Sjoberg
#' @examples
#' # Example 1 ----------------------------------
#' modify_header_ex1 <-
#' trial[c("age", "grade", "response")] %>%
#' tbl_summary() %>%
#' modify_header(stat_0 = "**All Patients**, N = {N}")
#'
#' # Example 2 ----------------------------------
#' modify_header_ex2 <-
#' trial[c("age", "grade", "response", "trt")] %>%
#' tbl_summary(by = trt) %>%
#' modify_header(
#' stat_by = "**{level}**, N = {n} ({style_percent(p, symbol = TRUE)})"
#' )
#' @return Function return the same class of gtsummary object supplied
#' @export
#' @section Example Output:
#' \if{html}{Example 1}
#'
#' \if{html}{\figure{modify_header_ex1.png}{options: width=31\%}}
#'
#' \if{html}{Example 2}
#'
#' \if{html}{\figure{modify_header_ex2.png}{options: width=50\%}}
modify_header <- function(x, stat_by = NULL, ..., text_interpret = c("md", "html")) {
  # Capture the user's column/label pairs from `...`. When none were
  # supplied, collapse to NULL so no spurious arguments are forwarded
  # (c(list(...), NULL) leaves the argument list unchanged).
  dots <- list(...)
  if (length(dots) == 0) {
    dots <- NULL
  }
  # Delegate to the internal worker, asking it to save the user's call
  # on the returned object.
  args <- c(
    list(x = x, stat_by = stat_by, text_interpret = text_interpret, .save_call = TRUE),
    dots
  )
  do.call(modify_header_internal, args)
}
| /R/modify_header.R | permissive | ClinicoPath/gtsummary | R | false | false | 2,779 | r | #' Modify column headers in gtsummary tables
#'
#' Column labels can be modified to include calculated statistics;
#' e.g. the N can be dynamically included by wrapping it in curly brackets
#' (following [glue::glue] syntax).
#'
#' @param x gtsummary object, e.g. `tbl_summary` or `tbl_regression`
#' @param stat_by String specifying text to include above the summary statistics
#' stratified by a variable. Only use with stratified `tbl_summary` objects.
#' The following fields are available for use in the
#' headers:
#' * `{n}` number of observations in each group,
#' * `{N}` total number of observations,
#' * `{p}` percentage in each group,
#' * `{level}` the 'by' variable level,
#' * `"fisher.test"` for a Fisher's exact test,
#'
#' Syntax follows [glue::glue],
#' e.g. `stat_by = "**{level}**, N = {n} ({style_percent(p)\%})"`.
#' The `by` argument from the parent `tbl_summary()` cannot be `NULL`.
#' @param ... Specifies column label of any other column in `.$table_body`.
#' Argument is the column name, and the value is the new column header
#' (e.g. `p.value = "Model P-values"`). Use
#' `print(x$table_body)` to see columns available.
#' @param text_interpret indicates whether text will be interpreted as markdown (`"md"`)
#' or HTML (`"html"`). The text is interpreted with the {gt} package's `md()` or
#' `html()` functions. The default is `"md"`, and is ignored when the print engine
#' is not {gt}.
#' @family tbl_summary tools
#' @family tbl_regression tools
#' @family tbl_uvregression tools
#' @family tbl_survival tools
#' @author Daniel D. Sjoberg
#' @examples
#' # Example 1 ----------------------------------
#' modify_header_ex1 <-
#' trial[c("age", "grade", "response")] %>%
#' tbl_summary() %>%
#' modify_header(stat_0 = "**All Patients**, N = {N}")
#'
#' # Example 2 ----------------------------------
#' modify_header_ex2 <-
#' trial[c("age", "grade", "response", "trt")] %>%
#' tbl_summary(by = trt) %>%
#' modify_header(
#' stat_by = "**{level}**, N = {n} ({style_percent(p, symbol = TRUE)})"
#' )
#' @return Function return the same class of gtsummary object supplied
#' @export
#' @section Example Output:
#' \if{html}{Example 1}
#'
#' \if{html}{\figure{modify_header_ex1.png}{options: width=31\%}}
#'
#' \if{html}{Example 2}
#'
#' \if{html}{\figure{modify_header_ex2.png}{options: width=50\%}}
modify_header <- function(x, stat_by = NULL, ..., text_interpret = c("md", "html")) {
  # Gather any column-label overrides passed via `...`; drop to NULL when
  # none were given so that do.call() receives no empty extras.
  extra <- list(...)
  if (length(extra) == 0L) extra <- NULL
  # Hand everything to the internal implementation, flagging that the
  # original call should be recorded on the result.
  do.call(
    modify_header_internal,
    c(
      list(x = x, stat_by = stat_by, text_interpret = text_interpret, .save_call = TRUE),
      extra
    )
  )
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{baseflow_data_used_in_first_round_of_SFR.rds}
\alias{baseflow_data_used_in_first_round_of_SFR.rds}
\title{[DATASET] Baseflow used in initial MODFLOW/SFR application}
\format{A data frame with 874453 rows and 4 variables:
\itemize{
\item{\code{siteNo}}{character USGS gage ID.}
\item{\code{Date}}{date Date associated with baseflow estimate. }
\item{\code{baseFlow}}{double Estimated baseflow, in cubic feet per second.}
\item{\code{comment}}{character One of 'estimated gaged', 'estimated ungaged', or 'calculated'.}
}}
\usage{
readr::read_rds("data/baseflow_data_used_in_first_round_of_SFR.rds")
}
\description{
Original random forest model output as supplied to the MODFLOW modelers.
}
\details{
This is the data file used in the initial application of the SFR MODFLOW package as applied to the
MERAS study area. The file contains a mix of random forest model forecasts and observed values for baseflow.
Values with a comment field value of 'estimated ungaged' or 'estimated gaged' represent outputs from the random
forest model. Values with the comment field listed as 'calculated' represent observed values of baseflow
as calculated from streamflow records using hydrograph separation methods.
}
\keyword{datasets}
| /man/baseflow_data_used_in_first_round_of_SFR.rds.Rd | no_license | ldecicco-USGS/mapRandomForest | R | false | true | 1,333 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{baseflow_data_used_in_first_round_of_SFR.rds}
\alias{baseflow_data_used_in_first_round_of_SFR.rds}
\title{[DATASET] Baseflow used in initial MODFLOW/SFR application}
\format{A data frame with 874453 rows and 4 variables:
\itemize{
\item{\code{siteNo}}{character USGS gage ID.}
\item{\code{Date}}{date Date associated with baseflow estimate. }
\item{\code{baseFlow}}{double Estimated baseflow, in cubic feet per second.}
\item{\code{comment}}{character One of 'estimated gaged', 'estimated ungaged', or 'calculated'.}
}}
\usage{
readr::read_rds("data/baseflow_data_used_in_first_round_of_SFR.rds")
}
\description{
Original random forest model output as supplied to the MODFLOW modelers.
}
\details{
This is the data file used in the initial application of the SFR MODFLOW package as applied to the
MERAS study area. The file contains a mix of random forest model forecasts and observed values for baseflow.
Values with a comment field value of 'estimated ungaged' or 'estimated gaged' represent outputs from the random
forest model. Values with the comment field listed as 'calculated' represent observed values of baseflow
as calculated from streamflow records using hydrograph separation methods.
}
\keyword{datasets}
|
## =============================================================================
## A descriptive analysis of the SARS-CoV-2 pandemic in 2020
## =============================================================================
library("openxlsx")
# ------------------------------------------------------------------------------
# The data consists of CSV files. Each gives a daily report of the number of
# infections, deaths, and so on. They will be read in in the following and
# pre-processed to a single data frame.
# ------------------------------------------------------------------------------
# Read a single date from file.
data_20200318 <- read.csv("./data/raw/03-18-2020.csv", stringsAsFactors = FALSE)
# Convert Last.Update column from string to date. Daytime is dropped.
data_20200318$Last.Update <- as.Date(data_20200318$Last.Update, "%Y-%m-%d")
# Sum cases for all regions of one country.
sum_country <- aggregate(
cbind(Confirmed, Deaths, Recovered) ~ Country.Region,
data = data_20200318,
sum
)
# Select the latest date of update of all regions of one country as the date of
# last update for the whole country.
update_country <- aggregate(
Last.Update ~ Country.Region,
data = data_20200318,
max
)
# Create final processed data frame and overwrite the raw data.
data_20200318 <- merge(sum_country, update_country)
# ------------------------------------------------------------------------------
# The pre-processed data is written to a file.
# ------------------------------------------------------------------------------
# Write to a CSV file.
write.csv(
data_20200318,
"./data/processed/processed_data.csv",
row.names = FALSE
)
# Write to an Excel file.
write.xlsx(data_20200318, "./data/processed/processed_data.xlsx")
| /projects/corona/analysis.R | no_license | RSchleutker/RWorkshop | R | false | false | 1,764 | r | ## =============================================================================
## A descriptive analysis of the SARS-CoV-2 pandemic in 2020
## =============================================================================
library("openxlsx")
# ------------------------------------------------------------------------------
# The data consists of CSV files. Each gives a daily report of the number of
# infections, deaths, and so on. They will be read in in the following and
# pre-processed to a single data frame.
# ------------------------------------------------------------------------------
# Read a single date from file.
data_20200318 <- read.csv("./data/raw/03-18-2020.csv", stringsAsFactors = FALSE)
# Convert Last.Update column from string to date. Daytime is dropped.
data_20200318$Last.Update <- as.Date(data_20200318$Last.Update, "%Y-%m-%d")
# Sum cases for all regions of one country.
sum_country <- aggregate(
cbind(Confirmed, Deaths, Recovered) ~ Country.Region,
data = data_20200318,
sum
)
# Select the latest date of update of all regions of one country as the date of
# last update for the whole country.
update_country <- aggregate(
Last.Update ~ Country.Region,
data = data_20200318,
max
)
# Create final processed data frame and overwrite the raw data.
data_20200318 <- merge(sum_country, update_country)
# ------------------------------------------------------------------------------
# The pre-processed data is written to a file.
# ------------------------------------------------------------------------------
# Write to a CSV file.
write.csv(
data_20200318,
"./data/processed/processed_data.csv",
row.names = FALSE
)
# Write to an Excel file.
write.xlsx(data_20200318, "./data/processed/processed_data.xlsx")
|
install.packages(c("rvest","XML","magrittr"))
library(rvest)
library(XML)
library(magrittr)
# Amazon Reviews #############################
aurl <- "https://www.amazon.in/Redmi-Note-Neptune-Blue-128GB/dp/B07SSGJYH3/ref=sr_1_1?dchild=1&keywords=mi+note7pro+reviews&qid=1591598741&sr=8-1#customerReviews"
amazon_reviews <- NULL
for (i in 1:10){
murl <- read_html(as.character(paste(aurl,i,sep="=")))
rev <- murl %>% html_nodes(".review-text") %>% html_text()
amazon_reviews <- c(amazon_reviews,rev)
}
length(amazon_reviews)
write.table(amazon_reviews,"apple.txt",row.names = F)
install.packages("tm") # for text mining
install.packages(c("SnowballC","textstem")) # for text stemming
install.packages("wordcloud") # word-cloud generator
install.packages("RColorBrewer") # color palettes
library('tm')
library("SnowballC")
library("wordcloud")
library("RColorBrewer")
library('textstem')
# Importing apple reviews data
x <- as.character(amazon_reviews)
x <- iconv(x, "UTF-8") #Unicode Transformation Format. The '8' means it uses 8-bit blocks to represent a character
# Load the data as a corpus
x <- Corpus(VectorSource(x))
inspect(x[1])
# Convert the text to lower case
x1 <- tm_map(x, tolower)
inspect(x1[1])
# Remove numbers
x1 <- tm_map(x1, removeNumbers)
# Remove punctuations
x1 <- tm_map(x1, removePunctuation)
# Remove english common stopwords
x1 <- tm_map(x1, removeWords, stopwords('english'))
# Remove your own stop word
# specify your stopwords as a character vector
x1 <- tm_map(x1, removeWords, c("phone", "mi","the","will"))
#striping white spaces
x1 <- tm_map(x1, stripWhitespace)
inspect(x1[1])
# Text lemmatisation.
# Bug fix: textstem::lemmatize_words() expects a vector of individual words,
# so calling it directly on a tm Corpus does not lemmatise the documents.
# lemmatize_strings() is the textstem function for running text; apply it to
# each document via tm_map().
x1 <- tm_map(x1, lemmatize_strings)
#x1 <- tm_map(x1, stemDocument)
# Term document matrix
# converting unstructured data to structured format using TDM
tdm <- TermDocumentMatrix(x1)
tdm <- as.matrix(tdm)
#Frequency
v <- sort(rowSums(tdm),decreasing=TRUE)
d <- data.frame(word = names(v),freq=v)
head(d, 10)
# Bar plot
w <- rowSums(tdm)
w_sub <- subset(w, w >= 10)
barplot(w_sub, las=3, col = rainbow(20))
# Common terms (e.g. "phone") repeat in almost all documents, so drop them
x1 <- tm_map(x1, removeWords, c('phone','air',"mobile",'can','will',"amazon",'phone','mi','product'))
x1 <- tm_map(x1, stripWhitespace)
tdm <- TermDocumentMatrix(x1)
tdm <- as.matrix(tdm)
w1 <- rowSums(tdm)
# Word cloud
#with all the words
wordcloud(words = names(w1), freq = w1, random.order = F, colors = rainbow(20), scale=c(2,.2), rot.per = 0.3)
# Loading positive (+ve) and negative (-ve) sentiment dictionaries
pos.words = scan(file.choose(), what="character", comment.char=";")
neg.words = scan(file.choose(), what="character", comment.char=";")
pos.words = c(pos.words,"wow", "kudos", "hurray")
# Positive wordcloud
pos.matches = match(names(w), c(pos.words))
pos.matches = !is.na(pos.matches)
freq_pos <- w[pos.matches]
p_names <- names(freq_pos)
wordcloud(p_names,freq_pos,scale=c(3.5,.2),colors = rainbow(20))
# Negative wordcloud
neg.matches = match(names(w), c(neg.words))
neg.matches = !is.na(neg.matches)
freq_neg <- w[neg.matches]
n_names <- names(freq_neg)
wordcloud(n_names,freq_neg,scale=c(3.5,.2),colors = brewer.pal(8,"Dark2"))
#Association between words
tdm <- TermDocumentMatrix(x1)
findAssocs(tdm, c("screen"),corlimit = 0.3)
# Sentiment Analysis #
library(syuzhet)
library(lubridate)
library(ggplot2)
library(scales)
library(reshape2)
library(dplyr)
# Read the saved reviews back from disk.
# Bug fix: the file was written above as "apple.txt"; 'apple.TXT' only works
# on case-insensitive filesystems (Windows) and fails on Linux/macOS.
amzon_reviews <- read.delim('apple.txt')
# NOTE(review): read.delim() already consumed the header line, so [-1, ]
# additionally drops the first real review -- confirm this is intended.
reviews <- as.character(amzon_reviews[-1,])
class(reviews)
# Obtain Sentiment scores
s <- get_nrc_sentiment(reviews)
head(s)
reviews[6]
# on tweet 6, you have 3 for anger,8 for anticipation ,2 for disgust ,4 for fear
# 4 for joy, each one for sadness and surprise, 8 for trust , 9 words for negative and 10 positive.
get_nrc_sentiment('ridiculous')
#ridiculous has 1 anger 1 disgust and 1 negative
# barplot
barplot(colSums(s), las = 2.5, col = rainbow(10),ylab = 'Count',main= 'Sentiment scores for Amazon Reviews
for mobile')
| /amazon.r | no_license | Swetapadma94/Text-Mining | R | false | false | 4,052 | r | install.packages(c("rvest","XML","magrittr"))
library(rvest)
library(XML)
library(magrittr)
# Amazon Reviews #############################
aurl <- "https://www.amazon.in/Redmi-Note-Neptune-Blue-128GB/dp/B07SSGJYH3/ref=sr_1_1?dchild=1&keywords=mi+note7pro+reviews&qid=1591598741&sr=8-1#customerReviews"
amazon_reviews <- NULL
for (i in 1:10){
murl <- read_html(as.character(paste(aurl,i,sep="=")))
rev <- murl %>% html_nodes(".review-text") %>% html_text()
amazon_reviews <- c(amazon_reviews,rev)
}
length(amazon_reviews)
write.table(amazon_reviews,"apple.txt",row.names = F)
install.packages("tm") # for text mining
install.packages(c("SnowballC","textstem")) # for text stemming
install.packages("wordcloud") # word-cloud generator
install.packages("RColorBrewer") # color palettes
library('tm')
library("SnowballC")
library("wordcloud")
library("RColorBrewer")
library('textstem')
# Importing apple reviews data
x <- as.character(amazon_reviews)
x <- iconv(x, "UTF-8") #Unicode Transformation Format. The '8' means it uses 8-bit blocks to represent a character
# Load the data as a corpus
x <- Corpus(VectorSource(x))
inspect(x[1])
# Convert the text to lower case
x1 <- tm_map(x, tolower)
inspect(x1[1])
# Remove numbers
x1 <- tm_map(x1, removeNumbers)
# Remove punctuations
x1 <- tm_map(x1, removePunctuation)
# Remove english common stopwords
x1 <- tm_map(x1, removeWords, stopwords('english'))
# Remove your own stop word
# specify your stopwords as a character vector
x1 <- tm_map(x1, removeWords, c("phone", "mi","the","will"))
#striping white spaces
x1 <- tm_map(x1, stripWhitespace)
inspect(x1[1])
# Text stemming
x1<-lemmatize_words(x1)
#x1 <- tm_map(x1, stemDocument)
# Term document matrix
# converting unstructured data to structured format using TDM
tdm <- TermDocumentMatrix(x1)
tdm <- as.matrix(tdm)
#Frequency
v <- sort(rowSums(tdm),decreasing=TRUE)
d <- data.frame(word = names(v),freq=v)
head(d, 10)
# Bar plot
w <- rowSums(tdm)
w_sub <- subset(w, w >= 10)
barplot(w_sub, las=3, col = rainbow(20))
# Term laptop repeats in all most all documents
x1 <- tm_map(x1, removeWords, c('phone','air',"mobile",'can','will',"amazon",'phone','mi','product'))
x1 <- tm_map(x1, stripWhitespace)
tdm <- TermDocumentMatrix(x1)
tdm <- as.matrix(tdm)
w1 <- rowSums(tdm)
# Word cloud
#with all the words
wordcloud(words = names(w1), freq = w1, random.order = F, colors = rainbow(20), scale=c(2,.2), rot.per = 0.3)
# lOADING +VE AND -VE dictonaries
pos.words = scan(file.choose(), what="character", comment.char=";")
neg.words = scan(file.choose(), what="character", comment.char=";")
pos.words = c(pos.words,"wow", "kudos", "hurray")
# Positive wordcloud
pos.matches = match(names(w), c(pos.words))
pos.matches = !is.na(pos.matches)
freq_pos <- w[pos.matches]
p_names <- names(freq_pos)
wordcloud(p_names,freq_pos,scale=c(3.5,.2),colors = rainbow(20))
# Negative wordcloud
neg.matches = match(names(w), c(neg.words))
neg.matches = !is.na(neg.matches)
freq_neg <- w[neg.matches]
n_names <- names(freq_neg)
wordcloud(n_names,freq_neg,scale=c(3.5,.2),colors = brewer.pal(8,"Dark2"))
#Association between words
tdm <- TermDocumentMatrix(x1)
findAssocs(tdm, c("screen"),corlimit = 0.3)
# Sentiment Analysis #
library(syuzhet)
library(lubridate)
library(ggplot2)
library(scales)
library(reshape2)
library(dplyr)
# Read File
amzon_reviews <- read.delim('apple.TXT')
reviews <- as.character(amzon_reviews[-1,])
class(reviews)
# Obtain Sentiment scores
s <- get_nrc_sentiment(reviews)
head(s)
reviews[6]
# on tweet 6, you have 3 for anger,8 for anticipation ,2 for disgust ,4 for fear
# 4 for joy, each one for sadness and surprise, 8 for trust , 9 words for negative and 10 positive.
get_nrc_sentiment('ridiculous')
#ridiculous has 1 anger 1 disgust and 1 negative
# barplot
barplot(colSums(s), las = 2.5, col = rainbow(10),ylab = 'Count',main= 'Sentiment scores for Amazon Reviews
for mobile')
|
#' @examples
#' cols <- c("#FCAE91", "#FB6A4A", "#CB181D", "#BDD7E7", "#6BAED6", "#2171B5")
#' ggpar(vars = list("gear", "cyl", "gear"), data=mtcars) +
#' # method="hammock", text.angle=0, ratio=0.2) +
#' scale_fill_manual(values=cols) + scale_colour_manual(values=cols) +
#' theme_bw()
#' mtcars$cyl <- factor(mtcars$cyl, levels = c("8","6","4"))
#' mtcars$gear <- factor(mtcars$gear)
#' ggpar(list("gear", "cyl", "gear"), data=mtcars)
#' ggpar(list("cyl", "gear", "cyl"), data=mtcars)
# Parallel-sets ("parset") style plot of a sequence of categorical variables.
#
# data:   data frame containing the columns named in `vars`
# vars:   list of column names (strings); adjacent pairs are linked by ribbons
# width:  width of the category bars
# alpha:  ribbon transparency
# labels: draw level labels on the bars?
# method: "parset" or "hammock" (see FIXME below)
# ...:    forwarded to geom_bar()
#
# Fixes vs. previous version: removed a leftover browser() debugging call and
# replaced the deprecated group_by_() string interface with the .data pronoun.
ggpar <- function (data, vars, width = 0.25, alpha = 0.6, labels = TRUE, method = "parset", ...) {
  # Build ribbons linking levels of column dx (bar at x = xpos) to levels of
  # column dy (bar at x = xpos + 1).
  get_ribbons <- function(xpos, dx, dy) {
    dframe <- data.frame(dx = dx, dy = dy)
    # left edge of the ribbons: counts stacked in (dx, dy) order
    dxy <- dframe %>% group_by(dx, dy) %>% tally()
    dxy$ypos <- sum(dxy$n) - cumsum(dxy$n)
    dxy$xpos <- xpos + width/2
    # right edge: the same counts stacked in (dy, dx) order
    dyx <- dframe %>% group_by(dy, dx) %>% tally()
    dyx$ypos <- sum(dyx$n) - cumsum(dyx$n)
    dyx$xpos <- xpos + 1 - width/2
    dfm <- rbind(dxy, dyx)
    # FIXME(review): both branches are identical -- the "hammock" layout is
    # not actually implemented yet.
    if (method == "parset") {
      gr <- geom_ribbon(aes(x=xpos,
                            ymin=ypos,
                            ymax= ypos+n, group=interaction(dx, dy),
                            fill=dx,
                            colour=dx), alpha = alpha, data = dfm)
    }
    if (method == "hammock") {
      gr <- geom_ribbon(aes(x=xpos,
                            ymin=ypos,
                            ymax= ypos+n, group=interaction(dx, dy),
                            fill=dx,
                            colour=dx), alpha = alpha, data = dfm)
    }
    gr
  }
  stopifnot(length(vars) >= 2)
  data_ <- data[, as.character(vars)]
  # Coerce each plotted column to a factor and prefix its levels with the
  # variable name so levels stay distinct across variables.
  for (i in seq_along(vars)) {
    data_[, i] <- as.factor(data_[, i])
    levels(data_[, i]) <- paste(vars[[i]], levels(data_[, i]), sep = ":")
  }
  data__ <- suppressWarnings(tidyr::gather(data_, factor_key = TRUE))
  bars <- list(geom_bar(data = data__, aes(x = key, color = value, fill = value),
                        width = width, ...),
               scale_x_discrete("", labels = as.character(vars)))
  ribbons <- list()
  for (i in seq_len(length(vars) - 1)) {
    ribbons[[i]] <- get_ribbons(i, data_[, i], data_[, i + 1])
  }
  label <- list()
  if (labels) {
    for (i in seq_along(vars)) {
      # per-level counts give the vertical midpoint of each bar segment
      # (.data[[name]] replaces the deprecated group_by_(name))
      dx <- data_ %>% group_by(.data[[vars[[i]]]]) %>% tally()
      dx$xpos <- i
      dx$ypos <- sum(dx$n) - cumsum(dx$n) + dx$n/2
      names(dx)[1] <- "key"
      dx <- dx %>% tidyr::separate(key, into = c("key", "value"), sep = ":")
      # light text drawn over a nudged dark copy gives a faint halo effect
      label[[i]] <- list(
        geom_text(aes(x = xpos, y = ypos, label = value), colour = "grey10",
                  nudge_x = .01, nudge_y = 1/sum(dx$n), data = dx),
        geom_text(aes(x = xpos, y = ypos, label = value), colour = "grey90", data = dx))
    }
  }
  ggplot() + ribbons + bars + label
}
| /inst/new-try.R | no_license | cran/ggparallel | R | false | false | 2,721 | r | #' @examples
#' cols <- c("#FCAE91", "#FB6A4A", "#CB181D", "#BDD7E7", "#6BAED6", "#2171B5")
#' ggpar(vars = list("gear", "cyl", "gear"), data=mtcars) +
#' # method="hammock", text.angle=0, ratio=0.2) +
#' scale_fill_manual(values=cols) + scale_colour_manual(values=cols) +
#' theme_bw()
#' mtcars$cyl <- factor(mtcars$cyl, levels = c("8","6","4"))
#' mtcars$gear <- factor(mtcars$gear)
#' ggpar(list("gear", "cyl", "gear"), data=mtcars)
#' ggpar(list("cyl", "gear", "cyl"), data=mtcars)
ggpar <- function (data, vars, width = 0.25, alpha = 0.6, labels = TRUE, method = "parset", ...) {
get_ribbons <- function(xpos, dx, dy) {
dframe <- data.frame(dx = dx, dy = dy)
dxy <- dframe %>% group_by(dx, dy) %>% tally()
dxy$ypos <- sum(dxy$n) - cumsum(dxy$n)
dxy$xpos <- xpos + width/2
dyx <- dframe %>% group_by(dy, dx) %>% tally()
dyx$ypos <- sum(dyx$n) - cumsum(dyx$n)
dyx$xpos <- xpos + 1 - width/2
dfm <- rbind(dxy, dyx)
if (method == "parset") {
gr <- geom_ribbon(aes(x=xpos,
ymin=ypos,
ymax= ypos+n, group=interaction(dx, dy),
fill=dx,
colour=dx), alpha = alpha, data = dfm)
}
if (method == "hammock") {
gr <- geom_ribbon(aes(x=xpos,
ymin=ypos,
ymax= ypos+n, group=interaction(dx, dy),
fill=dx,
colour=dx), alpha = alpha, data = dfm)
}
gr
}
stopifnot(length(vars) >= 2)
data_ <- data[,as.character(vars)]
for (i in 1:length(vars)) {
data_[,i] <- as.factor(data_[,i])
levels(data_[,i]) <- paste(vars[[i]], levels(data_[,i]), sep=":")
}
data__ <- suppressWarnings(tidyr::gather(data_, factor_key = TRUE))
bars <- list(geom_bar(data = data__, aes(x = key, color = value, fill=value),
width = width, ...),
scale_x_discrete("", labels = as.character(vars)))
ribbons <- list()
for (i in 1:(length(vars)-1)) {
ribbons[[i]] <- get_ribbons(i, data_[,i], data_[,i+1])
}
label <- list()
if (labels) {
for (i in 1:(length(vars))) {
browser()
dx <- data_%>% group_by_(vars[[i]]) %>% tally()
dx$xpos <- i
dx$ypos <- sum(dx$n) - cumsum(dx$n) + dx$n/2
names(dx)[1] <- "key"
# browser()
dx <- dx %>% tidyr::separate(key, into=c("key", "value"), sep =":")
label[[i]] <- list(
geom_text(aes(x = xpos, y = ypos, label = value), colour = "grey10",
nudge_x = .01, nudge_y = 1/sum(dx$n), data = dx),
geom_text(aes(x = xpos, y = ypos, label = value), colour = "grey90", data = dx))
}
}
ggplot() +ribbons + bars + label
}
|
#!/usr/bin/env Rscript
# reduce_covSums.R
# Summarise per-CpG bismark coverage over the regions of a BED file.
# PARSE ARGUMENTS
suppressMessages(library(optparse))
suppressMessages(library(tidyverse))  # fix: was loaded twice; once is enough
suppressMessages(library(plotrix))    # std.error()
suppressMessages(library(EnvStats))   # geoMean() (only used by a commented-out stat)
option_list = list(
  make_option(c("--cov"), type="character", default=NULL,
              help="covInfile"),
  make_option(c("--bed"), type="character", default=NULL,
              help="bedInfile"),
  make_option(c("--minCount"), type="integer", default=1,
              help="Minimum number of reads a site must have to be counted"),
  make_option(c("--minRep"), type="double", default=0,
              help="Minimum proportion of samples with filter passing data for the site to be kept"),
  make_option(c("--pFalsePos"), type="double", default=0.01,
              # fix: help text was a copy-paste of --minRep's; describe the
              # actual meaning used downstream (binom.test null probability)
              help="Assumed probability of a false-positive methylation call at an unmethylated site"),
  make_option(c("--methAlpha"), type="double", default=0.05,
              help="Threshold for type 1 error for calling a site methylated given probability of false methylation call pFalsePos"),
  make_option(c("--o"), type="character", default='gbm_stats',
              help="Output name")
)
print("Parsing arguments...")
opt_parser = OptionParser(option_list=option_list);
opt = parse_args(opt_parser);
# unpack parsed options into plain variables used by the rest of the script
covFile = opt$cov
bedFile = opt$bed
minCount = opt$minCount
minRep = opt$minRep
propFalsePos = opt$pFalsePos
methAlpha = opt$methAlpha
outName = opt$o
# Function to summarise methylation stats for one region.
# inputDF: per-CpG, per-sample rows for the region, with columns nM, nU,
#          pct.meth and methylated already computed.
# gout:    unused parameter (dead argument; kept for interface stability).
# Returns a one-row data frame of pooled counts and per-site summary stats.
# NOTE: chr/start/end/name are NOT taken from inputDF -- they are read from
# the caller's loop variables (uchr, s, e, name) via lexical scoping, so this
# helper is only valid when called from the region loop below.
getStats = function(inputDF, gout){
  inputDF %>%
    summarize(chr=uchr,
              start = s,
              end = e,
              name = name,
              nM = sum(nM, na.rm=TRUE),
              nU = sum(nU, na.rm=TRUE),
              NcpgMeasured = n(),
              Nmcpg = sum(methylated),
              mCpG_per_CpG = Nmcpg/NcpgMeasured,
              fracMeth = sum(nM, na.rm=TRUE) / ( sum(nM, na.rm=TRUE) + sum(nU, na.rm=TRUE)),
              mnMeth = mean(pct.meth, na.rm=TRUE),
              medMeth = median(pct.meth, na.rm=TRUE),
              #geoMnMeth = geomMean(mPct, na.rm=TRUE),
              sdMeth = sd(pct.meth),
              stdErrMeth = std.error(pct.meth),
              maxMeth = max(pct.meth),
              minMeth = min(pct.meth))
}
# READ IN DATA
print('Reading in bed file...')
bdat = read.table(bedFile, stringsAsFactors=FALSE)
colnames(bdat)=c('chr', 'start', 'end', 'name')
bdat = as_tibble(bdat)
print('Reading in cov file...')
# bismark .cov layout (chr, start, end, % methylation, count methylated,
# count unmethylated) plus a sample file name appended as column 7
cdat = read.table(covFile, stringsAsFactors=FALSE)
colnames(cdat)=c('chr', 'start', 'end', 'pct.meth', 'nM', 'nU', 'fileName')
cdat = as_tibble(cdat)
# only walk chromosomes present in both inputs
uchrs0 = unique(bdat$chr)
uchrs = uchrs0[uchrs0 %in% cdat$chr]
if (length(uchrs)==0){
  print('No gene regions found in this cov file.')
  print('Exiting')
  quit()
}
res = data.frame()
# LOOP THROUGH CHROMOSOMES AND GENES
# NOTE(review): res grows via rbind() inside a nested loop (quadratic
# copying); collecting rows in a list and bind_rows()-ing once would scale
# better for large BED files.
for (chrNum in 1:length(uchrs)){
  uchr=uchrs[chrNum]
  print(paste(uchr, '...', sep=''))
  if (chrNum %% 100 == 0){
    print(paste('chr', chrNum, 'of', length(uchrs)))
  }
  csub = cdat %>%
    filter(chr==uchr)
  bsub = bdat %>%
    filter(chr==uchr)
  if (nrow(csub)==0){
    next
  }
  for (i in 1:nrow(bsub)){
    # region bounds for this gene; getStats() reads s, e, name and uchr
    # from this scope
    s=as.numeric(bsub[i,'start'])
    e=as.numeric(bsub[i,'end'])
    name=as.character(bsub[i,'name'])
    wsub = csub %>%
      filter(start >= s,
             end <= e)
    if (nrow(wsub) > 0){
      dat=wsub
      # DO MIN COUNT FILTER: keep sites with at least minCount total reads
      f1 = dat %>%
        mutate(tot=nM+nU) %>%
        filter(tot>=minCount)
      before=nrow(dat)
      after=nrow(f1)
      pct = round(after/before, digits=3)*100
      #print(paste(c(pct, '% of sites passed minCount >= ', minCount), collapse=''))
      # DO MIN REP FILTER: keep sites observed in >= minRep of the samples
      totSamples = length(unique(f1$fileName))
      keepSites = f1 %>%
        group_by(chr, start, end) %>%
        summarize(N=n(), rep=n()/totSamples, keep= (n()/totSamples) >=minRep) %>%
        filter(keep)
      # NOTE(review): this matches chr and start independently rather than as
      # (chr, start) pairs; within one chromosome chr is constant so start is
      # the effective key, but a semi_join would be the exact filter.
      f2 = f1 %>%
        filter(chr %in% keepSites$chr & start %in% keepSites$start)
      before = nrow(f1)
      after = nrow(f2)
      pct = round(after/before, digits=3)*100
      #print(paste(c(pct, '% of sites passed minRep >= ', minRep), collapse=''))
      # MAKE METHYLATION CALLS FOR EACH SAMPLE: one-sided binomial test of nM
      # methylated reads out of tot against the assumed false-positive rate
      f3 = f2 %>%
        mutate(pFalsePos = unlist(map2(.x=nM,
                                       .y=tot,
                                       ~ binom.test(.x, .y, propFalsePos, alternative="greater")$p.value))
        )
      # CALL METH AND MERGE UP WITH GFF DATA FOR LENGTHS
      mdat = f3 %>%
        mutate(methylated = pFalsePos < methAlpha,
               mPct = nM/(nM+nU))
      # NOTE(review): these two values are unused here -- getStats()
      # recomputes them from mdat
      NcpgMeasured = length((mdat$start))
      Nmcpg = sum(mdat$methylated)
      # append this region's one-row summary
      subres = getStats(mdat)
      res = rbind(res, subres)
    }
  }
}
print(paste('writing results to', outName))
# NOTE(review): readr's `path` argument is deprecated in favour of `file`
write_tsv(res, path=outName)
| /processing_scripts/basic_methylation_from_bed.R | no_license | grovesdixon/invert_meth_and_transcription | R | false | false | 5,003 | r | #!/usr/bin/env Rscript
#reduce_covSums.R
#PARSE ARUGMENTS
suppressMessages(library(optparse))
suppressMessages(library(tidyverse))
suppressMessages(library(tidyverse))
suppressMessages(library(plotrix))
suppressMessages(library(EnvStats))
option_list = list(
make_option(c("--cov"), type="character", default=NULL,
help="covInfile"),
make_option(c("--bed"), type="character", default=NULL,
help="bedInfile"),
make_option(c("--minCount"), type="integer", default=1,
help="Minimum number of reads a site must have to be counted"),
make_option(c("--minRep"), type="double", default=0,
help="Minimum proportion of samples with filter passing data for the site to be kept"),
make_option(c("--pFalsePos"), type="double", default=0.01,
help="Minimum proportion of samples with filter passing data for the site to be kept"),
make_option(c("--methAlpha"), type="double", default=0.05,
help="Theshold for type 1 error for calling a site methylated given probability of false methylation call pFalsePos"),
make_option(c("--o"), type="character", default='gbm_stats',
help="Output name")
)
print("Parsing arugments...")
opt_parser = OptionParser(option_list=option_list);
opt = parse_args(opt_parser);
covFile = opt$cov
bedFile = opt$bed
minCount = opt$minCount
minRep = opt$minRep
propFalsePos = opt$pFalsePos
methAlpha = opt$methAlpha
outName = opt$o
#function to summarize stats
getStats = function(inputDF, gout){
inputDF %>%
summarize(chr=uchr,
start = s,
end = e,
name = name,
nM = sum(nM, na.rm=TRUE),
nU = sum(nU, na.rm=TRUE),
NcpgMeasured = n(),
Nmcpg = sum(methylated),
mCpG_per_CpG = Nmcpg/NcpgMeasured,
fracMeth = sum(nM, na.rm=TRUE) / ( sum(nM, na.rm=TRUE) + sum(nU, na.rm=TRUE)),
mnMeth = mean(pct.meth, na.rm=TRUE),
medMeth = median(pct.meth, na.rm=TRUE),
#geoMnMeth = geomMean(mPct, na.rm=TRUE),
sdMeth = sd(pct.meth),
stdErrMeth = std.error(pct.meth),
maxMeth = max(pct.meth),
minMeth = min(pct.meth))
}
#READ IN DATA
print('Reading in bed file...')
bdat = read.table(bedFile, stringsAsFactors=FALSE)
colnames(bdat)=c('chr', 'start', 'end', 'name')
bdat = as_tibble(bdat)
print('Reading in cov file...')
cdat = read.table(covFile, stringsAsFactors=FALSE)
colnames(cdat)=c('chr', 'start', 'end', 'pct.meth', 'nM', 'nU', 'fileName')
cdat = as_tibble(cdat)
uchrs0 = unique(bdat$chr)
uchrs = uchrs0[uchrs0 %in% cdat$chr]
if (length(uchrs)==0){
print('No gene regions found in this cov file.')
print('Exiting')
quit()
}
res = data.frame()
#LOOP THROUGH CHROMOSOMES AND GENES
for (chrNum in 1:length(uchrs)){
uchr=uchrs[chrNum]
print(paste(uchr, '...', sep=''))
if (chrNum %% 100 == 0){
print(paste('chr', chrNum, 'of', length(uchrs)))
}
csub = cdat %>%
filter(chr==uchr)
bsub = bdat %>%
filter(chr==uchr)
if (nrow(csub)==0){
next
}
for (i in 1:nrow(bsub)){
s=as.numeric(bsub[i,'start'])
e=as.numeric(bsub[i,'end'])
name=as.character(bsub[i,'name'])
wsub = csub %>%
filter(start >= s,
end <= e)
if (nrow(wsub) > 0){
dat=wsub
#DO MIN COUNT FILTER
# print("Filtering by read count...")
f1 = dat %>%
mutate(tot=nM+nU) %>%
filter(tot>=minCount)
before=nrow(dat)
after=nrow(f1)
pct = round(after/before, digits=3)*100
#print(paste(c(pct, '% of sites passed minCount >= ', minCount), collapse=''))
#DO MIN REP FILTER
# print('Filtering by representation...')
totSamples = length(unique(f1$fileName))
keepSites = f1 %>%
group_by(chr, start, end) %>%
summarize(N=n(), rep=n()/totSamples, keep= (n()/totSamples) >=minRep) %>%
filter(keep)
f2 = f1 %>%
filter(chr %in% keepSites$chr & start %in% keepSites$start)
before = nrow(f1)
after = nrow(f2)
pct = round(after/before, digits=3)*100
#print(paste(c(pct, '% of sites passed minRep >= ', minRep), collapse=''))
#MAKE METHYLATION CALLS FOR EACH SAMPLE
# print('Making site methylation calls...')
f3 = f2 %>%
mutate(pFalsePos = unlist(map2(.x=nM,
.y=tot,
~ binom.test(.x, .y, propFalsePos, alternative="greater")$p.value))
)
#CALL METH AND MERGE UP WITH GFF DATA FOR LENGTHS
mdat = f3 %>%
mutate(methylated = pFalsePos < methAlpha,
mPct = nM/(nM+nU))
NcpgMeasured = length((mdat$start))
Nmcpg = sum(mdat$methylated)
#funciton to write out stats
subres = getStats(mdat)
res = rbind(res, subres)
}
}
}
print(paste('writing results to', outName))
write_tsv(res, path=outName)
|
# Function: Format Columns
# Normalise raw column names: lower-case everything, collapse separator
# punctuation to "_", drop decorative punctuation, spell out "%"/"$", and
# prefix names starting with a digit with "col_" so they are valid R names.
format_cols <- function(cols) {
  # regex patterns (names) and their replacements (values), applied in order
  substitutions <- c(
    " "   = "_",
    "\\." = "_",
    "\\," = "_",
    "\\-" = "_",
    "\\(" = "",
    "\\)" = "",
    "\\%" = "pct",
    "\\$" = "usd",
    "\\?" = "",
    "\\!" = "",
    "\\#" = ""
  )
  cols %>%
    str_to_lower() %>%
    str_replace_all(substitutions) %>%
    # prefix names that begin with digits
    str_replace(pattern = "(^\\d+)(.*)", replacement = "col_\\1\\2")
}
# Function: Parse TXT File
# Parse a data-dictionary text file into a long data frame with one row per
# variable level. Expected layout: "NAME: description" header lines, each
# followed by "code<TAB>description" level lines for that variable.
parse_txt <- function(path) {
  # reading txt by lines
  mapping_txt <- readLines(path)
  # pre-allocate one slot per input line; flag == 0 marks rows dropped later
  n_row <- length(mapping_txt)
  flag <- rep(1, n_row)
  var_name <- vector(mode = "character", length = n_row)
  var_desc <- vector(mode = "character", length = n_row)
  level_code <- vector(mode = "character", length = n_row)
  level_desc <- vector(mode = "character", length = n_row)
  # parse rows
  for (i in seq_along(mapping_txt)) {
    # whitespace-only rows are flagged for removal (they still fall through
    # to the if/else below, harmlessly filling slots that get filtered out)
    if (mapping_txt[i] %in% c("\t\t", "", "\t", " \t", " ", "\t\t\t", "  ")) {
      flag[i] <- 0
    }
    # variable header rows look like "NAME: description"
    if (str_detect(mapping_txt[i], "^\\w*: .*")) {
      #flag[i] <- 0
      var_name[i] <- str_trim(str_split(string = mapping_txt[i], pattern = ":")[[1]][1])
      var_desc[i] <- str_trim(str_split(string = mapping_txt[i], pattern = ":")[[1]][2])
    # parsing variable levels
    } else {
      # level rows inherit the variable from the previous row.
      # NOTE(review): if the file starts with a level row, i - 1 indexes
      # position 0 -- assumes the first line is always a variable header.
      var_name[i] <- var_name[i - 1]
      var_desc[i] <- var_desc[i - 1]
      level_code[i] <- str_trim(str_split(string = mapping_txt[i], pattern = "\t")[[1]][1])
      level_desc[i] <- str_split(string = mapping_txt[i], pattern = "\t")[[1]][2]
    }
  }
  # bind columns into a dataframe, dropping the flagged (blank) rows
  output_df <- bind_cols(flag = flag,
                         variable_name = var_name,
                         variable_description = var_desc,
                         level_code = level_code,
                         level_description = level_desc) %>%
    filter(flag == 1) %>%
    select(-flag)
  # output
  return(output_df)
}
# Function: recode columns
# Map a column's level codes to their descriptions using a variable-mapping
# table (as produced by parse_txt()).
#
# mapping_df: data frame with variable_name, level_code, level_description
# col:        the column (vector) to recode
# col_name:   the column's name, looked up in mapping_df$variable_name
#
# Bug fix: the parameter was previously misspelled `mappin_df` while the body
# referenced `mapping_df`, so the function silently depended on a global of
# that name instead of its own argument.
recode_columns <- function(mapping_df, col, col_name) {
  # only recode columns that are described in the mapping table;
  # anything else passes through untouched
  if (col_name %in% unique(mapping_df[["variable_name"]])) {
    rows <- mapping_df[["variable_name"]] == col_name
    recoded_col <- plyr::mapvalues(
      x = col,
      from = mapping_df[rows, "level_code", drop = TRUE],
      to = mapping_df[rows, "level_description", drop = TRUE],
      warn_missing = FALSE
    ) %>%
      as.character()
  } else {
    recoded_col <- col
  }
  # output
  return(recoded_col)
}
# Function: Calculate R2
# Coefficient of determination: 1 - RSS/TSS for a vector of predictions
# against the observed values. Can be negative when predictions are worse
# than predicting the mean.
calc_r2 <- function(actual, prediction) {
  squared_resid <- (prediction - actual)^2  # residual sum-of-squares terms
  squared_dev <- (actual - mean(actual))^2  # total sum-of-squares terms
  1 - sum(squared_resid) / sum(squared_dev)
}
# Function: remove missing levels
# For every factor predictor the model was trained on, set test-set values
# whose level was never seen during training to NA (predict() would otherwise
# error on novel levels), then drop the now-empty levels. Returns the
# adjusted test data.
# NOTE(review): assumes `model` exposes lm/glm-style $xlevels -- tree/ranger
# fits do not; confirm callers only pass (g)lm objects. Also assumes base
# data.frame subsetting semantics (test_df[, name] returning a vector); a
# tibble would return a one-column tibble here -- confirm.
remove_missing_levels <- function(model, test_df) {
  # drop empty factor levels in test data
  test_df <- test_df %>%
    droplevels()
  # do nothing if no factors are present
  if (length(model[["xlevels"]]) == 0) {
    return(test_df)
  }
  # extract model factors and levels into a long (factor, level) table
  model_factors_df <- map2(.x = names(model$xlevels),
                           .y = model$xlevels,
                           .f = function(factor, levels) data.frame(factor, levels, stringsAsFactors = F)) %>%
    bind_rows()
  # select column names in test data that are factor predictors in trained model
  predictors <- names(test_df[names(test_df) %in% model_factors_df[["factor"]]])
  # for each factor predictor in your data, if the level is not in the model set the value to NA
  for (i in seq_along(predictors)) {
    # levels this predictor had in the training data
    model_levels <- model_factors_df[model_factors_df[["factor"]] == predictors[i], "levels", drop = T]
    # values of this predictor in the test data
    test_levels <- test_df[, predictors[i]]
    # TRUE where the test value's level was seen in training
    found_flag <- test_levels %in% model_levels
    # if any missing, then set to NA
    if (any(!found_flag)) {
      # unseen levels, joined for the log message
      missing_levels <- str_c(as.character(unique(test_levels[!found_flag])), collapse = ",")
      # set to NA
      test_df[!found_flag, predictors[i]] <- NA
      # drop empty factor levels in test data
      test_df <- test_df %>%
        droplevels()
      # message console
      message(glue("In {predictors[i]}: setting missing level(s) {missing_levels} to NA"))
    }
  }
  # output
  return(test_df)
}
# Function: find optimal cp
# Choose a complexity parameter from an rpart cptable. With cv_sd_flag == 1
# the target level is min(xerror) + its SE (the "1-SE" level); otherwise it
# is min(xerror) itself. The cp whose xerror lies closest to that level is
# returned. cptable columns: 1 = CP, 4 = xerror, 5 = xstd.
find_optimal_cp <- function(cptable, cv_sd_flag) {
  xerror <- cptable[, 4]
  # row with the smallest cross-validated error
  best_row <- which.min(xerror)
  # reference level, optionally lifted by one standard error
  target <- xerror[best_row]
  if (cv_sd_flag == 1) {
    target <- target + cptable[best_row, 5]
  }
  # cp whose cross-validated error is nearest the reference level
  chosen_row <- which.min(abs(xerror - target))
  cptable[chosen_row, 1]
}
# Function: plot variable importance
# Horizontal bar chart of per-variable importance scores, optionally rescaled
# to percentages and truncated to the top_n highest-scoring variables.
#
# variable:   vector of variable names
# score:      vector of importance scores (same length as variable)
# scale:      rescale scores to sum to 100?
# top_n:      keep only the top_n variables; NA (the default) keeps all.
#             Fix: previously had no default, so omitting it errored even
#             though the body tests is.na(top_n).
# model_type: appended to the plot title, e.g. "RANGER"
plot_variable_importance <- function(variable, score, scale = TRUE, top_n = NA, model_type) {
  # rescale scores so they sum to 100 (percentage contribution)
  if (scale == TRUE) {
    score <- score / sum(score) * 100
  }
  # sort descending so the truncation below keeps the highest scores
  importance_df <- data.frame(variable, score) %>%
    arrange(desc(score))
  # keep only the top_n rows when requested; cap at the number of rows so an
  # oversized top_n no longer produces NA padding rows
  if (!is.na(top_n)) {
    importance_df <- importance_df[seq_len(min(top_n, nrow(importance_df))), ]
  }
  # plot, flipped so variable names read horizontally
  ggplot(data = importance_df, mapping = aes(x = reorder(variable, score), y = score)) +
    geom_bar(stat = "identity") +
    ggtitle(label = str_c("Variable Importance", model_type, sep = " - ")) +
    xlab("variable") +
    ylab("importance scores in %") +
    coord_flip()
}
| /src/utils.R | no_license | matescharnitzky/regression | R | false | false | 6,596 | r |
# Function: Format Columns
format_cols <- function(cols) {
# format cols
formatted_cols <- cols %>%
# to lower case
str_to_lower() %>%
# eliminate white space, special characters...etc.
str_replace_all(pattern = " ", replacement = "_") %>%
str_replace_all(pattern = "\\.", replacement = "_") %>%
str_replace_all(pattern = "\\,", replacement = "_") %>%
str_replace_all(pattern = "\\-", replacement = "_") %>%
str_replace_all(pattern = "\\(", replacement = "") %>%
str_replace_all(pattern = "\\)", replacement = "") %>%
str_replace_all(pattern = "\\%", replacement = "pct") %>%
str_replace_all(pattern = "\\$", replacement = "usd") %>%
str_replace_all(pattern = "\\?", replacement = "") %>%
str_replace_all(pattern = "\\!", replacement = "") %>%
str_replace_all(pattern = "\\#", replacement = "") %>%
# add an "_" if starting with a numeric
str_replace(pattern = "(^\\d+)(.*)", replacement = "col_\\1\\2")
# return formatted cols
return(formatted_cols)
}
# Function: Parse TXT File
parse_txt <- function(path) {
# reading txt by lines
mapping_txt <- readLines(path)
# parameters
n_row <- length(mapping_txt)
flag <- rep(1, n_row)
var_name <- vector(mode = "character", length = n_row)
var_desc <- vector(mode = "character", length = n_row)
level_code <- vector(mode = "character", length = n_row)
level_desc <- vector(mode = "character", length = n_row)
# parse rows
for (i in seq_along(mapping_txt)) {
# if blank row then skip
if (mapping_txt[i] %in% c("\t\t", "", "\t", " \t", " ", "\t\t\t", " ")) {
flag[i] <- 0
}
# parsing variable name and description
if (str_detect(mapping_txt[i], "^\\w*: .*")) {
#flag[i] <- 0
var_name[i] <- str_trim(str_split(string = mapping_txt[i], pattern = ":")[[1]][1])
var_desc[i] <- str_trim(str_split(string = mapping_txt[i], pattern = ":")[[1]][2])
# parsing variable levels
} else {
var_name[i] <- var_name[i - 1]
var_desc[i] <- var_desc[i - 1]
level_code[i] <- str_trim(str_split(string = mapping_txt[i], pattern = "\t")[[1]][1])
level_desc[i] <- str_split(string = mapping_txt[i], pattern = "\t")[[1]][2]
}
}
# bind columns into a dataframe
output_df <- bind_cols(flag = flag,
variable_name = var_name,
variable_description = var_desc,
level_code = level_code,
level_description = level_desc) %>%
filter(flag == 1) %>%
select(-flag)
# output
return(output_df)
}
# Function: recode columns
recode_columns <- function(mappin_df, col, col_name) {
# recode
if (col_name %in% unique(mapping_df[["variable_name"]])) {
recoded_col <- plyr::mapvalues(x = col,
from = mapping_df[mapping_df[["variable_name"]] == col_name, "level_code", drop = T],
to = mapping_df[mapping_df[["variable_name"]] == col_name, "level_description", drop = T],
warn_missing = F) %>%
as.character()
} else{
recoded_col <- col
}
# output
return(recoded_col)
}
# Function: Calculate R2
calc_r2 <- function(actual, prediction) {
# residual sum of squares
rss <- sum((prediction - actual)^2)
# total sum of squares
tss <- sum((actual - mean(actual))^2) ## total sum of squares
# r2
r2 <- 1 - rss/tss
# output
return(r2)
}
# Function: remove missing levels
remove_missing_levels <- function(model, test_df) {
# drop empty factor levels in test data
test_df <- test_df %>%
droplevels()
# do nothing if no factors are present
if (length(model[["xlevels"]]) == 0) {
return(test_df)
}
# extract model factors and levels
model_factors_df <- map2(.x = names(model$xlevels),
.y = model$xlevels,
.f = function(factor, levels) data.frame(factor, levels, stringsAsFactors = F)) %>%
bind_rows()
# select column names in test data that are factor predictors in trained model
predictors <- names(test_df[names(test_df) %in% model_factors_df[["factor"]]])
# for each factor predictor in your data, if the level is not in the model set the value to NA
for (i in seq_along(predictors)) {
# identify model levels
model_levels <- model_factors_df[model_factors_df[["factor"]] == predictors[i], "levels", drop = T]
# identify test levels
test_levels <- test_df[, predictors[i]]
# found flag
found_flag <- test_levels %in% model_levels
# if any missing, then set to NA
if (any(!found_flag)) {
# missing levels
missing_levels <- str_c(as.character(unique(test_levels[!found_flag])), collapse = ",")
# set to NA
test_df[!found_flag, predictors[i]] <- NA
# drop empty factor levels in test data
test_df <- test_df %>%
droplevels()
# message console
message(glue("In {predictors[i]}: setting missing level(s) {missing_levels} to NA"))
}
}
# output
return(test_df)
}
# Function: find optimal cp
find_optimal_cp <- function(cptable, cv_sd_flag) {
# define the minimum cross-validated error
index_min_error <- which.min(cptable[, 4])
# min error
min_error <- cptable[index_min_error, 4]
# min error sd
sd_min_error <- cptable[index_min_error, 5]
# optimum line
if (cv_sd_flag == 1) {
optimal_line <- min_error + sd_min_error
} else {
optimal_line <- min_error
}
# optimal cp index
optimal_cp_index <- which.min(abs((cptable[, 4] - optimal_line)))
# optimal cp
optimal_cp <- cptable[optimal_cp_index, 1]
# output
return(optimal_cp)
}
# Function: plot variable importance
# Plot a horizontal bar chart of variable importance scores.
#
# Arguments:
#   variable   - character vector of predictor names.
#   score      - numeric importance scores, parallel to `variable`.
#   scale      - if TRUE (default), rescale scores to percentages summing
#                to 100 so different models are comparable.
#   top_n      - keep only the `top_n` highest-scoring variables; NA
#                (default) keeps all of them.
#   model_type - label appended to the plot title (e.g. "RANGER").
#
# Returns a ggplot object: a flipped bar chart with the most important
# variable at the top.
plot_variable_importance <- function(variable, score, scale = TRUE, top_n = NA, model_type) {
  # express scores as a percentage of the total if requested
  # (isTRUE guards against non-logical input; original compared against T)
  if (isTRUE(scale)) {
    score <- score / sum(score) * 100
  }
  # assemble and sort by descending importance
  importance_df <- data.frame(variable, score) %>%
    arrange(desc(score))
  # optionally keep only the strongest predictors; head() never indexes
  # past the end (the original `[1:top_n, ]` produced NA rows when
  # top_n > nrow(importance_df))
  if (!is.na(top_n)) {
    importance_df <- head(importance_df, top_n)
  }
  # plot, reordering so bars appear in descending importance after the flip
  ggplot(data = importance_df, mapping = aes(x = reorder(variable, score), y = score)) +
    geom_bar(stat = "identity") +
    ggtitle(label = str_c("Variable Importance", model_type, sep = " - ")) +
    xlab("variable") +
    ylab("importance scores in %") +
    coord_flip()
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aasim_classes.r
\name{addPerson.sim}
\alias{addPerson.sim}
\title{Add Person to persons list in a simulation}
\usage{
addPerson.sim(sim, name, initials, curAge, gender, retireAge, mort.factor = 1)
}
\arguments{
\item{sim}{Object of type sim (simulation)}
\item{name}{Name of person}
\item{initials}{Initials or short name, useful for display}
\item{curAge}{Current age (simulation assumes person just turned this age)}
\item{gender}{'M' or 'Male' or 'F' or 'Female'}
\item{retireAge}{Retirement age.}
\item{mort.factor}{Mortality factor, default = 1. This is multiplied by each mortality rate. Values >1 decrease life expectancy.}
}
\value{
sim object with person added to simulation
}
\description{
Add Person to persons list in a simulation
}
\examples{
\dontrun{sim1<-addPerson.sim(sim, name, initials, curAge, gender, retireAge, mort.factor)}
\dontrun{sim1<-addPerson.sim(sim1,"Rex Macey","RM",56,"M",65,1.0)}
}
| /man/addPerson.sim.Rd | no_license | ihavenoahidea/aasim | R | false | true | 1,001 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aasim_classes.r
\name{addPerson.sim}
\alias{addPerson.sim}
\title{Add Person to persons list in a simulation}
\usage{
addPerson.sim(sim, name, initials, curAge, gender, retireAge, mort.factor = 1)
}
\arguments{
\item{sim}{Object of type sim (simulation)}
\item{name}{Name of person}
\item{initials}{Initials or short name, useful for display}
\item{curAge}{Current age (simulation assumes person just turned this age)}
\item{gender}{'M' or 'Male' or 'F' or 'Female'}
\item{retireAge}{Retirement age.}
\item{mort.factor}{Mortality factor, default = 1. This is multiplied by each mortality rate. Values >1 decrease life expectancy.}
}
\value{
sim object with person added to simulation
}
\description{
Add Person to persons list in a simulation
}
\examples{
\dontrun{sim1<-addPerson.sim(sim, name, initials, curAge, gender, retireAge, mort.factor)}
\dontrun{sim1<-addPerson.sim(sim1,"Rex Macey","RM",56,"M",65,1.0)}
}
|
##
## MCMC sampler for orthogonal data augmentation
##
## MCMC sampler for Bayesian model averaging with orthogonal (PCA) data
## augmentation across t spatially correlated studies.
##
## Arguments:
##   Y.list  - list of length t; observed response vector for each study.
##   X.o     - full design matrix (m x p); replaced internally by its
##             principal-component scores.
##   H.list  - list of length t; indices (into 1:m) of the observed rows
##             for each study.
##   params  - list with n.mcmc, alpha, pi.prior (inclusion probability),
##             lambda (ridge prior diagonal, length p), alpha.eta/beta.eta
##             (inverse-gamma prior on sigma.squared.eta), phi.lower and
##             phi.upper (uniform prior bounds on the range parameter),
##             and D (m x m distance matrix).
##   tune    - list with phi.tune, sigma.eta.tune (random-walk sds) and
##             gamma.tune (per-element flip threshold for gamma proposals).
##   epsilon - currently unused; retained for interface compatibility.
##
## Returns a list of posterior draws (gamma.save, sigma.squared.save,
## beta.save, phi.save, sigma.squared.eta.save, Y.pred, rho.save,
## delta.save) and MH acceptance rates (eta.accept, phi.accept,
## gamma.accept).
mcmc.pcaMA <- function(Y.list, X.o, H.list, params, tune, epsilon = 0.001){
  ##
  ## functions and subroutines
  ##
  ## log Metropolis-Hastings kernel for the spatial covariance updates
  ## (sigma.squared.eta and phi) of study i
  make.mh <- function(i, sigma.squared, Sigma.t, Sigma.t.inv, gamma, beta.tilde.gamma){
    if(sum(gamma[[i]]) == 0){
      - n.o[i] / 2 * sigma.squared - 1 / 2 * determinant(Sigma.t[[i]], logarithm = TRUE)[1]$mod[1] - 1 / (2 * sigma.squared) * t(Y.list[[i]]) %*% Sigma.t.inv[[i]] %*% (Y.list[[i]])
    } else {
      - n.o[i] / 2 * sigma.squared - 1 / 2 * determinant(Sigma.t[[i]], logarithm = TRUE)[1]$mod[1] - 1 / (2 * sigma.squared) * t(Y.list[[i]] - HX.o.list[[i]][, gamma[[i]] == 1] %*% beta.tilde.gamma[[i]]) %*% Sigma.t.inv[[i]] %*% (Y.list[[i]] - HX.o.list[[i]][, gamma[[i]] == 1] %*% beta.tilde.gamma[[i]])
    }
  }
  ## log Metropolis-Hastings kernel for the inclusion indicators gamma of
  ## study i (Bernoulli(pi.prior) prior on each indicator)
  make.gamma.mh <- function(i, gamma, beta.hat, Sigma.full.inv, Y.c, tX.Sigma.full.inv.X, tX.Sigma.full.inv){
    sum(gamma[[i]] * log(pi.prior) + (1 - gamma[[i]]) * log(1 - pi.prior)) - 1 / (2 * sigma.squared) * (t(gamma[[i]] * beta.hat[[i]]) %*% tX.Sigma.full.inv.X %*% (gamma[[i]] * beta.hat[[i]]) - 2 * t(gamma[[i]] * beta.hat[[i]]) %*% tX.Sigma.full.inv %*% Y.c[[i]] + t(gamma[[i]] * beta.hat[[i]]) %*% Lambda %*% (gamma[[i]] * beta.hat[[i]]))
  }
  ##
  ## initialize fixed values
  ##
  n.mcmc <- params$n.mcmc
  alpha <- params$alpha
  pi.prior <- params$pi.prior
  lambda <- params$lambda
  alpha.eta <- params$alpha.eta
  beta.eta <- params$beta.eta
  phi.lower <- params$phi.lower
  phi.upper <- params$phi.upper
  D <- params$D
  phi.tune <- tune$phi.tune
  sigma.eta.tune <- tune$sigma.eta.tune
  ## FIX: was assigned to 'gama.tune' (typo) while the gamma proposal loop
  ## below reads 'gamma.tune', which crashed unless a global happened to
  ## exist with that name
  gamma.tune <- tune$gamma.tune
  ## NOTE: 't' shadows base::t as a value; t(X) calls still resolve to the
  ## base function because R skips non-function bindings in call position
  t <- length(Y.list)
  ## orthogonalize the design via principal components
  X.pca <- prcomp(X.o)
  X <- X.pca$x
  tX <- t(X)
  delta <- X.pca$sdev^2
  p <- dim(X)[2]
  m <- dim(X)[1]
  n.o <- vector(length = t)  # observed sample size per study
  n.u <- vector(length = t)  # number of unobserved (imputed) sites per study
  for(i in 1:t){
    n.o[i] <- length(Y.list[[i]])
    n.u[i] <- m - n.o[i]
  }
  I.full <- diag(m)
  I.o <- vector('list', length = t)
  I.u <- vector('list', length = t)
  for(i in 1:t){
    I.o[[i]] <- diag(n.o[i])
    I.u[[i]] <- diag(n.u[i])
  }
  ##
  ## initialize random values
  ##
  sigma.squared <- 1
  gamma <- vector('list', length = t)
  for(i in 1:t){
    gamma[[i]] <- rbinom(p, 1, pi.prior)
  }
  gamma.star <- gamma
  Lambda <- diag(lambda)
  Lambda.gamma <- vector('list', length = t)
  for(i in 1:t){
    if(sum(gamma[[i]]) == 0){
      Lambda.gamma[[i]] <- 0
    } else {
      Lambda.gamma[[i]] <- diag(lambda[gamma[[i]] == 1])
    }
  }
  ## observed / unobserved row indices and per-study design sub-matrices
  H.u.list <- vector('list', length = t)
  for(i in 1:t){
    H.u.list[[i]] <- (1:m)[ - H.list[[i]]]
  }
  HX.o.list <- vector('list', length = t)
  tHX.o.list <- vector('list', length = t)
  HX.u.list <- vector('list', length = t)
  tHX.u.list <- vector('list', length = t)
  for(i in 1:t){
    HX.o.list[[i]] <- X[H.list[[i]], ]
    tHX.o.list[[i]] <- t(HX.o.list[[i]])
    HX.u.list[[i]] <- X[H.u.list[[i]], ]
    tHX.u.list[[i]] <- t(HX.u.list[[i]])
  }
  ## initialize spatial covariance (exponential correlation with range phi)
  sigma.squared.eta <- 1 / rgamma(1, alpha.eta, beta.eta)
  phi <- runif(1, phi.lower, phi.upper)
  D.t <- vector('list', length = t)
  Sigma.t <- vector('list', length = t)
  Sigma.t.inv <- vector('list', length = t)
  Sigma.t.star <- vector('list', length = t)
  Sigma.t.inv.star <- vector('list', length = t)
  for(i in 1:t){
    D.t[[i]] <- D[H.list[[i]], H.list[[i]]]
    Sigma.t[[i]] <- I.o[[i]] + sigma.squared.eta * exp( - D.t[[i]] / phi)
    Sigma.t.inv[[i]] <- solve(Sigma.t[[i]])
  }
  Sigma.full <- I.full + sigma.squared.eta * exp( - D / phi)
  Sigma.full.inv <- solve(Sigma.full)
  tX.Sigma.full.inv.X <- tX %*% Sigma.full.inv %*% X
  tX.Sigma.full.inv <- tX %*% Sigma.full.inv
  ## initialize Y.u and beta.tilde.gamma
  Y.u <- vector('list', length = t)
  projectXontoY <- solve(t(X) %*% Sigma.full.inv %*% X) %*% t(X) %*% Sigma.full.inv
  beta.tilde.gamma <- vector('list', length = t)
  for(i in 1:t){
    if(sum(gamma[[i]]) == 0){
      ## FIX: was left NULL, which broke the sigma.squared initialization
      ## below whenever a study started with no variables included
      beta.tilde.gamma[[i]] <- 0
    } else {
      ## FIX: include Sigma.t.inv in the cross-product with Y so the
      ## initialization matches the conditional update inside the sampler
      beta.tilde.gamma[[i]] <- solve(1 / sigma.squared * tHX.o.list[[i]][gamma[[i]] == 1, ] %*% Sigma.t.inv[[i]] %*% HX.o.list[[i]][, gamma[[i]] == 1] + 1 / sigma.squared * Lambda.gamma[[i]]) %*% tHX.o.list[[i]][gamma[[i]] == 1, ] %*% Sigma.t.inv[[i]] %*% Y.list[[i]]
    }
  }
  ## initialize sigma.squared from its full conditional
  tmp <- vector(length = t)
  for(i in 1:t){
    if(sum(gamma[[i]]) == 0){
      tmp[i] <- t(Y.list[[i]]) %*% Sigma.t.inv[[i]] %*% (Y.list[[i]]) + t(beta.tilde.gamma[[i]]) %*% Lambda.gamma[[i]] %*% beta.tilde.gamma[[i]]
    } else {
      tmp[i] <- t(Y.list[[i]] - HX.o.list[[i]][, gamma[[i]] == 1] %*% beta.tilde.gamma[[i]]) %*% Sigma.t.inv[[i]] %*% (Y.list[[i]] - HX.o.list[[i]][, gamma[[i]] == 1] %*% beta.tilde.gamma[[i]]) + t(beta.tilde.gamma[[i]]) %*% Lambda.gamma[[i]] %*% beta.tilde.gamma[[i]]
    }
  }
  sigma.squared <- 1 / rgamma(1, (sum(n.o) + sum(unlist(gamma))) / 2, sum(tmp) / 2)
  ##
  ## setup save variables
  ##
  gamma.save <- array(dim = c(p, t, n.mcmc))
  sigma.squared.save <- vector(length = n.mcmc)
  sigma.squared.eta.save <- vector(length = n.mcmc)
  phi.save <- vector(length = n.mcmc)
  beta.save <- array(dim = c(p, t, n.mcmc))
  rho.save <- array(dim = c(p, t, n.mcmc))  # kept for output compatibility; never filled
  Y.pred <- array(dim = c(m, t, n.mcmc))
  delta.save <- delta
  phi.accept <- 0
  eta.accept <- 0
  gamma.accept <- 0
  ##
  ## begin mcmc
  ##
  for(k in 1:n.mcmc){
    cat(k, ' ')
    ##
    ## sample Y.u: conditional mean of the unobserved responses given the
    ## observed ones (kriging-style predictor)
    ##
    for(i in 1:t){
      if(sum(gamma[[i]]) == 0){
        Y.u[[i]] <- Sigma.full[H.u.list[[i]], H.list[[i]]] %*% Sigma.t.inv[[i]] %*% Y.list[[i]]
      } else {
        Y.u[[i]] <- HX.u.list[[i]][, gamma[[i]] == 1] %*% beta.tilde.gamma[[i]] + Sigma.full[H.u.list[[i]], H.list[[i]]] %*% Sigma.t.inv[[i]] %*% (Y.list[[i]] - HX.o.list[[i]][, gamma[[i]] == 1] %*% beta.tilde.gamma[[i]])
      }
    }
    ## completed response vectors (observed + imputed)
    Y.c <- vector('list', length = t)
    for(i in 1:t){
      Y.c[[i]] <- vector(length = m)
      Y.c[[i]][H.list[[i]]] <- Y.list[[i]]
      Y.c[[i]][H.u.list[[i]]] <- Y.u[[i]]
    }
    ## GLS projection of the completed data onto the PC design
    beta.hat <- vector('list', length = t)
    projectXontoY <- solve(t(X) %*% Sigma.full.inv %*% X) %*% t(X) %*% Sigma.full.inv
    for(i in 1:t){
      beta.hat[[i]] <- projectXontoY %*% Y.c[[i]]
    }
    ##
    ## sample sigma.squared
    ##
    tmp <- vector(length = t)
    for(i in 1:t){
      ## FIX: condition was 'sum(gamma[[i]] == 0)' (counts zero entries,
      ## truthy whenever any indicator is 0), which sent non-empty models
      ## down the null-model branch
      if(sum(gamma[[i]]) == 0){
        tmp[i] <- t(Y.list[[i]]) %*% Sigma.t.inv[[i]] %*% (Y.list[[i]]) + t(beta.tilde.gamma[[i]]) %*% Lambda.gamma[[i]] %*% beta.tilde.gamma[[i]]
      } else {
        tmp[i] <- t(Y.list[[i]] - HX.o.list[[i]][, gamma[[i]] == 1] %*% beta.tilde.gamma[[i]]) %*% Sigma.t.inv[[i]] %*% (Y.list[[i]] - HX.o.list[[i]][, gamma[[i]] == 1] %*% beta.tilde.gamma[[i]]) + t(beta.tilde.gamma[[i]]) %*% Lambda.gamma[[i]] %*% beta.tilde.gamma[[i]]
      }
    }
    sigma.squared <- 1 / rgamma(1, (sum(n.o) + sum(unlist(gamma))) / 2, sum(tmp) / 2)
    ##
    ## sample gamma: independently propose flipping each indicator, then
    ## accept/reject the joint proposal with a Metropolis-Hastings step
    ##
    for(i in 1:t){
      for(j in 1:p){
        if(runif(1) > gamma.tune){
          if(gamma[[i]][j] == 0){
            gamma.star[[i]][j] <- 1
          } else {
            gamma.star[[i]][j] <- 0
          }
        }
      }
    }
    mh.gamma.1 <- sum(sapply(1:t, make.gamma.mh, gamma = gamma.star, beta.hat = beta.hat, Sigma.full.inv = Sigma.full.inv, Y.c = Y.c, tX.Sigma.full.inv.X = tX.Sigma.full.inv.X, tX.Sigma.full.inv = tX.Sigma.full.inv))
    mh.gamma.2 <- sum(sapply(1:t, make.gamma.mh, gamma = gamma, beta.hat = beta.hat, Sigma.full.inv = Sigma.full.inv, Y.c = Y.c, tX.Sigma.full.inv.X = tX.Sigma.full.inv.X, tX.Sigma.full.inv = tX.Sigma.full.inv))
    mh.gamma <- exp(mh.gamma.1 - mh.gamma.2)
    if(mh.gamma > runif(1)){
      gamma <- gamma.star
      gamma.accept <- 1 / n.mcmc + gamma.accept
    }
    ## update the prior precision restricted to the included variables
    for(i in 1:t){
      if(sum(gamma[[i]]) == 0){
        Lambda.gamma[[i]] <- 0
      } else {
        Lambda.gamma[[i]] <- diag(lambda[gamma[[i]] == 1])
      }
    }
    ##
    ## sample beta.tilde.gamma (conditional posterior mean of the included
    ## coefficients)
    ##
    for(i in 1:t){
      if(sum(gamma[[i]]) == 0){
        beta.tilde.gamma[[i]] <- 0
      } else {
        beta.tilde.gamma[[i]] <- solve(1 / sigma.squared * tHX.o.list[[i]][gamma[[i]] == 1, ] %*% Sigma.t.inv[[i]] %*% HX.o.list[[i]][, gamma[[i]] == 1] + 1 / sigma.squared * Lambda.gamma[[i]]) %*% tHX.o.list[[i]][gamma[[i]] == 1, ] %*% Sigma.t.inv[[i]] %*% Y.list[[i]]
      }
    }
    ##
    ## sample sigma.squared.eta (random-walk MH, truncated at zero)
    ##
    sigma.squared.eta.star <- rnorm(1, sigma.squared.eta, sigma.eta.tune)
    if(sigma.squared.eta.star > 0){
      for(i in 1:t){
        Sigma.t.star[[i]] <- I.o[[i]] + sigma.squared.eta.star * exp( - D.t[[i]] / phi)
        Sigma.t.inv.star[[i]] <- solve(Sigma.t.star[[i]])
      }
      mh.eta.1 <- sum(sapply(1:t, make.mh, sigma.squared = sigma.squared, Sigma.t = Sigma.t.star, Sigma.t.inv = Sigma.t.inv.star, gamma = gamma, beta.tilde.gamma = beta.tilde.gamma))
      mh.eta.2 <- sum(sapply(1:t, make.mh, sigma.squared = sigma.squared, Sigma.t = Sigma.t, Sigma.t.inv = Sigma.t.inv, gamma = gamma, beta.tilde.gamma = beta.tilde.gamma))
      mh.eta <- exp(mh.eta.1 - mh.eta.2)
      if(mh.eta > runif(1)){
        sigma.squared.eta <- sigma.squared.eta.star
        Sigma.t <- Sigma.t.star
        Sigma.t.inv <- Sigma.t.inv.star
        eta.accept <- 1 / n.mcmc + eta.accept
      }
    }
    ##
    ## sample phi (random-walk MH, truncated to (phi.lower, phi.upper))
    ##
    phi.star <- rnorm(1, phi, phi.tune)
    if(phi.star > phi.lower && phi.star < phi.upper){
      for(i in 1:t){
        Sigma.t.star[[i]] <- I.o[[i]] + sigma.squared.eta * exp( - D.t[[i]] / phi.star)
        Sigma.t.inv.star[[i]] <- solve(Sigma.t.star[[i]])
      }
      mh.phi.1 <- sum(sapply(1:t, make.mh, sigma.squared = sigma.squared, Sigma.t = Sigma.t.star, Sigma.t.inv = Sigma.t.inv.star, gamma = gamma, beta.tilde.gamma = beta.tilde.gamma))
      mh.phi.2 <- sum(sapply(1:t, make.mh, sigma.squared = sigma.squared, Sigma.t = Sigma.t, Sigma.t.inv = Sigma.t.inv, gamma = gamma, beta.tilde.gamma = beta.tilde.gamma))
      mh.phi <- exp(mh.phi.1 - mh.phi.2)
      if(mh.phi > runif(1)){
        phi <- phi.star
        Sigma.t <- Sigma.t.star
        Sigma.t.inv <- Sigma.t.inv.star
        phi.accept <- 1 / n.mcmc + phi.accept
      }
    }
    ##
    ## rebuild the full covariance and its cached projections
    ##
    ## FIX: was 'I.full * sigma.squared.eta * exp(...)', inconsistent with
    ## the initialization above and with the Sigma.t construction, which
    ## both use 'I + sigma.squared.eta * correlation'
    Sigma.full <- I.full + sigma.squared.eta * exp( - D / phi)
    Sigma.full.inv <- solve(Sigma.full)
    tX.Sigma.full.inv.X <- tX %*% Sigma.full.inv %*% X
    tX.Sigma.full.inv <- tX %*% Sigma.full.inv
    ##
    ## save samples
    ##
    Y.pred[, , k] <- matrix(unlist(Y.c), nrow = m, ncol = t, byrow = FALSE)
    gamma.save[, , k] <- matrix(unlist(gamma), nrow = p, ncol = t, byrow = FALSE)
    sigma.squared.save[k] <- sigma.squared
    sigma.squared.eta.save[k] <- sigma.squared.eta
    phi.save[k] <- phi
    beta.save[, , k] <- matrix(unlist(beta.hat), nrow = p, ncol = t, byrow = FALSE)
  }
  list(gamma.save = gamma.save, sigma.squared.save = sigma.squared.save, beta.save = beta.save, rho.save = rho.save, delta.save = delta.save, Y.pred = Y.pred, eta.accept = eta.accept, phi.accept = phi.accept, gamma.accept = gamma.accept, sigma.squared.eta.save = sigma.squared.eta.save, phi.save = phi.save)
}
| /modelAveraging/mcmc.pcaModelAveraging.spatial.R | no_license | jtipton25/1dSpatialSim | R | false | false | 12,323 | r | ##
## MCMC sampler for orthogonal data augmentation
##
## MCMC sampler for Bayesian model averaging with orthogonal (PCA) data
## augmentation across t spatially correlated studies.
##
## Arguments:
##   Y.list  - list of length t; observed response vector for each study.
##   X.o     - full design matrix (m x p); replaced internally by its
##             principal-component scores.
##   H.list  - list of length t; indices (into 1:m) of the observed rows
##             for each study.
##   params  - list with n.mcmc, alpha, pi.prior (inclusion probability),
##             lambda (ridge prior diagonal, length p), alpha.eta/beta.eta
##             (inverse-gamma prior on sigma.squared.eta), phi.lower and
##             phi.upper (uniform prior bounds on the range parameter),
##             and D (m x m distance matrix).
##   tune    - list with phi.tune, sigma.eta.tune (random-walk sds) and
##             gamma.tune (per-element flip threshold for gamma proposals).
##   epsilon - currently unused; retained for interface compatibility.
##
## Returns a list of posterior draws (gamma.save, sigma.squared.save,
## beta.save, phi.save, sigma.squared.eta.save, Y.pred, rho.save,
## delta.save) and MH acceptance rates (eta.accept, phi.accept,
## gamma.accept).
mcmc.pcaMA <- function(Y.list, X.o, H.list, params, tune, epsilon = 0.001){
  ##
  ## functions and subroutines
  ##
  ## log Metropolis-Hastings kernel for the spatial covariance updates
  ## (sigma.squared.eta and phi) of study i
  make.mh <- function(i, sigma.squared, Sigma.t, Sigma.t.inv, gamma, beta.tilde.gamma){
    if(sum(gamma[[i]]) == 0){
      - n.o[i] / 2 * sigma.squared - 1 / 2 * determinant(Sigma.t[[i]], logarithm = TRUE)[1]$mod[1] - 1 / (2 * sigma.squared) * t(Y.list[[i]]) %*% Sigma.t.inv[[i]] %*% (Y.list[[i]])
    } else {
      - n.o[i] / 2 * sigma.squared - 1 / 2 * determinant(Sigma.t[[i]], logarithm = TRUE)[1]$mod[1] - 1 / (2 * sigma.squared) * t(Y.list[[i]] - HX.o.list[[i]][, gamma[[i]] == 1] %*% beta.tilde.gamma[[i]]) %*% Sigma.t.inv[[i]] %*% (Y.list[[i]] - HX.o.list[[i]][, gamma[[i]] == 1] %*% beta.tilde.gamma[[i]])
    }
  }
  ## log Metropolis-Hastings kernel for the inclusion indicators gamma of
  ## study i (Bernoulli(pi.prior) prior on each indicator)
  make.gamma.mh <- function(i, gamma, beta.hat, Sigma.full.inv, Y.c, tX.Sigma.full.inv.X, tX.Sigma.full.inv){
    sum(gamma[[i]] * log(pi.prior) + (1 - gamma[[i]]) * log(1 - pi.prior)) - 1 / (2 * sigma.squared) * (t(gamma[[i]] * beta.hat[[i]]) %*% tX.Sigma.full.inv.X %*% (gamma[[i]] * beta.hat[[i]]) - 2 * t(gamma[[i]] * beta.hat[[i]]) %*% tX.Sigma.full.inv %*% Y.c[[i]] + t(gamma[[i]] * beta.hat[[i]]) %*% Lambda %*% (gamma[[i]] * beta.hat[[i]]))
  }
  ##
  ## initialize fixed values
  ##
  n.mcmc <- params$n.mcmc
  alpha <- params$alpha
  pi.prior <- params$pi.prior
  lambda <- params$lambda
  alpha.eta <- params$alpha.eta
  beta.eta <- params$beta.eta
  phi.lower <- params$phi.lower
  phi.upper <- params$phi.upper
  D <- params$D
  phi.tune <- tune$phi.tune
  sigma.eta.tune <- tune$sigma.eta.tune
  ## FIX: was assigned to 'gama.tune' (typo) while the gamma proposal loop
  ## below reads 'gamma.tune', which crashed unless a global happened to
  ## exist with that name
  gamma.tune <- tune$gamma.tune
  ## NOTE: 't' shadows base::t as a value; t(X) calls still resolve to the
  ## base function because R skips non-function bindings in call position
  t <- length(Y.list)
  ## orthogonalize the design via principal components
  X.pca <- prcomp(X.o)
  X <- X.pca$x
  tX <- t(X)
  delta <- X.pca$sdev^2
  p <- dim(X)[2]
  m <- dim(X)[1]
  n.o <- vector(length = t)  # observed sample size per study
  n.u <- vector(length = t)  # number of unobserved (imputed) sites per study
  for(i in 1:t){
    n.o[i] <- length(Y.list[[i]])
    n.u[i] <- m - n.o[i]
  }
  I.full <- diag(m)
  I.o <- vector('list', length = t)
  I.u <- vector('list', length = t)
  for(i in 1:t){
    I.o[[i]] <- diag(n.o[i])
    I.u[[i]] <- diag(n.u[i])
  }
  ##
  ## initialize random values
  ##
  sigma.squared <- 1
  gamma <- vector('list', length = t)
  for(i in 1:t){
    gamma[[i]] <- rbinom(p, 1, pi.prior)
  }
  gamma.star <- gamma
  Lambda <- diag(lambda)
  Lambda.gamma <- vector('list', length = t)
  for(i in 1:t){
    if(sum(gamma[[i]]) == 0){
      Lambda.gamma[[i]] <- 0
    } else {
      Lambda.gamma[[i]] <- diag(lambda[gamma[[i]] == 1])
    }
  }
  ## observed / unobserved row indices and per-study design sub-matrices
  H.u.list <- vector('list', length = t)
  for(i in 1:t){
    H.u.list[[i]] <- (1:m)[ - H.list[[i]]]
  }
  HX.o.list <- vector('list', length = t)
  tHX.o.list <- vector('list', length = t)
  HX.u.list <- vector('list', length = t)
  tHX.u.list <- vector('list', length = t)
  for(i in 1:t){
    HX.o.list[[i]] <- X[H.list[[i]], ]
    tHX.o.list[[i]] <- t(HX.o.list[[i]])
    HX.u.list[[i]] <- X[H.u.list[[i]], ]
    tHX.u.list[[i]] <- t(HX.u.list[[i]])
  }
  ## initialize spatial covariance (exponential correlation with range phi)
  sigma.squared.eta <- 1 / rgamma(1, alpha.eta, beta.eta)
  phi <- runif(1, phi.lower, phi.upper)
  D.t <- vector('list', length = t)
  Sigma.t <- vector('list', length = t)
  Sigma.t.inv <- vector('list', length = t)
  Sigma.t.star <- vector('list', length = t)
  Sigma.t.inv.star <- vector('list', length = t)
  for(i in 1:t){
    D.t[[i]] <- D[H.list[[i]], H.list[[i]]]
    Sigma.t[[i]] <- I.o[[i]] + sigma.squared.eta * exp( - D.t[[i]] / phi)
    Sigma.t.inv[[i]] <- solve(Sigma.t[[i]])
  }
  Sigma.full <- I.full + sigma.squared.eta * exp( - D / phi)
  Sigma.full.inv <- solve(Sigma.full)
  tX.Sigma.full.inv.X <- tX %*% Sigma.full.inv %*% X
  tX.Sigma.full.inv <- tX %*% Sigma.full.inv
  ## initialize Y.u and beta.tilde.gamma
  Y.u <- vector('list', length = t)
  projectXontoY <- solve(t(X) %*% Sigma.full.inv %*% X) %*% t(X) %*% Sigma.full.inv
  beta.tilde.gamma <- vector('list', length = t)
  for(i in 1:t){
    if(sum(gamma[[i]]) == 0){
      ## FIX: was left NULL, which broke the sigma.squared initialization
      ## below whenever a study started with no variables included
      beta.tilde.gamma[[i]] <- 0
    } else {
      ## FIX: include Sigma.t.inv in the cross-product with Y so the
      ## initialization matches the conditional update inside the sampler
      beta.tilde.gamma[[i]] <- solve(1 / sigma.squared * tHX.o.list[[i]][gamma[[i]] == 1, ] %*% Sigma.t.inv[[i]] %*% HX.o.list[[i]][, gamma[[i]] == 1] + 1 / sigma.squared * Lambda.gamma[[i]]) %*% tHX.o.list[[i]][gamma[[i]] == 1, ] %*% Sigma.t.inv[[i]] %*% Y.list[[i]]
    }
  }
  ## initialize sigma.squared from its full conditional
  tmp <- vector(length = t)
  for(i in 1:t){
    if(sum(gamma[[i]]) == 0){
      tmp[i] <- t(Y.list[[i]]) %*% Sigma.t.inv[[i]] %*% (Y.list[[i]]) + t(beta.tilde.gamma[[i]]) %*% Lambda.gamma[[i]] %*% beta.tilde.gamma[[i]]
    } else {
      tmp[i] <- t(Y.list[[i]] - HX.o.list[[i]][, gamma[[i]] == 1] %*% beta.tilde.gamma[[i]]) %*% Sigma.t.inv[[i]] %*% (Y.list[[i]] - HX.o.list[[i]][, gamma[[i]] == 1] %*% beta.tilde.gamma[[i]]) + t(beta.tilde.gamma[[i]]) %*% Lambda.gamma[[i]] %*% beta.tilde.gamma[[i]]
    }
  }
  sigma.squared <- 1 / rgamma(1, (sum(n.o) + sum(unlist(gamma))) / 2, sum(tmp) / 2)
  ##
  ## setup save variables
  ##
  gamma.save <- array(dim = c(p, t, n.mcmc))
  sigma.squared.save <- vector(length = n.mcmc)
  sigma.squared.eta.save <- vector(length = n.mcmc)
  phi.save <- vector(length = n.mcmc)
  beta.save <- array(dim = c(p, t, n.mcmc))
  rho.save <- array(dim = c(p, t, n.mcmc))  # kept for output compatibility; never filled
  Y.pred <- array(dim = c(m, t, n.mcmc))
  delta.save <- delta
  phi.accept <- 0
  eta.accept <- 0
  gamma.accept <- 0
  ##
  ## begin mcmc
  ##
  for(k in 1:n.mcmc){
    cat(k, ' ')
    ##
    ## sample Y.u: conditional mean of the unobserved responses given the
    ## observed ones (kriging-style predictor)
    ##
    for(i in 1:t){
      if(sum(gamma[[i]]) == 0){
        Y.u[[i]] <- Sigma.full[H.u.list[[i]], H.list[[i]]] %*% Sigma.t.inv[[i]] %*% Y.list[[i]]
      } else {
        Y.u[[i]] <- HX.u.list[[i]][, gamma[[i]] == 1] %*% beta.tilde.gamma[[i]] + Sigma.full[H.u.list[[i]], H.list[[i]]] %*% Sigma.t.inv[[i]] %*% (Y.list[[i]] - HX.o.list[[i]][, gamma[[i]] == 1] %*% beta.tilde.gamma[[i]])
      }
    }
    ## completed response vectors (observed + imputed)
    Y.c <- vector('list', length = t)
    for(i in 1:t){
      Y.c[[i]] <- vector(length = m)
      Y.c[[i]][H.list[[i]]] <- Y.list[[i]]
      Y.c[[i]][H.u.list[[i]]] <- Y.u[[i]]
    }
    ## GLS projection of the completed data onto the PC design
    beta.hat <- vector('list', length = t)
    projectXontoY <- solve(t(X) %*% Sigma.full.inv %*% X) %*% t(X) %*% Sigma.full.inv
    for(i in 1:t){
      beta.hat[[i]] <- projectXontoY %*% Y.c[[i]]
    }
    ##
    ## sample sigma.squared
    ##
    tmp <- vector(length = t)
    for(i in 1:t){
      ## FIX: condition was 'sum(gamma[[i]] == 0)' (counts zero entries,
      ## truthy whenever any indicator is 0), which sent non-empty models
      ## down the null-model branch
      if(sum(gamma[[i]]) == 0){
        tmp[i] <- t(Y.list[[i]]) %*% Sigma.t.inv[[i]] %*% (Y.list[[i]]) + t(beta.tilde.gamma[[i]]) %*% Lambda.gamma[[i]] %*% beta.tilde.gamma[[i]]
      } else {
        tmp[i] <- t(Y.list[[i]] - HX.o.list[[i]][, gamma[[i]] == 1] %*% beta.tilde.gamma[[i]]) %*% Sigma.t.inv[[i]] %*% (Y.list[[i]] - HX.o.list[[i]][, gamma[[i]] == 1] %*% beta.tilde.gamma[[i]]) + t(beta.tilde.gamma[[i]]) %*% Lambda.gamma[[i]] %*% beta.tilde.gamma[[i]]
      }
    }
    sigma.squared <- 1 / rgamma(1, (sum(n.o) + sum(unlist(gamma))) / 2, sum(tmp) / 2)
    ##
    ## sample gamma: independently propose flipping each indicator, then
    ## accept/reject the joint proposal with a Metropolis-Hastings step
    ##
    for(i in 1:t){
      for(j in 1:p){
        if(runif(1) > gamma.tune){
          if(gamma[[i]][j] == 0){
            gamma.star[[i]][j] <- 1
          } else {
            gamma.star[[i]][j] <- 0
          }
        }
      }
    }
    mh.gamma.1 <- sum(sapply(1:t, make.gamma.mh, gamma = gamma.star, beta.hat = beta.hat, Sigma.full.inv = Sigma.full.inv, Y.c = Y.c, tX.Sigma.full.inv.X = tX.Sigma.full.inv.X, tX.Sigma.full.inv = tX.Sigma.full.inv))
    mh.gamma.2 <- sum(sapply(1:t, make.gamma.mh, gamma = gamma, beta.hat = beta.hat, Sigma.full.inv = Sigma.full.inv, Y.c = Y.c, tX.Sigma.full.inv.X = tX.Sigma.full.inv.X, tX.Sigma.full.inv = tX.Sigma.full.inv))
    mh.gamma <- exp(mh.gamma.1 - mh.gamma.2)
    if(mh.gamma > runif(1)){
      gamma <- gamma.star
      gamma.accept <- 1 / n.mcmc + gamma.accept
    }
    ## update the prior precision restricted to the included variables
    for(i in 1:t){
      if(sum(gamma[[i]]) == 0){
        Lambda.gamma[[i]] <- 0
      } else {
        Lambda.gamma[[i]] <- diag(lambda[gamma[[i]] == 1])
      }
    }
    ##
    ## sample beta.tilde.gamma (conditional posterior mean of the included
    ## coefficients)
    ##
    for(i in 1:t){
      if(sum(gamma[[i]]) == 0){
        beta.tilde.gamma[[i]] <- 0
      } else {
        beta.tilde.gamma[[i]] <- solve(1 / sigma.squared * tHX.o.list[[i]][gamma[[i]] == 1, ] %*% Sigma.t.inv[[i]] %*% HX.o.list[[i]][, gamma[[i]] == 1] + 1 / sigma.squared * Lambda.gamma[[i]]) %*% tHX.o.list[[i]][gamma[[i]] == 1, ] %*% Sigma.t.inv[[i]] %*% Y.list[[i]]
      }
    }
    ##
    ## sample sigma.squared.eta (random-walk MH, truncated at zero)
    ##
    sigma.squared.eta.star <- rnorm(1, sigma.squared.eta, sigma.eta.tune)
    if(sigma.squared.eta.star > 0){
      for(i in 1:t){
        Sigma.t.star[[i]] <- I.o[[i]] + sigma.squared.eta.star * exp( - D.t[[i]] / phi)
        Sigma.t.inv.star[[i]] <- solve(Sigma.t.star[[i]])
      }
      mh.eta.1 <- sum(sapply(1:t, make.mh, sigma.squared = sigma.squared, Sigma.t = Sigma.t.star, Sigma.t.inv = Sigma.t.inv.star, gamma = gamma, beta.tilde.gamma = beta.tilde.gamma))
      mh.eta.2 <- sum(sapply(1:t, make.mh, sigma.squared = sigma.squared, Sigma.t = Sigma.t, Sigma.t.inv = Sigma.t.inv, gamma = gamma, beta.tilde.gamma = beta.tilde.gamma))
      mh.eta <- exp(mh.eta.1 - mh.eta.2)
      if(mh.eta > runif(1)){
        sigma.squared.eta <- sigma.squared.eta.star
        Sigma.t <- Sigma.t.star
        Sigma.t.inv <- Sigma.t.inv.star
        eta.accept <- 1 / n.mcmc + eta.accept
      }
    }
    ##
    ## sample phi (random-walk MH, truncated to (phi.lower, phi.upper))
    ##
    phi.star <- rnorm(1, phi, phi.tune)
    if(phi.star > phi.lower && phi.star < phi.upper){
      for(i in 1:t){
        Sigma.t.star[[i]] <- I.o[[i]] + sigma.squared.eta * exp( - D.t[[i]] / phi.star)
        Sigma.t.inv.star[[i]] <- solve(Sigma.t.star[[i]])
      }
      mh.phi.1 <- sum(sapply(1:t, make.mh, sigma.squared = sigma.squared, Sigma.t = Sigma.t.star, Sigma.t.inv = Sigma.t.inv.star, gamma = gamma, beta.tilde.gamma = beta.tilde.gamma))
      mh.phi.2 <- sum(sapply(1:t, make.mh, sigma.squared = sigma.squared, Sigma.t = Sigma.t, Sigma.t.inv = Sigma.t.inv, gamma = gamma, beta.tilde.gamma = beta.tilde.gamma))
      mh.phi <- exp(mh.phi.1 - mh.phi.2)
      if(mh.phi > runif(1)){
        phi <- phi.star
        Sigma.t <- Sigma.t.star
        Sigma.t.inv <- Sigma.t.inv.star
        phi.accept <- 1 / n.mcmc + phi.accept
      }
    }
    ##
    ## rebuild the full covariance and its cached projections
    ##
    ## FIX: was 'I.full * sigma.squared.eta * exp(...)', inconsistent with
    ## the initialization above and with the Sigma.t construction, which
    ## both use 'I + sigma.squared.eta * correlation'
    Sigma.full <- I.full + sigma.squared.eta * exp( - D / phi)
    Sigma.full.inv <- solve(Sigma.full)
    tX.Sigma.full.inv.X <- tX %*% Sigma.full.inv %*% X
    tX.Sigma.full.inv <- tX %*% Sigma.full.inv
    ##
    ## save samples
    ##
    Y.pred[, , k] <- matrix(unlist(Y.c), nrow = m, ncol = t, byrow = FALSE)
    gamma.save[, , k] <- matrix(unlist(gamma), nrow = p, ncol = t, byrow = FALSE)
    sigma.squared.save[k] <- sigma.squared
    sigma.squared.eta.save[k] <- sigma.squared.eta
    phi.save[k] <- phi
    beta.save[, , k] <- matrix(unlist(beta.hat), nrow = p, ncol = t, byrow = FALSE)
  }
  list(gamma.save = gamma.save, sigma.squared.save = sigma.squared.save, beta.save = beta.save, rho.save = rho.save, delta.save = delta.save, Y.pred = Y.pred, eta.accept = eta.accept, phi.accept = phi.accept, gamma.accept = gamma.accept, sigma.squared.eta.save = sigma.squared.eta.save, phi.save = phi.save)
}
|
\name{MetaDE-package}
\alias{MetaDE-package}
\alias{MetaDE}
\docType{package}
\title{MetaDE: Microarray meta-analysis for differentially expressed gene detection
}
\description{
The MetaDE package implements 12 major meta-analysis methods for differential expression analysis
: Fisher (Rhodes, et al., 2002), Stouffer (Stouffer, 1949), adaptively weighted Fisher (AW) (Li and
Tseng, 2011), minimum p-value (minP), maximum p-value (maxP), rth ordered p-value (rOP)
(Song and Tseng, 2012), fixed effects model (FEM), random effects model (REM) (Choi, et al., 2003),
rank product (rankProd) (Hong, et al., 2006), naive sum of ranks and naive product of ranks
(Dreyfuss, et al., 2009). Detailed algorithms, pros and cons of different methods have been
discussed in a recent review paper (Tseng, et al., 2012). In addition to selecting a meta-analysis
method, two additional considerations are involved in the implementation: (1) Choice of test
statistics: Different test statistics are available in the package for each type of outcome
variable (e.g. t-statistic or moderated t-statistic for binary outcome, F-statistic for multi-class
outcome, regression or correlation for continuous outcome and Cox proportional hazard model for
survival outcome). Additionally, a minimum multi-class correlation (min-MCC) has been included for
multi-class outcome to only capture concordant expression patterns that F-statistic often fails
(Lu, et al., 2010); (2) One-sided test correction: When combining two-sided p-values for binary
outcomes, DE genes with discordant DE direction may be identified and the results are difficult to
interpret(e.g. up-regulation in one study but down-regulation in another study). One-sided test
correction is helpful to guarantee identification of DE genes with concordant DE direction. For
example, Pearson's correction has been proposed for Fisher's method (Owen, 2009). In addition to
the choices above, MetaDE also provides options for gene matching across studies and gene filtering
before meta-analysis. Outputs of the meta-analysis results include DE gene lists with corresponding
raw p-value, q-values and various visualization tools. Heatmaps can be plotted across studies.
\bold{The \code{ind.analysis} Function}\cr
This function is used to perform individual analysis and calculate the p-values frequently used in meta-analysis. Based on the type of outcome variable,
\bold{The \code{ind.cal.ES} Function}\cr
This function is used for calculating the effect sizes (standardized mean difference) frequently used in meta-analysis.
\bold{The \code{MetaDE.rawdata} Function}\cr
With the raw gene expression datasets, all the methods combining the options of \code{ind.method} and \code{meta.method}
can be implemented by function \code{MetaDE.rawdata}.
\bold{The \code{MetaDE.pvalue} and \code{MetaDE.ES} Function}\cr
If p-values or effect sizes (and corresponding variances) have been calculated already, for
example by other methods not used in functions, \code{ind.analysis} or \code{ind.cal.ES}, with the help
of other software, then the meta-analysis can be implemented by function \code{MetaDE.pvalue} or \code{MetaDE.ES}.
}
\author{Xingbin Wang<xingbinw@gmail.com>, Jia Li<jiajiaysc@gmail.com> and George C Tseng<ctseng@pitt.edu>
}
\references{
Jia Li and George C. Tseng. (2011) An adaptively weighted statistic for detecting differential gene expression when combining multiple transcriptomic studies. Annals of Applied Statistics. 5:994-1019.
Shuya Lu, Jia Li, Chi Song, Kui Shen and George C Tseng. (2010) Biomarker Detection in the Integration of Multiple Multi-class Genomic Studies. Bioinformatics. 26:333-340. (PMID: 19965884; PMCID: PMC2815659)
Xingbin Wang, Yan Lin, Chi Song, Etienne Sibille and George C Tseng. (2012) Detecting disease-associated genes with confounding variable adjustment and the impact on genomic meta-analysis: with application to major depressive disorder. BMC Bioinformatics. 13:52.
George C. Tseng, Debashis Ghosh and Eleanor Feingold. (2012) Comprehensive literature review and statistical considerations for microarray meta-analysis. Nucleic Acids Research accepted
Xingbin Wang, Dongwan Kang, Kui Shen, Chi Song, Lunching Chang, Serena G. Liao, Zhiguang Huo, Naftali Kaminski, Etienne Sibille, Yan Lin, Jia Li and George C. Tseng. (2012) A Suite of R Packages for Quality Control, Differentially Expressed Gene and Enriched Pathway Detection in Microarray Meta-analysis. In press.
}
\keyword{ package }
| /man/MetaDE-package.Rd | no_license | liuyedao246/MetaDE | R | false | false | 4,604 | rd | \name{MetaDE-package}
\alias{MetaDE-package}
\alias{MetaDE}
\docType{package}
\title{MetaDE: Microarray meta-analysis for differentially expressed gene detection
}
\description{
The MetaDE package implements 12 major meta-analysis methods for differential expression analysis
: Fisher (Rhodes, et al., 2002), Stouffer (Stouffer, 1949), adaptively weighted Fisher (AW) (Li and
Tseng, 2011), minimum p-value (minP), maximum p-value (maxP), rth ordered p-value (rOP)
(Song and Tseng, 2012), fixed effects model (FEM), random effects model (REM) (Choi, et al., 2003),
rank product (rankProd) (Hong, et al., 2006), naive sum of ranks and naive product of ranks
(Dreyfuss, et al., 2009). Detailed algorithms, pros and cons of different methods have been
discussed in a recent review paper (Tseng, et al., 2012). In addition to selecting a meta-analysis
method, two additional considerations are involved in the implementation: (1) Choice of test
statistics: Different test statistics are available in the package for each type of outcome
variable (e.g. t-statistic or moderated t-statistic for binary outcome, F-statistic for multi-class
outcome, regression or correlation for continuous outcome and Cox proportional hazard model for
survival outcome). Additionally, a minimum multi-class correlation (min-MCC) has been included for
multi-class outcome to only capture concordant expression patterns that F-statistic often fails
(Lu, et al., 2010); (2) One-sided test correction: When combining two-sided p-values for binary
outcomes, DE genes with discordant DE direction may be identified and the results are difficult to
interpret(e.g. up-regulation in one study but down-regulation in another study). One-sided test
correction is helpful to guarantee identification of DE genes with concordant DE direction. For
example, Pearson's correction has been proposed for Fisher's method (Owen, 2009). In addition to
the choices above, MetaDE also provides options for gene matching across studies and gene filtering
before meta-analysis. Outputs of the meta-analysis results include DE gene lists with corresponding
raw p-value, q-values and various visualization tools. Heatmaps can be plotted across studies.
\bold{The \code{ind.analysis} Function}\cr
This function is used to perform individual analysis and calculate the p-values frequently used in meta-analysis. Based on the type of outcome variable,
\bold{The \code{ind.cal.ES} Function}\cr
This function is used for calculating the effect sizes (standardized mean difference) frequently used in meta-analysis.
\bold{The \code{MetaDE.rawdata} Function}\cr
With the raw gene expression datasets, all the methods combining the options of \code{ind.method} and \code{meta.method}
can be implemented by function \code{MetaDE.rawdata}.
\bold{The \code{MetaDE.pvalue} and \code{MetaDE.ES} Function}\cr
If p-values or effect sizes (and corresponding variances) have been calculated already, for
example by other methods not used in functions, \code{ind.analysis} or \code{ind.cal.ES}, with the help
of other software, then the meta-analysis can be implemented by function \code{MetaDE.pvalue} or \code{MetaDE.ES}.
}
\author{Xingbin Wang<xingbinw@gmail.com>, Jia Li<jiajiaysc@gmail.com> and George C Tseng<ctseng@pitt.edu>
}
\references{
Jia Li and George C. Tseng. (2011) An adaptively weighted statistic for detecting differential gene expression when combining multiple transcriptomic studies. Annals of Applied Statistics. 5:994-1019.
Shuya Lu, Jia Li, Chi Song, Kui Shen and George C Tseng. (2010) Biomarker Detection in the Integration of Multiple Multi-class Genomic Studies. Bioinformatics. 26:333-340. (PMID: 19965884; PMCID: PMC2815659)
Xingbin Wang, Yan Lin, Chi Song, Etienne Sibille and George C Tseng. (2012) Detecting disease-associated genes with confounding variable adjustment and the impact on genomic meta-analysis: with application to major depressive disorder. BMC Bioinformatics. 13:52.
George C. Tseng, Debashis Ghosh and Eleanor Feingold. (2012) Comprehensive literature review and statistical considerations for microarray meta-analysis. Nucleic Acids Research accepted
Xingbin Wang, Dongwan Kang, Kui Shen, Chi Song, Lunching Chang, Serena G. Liao, Zhiguang Huo, Naftali Kaminski, Etienne Sibille, Yan Lin, Jia Li and George C. Tseng. (2012) A Suite of R Packages for Quality Control, Differentially Expressed Gene and Enriched Pathway Detection in Microarray Meta-analysis. In press.
}
\keyword{ package }
|
r=0.07
https://sandbox.dams.library.ucdavis.edu/fcrepo/rest/collection/sherry-lehmann/catalogs/d7z59x/media/images/d7z59x-001/svc:tesseract/full/full/0.07/default.jpg Accept:application/hocr+xml
| /ark_87287/d7z59x/d7z59x-001/rotated.r | permissive | ucd-library/wine-price-extraction | R | false | false | 195 | r | r=0.07
https://sandbox.dams.library.ucdavis.edu/fcrepo/rest/collection/sherry-lehmann/catalogs/d7z59x/media/images/d7z59x-001/svc:tesseract/full/full/0.07/default.jpg Accept:application/hocr+xml
|
# Download and unpack the UCI household power consumption data, then draw
# plot 3 (energy sub-metering over 2007-02-01/02) into plot3.png.
download.file('https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip', 'power.zip')
unzip('power.zip')
# na.strings = '?': the raw file marks missing readings with "?", so the
# sub-metering columns convert cleanly to numeric (NA) instead of relying
# on as.numeric() coercion warnings.
power <- read.table('household_power_consumption.txt', sep = ';', header = TRUE,
                    stringsAsFactors = FALSE, dec = '.', na.strings = '?')
# We will only be using data from the dates 2007-02-01 and 2007-02-02
# (dates are stored as d/m/Y strings); keeping file order preserves
# chronology, matching the original rbind(date2, date1).
ds <- power[power$Date %in% c('1/2/2007', '2/2/2007'), ]
esm1 <- as.numeric(ds$Sub_metering_1)
esm2 <- as.numeric(ds$Sub_metering_2)
esm3 <- as.numeric(ds$Sub_metering_3)
# BUG FIX: paste() joins date and time with a space, so the parse format
# needs a space between %Y and %H; the original "%d/%m/%Y%H:%M:%S" only
# parsed on platforms whose C strptime skips stray whitespace.
times <- strptime(paste(ds$Date, ds$Time), format = "%d/%m/%Y %H:%M:%S")
# plot 3: the three sub-metering series over time, written to a 480x480 PNG.
png("plot3.png", width = 480, height = 480, units = "px")
plot(times, esm1, type = 'l', xlab = '', ylab = "Energy Sub Metering")
lines(times, esm2, col = 'red')
lines(times, esm3, col = 'blue')
legend("topright",
       legend = c('Sub Metering 1', 'Sub Metering 2', 'Sub Metering 3'),
       col = c('black', 'red', 'blue'), lty = 1)
dev.off() | /plot3.R | no_license | matthew-kruse/ExData_Plotting1 | R | false | false | 998 | r | # download and load the data into R
# Fetch the household power consumption archive and extract it locally.
download.file('https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip', 'power.zip')
unzip('power.zip')
# Semicolon-separated file; missing values appear as "?" and are not
# declared here, so as.numeric() below coerces them to NA with a warning.
power <- read.table('household_power_consumption.txt', sep =';', header=TRUE, stringsAsFactors=FALSE, dec='.')
# We will only be using data from the dates 2007-02-01 and 2007-02-02.
date1 <- power[power$Date == '2/2/2007', ]
date2 <- power[power$Date == '1/2/2007', ]
# Bind 1/2/2007 before 2/2/2007 to keep chronological order.
ds <- rbind(date2,date1)
esm1 <- as.numeric(ds$Sub_metering_1)
esm2 <- as.numeric(ds$Sub_metering_2)
esm3 <- as.numeric(ds$Sub_metering_3)
# NOTE(review): paste() inserts a space between date and time, but the
# format string has no space between %Y and %H -- this relies on the
# platform strptime skipping whitespace; confirm it parses on the target OS.
times <- strptime(paste(ds$Date, ds$Time), format="%d/%m/%Y%H:%M:%S")
# plot 3
png("plot3.png", width = 480, height = 480, units = "px")
plot(times, esm1, type='l', xlab = '', ylab="Energy Sub Metering")
lines(times, esm2, col='red')
lines(times, esm3, col='blue')
# One line per sub-meter; legend colours match the line colours above.
legend("topright", legend = c('Sub Metering 1', 'Sub Metering 2', 'Sub Metering 3'), col=c('black', 'red', 'blue'), lty=1)
dev.off() |
# This mini-project is based on the K-Means exercise from 'R in Action'
# Go here for the original blog post and solutions
# http://www.r-bloggers.com/k-means-clustering-from-r-in-action/

# Exercise 0: Install these packages if you don't have them already
# install.packages(c("cluster", "rattle", "NbClust"))

# Now load the data and look at the first few rows
data(wine, package = "rattle")
head(wine)

# Exercise 1: Remove the first column (the known wine Type) and scale the
# remaining measurements so every variable contributes equally to the
# distance computations.
# BUG FIX: the scaled data was stored in `dataframe` but every later call
# (wssplot, NbClust, kmeans, clusplot) used `df`, which was never defined;
# one consistent name is used throughout now.
df <- scale(wine[-1])

# Method 1: A plot of the total within-groups sums of squares against the
# number of clusters in a K-means solution can be helpful. A bend in the
# graph can suggest the appropriate number of clusters.
wssplot <- function(data, nc = 15, seed = 1234) {
  # wss[1] is the total variance, i.e. the one-cluster solution.
  wss <- (nrow(data) - 1) * sum(apply(data, 2, var))
  for (i in 2:nc) {
    set.seed(seed)  # reseed so every kmeans run is reproducible
    wss[i] <- sum(kmeans(data, centers = i)$withinss)
  }
  plot(1:nc, wss, type = "b", xlab = "Number of Clusters",
       ylab = "Within groups sum of squares")
}
wssplot(df)

# Exercise 2:
# * How many clusters does this method suggest?
#   3, because the line stops dropping as significantly after 3 clusters
# * Why does this method work? What's the intuition behind it?
#   After 3 clusters, the variance is less and less explained with each
#   additional cluster. It's intuitive because there are three types of wine.
# * Look at the code for wssplot() and figure out how it works

# Method 2: Use the NbClust library, which runs many experiments
# and gives a distribution of potential number of clusters.
library(NbClust)
set.seed(1234)
nc <- NbClust(df, min.nc = 2, max.nc = 15, method = "kmeans")
barplot(table(nc$Best.n[1, ]),
        xlab = "Numer of Clusters", ylab = "Number of Criteria",
        main = "Number of Clusters Chosen by 26 Criteria")

# Exercise 3: How many clusters does this method suggest?
#   This method also suggests 3 clusters

# Exercise 4: run k-means with the chosen number of clusters and store the
# result in fit.km (nstart = 25 restarts guard against bad local optima).
set.seed(1234)
fit.km <- kmeans(df, 3, nstart = 25)
fit.km$size
fit.km$centers
fit.km

# Exercise 5: cross-tabulate the cluster assignments against the actual
# wine types to judge the clustering quality.
cluster_check <- table(wine$Type, fit.km$cluster)
cluster_check
# I would consider it pretty good clustering; the clusters seem to follow
# the wine$Type labels pretty closely.

# Exercise 6: visualize the clusters with clusplot() from the cluster
# library (pam() re-partitions the scaled data into 3 groups for plotting).
library(cluster)
clusplot(pam(df, 3))
# I would consider this good clustering because most of the points visually
# seem to be contained within their clusters.
| /clustering.R | no_license | mseeley3/K-means-clustering | R | false | false | 3,149 | r | # This mini-project is based on the K-Means exercise from 'R in Action'
# Go here for the original blog post and solutions
# http://www.r-bloggers.com/k-means-clustering-from-r-in-action/
# Exercise 0: Install these packages if you don't have them already
# install.packages(c("cluster", "rattle","NbClust"))
# Now load the data and look at the first few rows
data(wine, package="rattle")
head(wine)
# Exercise 1: Remove the first column from the data and scale
# it using the scale() function
dataframe <- scale(wine[-1])
# Now we'd like to cluster the data using K-Means.
# How do we decide how many clusters to use if you don't know that already?
# We'll try two methods.
# Method 1: A plot of the total within-groups sums of squares against the
# number of clusters in a K-means solution can be helpful. A bend in the
# graph can suggest the appropriate number of clusters.
wssplot <- function(data, nc=15, seed=1234){
wss <- (nrow(data)-1)*sum(apply(data,2,var))
for (i in 2:nc){
set.seed(seed)
wss[i] <- sum(kmeans(data, centers=i)$withinss)}
plot(1:nc, wss, type="b", xlab="Number of Clusters",
ylab="Within groups sum of squares")
}
wssplot(df)
# Exercise 2:
# * How many clusters does this method suggest?
#3, because the line stops dropping as significantly after 3 clusters
# * Why does this method work? What's the intuition behind it?
#After 3 clusters, the varience is less and less explained with each cluster. It's intuitive because there are three types of wine.
# * Look at the code for wssplot() and figure out how it works
# Method 2: Use the NbClust library, which runs many experiments
# and gives a distribution of potential number of clusters.
library(NbClust)
set.seed(1234)
nc <- NbClust(df, min.nc=2, max.nc=15, method="kmeans")
barplot(table(nc$Best.n[1,]),
xlab="Numer of Clusters", ylab="Number of Criteria",
main="Number of Clusters Chosen by 26 Criteria")
# Exercise 3: How many clusters does this method suggest?
#This method also suggests 3 clusters
# Exercise 4: Once you've picked the number of clusters, run k-means
# using this number of clusters. Output the result of calling kmeans()
# into a variable fit.km
set.seed(1234)
fit.km <- kmeans(df, 3, nstart=25)
fit.km$size
fit.km$centers
fit.km
# Now we want to evaluate how well this clustering does.
# Exercise 5: using the table() function, show how the clusters in fit.km$clusters
# compares to the actual wine types in wine$Type. Would you consider this a good
# clustering?
cluster_check <- table(wine$Type, fit.km$cluster)
cluster_check
#I would considered it pretty good clustering, the clustering seems to follow the wine$types pretty closely
# Exercise 6:
# * Visualize these clusters using function clusplot() from the cluster library
# * Would you consider this a good clustering?
#clusplot( ... )
library(cluster)
clusplot(pam(dataframe, 3))
#I would consider this good clustering because most of the points visually seem to be contained within their clusters.
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_aggregate_indicators.R
\name{get_aggregate_indicators}
\alias{get_aggregate_indicators}
\title{retrieve aggregate indicator data}
\usage{
get_aggregate_indicators(symbol, resolution, api.key, write.file = FALSE)
}
\arguments{
\item{symbol}{the stock symbol to retrieve data for}
\item{resolution}{intervals for the data}
\item{api.key}{your API token from finnhub.io}
\item{write.file}{should the table be written to the "aggregate_indicators" folder?}
}
\value{
a data frame of aggregate indicators and trends
}
\description{
`get_aggregate_indicators` retrieves aggregate signal of multiple technical indicators (e.g. MACD, RSI, MA)
}
\examples{
\donttest{
### Get aggregate indicators with resolution of 1 minute
get_aggregate_indicators(symbol = 'AAPL', resolution = 1,
api.key = api.key)
}
}
| /man/get_aggregate_indicators.Rd | no_license | atamalu/finntools | R | false | true | 891 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_aggregate_indicators.R
\name{get_aggregate_indicators}
\alias{get_aggregate_indicators}
\title{retrieve aggregate indicator data}
\usage{
get_aggregate_indicators(symbol, resolution, api.key, write.file = FALSE)
}
\arguments{
\item{symbol}{the stock symbol to retrieve data for}
\item{resolution}{intervals for the data}
\item{api.key}{your API token from finnhub.io}
\item{write.file}{should the table be written to the "aggregate_indicators" folder?}
}
\value{
a data frame of aggregate indicators and trends
}
\description{
`get_aggregate_indicators` retrieves aggregate signal of multiple technical indicators (e.g. MACD, RSI, MA)
}
\examples{
\donttest{
### Get aggregate indicators with resolution of 1 minute
get_aggregate_indicators(symbol = 'AAPL', resolution = 1,
api.key = api.key)
}
}
|
# Find the hospital with the lowest 30-day mortality rate for a given
# outcome within a given state (Hospital Compare outcome-of-care data).
#
# Args:
#   state   : two-letter state abbreviation, e.g. 'TX'
#   outcome : one of 'heart attack', 'heart failure', 'pneumonia'
# Returns:
#   a one-row data.table holding the winning Hospital.Name, or an error
#   message string when the state or outcome is invalid.
best <- function(state = 'TX', outcome = "heart failure") {
  library(data.table)
  library(dplyr)
  care_measures_data <- data.table(read.csv2('H:\\LITERATURA_i_POBRANE\\R_kurs\\Cursera R\\R-programming-week-4\\outcome-of-care-measures.csv', sep = ',', dec = '.', colClasses = "character"))
  ## Check that state and outcome are valid
  hospital_states <- unique(care_measures_data$State)
  if (!state %in% hospital_states) {
    msg <- paste0("Error in best(", state, ",", outcome, ") : invalid state")
    return(msg)
    # Error in best("BB", "heart attack") : invalid state
  }
  # FIX: the outcome argument was previously never validated; an unknown
  # outcome now reports an error message in the same style as the
  # invalid-state branch instead of failing inside select().
  valid_outcomes <- c('heart attack', 'heart failure', 'pneumonia')
  if (!outcome %in% valid_outcomes) {
    msg <- paste0("Error in best(", state, ",", outcome, ") : invalid outcome")
    return(msg)
  }
  # Rename the verbose 30-day mortality columns to the short outcome names.
  old <- c('Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack',
           'Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure',
           'Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia')
  new <- c('heart attack', 'heart failure', 'pneumonia')
  setnames(care_measures_data, old, new)
  # Keep hospital name, state and the requested outcome column only.
  care_measures_data_2 <- care_measures_data %>%
    select(Hospital.Name, State, outcome) %>%
    filter(State == state)
  # Cleaning: the raw rates are character with "Not Available" markers;
  # coerce to numeric and drop rows without a rate.
  care_measures_data_2[, 3] <- ifelse(care_measures_data_2[, 3] == "Not Available", "NA", care_measures_data_2[, 3])
  care_measures_data_2[, 3] <- as.numeric(care_measures_data_2[, 3])
  care_measures_data_2 <- na.omit(care_measures_data_2)
  care_measures_data_2 <- data.table(care_measures_data_2)
  # Pick the hospital(s) with the minimal rate; the per-outcome if/else
  # chain is replaced by indexing the column by name.
  rate <- care_measures_data_2[[outcome]]
  candidates <- care_measures_data_2[rate == min(rate), ]
  # FIX: break ties alphabetically by hospital name instead of taking
  # whichever tied row happened to come first in file order.
  candidates <- candidates[order(Hospital.Name), ]
  outcome_final <- candidates[1, .(Hospital.Name)]
  return(outcome_final)
}
| /best.R | no_license | JerzyOtwock/R-programming-week-4 | R | false | false | 2,174 | r |
best <- function(state = 'TX', outcome = "heart failure" ) {
library(data.table)
library(dplyr)
care_measures_data <- data.table(read.csv2('H:\\LITERATURA_i_POBRANE\\R_kurs\\Cursera R\\R-programming-week-4\\outcome-of-care-measures.csv', sep = ',', dec ='.', colClasses = "character" ))
hospital_states <- unique(care_measures_data$State)
if (!state %in% hospital_states){
msg <- paste0("Error in best(",state , ",", outcome,") : invalid state")
return (msg)
# Error in best("BB", "heart attack") : invalid state
}
## Check that state and outcome are valid
# New names
old <- c('Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack', 'Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure' ,
'Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia')
new <- c('heart attack' , 'heart failure' , 'pneumonia')
setnames(care_measures_data , old , new)
care_measures_data_2 <- care_measures_data %>%
select( Hospital.Name, State, outcome ) %>%
filter(State == state )
# cleaning
care_measures_data_2[,3] <- ifelse(care_measures_data_2[,3] == "Not Available", "NA", care_measures_data_2[,3] )
care_measures_data_2[,3] <- as.numeric(care_measures_data_2[,3])
care_measures_data_2 <- na.omit(care_measures_data_2)
care_measures_data_2 <- data.table(care_measures_data_2)
# min if else
if (outcome == 'heart attack'){
care_measures_data_2 <- care_measures_data_2[`heart attack` == min(`heart attack`),]
outcome_final <- care_measures_data_2[1,]
} else if (outcome == 'heart failure'){
care_measures_data_2 <- care_measures_data_2[`heart failure` == min(`heart failure`),]
outcome_final <- care_measures_data_2[1,]
} else{
care_measures_data_2 <- care_measures_data_2[`pneumonia` == min(`pneumonia`),]
outcome_final <- care_measures_data_2[1,]
}
# final
outcome_final <- outcome_final[,.(Hospital.Name)]
return(outcome_final)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/put_wrappers.R
\name{update_card_labels}
\alias{update_card_labels}
\title{Update card labels}
\usage{
update_card_labels(card, labels, ...)
}
\arguments{
\item{card}{Card id}
\item{labels}{A character vector of one or more label id}
\item{...}{Additional arguments passed to \code{\link{put_model}}}
}
\description{
Replace currently assigned labels.
}
| /man/update_card_labels.Rd | no_license | navigate-cgalvao/trelloR | R | false | true | 434 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/put_wrappers.R
\name{update_card_labels}
\alias{update_card_labels}
\title{Update card labels}
\usage{
update_card_labels(card, labels, ...)
}
\arguments{
\item{card}{Card id}
\item{labels}{A character vector of one or more label id}
\item{...}{Additional arguments passed to \code{\link{put_model}}}
}
\description{
Replace currently assigned labels.
}
|
# Reshape per-repo DESCRIPTION JSON blobs into (raw, clean, ranking) tables.
#
# Args:
#   jsons       : vector/list of JSON strings, each one parsed DESCRIPTION
#                 file (jsonlite::fromJSON(x)[[1]] yields the field list)
#   jsons.names : repo names, indexed by the X1 id that mdply attaches
# Returns: list(raw = parsed table, clean = long dependency table,
#               ranking = per-group dependency rankings)
# NOTE(review): relies on a global `cran_current` (matrix of current CRAN
# packages, names in column 1) being in scope -- confirm at call sites.
reshape_description=function(jsons,jsons.names){
# Parse every JSON blob into a one-row data.frame and row-bind them;
# mdply adds an X1 column holding the element index.
a=plyr::mdply(jsons,.fun = function(x) {
data.frame(jsonlite::fromJSON(x)[[1]],stringsAsFactors = FALSE)},.progress = 'text')
# Flag packages that are also on CRAN, and map the X1 index to a repo name.
a$ON_CRAN=ifelse(a$Package%in%cran_current[,1],'CRAN_GITHUB','ONLY_GITHUB')
a$repo=jsons.names[as.numeric(a$X1)]
# Melt the last four columns (Depends/Imports/Suggests/LinkingTo) into
# long (variable, value) form, dropping missing entries.
a1=a%>%dplyr::select(X1,ON_CRAN,repo,Package,Title,Author,Description,VignetteBuilder,BugReports,URL,Depends,Imports,Suggests,LinkingTo)%>%
reshape2::melt(.,id= head(names(.),-4))%>%dplyr::filter(!is.na(value))
# clean a bit more....
# Split comma-separated dependency lists; the gsub pattern strips leading/
# trailing whitespace, parenthesized version requirements, and other
# parsing residue. Entries still containing ':' or 'NULL' are dropped.
a2=a1%>%plyr::ddply(head(names(a1),-1),.fun=function(x){
data.frame(value=gsub(pattern = '^\\s+|\\s+$|\\s+\\((.*?)\\)|\\((.*?)\\)|\\b.1\\b|^s: ',
replacement = '',
x = strsplit(x$value,',')[[1]]
),
stringsAsFactors = FALSE)
},.progress = 'text')%>%dplyr::filter(!grepl(':|NULL',value))
# reshape for rankings
# For each ON_CRAN group: count occurrences of each dependency per field,
# rank them by frequency, label values as "name (count)", and cast to one
# ranked column per dependency field.
a3<-a2%>%plyr::dlply(.variables = c('ON_CRAN'),.fun=function(df){
df%>%dplyr::count(variable,value)%>%dplyr::arrange(variable,desc(n))%>%
dplyr::group_by(variable)%>%dplyr::do(.,cbind(rank=1:nrow(.),.))%>%
dplyr::mutate(value=sprintf('%s (%s)',value,n))%>%
reshape2::dcast(rank~variable,value.var='value')
})
l=list(raw=a,clean=a2,ranking=a3)
return(l)
}
a=plyr::mdply(jsons,.fun = function(x) {
data.frame(jsonlite::fromJSON(x)[[1]],stringsAsFactors = FALSE)},.progress = 'text')
a$ON_CRAN=ifelse(a$Package%in%cran_current[,1],'CRAN_GITHUB','ONLY_GITHUB')
a$repo=jsons.names[as.numeric(a$X1)]
a1=a%>%dplyr::select(X1,ON_CRAN,repo,Package,Title,Author,Description,VignetteBuilder,BugReports,URL,Depends,Imports,Suggests,LinkingTo)%>%
reshape2::melt(.,id= head(names(.),-4))%>%dplyr::filter(!is.na(value))
# clean a bit more....
a2=a1%>%plyr::ddply(head(names(a1),-1),.fun=function(x){
data.frame(value=gsub(pattern = '^\\s+|\\s+$|\\s+\\((.*?)\\)|\\((.*?)\\)|\\b.1\\b|^s: ',
replacement = '',
x = strsplit(x$value,',')[[1]]
),
stringsAsFactors = FALSE)
},.progress = 'text')%>%dplyr::filter(!grepl(':|NULL',value))
# reshape for rankings
a3<-a2%>%plyr::dlply(.variables = c('ON_CRAN'),.fun=function(df){
df%>%dplyr::count(variable,value)%>%dplyr::arrange(variable,desc(n))%>%
dplyr::group_by(variable)%>%dplyr::do(.,cbind(rank=1:nrow(.),.))%>%
dplyr::mutate(value=sprintf('%s (%s)',value,n))%>%
reshape2::dcast(rank~variable,value.var='value')
})
l=list(raw=a,clean=a2,ranking=a3)
return(l)
} |
# ui.R -- Shiny front end for the "Life Expectancy and Income" ggvis app.
library(shiny)
library(ggvis)

# Assemble the page: a title plus the two placeholders that the server
# fills in via ggvis ("ggvis_ui" for the controls, "ggvis" for the plot).
# FIX: use `<-` for assignment and avoid naming the object `shiny`, which
# shadowed the shiny package name; the UI object is what ui.R evaluates to.
ui <- shinyUI(fluidPage(
  titlePanel("Life Expectancy and Income"),
  mainPanel(
    uiOutput("ggvis_ui"),
    ggvisOutput("ggvis")
  )
))
| /hw6/Bonus/ui.R | no_license | liams32/Stats-133 | R | false | false | 195 | r |
# Shiny UI definition for the "Life Expectancy and Income" ggvis app.
library(shiny)
library(ggvis)
# NOTE(review): the object is named `shiny`, shadowing the package name,
# and is assigned with `=`; the fluidPage value is what ui.R returns.
shiny = shinyUI(fluidPage(
titlePanel("Life Expectancy and Income"),
mainPanel(
# Placeholders populated by the server through ggvis.
uiOutput("ggvis_ui"),
ggvisOutput("ggvis")
)
))
|
# Figures for the "speed" simulation: timing and prediction error of the
# APL / sprinter / sprinter1cv methods, plus selection-size scatter plots.
# NOTE: the original script started with rm(list = ls()); that line is
# removed here because wiping the caller's workspace is a side effect a
# sourced script should never have.
library(simulator)
library(RColorBrewer)
source("../plot_functions.R")

sim_name <- "speed"
# Load the saved simulation and keep only the model subsets and methods
# that appear in the figures.
sim <- load_simulation(name = sim_name) %>%
  subset_simulation(subset = c(1, 2, 4, 5)) %>%
  subset_simulation(methods = c("APL", "sprinter", "sprinter1cv"))

# Shared graphical parameters, one entry per plotted method.
n_method <- length(evals(sim)[[1]]@method_name)
col_seq <- brewer.pal(10, "Paired")[c(2, 6, 5)]
lty_seq <- rep(2, n_method)
lwd_seq <- rep(2, n_method)
pch_seq <- seq(n_method)

# ---- Figure 1: computing time and prediction MSE versus p ----
pdf(file = "./plots/pred_time.pdf", width = 11, height = 5)
mat <- matrix(c(1, 2), ncol = 2)
layout(mat, c(9, 9, 9), c(1, 1, 1))
par(cex.main = 1.2, cex.lab = 1.6, cex.axis = 1.2)
xlab <- "p"
xaxis <- c(100, 200, 1000, 2000)

# Left panel: computing time (the legend is drawn on this panel only).
par(mar = c(4, 5, 1, 0.2))
ylab <- "Time (s)"
plot_aggr_eval_by_model(sim = sim,
                        metric_name = "time",
                        main = NULL,
                        xlab = xlab,
                        xaxis = xaxis,
                        ylab = ylab,
                        method_col = col_seq,
                        method_lty = lty_seq,
                        method_lwd = lwd_seq,
                        method_pch = pch_seq,
                        legend_location = "topleft")

# Right panel: prediction mean squared error.
par(mar = c(4, 5, 1, 0.2))
ylab <- "Mean squared error"
plot_aggr_eval_by_model(sim = sim,
                        metric_name = "mse_pred",
                        main = NULL,
                        xlab = xlab,
                        xaxis = xaxis,
                        ylab = ylab,
                        method_col = col_seq,
                        method_lty = lty_seq,
                        method_lwd = lwd_seq,
                        method_pch = pch_seq,
                        legend_location = NULL)
dev.off()

# ---- Figure 2: selection sizes vs MSE for the mixed model (subset 4) ----
pdf(file = "./plots/nnzm_nnzi_pred.pdf", width = 11, height = 5)
sim <- subset_simulation(sim, subset = c(4))
mat <- matrix(c(1, 2), ncol = 2)
layout(mat, c(9, 9, 9), c(1, 1, 1))
par(cex.main = 1, cex.lab = 1.6, cex.axis = 1.2)
plot_main <- "Mixed (p = 1000, snr = 3)"

# Left panel: number of selected main effects vs prediction MSE.
par(mar = c(4, 5, 1, 0.2))
ylab <- "Mean squared error"
xlab <- "Number of non-zero main effects"
metric_name_1 <- "nnzm"
metric_name_2 <- "mse_pred"
plot_two_raw_evals(sim = sim,
                   metric_name_1 = metric_name_1,
                   metric_name_2 = metric_name_2,
                   main = plot_main,
                   xlab = xlab,
                   ylab = ylab,
                   method_col = col_seq,
                   method_pch = pch_seq,
                   legend_location = "bottomright")

# Right panel: number of selected interactions vs prediction MSE.
par(mar = c(4, 5, 1, 0.2))
ylab <- "Mean squared error"
xlab <- "Number of non-zero interactions"
metric_name_1 <- "nnzi"
metric_name_2 <- "mse_pred"
plot_two_raw_evals(sim = sim,
                   metric_name_1 = metric_name_1,
                   metric_name_2 = metric_name_2,
                   main = plot_main,
                   xlab = xlab,
                   ylab = ylab,
                   method_col = col_seq,
                   method_pch = pch_seq,
                   legend_location = NULL)
dev.off() | /sprinter/Gaussian/plot_speed.R | no_license | hugogogo/reproducible | R | false | false | 3,182 | r | rm(list = ls())
library(simulator)
library(RColorBrewer)
source("../plot_functions.R")
sim_name <- "speed"
sim <- load_simulation(name = sim_name) %>%
subset_simulation(subset = c(1, 2, 4, 5)) %>%
subset_simulation(methods = c("APL", "sprinter", "sprinter1cv"))
# general graphical paramters
n_method <- length(evals(sim)[[1]]@method_name)
col_seq <- brewer.pal(10, "Paired")[c(2, 6, 5)]
lty_seq <- rep(2, n_method)
lwd_seq <- rep(2, n_method)
pch_seq <- seq(n_method)
pdf(file = "./plots/pred_time.pdf", width = 11, height = 5)
mat <- matrix(c(1, 2), ncol = 2)
layout(mat, c(9, 9, 9), c(1, 1, 1))
par(cex.main = 1.2, cex.lab = 1.6, cex.axis = 1.2)
xlab <- "p"
xaxis <- c(100, 200, 1000, 2000)
par(mar = c(4, 5, 1, 0.2))
ylab <- "Time (s)"
plot_aggr_eval_by_model(sim = sim,
metric_name = "time",
main = NULL,
xlab = xlab,
xaxis = xaxis,
ylab = ylab,
method_col = col_seq,
method_lty = lty_seq,
method_lwd = lwd_seq,
method_pch = pch_seq,
legend_location = "topleft")
par(mar = c(4, 5, 1, 0.2))
ylab <- "Mean squared error"
plot_aggr_eval_by_model(sim = sim,
metric_name = "mse_pred",
main = NULL,
xlab = xlab,
xaxis = xaxis,
ylab = ylab,
method_col = col_seq,
method_lty = lty_seq,
method_lwd = lwd_seq,
method_pch = pch_seq,
legend_location = NULL)
dev.off()
pdf(file = "./plots/nnzm_nnzi_pred.pdf", width = 11, height = 5)
sim <- subset_simulation(sim, subset = c(4))
mat <- matrix(c(1, 2), ncol = 2)
layout(mat, c(9, 9, 9), c(1, 1, 1))
par(cex.main = 1, cex.lab = 1.6, cex.axis = 1.2)
plot_main <- "Mixed (p = 1000, snr = 3)"
par(mar = c(4, 5, 1, 0.2))
# plot the number of selected main effects vs prediction mse
ylab <- "Mean squared error"
xlab <- "Number of non-zero main effects"
metric_name_1 <- "nnzm"
metric_name_2 <- "mse_pred"
plot_two_raw_evals(sim = sim,
metric_name_1 = metric_name_1,
metric_name_2 = metric_name_2,
main = plot_main,
xlab = xlab,
ylab = ylab,
method_col = col_seq,
method_pch = pch_seq,
legend_location = "bottomright")
par(mar = c(4, 5, 1, 0.2))
# plot the number of selected interactions vs prediction mse
ylab <- "Mean squared error"
xlab <- "Number of non-zero interactions"
metric_name_1 <- "nnzi"
metric_name_2 <- "mse_pred"
plot_two_raw_evals(sim = sim,
metric_name_1 = metric_name_1,
metric_name_2 = metric_name_2,
main = plot_main,
xlab = xlab,
ylab = ylab,
method_col = col_seq,
method_pch = pch_seq,
legend_location = NULL)
dev.off() |
library(lava)
### Name: confband
### Title: Add Confidence limits bar to plot
### Aliases: confband forestplot
### Keywords: iplot

### ** Examples

# One vertical and one horizontal confidence bar on an empty canvas.
plot(0, 0, type = "n", xlab = "", ylab = "")
confband(0.5, -0.5, 0.5, 0, col = "darkblue")
confband(0.8, -0.5, 0.5, 0, col = "darkred", vert = FALSE, pch = 1, cex = 1.5)

# Forest plots built from simulated estimates with +/- 2 SE limits.
set.seed(1)
n_rows <- 20
estimates <- rnorm(n_rows)
std_err <- runif(n_rows, 0.2, 0.4)
tab <- cbind(estimates, estimates - 2 * std_err, estimates + 2 * std_err,
             runif(n_rows, 0.5, 2))
tab[c(3:4, 10:12), ] <- NA
rownames(tab) <- unlist(lapply(letters[seq(n_rows)],
                               function(ch) paste(rep(ch, 4), collapse = "")))
rownames(tab)[which(is.na(estimates))] <- ""
same_sign <- sign(tab[, 2]) == sign(tab[, 3])
forestplot(tab, text.right = FALSE)
forestplot(tab[, -4], sep = c(2, 15), col = same_sign + 1, box1 = TRUE,
           delta = 0.2, pch = 16, cex = 1.5)
forestplot(tab, vert = TRUE, text = FALSE)
forestplot(tab, vert = TRUE, text = FALSE, pch = NA)
##forestplot(x,vert=TRUE,text.vert=FALSE)
##forestplot(val,vert=TRUE,add=TRUE)

# Step-style confidence bands drawn as filled polygons.
z <- seq(10)
z_up <- c(z[-1], 10)
plot(z, type = "n")
confband(z, z_up, rep(0, length(z)), col = Col("darkblue"), polygon = TRUE, step = TRUE)
confband(z, z_up, z_up - 2, col = Col("darkred"), polygon = TRUE, step = TRUE)

# A smooth polygon band under the curve y = x^2.
z <- seq(0, 1, length.out = 100)
plot(z, z, type = "n")
confband(z, z, z^2, polygon = "TRUE", col = Col("darkblue"))

# Point estimates with confidence limits, vertical then horizontal layout.
set.seed(1)
m <- 10
pos <- seq(m)
est2 <- rnorm(m)
spread <- runif(m)
vals <- cbind(pos, est2, est2 - spread, est2 + spread)
par(mfrow = c(1, 2))
plot(0, type = "n", xlim = c(0, m + 1), ylim = range(vals[, -1]),
     axes = FALSE, xlab = "", ylab = "")
axis(2)
confband(vals[, 1], vals[, 3], vals[, 4], vals[, 2], pch = 16, cex = 2)
plot(0, type = "n", ylim = c(0, m + 1), xlim = range(vals[, -1]),
     axes = FALSE, xlab = "", ylab = "")
axis(1)
confband(vals[, 1], vals[, 3], vals[, 4], vals[, 2], pch = 16, cex = 2, vert = FALSE)
| /data/genthat_extracted_code/lava/examples/confband.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,549 | r | library(lava)
### Name: confband
### Title: Add Confidence limits bar to plot
### Aliases: confband forestplot
### Keywords: iplot
### ** Examples
plot(0,0,type="n",xlab="",ylab="")
confband(0.5,-0.5,0.5,0,col="darkblue")
confband(0.8,-0.5,0.5,0,col="darkred",vert=FALSE,pch=1,cex=1.5)
set.seed(1)
K <- 20
est <- rnorm(K)
se <- runif(K,0.2,0.4)
x <- cbind(est,est-2*se,est+2*se,runif(K,0.5,2))
x[c(3:4,10:12),] <- NA
rownames(x) <- unlist(lapply(letters[seq(K)],function(x) paste(rep(x,4),collapse="")))
rownames(x)[which(is.na(est))] <- ""
signif <- sign(x[,2])==sign(x[,3])
forestplot(x,text.right=FALSE)
forestplot(x[,-4],sep=c(2,15),col=signif+1,box1=TRUE,delta=0.2,pch=16,cex=1.5)
forestplot(x,vert=TRUE,text=FALSE)
forestplot(x,vert=TRUE,text=FALSE,pch=NA)
##forestplot(x,vert=TRUE,text.vert=FALSE)
##forestplot(val,vert=TRUE,add=TRUE)
z <- seq(10)
zu <- c(z[-1],10)
plot(z,type="n")
confband(z,zu,rep(0,length(z)),col=Col("darkblue"),polygon=TRUE,step=TRUE)
confband(z,zu,zu-2,col=Col("darkred"),polygon=TRUE,step=TRUE)
z <- seq(0,1,length.out=100)
plot(z,z,type="n")
confband(z,z,z^2,polygon="TRUE",col=Col("darkblue"))
set.seed(1)
k <- 10
x <- seq(k)
est <- rnorm(k)
sd <- runif(k)
val <- cbind(x,est,est-sd,est+sd)
par(mfrow=c(1,2))
plot(0,type="n",xlim=c(0,k+1),ylim=range(val[,-1]),axes=FALSE,xlab="",ylab="")
axis(2)
confband(val[,1],val[,3],val[,4],val[,2],pch=16,cex=2)
plot(0,type="n",ylim=c(0,k+1),xlim=range(val[,-1]),axes=FALSE,xlab="",ylab="")
axis(1)
confband(val[,1],val[,3],val[,4],val[,2],pch=16,cex=2,vert=FALSE)
|
# Extracted example code for the in2extRemes package help page.
library(in2extRemes)
### Name: in2extRemes-package
### Title: Graphical User Interface Dialog Window for EVA
### Aliases: in2extRemes-package in2extRemes
### Keywords: package utilities
### ** Examples
# The example is wrapped in "Not run" markers because in2extRemes()
# launches an interactive GUI dialog window.
## Not run:
##D in2extRemes()
## End(Not run)
| /data/genthat_extracted_code/in2extRemes/examples/in2extRemes-package.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 256 | r | library(in2extRemes)
### Name: in2extRemes-package
### Title: Graphical User Interface Dialog Window for EVA
### Aliases: in2extRemes-package in2extRemes
### Keywords: package utilities
### ** Examples
## Not run:
##D in2extRemes()
## End(Not run)
|
# Annotate CREB ChIP-seq peak intervals with motif/tag densities, apply the
# promoter-proximal filters, and draw summary plots.
library(BSgenome.Hsapiens.UCSC.hg19)
library(ggbio)
library(GenomicRanges)
library(GenomicFeatures)
library(hexbin)

# NOTE(review): setwd() in a script is fragile; kept because the input
# (and the commented-out output) paths are relative to this directory.
setwd('~/Documents/CREB/ChIPseqENCODE/insectBED1000/')
data <- read.delim('130422_mergedOutput.annotateOneExpt.countMotifs.txt',
                   row.names = 1, stringsAsFactors = FALSE)
data <- data[, 1:21]

# Convert annotation columns to the right type.
data$PeakScore <- as.integer(data$PeakScore)
data$Distance.to.TSS <- as.integer(data$Distance.to.TSS)
data$intervalSize <- data$End - data$Start

# Normalise the tag and motif counts for the size of the interval.
data$tagsPerInterval <- data$PeakScore / data$intervalSize
data$motifPerInterval <- data$CREBmotifNo / data$intervalSize

# The filters: minimum tag support, promoter-proximal TSS window, at
# least one CREB motif, and a motif-density ceiling.
dataPeakScore <- subset(data, PeakScore >= 12)
dataTSSdist <- subset(dataPeakScore, Distance.to.TSS <= 3000 & Distance.to.TSS >= -500)
dataMotif <- subset(dataTSSdist, CREBmotifNo > 0)
dataMotifDensity <- subset(dataMotif, motifPerInterval <= 1000)
# dataProtein = subset(dataMotifDensity, dataMotifDensity$Gene.Type == 'protein-coding')

# Motif and tag density as a function of distance to the TSS.
par(mfrow = c(2, 1))
plot(data$Distance.to.TSS, data$motifPerInterval, type = 'h',
     xlab = 'Distance to transcriptional start site',
     ylab = 'Motifs per base pair', main = 'Density of CREB motifs',
     xlim = c(-5e5, 5e5))
plot(data$Distance.to.TSS, data$tagsPerInterval, type = 'h',
     xlab = 'Distance to transcriptional start site',
     ylab = 'Tags per base pair', main = 'Density of CREB binding',
     xlim = c(-5e5, 5e5))

# Kernel-density summaries; compute each density once and reuse it for the
# polygon fill (the original called density() twice per panel).
par(mfrow = c(2, 1))
dens_tss <- stats::density(data$Distance.to.TSS, bw = 'nrd0')
plot(dens_tss, xlim = c(-1e4, 1e4), main = "CREB binding at TSS",
     xlab = "Distance from TSS (upstream - downstream)")
polygon(dens_tss, col = "lightblue", border = "grey")
dens_len <- stats::density(data$intervalSize, bw = 'nrd0')
plot(dens_len, xlim = c(0, 1e4), main = "Proximal promoter length",
     xlab = "Length of proximal promoter")
polygon(dens_len, col = "lightgreen", border = "grey")

par(mfrow = c(1, 1))
plot(data$CREBmotifNo, data$PeakScore, pch = 20, col = rainbow(20),
     main = 'Tag count vs CREB motifs', xlab = 'Number of CREB motifs',
     ylab = 'Tag counts')
# BUG FIX: the hexbin was computed on rnorm(<vector>) -- rnorm takes its
# first argument as a sample SIZE, so the plot showed freshly generated
# standard-normal noise unrelated to the data. Bin the actual motif
# counts against the peak scores instead.
bin <- hexbin(data$CREBmotifNo, data$PeakScore, xbins = 100)
plot(bin, main = "Hexagonal Binning")

# Package the annotated intervals as a GRanges object; len holds the
# hg19 lengths of the first 24 sequences (chr1-22, X, Y).
genome <- BSgenome.Hsapiens.UCSC.hg19
len <- as.vector(seqlengths(genome)[1:24])
bigRange <- GRanges(seqnames = Rle(data$Chr),
                    ranges = IRanges(start = data$Start, end = data$End, names = data$Gene.Name),
                    strand = data$Strand, peakScore = data$PeakScore,
                    TSSdist = data$Distance.to.TSS, motifs = data$CREBmotifNo,
                    motifDensity = data$motifPerInterval, tagDensity = data$tagsPerInterval)
#write.table(dataMotifDensity, './130322_CREBchipAnnotationFiltered.txt', sep='\t', row.names=F) | /PhD/130422_crebChipAnnotationFiltering.R | no_license | dvbrown/Rscripts | R | false | false | 2,716 | r | library(BSgenome.Hsapiens.UCSC.hg19)
library(ggbio)
library(GenomicRanges)
library(GenomicFeatures)
library(hexbin)

# Annotated CREB ChIP-seq peaks (ENCODE): load the motif-counted peak table,
# normalise counts per interval, filter to proximal-promoter peaks with CREB
# motifs, draw diagnostic plots, and package the peaks as a GRanges object.
setwd('~/Documents/CREB/ChIPseqENCODE/insectBED1000/')
# FALSE instead of the reassignable shorthand F; keep rows keyed by column 1.
data <- read.delim('130422_mergedOutput.annotateOneExpt.countMotifs.txt', row.names = 1, stringsAsFactors = FALSE)
data <- data[, 1:21]

# Convert annotation columns to the right type.
data$PeakScore <- as.integer(data$PeakScore)
data$Distance.to.TSS <- as.integer(data$Distance.to.TSS)
data$intervalSize <- data$End - data$Start

# Normalise the tag and motif number for the size of the interval.
data$tagsPerInterval <- (data$PeakScore / data$intervalSize)
data$motifPerInterval <- (data$CREBmotifNo / data$intervalSize)

# The filters: minimum tag count, proximal-promoter TSS window (-500..+3000),
# at least one CREB motif, and a bounded motif density.
dataPeakScore <- subset(data, data$PeakScore >= 12)
dataTSSdist <- subset(dataPeakScore, dataPeakScore$Distance.to.TSS <= 3000 & dataPeakScore$Distance.to.TSS >= -500)
dataMotif <- subset(dataTSSdist, dataTSSdist$CREBmotifNo > 0)
dataMotifDensity <- subset(dataMotif, dataMotif$motifPerInterval <= 1000)
#dataProtein = subset(dataMotifDensity, dataMotifDensity$Gene.Type == 'protein-coding')

# Make some graphs: motif / tag density around the TSS.
par(mfrow = c(2, 1))
plot(data$Distance.to.TSS, data$motifPerInterval, type='h', xlab='Distance to transcriptional start site',
     ylab='Motifs per base pair', main='Density of CREB motifs', xlim=c(-5e5,5e5))
plot(data$Distance.to.TSS, data$tagsPerInterval, type='h', xlab='Distance to transcriptional start site',
     ylab='Tags per base pair', main='Density of CREB binding', xlim=c(-5e5,5e5))
par(mfrow = c(2, 1))
plot(stats::density(data$Distance.to.TSS, bw='nrd0'), xlim=c(-1e4, 1e4), main="CREB binding at TSS", xlab="Distance from TSS (upstream - downstream)")
polygon(density(data$Distance.to.TSS, bw='nrd0'), col="lightblue", border="grey")
plot(stats::density(data$intervalSize, bw='nrd0'), xlim=c(0, 1e4), main="Proximal promoter length", xlab="Length of proximal promoter")
polygon(density(data$intervalSize, bw='nrd0'), col="lightgreen", border="grey")
par(mfrow = c(1, 1))
plot(data$CREBmotifNo, data$PeakScore, pch=20, col=rainbow(20), main='Tag count vs CREB motifs', xlab='Number of CREB motifs', ylab='Tag counts')

# NOTE(review): rnorm(n) treats a vector first argument as its length, so x/y
# are random normal draws, not the motif/tag values -- confirm the hexbin plot
# was really meant to show random data.
x <- rnorm(data$CREBmotifNo)
y <- rnorm(data$PeakScore)
bin <- hexbin(x, y, xbins = 100)
plot(bin, main="Hexagonal Binning")

# Package the peaks as a GRanges object with peak/motif metadata columns.
genome <- BSgenome.Hsapiens.UCSC.hg19
len <- as.vector(seqlengths(genome)[1:24])
bigRange <- GRanges(seqnames=Rle(data$Chr), ranges=IRanges(start=data$Start, end=data$End, names=data$Gene.Name),
                    strand=data$Strand, peakScore=data$PeakScore, TSSdist=data$Distance.to.TSS, motifs=data$CREBmotifNo,
                    motifDensity=data$motifPerInterval, tagDensity=data$tagsPerInterval)
#write.table(dataMotifDensity, './130322_CREBchipAnnotationFiltered.txt', sep='\t', row.names=F) |
# This file is auto-generated by h2o-3/h2o-bindings/bin/gen_R.py
# Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
#'
# -------------------------- H2O Stacked Ensemble -------------------------- #
#'
#' Build a stacked ensemble (aka. Super Learner) using the H2O base
#' learning algorithms specified by the user.
#'
#' @param x A vector containing the names or indices of the predictor variables to use in building the model.
#' If x is missing, then all columns except y are used.
#' @param y The name of the response variable in the model. If the data does not contain a header, this is the first column
#' index, and increasing from left to right. (The response must be either an integer or a
#' categorical variable).
#' @param model_id Destination id for this model; auto-generated if not specified.
#' @param training_frame Id of the training data frame (Not required, to allow initial validation of model parameters).
#' @param validation_frame Id of the validation data frame.
#' @param base_models List of model ids which we can stack together. Models must have been cross-validated using nfolds > 1, and
#' folds must be identical across models. Defaults to [].
#' @examples
#'
#' # See example R code here:
#' # http://docs.h2o.ai/h2o/latest-stable/h2o-docs/data-science/stacked-ensembles.html
#'
#' @export
h2o.stackedEnsemble <- function(x, y, training_frame,
                                model_id = NULL,
                                validation_frame = NULL,
                                base_models = list()
                                )
{
  # If x is missing, then assume the user wants to use all columns as features.
  if (missing(x)) {
    if (is.numeric(y)) {
      # NOTE(review): col() is the base matrix helper; presumably the H2OFrame
      # dim methods make this yield usable column indices -- verify.
      x <- setdiff(col(training_frame), y)
    } else {
      x <- setdiff(colnames(training_frame), y)
    }
  }
  # Required args: training_frame
  if (missing(training_frame)) stop("argument 'training_frame' is missing, with no default")
  # training_frame must be a key or an H2OFrame object; resolve keys to frames.
  if (!is.H2OFrame(training_frame))
    tryCatch(training_frame <- h2o.getFrame(training_frame),
             error = function(err) {
               stop("argument 'training_frame' must be a valid H2OFrame or key")
             })
  # validation_frame must likewise be a key or an H2OFrame object.
  if (!is.null(validation_frame)) {
    if (!is.H2OFrame(validation_frame))
      tryCatch(validation_frame <- h2o.getFrame(validation_frame),
               error = function(err) {
                 stop("argument 'validation_frame' must be a valid H2OFrame or key")
               })
  }
  # Parameter list to send to the model builder.
  parms <- list()
  parms$training_frame <- training_frame
  args <- .verify_dataxy(training_frame, x, y)
  parms$response_column <- args$y
  # A stacked ensemble needs at least one cross-validated base model.
  if (length(base_models) == 0) stop('base_models is empty')
  # If base_models contains model objects instead of ids, replace each with
  # its model id.  seq_along() instead of 1:length() so the loop degrades
  # safely on an empty list (1:0 would iterate over c(1, 0)).
  for (i in seq_along(base_models)) {
    if (inherits(base_models[[i]], 'H2OModel')) {
      base_models[[i]] <- base_models[[i]]@model_id
    }
  }
  if (!missing(model_id))
    parms$model_id <- model_id
  if (!missing(validation_frame))
    parms$validation_frame <- validation_frame
  if (!missing(base_models))
    parms$base_models <- base_models
  # Error check and build the model via the REST API (v99 endpoint).
  .h2o.modelJob('stackedensemble', parms, h2oRestApiVersion = 99)
}
| /h2o-r/h2o-package/R/stackedensemble.R | permissive | DEVESHTARASIA/h2o-3 | R | false | false | 3,403 | r | # This file is auto-generated by h2o-3/h2o-bindings/bin/gen_R.py
# Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
#'
# -------------------------- H2O Stacked Ensemble -------------------------- #
#'
#' Build a stacked ensemble (aka. Super Learner) using the H2O base
#' learning algorithms specified by the user.
#'
#' @param x A vector containing the names or indices of the predictor variables to use in building the model.
#' If x is missing, then all columns except y are used.
#' @param y The name of the response variable in the model. If the data does not contain a header, this is the first column
#' index, and increasing from left to right. (The response must be either an integer or a
#' categorical variable).
#' @param model_id Destination id for this model; auto-generated if not specified.
#' @param training_frame Id of the training data frame (Not required, to allow initial validation of model parameters).
#' @param validation_frame Id of the validation data frame.
#' @param base_models List of model ids which we can stack together. Models must have been cross-validated using nfolds > 1, and
#' folds must be identical across models. Defaults to [].
#' @examples
#'
#' # See example R code here:
#' # http://docs.h2o.ai/h2o/latest-stable/h2o-docs/data-science/stacked-ensembles.html
#'
#' @export
# Assemble and launch a Stacked Ensemble model build.
# x/y select the predictors and response; training_frame/validation_frame may
# be given as H2OFrames or as frame keys; base_models lists cross-validated
# models (or their ids) to stack.  Returns the result of .h2o.modelJob().
h2o.stackedEnsemble <- function(x, y, training_frame,
model_id = NULL,
validation_frame = NULL,
base_models = list()
)
{
# If x is missing, then assume user wants to use all columns as features.
if (missing(x)) {
if (is.numeric(y)) {
# NOTE(review): col() is the base matrix helper; presumably the H2OFrame
# dim methods make this yield usable column indices -- verify.
x <- setdiff(col(training_frame), y)
} else {
x <- setdiff(colnames(training_frame), y)
}
}
# Required args: training_frame
if (missing(training_frame)) stop("argument 'training_frame' is missing, with no default")
# Training_frame must be a key or an H2OFrame object; keys are resolved here.
if (!is.H2OFrame(training_frame))
tryCatch(training_frame <- h2o.getFrame(training_frame),
error = function(err) {
stop("argument 'training_frame' must be a valid H2OFrame or key")
})
# Validation_frame must be a key or an H2OFrame object
if (!is.null(validation_frame)) {
if (!is.H2OFrame(validation_frame))
tryCatch(validation_frame <- h2o.getFrame(validation_frame),
error = function(err) {
stop("argument 'validation_frame' must be a valid H2OFrame or key")
})
}
# Parameter list to send to model builder
parms <- list()
parms$training_frame <- training_frame
args <- .verify_dataxy(training_frame, x, y)
parms$response_column <- args$y
# At least one cross-validated base model is required to stack.
if (length(base_models) == 0) stop('base_models is empty')
# If base_models contains models instead of ids, replace with model id
# NOTE(review): seq_along(base_models) would be the safer idiom than
# 1:length(); safe here only because of the emptiness check above.
for (i in 1:length(base_models)) {
if (inherits(base_models[[i]], 'H2OModel')) {
base_models[[i]] <- base_models[[i]]@model_id
}
}
if (!missing(model_id))
parms$model_id <- model_id
if (!missing(validation_frame))
parms$validation_frame <- validation_frame
if (!missing(base_models))
parms$base_models <- base_models
# Error check and build model (REST API v99 endpoint)
.h2o.modelJob('stackedensemble', parms, h2oRestApiVersion = 99)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/general_use_functions.R
\name{sigm}
\alias{sigm}
\title{Sigmoid function}
\usage{
sigm(z)
}
\description{
Sigmoid function
}
| /epimapAUX/man/sigm.Rd | no_license | cboix/EPIMAP_ANALYSIS | R | false | true | 203 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/general_use_functions.R
\name{sigm}
\alias{sigm}
\title{Sigmoid function}
\usage{
sigm(z)
}
\description{
Sigmoid function
}
|
library(dplyr)
library(stringr)

# Getting and Cleaning Data course project:
# download the UCI HAR data set, merge the training/test splits, keep the
# mean()/std() features, attach readable activity labels, tidy the column
# names, and write out both the tidy table and a per-subject/activity summary.

# Preparation: Getting the data
if (!file.exists("data")) {
  dir.create("data")
}
temp <- tempfile()
download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip", temp)
unzip(temp, exdir = "data")
unlink(temp)

# 1. Merging training and test data
# Pulling together measurement files (X_*.txt) with the activity codes
# (y_*.txt) and subject ids (subject_*.txt) for each split.
data_dir <- file.path("data", "UCI HAR Dataset")

# Read one split ("train" or "test") and column-bind it in the order
# subject id | feature measurements | activity code.
read_split <- function(split) {
  x <- read.delim(file.path(data_dir, split, paste0("X_", split, ".txt")),
                  header = FALSE, sep = "")
  subject <- read.delim(file.path(data_dir, split, paste0("subject_", split, ".txt")),
                        header = FALSE, sep = "")
  y <- read.delim(file.path(data_dir, split, paste0("y_", split, ".txt")),
                  header = FALSE, sep = "")
  cbind(subject, x, y)
}

# merging train and test data
merged_data <- rbind(read_split("train"), read_split("test"))

# Retrieving column names
# Note: taking only the second column from features.txt
x_column_names <- read.delim(file.path(data_dir, "features.txt"),
                             sep = " ", header = FALSE)[, 2]
# Fixing column names: 'BodyBody' -> 'Body' (see CodeBook)
x_column_names <- sub("BodyBody", "Body", x_column_names)

# Some feature names repeat once per axis; disambiguate each run of 14
# columns with an explicit -X/-Y/-Z suffix.
add_axis_suffix <- function(nms, x_idx, y_idx, z_idx) {
  nms[x_idx] <- paste0(nms[x_idx], "-X")
  nms[y_idx] <- paste0(nms[y_idx], "-Y")
  nms[z_idx] <- paste0(nms[z_idx], "-Z")
  nms
}
x_column_names <- add_axis_suffix(x_column_names, 303:316, 317:330, 331:344)
x_column_names <- add_axis_suffix(x_column_names, 382:395, 396:409, 410:423)
x_column_names <- add_axis_suffix(x_column_names, 461:474, 475:488, 489:502)

colnames(merged_data) <- c("subject_id", x_column_names, "activity")

# 2. Reducing features: keep the ids plus mean()/std() measurements only
reduced_data <- as_tibble(merged_data) %>%
  select("subject_id", "activity",
         contains("mean()") | contains("std()"))

# 3. Substituting activity labels (numeric codes -> descriptive names)
activity_labels <- read.delim(file.path(data_dir, "activity_labels.txt"),
                              sep = " ", header = FALSE)[, 2]
tidy_data <- reduced_data %>% mutate(activity = activity_labels[activity])

# 4. Renaming feature columns
source('transform_column_names.R')
colnames(tidy_data) <- transform_column_names(colnames(tidy_data))

# 5. Creating a summary data set: mean of every feature per subject/activity
# (across() replaces the superseded summarise_all(); ungroup() so the result
# is a plain tibble)
summary <- tidy_data %>%
  group_by(subject_id, activity) %>%
  summarise(across(everything(), mean)) %>%
  ungroup()

# 6. Storing generated tables
if (!file.exists("output")) {
  dir.create("output")
}
write.table(tidy_data, file.path("output", "har-tidy.txt"), row.names = FALSE)
write.table(summary, file.path("output", "har-summary.txt"), row.names = FALSE) | /run_analysis.R | no_license | oliver7654/getting-and-cleaning-data-course-project | R | false | false | 3,444 | r | library(dplyr)
library(stringr)
# Preparation: Getting the data
if (!file.exists("data")) {
dir.create("data")
}
temp <- tempfile()
download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip", temp)
unzip(temp, exdir = "data")
unlink(temp)
# 1. Merging training and test data
# Pulling together training and test data files
# including activity (train_Y.txt, test_Y.txt)
# and subject id (subject_train.txt, subject_test.txt)
data_dir <- file.path("data", "UCI HAR Dataset")
# loading the training data
x_train <- read.delim(file.path(data_dir, "train", "X_train.txt"),
header = FALSE, sep = "")
subject_train <- read.delim(file.path(data_dir, "train", "subject_train.txt"),
header = FALSE, sep = "")
y_train <- read.delim(file.path(data_dir, "train", "y_train.txt"),
header = FALSE, sep = "")
train_data <- cbind(subject_train, x_train, y_train)
# loading the test data
x_test <- read.delim(file.path(data_dir, "test", "X_test.txt"),
header = FALSE, sep = "")
subject_test <- read.delim(file.path(data_dir, "test", "subject_test.txt"),
header = FALSE, sep = "")
y_test <- read.delim(file.path(data_dir, "test", "y_test.txt"),
header = FALSE, sep = "")
test_data <- cbind(subject_test, x_test, y_test)
# merging train and test data
merged_data <- rbind(train_data, test_data)
# Retrieving column names
# Note: taking only the second column from features.txt
x_column_names <- read.delim(file.path(data_dir, "features.txt"),
sep = " ", header = FALSE)[, 2]
# Fixing column names: 'BodyBody' -> 'Body' (see CodeBook)
x_column_names <- sub("BodyBody", "Body", x_column_names)
# Disambiguating columns 303-344
x_column_names[303:316] <- paste(x_column_names[303:316], "-X", sep = "")
x_column_names[317:330] <- paste(x_column_names[317:330], "-Y", sep = "")
x_column_names[331:344] <- paste(x_column_names[331:344], "-Z", sep = "")
# Disambiguating columns 382-423
x_column_names[382:395] <- paste(x_column_names[382:395], "-X", sep = "")
x_column_names[396:409] <- paste(x_column_names[396:409], "-Y", sep = "")
x_column_names[410:423] <- paste(x_column_names[410:423], "-Z", sep = "")
# Disambiguating columns 461-502
x_column_names[461:474] <- paste(x_column_names[461:474], "-X", sep = "")
x_column_names[475:488] <- paste(x_column_names[475:488], "-Y", sep = "")
x_column_names[489:502] <- paste(x_column_names[489:502], "-Z", sep = "")
column_names <- append(append("subject_id", x_column_names), "activity")
colnames(merged_data) <- column_names
# 2. Reducing features
reduced_data <- as_tibble(merged_data) %>%
select("subject_id", "activity",
contains("mean()") | contains("std()"))
# 3. Substituting activity labels
activity_labels <- read.delim(file.path(data_dir, "activity_labels.txt"),
sep = " ", header = FALSE)[, 2]
tidy_data <- reduced_data %>% mutate(activity = activity_labels[activity])
# 4. Renaming feature columns
source('transform_column_names.R')
column_names <- colnames(tidy_data)
tidy_column_names <- transform_column_names(column_names)
colnames(tidy_data) <- tidy_column_names
# 5. Creating a summary data set
summary <- tidy_data %>%
group_by(subject_id, activity) %>%
summarise_all(mean)
# 6. Storing generated tables
if (!file.exists("output")) {
dir.create("output")
}
write.table(tidy_data, file.path("output", "har-tidy.txt"), row.names = FALSE)
write.table(summary, file.path("output", "har-summary.txt"), row.names = FALSE) |
# Exam #1: Intro to "R" Programming
# Sept 2020
# All questions are worth 1 point each unless otherwise specified.
# You have 36h to take the exam. The exam will be due Friday, Sept. 18 at 9pm.
#----------------------------------------------------------------------------------
# Creating objects & using proper 'R' syntax ----
#1) please provide the commands that will show the data type of the following objects:
x <- c(FALSE, TRUE, FALSE)
class(x)
#2) Demonstrate the two ways you can assign a sequence of 10 numbers to a vector object. The sequence should start at 5.
a <- seq(5, 14, 1)
b <- 5:14
#3) Find the number of values assigned to one of the vector objects created in Question 2.
length(a)
#4) Create 2 objects and assign each 5 values. (3 POINTS)
d <- c(1, 3, 5, 7, 9)
e <- c(2, 4, 6, 8, 10)
#a) Perform inner and outer multiplication on these two objects
d %*% e  # inner (matrix) product -> 1x1 matrix
d %o% e  # outer product -> 5x5 matrix
#b) Find the mean and standard deviation of each object
mean(d)
mean(e)
sd(d)
sd(e)
#5) Create four objects, each with a different data type: character, integer, double-numeric, and factor.
## The integer should have at least 3 values. The factor should have at least two levels.
# Named *_obj to avoid shadowing the base function character().
character_obj <- "Hello"
integer_obj <- c(2L, 4L, 5L)
double_obj <- c(1, 1.5, 2, 2.5)
# 'levels' must be the vector of level values, not a count:
# factor(..., levels = 4) coerces 4 to "4" and turns every value into NA.
factor_obj <- factor(c("N", "S", "E", "W"), levels = c("N", "S", "E", "W"))
#6) Using indexing, extract the 1st and 7th values from a sequence you created in Question 2. (3 POINTS)
# a) Assign the 1st and 7th values a new object.
A <- a[1]
B <- a[7]
# b) Apply the "less-than"; "greater-than"; "greater-than-equal to" operators.
A < B
A > B
A >= B
# c) Complete the following operations where:
# z = 1st value (see 6a);
# y = 7th value (see 6a);
# x = 5
# ((z plus x) * (z + y))/2
# 10 * (x - y)
Z <- A
Y <- B
x <- 5
(Z + x) * (Z + Y) / 2
10 * (x - Y)
# 7) In comment, please tell me what is the "R" operator for 'not' (or negation)?
# The operator for "not" is !. != means "not equal".
# 8) Using the two objects you created in Question 4, apply the following operators: %%, ^, %/%. (2 POINTS)
# In a comment, please explain what each operator is doing.
e %% d   # element-wise remainder
d^e      # element-wise exponentiation
d %/% e  # element-wise integer (floor) division
# %% gives the remainders from dividing each value of the first vector by the
# corresponding value of the second vector.
# ^ raises each value of the first vector to the corresponding power in the
# second vector.
# %/% divides element-wise and keeps only the whole-number quotient (no
# remainder or fraction).
#9) Create an object with a left-to-right assignment operator.
# The left-to-right (rightward) assignment operator is '->' ('<<-' is
# right-to-left superassignment and would not answer the question).
b -> X
# 10) Create 3 objects that each have a number assigned to them.
## The code for all three objects must exists a single line in the script.
## Print the values of those three objects.
O1 <- 1; O2 <- 2; O3 <- 3  # all three assignments on a single line, per Q10
print(O1)
print(O2)
print(O3)
# 11) In a series of comments and code, describe and show examples of the special values Inf, -Inf, NaN, and NA.
ZA <- NA          # NA: missing-value placeholder
ZA <- Inf / -Inf  # infinity divided by negative infinity is NaN
# Inf means infinity, -Inf means negative infinity, NaN means "not a number",
# and NA means missing data (the default fill for matrices and data frames).
# 12) Create a die with six sides and sample it with and without replacement.
## What is the difference between the two methods?
die <- c(1, 2, 3, 4, 5, 6)
sample(die, replace = FALSE)
sample(die, replace = TRUE)
# With replacement the same side can be drawn more than once; without
# replacement each side appears exactly once.
# Creating data storage structures ------
# 13) Create an array with 6 rows, 3 columns, and 2 sheets (or levels).
AO <- array(data = 1:36, dim = c(6, 3, 2))
# 14) Create a matrix with 10 rows and 4 columns.
MAT <- matrix(1:40, nrow = 10, ncol = 4)
# 15) In a comment, describe the difference between the two data structures.
# Arrays are three dimensional (like the pages of a book) while matrices are
# two dimensional.
# 16) Using indexing, extract the 2nd, 6th, and 9th values from the 3rd column of the matrix you created in Question 14.
MAT[c(2, 6, 9), 3]
# 17) Create an object that extracts the second value from the second column in the second level (or sheet) in your array.
OA <- AO[2, 2, 2]
# 18) Create a data frame object with 5 rows and 3 columns. Give the columns the names of your three favorite foods.
# Assign the result so the data frame is actually kept.
fav_foods <- data.frame("Thai" = c(1, 2, 3, 4, 5), "Sushi" = c(1, 2, 3, 4, 5), "Mexican" = c(1, 2, 3, 4, 5))
# 19) Assign the formula for a linear regression to a new object using the following values: slope = 2, y-intercept = 0.5
LR <- as.formula(y ~ 2 * x + 0.5)
# Working with data --------
# Use the data frame "test1_data.Rdata" into RStudio console for the following questions:
# 20) Print the structure of the data frame. Insert a comment that describes the five different data types are present.
load("~/test1_data.Rdata")
print(d)
class(d)
str(d)
# The five types of data present are integers, characters, factors, date-time
# values, and (double) numbers.
# 21) Show the the FIRST 6 rows of the data frame "d"?
head(d)
# 22) Find the number of rows in the data set.
nrow(d)
# 23) Find the number of columns in the data set.
ncol(d)
# 24) Change the 'tow' field from a character to a factor data type.
# as.factor() does not modify in place -- the result must be assigned back,
# otherwise the is.factor() check below would still report FALSE.
d$tow <- as.factor(d$tow)
is.factor(d$tow)
# 25) Change the 'haul' field from a numeric to an integer data type.
d$haul <- as.integer(d$haul)
is.integer(d$haul)
# 26) Remove the "sw.density" column from the data frame.
d$sw.density <- NULL
# 27) Print the data type of only the 'transect.id' column.
class(d$transect.id)
#Working with character strings -----
# Use the data frame "test1_data.Rdata" into RStudio console for the following questions:
library(stringr)
#30) Find the unique values of transect.id and assign those to a new object.
UV <- unique(d$transect.id)
#31) Extract the last 10 unique, character strings from transect.id and assign those strings to a new object
length(UV)
# tail() generalises the original hard-coded UV[32:41], which assumed
# exactly 41 unique values.
UVII <- tail(UV, 10)
#32) Each value of transect.id has three components, separated by a dash ('-').
## Break these 3 components apart using the dash as the separator and assign each component to a new object.
TID <- str_split_fixed(d$transect.id, pattern = "-", n = 4)
OST <- TID[, 1]
N <- TID[, 2]
I <- TID[, 3]
L <- TID[, 4]
# There were 4 components separated by "-"; the 4th component is separated
# out as well.
#33) Recombine the three components into a single text string.
##Order the components so that the "D, M, S" component is first in the test string.
##Separate with a underscore ("_").
R4 <- str_c(L, I, N, OST, sep = "_")
# Recombined with the 4th component moved to the front.
#34) Using the first 5 unique transect.id values from transect.id, replace the dash ("-") with a underscore ('_')
# gsub()'s first positional argument fills x here only because 'pattern' and
# 'replacement' are passed by name.
SUB <- gsub(UV[1:5], pattern = "-", replacement = "_")
#35) Using the first 5 unique transect.id values, extract the first 5 characters (reading left to right) from the string
## and assign those values to a new object
NO <- str_sub(UV[1:5], start = 1, end = 5)
#Importing data stored in different file types (e.g. .csv, .txt. , .xlsx) into 'R' ----
#36) Import the following files into 'R' and assign each data set its own unique object.
#These data sets were shared with you in an accompanying zip file.
#"aurelia_15minCell_statareas.Rdata" "aurelia_15minCell_statareas.txt" "Aurelia_SEAMAP_2012-2018_30minCell.xlsx"
#"ENVREC.csv" "OST14_1E_d5_frames.csv"
library(ncdf4)
library(readxl)
setwd("C:/Users/Blaine/Downloads/test1_data_2020")
B <- load("C:/Users/Blaine/Downloads/test1_data_2020/aurelia_15minCell_statareas.Rdata")
ENV <- read.csv("C:/Users/Blaine/Downloads/test1_data_2020/ENVREC.csv")
# TRUE/FALSE spelled out instead of the reassignable shorthands T/F.
A15T <- read.table("C:/Users/Blaine/Downloads/test1_data_2020/aurelia_15minCell_statareas.txt", header = TRUE, stringsAsFactors = FALSE, sep = ",")
OST14 <- read.csv("C:/Users/Blaine/Downloads/test1_data_2020/OST14_1E_d5_frames.csv", header = FALSE)
#I looked at this file outside of R using both Excel and Notepad, but I couldn't decide how to seperate the data to make it intelligible.
AUR <- read_xlsx("C:/Users/Blaine/Downloads/test1_data_2020/Aurelia_SEAMAP_2012-2018_30minCell.xlsx")
#Dates----
#37) Convert the following date to a 'R' date-time object. Set it to be in the West Coast (i.e., East Coast) time zone.
t <- "9/17/2020 12:05:32"
# Assigned to 'parsed_t' rather than 'd' so the data frame loaded in Q20 is
# not clobbered.
parsed_t <- as.POSIXct(strptime(x = t, format = "%m/%d/%Y %H:%M:%OS", tz = "America/Los_Angeles"))
| /students_scripts/Blaine Novak Pilch/Blaine Novak Pilch Exam 1.R | no_license | Planktos/Intro2R_2020 | R | false | false | 8,922 | r | # Exam #1: Intro to "R" Programming
# Sept 2020
# All questions are worth 1 point each unless otherwise specified.
# You have 36h to take the exam. The exam will be due Friday, Sept. 18 at 9pm.
#----------------------------------------------------------------------------------
# Creating objects & using proper 'R' syntax ----
#1) please provide the commands that will show the data type of the following objects:
x <- c(FALSE,TRUE,FALSE)
class(x)
#2) Demonstrate the two ways you can assign a sequence of 10 numbers to a vector object. The sequence should start at 5.
a = seq(5, 14, 1)
b = 5:14
#3) Find the number of values assigned to one of the vector objects created in Question 2.
length(a)
#4) Create 2 objects and assign each 5 values. (3 POINTS)
d = c(1,3,5,7,9)
e = c(2,4,6,8,10)
#a) Perform inner and outer multiplication on these two objects
d%*%e
d%o%e
#b) Find the mean and standard deviation of each object
mean(d)
mean(e)
sd(d)
sd(e)
#5) Create four objects, each with a different data type: character, integer, double-numeric, and factor.
## The integer should have at least 3 values. The factor should have at least two levels.
character = "Hello"
interger = c(2L,4L,5L)
Double = c(1,1.5,2,2.5)
Factor = factor(c("N","S","E","W"), levels = 4)
#6) Using indexing, extract the 1st and 7th values from a sequence your created in Question 2. (3 POINTS)
# a) Assign the 1st and 7th values a new object.
A = a[1]
B = a[7]
# b) Apply the "less-than"; "greater-than"; "greater-than-equal to" operators.
A<B
A>B
A>=B
# c) Complete the following operations where:
# z = 1st value (see 6c);
# y = 7th value (see 6c);
# x = 5
# (z plus x) * (z + y))/2
# 10 * (x - y)
Z=A
Y=B
x=5
(Z+x)*(Z+Y)/2
10*(x-Y)
# 7) In comment, please tell me what is the “R” operator for ‘not’ (or negation)?
#The operator for "not" is !. != means not equal.
# 8) Using the two objects you created in Question 4, apply the following operators: %%, ^, %/%. (2 POINTS)
# In a comment, please explain what each operator is doing.
e%%d
d^e
d%/%e
#The %% operator is giving me the remainders from dividing each of the numbers in the first vector by the numbers in the 2nd vector.
#The ^ operator is taking the first vector and raising the values with in it by the corresponding values of the 2nd vector.
#The %/% vector is dividing each of the values from the first vector by the corresponding value in the 2nd vector without any remainders or fractions.
#9) Create an object with a left-to-right assignment operator.
X = a<<-b
# 10) Create 3 objects that each have a number assigned to them.
## The code for all three objects must exists a single line in the script.
## Print the values of those three objects.
O1 = 1
O2 = 2
O3 = 3
print(O1)
print(O2)
print(O3)
# 11) In a series of comments and code, describe and show examples of the special values Inf, -Inf, NaN, and NA.
ZA = NA
ZA = Inf/-Inf
#Inf means infinity, -Inf means negative infiinity, NaN means not a number, and NA means that there is no data (this default fills matrices and data frames)
#Ifinity divided by negative infinity is not a number (NaN)
# 12) Create a die with six sides and sample it with and without replacement.
## What is the difference between the two methods?
die=c(1,2,3,4,5,6)
sample(die, replace = FALSE)
sample(die, replace = TRUE)
#With replacements allows the same side of the die to be sampled while the false method does not allow for resampling.
# Creating data storage structures ------
# 13) Create an array with 6 rows, 3 columns, and 2 sheets (or levels).
AO = array(data = 1:36, dim = c(6, 3, 2))
# 14) Create a matrix with 10 rows and 4 columns.
MAT = matrix(1:40, nrow = 10, ncol = 4)
# 15) In a comment, describe the difference between the two data structures.
#Arrays are three dimensional in a similar fashion to a book while matrices are two dimensional.
# 16) Using indexing, extract the 2nd, 6th, and 9th values from the 3rd column of the matrix your created in Question 14.
MAT[c(2,6,9),3]
# 17) Create an object that extracts the second value from the second column in the second level (or sheet) in your array.
OA = AO[2,2,2]
# 18) Create a data frame object with 5 rows and 3 columns. Give the columns the names of your three favorite foods.
data.frame("Thai" = c(1,2,3,4,5), "Sushi" = c(1,2,3,4,5), "Mexican" = c(1,2,3,4,5))
# 19) Assign the formula for a linear regression to a new object using the following values: slope = 2, y-intercept = 0.5
LR = as.formula(y~2*x+0.5)
# Working with data --------
# Use the data frame “test1_data.Rdata” into RStudio console for the following questions:
# 20) Print the structure of the data frame. Insert a comment that describes the five different data types are present.
load("~/test1_data.Rdata")
print(d)
class(d)
str(d)
#The five types of data present are intergers, characters, Factors, date-time data, and numbers.
# 21) Show the the FIRST 6 rows of the data frame "d"?
head(d)
# 22) Find the number of rows in the data set.
nrow(d)
# 23) Find the number of columns in the data set.
ncol(d)
# 24) Change the ‘tow’ field from a character to a factor data type.
as.factor(d$tow)
is.factor(d$tow)
# 25) Change the ‘haul’ field from a numeric to an integer data type.
as.integer(d$haul)
is.integer(d$haul)
# 26) Remove the “sw.density” column from the data frame.
d$sw.density=NULL
# 27) Print the data type of only the ‘transect.id’ column.
class(d$transect.id)
#Working with character strings -----
# Use the data frame “test1_data.Rdata” into RStudio console for the following questions:
library(stringr)
#30) Find the unique values of transect.id and assign those to a new object.
UV = unique(d$transect.id)
#31) Extract the last 10 unique, character strings from transect.id and assign those strings to a new object
length(UV)
UVII=UV[32:41]
#32) Each value of transect.id has three components, separated by a dash ('-').
## Break these 3 components apart using the dash as the separator and assign each component to a new object.
TID=str_split_fixed(d$transect.id, pattern = "-", n=4)
OST = TID[,1]
N = TID[,2]
I = TID[,3]
L = TID[,4]
#There were 4 components seperated by "-" I seperated the 4th component as well.
#33) Recombine the three components into a single text string.
##Order the components so that the "D, M, S" component is first in the test string.
##Separate with a underscore ("_").
R4=str_c(L, I, N, OST, sep = "_")
#I recombined placing the 4th component to the front.
#34) Using the first 5 unique transect.id values from transect.id, replace the dash ("-") with a underscore ('_')
SUB = gsub(UV[1:5], pattern = "-", replacement = "_")
#35) Using the first 5 unique transect.id values, extract the first 5 characters (reading left to right) from the string
## and assign those values to a new object
NO = str_sub(UV[1:5], start = 1, end = 5)
#Importing data stored in different file types (e.g. .csv, .txt. , .xlsx) into 'R' ----
#36) Import the following files into 'R' and assign each data set its own unique object.
#These data sets were shared with you in an accompanying zip file.
#"aurelia_15minCell_statareas.Rdata" "aurelia_15minCell_statareas.txt" "Aurelia_SEAMAP_2012-2018_30minCell.xlsx"
#"ENVREC.csv" "OST14_1E_d5_frames.csv"
library(ncdf4)
library(readxl)
# NOTE(review): setwd() with an absolute user-specific path makes this script
# non-portable; the absolute paths below also make the setwd() redundant.
setwd ("C:/Users/Blaine/Downloads/test1_data_2020")
# load() restores the objects saved in the .Rdata file into the global
# environment; B receives a character vector of their names, not the data.
B = load("C:/Users/Blaine/Downloads/test1_data_2020/aurelia_15minCell_statareas.Rdata")
ENV = read.csv("C:/Users/Blaine/Downloads/test1_data_2020/ENVREC.csv")
# Comma-separated text file read with header row kept and strings left as
# character (stringsAsFactors = F).
A15T = read.table("C:/Users/Blaine/Downloads/test1_data_2020/aurelia_15minCell_statareas.txt", header = T, stringsAsFactors = F, sep = ",")
OST14 = read.csv("C:/Users/Blaine/Downloads/test1_data_2020/OST14_1E_d5_frames.csv", header = F)
#I looked at this file outside of R using both Excel and Notepad, but I couldn't decide how to seperate the data to make it intelligible.
AUR = read_xlsx("C:/Users/Blaine/Downloads/test1_data_2020/Aurelia_SEAMAP_2012-2018_30minCell.xlsx")
#Dates----
#37) Convert the following date to a 'R' date-time object. Set it to be in the West Coast (i.e., East Coast) time zone.
# NOTE(review): the question text above is self-contradictory; the tz used
# below, "America/Los_Angeles", is the West Coast (Pacific) zone.
t <- "9/17/2020 12:05:32"
# NOTE(review): this reassigns `d`, clobbering the data frame whose
# transect.id column is split at the top of this excerpt if run in one session.
d= as.POSIXct(strptime(x = t, format = "%m/%d/%Y %H:%M:%OS", tz="America/Los_Angeles" ))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/firebasedynamiclinks_objects.R
\name{ITunesConnectAnalytics}
\alias{ITunesConnectAnalytics}
\title{ITunesConnectAnalytics Object}
\usage{
ITunesConnectAnalytics(ct = NULL, mt = NULL, pt = NULL, at = NULL)
}
\arguments{
\item{ct}{Campaign text that developers can optionally add to any link in order to track a specific marketing campaign}
\item{mt}{iTunes media types, including music, podcasts, audiobooks and so on}
\item{pt}{Provider token that enables analytics for Dynamic Links from within iTunes}
\item{at}{Affiliate token used to create affiliate-coded links}
}
\value{
ITunesConnectAnalytics object
}
\description{
ITunesConnectAnalytics Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Parameters for iTunes Connect App Analytics.
}
| /googlefirebasedynamiclinksv1.auto/man/ITunesConnectAnalytics.Rd | permissive | GVersteeg/autoGoogleAPI | R | false | true | 825 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/firebasedynamiclinks_objects.R
\name{ITunesConnectAnalytics}
\alias{ITunesConnectAnalytics}
\title{ITunesConnectAnalytics Object}
\usage{
ITunesConnectAnalytics(ct = NULL, mt = NULL, pt = NULL, at = NULL)
}
\arguments{
\item{ct}{Campaign text that developers can optionally add to any link in order to track a specific marketing campaign}
\item{mt}{iTunes media types, including music, podcasts, audiobooks and so on}
\item{pt}{Provider token that enables analytics for Dynamic Links from within iTunes}
\item{at}{Affiliate token used to create affiliate-coded links}
}
\value{
ITunesConnectAnalytics object
}
\description{
ITunesConnectAnalytics Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Parameters for iTunes Connect App Analytics.
}
|
# Attach all packages used by the deep-learning workflow below.
library(sp)
library(keras)
library(tensorflow)
library(tfdatasets)
library(purrr)
library(ggplot2)
library(rsample)
library(stars)
library(raster)
library(reticulate)
library(mapview)
library(imager)
# NOTE(review): raster and stars are already attached above; the next
# library(raster) and library(stars) calls are redundant duplicates.
library(raster)
library(gdalUtils)
library(stars)
# NOTE(review): setwd("./") is a no-op (sets the working directory to itself).
setwd("./")
# ################image based#############
# Binary image classifier: four conv/max-pool stages followed by a dense head
# with a single sigmoid output (true/false tile labels), input 128x128 RGB.
# NOTE(review): in the R keras API the layer_*() functions modify a
# keras_model_sequential() in place, so their return values need not be
# captured.
first_model <- keras_model_sequential()
layer_conv_2d(first_model,filters = 32,kernel_size = 3, activation = "relu",input_shape = c(128,128,3))
layer_max_pooling_2d(first_model, pool_size = c(2, 2))
layer_conv_2d(first_model, filters = 64, kernel_size = c(3, 3), activation = "relu")
layer_max_pooling_2d(first_model, pool_size = c(2, 2))
layer_conv_2d(first_model, filters = 128, kernel_size = c(3, 3), activation = "relu")
layer_max_pooling_2d(first_model, pool_size = c(2, 2))
layer_conv_2d(first_model, filters = 128, kernel_size = c(3, 3), activation = "relu")
layer_max_pooling_2d(first_model, pool_size = c(2, 2))
layer_flatten(first_model)
layer_dense(first_model, units = 256, activation = "relu")
layer_dense(first_model, units = 1, activation = "sigmoid")
# Einladen der Trainingsdaten  (load the training data)
# Build a labelled data frame: tiles under .../True get label 1L, tiles under
# .../False get label 0L.
subset_list <- list.files("Data/Subsets_128/Slices_Muenster/True", full.names = T)
data_true <- data.frame(img=subset_list,lbl=rep(1L,length(subset_list)))
subset_list <- list.files("Data/Subsets_128/Slices_Muenster/False", full.names = T)
data_false <- data.frame(img=subset_list,lbl=rep(0L,length(subset_list)))
data <- rbind(data_true,data_false)
# Fix the RNG so the train/test split below is reproducible.
set.seed(2020)
#Verarbeitung der Trainingsdaten  (prepare the training data)
# 75/25 train/test split, stratified on the label column.
data <- initial_split(data,prop = 0.75, strata = "lbl")
# Sanity check: counts of negative and positive examples in the training part.
c(nrow(training(data)[training(data)$lbl==0,]), nrow(training(data)[training(data)$lbl==1,]))
training_dataset <- tensor_slices_dataset(training(data))
# Peek at the raw (un-decoded) dataset elements.
dataset_iterator <- as_iterator(training_dataset)
dataset_list <- iterate(dataset_iterator)
head(dataset_list)
# Model input size (height, width) taken from the model definition above.
subset_size <- first_model$input_shape[2:3]
# tfdatasets pipeline: read JPEG -> float32 in [0,1] -> resize to model input.
training_dataset <-
dataset_map(training_dataset, function(.x)
list_modify(.x, img = tf$image$decode_jpeg(tf$io$read_file(.x$img))))
training_dataset <-
dataset_map(training_dataset, function(.x)
list_modify(.x, img = tf$image$convert_image_dtype(.x$img, dtype = tf$float32)))
training_dataset <-
dataset_map(training_dataset, function(.x)
list_modify(.x, img = tf$image$resize(.x$img, size = shape(subset_size[1], subset_size[2]))))
# Shuffle, batch (size 10) and strip element names so fit() receives plain
# (x, y) pairs.
training_dataset <- dataset_shuffle(training_dataset, buffer_size = 10L*128)
training_dataset <- dataset_batch(training_dataset, 10L)
training_dataset <- dataset_map(training_dataset, unname)
# Inspect one batch: the image tensor, its shape, and the label tensor.
dataset_iterator <- as_iterator(training_dataset)
dataset_list <- iterate(dataset_iterator)
dataset_list[[1]][[1]]
dataset_list[[1]][[1]]$shape
dataset_list[[1]][[2]]
# Same decode/convert/resize pipeline for the held-out test partition.
# No shuffling here: order must stay stable so predictions can be matched
# back to testing(data) rows by index later.
validation_dataset <- tensor_slices_dataset(testing(data))
validation_dataset <-
dataset_map(validation_dataset, function(.x)
list_modify(.x, img = tf$image$decode_jpeg(tf$io$read_file(.x$img))))
validation_dataset <-
dataset_map(validation_dataset, function(.x)
list_modify(.x, img = tf$image$convert_image_dtype(.x$img, dtype = tf$float32)))
validation_dataset <-
dataset_map(validation_dataset, function(.x)
list_modify(.x, img = tf$image$resize(.x$img, size = shape(subset_size[1], subset_size[2]))))
validation_dataset <- dataset_batch(validation_dataset, 10L)
validation_dataset <- dataset_map(validation_dataset, unname)
# Trainieren des Modells  (train the model)
# NOTE(review): `lr` is the legacy argument name; newer keras versions expect
# `learning_rate` -- confirm against the installed keras version.
compile(
first_model,
optimizer = optimizer_rmsprop(lr = 5e-5),
loss = "binary_crossentropy",
metrics = "accuracy"
)
diagnostics <- fit(first_model,
training_dataset,
epochs = 18,
validation_data = validation_dataset)
plot(diagnostics)
# Vorhersage  (predict on the held-out partition)
predictions <- predict(first_model,validation_dataset)
# Plot three random test tiles with predicted score next to the true label.
par(mfrow=c(1,3),mai=c(0.1,0.1,0.3,0.1),cex=0.8)
for(i in 1:3){
# NOTE(review): `sample` shadows base::sample; max = 56 assumes the test
# partition has at least 56 rows -- verify against the actual split size.
sample <- floor(runif(n = 1,min = 1,max = 56))
img_path <- as.character(testing(data)[[sample,1]])
img <- stack(img_path)
plotRGB(img,margins=T,main = paste("prediction:",round(predictions[sample],digits=3)," | ","label:",as.character(testing(data)[[sample,2]])))
}
# Einladen der Validierungsdaten  (load the Berlin validation tiles)
subset_list <- list.files("Data/Subsets_128/Slices_Berlin/True", full.names = T)
dataset <- tensor_slices_dataset(subset_list)
# Unlabelled pipeline: decode -> float32 -> resize 128x128 -> batches of 10.
dataset <- dataset_map(dataset, function(.x)
tf$image$decode_jpeg(tf$io$read_file(.x)))
dataset <- dataset_map(dataset, function(.x)
tf$image$convert_image_dtype(.x, dtype = tf$float32))
dataset <- dataset_map(dataset, function(.x)
tf$image$resize(.x, size = shape(128, 128)))
dataset <- dataset_batch(dataset, 10L)
dataset <- dataset_map(dataset, unname)
#Vorhersage auf den Validierungsdaten  (predict on the validation tiles)
predictions <- predict(first_model, dataset)
# Persist the trained classifier.
save_model_hdf5(first_model,filepath = "Data/Models/imagebased_model2.h5")
# Validierung
# NOTE(review): the section label above is misleading -- this block builds a
# transfer-learning classifier on frozen VGG16 features, not a validation step.
vgg16_feat_extr <- application_vgg16(include_top = F,input_shape = c(128,128,3),weights = "imagenet")
freeze_weights(vgg16_feat_extr)
# Reuse the first 15 VGG16 layers as a frozen feature extractor and add a
# trainable dense head with a sigmoid output.
pretrained_model <- keras_model_sequential(vgg16_feat_extr$layers[1:15])
pretrained_model <- layer_flatten(pretrained_model)
pretrained_model <- layer_dense(pretrained_model,units = 256,activation = "relu")
pretrained_model <- layer_dense(pretrained_model,units = 1,activation = "sigmoid")
# NOTE(review): `lr` is the legacy argument name (newer keras:
# `learning_rate`).
compile(
pretrained_model,
optimizer = optimizer_rmsprop(lr = 1e-5),
loss = "binary_crossentropy",
metrics = c("accuracy")
)
diagnostics <- fit(pretrained_model,
training_dataset,
epochs = 8,
validation_data = validation_dataset)
plot(diagnostics)
diagnostics$metrics
#UNET########################################################
# Hand-built U-Net: two down-sampling stages, a bottleneck, and two
# up-sampling stages; conc_tensor2/conc_tensor1 carry the skip connections.
input_tensor <- layer_input(shape = c(448,448,3))
unet_tensor <- layer_conv_2d(input_tensor,filters = 64,kernel_size = c(3,3), padding = "same",activation = "relu")
conc_tensor2 <- layer_conv_2d(unet_tensor,filters = 64,kernel_size = c(3,3), padding = "same",activation = "relu")
unet_tensor <- layer_max_pooling_2d(conc_tensor2)
unet_tensor <- layer_conv_2d(unet_tensor,filters = 128,kernel_size = c(3,3), padding = "same",activation = "relu")
conc_tensor1 <- layer_conv_2d(unet_tensor,filters = 128,kernel_size = c(3,3), padding = "same",activation = "relu")
unet_tensor <- layer_max_pooling_2d(conc_tensor1)
unet_tensor <- layer_conv_2d(unet_tensor,filters = 256,kernel_size = c(3,3), padding = "same",activation = "relu")
unet_tensor <- layer_conv_2d(unet_tensor,filters = 256,kernel_size = c(3,3), padding = "same",activation = "relu")
unet_tensor <- layer_conv_2d_transpose(unet_tensor,filters = 128,kernel_size = c(2,2),strides = 2,padding = "same")
unet_tensor <- layer_concatenate(list(conc_tensor1,unet_tensor))
unet_tensor <- layer_conv_2d(unet_tensor, filters = 128, kernel_size = c(3,3),padding = "same", activation = "relu")
unet_tensor <- layer_conv_2d(unet_tensor, filters = 128, kernel_size = c(3,3),padding = "same", activation = "relu")
unet_tensor <- layer_conv_2d_transpose(unet_tensor,filters = 64,kernel_size = c(2,2),strides = 2,padding = "same")
unet_tensor <- layer_concatenate(list(conc_tensor2,unet_tensor))
unet_tensor <- layer_conv_2d(unet_tensor, filters = 64, kernel_size = c(3,3),padding = "same", activation = "relu")
unet_tensor <- layer_conv_2d(unet_tensor, filters = 64, kernel_size = c(3,3),padding = "same", activation = "relu")
# Single sigmoid channel: per-pixel binary segmentation mask.
unet_tensor <- layer_conv_2d(unet_tensor,filters = 1,kernel_size = 1, activation = "sigmoid")
# NOTE(review): unet_model is never used again below; the pretrained variant
# built next replaces it.
unet_model <- keras_model(inputs = input_tensor, outputs = unet_tensor)
# U-Net with a VGG16 encoder: the first 15 imagenet-pretrained VGG16 layers
# form the contracting path; skip connections come from layers 14, 10, 6, 3.
vgg16_feat_extr <- application_vgg16(weights = "imagenet", include_top = FALSE, input_shape = c (448,448,3))
unet_tensor <- vgg16_feat_extr$layers[[15]]$output
unet_tensor <- layer_conv_2d(unet_tensor, filters = 1024, kernel_size = 3, padding = "same", activation = "relu")
unet_tensor <- layer_conv_2d(unet_tensor, filters = 1024, kernel_size = 3, padding = "same", activation = "relu")
unet_tensor <- layer_conv_2d_transpose(unet_tensor, filters = 512, kernel_size = 2, strides = 2, padding = "same")
unet_tensor <- layer_concatenate(list(vgg16_feat_extr$layers[[14]]$output, unet_tensor))
unet_tensor <- layer_conv_2d(unet_tensor, filters = 512, kernel_size = 3, padding = "same", activation = "relu")
unet_tensor <- layer_conv_2d(unet_tensor, filters = 512, kernel_size = 3, padding = "same", activation = "relu")
unet_tensor <- layer_conv_2d_transpose(unet_tensor, filters = 256, kernel_size = 2, strides = 2, padding = "same")
unet_tensor <- layer_concatenate(list(vgg16_feat_extr$layers[[10]]$output, unet_tensor))
unet_tensor <- layer_conv_2d(unet_tensor,filters = 256, kernel_size = 3, padding = "same", activation = "relu")
unet_tensor <- layer_conv_2d(unet_tensor,filters = 256, kernel_size = 3, padding = "same", activation = "relu")
unet_tensor <- layer_conv_2d_transpose(unet_tensor, filters = 128, kernel_size = 2, strides = 2, padding = "same")
unet_tensor <- layer_concatenate(list(vgg16_feat_extr$layers[[6]]$output, unet_tensor))
unet_tensor <- layer_conv_2d(unet_tensor, filters = 128, kernel_size = 3, padding = "same", activation = "relu")
unet_tensor <- layer_conv_2d(unet_tensor, filters = 128, kernel_size = 3, padding = "same", activation = "relu")
unet_tensor <- layer_conv_2d_transpose(unet_tensor, filters = 64, kernel_size = 2, strides = 2, padding = "same")
unet_tensor <- layer_concatenate(list(vgg16_feat_extr$layers[[3]]$output, unet_tensor))
unet_tensor <- layer_conv_2d(unet_tensor, filters = 64, kernel_size = 3, padding = "same", activation = "relu")
unet_tensor <- layer_conv_2d(unet_tensor, filters = 64, kernel_size = 3, padding = "same", activation = "relu")
unet_tensor <- layer_conv_2d(unet_tensor, filters = 1, kernel_size = 1, activation = "sigmoid")
pretrained_unet <- keras_model(inputs = vgg16_feat_extr$input, outputs = unet_tensor)
# Randomly perturb brightness, contrast and saturation of an image tensor,
# then clamp the result back into the valid [0, 1] range.
#
# img: a float image tensor with values in [0, 1] (TensorFlow tensor).
# Returns the augmented image tensor.
spectral_augmentation <- function(img) {
  img <- tf$image$random_brightness(img, max_delta = 0.3)
  img <- tf$image$random_contrast(img, lower = 0.8, upper = 1.2)
  img <- tf$image$random_saturation(img, lower = 0.8, upper = 1.2)
  img <- tf$clip_by_value(img, 0, 1)
  # Fix: the original ended on an assignment, whose value is returned
  # invisibly; return the tensor explicitly and visibly.
  img
}
# Build a tfdatasets input pipeline for U-Net training/validation or
# prediction.
#
# files: data frame with columns `img` and `mask` (file paths); used when
#   predict = FALSE.
# train: if TRUE, quadruple the data via spectral jitter plus horizontal,
#   vertical and combined flips, then shuffle.
# predict: if TRUE, build an unlabelled pipeline from subsets_path instead.
# subsets_path: directory of image tiles (prediction mode only).
# model_input_shape: target (height, width) for resizing.
# batch_size: batch size of the returned dataset.
# Returns a batched tf dataset (the value of the if/else, returned
# implicitly).
dl_prepare_data <- function(files=NULL, train, predict=FALSE, subsets_path=NULL, model_input_shape = c(448,448), batch_size = 10L) {
if (!predict){
# Local definition shadows the global spectral_augmentation and uses
# slightly milder contrast/saturation ranges (upper 1.1 instead of 1.2).
spectral_augmentation <- function(img) {
img <- tf$image$random_brightness(img, max_delta = 0.3)
img <- tf$image$random_contrast(img, lower = 0.8, upper = 1.1)
img <- tf$image$random_saturation(img, lower = 0.8, upper = 1.1)
img <- tf$clip_by_value(img, 0, 1)
}
# Read image and mask, convert both to float32 in [0,1], resize to the
# model input size.
dataset <- tensor_slices_dataset(files)
dataset <-
dataset_map(dataset, function(.x)
list_modify(.x,img = tf$image$decode_jpeg(tf$io$read_file(.x$img)),
mask = tf$image$decode_jpeg(tf$io$read_file(.x$mask))))
dataset <-
dataset_map(dataset, function(.x)
list_modify(.x, img = tf$image$convert_image_dtype(.x$img, dtype = tf$float32),
mask = tf$image$convert_image_dtype(.x$mask, dtype = tf$float32)))
dataset <-
dataset_map(dataset, function(.x)
list_modify(.x, img = tf$image$resize(.x$img, size = shape(model_input_shape[1], model_input_shape[2])),
mask = tf$image$resize(.x$mask, size = shape(model_input_shape[1], model_input_shape[2]))))
if (train) {
# Augmentation round 1: spectral jitter on the image, then a horizontal
# flip applied to image AND mask so they stay aligned.
augmentation <-
dataset_map(dataset, function(.x)
list_modify(.x, img = spectral_augmentation(.x$img)))
augmentation <-
dataset_map(augmentation, function(.x)
list_modify(.x, img = tf$image$flip_left_right(.x$img),
mask = tf$image$flip_left_right(.x$mask)))
dataset_augmented <- dataset_concatenate(dataset,augmentation)
# Round 2: spectral jitter + vertical flip.
augmentation <-
dataset_map(dataset, function(.x)
list_modify(.x, img = spectral_augmentation(.x$img)))
augmentation <-
dataset_map(augmentation, function(.x)
list_modify(.x, img = tf$image$flip_up_down(.x$img),
mask = tf$image$flip_up_down(.x$mask)))
dataset_augmented <- dataset_concatenate(dataset_augmented,augmentation)
# Round 3: spectral jitter + both flips (equivalent to a 180-degree
# rotation).
augmentation <-
dataset_map(dataset, function(.x)
list_modify(.x, img = spectral_augmentation(.x$img)))
augmentation <-
dataset_map(augmentation, function(.x)
list_modify(.x, img = tf$image$flip_left_right(.x$img),
mask = tf$image$flip_left_right(.x$mask)))
augmentation <-
dataset_map(augmentation, function(.x)
list_modify(.x, img = tf$image$flip_up_down(.x$img),
mask = tf$image$flip_up_down(.x$mask)))
dataset_augmented <- dataset_concatenate(dataset_augmented,augmentation)
}
if (train) {
dataset <- dataset_shuffle(dataset_augmented, buffer_size = batch_size*128)
}
dataset <- dataset_batch(dataset, batch_size)
dataset <- dataset_map(dataset, unname)
}else{
# Prediction mode: order tiles numerically by file name (not
# lexicographically) so predictions can be reassembled spatially, then
# decode/convert/resize without masks.
o <- order(as.numeric(tools::file_path_sans_ext(basename(list.files(subsets_path)))))
subset_list <- list.files(subsets_path, full.names = T)[o]
dataset <- tensor_slices_dataset(subset_list)
dataset <-
dataset_map(dataset, function(.x)
tf$image$decode_jpeg(tf$io$read_file(.x)))
dataset <-
dataset_map(dataset, function(.x)
tf$image$convert_image_dtype(.x, dtype = tf$float32))
dataset <-
dataset_map(dataset, function(.x)
tf$image$resize(.x, size = shape(model_input_shape[1], model_input_shape[2])))
dataset <- dataset_batch(dataset, batch_size)
dataset <- dataset_map(dataset, unname)
}
}
# Einladen der Trainingsdaten  (load U-Net training data: image/mask pairs)
# NOTE(review): pattern is a regex, so "*.jpg" matches any name containing
# ".jpg" only by accident ("\\.jpg$" would be correct); pairing also assumes
# the two directories list in identical order -- verify.
files <- data.frame(
img = list.files("Data/Subsets_448/Slices_Berlin", full.names = TRUE, pattern = "*.jpg"),
mask = list.files("Data/Subsets_448/Slices_Berlin_Mask", full.names = TRUE, pattern = "*.jpg")
)
# 80/20 split; img/mask rows stay paired because they share the data frame.
files <- initial_split(files, prop = 0.8)
training_dataset <- dl_prepare_data(training(files),train = TRUE,model_input_shape = c(448,448),batch_size = 10L)
validation_dataset <- dl_prepare_data(testing(files),train = FALSE,model_input_shape = c(448,448),batch_size = 10L)
# Materialize the training tensors once (inspection only; not used below).
training_tensors <- training_dataset%>%as_iterator()%>%iterate()
# Training des Unets  (train the U-Net)
compile(
pretrained_unet,
optimizer = optimizer_rmsprop(lr = 1e-5),
loss = "binary_crossentropy",
metrics = c(metric_binary_accuracy)
)
diagnostics <- fit(pretrained_unet,
training_dataset,
epochs = 15,
validation_data = validation_dataset)
plot(diagnostics)
# Persist and immediately reload the trained U-Net.
save_model_hdf5(pretrained_unet,filepath = "Unets/pretrained_unet_versuch4")
pretrained_unet <- load_model_hdf5("Unets/pretrained_unet_versuch4")
# Vergleich Maske/Satellitenbild/Vorhersage  (compare mask / image /
# prediction side by side for one random test sample)
sample <- floor(runif(n = 1,min = 1,max = 10))
img_path <- as.character(testing(files)[[sample,1]])
mask_path <- as.character(testing(files)[[sample,2]])
img <- magick::image_read(img_path)
mask <- magick::image_read(mask_path)
pred <- magick::image_read(as.raster(predict(object = pretrained_unet,validation_dataset)[sample,,,]))
out <- magick::image_append(c(
magick::image_append(mask, stack = TRUE),
magick::image_append(img, stack = TRUE),
magick::image_append(pred, stack = TRUE)
)
)
plot(out)
# Einladen der Validierungsdaten  (Muenster tiles, prediction-only pipeline)
test_dataset <- dl_prepare_data(train = F,predict = T,subsets_path="Data/Subsets_448/Slices_Muenster/",model_input_shape = c(448,448),batch_size = 5L)
system.time(predictions <- predict(pretrained_unet,test_dataset))
# Plot per-channel activations of selected model layers for one input image.
#
# img_path: path to the input image.
# model: a keras model; its input shape drives the resize.
# activations_layers: integer indices of the layers to visualize.
# channels: channel indices to plot for each selected layer.
# Side effect: draws one image() heatmap per (layer, channel) pair.
plot_layer_activations <- function(img_path, model, activations_layers,channels){
model_input_size <- c(model$input_shape[[2]], model$input_shape[[3]])
# Load and preprocess the image the way imagenet-pretrained models expect.
img <- image_load(img_path, target_size = model_input_size) %>%
image_to_array() %>%
array_reshape(dim = c(1, model_input_size[1], model_input_size[2], 3)) %>%
imagenet_preprocess_input()
# Auxiliary model whose outputs are the requested intermediate activations.
layer_outputs <- lapply(model$layers[activations_layers], function(layer) layer$output)
activation_model <- keras_model(inputs = model$input, outputs = layer_outputs)
activations <- predict(activation_model,img)
# A single selected layer yields a bare array; normalize to a list.
if(!is.list(activations)){
activations <- list(activations)
}
# Draw one channel as a heatmap; rotate() so the image displays upright.
plot_channel <- function(channel,layer_name,channel_name) {
rotate <- function(x) t(apply(x, 2, rev))
image(rotate(channel), axes = FALSE, asp = 1,
col = terrain.colors(12),main=paste("layer:",layer_name,"channel:",channel_name))
}
for (i in 1:length(activations)) {
layer_activation <- activations[[i]]
layer_name <- model$layers[[activations_layers[i]]]$name
# NOTE(review): n_features is computed but never used.
n_features <- dim(layer_activation)[[4]]
for (c in channels){
channel_image <- layer_activation[1,,,c]
plot_channel(channel_image,layer_name,c)
}
}
}
# Show the first six Muenster tiles as RGB images (one at a time), then
# switch to a 3x4 grid for the activation heatmaps that follow.
par(mfrow = c(1, 1))
for (tile_idx in 1:6) {
  tile_file <- sprintf("Data/Subsets_448/Slices_Muenster/M_%02d.jpg", tile_idx)
  plot(read_stars(tile_file), rgb = c(1, 2, 3))
}
par(mfrow = c(3, 4), mar = c(1, 1, 1, 1), cex = 0.5)
plot_layer_activations(img_path = "Data/Subsets_448/Slices_Muenster/M_01.jpg", model = pretrained_unet, activations_layers = c(2, 3, 5, 6, 8, 9, 10, 12, 13, 14), channels = 1:4)
| /Detection_of_urban_areas.R | no_license | A-Spork/Cloud_Projekt | R | false | false | 17,486 | r | library(sp)
# NOTE(review): from here the file repeats the deep-learning script above
# verbatim (dataset-concatenation artifact); this copy's library(sp) call is
# fused into the metadata junk line just above.
library(keras)
library(tensorflow)
library(tfdatasets)
library(purrr)
library(ggplot2)
library(rsample)
library(stars)
library(raster)
library(reticulate)
library(mapview)
library(imager)
# NOTE(review): raster and stars are attached twice in this copy as well.
library(raster)
library(gdalUtils)
library(stars)
setwd("./")
# ################image based#############
# NOTE(review): verbatim duplicate of the image-based classifier section
# earlier in this file; see the first copy for detailed commentary.
first_model <- keras_model_sequential()
layer_conv_2d(first_model,filters = 32,kernel_size = 3, activation = "relu",input_shape = c(128,128,3))
layer_max_pooling_2d(first_model, pool_size = c(2, 2))
layer_conv_2d(first_model, filters = 64, kernel_size = c(3, 3), activation = "relu")
layer_max_pooling_2d(first_model, pool_size = c(2, 2))
layer_conv_2d(first_model, filters = 128, kernel_size = c(3, 3), activation = "relu")
layer_max_pooling_2d(first_model, pool_size = c(2, 2))
layer_conv_2d(first_model, filters = 128, kernel_size = c(3, 3), activation = "relu")
layer_max_pooling_2d(first_model, pool_size = c(2, 2))
layer_flatten(first_model)
layer_dense(first_model, units = 256, activation = "relu")
layer_dense(first_model, units = 1, activation = "sigmoid")
# Einladen der Trainingsdaten
subset_list <- list.files("Data/Subsets_128/Slices_Muenster/True", full.names = T)
data_true <- data.frame(img=subset_list,lbl=rep(1L,length(subset_list)))
subset_list <- list.files("Data/Subsets_128/Slices_Muenster/False", full.names = T)
data_false <- data.frame(img=subset_list,lbl=rep(0L,length(subset_list)))
data <- rbind(data_true,data_false)
set.seed(2020)
#Verarbeitung der Trainingsdaten
data <- initial_split(data,prop = 0.75, strata = "lbl")
c(nrow(training(data)[training(data)$lbl==0,]), nrow(training(data)[training(data)$lbl==1,]))
training_dataset <- tensor_slices_dataset(training(data))
dataset_iterator <- as_iterator(training_dataset)
dataset_list <- iterate(dataset_iterator)
head(dataset_list)
subset_size <- first_model$input_shape[2:3]
training_dataset <-
dataset_map(training_dataset, function(.x)
list_modify(.x, img = tf$image$decode_jpeg(tf$io$read_file(.x$img))))
training_dataset <-
dataset_map(training_dataset, function(.x)
list_modify(.x, img = tf$image$convert_image_dtype(.x$img, dtype = tf$float32)))
training_dataset <-
dataset_map(training_dataset, function(.x)
list_modify(.x, img = tf$image$resize(.x$img, size = shape(subset_size[1], subset_size[2]))))
training_dataset <- dataset_shuffle(training_dataset, buffer_size = 10L*128)
training_dataset <- dataset_batch(training_dataset, 10L)
training_dataset <- dataset_map(training_dataset, unname)
dataset_iterator <- as_iterator(training_dataset)
dataset_list <- iterate(dataset_iterator)
dataset_list[[1]][[1]]
dataset_list[[1]][[1]]$shape
dataset_list[[1]][[2]]
validation_dataset <- tensor_slices_dataset(testing(data))
validation_dataset <-
dataset_map(validation_dataset, function(.x)
list_modify(.x, img = tf$image$decode_jpeg(tf$io$read_file(.x$img))))
validation_dataset <-
dataset_map(validation_dataset, function(.x)
list_modify(.x, img = tf$image$convert_image_dtype(.x$img, dtype = tf$float32)))
validation_dataset <-
dataset_map(validation_dataset, function(.x)
list_modify(.x, img = tf$image$resize(.x$img, size = shape(subset_size[1], subset_size[2]))))
validation_dataset <- dataset_batch(validation_dataset, 10L)
validation_dataset <- dataset_map(validation_dataset, unname)
# Trainieren des Modells
compile(
first_model,
optimizer = optimizer_rmsprop(lr = 5e-5),
loss = "binary_crossentropy",
metrics = "accuracy"
)
diagnostics <- fit(first_model,
training_dataset,
epochs = 18,
validation_data = validation_dataset)
plot(diagnostics)
# Vorhersage
predictions <- predict(first_model,validation_dataset)
par(mfrow=c(1,3),mai=c(0.1,0.1,0.3,0.1),cex=0.8)
for(i in 1:3){
sample <- floor(runif(n = 1,min = 1,max = 56))
img_path <- as.character(testing(data)[[sample,1]])
img <- stack(img_path)
plotRGB(img,margins=T,main = paste("prediction:",round(predictions[sample],digits=3)," | ","label:",as.character(testing(data)[[sample,2]])))
}
# Einladen der Validierungsdaten
# NOTE(review): verbatim duplicate of the Berlin-prediction, VGG16-transfer
# and U-Net-construction sections earlier in this file; see the first copy
# for detailed commentary.
subset_list <- list.files("Data/Subsets_128/Slices_Berlin/True", full.names = T)
dataset <- tensor_slices_dataset(subset_list)
dataset <- dataset_map(dataset, function(.x)
tf$image$decode_jpeg(tf$io$read_file(.x)))
dataset <- dataset_map(dataset, function(.x)
tf$image$convert_image_dtype(.x, dtype = tf$float32))
dataset <- dataset_map(dataset, function(.x)
tf$image$resize(.x, size = shape(128, 128)))
dataset <- dataset_batch(dataset, 10L)
dataset <- dataset_map(dataset, unname)
#Vorhersage auf den Validierungsdaten
predictions <- predict(first_model, dataset)
save_model_hdf5(first_model,filepath = "Data/Models/imagebased_model2.h5")
# Validierung
vgg16_feat_extr <- application_vgg16(include_top = F,input_shape = c(128,128,3),weights = "imagenet")
freeze_weights(vgg16_feat_extr)
pretrained_model <- keras_model_sequential(vgg16_feat_extr$layers[1:15])
pretrained_model <- layer_flatten(pretrained_model)
pretrained_model <- layer_dense(pretrained_model,units = 256,activation = "relu")
pretrained_model <- layer_dense(pretrained_model,units = 1,activation = "sigmoid")
compile(
pretrained_model,
optimizer = optimizer_rmsprop(lr = 1e-5),
loss = "binary_crossentropy",
metrics = c("accuracy")
)
diagnostics <- fit(pretrained_model,
training_dataset,
epochs = 8,
validation_data = validation_dataset)
plot(diagnostics)
diagnostics$metrics
#UNET########################################################
input_tensor <- layer_input(shape = c(448,448,3))
unet_tensor <- layer_conv_2d(input_tensor,filters = 64,kernel_size = c(3,3), padding = "same",activation = "relu")
conc_tensor2 <- layer_conv_2d(unet_tensor,filters = 64,kernel_size = c(3,3), padding = "same",activation = "relu")
unet_tensor <- layer_max_pooling_2d(conc_tensor2)
unet_tensor <- layer_conv_2d(unet_tensor,filters = 128,kernel_size = c(3,3), padding = "same",activation = "relu")
conc_tensor1 <- layer_conv_2d(unet_tensor,filters = 128,kernel_size = c(3,3), padding = "same",activation = "relu")
unet_tensor <- layer_max_pooling_2d(conc_tensor1)
unet_tensor <- layer_conv_2d(unet_tensor,filters = 256,kernel_size = c(3,3), padding = "same",activation = "relu")
unet_tensor <- layer_conv_2d(unet_tensor,filters = 256,kernel_size = c(3,3), padding = "same",activation = "relu")
unet_tensor <- layer_conv_2d_transpose(unet_tensor,filters = 128,kernel_size = c(2,2),strides = 2,padding = "same")
unet_tensor <- layer_concatenate(list(conc_tensor1,unet_tensor))
unet_tensor <- layer_conv_2d(unet_tensor, filters = 128, kernel_size = c(3,3),padding = "same", activation = "relu")
unet_tensor <- layer_conv_2d(unet_tensor, filters = 128, kernel_size = c(3,3),padding = "same", activation = "relu")
unet_tensor <- layer_conv_2d_transpose(unet_tensor,filters = 64,kernel_size = c(2,2),strides = 2,padding = "same")
unet_tensor <- layer_concatenate(list(conc_tensor2,unet_tensor))
unet_tensor <- layer_conv_2d(unet_tensor, filters = 64, kernel_size = c(3,3),padding = "same", activation = "relu")
unet_tensor <- layer_conv_2d(unet_tensor, filters = 64, kernel_size = c(3,3),padding = "same", activation = "relu")
unet_tensor <- layer_conv_2d(unet_tensor,filters = 1,kernel_size = 1, activation = "sigmoid")
unet_model <- keras_model(inputs = input_tensor, outputs = unet_tensor)
vgg16_feat_extr <- application_vgg16(weights = "imagenet", include_top = FALSE, input_shape = c (448,448,3))
unet_tensor <- vgg16_feat_extr$layers[[15]]$output
unet_tensor <- layer_conv_2d(unet_tensor, filters = 1024, kernel_size = 3, padding = "same", activation = "relu")
unet_tensor <- layer_conv_2d(unet_tensor, filters = 1024, kernel_size = 3, padding = "same", activation = "relu")
unet_tensor <- layer_conv_2d_transpose(unet_tensor, filters = 512, kernel_size = 2, strides = 2, padding = "same")
unet_tensor <- layer_concatenate(list(vgg16_feat_extr$layers[[14]]$output, unet_tensor))
unet_tensor <- layer_conv_2d(unet_tensor, filters = 512, kernel_size = 3, padding = "same", activation = "relu")
unet_tensor <- layer_conv_2d(unet_tensor, filters = 512, kernel_size = 3, padding = "same", activation = "relu")
unet_tensor <- layer_conv_2d_transpose(unet_tensor, filters = 256, kernel_size = 2, strides = 2, padding = "same")
unet_tensor <- layer_concatenate(list(vgg16_feat_extr$layers[[10]]$output, unet_tensor))
unet_tensor <- layer_conv_2d(unet_tensor,filters = 256, kernel_size = 3, padding = "same", activation = "relu")
unet_tensor <- layer_conv_2d(unet_tensor,filters = 256, kernel_size = 3, padding = "same", activation = "relu")
unet_tensor <- layer_conv_2d_transpose(unet_tensor, filters = 128, kernel_size = 2, strides = 2, padding = "same")
unet_tensor <- layer_concatenate(list(vgg16_feat_extr$layers[[6]]$output, unet_tensor))
unet_tensor <- layer_conv_2d(unet_tensor, filters = 128, kernel_size = 3, padding = "same", activation = "relu")
unet_tensor <- layer_conv_2d(unet_tensor, filters = 128, kernel_size = 3, padding = "same", activation = "relu")
unet_tensor <- layer_conv_2d_transpose(unet_tensor, filters = 64, kernel_size = 2, strides = 2, padding = "same")
unet_tensor <- layer_concatenate(list(vgg16_feat_extr$layers[[3]]$output, unet_tensor))
unet_tensor <- layer_conv_2d(unet_tensor, filters = 64, kernel_size = 3, padding = "same", activation = "relu")
unet_tensor <- layer_conv_2d(unet_tensor, filters = 64, kernel_size = 3, padding = "same", activation = "relu")
unet_tensor <- layer_conv_2d(unet_tensor, filters = 1, kernel_size = 1, activation = "sigmoid")
pretrained_unet <- keras_model(inputs = vgg16_feat_extr$input, outputs = unet_tensor)
# Randomly perturb brightness, contrast and saturation of an image tensor,
# then clamp the result back into the valid [0, 1] range.
#
# img: a float image tensor with values in [0, 1] (TensorFlow tensor).
# Returns the augmented image tensor.
spectral_augmentation <- function(img) {
  img <- tf$image$random_brightness(img, max_delta = 0.3)
  img <- tf$image$random_contrast(img, lower = 0.8, upper = 1.2)
  img <- tf$image$random_saturation(img, lower = 0.8, upper = 1.2)
  img <- tf$clip_by_value(img, 0, 1)
  # Fix: the original ended on an assignment, whose value is returned
  # invisibly; return the tensor explicitly and visibly.
  img
}
# Build a tfdatasets input pipeline for U-Net training/validation or
# prediction.
# NOTE(review): verbatim duplicate of the dl_prepare_data definition earlier
# in this file.
#
# files: data frame with columns `img` and `mask`; used when predict = FALSE.
# train: if TRUE, quadruple the data via spectral jitter plus flips, then
#   shuffle.
# predict: if TRUE, build an unlabelled pipeline from subsets_path instead.
# subsets_path: directory of image tiles (prediction mode only).
# model_input_shape: target (height, width) for resizing.
# batch_size: batch size of the returned dataset.
# Returns a batched tf dataset (value of the if/else, returned implicitly).
dl_prepare_data <- function(files=NULL, train, predict=FALSE, subsets_path=NULL, model_input_shape = c(448,448), batch_size = 10L) {
if (!predict){
# Local definition shadows the global spectral_augmentation (milder ranges).
spectral_augmentation <- function(img) {
img <- tf$image$random_brightness(img, max_delta = 0.3)
img <- tf$image$random_contrast(img, lower = 0.8, upper = 1.1)
img <- tf$image$random_saturation(img, lower = 0.8, upper = 1.1)
img <- tf$clip_by_value(img, 0, 1)
}
dataset <- tensor_slices_dataset(files)
dataset <-
dataset_map(dataset, function(.x)
list_modify(.x,img = tf$image$decode_jpeg(tf$io$read_file(.x$img)),
mask = tf$image$decode_jpeg(tf$io$read_file(.x$mask))))
dataset <-
dataset_map(dataset, function(.x)
list_modify(.x, img = tf$image$convert_image_dtype(.x$img, dtype = tf$float32),
mask = tf$image$convert_image_dtype(.x$mask, dtype = tf$float32)))
dataset <-
dataset_map(dataset, function(.x)
list_modify(.x, img = tf$image$resize(.x$img, size = shape(model_input_shape[1], model_input_shape[2])),
mask = tf$image$resize(.x$mask, size = shape(model_input_shape[1], model_input_shape[2]))))
if (train) {
# Three augmentation rounds: horizontal flip, vertical flip, both flips;
# each round also applies spectral jitter and flips the mask in step.
augmentation <-
dataset_map(dataset, function(.x)
list_modify(.x, img = spectral_augmentation(.x$img)))
augmentation <-
dataset_map(augmentation, function(.x)
list_modify(.x, img = tf$image$flip_left_right(.x$img),
mask = tf$image$flip_left_right(.x$mask)))
dataset_augmented <- dataset_concatenate(dataset,augmentation)
augmentation <-
dataset_map(dataset, function(.x)
list_modify(.x, img = spectral_augmentation(.x$img)))
augmentation <-
dataset_map(augmentation, function(.x)
list_modify(.x, img = tf$image$flip_up_down(.x$img),
mask = tf$image$flip_up_down(.x$mask)))
dataset_augmented <- dataset_concatenate(dataset_augmented,augmentation)
augmentation <-
dataset_map(dataset, function(.x)
list_modify(.x, img = spectral_augmentation(.x$img)))
augmentation <-
dataset_map(augmentation, function(.x)
list_modify(.x, img = tf$image$flip_left_right(.x$img),
mask = tf$image$flip_left_right(.x$mask)))
augmentation <-
dataset_map(augmentation, function(.x)
list_modify(.x, img = tf$image$flip_up_down(.x$img),
mask = tf$image$flip_up_down(.x$mask)))
dataset_augmented <- dataset_concatenate(dataset_augmented,augmentation)
}
if (train) {
dataset <- dataset_shuffle(dataset_augmented, buffer_size = batch_size*128)
}
dataset <- dataset_batch(dataset, batch_size)
dataset <- dataset_map(dataset, unname)
}else{
# Prediction mode: numeric file-name ordering, no masks.
o <- order(as.numeric(tools::file_path_sans_ext(basename(list.files(subsets_path)))))
subset_list <- list.files(subsets_path, full.names = T)[o]
dataset <- tensor_slices_dataset(subset_list)
dataset <-
dataset_map(dataset, function(.x)
tf$image$decode_jpeg(tf$io$read_file(.x)))
dataset <-
dataset_map(dataset, function(.x)
tf$image$convert_image_dtype(.x, dtype = tf$float32))
dataset <-
dataset_map(dataset, function(.x)
tf$image$resize(.x, size = shape(model_input_shape[1], model_input_shape[2])))
dataset <- dataset_batch(dataset, batch_size)
dataset <- dataset_map(dataset, unname)
}
}
# Einladen der Trainingsdaten
# NOTE(review): verbatim duplicate of the U-Net training/visualization
# section earlier in this file; see the first copy for detailed commentary.
files <- data.frame(
img = list.files("Data/Subsets_448/Slices_Berlin", full.names = TRUE, pattern = "*.jpg"),
mask = list.files("Data/Subsets_448/Slices_Berlin_Mask", full.names = TRUE, pattern = "*.jpg")
)
files <- initial_split(files, prop = 0.8)
training_dataset <- dl_prepare_data(training(files),train = TRUE,model_input_shape = c(448,448),batch_size = 10L)
validation_dataset <- dl_prepare_data(testing(files),train = FALSE,model_input_shape = c(448,448),batch_size = 10L)
training_tensors <- training_dataset%>%as_iterator()%>%iterate()
# Training des Unets
compile(
pretrained_unet,
optimizer = optimizer_rmsprop(lr = 1e-5),
loss = "binary_crossentropy",
metrics = c(metric_binary_accuracy)
)
diagnostics <- fit(pretrained_unet,
training_dataset,
epochs = 15,
validation_data = validation_dataset)
plot(diagnostics)
save_model_hdf5(pretrained_unet,filepath = "Unets/pretrained_unet_versuch4")
pretrained_unet <- load_model_hdf5("Unets/pretrained_unet_versuch4")
# Vergleich Maske/Satellitenbild/Vorhersage
sample <- floor(runif(n = 1,min = 1,max = 10))
img_path <- as.character(testing(files)[[sample,1]])
mask_path <- as.character(testing(files)[[sample,2]])
img <- magick::image_read(img_path)
mask <- magick::image_read(mask_path)
pred <- magick::image_read(as.raster(predict(object = pretrained_unet,validation_dataset)[sample,,,]))
out <- magick::image_append(c(
magick::image_append(mask, stack = TRUE),
magick::image_append(img, stack = TRUE),
magick::image_append(pred, stack = TRUE)
)
)
plot(out)
# Einladen der Validierungsdaten
test_dataset <- dl_prepare_data(train = F,predict = T,subsets_path="Data/Subsets_448/Slices_Muenster/",model_input_shape = c(448,448),batch_size = 5L)
system.time(predictions <- predict(pretrained_unet,test_dataset))
# Plot per-channel activations of selected model layers for one image.
#
# Args:
#   img_path           path to the input image on disk.
#   model              a keras model; its input shape gives the resize target.
#   activations_layers integer indices into model$layers to visualize.
#   channels           integer vector of channel indices to plot per layer.
# Returns: NULL invisibly; called for its base-graphics side effects.
plot_layer_activations <- function(img_path, model, activations_layers, channels) {
  # Target spatial size (H, W) taken from the model's input shape.
  model_input_size <- c(model$input_shape[[2]], model$input_shape[[3]])
  # Load and preprocess the image into a (1, H, W, 3) batch.
  img <- image_load(img_path, target_size = model_input_size) %>%
    image_to_array() %>%
    array_reshape(dim = c(1, model_input_size[1], model_input_size[2], 3)) %>%
    imagenet_preprocess_input()
  # Build a probe model whose outputs are the requested layers' activations.
  layer_outputs <- lapply(model$layers[activations_layers], function(layer) layer$output)
  activation_model <- keras_model(inputs = model$input, outputs = layer_outputs)
  activations <- predict(activation_model, img)
  # With a single requested layer predict() returns an array, not a list.
  if (!is.list(activations)) {
    activations <- list(activations)
  }
  # Draw one channel as an image; rotate so orientation matches the input.
  plot_channel <- function(channel, layer_name, channel_name) {
    rotate <- function(x) t(apply(x, 2, rev))
    image(rotate(channel), axes = FALSE, asp = 1,
          col = terrain.colors(12),
          main = paste("layer:", layer_name, "channel:", channel_name))
  }
  # FIX: seq_along() instead of 1:length() (safe when the list is empty);
  # renamed loop variable 'c' -> 'ch' (it shadowed base::c); dropped the
  # unused local 'n_features'.
  for (i in seq_along(activations)) {
    layer_activation <- activations[[i]]
    layer_name <- model$layers[[activations_layers[i]]]$name
    for (ch in channels) {
      channel_image <- layer_activation[1, , , ch]
      plot_channel(channel_image, layer_name, ch)
    }
  }
  invisible(NULL)
}
# Reset the plotting layout to a single panel.
par(mfrow=c(1,1))
# Display the first six Muenster subsets as RGB composites (stars package).
plot(read_stars("Data/Subsets_448/Slices_Muenster/M_01.jpg"),rgb=c(1,2,3))
plot(read_stars("Data/Subsets_448/Slices_Muenster/M_02.jpg"),rgb=c(1,2,3))
plot(read_stars("Data/Subsets_448/Slices_Muenster/M_03.jpg"),rgb=c(1,2,3))
plot(read_stars("Data/Subsets_448/Slices_Muenster/M_04.jpg"),rgb=c(1,2,3))
plot(read_stars("Data/Subsets_448/Slices_Muenster/M_05.jpg"),rgb=c(1,2,3))
plot(read_stars("Data/Subsets_448/Slices_Muenster/M_06.jpg"),rgb=c(1,2,3))
# 3x4 grid with tight margins for the activation plots (10 layers x 4 channels).
par(mfrow=c(3,4),mar=c(1,1,1,1),cex=0.5)
plot_layer_activations(img_path = "Data/Subsets_448/Slices_Muenster/M_01.jpg", model=pretrained_unet ,activations_layers = c(2,3,5,6,8,9,10,12,13,14), channels = 1:4)
|
## Ensure the working directory points at the data folder.
## NOTE(review): hard-coded absolute path + setwd() makes this script
## non-portable; kept for compatibility with the original workflow.
if (getwd() != "C:/Users/gauta/Documents/R/data")
  setwd("C:/Users/gauta/Documents/R/data")
getwd()

## Read the power-consumption data; "?" marks missing values in this file.
DT <- read.table("household_power_consumption.txt",
                 header = TRUE, sep = ';', na.strings = "?")

## Parse Date and keep only 1-2 Feb 2007.
## FIX: the upper bound was "2007-02-01", making the range a single day;
## the ExData plotting assignment covers 2007-02-01 through 2007-02-02.
DT$Date <- as.Date(DT$Date, format = "%d/%m/%Y")
DT <- subset(DT, DT$Date >= as.Date("2007-02-01")
             & DT$Date <= as.Date("2007-02-02"))

## Open the PNG output device (480x480, white background).
png(file = "plot1.png", height = 480, width = 480, bg = "white")

## Histogram of global active power.
## FIX: corrected axis-label typo "Kilowats" -> "kilowatts".
hist(DT$Global_active_power,
     col = "red",
     xlab = "Global Active Power (kilowatts)",
     ylab = "Frequency",
     main = "Global Active Power")

## Close the graphics device to flush the file.
dev.off()
| /plot1.R | no_license | GautamVarma/ExData_Plotting1 | R | false | false | 798 | r | ## checking the working directory
## Ensure the working directory points at the data folder.
## NOTE(review): hard-coded absolute path + setwd() makes this script
## non-portable; kept for compatibility with the original workflow.
if (getwd() != "C:/Users/gauta/Documents/R/data")
  setwd("C:/Users/gauta/Documents/R/data")
getwd()

## Read the power-consumption data; "?" marks missing values in this file.
DT <- read.table("household_power_consumption.txt",
                 header = TRUE, sep = ';', na.strings = "?")

## Parse Date and keep only 1-2 Feb 2007.
## FIX: the upper bound was "2007-02-01", making the range a single day;
## the ExData plotting assignment covers 2007-02-01 through 2007-02-02.
DT$Date <- as.Date(DT$Date, format = "%d/%m/%Y")
DT <- subset(DT, DT$Date >= as.Date("2007-02-01")
             & DT$Date <= as.Date("2007-02-02"))

## Open the PNG output device (480x480, white background).
png(file = "plot1.png", height = 480, width = 480, bg = "white")

## Histogram of global active power.
## FIX: corrected axis-label typo "Kilowats" -> "kilowatts".
hist(DT$Global_active_power,
     col = "red",
     xlab = "Global Active Power (kilowatts)",
     ylab = "Frequency",
     main = "Global Active Power")

## Close the graphics device to flush the file.
dev.off()
|
\name{modify_lang}
\alias{modify_lang}
\title{Recursively modify a language object}
\usage{
modify_lang(x, f, ...)
}
\arguments{
\item{x}{object to modify: should be a call, expression,
function or list of the above.}
\item{f}{function to apply to leaves}
\item{...}{other arguments passed to \code{f}}
}
\description{
Recursively modify a language object
}
\examples{
a_to_b <- function(x) {
if (is.name(x) && identical(x, quote(a))) return(quote(b))
x
}
examples <- list(
quote(a <- 5),
alist(a = 1, c = a),
function(a = 1) a * 10,
expression(a <- 1, a, f(a), f(a = a))
)
modify_lang(examples, a_to_b)
# Modifies all objects called a, but doesn't modify arguments named a
}
| /man/modify_lang.Rd | no_license | Lingbing/pryr | R | false | false | 703 | rd | \name{modify_lang}
\alias{modify_lang}
\title{Recursively modify a language object}
\usage{
modify_lang(x, f, ...)
}
\arguments{
\item{x}{object to modify: should be a call, expression,
function or list of the above.}
\item{f}{function to apply to leaves}
\item{...}{other arguments passed to \code{f}}
}
\description{
Recursively modify a language object
}
\examples{
a_to_b <- function(x) {
if (is.name(x) && identical(x, quote(a))) return(quote(b))
x
}
examples <- list(
quote(a <- 5),
alist(a = 1, c = a),
function(a = 1) a * 10,
expression(a <- 1, a, f(a), f(a = a))
)
modify_lang(examples, a_to_b)
# Modifies all objects called a, but doesn't modify arguments named a
}
|
#' Adjust for batch effects using an empirical Bayes framework
#'
#' ComBat allows users to adjust for batch effects in datasets where the batch covariate is known, using methodology
#' described in Johnson et al. 2007. It uses either parametric or non-parametric empirical Bayes frameworks for adjusting data for
#' batch effects. Users are returned an expression matrix that has been corrected for batch effects. The input
#' data are assumed to be cleaned and normalized before batch effect removal.
#'
#' @param dat Genomic measure matrix (dimensions probe x sample) - for example, expression matrix
#' @param batch {Batch covariate (only one batch allowed)}
#' @param mod Model matrix for outcome of interest and other covariates besides batch
#' @param par.prior (Optional) TRUE indicates parametric adjustments will be used, FALSE indicates non-parametric adjustments will be used
#' @param prior.plots (Optional) TRUE give prior plots with black as a kernel estimate of the empirical batch effect density and red as the parametric
#' @param mean.only (Optional) FALSE If TRUE ComBat only corrects the mean of the batch effect (no scale adjustment)
#' @param ref.batch (Optional) NULL If given, will use the selected batch as a reference for batch adjustment.
#' @param BPPARAM (Optional) BiocParallelParam for parallel operation
#'
#' @return data A probe x sample genomic measure matrix, adjusted for batch effects.
#'
#' @examples
#' library(bladderbatch)
#' data(bladderdata)
#' dat <- bladderEset[1:50,]
#'
#' pheno = pData(dat)
#' edata = exprs(dat)
#' batch = pheno$batch
#' mod = model.matrix(~as.factor(cancer), data=pheno)
#'
#' # parametric adjustment
#' combat_edata1 = ComBat(dat=edata, batch=batch, mod=NULL, par.prior=TRUE, prior.plots=FALSE)
#'
#' # non-parametric adjustment, mean-only version
#' combat_edata2 = ComBat(dat=edata, batch=batch, mod=NULL, par.prior=FALSE, mean.only=TRUE)
#'
#' # reference-batch version, with covariates
#' combat_edata3 = ComBat(dat=edata, batch=batch, mod=mod, par.prior=TRUE, ref.batch=3)
#'
#' @export
#'
ComBat <- function (dat, batch, mod = NULL, par.prior = TRUE, prior.plots = FALSE,
                    mean.only = FALSE, ref.batch = NULL, BPPARAM = bpparam()) {
  ## Adjust 'dat' (probe x sample) for batch effects via empirical Bayes
  ## (Johnson et al. 2007). Returns a matrix of the same dimensions.
  if (mean.only == TRUE) {
    message("Using the 'mean only' version of ComBat.")
  }
  if (length(dim(batch)) > 1) {
    stop("This version of ComBat only allows one batch variable")
  }
  ## Make batch a factor and build one indicator column per batch level.
  batch <- as.factor(batch)
  batchmod <- model.matrix(~-1 + batch)
  if (!is.null(ref.batch)) {
    ## Reference-batch mode: validate the level; the reference batch anchors
    ## standardization and is itself returned unchanged.
    if (!(ref.batch %in% levels(batch))) {
      stop("reference level ref.batch is not one of the levels of the batch variable")
    }
    ## FIX: added spaces so the message does not render as "Using batch =3as".
    message(paste0("Using batch = ", ref.batch,
                   " as a reference batch (this batch won't change)."))
    ref <- which(levels(as.factor(batch)) == ref.batch)
    batchmod[, ref] <- 1
  } else {
    ref <- NULL
  }
  ## FIX: added spaces ("Found 3 batches." instead of "Found3batches.").
  message(paste0("Found ", nlevels(batch), " batches."))
  ## Per-batch bookkeeping: sample indices and sizes.
  n.batch <- nlevels(batch)
  batches <- list()
  for (i in seq_len(n.batch)) {
    batches[[i]] <- which(batch == levels(batch)[i])
  }
  n.batches <- vapply(batches, length, integer(1L))
  if (any(n.batches == 1)) {
    ## A singleton batch has no within-batch variance to estimate.
    mean.only <- TRUE
    message("Note: one batch has only one sample, setting mean.only=TRUE.")
  }
  n.array <- sum(n.batches)
  ## Combine batch indicators and covariates; drop any intercept column
  ## (all ones), except the reference-batch indicator which must stay.
  design <- cbind(batchmod, mod)
  check <- apply(design, 2, function(x) all(x == 1))
  if (!is.null(ref)) {
    check[ref] <- FALSE
  }
  design <- as.matrix(design[, !check])
  ## FIX: the original call was missing the closing ')' of message() - a
  ## syntax error; also added spaces around the covariate count.
  message(paste0("Adjusting for ", ncol(design) - ncol(batchmod),
                 " covariate(s) or covariate level(s)."))
  ## Guard against confounded (rank-deficient) designs.
  if (qr(design)$rank < ncol(design)) {
    if (ncol(design) == (n.batch + 1)) {
      stop("The covariate is confounded with batch! Remove the covariate and rerun ComBat")
    }
    if (ncol(design) > (n.batch + 1)) {
      if ((qr(design[, -c(1:n.batch)])$rank < ncol(design[, -c(1:n.batch)]))) {
        stop('The covariates are confounded! Please remove one or more of the covariates so the design is not confounded')
      } else {
        stop("At least one covariate is confounded with batch! Please remove confounded covariates and rerun ComBat")
      }
    }
  }
  ## Missing values switch between closed-form OLS and per-gene fits.
  NAs <- any(is.na(dat))
  if (NAs) {
    message(paste0("Found ", sum(is.na(dat)), " Missing Data Values."))
  }
  ## Standardize data across genes.
  message("Standardizing Data across genes.")
  if (!NAs) {
    B.hat <- solve(t(design) %*% design) %*% t(design) %*% t(as.matrix(dat))
  } else {
    B.hat <- apply(dat, 1, Beta.NA, design)  # NA-tolerant per-gene fit
  }
  ## Grand mean: reference-batch mean, or the batch-size-weighted mean.
  if (!is.null(ref.batch)) {
    grand.mean <- t(B.hat[ref, ])
  } else {
    grand.mean <- t(n.batches / n.array) %*% B.hat[1:n.batch, ]
  }
  ## Pooled variance: from the reference batch only, or across all arrays.
  if (!NAs) {
    if (!is.null(ref.batch)) {
      ref.dat <- dat[, batches[[ref]]]
      var.pooled <- ((ref.dat - t(design[batches[[ref]], ] %*% B.hat))^2) %*%
        rep(1 / n.batches[ref], n.batches[ref])
    } else {
      var.pooled <- ((dat - t(design %*% B.hat))^2) %*% rep(1 / n.array, n.array)
    }
  } else {
    if (!is.null(ref.batch)) {
      ref.dat <- dat[, batches[[ref]]]
      var.pooled <- apply(ref.dat - t(design[batches[[ref]], ] %*% B.hat), 1,
                          var, na.rm = TRUE)
    } else {
      var.pooled <- apply(dat - t(design %*% B.hat), 1, var, na.rm = TRUE)
    }
  }
  stand.mean <- t(grand.mean) %*% t(rep(1, n.array))
  if (!is.null(design)) {
    ## Add covariate (non-batch) effects back into the standardization mean.
    tmp <- design
    tmp[, c(1:n.batch)] <- 0
    stand.mean <- stand.mean + t(tmp %*% B.hat)
  }
  s.data <- (dat - stand.mean) / (sqrt(var.pooled) %*% t(rep(1, n.array)))
  ## Estimate per-batch location/scale (L/S) effects and the EB priors.
  message("Fitting L/S model and finding priors.")
  batch.design <- design[, 1:n.batch]
  if (!NAs) {
    gamma.hat <- solve(t(batch.design) %*% batch.design) %*% t(batch.design) %*%
      t(as.matrix(s.data))
  } else {
    gamma.hat <- apply(s.data, 1, Beta.NA, batch.design)
  }
  delta.hat <- NULL
  for (i in batches) {
    if (mean.only == TRUE) {
      delta.hat <- rbind(delta.hat, rep(1, nrow(s.data)))
    } else {
      delta.hat <- rbind(delta.hat, apply(s.data[, i], 1, var, na.rm = TRUE))
    }
  }
  gamma.bar <- apply(gamma.hat, 1, mean)
  t2 <- apply(gamma.hat, 1, var)
  a.prior <- apply(delta.hat, 1, aprior)
  b.prior <- apply(delta.hat, 1, bprior)
  ## Optionally plot empirical (black) vs parametric (red) prior densities.
  if (prior.plots && par.prior) {
    par(mfrow = c(2, 2))
    tmp <- density(gamma.hat[1, ])
    plot(tmp, type = 'l', main = "Density Plot")
    xx <- seq(min(tmp$x), max(tmp$x), length = 100)
    lines(xx, dnorm(xx, gamma.bar[1], sqrt(t2[1])), col = 2)
    qqnorm(gamma.hat[1, ])
    qqline(gamma.hat[1, ], col = 2)
    tmp <- density(delta.hat[1, ])
    xx <- seq(min(tmp$x), max(tmp$x), length = 100)
    tmp1 <- list(x = xx, y = dgamma(xx, a.prior[1], b.prior[1]))
    plot(tmp, typ = "l", main = "Density Plot", ylim = c(0, max(tmp$y, tmp1$y)))
    lines(tmp1, col = 2)
    invgam <- 1 / qgamma(ppoints(ncol(delta.hat)), a.prior[1], b.prior[1])
    qqplot(delta.hat[1, ], invgam, xlab = "Sample Quantiles",
           ylab = "Theoretical Quantiles")
    lines(c(0, max(invgam)), c(0, max(invgam)), col = 2)
    title("Q-Q Plot")
  }
  ## Empirical Bayes batch adjustments (parallel over batches via BPPARAM).
  gamma.star <- delta.star <- matrix(NA, nrow = n.batch, ncol = nrow(s.data))
  if (par.prior) {
    message("Finding parametric adjustments.")
    results <- bplapply(seq_len(n.batch), function(i) {
      if (mean.only) {
        gamma.star <- postmean(gamma.hat[i, ], gamma.bar[i], 1, 1, t2[i])
        delta.star <- rep(1, nrow(s.data))
      } else {
        temp <- it.sol(s.data[, batches[[i]]], gamma.hat[i, ], delta.hat[i, ],
                       gamma.bar[i], t2[i], a.prior[i], b.prior[i])
        gamma.star <- temp[1, ]
        delta.star <- temp[2, ]
      }
      list(gamma.star = gamma.star, delta.star = delta.star)
    }, BPPARAM = BPPARAM)
    for (i in seq_len(n.batch)) {
      gamma.star[i, ] <- results[[i]]$gamma.star
      delta.star[i, ] <- results[[i]]$delta.star
    }
  } else {
    message("Finding nonparametric adjustments.")
    results <- bplapply(seq_len(n.batch), function(i) {
      if (mean.only) {
        delta.hat[i, ] <- 1
      }
      temp <- int.eprior(as.matrix(s.data[, batches[[i]]]),
                         gamma.hat[i, ], delta.hat[i, ])
      list(gamma.star = temp[1, ], delta.star = temp[2, ])
    }, BPPARAM = BPPARAM)
    for (i in seq_len(n.batch)) {
      gamma.star[i, ] <- results[[i]]$gamma.star
      delta.star[i, ] <- results[[i]]$delta.star
    }
  }
  if (!is.null(ref.batch)) {
    gamma.star[ref, ] <- 0  # reference batch: zero mean shift
    delta.star[ref, ] <- 1  # reference batch: unit variance scale
  }
  ## Apply the EB-adjusted location/scale corrections, then de-standardize.
  message("Adjusting the Data.")
  bayesdata <- s.data
  j <- 1
  for (i in batches) {
    bayesdata[, i] <- (bayesdata[, i] - t(batch.design[i, ] %*% gamma.star)) /
      (sqrt(delta.star[j, ]) %*% t(rep(1, n.batches[j])))
    j <- j + 1
  }
  bayesdata <- (bayesdata * (sqrt(var.pooled) %*% t(rep(1, n.array)))) + stand.mean
  ## In reference mode, return the reference batch exactly as it came in.
  if (!is.null(ref.batch)) {
    bayesdata[, batches[[ref]]] <- dat[, batches[[ref]]]
  }
  return(bayesdata)
}
| /R/ComBat.R | no_license | abelew/sva-devel | R | false | false | 9,635 | r | #' Adjust for batch effects using an empirical Bayes framework
#'
#' ComBat allows users to adjust for batch effects in datasets where the batch covariate is known, using methodology
#' described in Johnson et al. 2007. It uses either parametric or non-parametric empirical Bayes frameworks for adjusting data for
#' batch effects. Users are returned an expression matrix that has been corrected for batch effects. The input
#' data are assumed to be cleaned and normalized before batch effect removal.
#'
#' @param dat Genomic measure matrix (dimensions probe x sample) - for example, expression matrix
#' @param batch {Batch covariate (only one batch allowed)}
#' @param mod Model matrix for outcome of interest and other covariates besides batch
#' @param par.prior (Optional) TRUE indicates parametric adjustments will be used, FALSE indicates non-parametric adjustments will be used
#' @param prior.plots (Optional) TRUE give prior plots with black as a kernel estimate of the empirical batch effect density and red as the parametric
#' @param mean.only (Optional) FALSE If TRUE ComBat only corrects the mean of the batch effect (no scale adjustment)
#' @param ref.batch (Optional) NULL If given, will use the selected batch as a reference for batch adjustment.
#' @param BPPARAM (Optional) BiocParallelParam for parallel operation
#'
#' @return data A probe x sample genomic measure matrix, adjusted for batch effects.
#'
#' @examples
#' library(bladderbatch)
#' data(bladderdata)
#' dat <- bladderEset[1:50,]
#'
#' pheno = pData(dat)
#' edata = exprs(dat)
#' batch = pheno$batch
#' mod = model.matrix(~as.factor(cancer), data=pheno)
#'
#' # parametric adjustment
#' combat_edata1 = ComBat(dat=edata, batch=batch, mod=NULL, par.prior=TRUE, prior.plots=FALSE)
#'
#' # non-parametric adjustment, mean-only version
#' combat_edata2 = ComBat(dat=edata, batch=batch, mod=NULL, par.prior=FALSE, mean.only=TRUE)
#'
#' # reference-batch version, with covariates
#' combat_edata3 = ComBat(dat=edata, batch=batch, mod=mod, par.prior=TRUE, ref.batch=3)
#'
#' @export
#'
ComBat <- function (dat, batch, mod = NULL, par.prior = TRUE, prior.plots = FALSE,
                    mean.only = FALSE, ref.batch = NULL, BPPARAM = bpparam()) {
  ## Adjust 'dat' (probe x sample) for batch effects via empirical Bayes
  ## (Johnson et al. 2007). Returns a matrix of the same dimensions.
  if (mean.only == TRUE) {
    message("Using the 'mean only' version of ComBat.")
  }
  if (length(dim(batch)) > 1) {
    stop("This version of ComBat only allows one batch variable")
  }
  ## Make batch a factor and build one indicator column per batch level.
  batch <- as.factor(batch)
  batchmod <- model.matrix(~-1 + batch)
  if (!is.null(ref.batch)) {
    ## Reference-batch mode: validate the level; the reference batch anchors
    ## standardization and is itself returned unchanged.
    if (!(ref.batch %in% levels(batch))) {
      stop("reference level ref.batch is not one of the levels of the batch variable")
    }
    ## FIX: added spaces so the message does not render as "Using batch =3as".
    message(paste0("Using batch = ", ref.batch,
                   " as a reference batch (this batch won't change)."))
    ref <- which(levels(as.factor(batch)) == ref.batch)
    batchmod[, ref] <- 1
  } else {
    ref <- NULL
  }
  ## FIX: added spaces ("Found 3 batches." instead of "Found3batches.").
  message(paste0("Found ", nlevels(batch), " batches."))
  ## Per-batch bookkeeping: sample indices and sizes.
  n.batch <- nlevels(batch)
  batches <- list()
  for (i in seq_len(n.batch)) {
    batches[[i]] <- which(batch == levels(batch)[i])
  }
  n.batches <- vapply(batches, length, integer(1L))
  if (any(n.batches == 1)) {
    ## A singleton batch has no within-batch variance to estimate.
    mean.only <- TRUE
    message("Note: one batch has only one sample, setting mean.only=TRUE.")
  }
  n.array <- sum(n.batches)
  ## Combine batch indicators and covariates; drop any intercept column
  ## (all ones), except the reference-batch indicator which must stay.
  design <- cbind(batchmod, mod)
  check <- apply(design, 2, function(x) all(x == 1))
  if (!is.null(ref)) {
    check[ref] <- FALSE
  }
  design <- as.matrix(design[, !check])
  ## FIX: the original call was missing the closing ')' of message() - a
  ## syntax error; also added spaces around the covariate count.
  message(paste0("Adjusting for ", ncol(design) - ncol(batchmod),
                 " covariate(s) or covariate level(s)."))
  ## Guard against confounded (rank-deficient) designs.
  if (qr(design)$rank < ncol(design)) {
    if (ncol(design) == (n.batch + 1)) {
      stop("The covariate is confounded with batch! Remove the covariate and rerun ComBat")
    }
    if (ncol(design) > (n.batch + 1)) {
      if ((qr(design[, -c(1:n.batch)])$rank < ncol(design[, -c(1:n.batch)]))) {
        stop('The covariates are confounded! Please remove one or more of the covariates so the design is not confounded')
      } else {
        stop("At least one covariate is confounded with batch! Please remove confounded covariates and rerun ComBat")
      }
    }
  }
  ## Missing values switch between closed-form OLS and per-gene fits.
  NAs <- any(is.na(dat))
  if (NAs) {
    message(paste0("Found ", sum(is.na(dat)), " Missing Data Values."))
  }
  ## Standardize data across genes.
  message("Standardizing Data across genes.")
  if (!NAs) {
    B.hat <- solve(t(design) %*% design) %*% t(design) %*% t(as.matrix(dat))
  } else {
    B.hat <- apply(dat, 1, Beta.NA, design)  # NA-tolerant per-gene fit
  }
  ## Grand mean: reference-batch mean, or the batch-size-weighted mean.
  if (!is.null(ref.batch)) {
    grand.mean <- t(B.hat[ref, ])
  } else {
    grand.mean <- t(n.batches / n.array) %*% B.hat[1:n.batch, ]
  }
  ## Pooled variance: from the reference batch only, or across all arrays.
  if (!NAs) {
    if (!is.null(ref.batch)) {
      ref.dat <- dat[, batches[[ref]]]
      var.pooled <- ((ref.dat - t(design[batches[[ref]], ] %*% B.hat))^2) %*%
        rep(1 / n.batches[ref], n.batches[ref])
    } else {
      var.pooled <- ((dat - t(design %*% B.hat))^2) %*% rep(1 / n.array, n.array)
    }
  } else {
    if (!is.null(ref.batch)) {
      ref.dat <- dat[, batches[[ref]]]
      var.pooled <- apply(ref.dat - t(design[batches[[ref]], ] %*% B.hat), 1,
                          var, na.rm = TRUE)
    } else {
      var.pooled <- apply(dat - t(design %*% B.hat), 1, var, na.rm = TRUE)
    }
  }
  stand.mean <- t(grand.mean) %*% t(rep(1, n.array))
  if (!is.null(design)) {
    ## Add covariate (non-batch) effects back into the standardization mean.
    tmp <- design
    tmp[, c(1:n.batch)] <- 0
    stand.mean <- stand.mean + t(tmp %*% B.hat)
  }
  s.data <- (dat - stand.mean) / (sqrt(var.pooled) %*% t(rep(1, n.array)))
  ## Estimate per-batch location/scale (L/S) effects and the EB priors.
  message("Fitting L/S model and finding priors.")
  batch.design <- design[, 1:n.batch]
  if (!NAs) {
    gamma.hat <- solve(t(batch.design) %*% batch.design) %*% t(batch.design) %*%
      t(as.matrix(s.data))
  } else {
    gamma.hat <- apply(s.data, 1, Beta.NA, batch.design)
  }
  delta.hat <- NULL
  for (i in batches) {
    if (mean.only == TRUE) {
      delta.hat <- rbind(delta.hat, rep(1, nrow(s.data)))
    } else {
      delta.hat <- rbind(delta.hat, apply(s.data[, i], 1, var, na.rm = TRUE))
    }
  }
  gamma.bar <- apply(gamma.hat, 1, mean)
  t2 <- apply(gamma.hat, 1, var)
  a.prior <- apply(delta.hat, 1, aprior)
  b.prior <- apply(delta.hat, 1, bprior)
  ## Optionally plot empirical (black) vs parametric (red) prior densities.
  if (prior.plots && par.prior) {
    par(mfrow = c(2, 2))
    tmp <- density(gamma.hat[1, ])
    plot(tmp, type = 'l', main = "Density Plot")
    xx <- seq(min(tmp$x), max(tmp$x), length = 100)
    lines(xx, dnorm(xx, gamma.bar[1], sqrt(t2[1])), col = 2)
    qqnorm(gamma.hat[1, ])
    qqline(gamma.hat[1, ], col = 2)
    tmp <- density(delta.hat[1, ])
    xx <- seq(min(tmp$x), max(tmp$x), length = 100)
    tmp1 <- list(x = xx, y = dgamma(xx, a.prior[1], b.prior[1]))
    plot(tmp, typ = "l", main = "Density Plot", ylim = c(0, max(tmp$y, tmp1$y)))
    lines(tmp1, col = 2)
    invgam <- 1 / qgamma(ppoints(ncol(delta.hat)), a.prior[1], b.prior[1])
    qqplot(delta.hat[1, ], invgam, xlab = "Sample Quantiles",
           ylab = "Theoretical Quantiles")
    lines(c(0, max(invgam)), c(0, max(invgam)), col = 2)
    title("Q-Q Plot")
  }
  ## Empirical Bayes batch adjustments (parallel over batches via BPPARAM).
  gamma.star <- delta.star <- matrix(NA, nrow = n.batch, ncol = nrow(s.data))
  if (par.prior) {
    message("Finding parametric adjustments.")
    results <- bplapply(seq_len(n.batch), function(i) {
      if (mean.only) {
        gamma.star <- postmean(gamma.hat[i, ], gamma.bar[i], 1, 1, t2[i])
        delta.star <- rep(1, nrow(s.data))
      } else {
        temp <- it.sol(s.data[, batches[[i]]], gamma.hat[i, ], delta.hat[i, ],
                       gamma.bar[i], t2[i], a.prior[i], b.prior[i])
        gamma.star <- temp[1, ]
        delta.star <- temp[2, ]
      }
      list(gamma.star = gamma.star, delta.star = delta.star)
    }, BPPARAM = BPPARAM)
    for (i in seq_len(n.batch)) {
      gamma.star[i, ] <- results[[i]]$gamma.star
      delta.star[i, ] <- results[[i]]$delta.star
    }
  } else {
    message("Finding nonparametric adjustments.")
    results <- bplapply(seq_len(n.batch), function(i) {
      if (mean.only) {
        delta.hat[i, ] <- 1
      }
      temp <- int.eprior(as.matrix(s.data[, batches[[i]]]),
                         gamma.hat[i, ], delta.hat[i, ])
      list(gamma.star = temp[1, ], delta.star = temp[2, ])
    }, BPPARAM = BPPARAM)
    for (i in seq_len(n.batch)) {
      gamma.star[i, ] <- results[[i]]$gamma.star
      delta.star[i, ] <- results[[i]]$delta.star
    }
  }
  if (!is.null(ref.batch)) {
    gamma.star[ref, ] <- 0  # reference batch: zero mean shift
    delta.star[ref, ] <- 1  # reference batch: unit variance scale
  }
  ## Apply the EB-adjusted location/scale corrections, then de-standardize.
  message("Adjusting the Data.")
  bayesdata <- s.data
  j <- 1
  for (i in batches) {
    bayesdata[, i] <- (bayesdata[, i] - t(batch.design[i, ] %*% gamma.star)) /
      (sqrt(delta.star[j, ]) %*% t(rep(1, n.batches[j])))
    j <- j + 1
  }
  bayesdata <- (bayesdata * (sqrt(var.pooled) %*% t(rep(1, n.array)))) + stand.mean
  ## In reference mode, return the reference batch exactly as it came in.
  if (!is.null(ref.batch)) {
    bayesdata[, batches[[ref]]] <- dat[, batches[[ref]]]
  }
  return(bayesdata)
}
|
#' Automated analysis of CRISPR experiments.
#'
#' Main goals:
#' \enumerate{
#' \item Flexible pipeline for analysis of the CRISPR Mi-Seq or Hi-Seq data.
#' \item Compatible with GRanges and data.table style.
#' \item Precise quantification of mutation rates.
#' \item Prepare automatic reports as .Rmd files that are flexible
#' and open for manipulation.
#' \item Provide specialized plots for deletions, insertions, mismatches,
#' variants, heterogeneity of the reads.
#' }
#'
#' To learn more about amplican, start with the vignettes:
#' \code{browseVignettes(package = "amplican")}
#'
#' @docType package
#' @name amplican
#' @useDynLib amplican
#'
#' @import Rcpp ggthemes waffle knitr methods BiocGenerics Biostrings data.table
#' @importFrom Rcpp sourceCpp
#'
"_PACKAGE"
# Package load hook: print a citation request when the package is attached.
# Args: libname, pkgname - standard .onAttach arguments (unused here).
.onAttach <- function(libname, pkgname) {
  # FIX: corrected typo "Pease" -> "Please" in the startup message.
  packageStartupMessage(
    paste0("Please consider supporting this software by citing:\n\n",
           "Labun et al. 2019\n",
           "Accurate analysis of genuine CRISPR editing events with ampliCan.\n",
           "Genome Res. 2019 Mar 8\n",
           "doi: 10.1101/gr.244293.118\n",
           "\nWithout appreciation scientific software is usually abandoned and",
           " eventually deprecated, but you can easily support authors by ",
           "citations."))
}
# Factory that builds the main amplican pipeline function with a fixed
# default for 'min_freq' (the frequency threshold used during event
# normalization). The returned closure runs alignment, filtering,
# normalization, summarization and report generation, writing all outputs
# under 'results_folder'.
amplicanPipe <- function(min_freq_default) {
function(
config, fastq_folder, results_folder, knit_reports = TRUE,
write_alignments_format = "txt", average_quality = 30,
min_quality = 0, use_parallel = FALSE,
scoring_matrix = Biostrings::nucleotideSubstitutionMatrix(
match = 5, mismatch = -4, baseOnly = TRUE, type = "DNA"),
gap_opening = 25, gap_extension = 0, fastqfiles = 0.5,
primer_mismatch = 0, donor_mismatch = 3, PRIMER_DIMER = 30,
event_filter = TRUE, cut_buffer = 5,
promiscuous_consensus = TRUE, normalize = c("guideRNA", "Group"),
min_freq = min_freq_default) {
# Resolve user-supplied paths to absolute form before any file I/O.
config <- normalizePath(config)
fastq_folder <- normalizePath(fastq_folder)
results_folder <- normalizePath(results_folder)
message("Checking write access...")
checkFileWriteAccess(results_folder)
# Step 1: align reads against amplicons as described by the config file.
aln <- amplicanAlign(config = config,
fastq_folder = fastq_folder,
use_parallel = use_parallel,
average_quality = average_quality,
scoring_matrix = scoring_matrix,
gap_opening = gap_opening,
gap_extension = gap_extension,
min_quality = min_quality,
fastqfiles = fastqfiles,
primer_mismatch = primer_mismatch,
donor_mismatch = donor_mismatch)
message("Saving alignments...")
resultsFolder <- file.path(results_folder, "alignments")
if (!dir.exists(resultsFolder)) {
dir.create(resultsFolder)
}
# save as .rds object (always kept, regardless of write_alignments_format)
saveRDS(aln, file.path(resultsFolder, "AlignmentsExperimentSet.rds"))
# save as other formats requested by the caller ("None" suppresses this)
if (!"None" %in% write_alignments_format) {
for (frmt in write_alignments_format) {
writeAlignments(aln, file.path(resultsFolder,
paste0("alignments.", frmt)), frmt)
}
}
# Step 2: record the run parameters for reproducibility.
message("Saving parameters...")
logFileName <- file.path(results_folder, "RunParameters.txt")
if (file.exists(logFileName)) {
file.remove(logFileName)
}
logFileConn <- file(logFileName, open = "at")
writeLines(c(paste("Config file: ", config),
paste("Average Quality: ", average_quality),
paste("Minimum Quality: ", min_quality),
paste("Write Alignments: ", toString(write_alignments_format)),
paste("Fastq files Mode: ", fastqfiles),
paste("Gap Opening: ", gap_opening),
paste("Gap Extension: ", gap_extension),
paste("Consensus: ", promiscuous_consensus),
paste("Normalize: ", toString(normalize)),
paste("PRIMER DIMER buffer:", PRIMER_DIMER),
paste("Cut buffer:", cut_buffer),
"Scoring Matrix:"), logFileConn)
utils::write.csv(scoring_matrix, logFileConn, quote = FALSE, row.names = TRUE)
close(logFileConn)
message("Saving unassigned sequences...")
unData <- unassignedData(aln)
if (!is.null(unData)) data.table::fwrite(
unData, file.path(resultsFolder, "unassigned_reads.csv"))
message("Saving barcode statistics...")
data.table::fwrite(barcodeData(aln),
file.path(results_folder, "barcode_reads_filters.csv"))
# Step 3: convert alignments into per-read event tables.
message("Translating alignments into events...")
cfgT <- experimentData(aln)
aln <- extractEvents(aln, use_parallel = use_parallel)
message("Saving complete events - unfiltered...")
data.table::fwrite(aln, file.path(resultsFolder, "raw_events.csv"))
data.table::setDT(aln)
# Declare NSE column names as NULL to silence R CMD check notes.
seqnames <- read_id <- counts <- NULL
if (dim(aln)[1] == 0) stop("There are no events.",
"Check whether you have correct primers in the config file.")
# Flag events overlapping the expected cut site (+/- cut_buffer).
aln$overlaps <- amplicanOverlap(aln, cfgT, cut_buffer = cut_buffer)
# Consensus is only meaningful when both fastq files are in play.
aln$consensus <- if (fastqfiles <= 0.5) {
amplicanConsensus(aln, cfgT, promiscuous = promiscuous_consensus)
} else { TRUE }
# filter events overlapping primers
eOP <- findEOP(aln, cfgT)
aln <- aln[!eOP, ]
# find PRIMER DIMERS
PD <- findPD(aln, cfgT, PRIMER_DIMER = PRIMER_DIMER)
# summarize how many PRIMER DIMER reads per ID
onlyPD <- aln[PD, ]
onlyPD <- unique(onlyPD, by = c("seqnames", "read_id"))
data.table::setDT(onlyPD)
summaryPD <- onlyPD[, list(counts = sum(counts)), by = c("seqnames")]
cfgT$PRIMER_DIMER <- 0
cfgT$PRIMER_DIMER[match(summaryPD$seqnames, cfgT$ID)] <- summaryPD$counts
# apply filter - remove all events that come from PD infected reads
aln <- aln[!onlyPD, on = list(seqnames, read_id)]
# alignment event filter: drop reads with low-quality alignments per ID
cfgT$Low_Score <- 0
if (event_filter) {
for (i in seq_len(dim(cfgT)[1])) {
aln_id <- aln[seqnames == cfgT$ID[i], ]
# NOTE(review): scalar context uses '|' here; '||' would be the usual
# choice for a scalar if() condition - behavior identical for 1-length
# logicals, but confirm intent.
if (dim(aln_id)[1] == 0 | cfgT$Donor[i] != "") next()
onlyBR <- aln_id[findLQR(aln_id), ]
onlyBR <- unique(onlyBR, by = "read_id")
cfgT[i, "Low_Score"] <- sum(onlyBR$counts)
aln <- aln[!(aln$seqnames == cfgT$ID[i] &
aln$read_id %in% onlyBR$read_id), ]
}
}
cfgT$Reads_Filtered <- cfgT$Reads - cfgT$PRIMER_DIMER - cfgT$Low_Score
# shift to relative (most left UPPER case is position 0)
message("Shifting events as relative...")
data.table::setDF(aln)
aln <- data.frame(amplicanMap(aln, cfgT), stringsAsFactors = FALSE)
message("Saving shifted events - filtered...")
data.table::fwrite(aln,
file.path(resultsFolder, "events_filtered_shifted.csv"))
# revert guides to 5'-3'
cfgT$guideRNA[cfgT$Direction] <- revComp(cfgT$guideRNA[cfgT$Direction])
# normalize events against controls / grouping columns in 'normalize'
message("Normalizing events...")
aln <- amplicanNormalize(aln, cfgT, min_freq = min_freq, add = normalize)
message("Saving normalized events...")
data.table::fwrite(aln,
file.path(resultsFolder,
"events_filtered_shifted_normalized.csv"))
# summarize read counts per experiment using consensus+overlap events only
cfgT <- amplicanSummarize(aln[aln$consensus & aln$overlaps, ], cfgT)
data.table::fwrite(
cfgT[, c("ID", "Barcode", "Forward_Reads_File", "Reverse_Reads_File",
"Group", "guideRNA", "Found_Guide", "Control", "Forward_Primer",
"Reverse_Primer", "Direction", "Amplicon", "Donor", "fwdPrPosEnd",
"rvePrPos", "Reads", "PRIMER_DIMER", "Low_Score",
"Reads_Filtered", "Reads_Del", "Reads_In",
"Reads_Edited", "Reads_Frameshifted", "HDR")],
file.path(results_folder, "config_summary.csv"))
# Step 4: generate .Rmd reports (optionally knitted to HTML).
reportsFolder <- file.path(results_folder, "reports")
if (!dir.exists(reportsFolder)) {
dir.create(reportsFolder)
}
message(paste0("Making reports... \nDue to high quality ",
"figures, it is time consuming. Use .Rmd templates for ",
"more control."))
amplicanReport(results_folder,
knit_reports = knit_reports,
cut_buffer = cut_buffer,
report_files = file.path(reportsFolder,
c("id_report",
"barcode_report",
"group_report",
"guide_report",
"amplicon_report",
"index")))
message("Finished.")
# Return the results folder path invisibly so calls compose cleanly.
invisible(results_folder)
}
}
#' Wraps main package functionality into one function.
#'
#' amplicanPipeline is convenient wrapper around all functionality of the
#' package with the most robust settings. It will generate all results in the
#' \code{result_folder} and also knit prepared reports into 'reports' folder.
#' @param results_folder (string) Where do you want to store results?
#' The package will create files in that folder so make sure you have writing
#' permissions.
#' @param config (string) The path to your configuration file. For example:
#' \code{system.file("extdata", "config.txt", package = "amplican")}.
#' Configuration file can contain additional columns, but first 11 columns
#' have to follow the example config specification.
#' @param fastq_folder (string) Path to FASTQ files. If not specified,
#' FASTQ files should be in the same directory as config file.
#' @param knit_reports (boolean) whether function should "knit" all
#' reports automatically for you (it is time consuming, be patient), when false
#' reports will be prepared, but not knitted
#' @param use_parallel (boolean) Set to TRUE, if you have registered
#' multicore back-end.
#' @param average_quality (numeric) The FASTQ file have a quality for each
#' nucleotide, depending on sequencing technology there exist many formats.
#' This package uses \code{\link[ShortRead]{readFastq}} to parse the reads.
#' If the average quality of the reads fall below value of
#' \code{average_quality} then sequence is filtered. Default is 0.
#' @param min_quality (numeric) Similar as in average_quality, but depicts
#' the minimum quality for ALL nucleotides in given read. If one of nucleotides
#' has quality BELOW \code{min_quality}, then the sequence is filtered.
#' Default is 20.
#' @param write_alignments_format (character vector) Whether
#' \code{amplicanPipeline} should write alignments results to separate files.
#' Alignments are also always saved as .rds object of
#' \code{\link{AlignmentsExperimentSet}} class.
#' Possible options are:
#' \itemize{
#' \item{"fasta"}{ outputs alignments in fasta format where header indicates
#' experiment ID, read id and number of reads}
#' \item{"txt"}{ simple format, read information followed by forward read and
#' amplicon sequence followed by reverse read with its amplicon sequence
#' eg.: \cr
#' \preformatted{
#' ID: ID_1 Count: 7
#' ACTGAAAAA--------
#' ACTG-----ACTGACTG
#'
#' ------G-ACTG
#' ACTGACTGACTG
#' }}
#' \item{"None"}{ Don't write any alignments to files.}
#' \item{c("fasta", "txt")}{ There are also possible combinations of
#' above formats, pass a vector to get alignments in multiple formats.}
#' }
#' @param scoring_matrix (matrix) Default is 'NUC44'. Pass desired matrix using
#' \code{\link{nucleotideSubstitutionMatrix}}.
#' @param gap_opening (numeric) The opening gap score.
#' @param gap_extension (numeric) The gap extension score.
#' @param fastqfiles (numeric) Normally you want to use both FASTQ files. But in
#' some special cases, you may want to use only the forward file, or only
#' the reverse file. Possible options:
#' \itemize{
#' \item{0}{ Use both FASTQ files.}
#' \item{0.5}{ Use both FASTQ files, but only for one of the reads (forward or
#' reverse) is required to have primer perfectly matched to sequence - eg. use
#' when reverse reads are trimmed of primers, but forward reads have forward
#' primer in the sequence.}
#' \item{1}{ Use only the forward FASTQ file.}
#' \item{2}{ Use only the reverse FASTQ file.}
#' }
#' @param primer_mismatch (numeric) Decide how many mismatches are allowed
#' during primer matching of the reads, that groups reads by experiments.
#' When \code{primer_mismatch = 0} no mismatches are allowed, which can increase
#' number of unassigned reads.
#' @param donor_mismatch (numeric) How many events of length 1 (mismatches,
#' deletions and insertions of length 1) are allowed when aligning toward the
#' donor template. This parameter is only used when donor template is specified.
#' The higher the parameter the less strict will be algorithm accepting read as
#' HDR. Set to 0 if only perfect alignments to the donor template marked as HDR,
#' unadvised due to error rate of the sequencers.
#' @param PRIMER_DIMER (numeric) Value specifying buffer for PRIMER DIMER
#' detection. For a given read it will be recognized as PRIMER DIMER when
#' alignment will introduce gap of size bigger than: \cr
#' \code{length of amplicon - (lengths of PRIMERS + PRIMER_DIMER value)}
#' @param event_filter (logical) Whether detection of offtarget reads,
#' should be enabled.
#' @param cut_buffer The number of bases by which extend expected cut sites
#' (specified as UPPER case letters in the amplicon) in 5' and 3' directions.
#' @param promiscuous_consensus (boolean) Whether rules of
#' \code{\link{amplicanConsensus}} should be \code{promiscuous}. When
#' promiscuous, we allow indels that have no confirmation on the other strand.
#' @param normalize (character vector) If column 'Control' in config table
#' has all FALSE/0 values then normalization is skipped. Otherwise,
#' normalization is strict, which means events that are
#' found in 'Control' TRUE group will be removed in 'Control' FALSE group.
#' This parameter by default uses columns 'guideRNA' and 'Group' to impose
#' additional restrictions on normalized events eg. only events created by the
#' same 'guideRNA' in the same 'Group' will be normalized.
#' @param min_freq (numeric) All events below this frequency are treated as
#' sequencing errors and rejected. This parameter is used during normalization
#' through \code{\link{amplicanNormalize}}.
#' @include amplicanAlign.R amplicanReport.R
#' @return (invisible) results_folder path
#' @export
#' @family analysis steps
#' @examples
#' # path to example config file
#' config <- system.file("extdata", "config.csv", package = "amplican")
#' # path to example fastq files
#' fastq_folder <- system.file("extdata", package = "amplican")
#' # output folder
#' results_folder <- tempdir()
#'
#' #full analysis, not knitting files automatically
#' amplicanPipeline(config, fastq_folder, results_folder, knit_reports = FALSE)
#'
# config <- system.file("extdata", "config.csv", package = "amplican")
# fastq_folder <- system.file("extdata", package = "amplican")
# results_folder <- tempdir()
# knit_reports = TRUE
# write_alignments_format = "txt"
# average_quality = 30
# min_quality = 0
# use_parallel = FALSE
# scoring_matrix = Biostrings::nucleotideSubstitutionMatrix(
# match = 5, mismatch = -4, baseOnly = TRUE, type = "DNA")
# gap_opening = 25
# gap_extension = 0
# fastqfiles = 0.5
# PRIMER_DIMER = 30
# event_filter = TRUE
# cut_buffer = 5
# primer_mismatch = 1
# promiscuous_consensus = TRUE
# normalize = c("guideRNA", "Group")
# donor_mismatch = 3
# min_freq = 0.01
# Public entry point: the pipeline built by amplicanPipe() with the standard
# normalization threshold, min_freq = 0.01.
amplicanPipeline <- amplicanPipe(0.01)
#' Wraps main package functionality into one function.
#'
#' amplicanPipelineIndexHopping is identical as amplicanPipeline except that
#' default \code{min_freq} threshold is set to 0.15. Setting this threshold
#' higher will decrease risks of inadequate normalization in cases of potential
#' Index Hopping, potentially decreasing precision of true editing rate calling.
#' Index Hopping can be mitigated with use of unique dual indexing pooling
#' combinations. However, in cases when you might expect Index Hopping to occur
#' you should use this function instead of amplicanPipeline.
#'
#' \code{result_folder} and also knit prepared reports into 'reports' folder.
#' @inheritParams amplicanPipeline
#' @include amplicanAlign.R amplicanReport.R
#' @return (invisible) results_folder path
#' @export
#' @family analysis steps
#'
# Conservative variant: identical pipeline, but min_freq defaults to 0.15
# (see the roxygen description above regarding Index Hopping).
amplicanPipelineConservative <- amplicanPipe(0.15)
| /R/amplican.R | no_license | vaofford/amplican | R | false | false | 16,685 | r | #' Automated analysis of CRISPR experiments.
#'
#' Main goals:
#' \enumerate{
#' \item Flexible pipeline for analysis of the CRISPR Mi-Seq or Hi-Seq data.
#' \item Compatible with GRanges and data.table style.
#' \item Precise quantification of mutation rates.
#' \item Prepare automatic reports as .Rmd files that are flexible
#' and open for manipulation.
#' \item Provide specialized plots for deletions, insertions, mismatches,
#' variants, heterogeneity of the reads.
#' }
#'
#' To learn more about amplican, start with the vignettes:
#' \code{browseVignettes(package = "amplican")}
#'
#' @docType package
#' @name amplican
#' @useDynLib amplican
#'
#' @import Rcpp ggthemes waffle knitr methods BiocGenerics Biostrings data.table
#' @importFrom Rcpp sourceCpp
#'
# roxygen2 sentinel: generates the package-level documentation from the
# roxygen block above.
"_PACKAGE"
# Package load hook: shown once when the package is attached, asking users to
# cite the ampliCan paper. Emitted via packageStartupMessage() so it can be
# silenced with suppressPackageStartupMessages().
# Fix: "Pease" -> "Please" in the user-facing message.
.onAttach <- function(libname, pkgname) {
  packageStartupMessage(
    paste0("Please consider supporting this software by citing:\n\n",
           "Labun et al. 2019\n",
           "Accurate analysis of genuine CRISPR editing events with ampliCan.\n",
           "Genome Res. 2019 Mar 8\n",
           "doi: 10.1101/gr.244293.118\n",
           "\nWithout appreciation scientific software is usually abandoned and",
           " eventually deprecated, but you can easily support authors by ",
           "citations."))
}
# Factory producing the amplican end-to-end pipeline function.
#
# The returned closure captures `min_freq_default` as the default of its
# `min_freq` argument; `amplicanPipeline` (0.01) and
# `amplicanPipelineConservative` (0.15) are both built from this factory and
# share every other default. See the roxygen documentation attached to
# `amplicanPipeline` for the meaning of each pipeline argument.
amplicanPipe <- function(min_freq_default) {
  function(
    config, fastq_folder, results_folder, knit_reports = TRUE,
    write_alignments_format = "txt", average_quality = 30,
    min_quality = 0, use_parallel = FALSE,
    scoring_matrix = Biostrings::nucleotideSubstitutionMatrix(
      match = 5, mismatch = -4, baseOnly = TRUE, type = "DNA"),
    gap_opening = 25, gap_extension = 0, fastqfiles = 0.5,
    primer_mismatch = 0, donor_mismatch = 3, PRIMER_DIMER = 30,
    event_filter = TRUE, cut_buffer = 5,
    promiscuous_consensus = TRUE, normalize = c("guideRNA", "Group"),
    min_freq = min_freq_default) {
    # Resolve user-supplied paths to absolute form, then fail early if the
    # output directory is not writable.
    config <- normalizePath(config)
    fastq_folder <- normalizePath(fastq_folder)
    results_folder <- normalizePath(results_folder)
    message("Checking write access...")
    checkFileWriteAccess(results_folder)
    # Alignment step: all alignment-related parameters are forwarded
    # unchanged to amplicanAlign().
    aln <- amplicanAlign(config = config,
                         fastq_folder = fastq_folder,
                         use_parallel = use_parallel,
                         average_quality = average_quality,
                         scoring_matrix = scoring_matrix,
                         gap_opening = gap_opening,
                         gap_extension = gap_extension,
                         min_quality = min_quality,
                         fastqfiles = fastqfiles,
                         primer_mismatch = primer_mismatch,
                         donor_mismatch = donor_mismatch)
    # Persist alignments under <results_folder>/alignments.
    message("Saving alignments...")
    resultsFolder <- file.path(results_folder, "alignments")
    if (!dir.exists(resultsFolder)) {
      dir.create(resultsFolder)
    }
    # save as .rds object
    saveRDS(aln, file.path(resultsFolder, "AlignmentsExperimentSet.rds"))
    # save as other formats (skipped entirely when "None" is requested)
    if (!"None" %in% write_alignments_format) {
      for (frmt in write_alignments_format) {
        writeAlignments(aln, file.path(resultsFolder,
                                       paste0("alignments.", frmt)), frmt)
      }
    }
    # Write a human-readable log of all run parameters, replacing the log of
    # any previous run in the same folder.
    message("Saving parameters...")
    logFileName <- file.path(results_folder, "RunParameters.txt")
    if (file.exists(logFileName)) {
      file.remove(logFileName)
    }
    logFileConn <- file(logFileName, open = "at")
    writeLines(c(paste("Config file: ", config),
                 paste("Average Quality: ", average_quality),
                 paste("Minimum Quality: ", min_quality),
                 paste("Write Alignments: ", toString(write_alignments_format)),
                 paste("Fastq files Mode: ", fastqfiles),
                 paste("Gap Opening: ", gap_opening),
                 paste("Gap Extension: ", gap_extension),
                 paste("Consensus: ", promiscuous_consensus),
                 paste("Normalize: ", toString(normalize)),
                 paste("PRIMER DIMER buffer:", PRIMER_DIMER),
                 paste("Cut buffer:", cut_buffer),
                 "Scoring Matrix:"), logFileConn)
    utils::write.csv(scoring_matrix, logFileConn, quote = FALSE, row.names = TRUE)
    close(logFileConn)
    # Reads that could not be assigned to any experiment (may be NULL).
    message("Saving unassigned sequences...")
    unData <- unassignedData(aln)
    if (!is.null(unData)) data.table::fwrite(
      unData, file.path(resultsFolder, "unassigned_reads.csv"))
    message("Saving barcode statistics...")
    data.table::fwrite(barcodeData(aln),
                       file.path(results_folder, "barcode_reads_filters.csv"))
    # Convert alignments into an event table; keep the experiment config
    # (cfgT) for the per-experiment bookkeeping below.
    message("Translating alignments into events...")
    cfgT <- experimentData(aln)
    aln <- extractEvents(aln, use_parallel = use_parallel)
    message("Saving complete events - unfiltered...")
    data.table::fwrite(aln, file.path(resultsFolder, "raw_events.csv"))
    data.table::setDT(aln)
    # Local NULL bindings for column names used in data.table expressions
    # below (quiets R CMD check notes about undefined globals).
    seqnames <- read_id <- counts <- NULL
    if (dim(aln)[1] == 0) stop("There are no events.",
      "Check whether you have correct primers in the config file.")
    # Annotate events: overlap with expected cut sites, and consensus between
    # forward/reverse reads (consensus is only meaningful when both FASTQ
    # files are used, i.e. fastqfiles <= 0.5; otherwise everything passes).
    aln$overlaps <- amplicanOverlap(aln, cfgT, cut_buffer = cut_buffer)
    aln$consensus <- if (fastqfiles <= 0.5) {
      amplicanConsensus(aln, cfgT, promiscuous = promiscuous_consensus)
    } else { TRUE }
    # filter events overlapping primers
    eOP <- findEOP(aln, cfgT)
    aln <- aln[!eOP, ]
    # find PRIMER DIMERS
    PD <- findPD(aln, cfgT, PRIMER_DIMER = PRIMER_DIMER)
    # summarize how many PRIMER DIMER reads per ID
    onlyPD <- aln[PD, ]
    onlyPD <- unique(onlyPD, by = c("seqnames", "read_id"))
    data.table::setDT(onlyPD)
    summaryPD <- onlyPD[, list(counts = sum(counts)), by = c("seqnames")]
    cfgT$PRIMER_DIMER <- 0
    cfgT$PRIMER_DIMER[match(summaryPD$seqnames, cfgT$ID)] <- summaryPD$counts
    # apply filter - remove all events that come from PD infected reads
    aln <- aln[!onlyPD, on = list(seqnames, read_id)]
    # alignment event filter: per experiment, drop low-quality reads
    # (findLQR) and record how many were removed in cfgT$Low_Score.
    cfgT$Low_Score <- 0
    if (event_filter) {
      for (i in seq_len(dim(cfgT)[1])) {
        aln_id <- aln[seqnames == cfgT$ID[i], ]
        # Skip experiments with no events or with a donor template.
        # NOTE(review): scalar condition built with elementwise `|`; `||`
        # would be conventional here — confirm both operands are length 1.
        if (dim(aln_id)[1] == 0 | cfgT$Donor[i] != "") next()
        onlyBR <- aln_id[findLQR(aln_id), ]
        onlyBR <- unique(onlyBR, by = "read_id")
        cfgT[i, "Low_Score"] <- sum(onlyBR$counts)
        aln <- aln[!(aln$seqnames == cfgT$ID[i] &
                       aln$read_id %in% onlyBR$read_id), ]
      }
    }
    # Reads surviving both the PRIMER DIMER and low-score filters.
    cfgT$Reads_Filtered <- cfgT$Reads - cfgT$PRIMER_DIMER - cfgT$Low_Score
    # shift to relative (most left UPPER case is position 0)
    message("Shifting events as relative...")
    data.table::setDF(aln)
    aln <- data.frame(amplicanMap(aln, cfgT), stringsAsFactors = FALSE)
    message("Saving shifted events - filtered...")
    data.table::fwrite(aln,
                       file.path(resultsFolder, "events_filtered_shifted.csv"))
    # revert guides to 5'-3'
    cfgT$guideRNA[cfgT$Direction] <- revComp(cfgT$guideRNA[cfgT$Direction])
    # Normalize events against the control group using min_freq as the
    # sequencing-error threshold (see amplicanNormalize).
    message("Normalizing events...")
    aln <- amplicanNormalize(aln, cfgT, min_freq = min_freq, add = normalize)
    message("Saving normalized events...")
    data.table::fwrite(aln,
                       file.path(resultsFolder,
                                 "events_filtered_shifted_normalized.csv"))
    # Summarize only events that both pass consensus and overlap cut sites.
    cfgT <- amplicanSummarize(aln[aln$consensus & aln$overlaps, ], cfgT)
    data.table::fwrite(
      cfgT[, c("ID", "Barcode", "Forward_Reads_File", "Reverse_Reads_File",
               "Group", "guideRNA", "Found_Guide", "Control", "Forward_Primer",
               "Reverse_Primer", "Direction", "Amplicon", "Donor", "fwdPrPosEnd",
               "rvePrPos", "Reads", "PRIMER_DIMER", "Low_Score",
               "Reads_Filtered", "Reads_Del", "Reads_In",
               "Reads_Edited", "Reads_Frameshifted", "HDR")],
      file.path(results_folder, "config_summary.csv"))
    # reports
    reportsFolder <- file.path(results_folder, "reports")
    if (!dir.exists(reportsFolder)) {
      dir.create(reportsFolder)
    }
    message(paste0("Making reports... \nDue to high quality ",
                   "figures, it is time consuming. Use .Rmd templates for ",
                   "more control."))
    amplicanReport(results_folder,
                   knit_reports = knit_reports,
                   cut_buffer = cut_buffer,
                   report_files = file.path(reportsFolder,
                                            c("id_report",
                                              "barcode_report",
                                              "group_report",
                                              "guide_report",
                                              "amplicon_report",
                                              "index")))
    message("Finished.")
    # Return the results path invisibly so the pipeline pipes/chains cleanly.
    invisible(results_folder)
  }
}
#' Wraps main package functionality into one function.
#'
#' amplicanPipeline is convenient wrapper around all functionality of the
#' package with the most robust settings. It will generate all results in the
#' \code{result_folder} and also knit prepared reports into 'reports' folder.
#' @param results_folder (string) Where do you want to store results?
#' The package will create files in that folder so make sure you have writing
#' permissions.
#' @param config (string) The path to your configuration file. For example:
#' \code{system.file("extdata", "config.txt", package = "amplican")}.
#' Configuration file can contain additional columns, but first 11 columns
#' have to follow the example config specification.
#' @param fastq_folder (string) Path to FASTQ files. If not specified,
#' FASTQ files should be in the same directory as config file.
#' @param knit_reports (boolean) whether function should "knit" all
#' reports automatically for you (it is time consuming, be patient), when false
#' reports will be prepared, but not knitted
#' @param use_parallel (boolean) Set to TRUE, if you have registered
#' multicore back-end.
#' @param average_quality (numeric) The FASTQ file have a quality for each
#' nucleotide, depending on sequencing technology there exist many formats.
#' This package uses \code{\link[ShortRead]{readFastq}} to parse the reads.
#' If the average quality of the reads fall below value of
#' \code{average_quality} then sequence is filtered. Default is 0.
#' @param min_quality (numeric) Similar as in average_quality, but depicts
#' the minimum quality for ALL nucleotides in given read. If one of nucleotides
#' has quality BELOW \code{min_quality}, then the sequence is filtered.
#' Default is 20.
#' @param write_alignments_format (character vector) Whether
#' \code{amplicanPipeline} should write alignments results to separate files.
#' Alignments are also always saved as .rds object of
#' \code{\link{AlignmentsExperimentSet}} class.
#' Possible options are:
#' \itemize{
#' \item{"fasta"}{ outputs alignments in fasta format where header indicates
#' experiment ID, read id and number of reads}
#' \item{"txt"}{ simple format, read information followed by forward read and
#' amplicon sequence followed by reverse read with its amplicon sequence
#' eg.: \cr
#' \preformatted{
#' ID: ID_1 Count: 7
#' ACTGAAAAA--------
#' ACTG-----ACTGACTG
#'
#' ------G-ACTG
#' ACTGACTGACTG
#' }}
#' \item{"None"}{ Don't write any alignments to files.}
#' \item{c("fasta", "txt")}{ There are also possible combinations of
#' above formats, pass a vector to get alignments in multiple formats.}
#' }
#' @param scoring_matrix (matrix) Default is 'NUC44'. Pass desired matrix using
#' \code{\link{nucleotideSubstitutionMatrix}}.
#' @param gap_opening (numeric) The opening gap score.
#' @param gap_extension (numeric) The gap extension score.
#' @param fastqfiles (numeric) Normally you want to use both FASTQ files. But in
#' some special cases, you may want to use only the forward file, or only
#' the reverse file. Possible options:
#' \itemize{
#' \item{0}{ Use both FASTQ files.}
#' \item{0.5}{ Use both FASTQ files, but only for one of the reads (forward or
#' reverse) is required to have primer perfectly matched to sequence - eg. use
#' when reverse reads are trimmed of primers, but forward reads have forward
#' primer in the sequence.}
#' \item{1}{ Use only the forward FASTQ file.}
#' \item{2}{ Use only the reverse FASTQ file.}
#' }
#' @param primer_mismatch (numeric) Decide how many mismatches are allowed
#' during primer matching of the reads, that groups reads by experiments.
#' When \code{primer_mismatch = 0} no mismatches are allowed, which can increase
#' number of unassigned reads.
#' @param donor_mismatch (numeric) How many events of length 1 (mismatches,
#' deletions and insertions of length 1) are allowed when aligning toward the
#' donor template. This parameter is only used when donor template is specified.
#' The higher the parameter the less strict will be algorithm accepting read as
#' HDR. Set to 0 if only perfect alignments to the donor template marked as HDR,
#' unadvised due to error rate of the sequencers.
#' @param PRIMER_DIMER (numeric) Value specifying buffer for PRIMER DIMER
#' detection. For a given read it will be recognized as PRIMER DIMER when
#' alignment will introduce gap of size bigger than: \cr
#' \code{length of amplicon - (lengths of PRIMERS + PRIMER_DIMER value)}
#' @param event_filter (logical) Whether detection of offtarget reads,
#' should be enabled.
#' @param cut_buffer The number of bases by which extend expected cut sites
#' (specified as UPPER case letters in the amplicon) in 5' and 3' directions.
#' @param promiscuous_consensus (boolean) Whether rules of
#' \code{\link{amplicanConsensus}} should be \code{promiscuous}. When
#' promiscuous, we allow indels that have no confirmation on the other strand.
#' @param normalize (character vector) If column 'Control' in config table
#' has all FALSE/0 values then normalization is skipped. Otherwise,
#' normalization is strict, which means events that are
#' found in 'Control' TRUE group will be removed in 'Control' FALSE group.
#' This parameter by default uses columns 'guideRNA' and 'Group' to impose
#' additional restrictions on normalized events eg. only events created by the
#' same 'guideRNA' in the same 'Group' will be normalized.
#' @param min_freq (numeric) All events below this frequency are treated as
#' sequencing errors and rejected. This parameter is used during normalization
#' through \code{\link{amplicanNormalize}}.
#' @include amplicanAlign.R amplicanReport.R
#' @return (invisible) results_folder path
#' @export
#' @family analysis steps
#' @examples
#' # path to example config file
#' config <- system.file("extdata", "config.csv", package = "amplican")
#' # path to example fastq files
#' fastq_folder <- system.file("extdata", package = "amplican")
#' # output folder
#' results_folder <- tempdir()
#'
#' #full analysis, not knitting files automatically
#' amplicanPipeline(config, fastq_folder, results_folder, knit_reports = FALSE)
#'
# config <- system.file("extdata", "config.csv", package = "amplican")
# fastq_folder <- system.file("extdata", package = "amplican")
# results_folder <- tempdir()
# knit_reports = TRUE
# write_alignments_format = "txt"
# average_quality = 30
# min_quality = 0
# use_parallel = FALSE
# scoring_matrix = Biostrings::nucleotideSubstitutionMatrix(
# match = 5, mismatch = -4, baseOnly = TRUE, type = "DNA")
# gap_opening = 25
# gap_extension = 0
# fastqfiles = 0.5
# PRIMER_DIMER = 30
# event_filter = TRUE
# cut_buffer = 5
# primer_mismatch = 1
# promiscuous_consensus = TRUE
# normalize = c("guideRNA", "Group")
# donor_mismatch = 3
# min_freq = 0.01
# Public entry point: the pipeline built by amplicanPipe() with the standard
# normalization threshold, min_freq = 0.01.
amplicanPipeline <- amplicanPipe(0.01)
#' Wraps main package functionality into one function.
#'
#' amplicanPipelineIndexHopping is identical as amplicanPipeline except that
#' default \code{min_freq} threshold is set to 0.15. Setting this threshold
#' higher will decrease risks of inadequate normalization in cases of potential
#' Index Hopping, potentially decreasing precision of true editing rate calling.
#' Index Hopping can be mitigated with use of unique dual indexing pooling
#' combinations. However, in cases when you might expect Index Hopping to occur
#' you should use this function instead of amplicanPipeline.
#'
#' \code{result_folder} and also knit prepared reports into 'reports' folder.
#' @inheritParams amplicanPipeline
#' @include amplicanAlign.R amplicanReport.R
#' @return (invisible) results_folder path
#' @export
#' @family analysis steps
#'
# Conservative variant: identical pipeline, but min_freq defaults to 0.15
# (see the roxygen description above regarding Index Hopping).
amplicanPipelineConservative <- amplicanPipe(0.15)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/databICESdiv.R
\name{databICESdiv}
\alias{databICESdiv}
\title{Datos de abundancia por división ICES para una especie, resumen para grupos de trabajo}
\usage{
databICESdiv(gr, esp, camp, dns = "Cant", cor.time = TRUE, Nas = FALSE)
}
\arguments{
\item{gr}{Grupo de la especie: 1 peces, 2 crustáceos 3 moluscos 4 equinodermos 5 invertebrados}
\item{esp}{Código de la especie numérico o carácter con tres espacios. 999 para todas las especies del grupo}
\item{camp}{Campaña de la que se extraen los datos: un año concreto (XX): Demersales "NXX", Porcupine "PXX", Arsa primavera "1XX" y Arsa otoño "2XX"}
\item{dns}{Elige el origen de las bases de datos: Porcupine "Pnew", Cantábrico "Cant", Golfo de Cádiz "Arsa" (únicamente para sacar datos al IBTS, no gráficos)}
\item{cor.time}{Si T corrige las abundancias en función de la duración del lance}
\item{Nas}{Permite calcular los errores estándar aunque sólo haya un lance en algún estrato (haciendo varianza =0 en ese estrato, incorrecto pero da una idea cuando sólo un estrato entre varios tiene sólo un lance)}
}
\value{
Devuelve un número con nombres organizado en dos líneas (biomasa y número) en columnas por subdivisiones ICES por columnas abundancia estratificada media por XIa, 8.cE, 8.cW
}
\description{
Salida de datos a csv para rellenar los informes de grupo de trabajo, filas con datos ab estratificada (Biomasa y N) y error estándar por subdivisión ICES función para Demersales Norte (saca 9.a, 8.c y total)
}
\seealso{
{\link{databICES} \link{databEstr} \link{datab}}
}
| /man/databICESDiv.Rd | no_license | Franvgls/CampR | R | false | true | 1,639 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/databICESdiv.R
\name{databICESdiv}
\alias{databICESdiv}
\title{Datos de abundancia por división ICES para una especie resúmen para grupos de trabajo}
\usage{
databICESdiv(gr, esp, camp, dns = "Cant", cor.time = TRUE, Nas = FALSE)
}
\arguments{
\item{gr}{Grupo de la especie: 1 peces, 2 crustáceos 3 moluscos 4 equinodermos 5 invertebrados}
\item{esp}{Código de la especie numérico o carácter con tres espacios. 999 para todas las especies del grupo}
\item{camp}{Campaña de la que se extraen los datos: un año comcreto (XX): Demersales "NXX", Porcupine "PXX", Arsa primavera "1XX" y Arsa otoño "2XX"}
\item{dns}{Elige el origen de las bases de datos: Porcupine "Pnew", Cantábrico "Cant, Golfo de Cádiz "Arsa" (únicamente para sacar datos al IBTS, no gráficos)}
\item{cor.time}{Si T corrige las abundancias en función de la duración del lance}
\item{Nas}{Permite calcular los errores estándar aunque sólo haya un lance en algún estrato (haciendo varianza =0 en ese estrato, incorrecto pero da una idea cuando sólo un estrato entre varios tiene sólo un lance)}
}
\value{
Devuelve un número con nombres organizado en dos líneas (biomasa y número) en columnas por subdivisiones ICES por columnas abundancia estratificada media por XIa, 8.cE, 8.cW
}
\description{
Salida de datos a csv para rellenar los informes de grupo de trabajo, filas con datos ab estratificada (Biomasa y N) y error estándar por subdivisión ICES función para Demersales Norte (saca 9.a, 8.c y total)
}
\seealso{
{\link{databICES} \link{databEstr} \link{datab}}
}
|
library(glmnet)

# Fit a 10-fold cross-validated elastic-net model (alpha = 0.95, i.e. mostly
# lasso) on the kidney training set and append the fitted coefficient path
# summary to the model log file.
# Fixes: `head=T` relied on partial argument matching and the reassignable
# `T`; the result variable was named `glm`, masking stats::glm.
mydata <- read.table("./TrainingSet/LassoBIC/kidney.csv",
                     header = TRUE, sep = ",")
x <- as.matrix(mydata[, 4:ncol(mydata)])  # predictor columns start at 4
y <- as.matrix(mydata[, 1])               # response is the first column
set.seed(123)  # reproducible cross-validation fold assignment
fit <- cv.glmnet(x, y, nfolds = 10, type.measure = "mae", alpha = 0.95,
                 family = "gaussian", standardize = TRUE)
sink("./Model/EN/Lasso/kidney/kidney_095.txt", append = TRUE)
print(fit$glmnet.fit)
sink()  # restore normal console output
| /Model/EN/Lasso/kidney/kidney_095.R | no_license | leon1003/QSMART | R | false | false | 351 | r | library(glmnet)
mydata = read.table("./TrainingSet/LassoBIC/kidney.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.95,family="gaussian",standardize=TRUE)
sink('./Model/EN/Lasso/kidney/kidney_095.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
#' Sulfate/nitrate correlation across pollution monitor files.
#'
#' For each monitor id, reads "<directory>/<id padded to 3 digits>.csv",
#' drops incomplete rows, and -- when at least `threshold` complete
#' observations remain -- records the correlation between the "sulfate" and
#' "nitrate" columns. NA correlations are dropped, as before.
#'
#' Fixes/generalization: results are collected in a preallocated list rather
#' than grown with append() (quadratic); the hard-coded monitor range 1:332
#' is now the default of a new `id` parameter, so existing callers behave
#' identically.
#'
#' @param directory Path to the folder containing the monitor CSV files.
#' @param threshold Minimum number of complete rows required to include a
#'   monitor's correlation in the result (default 0, i.e. no filtering).
#' @param id Integer vector of monitor ids to read (default 1:332).
#' @return Numeric vector of correlations (possibly shorter than `id`).
corr2 <- function(directory, threshold = 0, id = 1:332) {
  out <- vector("list", length(id))
  for (k in seq_along(id)) {
    path <- file.path(directory, sprintf("%03d.csv", id[k]))
    data <- na.omit(read.csv(path))
    if (nrow(data) >= threshold) {
      cr <- cor(data["sulfate"], data["nitrate"])
      if (!is.na(cr)) {
        out[[k]] <- as.numeric(cr)
      }
    }
  }
  # Skipped monitors leave NULL entries, which unlist() drops; as.numeric()
  # turns an all-NULL result into numeric(0), matching the original.
  as.numeric(unlist(out))
}
| /week2/corr2.r | permissive | josteinstraume/datasciencecoursera | R | false | false | 327 | r | corr2 <- function(directory, threshold = 0) {
corr <- numeric(0)
for (i in 1:332) {
data <- na.omit(read.csv(paste(directory, '/', sprintf("%03d", i), ".csv", sep="")))
if (nrow(data) >= threshold) {
cr <- cor(data["sulfate"], data["nitrate"])
if (!is.na(cr)) {
corr <- append(corr, cr)
}
}
}
corr
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/co.R
\docType{data}
\name{co}
\alias{co}
\title{Tibble con i dati di co per 6 stazioni della regione TOSCANA}
\format{
Un tibble con 8 colonne e 4386 osservazioni
}
\usage{
co
}
\description{
Tibble con i dati di co per 6 stazioni della regione TOSCANA
}
\keyword{datasets}
| /man/co.Rd | permissive | progettopulvirus/toscana | R | false | true | 352 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/co.R
\docType{data}
\name{co}
\alias{co}
\title{Tibble con i dati di co per 6 stazioni della regione TOSCANA}
\format{
Un tibble con 8 colonne e 4386 osservazioni
}
\usage{
co
}
\description{
Tibble con i dati di co per 6 stazioni della regione TOSCANA
}
\keyword{datasets}
|
# Fit the hierNet interaction model (squared-error loss) at a single value
# of the regularization parameter `lam`.
#
#   x, y      : n x p design matrix and numeric response of length n.
#   lam, delta: overall penalty and elastic-net mix; lam.l1 = lam*(1-delta),
#               lam.l2 = lam*delta.
#   strong    : TRUE = strong hierarchy (ADMM); FALSE = weak hierarchy
#               (a single generalized-gradient-descent run).
#   diagonal  : include squared (x_j^2) terms on the interaction diagonal.
#   aa        : optional warm start (list with th, bp, bn).
#   zz        : optional precomputed interaction matrix; a non-matrix zz is
#               assumed to be already centered and flattened.
#   center, stand.main, stand.int : centering and scaling switches.
#   rho, niter, sym.eps           : ADMM parameters (strong=TRUE only).
#   step, maxiter, backtrack, tol : gradient-descent parameters.
#   trace     : verbosity level.
# Returns an object of class "hierNet": th (p x p interactions), bp/bn
# (positive/negative parts of the main effects), obj, and the centering and
# scaling statistics (mx, sx, my, mzz, szz) that predict.hierNet replays.
hierNet <- function(x, y, lam, delta=1e-8, strong=FALSE, diagonal=TRUE, aa=NULL, zz=NULL, center=TRUE, stand.main=TRUE, stand.int=FALSE,
                    rho=nrow(x), niter=100, sym.eps=1e-3,
                    step=1, maxiter=2000, backtrack=0.2, tol=1e-5,
                    trace=0) {
  # Main Hiernet function for fitting at a single parameter lambda.
  # Note: L1 penalty terms have parameter lam.l1 = lambda * (1-delta)
  # and L2 penalty has parameter lam.l2 = lambda * delta.
  #
  # stand.main and stand.int refer to scaling
  stopifnot(nrow(x) == length(y), lam >= 0, delta >= 0, delta <= 1)
  stopifnot(!is.null(step) && !is.null(maxiter))
  if (strong) stopifnot(!is.null(niter))
  # NOTE(review): class(y) == "numeric" rejects integer vectors; is.numeric
  # would be laxer -- kept as-is to preserve behavior.
  stopifnot(class(y) == "numeric")
  stopifnot(class(lam) == "numeric")
  stopifnot(class(delta) == "numeric")
  stopifnot(class(step) == "numeric", step > 0, maxiter > 0)
  stopifnot(is.finite(x), is.finite(y), is.finite(lam), is.finite(delta))
  this.call <- match.call()
  if (!center) cat("WARNING: center=FALSE should almost never be used. This option is available for special uses only.", fill=TRUE)
  # center and (maybe) scale variables
  x <- scale(x, center=center, scale=stand.main)
  mx <- attr(x, "scaled:center")
  sx <- attr(x, "scaled:scale") # may be NULL
  if (center) {
    my <- mean(y)
    y <- y - my
  } else my <- NULL
  if (is.null(zz)) {
    if (trace > 0) cat("Computing zz...", fill=TRUE)
    zz <- compute.interactions.c(x, diagonal=diagonal)
  }
  if (is.matrix(zz)) {
    zz <- scale(zz, center=center, scale=stand.int)
    mzz <- attr(zz, "scaled:center")
    szz <- attr(zz, "scaled:scale") # may be NULL
    zz <- as.numeric(zz)
  } else {
    # non-matrix zz: assumed already centered/flattened, so no stats to keep
    mzz <- szz <- NULL
    #cat("Provided zz is not a matrix, so it's assumed to be already centered.", fill=TRUE)
  }
  xnum <- as.numeric(x)   # flattened copy for the C routines
  p <- ncol(x)
  lam.l1 <- lam * (1 - delta)
  lam.l2 <- lam * delta
  if (strong) {
    # strong hierarchy -- use ADMM4
    if (is.null(rho)) rho <- as.numeric(nrow(x))
    stopifnot(is.numeric(rho), is.finite(rho))
    aa <- admm4(x, xnum, y, lam.l1=lam.l1, lam.l2=lam.l2, diagonal=diagonal, zz=zz,
                rho=rho, niter=niter, aa=aa, sym.eps=sym.eps, # ADMM params
                stepsize=step, backtrack=backtrack, maxiter=maxiter, tol=tol, # GG params
                trace=trace)
    # lack of symmetry in theta means that sometimes strong hierarchy will be (very slightly violated)
    ii <- aa$bp + aa$bn == 0   # predictors with zero main effect
    # note aa$th[ii, ] = 0 since weak hierarchy holds for sure
    if (sum(ii) > 0 & sum(ii) < p) {
      # zero tiny asymmetric leftovers so strong hierarchy holds exactly
      thr <- max(abs(aa$th[!ii, ii]))
      if (thr > 0) {
        cat(" thr = ",thr, fill=TRUE)
        if (thr > 1e-3)
          warning("Had to change ADMM's 'th' by more than 0.001 to make strong hier hold! Increase niter (and/or rho). ")
        aa$th[abs(aa$th) <= thr] <- 0
      }
    }
  } else {
    # weak hierarchy -- a single call to generalized gradient descent
    if (is.null(aa)) {
      aa <- list(th=matrix(0, p, p), bp=rep(0, p), bn=rep(0, p))
    } else {
      stopifnot(dim(aa$th) == c(p,p), length(aa$bp) == p, length(aa$bn) == p)
    }
    # this could be improved by not actually creating V...
    V <- matrix(0, p, p)
    rho <- 0
    aa <- ggdescent.c(x=x, xnum=xnum, zz=zz, y=y, lam.l1=lam.l1, lam.l2=lam.l2, diagonal=diagonal,
                      rho=rho, V=V,
                      stepsize=step, backtrack=backtrack, maxiter=maxiter, tol=tol,
                      aa=aa, trace=trace)
  }
  # record fit metadata on the returned object
  aa$lam <- lam
  aa$delta <- delta
  aa$type <- "gaussian"
  aa$diagonal <- diagonal
  aa$strong <- strong
  aa$obj <- Objective(aa=aa, x=x, y=y, lam.l1=lam.l1, lam.l2=lam.l2, xnum=xnum, zz=zz, strong=strong, trace = trace-1)
  aa$step <- step
  aa$maxiter <- maxiter
  aa$backtrack <- backtrack
  aa$tol <- tol
  if (strong) {
    # ADMM parameters:
    aa$rho <- rho
    aa$niter <- niter
    aa$sym.eps <- sym.eps
  }
  # centering/scaling statistics, needed at prediction time
  aa$mx <- mx
  aa$sx <- sx
  aa$my <- my
  aa$mzz <- mzz
  aa$szz <- szz
  aa$call <- this.call
  class(aa) <- "hierNet"
  return(aa)
}
print.hierNet <- function(x, ...) {
  # Print method for a single hierNet fit: shows the call, then the nonzero
  # main effects and (if any) their nonzero interactions.  A "*" in the last
  # column marks predictors whose hierarchy constraint is tight.
  # (Idiom cleanup only: TRUE instead of T, <- instead of =; output unchanged.)
  cat("Call:\n")
  dput(x$call)
  th <- (x$th + t(x$th)) / 2   # symmetrized interaction matrix
  o2 <- colSums(th^2) != 0     # predictors appearing in any interaction
  b <- x$bp - x$bn             # signed main effects
  o <- b != 0
  b <- b[o]
  if (any(o2)) {
    # model has interactions
    th <- th[o, o2, drop = FALSE]
    # hierarchy is tight when the interaction budget |th| reaches bp + bn
    tight <- rowSums(abs(th)) >= x$bp[o] + x$bn[o] - 1e-9
    tt <- rep("", length(tight))
    tt[tight] <- "*"
    mat <- cbind(b, th)
    mat <- round(mat, 4)
    mat <- cbind(mat, tt)
    cat("\n")
    cat("Non-zero coefficients:", fill = TRUE)
    cat(" (Rows are predictors with nonzero main effects)", fill = TRUE)
    cat(" (1st column is main effect)", fill = TRUE)
    cat(" (Next columns are nonzero interactions of row predictor)", fill = TRUE)
    cat(" (Last column indicates whether hierarchy constraint is tight.)", fill = TRUE)
    cat("\n")
    dimnames(mat) <- list(as.character(which(o)), c("Main effect", as.character(which(o2)), "Tight?"))
    print(mat, quote = FALSE)
  } else {
    mat <- matrix(round(b, 4), length(b), 1)
    cat("\n")
    cat("Non-zero coefficients:", fill = TRUE)
    cat(" (No interactions in this model)", fill = TRUE)
    cat("\n")
    dimnames(mat) <- list(as.character(which(o)), "Main effect")
    print(mat, quote = FALSE)
  }
  invisible()
}
print.hierNet.path <- function(x, ...) {
  # Print method for a hierNet regularization path: one row per lambda with
  # the objective value and the counts of nonzero main effects/interactions.
  cat("Call:\n")
  dput(x$call)
  b <- x$bp - x$bn
  # Count an interaction pair (j,k) once if either th[j,k] or th[k,j] is
  # nonzero; diagonal (squared-term) entries are counted separately.
  n.int <- apply(x$th != 0, 3, function(a) sum(diag(a)) + sum((a + t(a) != 0)[upper.tri(a)]))
  # BUG FIX: the original indexed x$lam, which only resolved to x$lamlist
  # through `$` partial matching; use the full component name.
  mat <- cbind(round(x$lamlist, 2), round(x$obj, 2), colSums(b != 0), n.int)
  dimnames(mat) <- list(NULL, c("Lambda", "Objective", "Number of main effects", "Number of interactions"))
  cat("\n")
  print(mat, quote = FALSE)
  invisible()
}
print.hierNet.cv <- function(x, ...) {
  # Print method for hierNet cross-validation results: a per-lambda table of
  # CV error with standard errors, followed by the chosen lambda values
  # (minimum-error lamhat and 1-SE-rule lamhat.1se).
  # (Idiom cleanup only: TRUE instead of T, <- instead of =; output unchanged.)
  cat("Call:\n")
  dput(x$call)
  mat <- cbind(round(x$lamlist, 2), x$nonzero, round(x$cv.err, 2), round(x$cv.se, 2))
  dimnames(mat) <- list(NULL, c("Lambda", "Number of nonzero", "Mean CV error", "SE"))
  cat("\n")
  print(mat, quote = FALSE)
  cat("\n")
  cat(c("lamhat=", round(x$lamhat, 2), "lamhat.1se=", round(x$lamhat.1se, 2)), fill = TRUE)
  invisible()
}
# Fit hierNet over a decreasing sequence of lambda values.  x and zz are
# centered/scaled once here, then hierNet() is called per lambda with
# stand.main=FALSE / stand.int=FALSE, warm-starting each fit from the
# previous solution.  Returns an object of class "hierNet.path".
hierNet.path <- function(x, y, lamlist=NULL, delta=1e-8, minlam=NULL, maxlam=NULL, nlam=20, flmin=.01,
                         diagonal=TRUE, strong=FALSE, aa=NULL, zz=NULL,
                         stand.main=TRUE, stand.int=FALSE,
                         rho=nrow(x), niter=100, sym.eps=1e-3,# ADMM params
                         step=1, maxiter=2000, backtrack=0.2, tol=1e-5, # GG descent params
                         trace=0) {
  # Main Hiernet function for fitting at a sequence of lambda values.
  # Note: L1 penalty terms have parameter lam.l1 = lambda * (1-delta)
  # and L2 penalty has parameter lam.l2 = lambda * delta.
  #
  # Always centers both x and zz (unless zz is provided in as.numeric form)
  # stand.main and stand.int refer to whether main effects and interactions should have norm sqrt(n-1)
  # center and (maybe) scale variables
  this.call <- match.call()
  x <- scale(x, center=TRUE, scale=stand.main)
  mx <- attr(x, "scaled:center")
  sx <- attr(x, "scaled:scale") # may be NULL
  my <- mean(y)
  y <- y - my
  if (is.null(maxlam)) {
    if (!is.null(minlam)) stop("Cannot have maxlam=NULL if minlam is non-null.")
    # default largest lambda: max absolute inner product of a column with y
    # maxlam <- max(abs(t(x) %*% y)/colSums(x^2))
    maxlam <- max(abs(t(x) %*% y))
    # temp <- t(scale(t(x), center=FALSE, scale=1/y))
    # temp2 <- apply(temp, 2, twonorm)
    # maxlam <- max(max(temp2), maxlam)
    minlam <- maxlam * flmin
  }
  if (is.null(minlam)) minlam <- maxlam * flmin
  # log-spaced grid from maxlam down to minlam
  if (is.null(lamlist))
    lamlist <- exp(seq(log(maxlam),log(minlam),length=nlam))
  nlam <- length(lamlist)
  if (is.null(zz))
    zz <- compute.interactions.c(x, diagonal=diagonal)
  else
    stopifnot(is.matrix(zz))
  # center and (maybe) scale zz
  zz <- scale(zz, center=TRUE, scale=stand.int)
  mzz <- attr(zz, "scaled:center")
  szz <- attr(zz, "scaled:scale") # may be NULL
  zz <- as.numeric(zz)
  p <- ncol(x)
  cp2 <- choose(p, 2)
  bp <- bn <- matrix(NA, nrow=p, ncol=nlam)
  th <- array(NA, c(p, p, nlam))
  obj <- rep(NA, nlam)
  # NOTE(review): this discards any user-supplied warm start `aa` -- confirm
  # whether the `aa` argument is intended to seed the first fit.
  aa <- NULL
  for (i in seq(nlam)) {
    if (trace != 0) {
      cat(c("i,lam=", i, round(lamlist[i],2)), fill=TRUE)
    }
    # warm-start each fit from the previous lambda's solution
    aa <- hierNet(x, y, lam=lamlist[i], delta=delta, strong=strong, diagonal=diagonal, aa=aa, zz=zz,
                  stand.main=FALSE, stand.int=FALSE, # have already standardized
                  rho=rho, niter=niter, sym.eps=sym.eps,
                  step=step, maxiter=maxiter, backtrack=backtrack, tol=tol, trace=trace)
    bp[, i] <- aa$bp
    bn[, i] <- aa$bn
    th[, , i] <- aa$th
    obj[i] <- aa$obj
  }
  dimnames(bp) <- dimnames(bn) <- list(as.character(1:p), NULL)
  dimnames(th) <- list(as.character(1:p), as.character(1:p), NULL)
  out <- list(bp=bp, bn=bn, th=th, obj=obj, lamlist=lamlist, delta=delta, mx=mx, sx=sx, mzz=mzz, szz=szz, my=my,
              type="gaussian", diagonal=diagonal, strong=strong,
              step=step, maxiter=maxiter, backtrack=backtrack, tol=tol,
              call=this.call)
  if (strong) {
    # ADMM parameters:
    out$rho <- rho
    out$niter <- niter
    out$sym.eps <- sym.eps
  }
  class(out) <- "hierNet.path"
  out
}
predict.hierNet <- function(object, newx, newzz=NULL, ...) {
  # Predict from a "hierNet" fit (single fit or a whole path -- path objects
  # are detected by a matrix-valued bp).  newx is recentered/rescaled with
  # the training statistics (mx/sx), interactions are recomputed unless
  # newzz is supplied, and the linear predictor is evaluated in C.
  # For logistic fits, returns list(prob, yhat) with a 0.5 cutoff;
  # otherwise returns the numeric predictions (vector or n x nlam matrix).
  n <- nrow(newx)
  # replay training centering/scaling; sx/szz are NULL when no scaling was used
  if (is.null(object$sx))
    newx <- scale(newx, center=object$mx, scale=FALSE)
  else
    newx <- scale(newx, center=object$mx, scale=object$sx)
  if (is.null(newzz))
    newzz <- compute.interactions.c(newx, diagonal=object$diagonal)
  if (is.null(object$szz))
    newzz <- scale(newzz, center=object$mzz, scale=FALSE)
  else
    newzz <- scale(newzz, center=object$mzz, scale=object$szz)
  newzz <- as.numeric(newzz)
  newx <- as.numeric(newx)
  stopifnot(is.finite(newzz), is.finite(newx))
  if (!("matrix" %in% class(object$bp)))
    yhatt <- Compute.yhat.c(newx, newzz, object) + object$my
  else {
    # path object: one prediction column per lambda
    nlam <- ncol(object$bp)
    yhat <- matrix(NA, n, nlam)
    # this could be made more efficient
    for (i in seq(nlam)) {
      bb <- list(bp=object$bp[, i], bn=object$bn[, i], th=object$th[, , i], diagonal=object$diagonal)
      yhat[, i] <- Compute.yhat.c(newx, newzz, bb)
    }
    yhatt <- yhat + object$my
  }
  if (object$type == "logistic") {
    # predict from hierNet.logistic object object
    b0 <- object$b0
    if(is.matrix(yhatt))
      b0 <- matrix(b0, nrow=nrow(yhatt), ncol=ncol(yhatt), byrow=T)
    yhatt <- b0 + yhatt
    pr <- 1 / (1 + exp(-yhatt))   # inverse-logit to probabilities
    return(list(prob=pr, yhat=1*(pr>.5)))
  }
  return(yhatt)
}
predict.hierNet.path <- function(object, newx, newzz=NULL, ...) {
  # Path prediction simply delegates to the single-fit method, which
  # recognizes path objects by their matrix-valued bp component.
  predict.hierNet(object = object, newx = newx, newzz = newzz, ...)
}
# ADMM solver for the strong-hierarchy hierNet problem (squared-error loss).
# Alternates a generalized-gradient step in (th, bp, bn) with updates of the
# symmetrized auxiliary variable tt and the dual variable u.  Returns the
# final iterate `aa` augmented with the objective trace (obj) and the
# Lagrangian trace (lagr).
admm4 <- function(x, xnum, y, lam.l1, lam.l2, diagonal, zz=NULL, rho, niter, aa=NULL, sym.eps=1e-3, trace=1, ...) {
  # Performs ADMM4.
  # Note: xnum is the matrix x as a numeric. Both are passed to avoid having to call as.numeric too
  # many times.
  p <- ncol(x)
  if (is.null(zz)) {
    if (trace > 0) cat("Computing zz...", fill=TRUE)
    zz <- as.numeric(compute.interactions.c(x, diagonal=diagonal))
  }
  else if ("matrix" %in% class(zz)) zz <- as.numeric(zz)
  if (is.null(aa)) {
    # cold start: everything at zero
    aa <- list(u=matrix(0, p, p),
               th=matrix(0, p, p),
               bp=rep(0, p),
               bn=rep(0, p),
               tt=matrix(0, p, p),
               diagonal=diagonal)
  } else {
    stopifnot(diagonal == aa$diagonal)
  }
  if (is.null(aa$tt) || is.null(aa$u)) {
    # warm start without ADMM state: symmetrize th for tt, zero the dual
    aa$tt <- 0.5 * (aa$th + t(aa$th))
    aa$u <- matrix(0, p, p)
  }
  obj <- Objective(aa=aa, x=x, y=y, lam.l1=lam.l1, lam.l2=lam.l2, xnum=xnum, zz=zz, strong=TRUE, sym.eps=sym.eps, trace = trace-1)
  ll <- NULL   # augmented-Lagrangian value, recorded once per iteration
  for (i in seq(niter)) {
    if (trace > 0) cat(i, " ")
    ll <- c(ll, ADMM4.Lagrangian(aa, xnum, zz, y, lam.l1=lam.l1, lam.l2=lam.l2, diagonal=diagonal, rho))
    V <- aa$u - rho * aa$tt   # linear term of the (th, bp, bn) subproblem
    gg <- ggdescent.c(x, xnum, zz, y, lam.l1=lam.l1, lam.l2=lam.l2, diagonal=diagonal, rho, V, trace=trace-1, aa=aa, ...)
    aa$th <- gg$th
    aa$bp <- gg$bp
    aa$bn <- gg$bn
    # tt-update (symmetrized average) and dual ascent on th - tt
    aa$tt <- (aa$th + t(aa$th)) / 2 + (aa$u + t(aa$u)) / (2 * rho)
    aa$u <- aa$u + rho * (aa$th - aa$tt)
    obj <- c(obj, Objective(aa=aa, x=x, y=y, lam.l1=lam.l1, lam.l2=lam.l2, xnum=xnum, zz=zz, strong=TRUE, sym.eps=sym.eps, trace = trace-1))
    if (trace > 0) cat(obj[i+1], fill=TRUE)
  }
  if (max(abs(aa$th-t(aa$th))) > sym.eps)
    cat("Attention: th not symmetric within the desired sym.eps. Run ADMM for more iterations. And try increasing rho.")
  aa$obj <- obj
  aa$lagr <- ll
  aa
}
# Evaluate the hierNet (squared-error) objective at parameter estimate `aa`.
# Returns Inf when `aa` is infeasible: asymmetric Theta (strong case only),
# violated hierarchy constraint, negative bp/bn, or nonzero diagonal when
# diagonal interactions are disabled.  Small tolerances absorb numerical
# noise from the optimizers.
Objective <- function(aa, x, y, lam.l1, lam.l2, xnum=NULL, zz=NULL, strong=TRUE, sym.eps=1e-3, trace = -1) {
  # evaluates the NewYal objective at aa.
  if (strong) {
    # th - t(th) is antisymmetric, so its max equals the max abs deviation
    if (max(aa$th-t(aa$th)) > sym.eps) {
      if (trace != -1){
        cat("Theta is not symmetric.", fill=TRUE)
      }
      return(Inf)
    }
  }
  if (any(rowSums(abs(aa$th)) > aa$bp + aa$bn + 1e-5)) {
    cat("hierarchy violated.", fill=TRUE)
    return(Inf)
  }
  if (any(aa$bp < -1e-5)||any(aa$bn < -1e-5)) {
    cat("Non-negative of bp or bn violated.", fill=TRUE)
    return(Inf)
  }
  if (aa$diagonal == FALSE)
    if (any(abs(diag(aa$th)) > 1e-8)) {
      cat("Zero diagonal violated.", fill=TRUE)
      return(Inf)
    }
  if (is.null(zz)) {
    zz <- as.numeric(compute.interactions.c(x, diagonal=aa$diagonal))
  }
  if (is.null(xnum)) xnum <- as.numeric(x)
  r <- y - Compute.yhat.c(xnum, zz, aa)   # residuals
  # diagonal entries of th are penalized twice (once in sum(abs(th))/2,
  # once explicitly), matching the model's penalty definition
  pen <- lam.l1 * sum(aa$bp + aa$bn) + lam.l1 * sum(abs(aa$th))/2 + lam.l1 * sum(abs(diag(aa$th)))/2
  pen <- pen + lam.l2 * (sum(aa$bp^2) + sum(aa$bn^2) + sum(aa$th^2))
  sum(r^2)/2 + pen
}
# Evaluate the logistic-loss hierNet objective at parameter estimate `aa`
# (a list with b0, bp, bn, th, diagonal).  Returns Inf when `aa` is
# infeasible: nonzero diagonal with diagonal=FALSE, asymmetric Theta in the
# strong case, violated hierarchy, or negative bp/bn.
#
#   x, y           : design matrix and 0/1 response.
#   lam.l1, lam.l2 : L1/L2 penalty parameters.
#   xnum, zz       : optional flattened x and interaction vector
#                    (recomputed when NULL).
Objective.logistic <- function(aa, x, y, lam.l1, lam.l2, xnum=NULL, zz=NULL, strong=TRUE, sym.eps=1e-3, trace = -1) {
  stopifnot(y %in% c(0,1))
  stopifnot("diagonal" %in% names(aa))
  if (aa$diagonal == FALSE)
    if (any(abs(diag(aa$th)) > 1e-8)) {
      cat("Diagonal of Theta is nonzero.", fill=TRUE)
      return(Inf)
    }
  if (strong) {
    if (max(aa$th-t(aa$th)) > sym.eps) {
      if (trace != -1){
        cat("Theta is not symmetric.", fill=TRUE)
      }
      return(Inf)
    }
  }
  if (any(rowSums(abs(aa$th)) > aa$bp + aa$bn + 1e-5)) {
    cat("hierarchy violated.", fill=TRUE)
    return(Inf)
  }
  # BUG FIX: the tolerance was -1e5 (i.e. -100000), which effectively
  # disabled the non-negativity check; use -1e-5 to match Objective().
  if (any(aa$bp < -1e-5)||any(aa$bn < -1e-5)) {
    cat("Non-negative of bp or bn violated.", fill=TRUE)
    return(Inf)
  }
  if (is.null(zz)) {
    zz <- as.numeric(scale(compute.interactions.c(x, diagonal=aa$diagonal), center=TRUE, scale=FALSE))
  }
  if (is.matrix(zz)) zz <- as.numeric(zz)
  if (is.null(xnum)) xnum <- as.numeric(x)
  phat <- Compute.phat.c(xnum, zz, aa)
  # negative Bernoulli log-likelihood
  loss <- -sum(y*log(phat)) - sum((1-y)*log(1-phat))
  # diagonal of th is penalized twice, as in Objective()
  pen <- lam.l1 * sum(aa$bp + aa$bn) + lam.l1 * sum(abs(aa$th))/2 + lam.l1 * sum(abs(diag(aa$th)))/2
  pen <- pen + lam.l2 * (sum(aa$bp^2) + sum(aa$bn^2) + sum(aa$th^2))
  loss + pen
}
compute.interactions.c <- function(x, diagonal=TRUE) {
  # Returns (uncentered) n by cp2 matrix of interactions.
  # The columns of zz are in standard order (11), 12,13,14,...,(22),23,...
  # z's (jk)th column is x_j * x_k
  n <- nrow(x)
  p <- ncol(x)
  cp2 <- p * (p - 1) / 2   # number of off-diagonal pairs
  if (diagonal) {
    cp2 <- cp2 + p         # plus the squared terms x_j^2
    out <- .C("ComputeInteractionsWithDiagWithIndices",
              as.double(x),
              as.integer(n),
              as.integer(p),
              z=rep(0, n * cp2),
              i1=as.integer(rep(0, cp2)),
              i2=as.integer(rep(0, cp2)), PACKAGE="hierNet")
  }
  else {
    out <- .C("ComputeInteractionsWithIndices",
              as.double(x),
              as.integer(n),
              as.integer(p),
              z=rep(0, n * cp2),
              i1=as.integer(rep(0, cp2)),
              i2=as.integer(rep(0, cp2)), PACKAGE="hierNet")
  }
  z <- matrix(out$z, n, cp2)
  rownames(z) <- rownames(x)
  # label columns "j:k" (or "name_j:name_k") using the index vectors
  # i1/i2 filled in by the C routine
  if (is.null(colnames(x))) {
    colnames(z) <- paste(out$i1, out$i2, sep=":")
  }
  else {
    colnames(z) <- paste(colnames(x)[out$i1], colnames(x)[out$i2], sep=":")
  }
  z
}
compute.full.interactions.c <- function(x) {
  # Returns (uncentered) n by p^2 matrix of interactions.
  # The columns of zz are in standard order 11,12,13,14,...,23,...
  # z's (jk)th column is x_j * x_k
  # Unlike compute.interactions.c, every ordered pair (j,k) gets a column.
  n <- nrow(x)
  p <- ncol(x)
  out <- .C("ComputeFullInteractions",
            as.double(x),
            as.integer(n),
            as.integer(p),
            z=rep(0, n * p^2),
            PACKAGE="hierNet")
  matrix(out$z, n, p^2)
}
Compute.yhat.c <- function(xnum, zz, aa) {
  # Linear predictor x %*% (bp - bn) plus interaction contributions,
  # evaluated by the compiled routine.
  # aa: list containing bp, bn, th, diagonal
  # note: zz is the n by cp2 matrix, whereas z is the n by p^2 one.
  p <- length(aa$bp)
  n <- length(xnum) / p
  stopifnot(n==round(n))   # xnum length must be an exact multiple of p
  stopifnot("diagonal" %in% names(aa))
  # zz length fixes whether diagonal (x_j^2) columns are present
  if (aa$diagonal) stopifnot(length(zz) == n * (choose(p,2) + p))
  else stopifnot(length(zz) == n * choose(p,2))
  out <- .C("compute_yhat_zz_R",
            xnum,
            as.integer(n),
            as.integer(p),
            zz,
            as.integer(aa$diagonal),
            as.double(aa$th),
            aa$bp,
            aa$bn,
            yhat=rep(0, n),
            PACKAGE="hierNet")
  out$yhat
}
Compute.phat.c <- function(xnum, zz, aa) {
  # Fitted logistic probabilities for the hierNet model, evaluated by the
  # compiled routine (includes the intercept b0).
  # aa: list containing b0, bp, bn, th
  # note: zz is the n by cp2 matrix, whereas z is the n by p^2 one.
  stopifnot(c("b0","bp","bn","th","diagonal") %in% names(aa))
  p <- length(aa$bp)
  n <- length(xnum) / p
  if (is.matrix(xnum)) xnum <- as.numeric(xnum)
  stopifnot(n == round(n))   # xnum length must be an exact multiple of p
  # zz length fixes whether diagonal (x_j^2) columns are present
  if (aa$diagonal) stopifnot(length(zz) == n * (choose(p,2) + p))
  else stopifnot(length(zz) == n * choose(p,2))
  # C entry point signature, for reference:
  #void compute_phat_zz_R(double *x, int *n, int *p, double *zz, int *diagonal,
  # double *b0, double *th, double *bp, double *bn, double *phat) {
  out <- .C("compute_phat_zz_R",
            xnum,
            as.integer(n),
            as.integer(p),
            zz,
            as.integer(aa$diagonal),
            as.double(aa$b0),
            as.double(aa$th),
            aa$bp,
            aa$bn,
            phat=rep(0, n),
            PACKAGE="hierNet")
  out$phat
}
ggdescent.c <- function(x, xnum, zz, y, lam.l1, lam.l2, diagonal, rho, V, stepsize, backtrack=0.2, maxiter=100,
                        tol=1e-5, aa=NULL, trace=1) {
  # See ADMM4 pdf for the problem this solves.
  #
  # x, xnum, zz, y: data (note: zz is a length n*cp2 vector, not a matrix) xnum is x as a vector
  # lam.l1: l1-penalty parameter
  # lam.l2: l2-penalty parameter
  # rho: admm parameter
  # V: see ADMM4 pdf
  # stepsize: step size to start backtracking with
  # backtrack: factor by which step is reduced on each backtrack.
  # maxiter: number of generalized gradient steps to take.
  # tol: stop gg descent if change in objective is below tol.
  # aa: initial estimate of (th, bp, bn)
  # trace: how verbose to be
  #
  # C entry point signature, for reference:
  # void ggdescent_R(double *x, int *n, int *p, double *zz, int *diagonal, double *y,
  # double *lamL1, double*lamL2, double *rho, double *V, int *maxiter,
  # double *curth, double *curbp, double *curbn,
  # double *t, int *stepwindow, double *backtrack, double *tol, int *trace,
  # double *th, double *bp, double *bn) {
  n <- length(y)
  p <- ncol(x)
  stepwindow <- 10   # step-size history window used by the C backtracker
  if (is.null(aa)) aa <- list(th=matrix(0,p,p), bp=rep(0,p), bn=rep(0,p))
  # NOTE(review): stepsize/backtrack/tol/y are passed without as.double();
  # callers supply doubles in practice -- confirm if exposing externally.
  out <- .C("ggdescent_R",
            xnum,
            as.integer(n),
            as.integer(p),
            zz,
            as.integer(diagonal),
            y,
            as.double(lam.l1),
            as.double(lam.l2),
            as.double(rho),
            as.double(V),
            as.integer(maxiter),
            as.double(aa$th),
            aa$bp,
            aa$bn,
            stepsize,
            as.integer(stepwindow),
            backtrack,
            tol,
            as.integer(trace),
            th=rep(0, p*p),
            bp=rep(0, p),
            bn=rep(0, p),
            PACKAGE="hierNet")
  # return only the optimization variables (fresh copies from C)
  list(bp=out$bp, bn=out$bn, th=matrix(out$th, p, p))
}
# Fit the logistic-regression hierNet at a single lambda.  Mirrors hierNet()
# but with Bernoulli loss and an intercept b0.  Returns an object of class
# "hierNet" with type = "logistic" (components b0, bp, bn, th, obj plus the
# centering/scaling statistics replayed by predict.hierNet).
hierNet.logistic <- function(x, y, lam, delta=1e-8, diagonal=TRUE, strong=FALSE, aa=NULL, zz=NULL, center=TRUE,
                             stand.main=TRUE, stand.int=FALSE,
                             rho=nrow(x), niter=100, sym.eps=1e-3,# ADMM params
                             step=1, maxiter=2000, backtrack=0.2, tol=1e-5, # GG descent params
                             trace=1) {
  # Solves the logistic regression hiernet. Returns (b0, bp, bn, th)
  this.call <- match.call()
  n <- nrow(x)
  p <- ncol(x)
  stopifnot(y %in% c(0,1))
  stopifnot(length(y) == n, lam >= 0, delta >= 0, delta <= 1)
  stopifnot(!is.null(step) && !is.null(maxiter))
  stopifnot(class(lam) == "numeric")
  stopifnot(class(delta) == "numeric")
  stopifnot(class(step) == "numeric", step > 0, maxiter > 0)
  stopifnot(is.finite(x), is.finite(y), is.finite(lam), is.finite(delta))
  lam.l1 <- lam * (1 - delta)
  lam.l2 <- lam * delta
  if (!center)
    cat("WARNING: center=FALSE should almost never be used. This option is available for special uses only.", fill = TRUE)
  # center and (maybe) scale x, keeping the statistics for prediction
  x <- scale(x, center = center, scale = stand.main)
  mx <- attr(x, "scaled:center")
  sx <- attr(x, "scaled:scale")
  if (is.null(aa)) aa <- list(b0=0, bp=rep(0, p), bn=rep(0, p), th=matrix(0, p, p), diagonal=diagonal)
  if (is.null(zz)) {
    if (trace > 0) cat("Computing zz...", fill=TRUE)
    zz <- compute.interactions.c(x, diagonal=diagonal)
  }
  if (is.matrix(zz)) {
    zz <- scale(zz, center=center, scale=stand.int)
    mzz <- attr(zz, "scaled:center")
    szz <- attr(zz, "scaled:scale")
    zz <- as.numeric(zz)
  } else {
    # non-matrix zz: assumed already centered/flattened
    mzz <- szz <- NULL
    #cat("Provided zz is not a matrix, so it's assumed to be already centered.", fill = TRUE)
  }
  xnum <- as.numeric(x)
  if (strong) {
    # strong hierarchy -- use ADMM4 (logistic regression version)
    stopifnot(is.numeric(rho), is.finite(rho))
    out <- admm4.logistic(x, xnum, y, lam.l1, lam.l2, diagonal=diagonal, zz=zz,
                          rho=rho, niter=niter, aa=aa, sym.eps=sym.eps, # ADMM params
                          stepsize=step, backtrack=backtrack, maxiter=maxiter, tol=tol, # GG params
                          trace=trace)
    ii <- out$bp + out$bn == 0
    # note out$th[ii, ] = 0 since weak hierarchy holds for sure
    sumii <- sum(ii)
    if (sumii > 0 && sumii < p) {
      thr <- max(abs(out$th[!ii, ii]))
      if (thr > 0) {
        cat(" thr = ",thr, fill=TRUE)
        if (thr > 1e-3)
          warning("Had to change ADMM's 'th' by more than 0.001 to make strong hier hold! Increase niter (and/or rho). ")
        # BUG FIX: threshold the fitted object `out`, not the warm start
        # `aa` -- the original zeroed aa$th, so the returned fit could still
        # violate strong hierarchy.
        out$th[abs(out$th) <= thr] <- 0
      }
    }
  } else {
    # weak hierarchy: single generalized-gradient-descent run (rho=0, V=0)
    out <- ggdescent.logistic(xnum=xnum, zz=zz, y=y, lam.l1=lam.l1, lam.l2=lam.l2, diagonal=diagonal, rho=0, V=matrix(0,p,p),
                              stepsize=step, backtrack=backtrack, maxiter=maxiter,
                              tol=tol, aa=aa, trace=trace)
  }
  # record fit metadata on the returned object
  out$call <- this.call
  out$lam <- lam
  out$delta <- delta
  out$type <- "logistic"
  out$diagonal <- diagonal
  out$strong <- strong
  if (strong) {
    # ADMM parameters:
    out$rho <- rho
    out$niter <- niter
    out$sym.eps <- sym.eps
  }
  out$step <- step
  out$maxiter <- maxiter
  out$backtrack <- backtrack
  out$tol <- tol
  out$obj <- critf.logistic(x, y, lam.l1, lam.l2, out$b0, out$bp, out$bn, out$th)
  # centering/scaling statistics for predict.hierNet
  out$mx <- mx
  out$my <- 0
  out$sx <- sx
  out$mzz <- mzz
  # CONSISTENCY FIX: also store szz (the gaussian hierNet does); without it,
  # predict.hierNet cannot undo interaction scaling when stand.int=TRUE.
  out$szz <- szz
  class(out) <- "hierNet"
  return(out)
}
# ADMM solver for the strong-hierarchy hierNet problem with logistic loss.
# Alternates a generalized-gradient step in (b0, th, bp, bn) with updates of
# the symmetrized auxiliary variable tt and dual variable u.  Returns the
# final iterate `aa` with the per-iteration objective trace in aa$obj.
admm4.logistic <- function(x, xnum, y, lam.l1, lam.l2, diagonal, zz=NULL, rho=10, niter, aa=NULL, sym.eps=1e-3, trace=1, ...) {
  # Note: xnum is the matrix x as a numeric. Both are passed to avoid having
  # to call as.numeric too many times.
  p <- ncol(x)
  if (is.null(zz)) {
    if (trace > 0) cat("Computing zz...", fill=TRUE)
    zz <- as.numeric(compute.interactions.c(x, diagonal=diagonal))
  }
  else if ("matrix" %in% class(zz)) zz <- as.numeric(zz)
  if (is.null(aa)) {
    # BUG FIX: the cold-start list must include b0 -- Objective.logistic and
    # Compute.phat.c require it, so the original aa=NULL path always failed.
    aa <- list(b0=0,
               u=matrix(0, p, p),
               th=matrix(0, p, p),
               bp=rep(0, p),
               bn=rep(0, p),
               tt=matrix(0, p, p),
               diagonal=diagonal)
  }
  if (is.null(aa$tt) || is.null(aa$u)) {
    # warm start without ADMM state: symmetrize th for tt, zero the dual
    aa$tt <- 0.5 * (aa$th + t(aa$th))
    aa$u <- matrix(0, p, p)
  }
  obj <- Objective.logistic(aa=aa, x=x, y=y, lam.l1=lam.l1, lam.l2=lam.l2, xnum=xnum, zz=zz, strong=TRUE, sym.eps=sym.eps, trace = trace-1)
  for (i in seq(niter)) {
    if (trace > 0) cat(i, " ")
    V <- aa$u - rho * aa$tt   # linear term of the inner subproblem
    gg <- ggdescent.logistic(xnum, zz, y, lam.l1=lam.l1, lam.l2=lam.l2, diagonal=diagonal, rho, V, trace=trace-1, aa=aa, ...)
    # BUG FIX: copy the intercept back as well -- the original dropped
    # gg$b0, so aa$b0 never moved off its warm-start value and the returned
    # fit (and the objective trace) used a stale intercept.
    aa$b0 <- gg$b0
    aa$th <- gg$th
    aa$bp <- gg$bp
    aa$bn <- gg$bn
    # tt-update (symmetrized average) and dual ascent on th - tt
    aa$tt <- (aa$th + t(aa$th)) / 2 + (aa$u + t(aa$u)) / (2 * rho)
    aa$u <- aa$u + rho * (aa$th - aa$tt)
    obj <- c(obj, Objective.logistic(aa=aa, x=x, y=y, lam.l1=lam.l1, lam.l2=lam.l2, xnum=xnum, zz=zz, strong=TRUE, sym.eps=sym.eps, trace = trace-1))
    if (trace > 0) cat(obj[i+1], fill=TRUE)
  }
  if (max(abs(aa$th-t(aa$th))) > sym.eps)
    cat("Attention: th not symmetric within the desired sym.eps. Run ADMM for more iterations. And try increasing rho.")
  aa$obj <- obj
  aa
}
ggdescent.logistic <- function(xnum, zz, y, lam.l1, lam.l2, diagonal, rho, V, stepsize, backtrack=0.2, maxiter=100,
                               tol=1e-5, aa=NULL, trace=1) {
  # See ADMM4 pdf and logistic.pdf for the problem this solves.
  #
  # xnum, zz, y: data (note: zz is a length n*cp2 vector, not a matrix) xnum is x as a (n*p)-vector
  # lam.l1: l1-penalty parameter
  # lam.l2: l2-penalty parameter
  # rho: admm parameter
  # V: see ADMM4 pdf
  # stepsize: step size to start backtracking with
  # backtrack: factor by which step is reduced on each backtrack.
  # maxiter: number of generalized gradient steps to take.
  # tol: stop gg descent if change in objective is below tol.
  # aa: initial estimate of (b0, th, bp, bn)
  # trace: how verbose to be
  #
  # C entry point signature, for reference:
  #void ggdescent_logistic_R(double *x, int *n, int *p, double *zz, int * diagonal, double *y,
  # double *lamL1, double *lamL2, double *rho, double *V, int *maxiter,
  # double *curb0, double *curth, double *curbp, double *curbn,
  # double *t, int *stepwindow, double *backtrack, double *tol, int *trace,
  # double *b0, double *th, double *bp, double *bn) {
  n <- length(y)
  p <- length(xnum) / n
  stopifnot(p == round(p))   # xnum length must be an exact multiple of n
  # zz length fixes whether diagonal (x_j^2) columns are present
  if (diagonal) stopifnot(length(zz) == n * (choose(p,2)+p))
  else stopifnot(length(zz) == n * choose(p,2))
  stepwindow <- 10   # step-size history window used by the C backtracker
  if (is.null(aa)) aa <- list(b0=0, th=matrix(0,p,p), bp=rep(0,p), bn=rep(0,p))
  out <- .C("ggdescent_logistic_R",
            xnum,
            as.integer(n),
            as.integer(p),
            zz,
            as.integer(diagonal),
            as.double(y), # convert from integer to double
            as.double(lam.l1),
            as.double(lam.l2),
            as.double(rho),
            as.double(V),
            as.integer(maxiter),
            as.double(aa$b0),
            as.double(aa$th),
            aa$bp,
            aa$bn,
            as.double(stepsize),
            as.integer(stepwindow),
            as.double(backtrack),
            as.double(tol),
            as.integer(trace),
            b0=as.double(0),
            th=rep(0, p*p),
            bp=rep(0, p),
            bn=rep(0, p),
            PACKAGE="hierNet")
  # return only the optimization variables (fresh copies from C)
  list(b0=out$b0, bp=out$bp, bn=out$bn, th=matrix(out$th, p, p))
}
# Evaluate the augmented Lagrangian of the ADMM formulation at iterate `aa`
# (a list with th, bp, bn, tt, u, diagonal).  zz is the flattened
# interaction vector (not a matrix).  Returns Inf when `aa` is infeasible.
ADMM4.Lagrangian <- function(aa, xnum, zz, y, lam.l1, lam.l2, diagonal, rho) {
  if (aa$diagonal == FALSE)
    if (any(abs(diag(aa$th)) > 1e-8)) {
      cat("Diagonal of Theta is nonzero.", fill=TRUE)
      return(Inf)
    }
  # tt is the symmetrized copy of th; it must be (numerically) symmetric
  if (max(aa$tt-t(aa$tt)) > 1e-8) {
    # (fixed "symmetrik" typo in this diagnostic message)
    cat("Theta is not symmetric.", fill=TRUE)
    return(Inf)
  }
  if (any(rowSums(abs(aa$th)) > aa$bp + aa$bn + 1e-5)) {
    cat("hierarchy violated.", fill=TRUE)
    return(Inf)
  }
  if (any(aa$bp < -1e-5)||any(aa$bn < -1e-5)) {
    cat("Non-negative of bp or bn violated.", fill=TRUE)
    return(Inf)
  }
  # second diagonal check against the caller-supplied flag (looser tolerance)
  if (diagonal == FALSE)
    if (any(abs(diag(aa$th)) > 1e-5)) {
      cat("Zero diagonal violated.", fill=TRUE)
      return(Inf)
    }
  r <- y - Compute.yhat.c(xnum, zz, aa)
  # augmented term: <u, th - tt> + (rho/2) * ||th - tt||_F^2
  # (removed an unused local V = u - rho*tt left over from an alternative
  # formulation, kept below in a comment for reference)
  admm <- sum(aa$u*(aa$th-aa$tt)) + (rho/2) * sum((aa$th-aa$tt)^2)
  #admm <- sum(V*aa$th) + (rho/2) * sum(aa$th^2) + (rho/2)*sum(aa$tt^2) - sum(aa$u*aa$tt)
  pen <- lam.l1 * (sum(aa$bp + aa$bn) + sum(abs(aa$th))/2)
  pen <- pen + lam.l2 * (sum(aa$bp^2) + sum(aa$bn^2) + sum(aa$th^2))
  sum(r^2)/2 + pen + admm
}
predict.hierNet.logistic <- function(object, newx, newzz=NULL, ...) {
  # All prediction logic lives in predict.hierNet, which branches on
  # object$type == "logistic" to return list(prob, yhat).
  predict.hierNet(object = object, newx = newx, newzz = newzz, ...)
}
# Penalized logistic-loss criterion for a hierNet fit: negative Bernoulli
# log-likelihood of the linear-plus-quadratic predictor, plus the elastic-net
# penalty (the diagonal of th is penalized twice, matching Objective()).
#
#   x              : n x p (centered/scaled) design matrix.
#   y              : 0/1 response.
#   lam.l1, lam.l2 : L1/L2 penalty parameters.
#   b0, bp, bn, th : intercept, main-effect parts, interaction matrix.
# Returns the scalar criterion value.
critf.logistic <- function(x, y, lam.l1, lam.l2, b0, bp, bn, th) {
  # PERF: rowSums((x %*% th) * x) computes diag(x %*% th %*% t(x)) without
  # forming the full n x n matrix (O(n p^2) instead of O(n^2 p)).
  yhat <- b0 + x %*% (bp - bn) + 0.5 * rowSums((x %*% th) * x)
  p <- 1 / (1 + exp(-yhat))
  val <- -sum(y * log(p) + (1 - y) * log(1 - p))
  val <- val + lam.l1 * sum(bp + bn) + lam.l1 * sum(abs(th))/2 + lam.l1 * sum(abs(diag(th)))/2
  val <- val + lam.l2 * (sum(bp^2) + sum(bn^2) + sum(th^2))
  return(val)
}
twonorm <- function(x) {sqrt(sum(x * x))}
# Fit the logistic hierNet over a decreasing sequence of lambda values,
# warm-starting each fit from the previous one.  x and zz are centered and
# (maybe) scaled once here; hierNet.logistic() is then called with
# stand.main=FALSE / stand.int=FALSE.  Returns a "hierNet.path" object with
# type = "logistic" (adds the per-lambda intercept vector b0).
hierNet.logistic.path <- function (x, y, lamlist=NULL, delta=1e-8, minlam=NULL, maxlam=NULL, flmin=.01, nlam=20,
                                   diagonal=TRUE, strong=FALSE, aa=NULL,
                                   zz=NULL, stand.main=TRUE, stand.int=FALSE,
                                   rho=nrow(x), niter=100, sym.eps=1e-3,# ADMM params
                                   step=1, maxiter=2000, backtrack=0.2, tol=1e-5, # GG params
                                   trace=0) {
  this.call=match.call()
  stopifnot(y %in% c(0, 1))
  x <- scale(x, center=TRUE, scale=stand.main)
  mx <- attr(x, "scaled:center")
  sx <- attr(x, "scaled:scale")
  if (is.null(maxlam)) {
    if (!is.null(minlam)) stop("Cannot have maxlam=NULL if minlam is non-null.")
    # default largest lambda: max absolute inner product of a column with y
    maxlam <- max(abs(t(x) %*% y))
    minlam <- maxlam * flmin
  }
  if (is.null(minlam)) minlam <- maxlam * flmin
  # log-spaced grid from maxlam down to minlam
  if (is.null(lamlist))
    lamlist <- exp(seq(log(maxlam), log(minlam), length=nlam))
  nlam <- length(lamlist)
  if (is.null(zz))
    zz <- compute.interactions.c(x, diagonal=diagonal)
  else stopifnot(is.matrix(zz))
  zz <- scale(zz, center=TRUE, scale=stand.int)
  mzz <- attr(zz, "scaled:center")
  szz <- attr(zz, "scaled:scale")
  zz <- as.numeric(zz)
  p <- ncol(x)
  cp2 <- choose(p, 2)
  b0 <- rep(NA, nlam)
  bp <- bn <- matrix(NA, nrow=p, ncol=nlam)
  th <- array(NA, c(p, p, nlam))
  obj <- rep(NA, nlam)
  # NOTE(review): this discards any user-supplied warm start `aa` -- confirm
  # whether the `aa` argument is intended to seed the first fit.
  aa <- NULL
  for (i in seq(nlam)) {
    if (trace != 0) {
      cat(c("i,lam=", i, round(lamlist[i],2)), fill=TRUE)
    }
    # warm-start each fit from the previous lambda's solution
    aa <- hierNet.logistic(x, y, lam=lamlist[i], delta=delta, diagonal=diagonal, strong=strong,
                           aa=aa, zz=zz, stand.main=FALSE, stand.int=FALSE,
                           rho=rho, niter=niter, sym.eps=sym.eps,
                           step=step, maxiter=maxiter, backtrack=backtrack, tol=tol,
                           trace=trace)
    b0[i] <- aa$b0
    bp[, i] <- aa$bp
    bn[, i] <- aa$bn
    th[, , i] <- aa$th
    obj[i] <- aa$obj
  }
  dimnames(bp) <- dimnames(bn) <- list(as.character(1:p), NULL)
  dimnames(th) <- list(as.character(1:p), as.character(1:p), NULL)
  out <- list(b0=b0, bp=bp, bn=bn, th=th, obj=obj, lamlist=lamlist, delta=delta,
              mx=mx, my=0, sx=sx, mzz=mzz, szz=szz,
              type="logistic", diagonal=diagonal, strong=strong,
              step=step, maxiter=maxiter, backtrack=backtrack, tol=tol,
              call=this.call)
  if (strong) {
    # ADMM parameters:
    # (rho is taken from the last fit; the others echo the arguments)
    out$rho <- aa$rho
    out$niter <- niter
    out$sym.eps <- sym.eps
  }
  class(out) <- "hierNet.path"
  out
}
balanced.folds <- function(y, nfolds=min(min(table(y)), 10)) {
  # Split observation indices into `nfolds` cross-validation folds that are
  # balanced with respect to the class labels in `y` (each fold receives a
  # proportional share of every class).
  #
  #   y      : vector of class labels (anything table()/split() accept).
  #   nfolds : requested fold count; capped at the largest class size.
  # Returns a list of length nfolds; each element is a vector of indices
  # into y.
  totals <- table(y)
  fmax <- max(totals)
  nfolds <- min(nfolds, fmax)
  # makes no sense to have more folds than the max class size
  # (removed an unused `folds` list that the original created and never used)
  # seq_along/seq_len (not seq(y) / 1:n) are safe for degenerate lengths,
  # e.g. a length-one y or a factor level with zero observed counts
  yids <- split(seq_along(y), y)
  # a convenient way to get the ids in a list, split by class
  ### Make a big matrix, with enough rows to get in all the folds per class
  bigmat <- matrix(NA, ceiling(fmax/nfolds) * nfolds, length(totals))
  for (i in seq_along(totals)) {
    bigmat[seq_len(totals[i]), i] <- sample(yids[[i]])
  }
  smallmat <- matrix(bigmat, nrow = nfolds) # reshape the matrix
  ### Now do a clever sort to mix up the NAs
  smallmat <- permute.rows(t(smallmat))
  # collect the non-NA entries of each column as one fold
  res <- vector("list", nfolds)
  for (j in seq_len(nfolds)) {
    jj <- !is.na(smallmat[, j])
    res[[j]] <- smallmat[jj, j]
  }
  return(res)
}
permute.rows <- function(x) {
  # Independently shuffle the entries within each row of matrix x.
  # Implementation: draw one uniform key per cell, offset the keys so that
  # all of row i's keys sort into block i, then refill the matrix row-wise.
  n <- nrow(x)
  p <- ncol(x)
  keys <- runif(length(x)) + rep(seq_len(n) * 10, rep(p, n))
  matrix(t(x)[order(keys)], n, p, byrow = TRUE)
}
hierNet.cv <- function(fit, x, y, nfolds=10, folds=NULL, trace=0) {
  # K-fold cross-validation over the lambda path in `fit` (a hierNet.path
  # object, gaussian or logistic): refit the path on each training split
  # with the same settings and record held-out prediction error.
  # Returns a "hierNet.cv" object with cv.err/cv.se per lambda, the
  # minimizing lamhat, and the 1-SE-rule lamhat.1se.
  this.call <- match.call()
  stopifnot(class(fit) == "hierNet.path")
  # squared error for gaussian fits, 0/1 misclassification for logistic
  if(fit$type=="gaussian"){errfun=function(y,yhat){(y-yhat)^2}}
  if(fit$type=="logistic"){errfun=function(y,yhat){1*(y!=yhat)}}
  n <- length(y)
  if(is.null(folds)) {
    folds <- split(sample(1:n), rep(1:nfolds, length = n))
  }
  else {
    stopifnot(class(folds)=="list")
    nfolds <- length(folds)
  }
  lamlist=fit$lamlist
  # get whether fit was standardized based on fit$sx and fit$szz...
  if (is.null(fit$mx)) stop("hierNet object was not centered. hierNet.cv has not been written for this (unusual) case.")
  stand.main <- !is.null(fit$sx)
  stand.int <- !is.null(fit$szz)
  n.lamlist <- length(lamlist) ### Set up the data structures
  size <- double(n.lamlist)   # NOTE(review): unused
  err2=matrix(NA,nrow=nfolds,ncol=length(lamlist))
  for(ii in 1:nfolds) {
    cat("Fold", ii, ":")
    if(fit$type=="gaussian"){
      a <- hierNet.path(x[-folds[[ii]],],y=y[-folds[[ii]]],
                        lamlist=lamlist, delta=fit$delta, diagonal=fit$diagonal, strong=fit$strong, trace=trace,
                        stand.main=stand.main, stand.int=stand.int,
                        rho=fit$rho, niter=fit$niter, sym.eps=fit$sym.eps, # ADMM parameters (which will be NULL if strong=F)
                        step=fit$step, maxiter=fit$maxiter, backtrack=fit$backtrack, tol=fit$tol) # GG descent params
      yhatt=predict.hierNet(a,newx=x[folds[[ii]],])
    }
    if(fit$type=="logistic"){
      a <- hierNet.logistic.path(x[-folds[[ii]],],y=y[-folds[[ii]]],
                                 lamlist=lamlist, delta=fit$delta, diagonal=fit$diagonal, strong=fit$strong,
                                 trace=trace, stand.main=stand.main, stand.int=stand.int,
                                 rho=fit$rho, niter=fit$niter, sym.eps=fit$sym.eps, # ADMM parameters (which will be NULL if strong=F)
                                 step=fit$step, maxiter=fit$maxiter, backtrack=fit$backtrack, tol=fit$tol) # GG descent params
      yhatt=predict.hierNet.logistic(a,newx=x[folds[[ii]],])$yhat
    }
    # replicate held-out y across the lambda columns for vectorized errors.
    # NOTE(review): errfun is called as errfun(yhatt, temp) although its
    # formals are (y, yhat); harmless because both error functions above are
    # symmetric in their arguments.
    temp=matrix(y[folds[[ii]]],nrow=length(folds[[ii]]),ncol=n.lamlist)
    err2[ii,]=colMeans(errfun(yhatt,temp))
    cat("\n")
  }
  errm=colMeans(err2)
  errse=sqrt(apply(err2,2,var)/nfolds)
  o=which.min(errm)
  lamhat=lamlist[o]
  # 1-SE rule: largest lambda whose error is within one SE of the minimum
  oo=errm<= errm[o]+errse[o]
  lamhat.1se=lamlist[oo & lamlist>=lamhat][1]
  nonzero=colSums(fit$bp-fit$bn!=0) + apply(fit$th!=0, 3, function(a) sum(diag(a)) + sum((a+t(a)!=0)[upper.tri(a)]))
  obj <- list(lamlist=lamlist, cv.err=errm,cv.se=errse,lamhat=lamhat, lamhat.1se=lamhat.1se,
              nonzero=nonzero, folds=folds,
              call = this.call)
  class(obj) <- "hierNet.cv"
  obj
}
plot.hierNet.cv <- function(x, ...) {
  # Plot the CV-error curve against log(lambda) with +/- 1 SE error bars,
  # the number of nonzero features along the top axis, and dotted vertical
  # lines at lamhat and lamhat.1se.
  par(mar = c(5, 5, 5, 1))   # extra top margin for the feature-count axis
  yrang=range(c(x$cv.err-x$cv.se,x$cv.err+x$cv.se))
  plot(log(x$lamlist), x$cv.err, xlab="log(lambda)",
       ylab = "Cross-validation Error", type="n",ylim=yrang)
  axis(3, at = log(x$lamlist), labels = paste(x$nonzero), srt = 90, adj = 0)
  mtext("Number of features", 3, 4, cex = 1.2)
  # NOTE(review): y tick positions are hard-coded to 0..0.8 and may not
  # cover the plotted range for large CV errors -- confirm intended.
  axis(2, at = c(0, 0.2, 0.4, 0.6, 0.8))
  error.bars(log(x$lamlist), x$cv.err - x$cv.se, x$cv.err + x$cv.se, width = 0.01, col = "darkgrey")
  points(log(x$lamlist), x$cv.err, col=2, pch=19)
  abline(v=log(x$lamhat), lty=3)
  abline(v=log(x$lamhat.1se), lty=3)
  invisible()
}
error.bars <- function(x, upper, lower, width = 0.02, ...) {
  # Draw vertical error bars with horizontal end caps on the current plot.
  #   x            : x positions of the bars.
  #   upper, lower : y coordinates of the bar ends.
  #   width        : cap half-width as a fraction of the x-range.
  #   ...          : forwarded to segments() (e.g. col, lwd).
  # Returns range(upper, lower), like the original.
  half <- diff(range(x)) * width
  segments(x, upper, x, lower, ...)                 # vertical stems
  segments(x - half, upper, x + half, upper, ...)   # top caps
  segments(x - half, lower, x + half, lower, ...)   # bottom caps
  range(upper, lower)
}
hierNet.varimp <- function(fit,x,y, ...) {
  # Leave-one-predictor-out variable importance for a fitted hierNet model:
  # for each predictor j involved in the fit, refit without column j
  # (warm-started from the current fit) and report the relative increase in
  # error, (rss_j - rss) / rss_j.  Returns a matrix sorted by importance.
  # NOTE: uses 0.5 cutoff for logistic case
  lam=fit$lam
  if(fit$type=="gaussian"){errfun=function(y,yhat){(y-yhat)^2}}
  if(fit$type=="logistic"){
    # binomial deviance contribution, guarding the 0*log(0) cases
    errfun=function(y,yhat){
      term1=y*log(yhat);term1[yhat==0]=0
      term2=(1-y)*log(1-yhat);term2[yhat==1]=0
      val=-sum(term1+term2)
      return(val)
    }}
  # NOTE(review): for logistic fits predict() returns list(prob, yhat);
  # errfun is applied to that list here -- verify (probably wants $prob).
  yhat=predict(fit,x)
  rss=sum(errfun(y,yhat))
  # predictors involved via a main effect or any interaction
  varsum=fit$bp-fit$bn+rowSums(abs(fit$th))
  oo=which(abs(varsum)>1e-6)
  imp=rss2=rep(NA,ncol(x))
  for(j in oo){
    cat(j)
    # drop predictor j, keeping the rest of the fit as a warm start
    fit0=fit;fit0$bp=fit$bp[-j];fit0$bn=fit$bn[-j];fit0$th=fit$th[-j,-j]
    if(fit$type=="gaussian"){ fit2=hierNet(x[,-j],y,lam,delta=fit$delta,diagonal=fit$diagonal,aa=fit0)}
    if(fit$type=="logistic"){ fit2=hierNet.logistic(x[,-j],y,lam,delta=fit$delta,diagonal=fit$diagonal,aa=fit0)}
    yhat2=predict(fit2,x[,-j])
    rss2[j]=sum(errfun(y,yhat2))
    imp[j]=(rss2[j]-rss)/rss2[j]
  }
  # NOTE(review): when oo is empty, imp[-oo] selects nothing, so imp stays
  # all-NA rather than being zeroed -- confirm intended.
  imp[-oo]=0
  res=cbind(1:ncol(x),round(imp,3))
  ooo=order(-imp)
  dimnames(res)=list(NULL,c("Predictor","Importance"))
  cat("",fill=T)
  return(res[ooo,])
}
| /R/funcs.R | no_license | prischen/hierNet | R | false | false | 37,504 | r | hierNet <- function(x, y, lam, delta=1e-8, strong=FALSE, diagonal=TRUE, aa=NULL, zz=NULL, center=TRUE, stand.main=TRUE, stand.int=FALSE,
                    rho=nrow(x), niter=100, sym.eps=1e-3,
                    step=1, maxiter=2000, backtrack=0.2, tol=1e-5,
                    trace=0) {
  # Main hierNet function for fitting at a single parameter lambda.
  # Note: L1 penalty terms have parameter lam.l1 = lambda * (1-delta)
  # and L2 penalty has parameter lam.l2 = lambda * delta.
  #
  # x, y:      predictors (n x p) and response (length n).
  # strong:    TRUE = strong hierarchy via ADMM; FALSE = weak hierarchy via
  #            a single generalized gradient descent run.
  # diagonal:  whether x_j^2 "interaction with self" terms are allowed.
  # aa:        optional warm start (list with th, bp, bn).
  # zz:        optional precomputed interaction matrix; if already numeric
  #            it is assumed pre-centered.
  # stand.main and stand.int refer to scaling
  # Returns an object of class "hierNet".
  # NOTE(review): stopifnot(class(y) == "numeric") rejects integer-valued y
  # vectors; callers must pass doubles.
  stopifnot(nrow(x) == length(y), lam >= 0, delta >= 0, delta <= 1)
  stopifnot(!is.null(step) && !is.null(maxiter))
  if (strong) stopifnot(!is.null(niter))
  stopifnot(class(y) == "numeric")
  stopifnot(class(lam) == "numeric")
  stopifnot(class(delta) == "numeric")
  stopifnot(class(step) == "numeric", step > 0, maxiter > 0)
  stopifnot(is.finite(x), is.finite(y), is.finite(lam), is.finite(delta))
  this.call <- match.call()
  if (!center) cat("WARNING: center=FALSE should almost never be used. This option is available for special uses only.", fill=TRUE)
  # center and (maybe) scale variables
  x <- scale(x, center=center, scale=stand.main)
  mx <- attr(x, "scaled:center")
  sx <- attr(x, "scaled:scale") # may be NULL
  if (center) {
    my <- mean(y)
    y <- y - my
  } else my <- NULL
  if (is.null(zz)) {
    if (trace > 0) cat("Computing zz...", fill=TRUE)
    zz <- compute.interactions.c(x, diagonal=diagonal)
  }
  if (is.matrix(zz)) {
    zz <- scale(zz, center=center, scale=stand.int)
    mzz <- attr(zz, "scaled:center")
    szz <- attr(zz, "scaled:scale") # may be NULL
    zz <- as.numeric(zz)
  } else {
    mzz <- szz <- NULL
    #cat("Provided zz is not a matrix, so it's assumed to be already centered.", fill=TRUE)
  }
  xnum <- as.numeric(x)
  p <- ncol(x)
  # split lambda into its L1 and L2 (elastic-net style) components
  lam.l1 <- lam * (1 - delta)
  lam.l2 <- lam * delta
  if (strong) {
    # strong hierarchy -- use ADMM4
    if (is.null(rho)) rho <- as.numeric(nrow(x))
    stopifnot(is.numeric(rho), is.finite(rho))
    aa <- admm4(x, xnum, y, lam.l1=lam.l1, lam.l2=lam.l2, diagonal=diagonal, zz=zz,
                rho=rho, niter=niter, aa=aa, sym.eps=sym.eps, # ADMM params
                stepsize=step, backtrack=backtrack, maxiter=maxiter, tol=tol, # GG params
                trace=trace)
    # lack of symmetry in theta means that sometimes strong hierarchy will be (very slightly violated)
    # repair: zero out any th entries small enough to violate strong hierarchy
    ii <- aa$bp + aa$bn == 0
    # note aa$th[ii, ] = 0 since weak hierarchy holds for sure
    if (sum(ii) > 0 & sum(ii) < p) {
      thr <- max(abs(aa$th[!ii, ii]))
      if (thr > 0) {
        cat(" thr = ",thr, fill=TRUE)
        if (thr > 1e-3)
          warning("Had to change ADMM's 'th' by more than 0.001 to make strong hier hold! Increase niter (and/or rho). ")
        aa$th[abs(aa$th) <= thr] <- 0
      }
    }
  } else {
    # weak hierarchy -- a single call to generalized gradient descent
    if (is.null(aa)) {
      aa <- list(th=matrix(0, p, p), bp=rep(0, p), bn=rep(0, p))
    } else {
      stopifnot(dim(aa$th) == c(p,p), length(aa$bp) == p, length(aa$bn) == p)
    }
    # this could be improved by not actually creating V...
    V <- matrix(0, p, p)
    rho <- 0
    aa <- ggdescent.c(x=x, xnum=xnum, zz=zz, y=y, lam.l1=lam.l1, lam.l2=lam.l2, diagonal=diagonal,
                      rho=rho, V=V,
                      stepsize=step, backtrack=backtrack, maxiter=maxiter, tol=tol,
                      aa=aa, trace=trace)
  }
  # record fitting metadata on the returned object
  aa$lam <- lam
  aa$delta <- delta
  aa$type <- "gaussian"
  aa$diagonal <- diagonal
  aa$strong <- strong
  aa$obj <- Objective(aa=aa, x=x, y=y, lam.l1=lam.l1, lam.l2=lam.l2, xnum=xnum, zz=zz, strong=strong, trace = trace-1)
  aa$step <- step
  aa$maxiter <- maxiter
  aa$backtrack <- backtrack
  aa$tol <- tol
  if (strong) {
    # ADMM parameters:
    aa$rho <- rho
    aa$niter <- niter
    aa$sym.eps <- sym.eps
  }
  # centering/scaling info needed later by predict.hierNet()
  aa$mx <- mx
  aa$sx <- sx
  aa$my <- my
  aa$mzz <- mzz
  aa$szz <- szz
  aa$call <- this.call
  class(aa) <- "hierNet"
  return(aa)
}
print.hierNet <- function(x, ...) {
  # Print method for a single hierNet fit.  Reports the nonzero main effects
  # and, for each, its nonzero interactions; a "*" flags predictors whose
  # hierarchy constraint ||th_j.||_1 <= bp_j + bn_j is tight.
  cat("Call:\n")
  dput(x$call)
  # report the symmetrized interaction matrix
  th=(x$th+t(x$th))/2
  o2=colSums(th^2)!=0   # predictors that appear in at least one interaction
  b=x$bp-x$bn           # net main effects
  o=b!=0
  b=b[o]
  if (any(o2)) {
    # model has interactions
    th=th[o,o2,drop=FALSE]
    # "tight": interaction L1-norm uses the whole main-effect budget
    tight <- rowSums(abs(th)) >= x$bp[o] + x$bn[o] - 1e-9
    tt <- rep("", length(tight))
    tt[tight] <- "*"
    mat=cbind(b,th)
    mat=round(mat,4)
    mat <- cbind(mat, tt)
    cat("\n")
    cat("Non-zero coefficients:",fill=T)
    cat(" (Rows are predictors with nonzero main effects)",fill=T)
    cat(" (1st column is main effect)", fill=T)
    cat(" (Next columns are nonzero interactions of row predictor)", fill=T)
    cat(" (Last column indicates whether hierarchy constraint is tight.)",fill=T)
    cat("\n")
    dimnames(mat)=list(as.character(which(o)),c("Main effect",as.character(which(o2)),"Tight?"))
    print(mat, quote = FALSE)
  } else {
    # main effects only
    mat <- matrix(round(b,4), length(b), 1)
    cat("\n")
    cat("Non-zero coefficients:",fill=T)
    cat(" (No interactions in this model)",fill=T)
    cat("\n")
    dimnames(mat)=list(as.character(which(o)),"Main effect")
    print(mat, quote = FALSE)
  }
  invisible()
}
print.hierNet.path <- function(x, ...) {
  # Print method for a hierNet path: one row per lambda with the objective,
  # number of nonzero main effects and number of nonzero interactions
  # (diagonal terms plus symmetrized upper-triangle terms).
  # NOTE(review): x$lam relies on R's partial matching of list names to
  # resolve to x$lamlist (the path object stores "lamlist", not "lam").
  cat("Call:\n")
  dput(x$call)
  b=x$bp-x$bn
  mat=cbind(round(x$lam,2),round(x$obj,2),colSums(b!=0),apply(x$th!=0,3,function(a) sum(diag(a)) + sum((a+t(a)!=0)[upper.tri(a)])))
  dimnames(mat)=list(NULL,c("Lambda", "Objective", "Number of main effects","Number of interactions"))
  cat("\n")
  print(mat, quote = FALSE)
  invisible()
}
print.hierNet.cv <- function(x, ...) {
  # Print method for cross-validation results: per-lambda nonzero count,
  # mean CV error and its standard error, followed by the minimizing lambda
  # (lamhat) and the one-standard-error-rule lambda (lamhat.1se).
  cat("Call:\n")
  dput(x$call)
  mat=cbind(round(x$lamlist,2),x$nonzero,round(x$cv.err,2),round(x$cv.se,2))
  dimnames(mat)=list(NULL,c("Lambda", "Number of nonzero","Mean CV error", "SE"))
  cat("\n")
  print(mat, quote = FALSE)
  cat("\n")
  cat(c("lamhat=",round(x$lamhat,2),"lamhat.1se=",round(x$lamhat.1se,2)),fill=T)
  invisible()
}
hierNet.path <- function(x, y, lamlist=NULL, delta=1e-8, minlam=NULL, maxlam=NULL, nlam=20, flmin=.01,
                         diagonal=TRUE, strong=FALSE, aa=NULL, zz=NULL,
                         stand.main=TRUE, stand.int=FALSE,
                         rho=nrow(x), niter=100, sym.eps=1e-3,# ADMM params
                         step=1, maxiter=2000, backtrack=0.2, tol=1e-5, # GG descent params
                         trace=0) {
  # Main Hiernet function for fitting at a sequence of lambda values.
  # Note: L1 penalty terms have parameter lam.l1 = lambda * (1-delta)
  # and L2 penalty has parameter lam.l2 = lambda * delta.
  #
  # If lamlist is NULL, a log-spaced grid of nlam values is built from
  # maxlam (default: max |x'y|) down to minlam (default: maxlam*flmin).
  # Each fit warm-starts from the previous lambda's solution.
  # Returns an object of class "hierNet.path".
  #
  # Always centers both x and zz (unless zz is provided in as.numeric form)
  # stand.main and stand.int refer to whether main effects and interactions should have norm sqrt(n-1)
  # center and (maybe) scale variables
  this.call <- match.call()
  x <- scale(x, center=TRUE, scale=stand.main)
  mx <- attr(x, "scaled:center")
  sx <- attr(x, "scaled:scale") # may be NULL
  my <- mean(y)
  y <- y - my
  if (is.null(maxlam)) {
    if (!is.null(minlam)) stop("Cannot have maxlam=NULL if minlam is non-null.")
    # maxlam <- max(abs(t(x) %*% y)/colSums(x^2))
    maxlam <- max(abs(t(x) %*% y))
    # temp <- t(scale(t(x), center=FALSE, scale=1/y))
    # temp2 <- apply(temp, 2, twonorm)
    # maxlam <- max(max(temp2), maxlam)
    minlam <- maxlam * flmin
  }
  if (is.null(minlam)) minlam <- maxlam * flmin
  if (is.null(lamlist))
    lamlist <- exp(seq(log(maxlam),log(minlam),length=nlam))
  nlam <- length(lamlist)
  if (is.null(zz))
    zz <- compute.interactions.c(x, diagonal=diagonal)
  else
    stopifnot(is.matrix(zz))
  # center and (maybe) scale zz
  zz <- scale(zz, center=TRUE, scale=stand.int)
  mzz <- attr(zz, "scaled:center")
  szz <- attr(zz, "scaled:scale") # may be NULL
  zz <- as.numeric(zz)
  p <- ncol(x)
  cp2 <- choose(p, 2)
  # preallocate per-lambda coefficient storage
  bp <- bn <- matrix(NA, nrow=p, ncol=nlam)
  th <- array(NA, c(p, p, nlam))
  obj <- rep(NA, nlam)
  aa <- NULL
  for (i in seq(nlam)) {
    if (trace != 0) {
      cat(c("i,lam=", i, round(lamlist[i],2)), fill=TRUE)
    }
    # warm-start each fit from the previous solution (aa)
    aa <- hierNet(x, y, lam=lamlist[i], delta=delta, strong=strong, diagonal=diagonal, aa=aa, zz=zz,
                  stand.main=FALSE, stand.int=FALSE, # have already standardized
                  rho=rho, niter=niter, sym.eps=sym.eps,
                  step=step, maxiter=maxiter, backtrack=backtrack, tol=tol, trace=trace)
    bp[, i] <- aa$bp
    bn[, i] <- aa$bn
    th[, , i] <- aa$th
    obj[i] <- aa$obj
  }
  dimnames(bp) <- dimnames(bn) <- list(as.character(1:p), NULL)
  dimnames(th) <- list(as.character(1:p), as.character(1:p), NULL)
  out <- list(bp=bp, bn=bn, th=th, obj=obj, lamlist=lamlist, delta=delta, mx=mx, sx=sx, mzz=mzz, szz=szz, my=my,
              type="gaussian", diagonal=diagonal, strong=strong,
              step=step, maxiter=maxiter, backtrack=backtrack, tol=tol,
              call=this.call)
  if (strong) {
    # ADMM parameters:
    out$rho <- rho
    out$niter <- niter
    out$sym.eps <- sym.eps
  }
  class(out) <- "hierNet.path"
  out
}
predict.hierNet <- function(object, newx, newzz=NULL, ...) {
  # Predict from a hierNet (or hierNet.path) fit.
  # newx is centered/scaled with the training statistics (mx, sx) stored on
  # the object; interactions are recomputed unless newzz is supplied, then
  # centered/scaled with (mzz, szz).
  # For gaussian fits: returns a vector (single fit) or n x nlam matrix
  # (path fit) of predictions.
  # For logistic fits: returns list(prob=, yhat=) with a 0.5 cutoff.
  n <- nrow(newx)
  if (is.null(object$sx))
    newx <- scale(newx, center=object$mx, scale=FALSE)
  else
    newx <- scale(newx, center=object$mx, scale=object$sx)
  if (is.null(newzz))
    newzz <- compute.interactions.c(newx, diagonal=object$diagonal)
  if (is.null(object$szz))
    newzz <- scale(newzz, center=object$mzz, scale=FALSE)
  else
    newzz <- scale(newzz, center=object$mzz, scale=object$szz)
  newzz <- as.numeric(newzz)
  newx <- as.numeric(newx)
  stopifnot(is.finite(newzz), is.finite(newx))
  # a matrix-valued bp signals a path object (one column per lambda)
  if (!("matrix" %in% class(object$bp)))
    yhatt <- Compute.yhat.c(newx, newzz, object) + object$my
  else {
    nlam <- ncol(object$bp)
    yhat <- matrix(NA, n, nlam)
    # this could be made more efficient
    for (i in seq(nlam)) {
      bb <- list(bp=object$bp[, i], bn=object$bn[, i], th=object$th[, , i], diagonal=object$diagonal)
      yhat[, i] <- Compute.yhat.c(newx, newzz, bb)
    }
    yhatt <- yhat + object$my
  }
  if (object$type == "logistic") {
    # predict from hierNet.logistic object object
    b0 <- object$b0
    if(is.matrix(yhatt))
      b0 <- matrix(b0, nrow=nrow(yhatt), ncol=ncol(yhatt), byrow=T)
    yhatt <- b0 + yhatt
    pr <- 1 / (1 + exp(-yhatt))
    return(list(prob=pr, yhat=1*(pr>.5)))
  }
  return(yhatt)
}
predict.hierNet.path <- function(object, newx, newzz=NULL, ...) {
  # Path objects share the single-fit prediction code, which detects the
  # path case from the matrix-valued coefficients.
  predict.hierNet(object = object, newx = newx, newzz = newzz, ...)
}
admm4 <- function(x, xnum, y, lam.l1, lam.l2, diagonal, zz=NULL, rho, niter, aa=NULL, sym.eps=1e-3, trace=1, ...) {
  # Performs ADMM4.
  # Note: xnum is the matrix x as a numeric. Both are passed to avoid having to call as.numeric too
  # many times.
  #
  # ADMM splitting: th (unconstrained copy), tt (symmetric copy), u (dual).
  # Each iteration solves the (th, bp, bn) subproblem by generalized gradient
  # descent, then updates tt (symmetrization) and u (dual ascent).
  # Extra arguments in ... are forwarded to ggdescent.c.
  p <- ncol(x)
  if (is.null(zz)) {
    if (trace > 0) cat("Computing zz...", fill=TRUE)
    zz <- as.numeric(compute.interactions.c(x, diagonal=diagonal))
  }
  else if ("matrix" %in% class(zz)) zz <- as.numeric(zz)
  if (is.null(aa)) {
    # cold start: all variables zero
    aa <- list(u=matrix(0, p, p),
               th=matrix(0, p, p),
               bp=rep(0, p),
               bn=rep(0, p),
               tt=matrix(0, p, p),
               diagonal=diagonal)
  } else {
    stopifnot(diagonal == aa$diagonal)
  }
  if (is.null(aa$tt) || is.null(aa$u)) {
    # warm start from a weak-hierarchy fit: initialize tt symmetric, u zero
    aa$tt <- 0.5 * (aa$th + t(aa$th))
    aa$u <- matrix(0, p, p)
  }
  obj <- Objective(aa=aa, x=x, y=y, lam.l1=lam.l1, lam.l2=lam.l2, xnum=xnum, zz=zz, strong=TRUE, sym.eps=sym.eps, trace = trace-1)
  ll <- NULL
  for (i in seq(niter)) {
    if (trace > 0) cat(i, " ")
    ll <- c(ll, ADMM4.Lagrangian(aa, xnum, zz, y, lam.l1=lam.l1, lam.l2=lam.l2, diagonal=diagonal, rho))
    V <- aa$u - rho * aa$tt
    gg <- ggdescent.c(x, xnum, zz, y, lam.l1=lam.l1, lam.l2=lam.l2, diagonal=diagonal, rho, V, trace=trace-1, aa=aa, ...)
    aa$th <- gg$th
    aa$bp <- gg$bp
    aa$bn <- gg$bn
    # tt-update: symmetrize th, shifted by the (symmetrized) scaled dual
    aa$tt <- (aa$th + t(aa$th)) / 2 + (aa$u + t(aa$u)) / (2 * rho)
    # dual ascent on the consensus constraint th = tt
    aa$u <- aa$u + rho * (aa$th - aa$tt)
    obj <- c(obj, Objective(aa=aa, x=x, y=y, lam.l1=lam.l1, lam.l2=lam.l2, xnum=xnum, zz=zz, strong=TRUE, sym.eps=sym.eps, trace = trace-1))
    if (trace > 0) cat(obj[i+1], fill=TRUE)
  }
  if (max(abs(aa$th-t(aa$th))) > sym.eps)
    cat("Attention: th not symmetric within the desired sym.eps. Run ADMM for more iterations. And try increasing rho.")
  aa$obj <- obj
  aa$lagr <- ll
  aa
}
Objective <- function(aa, x, y, lam.l1, lam.l2, xnum=NULL, zz=NULL, strong=TRUE, sym.eps=1e-3, trace = -1) {
  # evaluates the NewYal objective at aa.
  # Returns Inf on any constraint violation: th symmetry (strong only),
  # weak hierarchy rowSums|th| <= bp + bn, nonnegativity of bp/bn, and
  # zero diagonal when aa$diagonal is FALSE.
  # Note: max(th - t(th)) detects asymmetry in either direction since
  # th - t(th) is antisymmetric.
  if (strong) {
    if (max(aa$th-t(aa$th)) > sym.eps) {
      if (trace != -1){
        cat("Theta is not symmetric.", fill=TRUE)
      }
      return(Inf)
    }
  }
  if (any(rowSums(abs(aa$th)) > aa$bp + aa$bn + 1e-5)) {
    cat("hierarchy violated.", fill=TRUE)
    return(Inf)
  }
  if (any(aa$bp < -1e-5)||any(aa$bn < -1e-5)) {
    cat("Non-negative of bp or bn violated.", fill=TRUE)
    return(Inf)
  }
  if (aa$diagonal == FALSE)
    if (any(abs(diag(aa$th)) > 1e-8)) {
      cat("Zero diagonal violated.", fill=TRUE)
      return(Inf)
    }
  if (is.null(zz)) {
    zz <- as.numeric(compute.interactions.c(x, diagonal=aa$diagonal))
  }
  if (is.null(xnum)) xnum <- as.numeric(x)
  # squared-error loss on residuals
  r <- y - Compute.yhat.c(xnum, zz, aa)
  # L1 penalty: off-diagonal th counted once via sum/2 over the full matrix,
  # with the diagonal re-added at half weight
  pen <- lam.l1 * sum(aa$bp + aa$bn) + lam.l1 * sum(abs(aa$th))/2 + lam.l1 * sum(abs(diag(aa$th)))/2
  pen <- pen + lam.l2 * (sum(aa$bp^2) + sum(aa$bn^2) + sum(aa$th^2))
  sum(r^2)/2 + pen
}
Objective.logistic <- function(aa, x, y, lam.l1, lam.l2, xnum=NULL, zz=NULL, strong=TRUE, sym.eps=1e-3, trace = -1) {
  # evaluates the logistic hiernet objective at aa.
  #
  # aa:       list with b0, bp, bn, th, diagonal.
  # y:        0/1 response.
  # xnum, zz: optional precomputed as.numeric(x) and interaction vector.
  # Returns Inf when a constraint is violated (zero diagonal if requested,
  # th symmetry when strong=TRUE, weak hierarchy, nonnegativity of bp/bn);
  # otherwise binomial negative log-likelihood plus penalties.
  stopifnot(y %in% c(0,1))
  stopifnot("diagonal" %in% names(aa))
  if (aa$diagonal == FALSE)
    if (any(abs(diag(aa$th)) > 1e-8)) {
      cat("Diagonal of Theta is nonzero.", fill=TRUE)
      return(Inf)
    }
  if (strong) {
    if (max(aa$th-t(aa$th)) > sym.eps) {
      if (trace != -1){
        cat("Theta is not symmetric.", fill=TRUE)
      }
      return(Inf)
    }
  }
  if (any(rowSums(abs(aa$th)) > aa$bp + aa$bn + 1e-5)) {
    cat("hierarchy violated.", fill=TRUE)
    return(Inf)
  }
  # BUG FIX: the tolerance was -1e5 (i.e. -100000), which effectively
  # disabled this check; use -1e-5 as in Objective() and ADMM4.Lagrangian().
  if (any(aa$bp < -1e-5)||any(aa$bn < -1e-5)) {
    cat("Non-negative of bp or bn violated.", fill=TRUE)
    return(Inf)
  }
  if (is.null(zz)) {
    zz <- as.numeric(scale(compute.interactions.c(x, diagonal=aa$diagonal), center=TRUE, scale=FALSE))
  }
  if (is.matrix(zz)) zz <- as.numeric(zz)
  if (is.null(xnum)) xnum <- as.numeric(x)
  phat <- Compute.phat.c(xnum, zz, aa)
  # Bernoulli negative log-likelihood
  loss <- -sum(y*log(phat)) - sum((1-y)*log(1-phat))
  # L1 penalty: off-diagonal th counted once (sum/2 over full matrix),
  # diagonal re-added at half weight
  pen <- lam.l1 * sum(aa$bp + aa$bn) + lam.l1 * sum(abs(aa$th))/2 + lam.l1 * sum(abs(diag(aa$th)))/2
  pen <- pen + lam.l2 * (sum(aa$bp^2) + sum(aa$bn^2) + sum(aa$th^2))
  loss + pen
}
compute.interactions.c <- function(x, diagonal=TRUE) {
  # Returns (uncentered) n by cp2 matrix of interactions.
  # The columns of zz are in standard order (11), 12,13,14,...,(22),23,...
  # z's (jk)th column is x_j * x_k
  # diagonal=TRUE additionally includes the squared terms x_j^2, giving
  # choose(p,2)+p columns instead of choose(p,2).
  # Columns are named "j:k" (or "name_j:name_k" when x has column names)
  # using the index vectors i1, i2 filled in by the C routine.
  n <- nrow(x)
  p <- ncol(x)
  cp2 <- p * (p - 1) / 2
  if (diagonal) {
    cp2 <- cp2 + p
    out <- .C("ComputeInteractionsWithDiagWithIndices",
              as.double(x),
              as.integer(n),
              as.integer(p),
              z=rep(0, n * cp2),
              i1=as.integer(rep(0, cp2)),
              i2=as.integer(rep(0, cp2)), PACKAGE="hierNet")
  }
  else {
    out <- .C("ComputeInteractionsWithIndices",
              as.double(x),
              as.integer(n),
              as.integer(p),
              z=rep(0, n * cp2),
              i1=as.integer(rep(0, cp2)),
              i2=as.integer(rep(0, cp2)), PACKAGE="hierNet")
  }
  z <- matrix(out$z, n, cp2)
  rownames(z) <- rownames(x)
  if (is.null(colnames(x))) {
    colnames(z) <- paste(out$i1, out$i2, sep=":")
  }
  else {
    colnames(z) <- paste(colnames(x)[out$i1], colnames(x)[out$i2], sep=":")
  }
  z
}
compute.full.interactions.c <- function(x) {
  # Returns (uncentered) n by p^2 matrix of interactions.
  # The columns of zz are in standard order 11,12,13,14,...,23,...
  # z's (jk)th column is x_j * x_k
  # Unlike compute.interactions.c, every ordered pair (j,k) gets a column,
  # so x_j*x_k appears twice (as jk and kj).
  n <- nrow(x)
  p <- ncol(x)
  out <- .C("ComputeFullInteractions",
            as.double(x),
            as.integer(n),
            as.integer(p),
            z=rep(0, n * p^2),
            PACKAGE="hierNet")
  matrix(out$z, n, p^2)
}
Compute.yhat.c <- function(xnum, zz, aa) {
  # Linear predictor x(bp-bn) + interaction terms, computed in C.
  # aa: list containing bp, bn, th, diagonal
  # xnum: x flattened with as.numeric (length n*p).
  # note: zz is the n by cp2 matrix, whereas z is the n by p^2 one.
  # Returns a numeric vector of length n.
  p <- length(aa$bp)
  n <- length(xnum) / p
  stopifnot(n==round(n))
  stopifnot("diagonal" %in% names(aa))
  # zz length must match the interaction layout implied by 'diagonal'
  if (aa$diagonal) stopifnot(length(zz) == n * (choose(p,2) + p))
  else stopifnot(length(zz) == n * choose(p,2))
  out <- .C("compute_yhat_zz_R",
            xnum,
            as.integer(n),
            as.integer(p),
            zz,
            as.integer(aa$diagonal),
            as.double(aa$th),
            aa$bp,
            aa$bn,
            yhat=rep(0, n),
            PACKAGE="hierNet")
  out$yhat
}
Compute.phat.c <- function(xnum, zz, aa) {
  # Fitted logistic probabilities for the hierNet model, computed in C.
  # aa: list containing b0, bp, bn, th
  # note: zz is the n by cp2 matrix, whereas z is the n by p^2 one.
  # Returns a numeric vector of length n of probabilities.
  stopifnot(c("b0","bp","bn","th","diagonal") %in% names(aa))
  p <- length(aa$bp)
  n <- length(xnum) / p
  if (is.matrix(xnum)) xnum <- as.numeric(xnum)
  stopifnot(n == round(n))
  # zz length must match the interaction layout implied by 'diagonal'
  if (aa$diagonal) stopifnot(length(zz) == n * (choose(p,2) + p))
  else stopifnot(length(zz) == n * choose(p,2))
  #void compute_phat_zz_R(double *x, int *n, int *p, double *zz, int *diagonal,
  #                       double *b0, double *th, double *bp, double *bn, double *phat) {
  out <- .C("compute_phat_zz_R",
            xnum,
            as.integer(n),
            as.integer(p),
            zz,
            as.integer(aa$diagonal),
            as.double(aa$b0),
            as.double(aa$th),
            aa$bp,
            aa$bn,
            phat=rep(0, n),
            PACKAGE="hierNet")
  out$phat
}
ggdescent.c <- function(x, xnum, zz, y, lam.l1, lam.l2, diagonal, rho, V, stepsize, backtrack=0.2, maxiter=100,
                        tol=1e-5, aa=NULL, trace=1) {
  # See ADMM4 pdf for the problem this solves.
  #
  # x, xnum, zz, y: data (note: zz is a length n*cp2 vector, not a matrix) xnum is x as a vector
  # lam.l1: l1-penalty parameter
  # lam.l2: l2-penalty parameter
  # rho: admm parameter
  # V: see ADMM4 pdf
  # stepsize: step size to start backtracking with
  # backtrack: factor by which step is reduced on each backtrack.
  # maxiter: number of generalized gradient steps to take.
  # tol: stop gg descent if change in objective is below tol.
  # aa: initial estimate of (th, bp, bn)
  # trace: how verbose to be
  #
  # Returns list(bp, bn, th) with th reshaped back to a p x p matrix.
  # C signature (for reference):
  # void ggdescent_R(double *x, int *n, int *p, double *zz, int *diagonal, double *y,
  #                  double *lamL1, double*lamL2, double *rho, double *V, int *maxiter,
  #                  double *curth, double *curbp, double *curbn,
  #                  double *t, int *stepwindow, double *backtrack, double *tol, int *trace,
  #                  double *th, double *bp, double *bn) {
  n <- length(y)
  p <- ncol(x)
  # window of recent objective values used by the C code's stopping rule
  stepwindow <- 10
  if (is.null(aa)) aa <- list(th=matrix(0,p,p), bp=rep(0,p), bn=rep(0,p))
  out <- .C("ggdescent_R",
            xnum,
            as.integer(n),
            as.integer(p),
            zz,
            as.integer(diagonal),
            y,
            as.double(lam.l1),
            as.double(lam.l2),
            as.double(rho),
            as.double(V),
            as.integer(maxiter),
            as.double(aa$th),
            aa$bp,
            aa$bn,
            stepsize,
            as.integer(stepwindow),
            backtrack,
            tol,
            as.integer(trace),
            th=rep(0, p*p),
            bp=rep(0, p),
            bn=rep(0, p),
            PACKAGE="hierNet")
  list(bp=out$bp, bn=out$bn, th=matrix(out$th, p, p))
}
hierNet.logistic <- function(x, y, lam, delta=1e-8, diagonal=TRUE, strong=FALSE, aa=NULL, zz=NULL, center=TRUE,
                             stand.main=TRUE, stand.int=FALSE,
                             rho=nrow(x), niter=100, sym.eps=1e-3,# ADMM params
                             step=1, maxiter=2000, backtrack=0.2, tol=1e-5, # GG descent params
                             trace=1) {
  # Solves the logistic regression hiernet at a single lambda.
  # Returns a "hierNet" object with (b0, bp, bn, th) plus fitting metadata.
  # L1 penalty weight is lam*(1-delta); L2 weight is lam*delta.
  this.call <- match.call()
  n <- nrow(x)
  p <- ncol(x)
  stopifnot(y %in% c(0,1))
  stopifnot(length(y) == n, lam >= 0, delta >= 0, delta <= 1)
  stopifnot(!is.null(step) && !is.null(maxiter))
  stopifnot(class(lam) == "numeric")
  stopifnot(class(delta) == "numeric")
  stopifnot(class(step) == "numeric", step > 0, maxiter > 0)
  stopifnot(is.finite(x), is.finite(y), is.finite(lam), is.finite(delta))
  lam.l1 <- lam * (1 - delta)
  lam.l2 <- lam * delta
  if (!center)
    cat("WARNING: center=FALSE should almost never be used. This option is available for special uses only.", fill = TRUE)
  # center and (maybe) scale variables
  x <- scale(x, center = center, scale = stand.main)
  mx <- attr(x, "scaled:center")
  sx <- attr(x, "scaled:scale")
  if (is.null(aa)) aa <- list(b0=0, bp=rep(0, p), bn=rep(0, p), th=matrix(0, p, p), diagonal=diagonal)
  if (is.null(zz)) {
    if (trace > 0) cat("Computing zz...", fill=TRUE)
    zz <- compute.interactions.c(x, diagonal=diagonal)
  }
  if (is.matrix(zz)) {
    zz <- scale(zz, center=center, scale=stand.int)
    mzz <- attr(zz, "scaled:center")
    szz <- attr(zz, "scaled:scale")
    zz <- as.numeric(zz)
  } else {
    # zz supplied in numeric form is assumed already centered
    mzz <- szz <- NULL
  }
  xnum <- as.numeric(x)
  if (strong) {
    # strong hierarchy -- use ADMM4 (logistic regression version)
    stopifnot(is.numeric(rho), is.finite(rho))
    out <- admm4.logistic(x, xnum, y, lam.l1, lam.l2, diagonal=diagonal, zz=zz,
                          rho=rho, niter=niter, aa=aa, sym.eps=sym.eps, # ADMM params
                          stepsize=step, backtrack=backtrack, maxiter=maxiter, tol=tol, # GG params
                          trace=trace)
    # Lack of exact symmetry in th means strong hierarchy can be very
    # slightly violated; zero out entries below the violation threshold.
    ii <- out$bp + out$bn == 0
    # note out$th[ii, ] = 0 since weak hierarchy holds for sure
    sumii <- sum(ii)
    if (sumii > 0 && sumii < p) {
      thr <- max(abs(out$th[!ii, ii]))
      if (thr > 0) {
        cat(" thr = ",thr, fill=TRUE)
        if (thr > 1e-3)
          warning("Had to change ADMM's 'th' by more than 0.001 to make strong hier hold! Increase niter (and/or rho). ")
        # BUG FIX: previously thresholded the stale warm start 'aa$th'; the
        # object being returned is 'out', so threshold out$th (this mirrors
        # what the gaussian hierNet() does with its result).
        out$th[abs(out$th) <= thr] <- 0
      }
    }
  } else {
    # weak hierarchy -- a single generalized gradient descent run (rho=0, V=0)
    out <- ggdescent.logistic(xnum=xnum, zz=zz, y=y, lam.l1=lam.l1, lam.l2=lam.l2, diagonal=diagonal, rho=0, V=matrix(0,p,p),
                              stepsize=step, backtrack=backtrack, maxiter=maxiter,
                              tol=tol, aa=aa, trace=trace)
  }
  out$call <- this.call
  out$lam <- lam
  out$delta <- delta
  out$type <- "logistic"
  out$diagonal <- diagonal
  out$strong <- strong
  if (strong) {
    # ADMM parameters:
    out$rho <- rho
    out$niter <- niter
    out$sym.eps <- sym.eps
  }
  out$step <- step
  out$maxiter <- maxiter
  out$backtrack <- backtrack
  out$tol <- tol
  out$obj <- critf.logistic(x, y, lam.l1, lam.l2, out$b0, out$bp, out$bn, out$th)
  # centering/scaling info needed by predict.hierNet()
  out$mx <- mx
  out$my <- 0
  out$sx <- sx
  out$mzz <- mzz
  # CONSISTENCY FIX: record szz (as gaussian hierNet() does) so that
  # predict.hierNet() can undo interaction scaling when stand.int=TRUE.
  out$szz <- szz
  class(out) <- "hierNet"
  return(out)
}
admm4.logistic <- function(x, xnum, y, lam.l1, lam.l2, diagonal, zz=NULL, rho=10, niter, aa=NULL, sym.eps=1e-3, trace=1, ...) {
  # Performs ADMM4 for logistic loss.
  # Note: xnum is the matrix x as a numeric. Both are passed to avoid having to call as.numeric too
  # many times.
  #
  # Same splitting as admm4(): th (free copy), tt (symmetric copy), u (dual);
  # the inner subproblem is solved by ggdescent.logistic. Extra arguments in
  # ... are forwarded to ggdescent.logistic.
  p <- ncol(x)
  if (is.null(zz)) {
    if (trace > 0) cat("Computing zz...", fill=TRUE)
    zz <- as.numeric(compute.interactions.c(x, diagonal=diagonal))
  }
  else if ("matrix" %in% class(zz)) zz <- as.numeric(zz)
  if (is.null(aa)) {
    # cold start: all variables zero
    aa <- list(u=matrix(0, p, p),
               th=matrix(0, p, p),
               bp=rep(0, p),
               bn=rep(0, p),
               tt=matrix(0, p, p),
               diagonal=diagonal)
  }
  if (is.null(aa$tt) || is.null(aa$u)) {
    # warm start from a weak-hierarchy fit: symmetric tt, zero dual
    aa$tt <- 0.5 * (aa$th + t(aa$th))
    aa$u <- matrix(0, p, p)
  }
  obj <- Objective.logistic(aa=aa, x=x, y=y, lam.l1=lam.l1, lam.l2=lam.l2, xnum=xnum, zz=zz, strong=TRUE, sym.eps=sym.eps, trace = trace-1)
  for (i in seq(niter)) {
    if (trace > 0) cat(i, " ")
    V <- aa$u - rho * aa$tt
    gg <- ggdescent.logistic(xnum, zz, y, lam.l1=lam.l1, lam.l2=lam.l2, diagonal=diagonal, rho, V, trace=trace-1, aa=aa, ...)
    aa$th <- gg$th
    aa$bp <- gg$bp
    aa$bn <- gg$bn
    # tt-update (symmetrization) and dual ascent on th = tt
    aa$tt <- (aa$th + t(aa$th)) / 2 + (aa$u + t(aa$u)) / (2 * rho)
    aa$u <- aa$u + rho * (aa$th - aa$tt)
    obj <- c(obj, Objective.logistic(aa=aa, x=x, y=y, lam.l1=lam.l1, lam.l2=lam.l2, xnum=xnum, zz=zz, strong=TRUE, sym.eps=sym.eps, trace = trace-1))
    if (trace > 0) cat(obj[i+1], fill=TRUE)
  }
  if (max(abs(aa$th-t(aa$th))) > sym.eps)
    cat("Attention: th not symmetric within the desired sym.eps. Run ADMM for more iterations. And try increasing rho.")
  aa$obj <- obj
  aa
}
ggdescent.logistic <- function(xnum, zz, y, lam.l1, lam.l2, diagonal, rho, V, stepsize, backtrack=0.2, maxiter=100,
                               tol=1e-5, aa=NULL, trace=1) {
  # See ADMM4 pdf and logistic.pdf for the problem this solves.
  #
  # xnum, zz, y: data (note: zz is a length n*cp2 vector, not a matrix) xnum is x as a (n*p)-vector
  # lam.l1: l1-penalty parameter
  # lam.l2: l2-penalty parameter
  # rho: admm parameter
  # V: see ADMM4 pdf
  # stepsize: step size to start backtracking with
  # backtrack: factor by which step is reduced on each backtrack.
  # maxiter: number of generalized gradient steps to take.
  # tol: stop gg descent if change in objective is below tol.
  # aa: initial estimate of (b0, th, bp, bn)
  # trace: how verbose to be
  #
  # Returns list(b0, bp, bn, th); the intercept b0 is fit here, unlike the
  # gaussian ggdescent.c.
  # C signature (for reference):
  #void ggdescent_logistic_R(double *x, int *n, int *p, double *zz, int * diagonal, double *y,
  #                          double *lamL1, double *lamL2, double *rho, double *V, int *maxiter,
  #                          double *curb0, double *curth, double *curbp, double *curbn,
  #                          double *t, int *stepwindow, double *backtrack, double *tol, int *trace,
  #                          double *b0, double *th, double *bp, double *bn) {
  n <- length(y)
  p <- length(xnum) / n
  stopifnot(p == round(p))
  # zz length must match the interaction layout implied by 'diagonal'
  if (diagonal) stopifnot(length(zz) == n * (choose(p,2)+p))
  else stopifnot(length(zz) == n * choose(p,2))
  # window of recent objective values used by the C code's stopping rule
  stepwindow <- 10
  if (is.null(aa)) aa <- list(b0=0, th=matrix(0,p,p), bp=rep(0,p), bn=rep(0,p))
  out <- .C("ggdescent_logistic_R",
            xnum,
            as.integer(n),
            as.integer(p),
            zz,
            as.integer(diagonal),
            as.double(y), # convert from integer to double
            as.double(lam.l1),
            as.double(lam.l2),
            as.double(rho),
            as.double(V),
            as.integer(maxiter),
            as.double(aa$b0),
            as.double(aa$th),
            aa$bp,
            aa$bn,
            as.double(stepsize),
            as.integer(stepwindow),
            as.double(backtrack),
            as.double(tol),
            as.integer(trace),
            b0=as.double(0),
            th=rep(0, p*p),
            bp=rep(0, p),
            bn=rep(0, p),
            PACKAGE="hierNet")
  list(b0=out$b0, bp=out$bp, bn=out$bn, th=matrix(out$th, p, p))
}
ADMM4.Lagrangian <- function(aa, xnum, zz, y, lam.l1, lam.l2, diagonal, rho) {
  # Augmented Lagrangian of the ADMM formulation (gaussian loss).
  # aa: list with (th, bp, bn, tt, u); tt is the symmetric copy of th and u
  #     the dual variable.
  # zz is a vector not a matrix
  # Returns Inf when any constraint (zero diagonal, tt symmetry, weak
  # hierarchy, nonnegativity of bp/bn) is violated.
  if (aa$diagonal == FALSE)
    if (any(abs(diag(aa$th)) > 1e-8)) {
      cat("Diagonal of Theta is nonzero.", fill=TRUE)
      return(Inf)
    }
  if (max(aa$tt-t(aa$tt)) > 1e-8) {
    # TYPO FIX: message previously read "symmetrik"
    cat("Theta is not symmetric.", fill=TRUE)
    return(Inf)
  }
  if (any(rowSums(abs(aa$th)) > aa$bp + aa$bn + 1e-5)) {
    cat("hierarchy violated.", fill=TRUE)
    return(Inf)
  }
  if (any(aa$bp < -1e-5)||any(aa$bn < -1e-5)) {
    cat("Non-negative of bp or bn violated.", fill=TRUE)
    return(Inf)
  }
  if (diagonal == FALSE)
    if (any(abs(diag(aa$th)) > 1e-5)) {
      cat("Zero diagonal violated.", fill=TRUE)
      return(Inf)
    }
  # (removed dead code: a local 'V <- aa$u - rho * aa$tt' was computed but
  # never used)
  r <- y - Compute.yhat.c(xnum, zz, aa)
  # Lagrangian terms <u, th - tt> + (rho/2)||th - tt||_F^2
  admm <- sum(aa$u*(aa$th-aa$tt)) + (rho/2) * sum((aa$th-aa$tt)^2)
  pen <- lam.l1 * (sum(aa$bp + aa$bn) + sum(abs(aa$th))/2)
  pen <- pen + lam.l2 * (sum(aa$bp^2) + sum(aa$bn^2) + sum(aa$th^2))
  sum(r^2)/2 + pen + admm
}
predict.hierNet.logistic <- function(object, newx, newzz=NULL, ...) {
  # Logistic fits share the common prediction code, which branches on
  # object$type to return list(prob=, yhat=).
  predict.hierNet(object = object, newx = newx, newzz = newzz, ...)
}
critf.logistic <- function(x, y, lam.l1, lam.l2, b0, bp, bn, th) {
  # Logistic hierNet objective evaluated directly from (b0, bp, bn, th).
  # x: n x p model matrix; y: 0/1 response.
  # Linear predictor: b0 + x(bp - bn) + (1/2) x_i' th x_i per observation.
  # rowSums((x %*% th) * x) equals diag(x %*% th %*% t(x)) without forming
  # the n x n matrix (O(n p^2) instead of O(n^2 p)).
  yhat <- b0 + x %*% (bp - bn) + 0.5 * rowSums((x %*% th) * x)
  p <- 1 / (1 + exp(-yhat))
  # Bernoulli negative log-likelihood
  val <- -sum(y * log(p) + (1 - y) * log(1 - p))
  # L1 penalty: off-diagonal th counted once (sum/2 over full matrix), with
  # the diagonal re-added at half weight; L2 penalty on all coefficients
  val <- val + lam.l1 * sum(bp + bn) + lam.l1 * sum(abs(th))/2 + lam.l1 * sum(abs(diag(th)))/2
  val <- val + lam.l2 * (sum(bp^2) + sum(bn^2) + sum(th^2))
  return(val)
}
twonorm <- function(x) {sqrt(sum(x * x))}
hierNet.logistic.path <- function (x, y, lamlist=NULL, delta=1e-8, minlam=NULL, maxlam=NULL, flmin=.01, nlam=20,
                                   diagonal=TRUE, strong=FALSE, aa=NULL,
                                   zz=NULL, stand.main=TRUE, stand.int=FALSE,
                                   rho=nrow(x), niter=100, sym.eps=1e-3,# ADMM params
                                   step=1, maxiter=2000, backtrack=0.2, tol=1e-5, # GG params
                                   trace=0) {
  # Fit hierNet.logistic over a (log-spaced) sequence of lambda values,
  # warm-starting each fit from the previous solution.  Mirrors
  # hierNet.path() but additionally tracks the intercept b0.
  # Returns an object of class "hierNet.path" with type="logistic".
  this.call=match.call()
  stopifnot(y %in% c(0, 1))
  x <- scale(x, center=TRUE, scale=stand.main)
  mx <- attr(x, "scaled:center")
  sx <- attr(x, "scaled:scale")
  if (is.null(maxlam)) {
    if (!is.null(minlam)) stop("Cannot have maxlam=NULL if minlam is non-null.")
    maxlam <- max(abs(t(x) %*% y))
    minlam <- maxlam * flmin
  }
  if (is.null(minlam)) minlam <- maxlam * flmin
  if (is.null(lamlist))
    lamlist <- exp(seq(log(maxlam), log(minlam), length=nlam))
  nlam <- length(lamlist)
  if (is.null(zz))
    zz <- compute.interactions.c(x, diagonal=diagonal)
  else stopifnot(is.matrix(zz))
  # center and (maybe) scale interactions once for the whole path
  zz <- scale(zz, center=TRUE, scale=stand.int)
  mzz <- attr(zz, "scaled:center")
  szz <- attr(zz, "scaled:scale")
  zz <- as.numeric(zz)
  p <- ncol(x)
  cp2 <- choose(p, 2)
  # preallocate per-lambda coefficient storage
  b0 <- rep(NA, nlam)
  bp <- bn <- matrix(NA, nrow=p, ncol=nlam)
  th <- array(NA, c(p, p, nlam))
  obj <- rep(NA, nlam)
  aa <- NULL
  for (i in seq(nlam)) {
    if (trace != 0) {
      cat(c("i,lam=", i, round(lamlist[i],2)), fill=TRUE)
    }
    # warm start from previous lambda's solution (aa)
    aa <- hierNet.logistic(x, y, lam=lamlist[i], delta=delta, diagonal=diagonal, strong=strong,
                           aa=aa, zz=zz, stand.main=FALSE, stand.int=FALSE,
                           rho=rho, niter=niter, sym.eps=sym.eps,
                           step=step, maxiter=maxiter, backtrack=backtrack, tol=tol,
                           trace=trace)
    b0[i] <- aa$b0
    bp[, i] <- aa$bp
    bn[, i] <- aa$bn
    th[, , i] <- aa$th
    obj[i] <- aa$obj
  }
  dimnames(bp) <- dimnames(bn) <- list(as.character(1:p), NULL)
  dimnames(th) <- list(as.character(1:p), as.character(1:p), NULL)
  out <- list(b0=b0, bp=bp, bn=bn, th=th, obj=obj, lamlist=lamlist, delta=delta,
              mx=mx, my=0, sx=sx, mzz=mzz, szz=szz,
              type="logistic", diagonal=diagonal, strong=strong,
              step=step, maxiter=maxiter, backtrack=backtrack, tol=tol,
              call=this.call)
  if (strong) {
    # ADMM parameters:
    out$rho <- aa$rho
    out$niter <- niter
    out$sym.eps <- sym.eps
  }
  class(out) <- "hierNet.path"
  out
}
balanced.folds <- function(y, nfolds=min(min(table(y)), 10)) {
  # Split observation indices into nfolds cross-validation folds that are
  # (approximately) balanced with respect to the classes of y.
  # Returns a list of index vectors, one per fold.
  totals <- table(y)
  fmax <- max(totals)
  nfolds <- min(nfolds, fmax)
  # makes no sense to have more folds than the max class size
  folds <- as.list(seq(nfolds))
  yids <- split(seq(y), y)
  # nice we to get the ids in a list, split by class
  ###Make a big matrix, with enough rows to get in all the folds per class
  bigmat <- matrix(NA, ceiling(fmax/nfolds) * nfolds, length(totals))
  for(i in seq(totals)) {
    # randomly order each class's indices down its column
    bigmat[seq(totals[i]), i] <- sample(yids[[i]])
  }
  smallmat <- matrix(bigmat, nrow = nfolds) # reshape the matrix
  ### Now do a clever sort to mix up the NAs
  smallmat <- permute.rows(t(smallmat)) ### Now a clever unlisting
  # the "clever" unlist doesn't work when there are no N
  # apply(smallmat, 2, function(x)
  #       x[!is.na(x)])
  res <-vector("list", nfolds)
  for(j in 1:nfolds) {
    # column j of the permuted matrix, minus the NA padding, becomes fold j
    jj <- !is.na(smallmat[, j])
    res[[j]] <- smallmat[jj, j]
  }
  return(res)
}
permute.rows <- function(x) {
  # Randomly permute the elements within each row of matrix x,
  # keeping each row's contents on its own row.
  n_row <- nrow(x)
  n_col <- ncol(x)
  # One random key per element; the +10*row offset keeps keys of row i
  # strictly below those of row i+1, so sorting only shuffles within rows.
  keys <- runif(length(x)) + rep(seq(n_row) * 10, rep(n_col, n_row))
  matrix(t(x)[order(keys)], n_row, n_col, byrow = TRUE)
}
hierNet.cv <- function(fit, x, y, nfolds=10, folds=NULL, trace=0) {
  # K-fold cross-validation for a fitted hierNet.path object.
  # Refits the path on each training fold (reusing the original fit's tuning
  # parameters) and records held-out error per lambda: squared error for
  # gaussian fits, misclassification (0.5 cutoff) for logistic fits.
  # Returns a "hierNet.cv" object with lamhat (CV minimizer) and lamhat.1se
  # (largest lambda within one SE of the minimum).
  this.call <- match.call()
  stopifnot(class(fit) == "hierNet.path")
  if(fit$type=="gaussian"){errfun=function(y,yhat){(y-yhat)^2}}
  if(fit$type=="logistic"){errfun=function(y,yhat){1*(y!=yhat)}}
  n <- length(y)
  if(is.null(folds)) {
    folds <- split(sample(1:n), rep(1:nfolds, length = n))
  }
  else {
    stopifnot(class(folds)=="list")
    nfolds <- length(folds)
  }
  lamlist=fit$lamlist
  # get whether fit was standardized based on fit$sx and fit$szz...
  if (is.null(fit$mx)) stop("hierNet object was not centered. hierNet.cv has not been written for this (unusual) case.")
  stand.main <- !is.null(fit$sx)
  stand.int <- !is.null(fit$szz)
  n.lamlist <- length(lamlist) ### Set up the data structures
  size <- double(n.lamlist)
  err2=matrix(NA,nrow=nfolds,ncol=length(lamlist))
  for(ii in 1:nfolds) {
    cat("Fold", ii, ":")
    if(fit$type=="gaussian"){
      a <- hierNet.path(x[-folds[[ii]],],y=y[-folds[[ii]]],
                        lamlist=lamlist, delta=fit$delta, diagonal=fit$diagonal, strong=fit$strong, trace=trace,
                        stand.main=stand.main, stand.int=stand.int,
                        rho=fit$rho, niter=fit$niter, sym.eps=fit$sym.eps, # ADMM parameters (which will be NULL if strong=F)
                        step=fit$step, maxiter=fit$maxiter, backtrack=fit$backtrack, tol=fit$tol) # GG descent params
      yhatt=predict.hierNet(a,newx=x[folds[[ii]],])
    }
    if(fit$type=="logistic"){
      a <- hierNet.logistic.path(x[-folds[[ii]],],y=y[-folds[[ii]]],
                                 lamlist=lamlist, delta=fit$delta, diagonal=fit$diagonal, strong=fit$strong,
                                 trace=trace, stand.main=stand.main, stand.int=stand.int,
                                 rho=fit$rho, niter=fit$niter, sym.eps=fit$sym.eps, # ADMM parameters (which will be NULL if strong=F)
                                 step=fit$step, maxiter=fit$maxiter, backtrack=fit$backtrack, tol=fit$tol) # GG descent params
      yhatt=predict.hierNet.logistic(a,newx=x[folds[[ii]],])$yhat
    }
    # replicate held-out y across the lambda columns; note errfun's
    # arguments are swapped relative to its (y, yhat) signature, which is
    # harmless because both error functions are symmetric in their arguments
    temp=matrix(y[folds[[ii]]],nrow=length(folds[[ii]]),ncol=n.lamlist)
    err2[ii,]=colMeans(errfun(yhatt,temp))
    cat("\n")
  }
  errm=colMeans(err2)
  errse=sqrt(apply(err2,2,var)/nfolds)
  o=which.min(errm)
  lamhat=lamlist[o]
  # one-standard-error rule: largest lambda whose error is within one SE
  oo=errm<= errm[o]+errse[o]
  lamhat.1se=lamlist[oo & lamlist>=lamhat][1]
  nonzero=colSums(fit$bp-fit$bn!=0) + apply(fit$th!=0, 3, function(a) sum(diag(a)) + sum((a+t(a)!=0)[upper.tri(a)]))
  obj <- list(lamlist=lamlist, cv.err=errm,cv.se=errse,lamhat=lamhat, lamhat.1se=lamhat.1se,
              nonzero=nonzero, folds=folds,
              call = this.call)
  class(obj) <- "hierNet.cv"
  obj
}
plot.hierNet.cv <- function(x, ...) {
  # Plot CV error (with +/- 1 SE bars) against log(lambda), with the number
  # of nonzero features on the top axis and dotted vertical lines at lamhat
  # and lamhat.1se.
  # NOTE(review): the explicit y-axis ticks at c(0, .2, .4, .6, .8) suit
  # misclassification error; for gaussian fits ticks outside the data range
  # are simply not drawn.
  par(mar = c(5, 5, 5, 1))
  yrang=range(c(x$cv.err-x$cv.se,x$cv.err+x$cv.se))
  plot(log(x$lamlist), x$cv.err, xlab="log(lambda)",
       ylab = "Cross-validation Error", type="n",ylim=yrang)
  axis(3, at = log(x$lamlist), labels = paste(x$nonzero), srt = 90, adj = 0)
  mtext("Number of features", 3, 4, cex = 1.2)
  axis(2, at = c(0, 0.2, 0.4, 0.6, 0.8))
  error.bars(log(x$lamlist), x$cv.err - x$cv.se, x$cv.err + x$cv.se, width = 0.01, col = "darkgrey")
  points(log(x$lamlist), x$cv.err, col=2, pch=19)
  abline(v=log(x$lamhat), lty=3)
  abline(v=log(x$lamhat.1se), lty=3)
  invisible()
}
error.bars <- function(x, upper, lower, width = 0.02, ...) {
  # Draw vertical error bars with horizontal end caps on an existing plot.
  # width is the cap half-width expressed as a fraction of the x range.
  # Returns the combined range of upper and lower.
  half <- diff(range(x)) * width
  segments(x, upper, x, lower, ...)                 # vertical stems
  segments(x - half, upper, x + half, upper, ...)   # top caps
  segments(x - half, lower, x + half, lower, ...)   # bottom caps
  range(upper, lower)
}
hierNet.varimp <- function(fit,x,y, ...) {
  # Leave-one-predictor-out importance for a fitted hierNet model.
  # For each predictor j active in the model (main effect or interaction),
  # refit without column j (warm-started from the current fit) and report
  #   imp_j = (err_without_j - err_full) / err_without_j.
  # Gaussian fits use squared error; logistic fits use binomial deviance
  # computed from the predicted probabilities.
  lam=fit$lam
  if(fit$type=="gaussian"){errfun=function(y,yhat){(y-yhat)^2}}
  if(fit$type=="logistic"){
    errfun=function(y,yhat){
      # deviance with 0*log(0) treated as 0
      term1=y*log(yhat);term1[yhat==0]=0
      term2=(1-y)*log(1-yhat);term2[yhat==1]=0
      val=-sum(term1+term2)
      return(val)
    }}
  yhat=predict(fit,x)
  # BUG FIX: for logistic fits predict() returns list(prob=, yhat=);
  # the deviance errfun needs the probabilities, not the list.
  if(fit$type=="logistic") yhat=yhat$prob
  rss=sum(errfun(y,yhat))
  # a predictor is "active" if its main effect or any interaction is nonzero
  varsum=fit$bp-fit$bn+rowSums(abs(fit$th))
  oo=which(abs(varsum)>1e-6)
  imp=rss2=rep(NA,ncol(x))
  for(j in oo){
    cat(j)
    # warm start: drop predictor j from the current coefficients
    fit0=fit;fit0$bp=fit$bp[-j];fit0$bn=fit$bn[-j];fit0$th=fit$th[-j,-j]
    if(fit$type=="gaussian"){ fit2=hierNet(x[,-j],y,lam,delta=fit$delta,diagonal=fit$diagonal,aa=fit0)}
    if(fit$type=="logistic"){ fit2=hierNet.logistic(x[,-j],y,lam,delta=fit$delta,diagonal=fit$diagonal,aa=fit0)}
    yhat2=predict(fit2,x[,-j])
    if(fit$type=="logistic") yhat2=yhat2$prob
    rss2[j]=sum(errfun(y,yhat2))
    imp[j]=(rss2[j]-rss)/rss2[j]
  }
  # ROBUSTNESS FIX: 'imp[-oo]=0' is a no-op when oo is empty (negative
  # indexing with integer(0) selects nothing), leaving imp all-NA
  imp[!(seq_len(ncol(x)) %in% oo)]=0
  res=cbind(1:ncol(x),round(imp,3))
  ooo=order(-imp)
  dimnames(res)=list(NULL,c("Predictor","Importance"))
  cat("",fill=TRUE)
  return(res[ooo,])
}
|
#######################################################
# Update newly received clinical data for UCSF samples.
# Date: 2018.11.23
# Author: Kevin J.
#######################################################
# Matthew Grimmer provided additional clinical data on November 16th 2018.
ucsf_clinical_sheet <- "/Users/johnsk/Documents/Life-History/ClinicalData/UCSF/2018-1116_glass_wes_clinic_table-costello_Roel.xlsx"
#######################################################
# Necessary packages:
library(tidyverse)
library(openxlsx)
library(DBI)
library(stringr)
#######################################################
# Open the database connection.
con <- DBI::dbConnect(odbc::odbc(), "VerhaakDB")
## Read the raw clinical workbook; it may need some processing before use.
ucsf_clinical <- readWorkbook(ucsf_clinical_sheet, sheet = 1, startRow = 1, colNames = TRUE)
# Pull the current cases and surgeries tables from the database.
cases <- dbReadTable(con, Id(schema = "clinical", table = "cases"))
surgeries <- dbReadTable(con, Id(schema = "clinical", table = "surgeries"))
# Restrict both tables to the UCSF ("GLSS-SF-") patients.
ucsf_cases <- filter(cases, grepl("GLSS-SF-", case_barcode))
ucsf_surgeries <- filter(surgeries, grepl("GLSS-SF-", case_barcode))
# Inspect the column formats expected by the cases table.
str(ucsf_cases)
# Revise these variables to be uploaded to the database.
# Keeps only primary tumors ("TP") with a known age at diagnosis, builds the
# GLASS case barcode (GLSS-SF-<zero-padded patient number>), joins the existing
# DB rows, and derives the replacement values for vital status, overall
# survival (months, rounded) and age at diagnosis (whole years, floored).
# Fixes: superseded mutate_at() replaced with a plain mutate(); str_pad() now
# receives a character pad ("0") as its API specifies.
ucsf_clinical_db <- ucsf_clinical %>%
  filter(tm_sampletype2 == "TP") %>%
  filter(!is.na(age.at.diagnosis)) %>%
  mutate(
    patient_number = str_pad(gsub("patient", "", patientid),
                             width = 4, side = "left", pad = "0"),
    case_barcode = paste("GLSS-SF", patient_number, sep = "-")
  ) %>%
  left_join(ucsf_cases, by = "case_barcode") %>%
  mutate(
    revise_case_vital_status = recode(vital.status, "A" = "alive", "D" = "dead"),
    revise_case_overall_survival_mo = round(as.numeric(overall.survival.mo)),
    revise_case_age_diagnosis_years = floor(age.at.diagnosis)
  )
# Fill in clinical.cases fields that are still NA in the database.
# A single helper replaces three near-identical copy-pasted loops, and
# seq_len(nrow(df)) avoids the 1:dim(df)[1] trap (1:0 iterates c(1, 0) when
# the data frame is empty).
# NOTE(review): values are interpolated with sprintf; acceptable for this
# curated internal data, but parameterized statements (dbBind) would be safer.
#
# con     : open DBI connection
# df      : joined clinical data (one row per case)
# db_col  : clinical.cases column to update (only where currently NA)
# new_col : column of `df` holding the replacement value
update_missing_case_field <- function(con, df, db_col, new_col) {
  for (i in seq_len(nrow(df))) {
    if (is.na(df[[db_col]][i])) {
      rs <- dbSendStatement(con, sprintf(
        "UPDATE clinical.cases SET %s = '%s' WHERE case_barcode = '%s'",
        db_col, df[[new_col]][i], df$case_barcode[i]))
      dbClearResult(rs)
      print(df$case_barcode[i])
      print(df[[new_col]][i])
    }
  }
}
# First update the `case_age_diagnosis_years` variable.
update_missing_case_field(con, ucsf_clinical_db,
                          "case_age_diagnosis_years", "revise_case_age_diagnosis_years")
# Next update the `case_vital_status` variable.
update_missing_case_field(con, ucsf_clinical_db,
                          "case_vital_status", "revise_case_vital_status")
# Finally, update the `case_overall_survival_mo` variable.
update_missing_case_field(con, ucsf_clinical_db,
                          "case_overall_survival_mo", "revise_case_overall_survival_mo")
# NOTE: GLSS-SF-0081 is still missing the `case_overall_survival_mo` variable.
# It's more difficult to amend the surgeries table because of the clinical variables' format.
# Instead of using a loop, the objective is to manually enter each field.
# Builds the sample barcode (GLSS-SF-<padded patient number>-<sample type>).
# Fixes: superseded mutate_at() replaced with a plain mutate(); str_pad() now
# receives a character pad ("0") as its API specifies.
ucsf_surgery_db <- ucsf_clinical %>%
  mutate(
    patient_number = str_pad(gsub("patient", "", patientid),
                             width = 4, side = "left", pad = "0"),
    sample_barcode = paste("GLSS-SF", patient_number, tm_sampletype2, sep = "-")
  )
#########################
# Manually update values.
#########################
# Helper: set one clinical.surgeries column for one sample and immediately
# clear the result handle (an uncleared result leaks and triggers DBI
# warnings on the next statement).
# Factoring the ~60 copy-pasted statement/clear pairs through this helper also
# fixes two original bugs: the treatment_radiation_other updates for
# GLSS-SF-0039-TP and GLSS-SF-0069-TP never assigned their result to `rs`, so
# dbClearResult(rs) cleared a stale handle and the new result was leaked.
set_surgery_value <- function(con, column, value, sample_barcode) {
  rs <- dbSendStatement(con, sprintf(
    "UPDATE clinical.surgeries SET %s = '%s' WHERE sample_barcode = '%s'",
    column, value, sample_barcode))
  dbClearResult(rs)
}
### surgical_interval_mo
set_surgery_value(con, "surgical_interval_mo", "4", "GLSS-SF-0131-R1")
set_surgery_value(con, "surgical_interval_mo", "26", "GLSS-SF-0157-R1")
set_surgery_value(con, "surgical_interval_mo", "63", "GLSS-SF-0334-R1")
set_surgery_value(con, "surgical_interval_mo", "18", "GLSS-SF-0339-R1")
set_surgery_value(con, "surgical_interval_mo", "43", "GLSS-SF-0060-R1")
set_surgery_value(con, "surgical_interval_mo", "39", "GLSS-SF-0081-R1")
### histology
set_surgery_value(con, "histology", "Glioblastoma", "GLSS-SF-0131-TP")
set_surgery_value(con, "histology", "Glioblastoma", "GLSS-SF-0131-R1")
set_surgery_value(con, "histology", "Oligoastrocytoma", "GLSS-SF-0157-TP")
set_surgery_value(con, "histology", "Oligoastrocytoma", "GLSS-SF-0157-R1")
set_surgery_value(con, "histology", "Oligodendroglioma", "GLSS-SF-0334-TP")
set_surgery_value(con, "histology", "Oligodendroglioma", "GLSS-SF-0334-R1")
set_surgery_value(con, "histology", "Astrocytoma", "GLSS-SF-0338-TP")
set_surgery_value(con, "histology", "Astrocytoma", "GLSS-SF-0338-R1")
set_surgery_value(con, "histology", "Astrocytoma", "GLSS-SF-0339-TP")
set_surgery_value(con, "histology", "Astrocytoma", "GLSS-SF-0339-R1")
set_surgery_value(con, "histology", "Astrocytoma", "GLSS-SF-0081-TP")
set_surgery_value(con, "histology", "Astrocytoma", "GLSS-SF-0081-R1")
### who_classification
### grade
set_surgery_value(con, "grade", "IV", "GLSS-SF-0131-TP")
set_surgery_value(con, "grade", "IV", "GLSS-SF-0131-R1")
set_surgery_value(con, "grade", "II", "GLSS-SF-0157-TP")
set_surgery_value(con, "grade", "IV", "GLSS-SF-0157-R1")
set_surgery_value(con, "grade", "II", "GLSS-SF-0159-R1")
set_surgery_value(con, "grade", "II", "GLSS-SF-0334-TP")
set_surgery_value(con, "grade", "II", "GLSS-SF-0334-R1")
set_surgery_value(con, "grade", "II", "GLSS-SF-0338-TP")
set_surgery_value(con, "grade", "III", "GLSS-SF-0338-R1")
set_surgery_value(con, "grade", "II", "GLSS-SF-0339-TP")
set_surgery_value(con, "grade", "II", "GLSS-SF-0339-R1")
set_surgery_value(con, "grade", "II", "GLSS-SF-0065-TP")
set_surgery_value(con, "grade", "II", "GLSS-SF-0081-TP")
set_surgery_value(con, "grade", "II", "GLSS-SF-0081-R1")
### treatment_tmz
set_surgery_value(con, "treatment_tmz", "1", "GLSS-SF-0131-TP")
set_surgery_value(con, "treatment_tmz", "1", "GLSS-SF-0157-TP")
set_surgery_value(con, "treatment_tmz", "1", "GLSS-SF-0334-TP")
set_surgery_value(con, "treatment_tmz", "1", "GLSS-SF-0338-TP")
set_surgery_value(con, "treatment_tmz", "0", "GLSS-SF-0339-TP")
set_surgery_value(con, "treatment_tmz", "1", "GLSS-SF-0081-TP")
### treatment_chemotherapy_other / treatment_chemotherapy_other_cycles
set_surgery_value(con, "treatment_chemotherapy_other", "Irinotecan, Optune, CBD", "GLSS-SF-0131-TP")
set_surgery_value(con, "treatment_chemotherapy_other_cycles", "2", "GLSS-SF-0131-TP")
set_surgery_value(con, "treatment_chemotherapy_other", "Lomustine", "GLSS-SF-0157-TP")
set_surgery_value(con, "treatment_chemotherapy_other_cycles", "12", "GLSS-SF-0157-TP")
set_surgery_value(con, "treatment_chemotherapy_other", "Lomustine", "GLSS-SF-0170-TP")
set_surgery_value(con, "treatment_chemotherapy_other_cycles", "12, 10", "GLSS-SF-0170-TP")
set_surgery_value(con, "treatment_chemotherapy_other", "Steroids", "GLSS-SF-0032-TP")
set_surgery_value(con, "treatment_chemotherapy_other_cycles", "4", "GLSS-SF-0032-TP")
set_surgery_value(con, "treatment_chemotherapy_other", "Everolimus", "GLSS-SF-0039-TP")
set_surgery_value(con, "treatment_chemotherapy_other_cycles", "12", "GLSS-SF-0039-TP")
set_surgery_value(con, "treatment_chemotherapy_other", "Lomustine", "GLSS-SF-0081-TP")
### treatment_radiotherapy / treatment_radiation_other
set_surgery_value(con, "treatment_radiotherapy", "1", "GLSS-SF-0131-TP")
set_surgery_value(con, "treatment_radiotherapy", "1", "GLSS-SF-0157-TP")
set_surgery_value(con, "treatment_radiation_other", "IMRT", "GLSS-SF-0157-TP")
set_surgery_value(con, "treatment_radiotherapy", "1", "GLSS-SF-0159-TP")
set_surgery_value(con, "treatment_radiation_other", "IMRT", "GLSS-SF-0159-TP")
set_surgery_value(con, "treatment_radiotherapy", "1", "GLSS-SF-0032-TP")
set_surgery_value(con, "treatment_radiation_other", "IMRT", "GLSS-SF-0032-TP")
set_surgery_value(con, "treatment_radiotherapy", "1", "GLSS-SF-0338-TP")
set_surgery_value(con, "treatment_radiotherapy", "1", "GLSS-SF-0039-TP")
set_surgery_value(con, "treatment_radiation_other", "IMRT", "GLSS-SF-0039-TP")
set_surgery_value(con, "treatment_radiotherapy", "1", "GLSS-SF-0060-TP")
set_surgery_value(con, "treatment_radiation_other", "Proton beam", "GLSS-SF-0060-TP")
set_surgery_value(con, "treatment_radiotherapy", "1", "GLSS-SF-0069-TP")
set_surgery_value(con, "treatment_radiation_other", "IMRT", "GLSS-SF-0069-TP")
set_surgery_value(con, "treatment_radiotherapy", "1", "GLSS-SF-0081-TP")
## Update who_classification from histology + grade + molecular status.
# Helper: assign one WHO 2016 classification to every surgeries row matching
# the given WHERE clause, clearing the result handle immediately.
# NOTE(review): the clause strings are interpolated verbatim; they are fixed
# literals here, so no quoting issues arise.
set_who_classification <- function(con, classification, where_clause) {
  rs <- dbSendStatement(con, sprintf(
    "UPDATE clinical.surgeries SET who_classification = '%s' WHERE %s",
    classification, where_clause))
  dbClearResult(rs)
}
# IDH status known (Astrocytoma / Glioblastoma histologies).
set_who_classification(con, "Diffuse Astrocytoma, IDH-mutant",
  "histology = 'Astrocytoma' AND grade = 'II' AND idh_status = 'IDHmut'")
set_who_classification(con, "Diffuse Astrocytoma, IDH-wildtype",
  "histology = 'Astrocytoma' AND grade = 'II' AND idh_status = 'IDHwt'")
set_who_classification(con, "Anaplastic Astrocytoma, IDH-mutant",
  "histology = 'Astrocytoma' AND grade = 'III' AND idh_status = 'IDHmut'")
set_who_classification(con, "Anaplastic Astrocytoma, IDH-wildtype",
  "histology = 'Astrocytoma' AND grade = 'III' AND idh_status = 'IDHwt'")
set_who_classification(con, "Glioblastoma, IDH-wildtype",
  "histology = 'Glioblastoma' AND grade = 'IV' AND idh_status = 'IDHwt'")
set_who_classification(con, "Glioblastoma, IDH-mutant",
  "histology = 'Glioblastoma' AND grade = 'IV' AND idh_status = 'IDHmut'")
# IDH-mutant, 1p/19q-codeleted cases are Oligodendroglioma regardless of histology.
set_who_classification(con, "Oligodendroglioma, IDH-mutant and 1p/19q-codeleted",
  "histology = 'Oligodendroglioma' AND grade = 'II' AND idh_status = 'IDHmut' AND codel_status = 'codel'")
set_who_classification(con, "Anaplastic Oligodendroglioma, IDH-mutant and 1p/19q-codeleted",
  "histology = 'Oligodendroglioma' AND grade = 'III' AND idh_status = 'IDHmut' AND codel_status = 'codel'")
set_who_classification(con, "Oligodendroglioma, IDH-mutant and 1p/19q-codeleted",
  "histology IS NOT NULL AND grade = 'II' AND idh_status = 'IDHmut' AND codel_status = 'codel'")
set_who_classification(con, "Anaplastic Oligodendroglioma, IDH-mutant and 1p/19q-codeleted",
  "histology IS NOT NULL AND (grade = 'III' OR grade = 'IV') AND idh_status = 'IDHmut' AND codel_status = 'codel'")
# Non-codeleted Oligoastrocytoma / Oligodendroglioma reclassify as Astrocytoma.
set_who_classification(con, "Diffuse Astrocytoma, IDH-mutant",
  "histology = 'Oligoastrocytoma' AND grade = 'II' AND idh_status = 'IDHmut' AND codel_status = 'noncodel'")
set_who_classification(con, "Diffuse Astrocytoma, IDH-wildtype",
  "histology = 'Oligoastrocytoma' AND grade = 'II' AND idh_status = 'IDHwt' AND codel_status = 'noncodel'")
set_who_classification(con, "Anaplastic Astrocytoma, IDH-mutant",
  "histology = 'Oligoastrocytoma' AND grade = 'III' AND idh_status = 'IDHmut' AND codel_status = 'noncodel'")
set_who_classification(con, "Anaplastic Astrocytoma, IDH-wildtype",
  "histology = 'Oligoastrocytoma' AND grade = 'III' AND idh_status = 'IDHwt' AND codel_status = 'noncodel'")
set_who_classification(con, "Diffuse Astrocytoma, IDH-mutant",
  "histology = 'Oligodendroglioma' AND grade = 'II' AND idh_status = 'IDHmut' AND codel_status = 'noncodel'")
set_who_classification(con, "Diffuse Astrocytoma, IDH-wildtype",
  "histology = 'Oligodendroglioma' AND grade = 'II' AND idh_status = 'IDHwt' AND codel_status = 'noncodel'")
set_who_classification(con, "Anaplastic Astrocytoma, IDH-mutant",
  "histology = 'Oligodendroglioma' AND grade = 'III' AND idh_status = 'IDHmut' AND codel_status = 'noncodel'")
set_who_classification(con, "Anaplastic Astrocytoma, IDH-wildtype",
  "histology = 'Oligodendroglioma' AND grade = 'III' AND idh_status = 'IDHwt' AND codel_status = 'noncodel'")
# No molecular data available: "NOS" (not otherwise specified) classifications.
set_who_classification(con, "Diffuse Astrocytoma, NOS",
  "histology = 'Astrocytoma' AND grade = 'II' AND idh_status IS NULL AND codel_status IS NULL")
set_who_classification(con, "Anaplastic Astrocytoma, NOS",
  "histology = 'Astrocytoma' AND grade = 'III' AND idh_status IS NULL AND codel_status IS NULL")
set_who_classification(con, "Oligodendroglioma, NOS",
  "histology = 'Oligodendroglioma' AND grade = 'II' AND idh_status IS NULL AND codel_status IS NULL")
set_who_classification(con, "Anaplastic Oligodendroglioma, NOS",
  "histology = 'Oligodendroglioma' AND (grade = 'III' OR grade = 'IV') AND idh_status IS NULL AND codel_status IS NULL")
set_who_classification(con, "Glioblastoma, NOS",
  "histology = 'Glioblastoma' AND grade = 'IV' AND idh_status IS NULL AND codel_status IS NULL")
set_who_classification(con, "Oligoastrocytoma, NOS",
  "histology = 'Oligoastrocytoma' AND grade = 'II' AND idh_status IS NULL AND codel_status IS NULL")
set_who_classification(con, "Anaplastic Oligoastrocytoma, NOS",
  "histology = 'Oligoastrocytoma' AND grade = 'III' AND idh_status IS NULL AND codel_status IS NULL")
| /R/preprocess/ucsf-clinical-update.R | permissive | fpbarthel/GLASS | R | false | false | 19,651 | r | #######################################################
# Update newly received clinical data for UCSF samples.
# Date: 2018.11.23
# Author: Kevin J.
#######################################################
# Matthew Grimmer provided additional clinical data on November 16th 2018.
ucsf_clinical_sheet = '/Users/johnsk/Documents/Life-History/ClinicalData/UCSF/2018-1116_glass_wes_clinic_table-costello_Roel.xlsx'
#######################################################
# Necessary packages:
library(tidyverse)
library(openxlsx)
library(DBI)
library(stringr)
#######################################################
# Establish connection with the database.
con <- DBI::dbConnect(odbc::odbc(), "VerhaakDB")
## Load in clinical data, it may require some processing before use.
ucsf_clinical = readWorkbook(ucsf_clinical_sheet, sheet = 1, startRow = 1, colNames = TRUE)
# Retrieve the case_sources and biospecimen_aliquots from the Database.
cases = dbReadTable(con, Id(schema="clinical",table="cases"))
surgeries = dbReadTable(con, Id(schema="clinical",table="surgeries"))
# Subset the cases and surgeries tables to just the patients from UCSF.
ucsf_cases = cases %>%
filter(grepl("GLSS-SF-", case_barcode))
ucsf_surgeries = surgeries %>%
filter(grepl("GLSS-SF-", case_barcode))
# Gather the format of the variables to be uploaded to the cases table.
str(ucsf_cases)
# Revise these variables to be uploaded to the database.
ucsf_clinical_db = ucsf_clinical %>%
filter(tm_sampletype2 == "TP") %>%
filter(!(is.na(age.at.diagnosis))) %>%
mutate(patient_number = gsub("patient", "", patientid)) %>%
mutate_at("patient_number", str_pad, width = 4, side='left', pad = 0) %>%
mutate(case_barcode = paste("GLSS-SF", patient_number, sep="-")) %>%
left_join(ucsf_cases, by="case_barcode") %>%
mutate(revise_case_vital_status = recode(vital.status, "A" = "alive", "D"="dead"),
revise_case_overall_survival_mo = round(as.numeric(overall.survival.mo)),
revise_case_age_diagnosis_years = floor(age.at.diagnosis))
# First update the `case_age_diagnosis_years` variable.
for (i in 1:dim(ucsf_clinical_db)[1]){
if(is.na(ucsf_clinical_db$case_age_diagnosis_years[i])){
rs = dbSendStatement(con, sprintf("UPDATE clinical.cases SET case_age_diagnosis_years = '%s' WHERE case_barcode = '%s'", ucsf_clinical_db$revise_case_age_diagnosis_years[i], ucsf_clinical_db$case_barcode[i]))
dbClearResult(rs)
print(ucsf_clinical_db$case_barcode[i])
print(ucsf_clinical_db$revise_case_age_diagnosis_years[i])
}
}
# Next update the `case_vital_status` variable.
for (i in 1:dim(ucsf_clinical_db)[1]){
if(is.na(ucsf_clinical_db$case_vital_status[i])){
rs = dbSendStatement(con, sprintf("UPDATE clinical.cases SET case_vital_status = '%s' WHERE case_barcode = '%s'", ucsf_clinical_db$revise_case_vital_status[i], ucsf_clinical_db$case_barcode[i]))
dbClearResult(rs)
print(ucsf_clinical_db$case_barcode[i])
print(ucsf_clinical_db$revise_case_vital_status[i])
}
}
# Finally, update the `case_overall_survival_mo` variable.
for (i in 1:dim(ucsf_clinical_db)[1]){
if(is.na(ucsf_clinical_db$case_overall_survival_mo[i])){
rs = dbSendStatement(con, sprintf("UPDATE clinical.cases SET case_overall_survival_mo = '%s' WHERE case_barcode = '%s'", ucsf_clinical_db$revise_case_overall_survival_mo[i], ucsf_clinical_db$case_barcode[i]))
dbClearResult(rs)
print(ucsf_clinical_db$case_barcode[i])
print(ucsf_clinical_db$revise_case_overall_survival_mo[i])
}
}
# NOTE: GLSS-SF-0081 is still missing the `case_overall_survival_mo` variable.
# It's more difficult to amend the surgeries table because of the clinical variables' format.
# Instead of using a loop, the objective is to manually enter each field.
ucsf_surgery_db = ucsf_clinical %>%
mutate(patient_number = gsub("patient", "", patientid)) %>%
mutate_at("patient_number", str_pad, width = 4, side='left', pad = 0) %>%
mutate(sample_barcode = paste("GLSS-SF", patient_number, tm_sampletype2, sep="-"))
#########################
# Manually update values. Use dbClearResult to prevent error.
#########################
### surgical_interval_mo
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET surgical_interval_mo = '4' WHERE sample_barcode = 'GLSS-SF-0131-R1'")
dbClearResult(rs)
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET surgical_interval_mo = '26' WHERE sample_barcode = 'GLSS-SF-0157-R1'")
dbClearResult(rs)
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET surgical_interval_mo = '63' WHERE sample_barcode = 'GLSS-SF-0334-R1'")
dbClearResult(rs)
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET surgical_interval_mo = '18' WHERE sample_barcode = 'GLSS-SF-0339-R1'")
dbClearResult(rs)
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET surgical_interval_mo = '43' WHERE sample_barcode = 'GLSS-SF-0060-R1'")
dbClearResult(rs)
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET surgical_interval_mo = '39' WHERE sample_barcode = 'GLSS-SF-0081-R1'")
dbClearResult(rs)
### histology
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET histology = 'Glioblastoma' WHERE sample_barcode = 'GLSS-SF-0131-TP'")
dbClearResult(rs)
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET histology = 'Glioblastoma' WHERE sample_barcode = 'GLSS-SF-0131-R1'")
dbClearResult(rs)
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET histology = 'Oligoastrocytoma' WHERE sample_barcode = 'GLSS-SF-0157-TP'")
dbClearResult(rs)
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET histology = 'Oligoastrocytoma' WHERE sample_barcode = 'GLSS-SF-0157-R1'")
dbClearResult(rs)
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET histology = 'Oligodendroglioma' WHERE sample_barcode = 'GLSS-SF-0334-TP'")
dbClearResult(rs)
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET histology = 'Oligodendroglioma' WHERE sample_barcode = 'GLSS-SF-0334-R1'")
dbClearResult(rs)
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET histology = 'Astrocytoma' WHERE sample_barcode = 'GLSS-SF-0338-TP'")
dbClearResult(rs)
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET histology = 'Astrocytoma' WHERE sample_barcode = 'GLSS-SF-0338-R1'")
dbClearResult(rs)
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET histology = 'Astrocytoma' WHERE sample_barcode = 'GLSS-SF-0339-TP'")
dbClearResult(rs)
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET histology = 'Astrocytoma' WHERE sample_barcode = 'GLSS-SF-0339-R1'")
dbClearResult(rs)
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET histology = 'Astrocytoma' WHERE sample_barcode = 'GLSS-SF-0081-TP'")
dbClearResult(rs)
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET histology = 'Astrocytoma' WHERE sample_barcode = 'GLSS-SF-0081-R1'")
dbClearResult(rs)
### who_classification
### grade
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET grade = 'IV' WHERE sample_barcode = 'GLSS-SF-0131-TP'")
dbClearResult(rs)
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET grade = 'IV' WHERE sample_barcode = 'GLSS-SF-0131-R1'")
dbClearResult(rs)
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET grade = 'II' WHERE sample_barcode = 'GLSS-SF-0157-TP'")
dbClearResult(rs)
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET grade = 'IV' WHERE sample_barcode = 'GLSS-SF-0157-R1'")
dbClearResult(rs)
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET grade = 'II' WHERE sample_barcode = 'GLSS-SF-0159-R1'")
dbClearResult(rs)
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET grade = 'II' WHERE sample_barcode = 'GLSS-SF-0334-TP'")
dbClearResult(rs)
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET grade = 'II' WHERE sample_barcode = 'GLSS-SF-0334-R1'")
dbClearResult(rs)
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET grade = 'II' WHERE sample_barcode = 'GLSS-SF-0338-TP'")
dbClearResult(rs)
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET grade = 'III' WHERE sample_barcode = 'GLSS-SF-0338-R1'")
dbClearResult(rs)
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET grade = 'II' WHERE sample_barcode = 'GLSS-SF-0339-TP'")
dbClearResult(rs)
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET grade = 'II' WHERE sample_barcode = 'GLSS-SF-0339-R1'")
dbClearResult(rs)
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET grade = 'II' WHERE sample_barcode = 'GLSS-SF-0065-TP'")
dbClearResult(rs)
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET grade = 'II' WHERE sample_barcode = 'GLSS-SF-0081-TP'")
dbClearResult(rs)
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET grade = 'II' WHERE sample_barcode = 'GLSS-SF-0081-R1'")
dbClearResult(rs)
### treatment_tmz
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET treatment_tmz = '1' WHERE sample_barcode = 'GLSS-SF-0131-TP'")
dbClearResult(rs)
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET treatment_tmz = '1' WHERE sample_barcode = 'GLSS-SF-0157-TP'")
dbClearResult(rs)
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET treatment_tmz = '1' WHERE sample_barcode = 'GLSS-SF-0334-TP'")
dbClearResult(rs)
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET treatment_tmz = '1' WHERE sample_barcode = 'GLSS-SF-0338-TP'")
dbClearResult(rs)
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET treatment_tmz = '0' WHERE sample_barcode = 'GLSS-SF-0339-TP'")
dbClearResult(rs)
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET treatment_tmz = '1' WHERE sample_barcode = 'GLSS-SF-0081-TP'")
dbClearResult(rs)
### treatment_chemotherapy_other") treatment_chemotherapy_other_cycles
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET treatment_chemotherapy_other = 'Irinotecan, Optune, CBD' WHERE sample_barcode = 'GLSS-SF-0131-TP'")
dbClearResult(rs)
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET treatment_chemotherapy_other_cycles = '2' WHERE sample_barcode = 'GLSS-SF-0131-TP'")
dbClearResult(rs)
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET treatment_chemotherapy_other = 'Lomustine' WHERE sample_barcode = 'GLSS-SF-0157-TP'")
dbClearResult(rs)
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET treatment_chemotherapy_other_cycles = '12' WHERE sample_barcode = 'GLSS-SF-0157-TP'")
dbClearResult(rs)
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET treatment_chemotherapy_other = 'Lomustine' WHERE sample_barcode = 'GLSS-SF-0170-TP'")
dbClearResult(rs)
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET treatment_chemotherapy_other_cycles = '12, 10' WHERE sample_barcode = 'GLSS-SF-0170-TP'")
dbClearResult(rs)
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET treatment_chemotherapy_other = 'Steroids' WHERE sample_barcode = 'GLSS-SF-0032-TP'")
dbClearResult(rs)
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET treatment_chemotherapy_other_cycles = '4' WHERE sample_barcode = 'GLSS-SF-0032-TP'")
dbClearResult(rs)
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET treatment_chemotherapy_other = 'Everolimus' WHERE sample_barcode = 'GLSS-SF-0039-TP'")
dbClearResult(rs)
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET treatment_chemotherapy_other_cycles = '12' WHERE sample_barcode = 'GLSS-SF-0039-TP'")
dbClearResult(rs)
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET treatment_chemotherapy_other = 'Lomustine' WHERE sample_barcode = 'GLSS-SF-0081-TP'")
dbClearResult(rs)
### treatment_radiotherapy") treatment_radiation_other
rs = dbSendStatement(con, "UPDATE clinical.surgeries SET treatment_radiotherapy = '1' WHERE sample_barcode = 'GLSS-SF-0131-TP'")
dbClearResult(rs)
## Flag radiotherapy-treated GLSS-SF samples and record the radiation
## modality where known.
## BUG FIX: the original sent two statements (for GLSS-SF-0039-TP and
## GLSS-SF-0069-TP) without capturing the result, so `dbClearResult(rs)`
## cleared a stale result and the new pending result leaked.  Every
## statement result is now captured and cleared immediately.
run_surgeries_update <- function(con, sql) {
  # Send a single UPDATE and release its result set right away.
  rs <- dbSendStatement(con, sql)
  dbClearResult(rs)
  invisible(NULL)
}
# sample_barcode -> radiation modality (NA = set the radiotherapy flag only)
radiation_modality <- c(
  "GLSS-SF-0157-TP" = "IMRT",
  "GLSS-SF-0159-TP" = "IMRT",
  "GLSS-SF-0032-TP" = "IMRT",
  "GLSS-SF-0338-TP" = NA,
  "GLSS-SF-0039-TP" = "IMRT",
  "GLSS-SF-0060-TP" = "Proton beam",
  "GLSS-SF-0069-TP" = "IMRT",
  "GLSS-SF-0081-TP" = NA
)
for (barcode in names(radiation_modality)) {
  run_surgeries_update(con, sprintf(
    "UPDATE clinical.surgeries SET treatment_radiotherapy = '1' WHERE sample_barcode = '%s'",
    barcode))
  modality <- radiation_modality[[barcode]]
  if (!is.na(modality)) {
    run_surgeries_update(con, sprintf(
      "UPDATE clinical.surgeries SET treatment_radiation_other = '%s' WHERE sample_barcode = '%s'",
      modality, barcode))
  }
}
## Update who_classification (WHO integrated diagnosis) from histology,
## grade, IDH mutation status and 1p/19q codeletion status.
## The rules are data-driven and applied IN ORDER, exactly mirroring the
## original statement sequence: where WHERE clauses overlap, a later rule
## overwrites the classification set by an earlier one (e.g. the
## "histology IS NOT NULL" codel rules and the NOS rules).
who_rules <- list(
  c("Diffuse Astrocytoma, IDH-mutant",
    "histology = 'Astrocytoma' AND grade = 'II' AND idh_status = 'IDHmut'"),
  c("Diffuse Astrocytoma, IDH-wildtype",
    "histology = 'Astrocytoma' AND grade = 'II' AND idh_status = 'IDHwt'"),
  c("Anaplastic Astrocytoma, IDH-mutant",
    "histology = 'Astrocytoma' AND grade = 'III' AND idh_status = 'IDHmut'"),
  c("Anaplastic Astrocytoma, IDH-wildtype",
    "histology = 'Astrocytoma' AND grade = 'III' AND idh_status = 'IDHwt'"),
  c("Glioblastoma, IDH-wildtype",
    "histology = 'Glioblastoma' AND grade = 'IV' AND idh_status = 'IDHwt'"),
  c("Glioblastoma, IDH-mutant",
    "histology = 'Glioblastoma' AND grade = 'IV' AND idh_status = 'IDHmut'"),
  c("Oligodendroglioma, IDH-mutant and 1p/19q-codeleted",
    "histology = 'Oligodendroglioma' AND grade = 'II' AND idh_status = 'IDHmut' AND codel_status = 'codel'"),
  c("Anaplastic Oligodendroglioma, IDH-mutant and 1p/19q-codeleted",
    "histology = 'Oligodendroglioma' AND grade = 'III' AND idh_status = 'IDHmut' AND codel_status = 'codel'"),
  # any histology with a codeletion is (re)classified as oligodendroglioma
  c("Oligodendroglioma, IDH-mutant and 1p/19q-codeleted",
    "histology IS NOT NULL AND grade = 'II' AND idh_status = 'IDHmut' AND codel_status = 'codel'"),
  c("Anaplastic Oligodendroglioma, IDH-mutant and 1p/19q-codeleted",
    "histology IS NOT NULL AND (grade = 'III' OR grade = 'IV') AND idh_status = 'IDHmut' AND codel_status = 'codel'"),
  c("Diffuse Astrocytoma, IDH-mutant",
    "histology = 'Oligoastrocytoma' AND grade = 'II' AND idh_status = 'IDHmut' AND codel_status = 'noncodel'"),
  c("Diffuse Astrocytoma, IDH-wildtype",
    "histology = 'Oligoastrocytoma' AND grade = 'II' AND idh_status = 'IDHwt' AND codel_status = 'noncodel'"),
  c("Anaplastic Astrocytoma, IDH-mutant",
    "histology = 'Oligoastrocytoma' AND grade = 'III' AND idh_status = 'IDHmut' AND codel_status = 'noncodel'"),
  c("Anaplastic Astrocytoma, IDH-wildtype",
    "histology = 'Oligoastrocytoma' AND grade = 'III' AND idh_status = 'IDHwt' AND codel_status = 'noncodel'"),
  c("Diffuse Astrocytoma, IDH-mutant",
    "histology = 'Oligodendroglioma' AND grade = 'II' AND idh_status = 'IDHmut' AND codel_status = 'noncodel'"),
  c("Diffuse Astrocytoma, IDH-wildtype",
    "histology = 'Oligodendroglioma' AND grade = 'II' AND idh_status = 'IDHwt' AND codel_status = 'noncodel'"),
  c("Anaplastic Astrocytoma, IDH-mutant",
    "histology = 'Oligodendroglioma' AND grade = 'III' AND idh_status = 'IDHmut' AND codel_status = 'noncodel'"),
  c("Anaplastic Astrocytoma, IDH-wildtype",
    "histology = 'Oligodendroglioma' AND grade = 'III' AND idh_status = 'IDHwt' AND codel_status = 'noncodel'"),
  # NOS ("not otherwise specified") when molecular status is unavailable
  c("Diffuse Astrocytoma, NOS",
    "histology = 'Astrocytoma' AND grade = 'II' AND idh_status IS NULL AND codel_status IS NULL"),
  c("Anaplastic Astrocytoma, NOS",
    "histology = 'Astrocytoma' AND grade = 'III' AND idh_status IS NULL AND codel_status IS NULL"),
  c("Oligodendroglioma, NOS",
    "histology = 'Oligodendroglioma' AND grade = 'II' AND idh_status IS NULL AND codel_status IS NULL"),
  c("Anaplastic Oligodendroglioma, NOS",
    "histology = 'Oligodendroglioma' AND (grade = 'III' OR grade = 'IV') AND idh_status IS NULL AND codel_status IS NULL"),
  c("Glioblastoma, NOS",
    "histology = 'Glioblastoma' AND grade = 'IV' AND idh_status IS NULL AND codel_status IS NULL"),
  c("Oligoastrocytoma, NOS",
    "histology = 'Oligoastrocytoma' AND grade = 'II' AND idh_status IS NULL AND codel_status IS NULL"),
  c("Anaplastic Oligoastrocytoma, NOS",
    "histology = 'Oligoastrocytoma' AND grade = 'III' AND idh_status IS NULL AND codel_status IS NULL")
)
for (rule in who_rules) {
  rs <- dbSendStatement(con, sprintf(
    "UPDATE clinical.surgeries SET who_classification = '%s' WHERE %s",
    rule[[1]], rule[[2]]))
  dbClearResult(rs)
}
|
# test_helper.R
# test some of the helper functions
# NOTE(review): sortIndex(), rankIndex(), reprow() and repcol() are package
# helpers defined elsewhere; judging by the "- 1" in the expected values,
# the returned indices appear to be zero-based -- confirm against the source.
test_that("Check if sortIndex and rankIndex offset each other", {
# Round-trip property: sorting the negated ranks of a sort order
# should reproduce the original sort order.
u = matrix(runif(12), nrow=4, ncol=3)
expect_true(all(sortIndex(-rankIndex(sortIndex(u))) == sortIndex(u)))
})
test_that("Check if sortIndex works", {
# Expected indices are listed per column (each column sorted independently).
u = matrix(c(3, 2, 8, 1, 12, 2, 9, 2, 13, 5, 3.1, 2.1), nrow=4, ncol=3)
expect_true(all(sortIndex(u) == c(3,1,2,4,1,3,2,4,1,2,3,4)-1))
})
test_that("Check if rankIndex works", {
u = matrix(c(2, 2, 1, 2, 1, 0, 0, 1, 0, 1, 2, 0), nrow=3, ncol=4, byrow = TRUE)
expect_true(all(rankIndex(u) == matrix(c(2, 1, 1, 2, 1, 2, 0, 1, 0, 0, 2, 0), nrow=3, ncol=4, byrow = TRUE)))
})
test_that("Check reprow", {
# reprow(x, n): repeat each row; n is a scalar or a per-row count vector.
x = matrix(c(2, 4, 5,
3, 2, 1), ncol = 3, byrow = TRUE)
y = reprow(x, 2)
expect_true(identical(y, rbind(x[1,], x[1,], x[2,], x[2,])))
y = reprow(x, c(2, 2))
expect_true(identical(y, rbind(x[1,], x[1,], x[2,], x[2,])))
y = reprow(x, c(2, 1))
expect_true(identical(y, rbind(x[1,], x[1,], x[2,])))
})
test_that("Check repcol", {
# repcol(x, n): repeat each column; n is a scalar or a per-column count vector.
x = matrix(c(2, 4, 5,
3, 2, 1), ncol = 3, byrow = TRUE)
y = repcol(x, 2)
expect_true(identical(y, cbind(x[,1], x[,1], x[,2], x[,2], x[,3], x[,3])))
y = repcol(x, c(2, 2, 2))
expect_true(identical(y, cbind(x[,1], x[,1], x[,2], x[,2], x[,3], x[,3])))
y = repcol(x, c(2, 3, 1))
expect_true(identical(y, cbind(x[,1], x[,1], x[,2], x[,2], x[,2], x[,3])))
})
# test some of the helper functions
# NOTE(review): sortIndex(), rankIndex(), reprow() and repcol() are package
# helpers defined elsewhere; judging by the "- 1" in the expected values,
# the returned indices appear to be zero-based -- confirm against the source.
test_that("Check if sortIndex and rankIndex offset each other", {
# Round-trip property: sorting the negated ranks of a sort order
# should reproduce the original sort order.
u = matrix(runif(12), nrow=4, ncol=3)
expect_true(all(sortIndex(-rankIndex(sortIndex(u))) == sortIndex(u)))
})
test_that("Check if sortIndex works", {
# Expected indices are listed per column (each column sorted independently).
u = matrix(c(3, 2, 8, 1, 12, 2, 9, 2, 13, 5, 3.1, 2.1), nrow=4, ncol=3)
expect_true(all(sortIndex(u) == c(3,1,2,4,1,3,2,4,1,2,3,4)-1))
})
test_that("Check if rankIndex works", {
u = matrix(c(2, 2, 1, 2, 1, 0, 0, 1, 0, 1, 2, 0), nrow=3, ncol=4, byrow = TRUE)
expect_true(all(rankIndex(u) == matrix(c(2, 1, 1, 2, 1, 2, 0, 1, 0, 0, 2, 0), nrow=3, ncol=4, byrow = TRUE)))
})
test_that("Check reprow", {
# reprow(x, n): repeat each row; n is a scalar or a per-row count vector.
x = matrix(c(2, 4, 5,
3, 2, 1), ncol = 3, byrow = TRUE)
y = reprow(x, 2)
expect_true(identical(y, rbind(x[1,], x[1,], x[2,], x[2,])))
y = reprow(x, c(2, 2))
expect_true(identical(y, rbind(x[1,], x[1,], x[2,], x[2,])))
y = reprow(x, c(2, 1))
expect_true(identical(y, rbind(x[1,], x[1,], x[2,])))
})
test_that("Check repcol", {
# repcol(x, n): repeat each column; n is a scalar or a per-column count vector.
x = matrix(c(2, 4, 5,
3, 2, 1), ncol = 3, byrow = TRUE)
y = repcol(x, 2)
expect_true(identical(y, cbind(x[,1], x[,1], x[,2], x[,2], x[,3], x[,3])))
y = repcol(x, c(2, 2, 2))
expect_true(identical(y, cbind(x[,1], x[,1], x[,2], x[,2], x[,3], x[,3])))
y = repcol(x, c(2, 3, 1))
expect_true(identical(y, cbind(x[,1], x[,1], x[,2], x[,2], x[,2], x[,3])))
})
# Auto-extracted Rd example for NISTunits::NISTinchPerSecTOmeterPerSec.
library(NISTunits)
### Name: NISTinchPerSecTOmeterPerSec
### Title: Convert inch per second to meter per second
### Aliases: NISTinchPerSecTOmeterPerSec
### Keywords: programming
### ** Examples
# 10 in/s -> 0.254 m/s (1 in = 0.0254 m exactly)
NISTinchPerSecTOmeterPerSec(10)
| /data/genthat_extracted_code/NISTunits/examples/NISTinchPerSecTOmeterPerSec.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 234 | r | library(NISTunits)
# Auto-extracted Rd example for NISTunits::NISTinchPerSecTOmeterPerSec
# (the NISTunits package must already be attached).
### Name: NISTinchPerSecTOmeterPerSec
### Title: Convert inch per second to meter per second
### Aliases: NISTinchPerSecTOmeterPerSec
### Keywords: programming
### ** Examples
# 10 in/s -> 0.254 m/s (1 in = 0.0254 m exactly)
NISTinchPerSecTOmeterPerSec(10)
|
#' Populate a shareable Shiny viewer with study results
#'
#' Copies the PLPViewer Shiny application into \code{outputDirectory} and fills
#' its \code{data} folder with the prediction (and validation) results found in
#' \code{resultDirectory}.  Counts below \code{minCellCount} are censored via
#' \code{PatientLevelPrediction::transportPlp} before anything is written, so
#' the resulting folder is safe to share.
#'
#' @param outputDirectory  Folder in which to create the app.
#' @param shinyDirectory   Folder holding the PLPViewer app; defaults to the
#'   copy shipped inside the ABCgentamicin package.
#' @param resultDirectory  Folder containing settings.csv plus one
#'   Analysis_* folder per model (required).
#' @param minCellCount     Minimum cell count kept when transporting results.
#' @param databaseName     Shareable name of the development database.
#' @return The path of the populated data folder.
#' @export
populateShinyApp <- function(outputDirectory = './ShinyApp',
                             shinyDirectory,
                             resultDirectory,
                             minCellCount = 10,
                             databaseName = 'sharable name of development data'){
  # check inputs
  if(missing(shinyDirectory)){
    shinyDirectory <- system.file("shiny", "PLPViewer", package = "ABCgentamicin")
  }
  if(missing(resultDirectory)){
    stop('Need to enter the resultDirectory')
  }
  if(!dir.exists(resultDirectory)){
    stop('resultDirectory does not exist')
  }
  # create the shiny app folder (TRUE/FALSE spelled out: T/F are reassignable)
  if(!dir.exists(outputDirectory)){
    dir.create(outputDirectory, recursive = TRUE)
  }
  # copy the viewer app itself
  R.utils::copyDirectory(from = shinyDirectory,
                         to = outputDirectory,
                         recursive = TRUE)
  outputDirectory <- file.path(outputDirectory, 'data')
  if(!dir.exists(outputDirectory)){
    dir.create(outputDirectory, recursive = TRUE)
  }
  # copy the settings csv
  settings <- utils::read.csv(file.path(resultDirectory, 'settings.csv'))
  utils::write.csv(settings, file.path(outputDirectory, 'settings.csv'), row.names = FALSE)
  # copy each analysis as an rds file (censored to minCellCount) plus its log
  # (loop variable renamed from `file`, which shadowed base::file)
  analysisFolders <- dir(resultDirectory, full.names = FALSE)
  analysisFolders <- analysisFolders[grep('Analysis', analysisFolders)]
  for(analysisFolder in analysisFolders){
    if(!dir.exists(file.path(outputDirectory, analysisFolder))){
      dir.create(file.path(outputDirectory, analysisFolder))
    }
    if(dir.exists(file.path(resultDirectory, analysisFolder, 'plpResult'))){
      res <- PatientLevelPrediction::loadPlpResult(file.path(resultDirectory, analysisFolder, 'plpResult'))
      res <- PatientLevelPrediction::transportPlp(res, n = minCellCount,
                                                  save = FALSE, dataName = databaseName)
      saveRDS(res, file.path(outputDirectory, analysisFolder, 'plpResult.rds'))
    }
    if(file.exists(file.path(resultDirectory, analysisFolder, 'plpLog.txt'))){
      file.copy(from = file.path(resultDirectory, analysisFolder, 'plpLog.txt'),
                to = file.path(outputDirectory, analysisFolder, 'plpLog.txt'))
    }
  }
  # copy any validation results, censored the same way
  # (empty folders simply yield zero loop iterations, so no length guards)
  if(dir.exists(file.path(resultDirectory, 'Validation'))){
    valFolders <- dir(file.path(resultDirectory, 'Validation'), full.names = FALSE)
    for(valFolder in valFolders){
      # each validation-database folder contains one subfolder per analysisId
      valSubfolders <- dir(file.path(resultDirectory, 'Validation', valFolder), full.names = FALSE)
      for(valSubfolder in valSubfolders){
        valOut <- file.path(valFolder, valSubfolder)
        if(!dir.exists(file.path(outputDirectory, 'Validation', valOut))){
          dir.create(file.path(outputDirectory, 'Validation', valOut), recursive = TRUE)
        }
        if(file.exists(file.path(resultDirectory, 'Validation', valOut, 'validationResult.rds'))){
          res <- readRDS(file.path(resultDirectory, 'Validation', valOut, 'validationResult.rds'))
          res <- PatientLevelPrediction::transportPlp(res, n = minCellCount,
                                                      save = FALSE, dataName = databaseName)
          saveRDS(res, file.path(outputDirectory, 'Validation', valOut, 'validationResult.rds'))
        }
      }
    }
  }
  ParallelLogger::logInfo(paste0('Shiny App created at: ', outputDirectory))
  ParallelLogger::logInfo(paste0('Upload the folder ', outputDirectory, ' to the shinyDeploy OHDSI github to share the results with others.'))
  return(outputDirectory)
}
| /AbxBetterChoice/ABCgentamicin/R/populateShinyApp.R | no_license | ABMI/AbxBetterChoice | R | false | false | 3,762 | r | #' @export
#' Populate a shareable Shiny viewer with study results
#'
#' Copies the PLPViewer Shiny application into \code{outputDirectory} and fills
#' its \code{data} folder with the prediction (and validation) results found in
#' \code{resultDirectory}.  Counts below \code{minCellCount} are censored via
#' \code{PatientLevelPrediction::transportPlp} before anything is written, so
#' the resulting folder is safe to share.
#'
#' @param outputDirectory  Folder in which to create the app.
#' @param shinyDirectory   Folder holding the PLPViewer app; defaults to the
#'   copy shipped inside the ABCgentamicin package.
#' @param resultDirectory  Folder containing settings.csv plus one
#'   Analysis_* folder per model (required).
#' @param minCellCount     Minimum cell count kept when transporting results.
#' @param databaseName     Shareable name of the development database.
#' @return The path of the populated data folder.
populateShinyApp <- function(outputDirectory = './ShinyApp',
                             shinyDirectory,
                             resultDirectory,
                             minCellCount = 10,
                             databaseName = 'sharable name of development data'){
  # check inputs
  if(missing(shinyDirectory)){
    shinyDirectory <- system.file("shiny", "PLPViewer", package = "ABCgentamicin")
  }
  if(missing(resultDirectory)){
    stop('Need to enter the resultDirectory')
  }
  if(!dir.exists(resultDirectory)){
    stop('resultDirectory does not exist')
  }
  # create the shiny app folder (TRUE/FALSE spelled out: T/F are reassignable)
  if(!dir.exists(outputDirectory)){
    dir.create(outputDirectory, recursive = TRUE)
  }
  # copy the viewer app itself
  R.utils::copyDirectory(from = shinyDirectory,
                         to = outputDirectory,
                         recursive = TRUE)
  outputDirectory <- file.path(outputDirectory, 'data')
  if(!dir.exists(outputDirectory)){
    dir.create(outputDirectory, recursive = TRUE)
  }
  # copy the settings csv
  settings <- utils::read.csv(file.path(resultDirectory, 'settings.csv'))
  utils::write.csv(settings, file.path(outputDirectory, 'settings.csv'), row.names = FALSE)
  # copy each analysis as an rds file (censored to minCellCount) plus its log
  # (loop variable renamed from `file`, which shadowed base::file)
  analysisFolders <- dir(resultDirectory, full.names = FALSE)
  analysisFolders <- analysisFolders[grep('Analysis', analysisFolders)]
  for(analysisFolder in analysisFolders){
    if(!dir.exists(file.path(outputDirectory, analysisFolder))){
      dir.create(file.path(outputDirectory, analysisFolder))
    }
    if(dir.exists(file.path(resultDirectory, analysisFolder, 'plpResult'))){
      res <- PatientLevelPrediction::loadPlpResult(file.path(resultDirectory, analysisFolder, 'plpResult'))
      res <- PatientLevelPrediction::transportPlp(res, n = minCellCount,
                                                  save = FALSE, dataName = databaseName)
      saveRDS(res, file.path(outputDirectory, analysisFolder, 'plpResult.rds'))
    }
    if(file.exists(file.path(resultDirectory, analysisFolder, 'plpLog.txt'))){
      file.copy(from = file.path(resultDirectory, analysisFolder, 'plpLog.txt'),
                to = file.path(outputDirectory, analysisFolder, 'plpLog.txt'))
    }
  }
  # copy any validation results, censored the same way
  # (empty folders simply yield zero loop iterations, so no length guards)
  if(dir.exists(file.path(resultDirectory, 'Validation'))){
    valFolders <- dir(file.path(resultDirectory, 'Validation'), full.names = FALSE)
    for(valFolder in valFolders){
      # each validation-database folder contains one subfolder per analysisId
      valSubfolders <- dir(file.path(resultDirectory, 'Validation', valFolder), full.names = FALSE)
      for(valSubfolder in valSubfolders){
        valOut <- file.path(valFolder, valSubfolder)
        if(!dir.exists(file.path(outputDirectory, 'Validation', valOut))){
          dir.create(file.path(outputDirectory, 'Validation', valOut), recursive = TRUE)
        }
        if(file.exists(file.path(resultDirectory, 'Validation', valOut, 'validationResult.rds'))){
          res <- readRDS(file.path(resultDirectory, 'Validation', valOut, 'validationResult.rds'))
          res <- PatientLevelPrediction::transportPlp(res, n = minCellCount,
                                                      save = FALSE, dataName = databaseName)
          saveRDS(res, file.path(outputDirectory, 'Validation', valOut, 'validationResult.rds'))
        }
      }
    }
  }
  ParallelLogger::logInfo(paste0('Shiny App created at: ', outputDirectory))
  ParallelLogger::logInfo(paste0('Upload the folder ', outputDirectory, ' to the shinyDeploy OHDSI github to share the results with others.'))
  return(outputDirectory)
}
|
# intro_R.r -- short R tutorial script covering assignment, vectors, matrices,
# data frames, lists, plotting, file I/O, NA handling, dates, loops, functions.
a=5
x=rnorm(100)
plot(x)
vec1 = c(1,4,6,8,10)
vec2 = seq(from=0, to=1.1, by=0.25)
sum(vec1)
mat=matrix(data=c(9,2,3,4,5,6),ncol=3)
t = data.frame(x = c(11,12,14),
y = c(19,20,21), z = c(10,9,7))
L = list(one=1, two=c(1,2),
five=seq(0, 1, length=5))
plot(rnorm(100), type="l", col="gold")
# round-trip a data frame through a text file
d = data.frame(a = c(3,4,5),
b = c(12,43,54))
write.table(d, file="tst0.txt",
row.names=FALSE)
d2 = read.table(file="tst0.txt",
header=TRUE)
j = c(1,2,NA)
max(j, na.rm=TRUE)
m = "apples"
date1=strptime( c("20100225230000",
"20100226000000", "20100226010000"),
format="%Y%m%d%H%M%S")
dt = as.Date('1915-6-16')
dt1 = as.Date('1/15/2001',format='%m/%d/%Y')
h = seq(from=1, to=8)
s = c()
# NOTE(review): h has only 8 elements but the loop runs to 10, so h[9]/h[10]
# are NA and s[9], s[10] become NA; s[1] is also NA since the loop starts at 2.
for(i in 2:10) {
s[i] = h[i] * 10
}
fun1 = function(arg1, arg2) {
w = arg1 ^ 2
return(arg2 + w)
}
| /_MIT_math_fin/intro_R.r | no_license | quant108/MIT-Math-Finance | R | false | false | 976 | r | a=5
# intro_R.r (continued) -- tutorial examples: vectors, matrices, data frames,
# lists, plotting, file I/O, NA handling, dates, loops, functions.
x=rnorm(100)
plot(x)
vec1 = c(1,4,6,8,10)
vec2 = seq(from=0, to=1.1, by=0.25)
sum(vec1)
mat=matrix(data=c(9,2,3,4,5,6),ncol=3)
t = data.frame(x = c(11,12,14),
y = c(19,20,21), z = c(10,9,7))
L = list(one=1, two=c(1,2),
five=seq(0, 1, length=5))
plot(rnorm(100), type="l", col="gold")
# round-trip a data frame through a text file
d = data.frame(a = c(3,4,5),
b = c(12,43,54))
write.table(d, file="tst0.txt",
row.names=FALSE)
d2 = read.table(file="tst0.txt",
header=TRUE)
j = c(1,2,NA)
max(j, na.rm=TRUE)
m = "apples"
date1=strptime( c("20100225230000",
"20100226000000", "20100226010000"),
format="%Y%m%d%H%M%S")
dt = as.Date('1915-6-16')
dt1 = as.Date('1/15/2001',format='%m/%d/%Y')
h = seq(from=1, to=8)
s = c()
# NOTE(review): h has only 8 elements but the loop runs to 10, so h[9]/h[10]
# are NA and s[9], s[10] become NA; s[1] is also NA since the loop starts at 2.
for(i in 2:10) {
s[i] = h[i] * 10
}
fun1 = function(arg1, arg2) {
w = arg1 ^ 2
return(arg2 + w)
}
|
# @knitr setup
fillcolor <- "gray90"
# @knitr Fig1 -------------------------------------------------------------------
library(VGAM)
## Variant A
svg("figs/Taavi1a.svg", width = 4, height = 2.76, pointsize = 12)
Scale <- 0.3
par(mar = rep(2, 4))
plot.new()
plot.window(xlim = c(0, 1.2), ylim = c(0, 2))
qq <- qrayleigh(0.2, scale = Scale)
coord.x <- c(0, seq(0, qq, 0.01), qq)
coord.y <- c(0, drayleigh(seq(0, qq, 0.01), scale = Scale), 0)
polygon(coord.x, coord.y, col = fillcolor, border = NA)
curve(drayleigh(x, Scale),
xlim = c(0, 1.2),
ylab = NA,
xlab = NA,
add = T)
text(c(0.12, 0.45), y = 0.4, c(expression(alpha), expression(1 - alpha)))
axis(side = 1, at = qq, labels = expression(x[alpha]), pos = 0)
axis(side = 2, labels = NA, lwd.ticks = 0)
lines(qq, drayleigh(qq, scale = Scale), lty = 3, type = "h")
abline(h = 0, lty = 1)
loc <- par("usr")
text(loc[1], loc[4], labels = expression(f(x)), pos = 2, xpd = T)
text(loc[2], loc[3], expression(x), pos = 4, xpd = T)
dev.off()
## Variant B
svg("figs/Taavi1b.svg", width = 4, height = 2.76, pointsize = 12)
par(mar = rep(2, 4))
plot.new()
plot.window(xlim = c(0, 1.2), ylim = c(0, 2))
qq <- qrayleigh(0.2, scale = Scale)
coord.x <- c(0, seq(0, qq, 0.01), qq)
coord.y <- c(0, drayleigh(seq(0, qq, 0.01), scale = Scale), 0)
polygon(coord.x, coord.y, col = fillcolor, border = NA)
curve(drayleigh(x, Scale),
xlim = c(0, 1.2),
ylab = NA,
xlab = NA,
add = T)
axis(side = 1, at = qq, labels = expression(x[0.2]), pos = 0) # Kas tavalise fondiga või italicus?
axis(side = 2, labels = NA, lwd.ticks = 0)
lines(qq, drayleigh(qq, scale = Scale), lty = 3, type = "h")
abline(h = 0, lty = 1)
text(c(0.12, 0.45), y = 0.4, c(0.2, 0.8))
loc <- par("usr")
text(loc[1], loc[4], expression(f(x)), pos = 2, xpd = T)
text(loc[2], loc[3], expression(x), pos = 4, xpd = T)
dev.off()
# @knitr Fig2 -------------------------------------------------------------------
# Standard normal density with both 2.5% tails shaded (95% in the middle).
# Fixes: use the shared `fillcolor` for BOTH tails (one was a 'gray90'
# literal, same colour but inconsistent); spell out TRUE.
svg("figs/Taavi2.svg", width = 4, height = 2.76, pointsize = 12)
par(mar = rep(2, 4))
plot.new()
plot.window(xlim = c(-3, 3), ylim = c(0, .4))
qq <- round(qnorm(c(0.025, 0.975)), 2)
coord.x <- c(-3, seq(-3, qq[1], 0.01), qq[1])
coord.y <- c(0, dnorm(seq(-3, qq[1], 0.01)), 0)
polygon(coord.x, coord.y, col = fillcolor, border = NA, density = NA)
coord.x <- c(qq[2], seq(qq[2], 3, 0.01), 3)
coord.y <- c(0, dnorm(seq(qq[2], 3, 0.01)), 0)
polygon(coord.x, coord.y, col = fillcolor, border = NA, density = NA)
text(0, y = dnorm(0.5)/2, 0.95)
curve(dnorm(x),
      xlim = c(-3, 3),
      ylab = NA,
      xlab = NA,
      add = TRUE)
axis(side = 1, at = qq, pos = 0)
lines(qq, dnorm(qq), lty = 3, type = "h")
arrows(0, 0, 0, 5, lwd = 1, length = 0.15)
abline(h = 0, lty = 1)
loc <- par("usr")
text(0, dnorm(0) + 0.02, expression(f(x)), pos = 2, xpd = TRUE)
text(loc[2], loc[3], expression(x), pos = 4, xpd = TRUE)
dev.off()
# @knitr Fig3 -------------------------------------------------------------------
# Normal density illustrating two-, right- and left-tailed p-values.
# Fixes: use the shared `fillcolor` everywhere ('gray90' literals were
# inconsistent with it); spell out TRUE.
# two-tailed
svg("figs/Taavi3_two-tailed.svg", width = 4, height = 2.76, pointsize = 12)
par(mar = rep(2, 4))
plot.new()
plot.window(xlim = c(-3, 3), ylim = c(0, .4))
qq <- round(qnorm(c(0.025, 0.975)), 2)
coord.x <- c(-3, seq(-3, qq[1], 0.01), qq[1])
coord.y <- c(0, dnorm(seq(-3, qq[1], 0.01)), 0)
polygon(coord.x, coord.y, col = fillcolor, border = NA, density = NA)
coord.x <- c(qq[2], seq(qq[2], 3, 0.01), 3)
coord.y <- c(0, dnorm(seq(qq[2], 3, 0.01)), 0)
polygon(coord.x, coord.y, col = fillcolor, border = NA, density = NA)
text(qnorm(c(0.025, 0.975)), y = 0.017,
     labels = expression(p),
     pos = c(2, 4)) # U+1D4D7
axis(side = 1,
     at = qq,
     labels = c(expression(-t), expression(t)),
     pos = 0,
     font = 3)
lines(qq, dnorm(qq), lty = 3, type = "h")
arrows(0, 0, 0, 5, lwd = 1, length = 0.15)
abline(h = 0, lty = 1)
curve(dnorm(x),
      xlim = c(-3, 3),
      ylab = NA,
      xlab = NA,
      add = TRUE)
loc <- par("usr")
text(0, dnorm(0) + 0.02, expression(f(x)), pos = 2, xpd = TRUE)
text(loc[2], loc[3], expression(x), pos = 4, xpd = TRUE)
dev.off()
## Right
svg("figs/Taavi3_right-tailed.svg", width = 4, height = 2.76, pointsize = 12)
par(mar = rep(2, 4))
plot.new()
plot.window(xlim = c(-3, 3), ylim = c(0, .4))
qq <- round(qnorm(0.95), 2)
coord.x <- c(qq, seq(qq, 3, 0.01), 3)
coord.y <- c(0, dnorm(seq(qq, 3, 0.01)), 0)
polygon(coord.x, coord.y, col = fillcolor, border = NA, density = NA)
text(qnorm(0.95), y = 0.035, labels = expression(p), pos = 4) # U+1D4D7
axis(side = 1,
     at = qq,
     labels = expression(t),
     pos = 0)
lines(qq, dnorm(qq), lty = 3, type = "h")
arrows(0, 0, 0, 5, lwd = 1, length = 0.15)
abline(h = 0, lty = 1)
curve(dnorm(x),
      xlim = c(-3, 3),
      ylab = NA,
      xlab = NA,
      add = TRUE)
loc <- par("usr")
text(0, dnorm(0) + 0.02, expression(f(x)), pos = 2, xpd = TRUE)
text(loc[2], loc[3], expression(x), pos = 4, xpd = TRUE)
dev.off()
## Left
svg("figs/Taavi3_left-tailed.svg", width = 4, height = 2.76, pointsize = 12)
par(mar = rep(2, 4))
plot.new()
plot.window(xlim = c(-3, 3), ylim = c(0, .4))
qq <- round(qnorm(0.05), 2)
coord.x <- c(-3, seq(-3, qq, 0.01), qq)
coord.y <- c(0, dnorm(seq(-3, qq, 0.01)), 0)
polygon(coord.x, coord.y, col = fillcolor, border = NA, density = NA)
text(qnorm(0.05), y = 0.035, labels = expression(p), pos = 2) # U+1D4D7
axis(side = 1,
     at = qq,
     labels = expression(-t),
     pos = 0)
lines(qq, dnorm(qq), lty = 3, type = "h")
arrows(0, 0, 0, 5, lwd = 1, length = 0.15)
abline(h = 0, lty = 1)
curve(dnorm(x),
      xlim = c(-3, 3),
      ylab = NA,
      xlab = NA,
      add = TRUE)
loc <- par("usr")
text(0, dnorm(0) + 0.02, expression(f(x)), pos = 2, xpd = TRUE)
text(loc[2], loc[3], expression(x), pos = 4, xpd = TRUE)
dev.off()
# @knitr Fig4 -------------------------------------------------------------------
# Noncentral chi-square density (df = 4, ncp = 4) with left/right 5% tails.
# Fixes: use the shared `fillcolor` ('gray90' literals); spell out TRUE.
## left
svg("figs/Taavi4_left.svg", width = 4, height = 2.76, pointsize = 12)
par(mar = rep(2, 4))
plot.new()
plot.window(xlim = c(0, 30), ylim = c(0, .1))
df <- 4
ncp <- 4
qq <- round(qchisq(0.05, df = df, ncp = ncp), 2)
coord.x <- c(0, seq(0, qq, 0.01), qq)
coord.y <- c(0, dchisq(seq(0, qq, 0.01), df = df, ncp = ncp), 0)
polygon(coord.x, coord.y, col = fillcolor, border = NA, density = NA)
text(qchisq(0.01, df = df, ncp = ncp), y = 0.018, labels = expression(p), pos = 1)
axis(side = 1,
     at = qq,
     labels = expression(t),
     pos = 0)
axis(side = 2, labels = NA, lwd.ticks = 0)
lines(qq, dchisq(qq, df = df, ncp = ncp), lty = 3, type = "h")
abline(h = 0, lty = 1)
curve(dchisq(x, df = df, ncp = ncp),
      xlim = c(0, 30),
      ylab = NA,
      xlab = NA,
      add = TRUE)
loc <- par("usr")
text(loc[1], loc[4], expression(f(x)), pos = 2, xpd = TRUE)
text(loc[2], loc[3], expression(x), pos = 4, xpd = TRUE)
dev.off()
## right
svg("figs/Taavi4_right.svg", width = 4, height = 2.76, pointsize = 12)
par(mar = rep(2, 4))
plot.new()
plot.window(xlim = c(0, 30), ylim = c(0, .1))
qq <- round(qchisq(0.95, df = df, ncp = ncp), 2)
coord.x <- c(qq, seq(qq, 30, 0.01), 30)
coord.y <- c(0, dchisq(seq(qq, 30, 0.01), df = df, ncp = ncp), 0)
polygon(coord.x, coord.y, col = fillcolor, border = NA, density = NA)
text(qchisq(0.96, df = df, ncp = ncp), y = 0.005, labels = expression(p))
axis(side = 1,
     at = qq,
     labels = expression(t),
     pos = 0)
axis(side = 2, labels = NA, lwd.ticks = 0)
lines(qq, dchisq(qq, df = df, ncp = ncp), lty = 3, type = "h")
abline(h = 0, lty = 1)
curve(dchisq(x, df = df, ncp = ncp),
      xlim = c(0, 30),
      ylab = NA,
      xlab = NA,
      add = TRUE)
loc <- par("usr")
text(loc[1], loc[4], expression(f(x)), pos = 2, xpd = TRUE)
text(loc[2], loc[3], expression(x), pos = 4, xpd = TRUE)
dev.off()
# @knitr Fig6 -------------------------------------------------------------------
# Central chi-square density (df = 5) with the region between the 70.8% and
# 95% quantiles shaded and annotated with its probability (0.29).
# Fixes: use the shared `fillcolor` ('gray90' literal); spell out TRUE.
svg("figs/Taavi6.svg", width = 5, height = 2.76, pointsize = 12)
df <- 5
ncp <- 0
par(mar = rep(2, 4))
plot.new()
plot.window(xlim = c(0, 20), ylim = c(0, .15))
qq <- round(qchisq(c(0.708, 0.95), df = df, ncp = ncp), 2)
coord.x <- c(qq[1], seq(qq[1], 20, 0.01), 20)
coord.y <- c(0, dchisq(seq(qq[1], 20, 0.01), df = df, ncp = ncp), 0)
polygon(coord.x, coord.y, col = fillcolor, border = NA, density = NA)
# NOTE(review): the y-position feeds the mean PROBABILITY (0.829) into
# dchisq() rather than a quantile -- looks odd but is kept as authored.
text(qchisq(mean(c(0.708, 0.95)), df = df, ncp = ncp) + 0.8,
     y = dchisq(mean(c(0.708, 0.95)), df = df, ncp = ncp)/3.2,
     labels = round(1 - 0.708, 2))
axis(side = 1,
     at = qq,
     labels = qq,
     pos = 0)
axis(side = 2, labels = NA, lwd.ticks = 0)
abline(h = 0, lty = 1)
lines(qq, dchisq(qq, df = df, ncp = ncp), lty = 3, type = "h")
curve(dchisq(x, df = df, ncp = ncp),
      xlim = c(0, 20),
      ylab = NA,
      xlab = NA,
      add = TRUE)
loc <- par("usr")
text(loc[1], loc[4], expression(f(x)), pos = 2, xpd = TRUE)
text(loc[2], loc[3], expression(x), pos = 4, xpd = TRUE)
dev.off()
| /R/figs.R | no_license | tpall/book-figs | R | false | false | 8,734 | r |
# @knitr setup
fillcolor <- "gray90"
# @knitr Fig1 -------------------------------------------------------------------
library(VGAM)
## Variant A
svg("figs/Taavi1a.svg", width = 4, height = 2.76, pointsize = 12)
Scale <- 0.3
par(mar = rep(2, 4))
plot.new()
plot.window(xlim = c(0, 1.2), ylim = c(0, 2))
qq <- qrayleigh(0.2, scale = Scale)
coord.x <- c(0, seq(0, qq, 0.01), qq)
coord.y <- c(0, drayleigh(seq(0, qq, 0.01), scale = Scale), 0)
polygon(coord.x, coord.y, col = fillcolor, border = NA)
curve(drayleigh(x, Scale),
xlim = c(0, 1.2),
ylab = NA,
xlab = NA,
add = T)
text(c(0.12, 0.45), y = 0.4, c(expression(alpha), expression(1 - alpha)))
axis(side = 1, at = qq, labels = expression(x[alpha]), pos = 0)
axis(side = 2, labels = NA, lwd.ticks = 0)
lines(qq, drayleigh(qq, scale = Scale), lty = 3, type = "h")
abline(h = 0, lty = 1)
loc <- par("usr")
text(loc[1], loc[4], labels = expression(f(x)), pos = 2, xpd = T)
text(loc[2], loc[3], expression(x), pos = 4, xpd = T)
dev.off()
## Variant B
svg("figs/Taavi1b.svg", width = 4, height = 2.76, pointsize = 12)
par(mar = rep(2, 4))
plot.new()
plot.window(xlim = c(0, 1.2), ylim = c(0, 2))
qq <- qrayleigh(0.2, scale = Scale)
coord.x <- c(0, seq(0, qq, 0.01), qq)
coord.y <- c(0, drayleigh(seq(0, qq, 0.01), scale = Scale), 0)
polygon(coord.x, coord.y, col = fillcolor, border = NA)
curve(drayleigh(x, Scale),
xlim = c(0, 1.2),
ylab = NA,
xlab = NA,
add = T)
axis(side = 1, at = qq, labels = expression(x[0.2]), pos = 0) # Kas tavalise fondiga või italicus?
axis(side = 2, labels = NA, lwd.ticks = 0)
lines(qq, drayleigh(qq, scale = Scale), lty = 3, type = "h")
abline(h = 0, lty = 1)
text(c(0.12, 0.45), y = 0.4, c(0.2, 0.8))
loc <- par("usr")
text(loc[1], loc[4], expression(f(x)), pos = 2, xpd = T)
text(loc[2], loc[3], expression(x), pos = 4, xpd = T)
dev.off()
# @knitr Fig2 -------------------------------------------------------------------
# Standard normal density with both 2.5% tails shaded (95% in the middle).
# Fixes: use the shared `fillcolor` for BOTH tails (one was a 'gray90'
# literal, same colour but inconsistent); spell out TRUE.
svg("figs/Taavi2.svg", width = 4, height = 2.76, pointsize = 12)
par(mar = rep(2, 4))
plot.new()
plot.window(xlim = c(-3, 3), ylim = c(0, .4))
qq <- round(qnorm(c(0.025, 0.975)), 2)
coord.x <- c(-3, seq(-3, qq[1], 0.01), qq[1])
coord.y <- c(0, dnorm(seq(-3, qq[1], 0.01)), 0)
polygon(coord.x, coord.y, col = fillcolor, border = NA, density = NA)
coord.x <- c(qq[2], seq(qq[2], 3, 0.01), 3)
coord.y <- c(0, dnorm(seq(qq[2], 3, 0.01)), 0)
polygon(coord.x, coord.y, col = fillcolor, border = NA, density = NA)
text(0, y = dnorm(0.5)/2, 0.95)
curve(dnorm(x),
      xlim = c(-3, 3),
      ylab = NA,
      xlab = NA,
      add = TRUE)
axis(side = 1, at = qq, pos = 0)
lines(qq, dnorm(qq), lty = 3, type = "h")
arrows(0, 0, 0, 5, lwd = 1, length = 0.15)
abline(h = 0, lty = 1)
loc <- par("usr")
text(0, dnorm(0) + 0.02, expression(f(x)), pos = 2, xpd = TRUE)
text(loc[2], loc[3], expression(x), pos = 4, xpd = TRUE)
dev.off()
# @knitr Fig3 -------------------------------------------------------------------
# Normal density illustrating two-, right- and left-tailed p-values.
# Fixes: use the shared `fillcolor` everywhere ('gray90' literals were
# inconsistent with it); spell out TRUE.
# two-tailed
svg("figs/Taavi3_two-tailed.svg", width = 4, height = 2.76, pointsize = 12)
par(mar = rep(2, 4))
plot.new()
plot.window(xlim = c(-3, 3), ylim = c(0, .4))
qq <- round(qnorm(c(0.025, 0.975)), 2)
coord.x <- c(-3, seq(-3, qq[1], 0.01), qq[1])
coord.y <- c(0, dnorm(seq(-3, qq[1], 0.01)), 0)
polygon(coord.x, coord.y, col = fillcolor, border = NA, density = NA)
coord.x <- c(qq[2], seq(qq[2], 3, 0.01), 3)
coord.y <- c(0, dnorm(seq(qq[2], 3, 0.01)), 0)
polygon(coord.x, coord.y, col = fillcolor, border = NA, density = NA)
text(qnorm(c(0.025, 0.975)), y = 0.017,
     labels = expression(p),
     pos = c(2, 4)) # U+1D4D7
axis(side = 1,
     at = qq,
     labels = c(expression(-t), expression(t)),
     pos = 0,
     font = 3)
lines(qq, dnorm(qq), lty = 3, type = "h")
arrows(0, 0, 0, 5, lwd = 1, length = 0.15)
abline(h = 0, lty = 1)
curve(dnorm(x),
      xlim = c(-3, 3),
      ylab = NA,
      xlab = NA,
      add = TRUE)
loc <- par("usr")
text(0, dnorm(0) + 0.02, expression(f(x)), pos = 2, xpd = TRUE)
text(loc[2], loc[3], expression(x), pos = 4, xpd = TRUE)
dev.off()
## Right
svg("figs/Taavi3_right-tailed.svg", width = 4, height = 2.76, pointsize = 12)
par(mar = rep(2, 4))
plot.new()
plot.window(xlim = c(-3, 3), ylim = c(0, .4))
qq <- round(qnorm(0.95), 2)
coord.x <- c(qq, seq(qq, 3, 0.01), 3)
coord.y <- c(0, dnorm(seq(qq, 3, 0.01)), 0)
polygon(coord.x, coord.y, col = fillcolor, border = NA, density = NA)
text(qnorm(0.95), y = 0.035, labels = expression(p), pos = 4) # U+1D4D7
axis(side = 1,
     at = qq,
     labels = expression(t),
     pos = 0)
lines(qq, dnorm(qq), lty = 3, type = "h")
arrows(0, 0, 0, 5, lwd = 1, length = 0.15)
abline(h = 0, lty = 1)
curve(dnorm(x),
      xlim = c(-3, 3),
      ylab = NA,
      xlab = NA,
      add = TRUE)
loc <- par("usr")
text(0, dnorm(0) + 0.02, expression(f(x)), pos = 2, xpd = TRUE)
text(loc[2], loc[3], expression(x), pos = 4, xpd = TRUE)
dev.off()
## Left
svg("figs/Taavi3_left-tailed.svg", width = 4, height = 2.76, pointsize = 12)
par(mar = rep(2, 4))
plot.new()
plot.window(xlim = c(-3, 3), ylim = c(0, .4))
qq <- round(qnorm(0.05), 2)
coord.x <- c(-3, seq(-3, qq, 0.01), qq)
coord.y <- c(0, dnorm(seq(-3, qq, 0.01)), 0)
polygon(coord.x, coord.y, col = fillcolor, border = NA, density = NA)
text(qnorm(0.05), y = 0.035, labels = expression(p), pos = 2) # U+1D4D7
axis(side = 1,
     at = qq,
     labels = expression(-t),
     pos = 0)
lines(qq, dnorm(qq), lty = 3, type = "h")
arrows(0, 0, 0, 5, lwd = 1, length = 0.15)
abline(h = 0, lty = 1)
curve(dnorm(x),
      xlim = c(-3, 3),
      ylab = NA,
      xlab = NA,
      add = TRUE)
loc <- par("usr")
text(0, dnorm(0) + 0.02, expression(f(x)), pos = 2, xpd = TRUE)
text(loc[2], loc[3], expression(x), pos = 4, xpd = TRUE)
dev.off()
# @knitr Fig4 -------------------------------------------------------------------
# Noncentral chi-square density (df = 4, ncp = 4) with left/right 5% tails.
# Fixes: use the shared `fillcolor` ('gray90' literals); spell out TRUE.
## left
svg("figs/Taavi4_left.svg", width = 4, height = 2.76, pointsize = 12)
par(mar = rep(2, 4))
plot.new()
plot.window(xlim = c(0, 30), ylim = c(0, .1))
df <- 4
ncp <- 4
qq <- round(qchisq(0.05, df = df, ncp = ncp), 2)
coord.x <- c(0, seq(0, qq, 0.01), qq)
coord.y <- c(0, dchisq(seq(0, qq, 0.01), df = df, ncp = ncp), 0)
polygon(coord.x, coord.y, col = fillcolor, border = NA, density = NA)
text(qchisq(0.01, df = df, ncp = ncp), y = 0.018, labels = expression(p), pos = 1)
axis(side = 1,
     at = qq,
     labels = expression(t),
     pos = 0)
axis(side = 2, labels = NA, lwd.ticks = 0)
lines(qq, dchisq(qq, df = df, ncp = ncp), lty = 3, type = "h")
abline(h = 0, lty = 1)
curve(dchisq(x, df = df, ncp = ncp),
      xlim = c(0, 30),
      ylab = NA,
      xlab = NA,
      add = TRUE)
loc <- par("usr")
text(loc[1], loc[4], expression(f(x)), pos = 2, xpd = TRUE)
text(loc[2], loc[3], expression(x), pos = 4, xpd = TRUE)
dev.off()
## right
svg("figs/Taavi4_right.svg", width = 4, height = 2.76, pointsize = 12)
par(mar = rep(2, 4))
plot.new()
plot.window(xlim = c(0, 30), ylim = c(0, .1))
qq <- round(qchisq(0.95, df = df, ncp = ncp), 2)
coord.x <- c(qq, seq(qq, 30, 0.01), 30)
coord.y <- c(0, dchisq(seq(qq, 30, 0.01), df = df, ncp = ncp), 0)
polygon(coord.x, coord.y, col = fillcolor, border = NA, density = NA)
text(qchisq(0.96, df = df, ncp = ncp), y = 0.005, labels = expression(p))
axis(side = 1,
     at = qq,
     labels = expression(t),
     pos = 0)
axis(side = 2, labels = NA, lwd.ticks = 0)
lines(qq, dchisq(qq, df = df, ncp = ncp), lty = 3, type = "h")
abline(h = 0, lty = 1)
curve(dchisq(x, df = df, ncp = ncp),
      xlim = c(0, 30),
      ylab = NA,
      xlab = NA,
      add = TRUE)
loc <- par("usr")
text(loc[1], loc[4], expression(f(x)), pos = 2, xpd = TRUE)
text(loc[2], loc[3], expression(x), pos = 4, xpd = TRUE)
dev.off()
# @knitr Fig6 -------------------------------------------------------------------
svg("figs/Taavi6.svg", width = 5, height = 2.76, pointsize = 12)
df <- 5
ncp <- 0
par(mar = rep(2, 4))
plot.new()
plot.window(xlim = c(0, 20), ylim = c(0, .15))
qq <- round(qchisq(c(0.708, 0.95), df = df, ncp = ncp), 2)
coord.x <- c(qq[1], seq(qq[1], 20, 0.01), 20)
coord.y <- c(0, dchisq(seq(qq[1], 20, 0.01), df = df, ncp = ncp), 0)
polygon(coord.x, coord.y, col = 'gray90', border = NA, density = NA)
text(qchisq(mean(c(0.708, 0.95)), df = df, ncp = ncp) + 0.8,
y = dchisq(mean(c(0.708, 0.95)), df = df, ncp = ncp)/3.2,
labels = round(1 - 0.708, 2))
axis(side = 1,
at = qq,
labels = qq,
pos = 0)
axis(side = 2, labels = NA, lwd.ticks = 0)
abline(h = 0, lty = 1)
lines(qq, dchisq(qq, df = df, ncp = ncp), lty = 3, type = "h")
curve(dchisq(x, df = df, ncp = ncp),
xlim = c(0, 20),
ylab = NA,
xlab = NA,
add = T)
loc <- par("usr")
text(loc[1], loc[4], expression(f(x)), pos = 2, xpd = T)
text(loc[2], loc[3], expression(x), pos = 4, xpd = T)
dev.off()
|
#' @name get_mon
#' @export
get_mon_attributes <- function(...) {
x <- simmer::get_mon_attributes(...)
class(x) <- c("attributes", class(x))
x
}
#' @name plot.mon
#' @param keys attributes to plot (if left empty, all attributes are shown).
#'
#' @details The S3 method for 'attributes' does not support any metric. It simply
#' shows a stairstep graph of the values throughout the simulation for the keys
#' provided (or all the collected attributes if no key is provided).
#'
#' @export
plot.attributes <- function(x, metric=NULL, keys, ...) {
if (!missing(keys)) x <- x %>%
dplyr::filter(.data$key %in% keys) %>%
dplyr::mutate(key = factor(.data$key, levels = keys))
if (nrow(x) == 0)
stop("no data available or 'keys' not found")
plot_obj <-
ggplot(x) +
aes_(x = ~time, y = ~value) +
geom_step(aes_(group = ~replication), alpha = set_alpha(x)) +
stat_smooth() +
xlab("simulation time") +
ylab("value") +
expand_limits(y = 0)
if (length(unique(x$key)) > 1) {
plot_obj <- plot_obj +
ggtitle("Attribute evolution") +
facet_wrap(~key, scales = "free_y")
} else {
plot_obj <- plot_obj +
ggtitle(paste0("Attribute evolution: ", x$key[[1]]))
}
plot_obj
}
| /R/plot.attributes.R | no_license | gridl/simmer.plot | R | false | false | 1,246 | r | #' @name get_mon
#' @export
get_mon_attributes <- function(...) {
x <- simmer::get_mon_attributes(...)
class(x) <- c("attributes", class(x))
x
}
#' @name plot.mon
#' @param keys attributes to plot (if left empty, all attributes are shown).
#'
#' @details The S3 method for 'attributes' does not support any metric. It simply
#' shows a stairstep graph of the values throughout the simulation for the keys
#' provided (or all the collected attributes if no key is provided).
#'
#' @export
plot.attributes <- function(x, metric=NULL, keys, ...) {
if (!missing(keys)) x <- x %>%
dplyr::filter(.data$key %in% keys) %>%
dplyr::mutate(key = factor(.data$key, levels = keys))
if (nrow(x) == 0)
stop("no data available or 'keys' not found")
plot_obj <-
ggplot(x) +
aes_(x = ~time, y = ~value) +
geom_step(aes_(group = ~replication), alpha = set_alpha(x)) +
stat_smooth() +
xlab("simulation time") +
ylab("value") +
expand_limits(y = 0)
if (length(unique(x$key)) > 1) {
plot_obj <- plot_obj +
ggtitle("Attribute evolution") +
facet_wrap(~key, scales = "free_y")
} else {
plot_obj <- plot_obj +
ggtitle(paste0("Attribute evolution: ", x$key[[1]]))
}
plot_obj
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\name{get_mer_data}
\alias{get_mer_data}
\title{MER}
\usage{
get_mer_data(api_key, compnum)
}
\arguments{
\item{api_key}{API Key object (see also, setapi())}
\item{compnum}{company number (see also, compdata)}
}
\description{
https://www.quandl.com/databases/MF1/documentation
Mergent Global Fundamental Data에서 제공한 데이터를 검색할 수 있습니다.
Quandly의 api_key를 첫번째 인자로, compdata의 compnumber를 두번째 인자로 입력합니다.
}
| /man/get_mer_data.Rd | no_license | drtagkim/quandlWrapper | R | false | true | 560 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\name{get_mer_data}
\alias{get_mer_data}
\title{MER}
\usage{
get_mer_data(api_key, compnum)
}
\arguments{
\item{api_key}{API Key object (see also, setapi())}
\item{compnum}{company number (see also, compdata)}
}
\description{
https://www.quandl.com/databases/MF1/documentation
Mergent Global Fundamental Data에서 제공한 데이터를 검색할 수 있습니다.
Quandly의 api_key를 첫번째 인자로, compdata의 compnumber를 두번째 인자로 입력합니다.
}
|
library(glmnet)
mydata = read.table("./TrainingSet/RF/endometrium.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=1,family="gaussian",standardize=FALSE)
sink('./Model/EN/Classifier/endometrium/endometrium_098.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
| /Model/EN/Classifier/endometrium/endometrium_098.R | no_license | leon1003/QSMART | R | false | false | 363 | r | library(glmnet)
mydata = read.table("./TrainingSet/RF/endometrium.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=1,family="gaussian",standardize=FALSE)
sink('./Model/EN/Classifier/endometrium/endometrium_098.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
# Read data
data1 = read.csv("Zomnumeric.csv")
View(data1)
# load library
library(caret)
library(e1071)
# Transforming the dependent variable to a factor
data1$Has.Table.booking = as.factor(data1$Has.Table.booking)
#Partitioning the data into training and validation data
set.seed(101)
index = createDataPartition(data1$Has.Table.booking, p = 0.7, list = F )
train = data1[index,]
validation = data1[-index,]
# Explore data
dim(train)
dim(validation)
names(train)
head(train)
head(validation)
# Setting levels for both training and validation data
levels(train$Has.Table.booking) <- make.names(levels(factor(train$Has.Table.booking)))
levels(validation$Has.Table.booking) <- make.names(levels(factor(validation$Has.Table.booking)))
# Setting up train controls
repeats = 3
numbers = 10
tunel = 10
set.seed(1234)
x = trainControl(method = "repeatedcv",
number = numbers,
repeats = repeats,
classProbs = TRUE,
summaryFunction = twoClassSummary)
model1 <- train(Has.Table.booking~. , data = train, method = "knn",
preProcess = c("center","scale"),
trControl = x,
metric = "ROC",
tuneLength = tunel)
# Summary of model
model1
plot(model1)
# Validation
valid_pred <- predict(model1,validation, type = "prob")
#Storing Model Performance Scores
library(ROCR)
pred_val <-prediction(valid_pred[,2],validation$Has.Table.booking)
# Calculating Area under Curve (AUC)
perf_val <- performance(pred_val,"auc")
perf_val
# Plot AUC
perf_val <- performance(pred_val, "tpr", "fpr")
plot(perf_val, col = "green", lwd = 1.5)
#Calculating KS statistics
ks <- max(attr(perf_val, "y.values")[[1]] - (attr(perf_val, "x.values")[[1]]))
ks
| /knearestwrkng.R | no_license | Madumitha-S/da | R | false | false | 1,815 | r | # Read data
data1 = read.csv("Zomnumeric.csv")
View(data1)
# load library
library(caret)
library(e1071)
# Transforming the dependent variable to a factor
data1$Has.Table.booking = as.factor(data1$Has.Table.booking)
#Partitioning the data into training and validation data
set.seed(101)
index = createDataPartition(data1$Has.Table.booking, p = 0.7, list = F )
train = data1[index,]
validation = data1[-index,]
# Explore data
dim(train)
dim(validation)
names(train)
head(train)
head(validation)
# Setting levels for both training and validation data
levels(train$Has.Table.booking) <- make.names(levels(factor(train$Has.Table.booking)))
levels(validation$Has.Table.booking) <- make.names(levels(factor(validation$Has.Table.booking)))
# Setting up train controls
repeats = 3
numbers = 10
tunel = 10
set.seed(1234)
x = trainControl(method = "repeatedcv",
number = numbers,
repeats = repeats,
classProbs = TRUE,
summaryFunction = twoClassSummary)
model1 <- train(Has.Table.booking~. , data = train, method = "knn",
preProcess = c("center","scale"),
trControl = x,
metric = "ROC",
tuneLength = tunel)
# Summary of model
model1
plot(model1)
# Validation
valid_pred <- predict(model1,validation, type = "prob")
#Storing Model Performance Scores
library(ROCR)
pred_val <-prediction(valid_pred[,2],validation$Has.Table.booking)
# Calculating Area under Curve (AUC)
perf_val <- performance(pred_val,"auc")
perf_val
# Plot AUC
perf_val <- performance(pred_val, "tpr", "fpr")
plot(perf_val, col = "green", lwd = 1.5)
#Calculating KS statistics
ks <- max(attr(perf_val, "y.values")[[1]] - (attr(perf_val, "x.values")[[1]]))
ks
|
# This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common get_config new_operation new_request send_request
#' @include schemas_service.R
NULL
#' Creates a discoverer
#'
#' @description
#' Creates a discoverer.
#'
#' @usage
#' schemas_create_discoverer(Description, SourceArn, Tags)
#'
#' @param Description A description for the discoverer.
#' @param SourceArn [required] The ARN of the event bus.
#' @param Tags Tags associated with the resource.
#'
#' @section Request syntax:
#' ```
#' svc$create_discoverer(
#' Description = "string",
#' SourceArn = "string",
#' Tags = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_create_discoverer
schemas_create_discoverer <- function(Description = NULL, SourceArn, Tags = NULL) {
op <- new_operation(
name = "CreateDiscoverer",
http_method = "POST",
http_path = "/v1/discoverers",
paginator = list()
)
input <- .schemas$create_discoverer_input(Description = Description, SourceArn = SourceArn, Tags = Tags)
output <- .schemas$create_discoverer_output()
config <- get_config()
svc <- .schemas$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.schemas$operations$create_discoverer <- schemas_create_discoverer
#' Creates a registry
#'
#' @description
#' Creates a registry.
#'
#' @usage
#' schemas_create_registry(Description, RegistryName, Tags)
#'
#' @param Description A description of the registry to be created.
#' @param RegistryName [required] The name of the registry.
#' @param Tags Tags to associate with the registry.
#'
#' @section Request syntax:
#' ```
#' svc$create_registry(
#' Description = "string",
#' RegistryName = "string",
#' Tags = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_create_registry
schemas_create_registry <- function(Description = NULL, RegistryName, Tags = NULL) {
op <- new_operation(
name = "CreateRegistry",
http_method = "POST",
http_path = "/v1/registries/name/{registryName}",
paginator = list()
)
input <- .schemas$create_registry_input(Description = Description, RegistryName = RegistryName, Tags = Tags)
output <- .schemas$create_registry_output()
config <- get_config()
svc <- .schemas$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.schemas$operations$create_registry <- schemas_create_registry
#' Creates a schema definition
#'
#' @description
#' Creates a schema definition.
#'
#' Inactive schemas will be deleted after two years.
#'
#' @usage
#' schemas_create_schema(Content, Description, RegistryName, SchemaName,
#' Tags, Type)
#'
#' @param Content [required] The source of the schema definition.
#' @param Description A description of the schema.
#' @param RegistryName [required] The name of the registry.
#' @param SchemaName [required] The name of the schema.
#' @param Tags Tags associated with the schema.
#' @param Type [required] The type of schema.
#'
#' @section Request syntax:
#' ```
#' svc$create_schema(
#' Content = "string",
#' Description = "string",
#' RegistryName = "string",
#' SchemaName = "string",
#' Tags = list(
#' "string"
#' ),
#' Type = "OpenApi3"|"JSONSchemaDraft4"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_create_schema
schemas_create_schema <- function(Content, Description = NULL, RegistryName, SchemaName, Tags = NULL, Type) {
op <- new_operation(
name = "CreateSchema",
http_method = "POST",
http_path = "/v1/registries/name/{registryName}/schemas/name/{schemaName}",
paginator = list()
)
input <- .schemas$create_schema_input(Content = Content, Description = Description, RegistryName = RegistryName, SchemaName = SchemaName, Tags = Tags, Type = Type)
output <- .schemas$create_schema_output()
config <- get_config()
svc <- .schemas$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.schemas$operations$create_schema <- schemas_create_schema
#' Deletes a discoverer
#'
#' @description
#' Deletes a discoverer.
#'
#' @usage
#' schemas_delete_discoverer(DiscovererId)
#'
#' @param DiscovererId [required] The ID of the discoverer.
#'
#' @section Request syntax:
#' ```
#' svc$delete_discoverer(
#' DiscovererId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_delete_discoverer
schemas_delete_discoverer <- function(DiscovererId) {
op <- new_operation(
name = "DeleteDiscoverer",
http_method = "DELETE",
http_path = "/v1/discoverers/id/{discovererId}",
paginator = list()
)
input <- .schemas$delete_discoverer_input(DiscovererId = DiscovererId)
output <- .schemas$delete_discoverer_output()
config <- get_config()
svc <- .schemas$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.schemas$operations$delete_discoverer <- schemas_delete_discoverer
#' Deletes a Registry
#'
#' @description
#' Deletes a Registry.
#'
#' @usage
#' schemas_delete_registry(RegistryName)
#'
#' @param RegistryName [required] The name of the registry.
#'
#' @section Request syntax:
#' ```
#' svc$delete_registry(
#' RegistryName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_delete_registry
schemas_delete_registry <- function(RegistryName) {
op <- new_operation(
name = "DeleteRegistry",
http_method = "DELETE",
http_path = "/v1/registries/name/{registryName}",
paginator = list()
)
input <- .schemas$delete_registry_input(RegistryName = RegistryName)
output <- .schemas$delete_registry_output()
config <- get_config()
svc <- .schemas$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.schemas$operations$delete_registry <- schemas_delete_registry
#' Delete the resource-based policy attached to the specified registry
#'
#' @description
#' Delete the resource-based policy attached to the specified registry.
#'
#' @usage
#' schemas_delete_resource_policy(RegistryName)
#'
#' @param RegistryName The name of the registry.
#'
#' @section Request syntax:
#' ```
#' svc$delete_resource_policy(
#' RegistryName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_delete_resource_policy
schemas_delete_resource_policy <- function(RegistryName = NULL) {
op <- new_operation(
name = "DeleteResourcePolicy",
http_method = "DELETE",
http_path = "/v1/policy",
paginator = list()
)
input <- .schemas$delete_resource_policy_input(RegistryName = RegistryName)
output <- .schemas$delete_resource_policy_output()
config <- get_config()
svc <- .schemas$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.schemas$operations$delete_resource_policy <- schemas_delete_resource_policy
#' Delete a schema definition
#'
#' @description
#' Delete a schema definition.
#'
#' @usage
#' schemas_delete_schema(RegistryName, SchemaName)
#'
#' @param RegistryName [required] The name of the registry.
#' @param SchemaName [required] The name of the schema.
#'
#' @section Request syntax:
#' ```
#' svc$delete_schema(
#' RegistryName = "string",
#' SchemaName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_delete_schema
schemas_delete_schema <- function(RegistryName, SchemaName) {
op <- new_operation(
name = "DeleteSchema",
http_method = "DELETE",
http_path = "/v1/registries/name/{registryName}/schemas/name/{schemaName}",
paginator = list()
)
input <- .schemas$delete_schema_input(RegistryName = RegistryName, SchemaName = SchemaName)
output <- .schemas$delete_schema_output()
config <- get_config()
svc <- .schemas$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.schemas$operations$delete_schema <- schemas_delete_schema
#' Delete the schema version definition
#'
#' @description
#' Delete the schema version definition
#'
#' @usage
#' schemas_delete_schema_version(RegistryName, SchemaName, SchemaVersion)
#'
#' @param RegistryName [required] The name of the registry.
#' @param SchemaName [required] The name of the schema.
#' @param SchemaVersion [required] The version number of the schema
#'
#' @section Request syntax:
#' ```
#' svc$delete_schema_version(
#' RegistryName = "string",
#' SchemaName = "string",
#' SchemaVersion = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_delete_schema_version
schemas_delete_schema_version <- function(RegistryName, SchemaName, SchemaVersion) {
op <- new_operation(
name = "DeleteSchemaVersion",
http_method = "DELETE",
http_path = "/v1/registries/name/{registryName}/schemas/name/{schemaName}/version/{schemaVersion}",
paginator = list()
)
input <- .schemas$delete_schema_version_input(RegistryName = RegistryName, SchemaName = SchemaName, SchemaVersion = SchemaVersion)
output <- .schemas$delete_schema_version_output()
config <- get_config()
svc <- .schemas$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.schemas$operations$delete_schema_version <- schemas_delete_schema_version
#' Describe the code binding URI
#'
#' @description
#' Describe the code binding URI.
#'
#' @usage
#' schemas_describe_code_binding(Language, RegistryName, SchemaName,
#' SchemaVersion)
#'
#' @param Language [required] The language of the code binding.
#' @param RegistryName [required] The name of the registry.
#' @param SchemaName [required] The name of the schema.
#' @param SchemaVersion Specifying this limits the results to only this schema version.
#'
#' @section Request syntax:
#' ```
#' svc$describe_code_binding(
#' Language = "string",
#' RegistryName = "string",
#' SchemaName = "string",
#' SchemaVersion = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_describe_code_binding
schemas_describe_code_binding <- function(Language, RegistryName, SchemaName, SchemaVersion = NULL) {
op <- new_operation(
name = "DescribeCodeBinding",
http_method = "GET",
http_path = "/v1/registries/name/{registryName}/schemas/name/{schemaName}/language/{language}",
paginator = list()
)
input <- .schemas$describe_code_binding_input(Language = Language, RegistryName = RegistryName, SchemaName = SchemaName, SchemaVersion = SchemaVersion)
output <- .schemas$describe_code_binding_output()
config <- get_config()
svc <- .schemas$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.schemas$operations$describe_code_binding <- schemas_describe_code_binding
#' Describes the discoverer
#'
#' @description
#' Describes the discoverer.
#'
#' @usage
#' schemas_describe_discoverer(DiscovererId)
#'
#' @param DiscovererId [required] The ID of the discoverer.
#'
#' @section Request syntax:
#' ```
#' svc$describe_discoverer(
#' DiscovererId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_describe_discoverer
schemas_describe_discoverer <- function(DiscovererId) {
op <- new_operation(
name = "DescribeDiscoverer",
http_method = "GET",
http_path = "/v1/discoverers/id/{discovererId}",
paginator = list()
)
input <- .schemas$describe_discoverer_input(DiscovererId = DiscovererId)
output <- .schemas$describe_discoverer_output()
config <- get_config()
svc <- .schemas$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.schemas$operations$describe_discoverer <- schemas_describe_discoverer
#' Describes the registry
#'
#' @description
#' Describes the registry.
#'
#' @usage
#' schemas_describe_registry(RegistryName)
#'
#' @param RegistryName [required] The name of the registry.
#'
#' @section Request syntax:
#' ```
#' svc$describe_registry(
#' RegistryName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_describe_registry
schemas_describe_registry <- function(RegistryName) {
op <- new_operation(
name = "DescribeRegistry",
http_method = "GET",
http_path = "/v1/registries/name/{registryName}",
paginator = list()
)
input <- .schemas$describe_registry_input(RegistryName = RegistryName)
output <- .schemas$describe_registry_output()
config <- get_config()
svc <- .schemas$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.schemas$operations$describe_registry <- schemas_describe_registry
#' Retrieve the schema definition
#'
#' @description
#' Retrieve the schema definition.
#'
#' @usage
#' schemas_describe_schema(RegistryName, SchemaName, SchemaVersion)
#'
#' @param RegistryName [required] The name of the registry.
#' @param SchemaName [required] The name of the schema.
#' @param SchemaVersion Specifying this limits the results to only this schema version.
#'
#' @section Request syntax:
#' ```
#' svc$describe_schema(
#' RegistryName = "string",
#' SchemaName = "string",
#' SchemaVersion = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_describe_schema
schemas_describe_schema <- function(RegistryName, SchemaName, SchemaVersion = NULL) {
op <- new_operation(
name = "DescribeSchema",
http_method = "GET",
http_path = "/v1/registries/name/{registryName}/schemas/name/{schemaName}",
paginator = list()
)
input <- .schemas$describe_schema_input(RegistryName = RegistryName, SchemaName = SchemaName, SchemaVersion = SchemaVersion)
output <- .schemas$describe_schema_output()
config <- get_config()
svc <- .schemas$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.schemas$operations$describe_schema <- schemas_describe_schema
#' Export schema
#'
#' @description
#' Export schema
#'
#' @usage
#' schemas_export_schema(RegistryName, SchemaName, SchemaVersion, Type)
#'
#' @param RegistryName [required] The name of the registry.
#' @param SchemaName [required] The name of the schema.
#' @param SchemaVersion Specifying this limits the results to only this schema version.
#' @param Type [required]
#'
#' @section Request syntax:
#' ```
#' svc$export_schema(
#' RegistryName = "string",
#' SchemaName = "string",
#' SchemaVersion = "string",
#' Type = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_export_schema
schemas_export_schema <- function(RegistryName, SchemaName, SchemaVersion = NULL, Type) {
op <- new_operation(
name = "ExportSchema",
http_method = "GET",
http_path = "/v1/registries/name/{registryName}/schemas/name/{schemaName}/export",
paginator = list()
)
input <- .schemas$export_schema_input(RegistryName = RegistryName, SchemaName = SchemaName, SchemaVersion = SchemaVersion, Type = Type)
output <- .schemas$export_schema_output()
config <- get_config()
svc <- .schemas$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.schemas$operations$export_schema <- schemas_export_schema
#' Get the code binding source URI
#'
#' @description
#' Get the code binding source URI.
#'
#' @usage
#' schemas_get_code_binding_source(Language, RegistryName, SchemaName,
#' SchemaVersion)
#'
#' @param Language [required] The language of the code binding.
#' @param RegistryName [required] The name of the registry.
#' @param SchemaName [required] The name of the schema.
#' @param SchemaVersion Specifying this limits the results to only this schema version.
#'
#' @section Request syntax:
#' ```
#' svc$get_code_binding_source(
#' Language = "string",
#' RegistryName = "string",
#' SchemaName = "string",
#' SchemaVersion = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_get_code_binding_source
schemas_get_code_binding_source <- function(Language, RegistryName, SchemaName, SchemaVersion = NULL) {
op <- new_operation(
name = "GetCodeBindingSource",
http_method = "GET",
http_path = "/v1/registries/name/{registryName}/schemas/name/{schemaName}/language/{language}/source",
paginator = list()
)
input <- .schemas$get_code_binding_source_input(Language = Language, RegistryName = RegistryName, SchemaName = SchemaName, SchemaVersion = SchemaVersion)
output <- .schemas$get_code_binding_source_output()
config <- get_config()
svc <- .schemas$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.schemas$operations$get_code_binding_source <- schemas_get_code_binding_source
#' Get the discovered schema that was generated based on sampled events
#'
#' @description
#' Get the discovered schema that was generated based on sampled events.
#'
#' @usage
#' schemas_get_discovered_schema(Events, Type)
#'
#' @param Events [required] An array of strings where each string is a JSON event. These are the
#' events that were used to generate the schema. The array includes a
#' single type of event and has a maximum size of 10 events.
#' @param Type [required] The type of event.
#'
#' @section Request syntax:
#' ```
#' svc$get_discovered_schema(
#' Events = list(
#' "string"
#' ),
#' Type = "OpenApi3"|"JSONSchemaDraft4"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_get_discovered_schema
schemas_get_discovered_schema <- function(Events, Type) {
op <- new_operation(
name = "GetDiscoveredSchema",
http_method = "POST",
http_path = "/v1/discover",
paginator = list()
)
input <- .schemas$get_discovered_schema_input(Events = Events, Type = Type)
output <- .schemas$get_discovered_schema_output()
config <- get_config()
svc <- .schemas$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.schemas$operations$get_discovered_schema <- schemas_get_discovered_schema
#' Retrieves the resource-based policy attached to a given registry
#'
#' @description
#' Retrieves the resource-based policy attached to a given registry.
#'
#' @usage
#' schemas_get_resource_policy(RegistryName)
#'
#' @param RegistryName The name of the registry.
#'
#' @section Request syntax:
#' ```
#' svc$get_resource_policy(
#' RegistryName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_get_resource_policy
schemas_get_resource_policy <- function(RegistryName = NULL) {
op <- new_operation(
name = "GetResourcePolicy",
http_method = "GET",
http_path = "/v1/policy",
paginator = list()
)
input <- .schemas$get_resource_policy_input(RegistryName = RegistryName)
output <- .schemas$get_resource_policy_output()
config <- get_config()
svc <- .schemas$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.schemas$operations$get_resource_policy <- schemas_get_resource_policy
#' List the discoverers
#'
#' @description
#' List the discoverers.
#'
#' @usage
#' schemas_list_discoverers(DiscovererIdPrefix, Limit, NextToken,
#' SourceArnPrefix)
#'
#' @param DiscovererIdPrefix Specifying this limits the results to only those discoverer IDs that
#' start with the specified prefix.
#' @param Limit
#' @param NextToken The token that specifies the next page of results to return. To request
#' the first page, leave NextToken empty. The token will expire in 24
#' hours, and cannot be shared with other accounts.
#' @param SourceArnPrefix Specifying this limits the results to only those ARNs that start with
#' the specified prefix.
#'
#' @section Request syntax:
#' ```
#' svc$list_discoverers(
#' DiscovererIdPrefix = "string",
#' Limit = 123,
#' NextToken = "string",
#' SourceArnPrefix = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_list_discoverers
schemas_list_discoverers <- function(DiscovererIdPrefix = NULL, Limit = NULL, NextToken = NULL, SourceArnPrefix = NULL) {
op <- new_operation(
name = "ListDiscoverers",
http_method = "GET",
http_path = "/v1/discoverers",
paginator = list()
)
input <- .schemas$list_discoverers_input(DiscovererIdPrefix = DiscovererIdPrefix, Limit = Limit, NextToken = NextToken, SourceArnPrefix = SourceArnPrefix)
output <- .schemas$list_discoverers_output()
config <- get_config()
svc <- .schemas$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.schemas$operations$list_discoverers <- schemas_list_discoverers
#' List the registries
#'
#' @description
#' List the registries.
#'
#' @usage
#' schemas_list_registries(Limit, NextToken, RegistryNamePrefix, Scope)
#'
#' @param Limit
#' @param NextToken The token that specifies the next page of results to return. To request
#' the first page, leave NextToken empty. The token will expire in 24
#' hours, and cannot be shared with other accounts.
#' @param RegistryNamePrefix Specifying this limits the results to only those registry names that
#' start with the specified prefix.
#' @param Scope Can be set to Local or AWS to limit responses to your custom registries,
#' or the ones provided by AWS.
#'
#' @section Request syntax:
#' ```
#' svc$list_registries(
#' Limit = 123,
#' NextToken = "string",
#' RegistryNamePrefix = "string",
#' Scope = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_list_registries
schemas_list_registries <- function(Limit = NULL, NextToken = NULL, RegistryNamePrefix = NULL, Scope = NULL) {
op <- new_operation(
name = "ListRegistries",
http_method = "GET",
http_path = "/v1/registries",
paginator = list()
)
input <- .schemas$list_registries_input(Limit = Limit, NextToken = NextToken, RegistryNamePrefix = RegistryNamePrefix, Scope = Scope)
output <- .schemas$list_registries_output()
config <- get_config()
svc <- .schemas$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.schemas$operations$list_registries <- schemas_list_registries
#' Provides a list of the schema versions and related information
#'
#' @description
#' Provides a list of the schema versions and related information.
#'
#' @usage
#' schemas_list_schema_versions(Limit, NextToken, RegistryName, SchemaName)
#'
#' @param Limit
#' @param NextToken The token that specifies the next page of results to return. To request
#' the first page, leave NextToken empty. The token will expire in 24
#' hours, and cannot be shared with other accounts.
#' @param RegistryName [required] The name of the registry.
#' @param SchemaName [required] The name of the schema.
#'
#' @section Request syntax:
#' ```
#' svc$list_schema_versions(
#' Limit = 123,
#' NextToken = "string",
#' RegistryName = "string",
#' SchemaName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_list_schema_versions
schemas_list_schema_versions <- function(Limit = NULL, NextToken = NULL, RegistryName, SchemaName) {
  # Resolve the service client from the active configuration.
  svc <- .schemas$service(get_config())
  # Describe the ListSchemaVersions REST operation.
  op <- new_operation(
    name = "ListSchemaVersions",
    http_method = "GET",
    http_path = "/v1/registries/name/{registryName}/schemas/name/{schemaName}/versions",
    paginator = list()
  )
  # Serialize the inputs and declare the expected output shape.
  input <- .schemas$list_schema_versions_input(
    Limit = Limit,
    NextToken = NextToken,
    RegistryName = RegistryName,
    SchemaName = SchemaName
  )
  output <- .schemas$list_schema_versions_output()
  # Build, dispatch, and return the parsed response.
  send_request(new_request(svc, op, input, output))
}
.schemas$operations$list_schema_versions <- schemas_list_schema_versions
#' List the schemas
#'
#' @description
#' List the schemas.
#'
#' @usage
#' schemas_list_schemas(Limit, NextToken, RegistryName, SchemaNamePrefix)
#'
#' @param Limit The maximum number of results to return per page.
#' @param NextToken The token that specifies the next page of results to return. To request
#' the first page, leave NextToken empty. The token will expire in 24
#' hours, and cannot be shared with other accounts.
#' @param RegistryName [required] The name of the registry.
#' @param SchemaNamePrefix Specifying this limits the results to only those schema names that start
#' with the specified prefix.
#'
#' @section Request syntax:
#' ```
#' svc$list_schemas(
#' Limit = 123,
#' NextToken = "string",
#' RegistryName = "string",
#' SchemaNamePrefix = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_list_schemas
schemas_list_schemas <- function(Limit = NULL, NextToken = NULL, RegistryName, SchemaNamePrefix = NULL) {
  # Resolve the service client from the active configuration.
  svc <- .schemas$service(get_config())
  # Describe the ListSchemas REST operation.
  op <- new_operation(
    name = "ListSchemas",
    http_method = "GET",
    http_path = "/v1/registries/name/{registryName}/schemas",
    paginator = list()
  )
  # Serialize the inputs and declare the expected output shape.
  input <- .schemas$list_schemas_input(
    Limit = Limit,
    NextToken = NextToken,
    RegistryName = RegistryName,
    SchemaNamePrefix = SchemaNamePrefix
  )
  output <- .schemas$list_schemas_output()
  # Build, dispatch, and return the parsed response.
  send_request(new_request(svc, op, input, output))
}
.schemas$operations$list_schemas <- schemas_list_schemas
#' Get tags for resource
#'
#' @description
#' Get tags for resource.
#'
#' @usage
#' schemas_list_tags_for_resource(ResourceArn)
#'
#' @param ResourceArn [required] The ARN of the resource.
#'
#' @section Request syntax:
#' ```
#' svc$list_tags_for_resource(
#' ResourceArn = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_list_tags_for_resource
schemas_list_tags_for_resource <- function(ResourceArn) {
  # Resolve the service client from the active configuration.
  svc <- .schemas$service(get_config())
  # Describe the ListTagsForResource REST operation.
  op <- new_operation(
    name = "ListTagsForResource",
    http_method = "GET",
    http_path = "/tags/{resource-arn}",
    paginator = list()
  )
  # Serialize the input and declare the expected output shape.
  input <- .schemas$list_tags_for_resource_input(ResourceArn = ResourceArn)
  output <- .schemas$list_tags_for_resource_output()
  # Build, dispatch, and return the parsed response.
  send_request(new_request(svc, op, input, output))
}
.schemas$operations$list_tags_for_resource <- schemas_list_tags_for_resource
#' Put code binding URI
#'
#' @description
#' Put code binding URI
#'
#' @usage
#' schemas_put_code_binding(Language, RegistryName, SchemaName,
#' SchemaVersion)
#'
#' @param Language [required] The language of the code binding.
#' @param RegistryName [required] The name of the registry.
#' @param SchemaName [required] The name of the schema.
#' @param SchemaVersion Specifying this limits the results to only this schema version.
#'
#' @section Request syntax:
#' ```
#' svc$put_code_binding(
#' Language = "string",
#' RegistryName = "string",
#' SchemaName = "string",
#' SchemaVersion = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_put_code_binding
schemas_put_code_binding <- function(Language, RegistryName, SchemaName, SchemaVersion = NULL) {
  # Resolve the service client from the active configuration.
  svc <- .schemas$service(get_config())
  # Describe the PutCodeBinding REST operation.
  op <- new_operation(
    name = "PutCodeBinding",
    http_method = "POST",
    http_path = "/v1/registries/name/{registryName}/schemas/name/{schemaName}/language/{language}",
    paginator = list()
  )
  # Serialize the inputs and declare the expected output shape.
  input <- .schemas$put_code_binding_input(
    Language = Language,
    RegistryName = RegistryName,
    SchemaName = SchemaName,
    SchemaVersion = SchemaVersion
  )
  output <- .schemas$put_code_binding_output()
  # Build, dispatch, and return the parsed response.
  send_request(new_request(svc, op, input, output))
}
.schemas$operations$put_code_binding <- schemas_put_code_binding
#' The name of the policy
#'
#' @description
#' The name of the policy.
#'
#' @usage
#' schemas_put_resource_policy(Policy, RegistryName, RevisionId)
#'
#' @param Policy [required] The resource-based policy.
#' @param RegistryName The name of the registry.
#' @param RevisionId The revision ID of the policy.
#'
#' @section Request syntax:
#' ```
#' svc$put_resource_policy(
#' Policy = "string",
#' RegistryName = "string",
#' RevisionId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_put_resource_policy
schemas_put_resource_policy <- function(Policy, RegistryName = NULL, RevisionId = NULL) {
  # Resolve the service client from the active configuration.
  svc <- .schemas$service(get_config())
  # Describe the PutResourcePolicy REST operation.
  op <- new_operation(
    name = "PutResourcePolicy",
    http_method = "PUT",
    http_path = "/v1/policy",
    paginator = list()
  )
  # Serialize the inputs and declare the expected output shape.
  input <- .schemas$put_resource_policy_input(
    Policy = Policy,
    RegistryName = RegistryName,
    RevisionId = RevisionId
  )
  output <- .schemas$put_resource_policy_output()
  # Build, dispatch, and return the parsed response.
  send_request(new_request(svc, op, input, output))
}
.schemas$operations$put_resource_policy <- schemas_put_resource_policy
#' Search the schemas
#'
#' @description
#' Search the schemas
#'
#' @usage
#' schemas_search_schemas(Keywords, Limit, NextToken, RegistryName)
#'
#' @param Keywords [required] Specifying this limits the results to only schemas that include the
#' provided keywords.
#' @param Limit The maximum number of results to return per page.
#' @param NextToken The token that specifies the next page of results to return. To request
#' the first page, leave NextToken empty. The token will expire in 24
#' hours, and cannot be shared with other accounts.
#' @param RegistryName [required] The name of the registry.
#'
#' @section Request syntax:
#' ```
#' svc$search_schemas(
#' Keywords = "string",
#' Limit = 123,
#' NextToken = "string",
#' RegistryName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_search_schemas
schemas_search_schemas <- function(Keywords, Limit = NULL, NextToken = NULL, RegistryName) {
  # Resolve the service client from the active configuration.
  svc <- .schemas$service(get_config())
  # Describe the SearchSchemas REST operation.
  op <- new_operation(
    name = "SearchSchemas",
    http_method = "GET",
    http_path = "/v1/registries/name/{registryName}/schemas/search",
    paginator = list()
  )
  # Serialize the inputs and declare the expected output shape.
  input <- .schemas$search_schemas_input(
    Keywords = Keywords,
    Limit = Limit,
    NextToken = NextToken,
    RegistryName = RegistryName
  )
  output <- .schemas$search_schemas_output()
  # Build, dispatch, and return the parsed response.
  send_request(new_request(svc, op, input, output))
}
.schemas$operations$search_schemas <- schemas_search_schemas
#' Starts the discoverer
#'
#' @description
#' Starts the discoverer
#'
#' @usage
#' schemas_start_discoverer(DiscovererId)
#'
#' @param DiscovererId [required] The ID of the discoverer.
#'
#' @section Request syntax:
#' ```
#' svc$start_discoverer(
#' DiscovererId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_start_discoverer
schemas_start_discoverer <- function(DiscovererId) {
  # Resolve the service client from the active configuration.
  svc <- .schemas$service(get_config())
  # Describe the StartDiscoverer REST operation.
  op <- new_operation(
    name = "StartDiscoverer",
    http_method = "POST",
    http_path = "/v1/discoverers/id/{discovererId}/start",
    paginator = list()
  )
  # Serialize the input and declare the expected output shape.
  input <- .schemas$start_discoverer_input(DiscovererId = DiscovererId)
  output <- .schemas$start_discoverer_output()
  # Build, dispatch, and return the parsed response.
  send_request(new_request(svc, op, input, output))
}
.schemas$operations$start_discoverer <- schemas_start_discoverer
#' Stops the discoverer
#'
#' @description
#' Stops the discoverer
#'
#' @usage
#' schemas_stop_discoverer(DiscovererId)
#'
#' @param DiscovererId [required] The ID of the discoverer.
#'
#' @section Request syntax:
#' ```
#' svc$stop_discoverer(
#' DiscovererId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_stop_discoverer
schemas_stop_discoverer <- function(DiscovererId) {
  # Resolve the service client from the active configuration.
  svc <- .schemas$service(get_config())
  # Describe the StopDiscoverer REST operation.
  op <- new_operation(
    name = "StopDiscoverer",
    http_method = "POST",
    http_path = "/v1/discoverers/id/{discovererId}/stop",
    paginator = list()
  )
  # Serialize the input and declare the expected output shape.
  input <- .schemas$stop_discoverer_input(DiscovererId = DiscovererId)
  output <- .schemas$stop_discoverer_output()
  # Build, dispatch, and return the parsed response.
  send_request(new_request(svc, op, input, output))
}
.schemas$operations$stop_discoverer <- schemas_stop_discoverer
#' Add tags to a resource
#'
#' @description
#' Add tags to a resource.
#'
#' @usage
#' schemas_tag_resource(ResourceArn, Tags)
#'
#' @param ResourceArn [required] The ARN of the resource.
#' @param Tags [required] Tags associated with the resource.
#'
#' @section Request syntax:
#' ```
#' svc$tag_resource(
#' ResourceArn = "string",
#' Tags = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_tag_resource
schemas_tag_resource <- function(ResourceArn, Tags) {
  # Resolve the service client from the active configuration.
  svc <- .schemas$service(get_config())
  # Describe the TagResource REST operation.
  op <- new_operation(
    name = "TagResource",
    http_method = "POST",
    http_path = "/tags/{resource-arn}",
    paginator = list()
  )
  # Serialize the inputs and declare the expected output shape.
  input <- .schemas$tag_resource_input(ResourceArn = ResourceArn, Tags = Tags)
  output <- .schemas$tag_resource_output()
  # Build, dispatch, and return the parsed response.
  send_request(new_request(svc, op, input, output))
}
.schemas$operations$tag_resource <- schemas_tag_resource
#' Removes tags from a resource
#'
#' @description
#' Removes tags from a resource.
#'
#' @usage
#' schemas_untag_resource(ResourceArn, TagKeys)
#'
#' @param ResourceArn [required] The ARN of the resource.
#' @param TagKeys [required] Keys of key-value pairs.
#'
#' @section Request syntax:
#' ```
#' svc$untag_resource(
#' ResourceArn = "string",
#' TagKeys = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_untag_resource
schemas_untag_resource <- function(ResourceArn, TagKeys) {
  # Resolve the service client from the active configuration.
  svc <- .schemas$service(get_config())
  # Describe the UntagResource REST operation.
  op <- new_operation(
    name = "UntagResource",
    http_method = "DELETE",
    http_path = "/tags/{resource-arn}",
    paginator = list()
  )
  # Serialize the inputs and declare the expected output shape.
  input <- .schemas$untag_resource_input(ResourceArn = ResourceArn, TagKeys = TagKeys)
  output <- .schemas$untag_resource_output()
  # Build, dispatch, and return the parsed response.
  send_request(new_request(svc, op, input, output))
}
.schemas$operations$untag_resource <- schemas_untag_resource
#' Updates the discoverer
#'
#' @description
#' Updates the discoverer
#'
#' @usage
#' schemas_update_discoverer(Description, DiscovererId)
#'
#' @param Description The description of the discoverer to update.
#' @param DiscovererId [required] The ID of the discoverer.
#'
#' @section Request syntax:
#' ```
#' svc$update_discoverer(
#' Description = "string",
#' DiscovererId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_update_discoverer
schemas_update_discoverer <- function(Description = NULL, DiscovererId) {
  # Resolve the service client from the active configuration.
  svc <- .schemas$service(get_config())
  # Describe the UpdateDiscoverer REST operation.
  op <- new_operation(
    name = "UpdateDiscoverer",
    http_method = "PUT",
    http_path = "/v1/discoverers/id/{discovererId}",
    paginator = list()
  )
  # Serialize the inputs and declare the expected output shape.
  input <- .schemas$update_discoverer_input(
    Description = Description,
    DiscovererId = DiscovererId
  )
  output <- .schemas$update_discoverer_output()
  # Build, dispatch, and return the parsed response.
  send_request(new_request(svc, op, input, output))
}
.schemas$operations$update_discoverer <- schemas_update_discoverer
#' Updates a registry
#'
#' @description
#' Updates a registry.
#'
#' @usage
#' schemas_update_registry(Description, RegistryName)
#'
#' @param Description The description of the registry to update.
#' @param RegistryName [required] The name of the registry.
#'
#' @section Request syntax:
#' ```
#' svc$update_registry(
#' Description = "string",
#' RegistryName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_update_registry
schemas_update_registry <- function(Description = NULL, RegistryName) {
  # Resolve the service client from the active configuration.
  svc <- .schemas$service(get_config())
  # Describe the UpdateRegistry REST operation.
  op <- new_operation(
    name = "UpdateRegistry",
    http_method = "PUT",
    http_path = "/v1/registries/name/{registryName}",
    paginator = list()
  )
  # Serialize the inputs and declare the expected output shape.
  input <- .schemas$update_registry_input(
    Description = Description,
    RegistryName = RegistryName
  )
  output <- .schemas$update_registry_output()
  # Build, dispatch, and return the parsed response.
  send_request(new_request(svc, op, input, output))
}
.schemas$operations$update_registry <- schemas_update_registry
#' Updates the schema definition Inactive schemas will be deleted after two
#' years
#'
#' @description
#' Updates the schema definition
#'
#' Inactive schemas will be deleted after two years.
#'
#' @usage
#' schemas_update_schema(ClientTokenId, Content, Description, RegistryName,
#' SchemaName, Type)
#'
#' @param ClientTokenId The ID of the client token.
#' @param Content The source of the schema definition.
#' @param Description The description of the schema.
#' @param RegistryName [required] The name of the registry.
#' @param SchemaName [required] The name of the schema.
#' @param Type The schema type for the events schema.
#'
#' @section Request syntax:
#' ```
#' svc$update_schema(
#' ClientTokenId = "string",
#' Content = "string",
#' Description = "string",
#' RegistryName = "string",
#' SchemaName = "string",
#' Type = "OpenApi3"|"JSONSchemaDraft4"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_update_schema
schemas_update_schema <- function(ClientTokenId = NULL, Content = NULL, Description = NULL, RegistryName, SchemaName, Type = NULL) {
  # Resolve the service client from the active configuration.
  svc <- .schemas$service(get_config())
  # Describe the UpdateSchema REST operation.
  op <- new_operation(
    name = "UpdateSchema",
    http_method = "PUT",
    http_path = "/v1/registries/name/{registryName}/schemas/name/{schemaName}",
    paginator = list()
  )
  # Serialize the inputs and declare the expected output shape.
  input <- .schemas$update_schema_input(
    ClientTokenId = ClientTokenId,
    Content = Content,
    Description = Description,
    RegistryName = RegistryName,
    SchemaName = SchemaName,
    Type = Type
  )
  output <- .schemas$update_schema_output()
  # Build, dispatch, and return the parsed response.
  send_request(new_request(svc, op, input, output))
}
.schemas$operations$update_schema <- schemas_update_schema
| /paws/R/schemas_operations.R | permissive | sanchezvivi/paws | R | false | false | 38,469 | r | # This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common get_config new_operation new_request send_request
#' @include schemas_service.R
NULL
#' Creates a discoverer
#'
#' @description
#' Creates a discoverer.
#'
#' @usage
#' schemas_create_discoverer(Description, SourceArn, Tags)
#'
#' @param Description A description for the discoverer.
#' @param SourceArn [required] The ARN of the event bus.
#' @param Tags Tags associated with the resource.
#'
#' @section Request syntax:
#' ```
#' svc$create_discoverer(
#' Description = "string",
#' SourceArn = "string",
#' Tags = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_create_discoverer
schemas_create_discoverer <- function(Description = NULL, SourceArn, Tags = NULL) {
  # Resolve the service client from the active configuration.
  svc <- .schemas$service(get_config())
  # Describe the CreateDiscoverer REST operation.
  op <- new_operation(
    name = "CreateDiscoverer",
    http_method = "POST",
    http_path = "/v1/discoverers",
    paginator = list()
  )
  # Serialize the inputs and declare the expected output shape.
  input <- .schemas$create_discoverer_input(
    Description = Description,
    SourceArn = SourceArn,
    Tags = Tags
  )
  output <- .schemas$create_discoverer_output()
  # Build, dispatch, and return the parsed response.
  send_request(new_request(svc, op, input, output))
}
.schemas$operations$create_discoverer <- schemas_create_discoverer
#' Creates a registry
#'
#' @description
#' Creates a registry.
#'
#' @usage
#' schemas_create_registry(Description, RegistryName, Tags)
#'
#' @param Description A description of the registry to be created.
#' @param RegistryName [required] The name of the registry.
#' @param Tags Tags to associate with the registry.
#'
#' @section Request syntax:
#' ```
#' svc$create_registry(
#' Description = "string",
#' RegistryName = "string",
#' Tags = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_create_registry
schemas_create_registry <- function(Description = NULL, RegistryName, Tags = NULL) {
  # Resolve the service client from the active configuration.
  svc <- .schemas$service(get_config())
  # Describe the CreateRegistry REST operation.
  op <- new_operation(
    name = "CreateRegistry",
    http_method = "POST",
    http_path = "/v1/registries/name/{registryName}",
    paginator = list()
  )
  # Serialize the inputs and declare the expected output shape.
  input <- .schemas$create_registry_input(
    Description = Description,
    RegistryName = RegistryName,
    Tags = Tags
  )
  output <- .schemas$create_registry_output()
  # Build, dispatch, and return the parsed response.
  send_request(new_request(svc, op, input, output))
}
.schemas$operations$create_registry <- schemas_create_registry
#' Creates a schema definition
#'
#' @description
#' Creates a schema definition.
#'
#' Inactive schemas will be deleted after two years.
#'
#' @usage
#' schemas_create_schema(Content, Description, RegistryName, SchemaName,
#' Tags, Type)
#'
#' @param Content [required] The source of the schema definition.
#' @param Description A description of the schema.
#' @param RegistryName [required] The name of the registry.
#' @param SchemaName [required] The name of the schema.
#' @param Tags Tags associated with the schema.
#' @param Type [required] The type of schema.
#'
#' @section Request syntax:
#' ```
#' svc$create_schema(
#' Content = "string",
#' Description = "string",
#' RegistryName = "string",
#' SchemaName = "string",
#' Tags = list(
#' "string"
#' ),
#' Type = "OpenApi3"|"JSONSchemaDraft4"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_create_schema
schemas_create_schema <- function(Content, Description = NULL, RegistryName, SchemaName, Tags = NULL, Type) {
  # Resolve the service client from the active configuration.
  svc <- .schemas$service(get_config())
  # Describe the CreateSchema REST operation.
  op <- new_operation(
    name = "CreateSchema",
    http_method = "POST",
    http_path = "/v1/registries/name/{registryName}/schemas/name/{schemaName}",
    paginator = list()
  )
  # Serialize the inputs and declare the expected output shape.
  input <- .schemas$create_schema_input(
    Content = Content,
    Description = Description,
    RegistryName = RegistryName,
    SchemaName = SchemaName,
    Tags = Tags,
    Type = Type
  )
  output <- .schemas$create_schema_output()
  # Build, dispatch, and return the parsed response.
  send_request(new_request(svc, op, input, output))
}
.schemas$operations$create_schema <- schemas_create_schema
#' Deletes a discoverer
#'
#' @description
#' Deletes a discoverer.
#'
#' @usage
#' schemas_delete_discoverer(DiscovererId)
#'
#' @param DiscovererId [required] The ID of the discoverer.
#'
#' @section Request syntax:
#' ```
#' svc$delete_discoverer(
#' DiscovererId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_delete_discoverer
schemas_delete_discoverer <- function(DiscovererId) {
  # Resolve the service client from the active configuration.
  svc <- .schemas$service(get_config())
  # Describe the DeleteDiscoverer REST operation.
  op <- new_operation(
    name = "DeleteDiscoverer",
    http_method = "DELETE",
    http_path = "/v1/discoverers/id/{discovererId}",
    paginator = list()
  )
  # Serialize the input and declare the expected output shape.
  input <- .schemas$delete_discoverer_input(DiscovererId = DiscovererId)
  output <- .schemas$delete_discoverer_output()
  # Build, dispatch, and return the parsed response.
  send_request(new_request(svc, op, input, output))
}
.schemas$operations$delete_discoverer <- schemas_delete_discoverer
#' Deletes a Registry
#'
#' @description
#' Deletes a Registry.
#'
#' @usage
#' schemas_delete_registry(RegistryName)
#'
#' @param RegistryName [required] The name of the registry.
#'
#' @section Request syntax:
#' ```
#' svc$delete_registry(
#' RegistryName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_delete_registry
schemas_delete_registry <- function(RegistryName) {
  # Resolve the service client from the active configuration.
  svc <- .schemas$service(get_config())
  # Describe the DeleteRegistry REST operation.
  op <- new_operation(
    name = "DeleteRegistry",
    http_method = "DELETE",
    http_path = "/v1/registries/name/{registryName}",
    paginator = list()
  )
  # Serialize the input and declare the expected output shape.
  input <- .schemas$delete_registry_input(RegistryName = RegistryName)
  output <- .schemas$delete_registry_output()
  # Build, dispatch, and return the parsed response.
  send_request(new_request(svc, op, input, output))
}
.schemas$operations$delete_registry <- schemas_delete_registry
#' Delete the resource-based policy attached to the specified registry
#'
#' @description
#' Delete the resource-based policy attached to the specified registry.
#'
#' @usage
#' schemas_delete_resource_policy(RegistryName)
#'
#' @param RegistryName The name of the registry.
#'
#' @section Request syntax:
#' ```
#' svc$delete_resource_policy(
#' RegistryName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_delete_resource_policy
schemas_delete_resource_policy <- function(RegistryName = NULL) {
  # Resolve the service client from the active configuration.
  svc <- .schemas$service(get_config())
  # Describe the DeleteResourcePolicy REST operation.
  op <- new_operation(
    name = "DeleteResourcePolicy",
    http_method = "DELETE",
    http_path = "/v1/policy",
    paginator = list()
  )
  # Serialize the input and declare the expected output shape.
  input <- .schemas$delete_resource_policy_input(RegistryName = RegistryName)
  output <- .schemas$delete_resource_policy_output()
  # Build, dispatch, and return the parsed response.
  send_request(new_request(svc, op, input, output))
}
.schemas$operations$delete_resource_policy <- schemas_delete_resource_policy
#' Delete a schema definition
#'
#' @description
#' Delete a schema definition.
#'
#' @usage
#' schemas_delete_schema(RegistryName, SchemaName)
#'
#' @param RegistryName [required] The name of the registry.
#' @param SchemaName [required] The name of the schema.
#'
#' @section Request syntax:
#' ```
#' svc$delete_schema(
#' RegistryName = "string",
#' SchemaName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_delete_schema
schemas_delete_schema <- function(RegistryName, SchemaName) {
  # Resolve the service client from the active configuration.
  svc <- .schemas$service(get_config())
  # Describe the DeleteSchema REST operation.
  op <- new_operation(
    name = "DeleteSchema",
    http_method = "DELETE",
    http_path = "/v1/registries/name/{registryName}/schemas/name/{schemaName}",
    paginator = list()
  )
  # Serialize the inputs and declare the expected output shape.
  input <- .schemas$delete_schema_input(
    RegistryName = RegistryName,
    SchemaName = SchemaName
  )
  output <- .schemas$delete_schema_output()
  # Build, dispatch, and return the parsed response.
  send_request(new_request(svc, op, input, output))
}
.schemas$operations$delete_schema <- schemas_delete_schema
#' Delete the schema version definition
#'
#' @description
#' Delete the schema version definition
#'
#' @usage
#' schemas_delete_schema_version(RegistryName, SchemaName, SchemaVersion)
#'
#' @param RegistryName [required] The name of the registry.
#' @param SchemaName [required] The name of the schema.
#' @param SchemaVersion [required] The version number of the schema
#'
#' @section Request syntax:
#' ```
#' svc$delete_schema_version(
#' RegistryName = "string",
#' SchemaName = "string",
#' SchemaVersion = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_delete_schema_version
schemas_delete_schema_version <- function(RegistryName, SchemaName, SchemaVersion) {
  # Resolve the service client from the active configuration.
  svc <- .schemas$service(get_config())
  # Describe the DeleteSchemaVersion REST operation.
  op <- new_operation(
    name = "DeleteSchemaVersion",
    http_method = "DELETE",
    http_path = "/v1/registries/name/{registryName}/schemas/name/{schemaName}/version/{schemaVersion}",
    paginator = list()
  )
  # Serialize the inputs and declare the expected output shape.
  input <- .schemas$delete_schema_version_input(
    RegistryName = RegistryName,
    SchemaName = SchemaName,
    SchemaVersion = SchemaVersion
  )
  output <- .schemas$delete_schema_version_output()
  # Build, dispatch, and return the parsed response.
  send_request(new_request(svc, op, input, output))
}
.schemas$operations$delete_schema_version <- schemas_delete_schema_version
#' Describe the code binding URI
#'
#' @description
#' Describe the code binding URI.
#'
#' @usage
#' schemas_describe_code_binding(Language, RegistryName, SchemaName,
#' SchemaVersion)
#'
#' @param Language [required] The language of the code binding.
#' @param RegistryName [required] The name of the registry.
#' @param SchemaName [required] The name of the schema.
#' @param SchemaVersion Specifying this limits the results to only this schema version.
#'
#' @section Request syntax:
#' ```
#' svc$describe_code_binding(
#' Language = "string",
#' RegistryName = "string",
#' SchemaName = "string",
#' SchemaVersion = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_describe_code_binding
schemas_describe_code_binding <- function(Language, RegistryName, SchemaName, SchemaVersion = NULL) {
  # Resolve the service client from the active configuration.
  svc <- .schemas$service(get_config())
  # Describe the DescribeCodeBinding REST operation.
  op <- new_operation(
    name = "DescribeCodeBinding",
    http_method = "GET",
    http_path = "/v1/registries/name/{registryName}/schemas/name/{schemaName}/language/{language}",
    paginator = list()
  )
  # Serialize the inputs and declare the expected output shape.
  input <- .schemas$describe_code_binding_input(
    Language = Language,
    RegistryName = RegistryName,
    SchemaName = SchemaName,
    SchemaVersion = SchemaVersion
  )
  output <- .schemas$describe_code_binding_output()
  # Build, dispatch, and return the parsed response.
  send_request(new_request(svc, op, input, output))
}
.schemas$operations$describe_code_binding <- schemas_describe_code_binding
#' Describes the discoverer
#'
#' @description
#' Describes the discoverer.
#'
#' @usage
#' schemas_describe_discoverer(DiscovererId)
#'
#' @param DiscovererId [required] The ID of the discoverer.
#'
#' @section Request syntax:
#' ```
#' svc$describe_discoverer(
#' DiscovererId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_describe_discoverer
schemas_describe_discoverer <- function(DiscovererId) {
  # Resolve the service client from the active configuration.
  svc <- .schemas$service(get_config())
  # Describe the DescribeDiscoverer REST operation.
  op <- new_operation(
    name = "DescribeDiscoverer",
    http_method = "GET",
    http_path = "/v1/discoverers/id/{discovererId}",
    paginator = list()
  )
  # Serialize the input and declare the expected output shape.
  input <- .schemas$describe_discoverer_input(DiscovererId = DiscovererId)
  output <- .schemas$describe_discoverer_output()
  # Build, dispatch, and return the parsed response.
  send_request(new_request(svc, op, input, output))
}
.schemas$operations$describe_discoverer <- schemas_describe_discoverer
#' Describes the registry
#'
#' @description
#' Describes the registry.
#'
#' @usage
#' schemas_describe_registry(RegistryName)
#'
#' @param RegistryName [required] The name of the registry.
#'
#' @section Request syntax:
#' ```
#' svc$describe_registry(
#' RegistryName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_describe_registry
schemas_describe_registry <- function(RegistryName) {
  # Resolve the service client from the active configuration.
  svc <- .schemas$service(get_config())
  # Describe the DescribeRegistry REST operation.
  op <- new_operation(
    name = "DescribeRegistry",
    http_method = "GET",
    http_path = "/v1/registries/name/{registryName}",
    paginator = list()
  )
  # Serialize the input and declare the expected output shape.
  input <- .schemas$describe_registry_input(RegistryName = RegistryName)
  output <- .schemas$describe_registry_output()
  # Build, dispatch, and return the parsed response.
  send_request(new_request(svc, op, input, output))
}
.schemas$operations$describe_registry <- schemas_describe_registry
#' Retrieve the schema definition
#'
#' @description
#' Retrieve the schema definition.
#'
#' @usage
#' schemas_describe_schema(RegistryName, SchemaName, SchemaVersion)
#'
#' @param RegistryName [required] The name of the registry.
#' @param SchemaName [required] The name of the schema.
#' @param SchemaVersion Specifying this limits the results to only this schema version.
#'
#' @section Request syntax:
#' ```
#' svc$describe_schema(
#' RegistryName = "string",
#' SchemaName = "string",
#' SchemaVersion = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_describe_schema
schemas_describe_schema <- function(RegistryName, SchemaName, SchemaVersion = NULL) {
  # Resolve the service client from the active configuration.
  svc <- .schemas$service(get_config())
  # Describe the DescribeSchema REST operation.
  op <- new_operation(
    name = "DescribeSchema",
    http_method = "GET",
    http_path = "/v1/registries/name/{registryName}/schemas/name/{schemaName}",
    paginator = list()
  )
  # Serialize the inputs and declare the expected output shape.
  input <- .schemas$describe_schema_input(
    RegistryName = RegistryName,
    SchemaName = SchemaName,
    SchemaVersion = SchemaVersion
  )
  output <- .schemas$describe_schema_output()
  # Build, dispatch, and return the parsed response.
  send_request(new_request(svc, op, input, output))
}
.schemas$operations$describe_schema <- schemas_describe_schema
#' Export schema
#'
#' @description
#' Export schema
#'
#' @usage
#' schemas_export_schema(RegistryName, SchemaName, SchemaVersion, Type)
#'
#' @param RegistryName [required] The name of the registry.
#' @param SchemaName [required] The name of the schema.
#' @param SchemaVersion Specifying this limits the results to only this schema version.
#' @param Type [required] The type to export the schema as.
#'
#' @section Request syntax:
#' ```
#' svc$export_schema(
#' RegistryName = "string",
#' SchemaName = "string",
#' SchemaVersion = "string",
#' Type = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_export_schema
schemas_export_schema <- function(RegistryName, SchemaName, SchemaVersion = NULL, Type) {
  # Resolve the service client from the active configuration.
  svc <- .schemas$service(get_config())
  # Describe the ExportSchema REST operation.
  op <- new_operation(
    name = "ExportSchema",
    http_method = "GET",
    http_path = "/v1/registries/name/{registryName}/schemas/name/{schemaName}/export",
    paginator = list()
  )
  # Serialize the inputs and declare the expected output shape.
  input <- .schemas$export_schema_input(
    RegistryName = RegistryName,
    SchemaName = SchemaName,
    SchemaVersion = SchemaVersion,
    Type = Type
  )
  output <- .schemas$export_schema_output()
  # Build, dispatch, and return the parsed response.
  send_request(new_request(svc, op, input, output))
}
.schemas$operations$export_schema <- schemas_export_schema
#' Get the code binding source URI
#'
#' @description
#' Get the code binding source URI.
#'
#' @usage
#' schemas_get_code_binding_source(Language, RegistryName, SchemaName,
#' SchemaVersion)
#'
#' @param Language [required] The language of the code binding.
#' @param RegistryName [required] The name of the registry.
#' @param SchemaName [required] The name of the schema.
#' @param SchemaVersion Specifying this limits the results to only this schema version.
#'
#' @section Request syntax:
#' ```
#' svc$get_code_binding_source(
#' Language = "string",
#' RegistryName = "string",
#' SchemaName = "string",
#' SchemaVersion = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_get_code_binding_source
schemas_get_code_binding_source <- function(Language, RegistryName, SchemaName, SchemaVersion = NULL) {
  # Resolve the service client from the active configuration.
  svc <- .schemas$service(get_config())
  # Describe the GetCodeBindingSource REST operation.
  op <- new_operation(
    name = "GetCodeBindingSource",
    http_method = "GET",
    http_path = "/v1/registries/name/{registryName}/schemas/name/{schemaName}/language/{language}/source",
    paginator = list()
  )
  # Serialize the inputs and declare the expected output shape.
  input <- .schemas$get_code_binding_source_input(
    Language = Language,
    RegistryName = RegistryName,
    SchemaName = SchemaName,
    SchemaVersion = SchemaVersion
  )
  output <- .schemas$get_code_binding_source_output()
  # Build, dispatch, and return the parsed response.
  send_request(new_request(svc, op, input, output))
}
.schemas$operations$get_code_binding_source <- schemas_get_code_binding_source
#' Get the discovered schema that was generated based on sampled events
#'
#' @description
#' Get the discovered schema that was generated based on sampled events.
#'
#' @usage
#' schemas_get_discovered_schema(Events, Type)
#'
#' @param Events [required] An array of strings where each string is a JSON event. These are the
#' events that were used to generate the schema. The array includes a
#' single type of event and has a maximum size of 10 events.
#' @param Type [required] The type of event.
#'
#' @section Request syntax:
#' ```
#' svc$get_discovered_schema(
#' Events = list(
#' "string"
#' ),
#' Type = "OpenApi3"|"JSONSchemaDraft4"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_get_discovered_schema
schemas_get_discovered_schema <- function(Events, Type) {
  # Resolve the service client from the active configuration.
  svc <- .schemas$service(get_config())
  # Describe the GetDiscoveredSchema REST operation.
  op <- new_operation(
    name = "GetDiscoveredSchema",
    http_method = "POST",
    http_path = "/v1/discover",
    paginator = list()
  )
  # Serialize the inputs and declare the expected output shape.
  input <- .schemas$get_discovered_schema_input(Events = Events, Type = Type)
  output <- .schemas$get_discovered_schema_output()
  # Build, dispatch, and return the parsed response.
  send_request(new_request(svc, op, input, output))
}
.schemas$operations$get_discovered_schema <- schemas_get_discovered_schema
#' Retrieves the resource-based policy attached to a given registry
#'
#' @description
#' Retrieves the resource-based policy attached to a given registry.
#'
#' @usage
#' schemas_get_resource_policy(RegistryName)
#'
#' @param RegistryName The name of the registry.
#'
#' @section Request syntax:
#' ```
#' svc$get_resource_policy(
#' RegistryName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_get_resource_policy
schemas_get_resource_policy <- function(RegistryName = NULL) {
  # Describe the GetResourcePolicy REST operation (no pagination).
  operation <- new_operation(
    name = "GetResourcePolicy",
    http_method = "GET",
    http_path = "/v1/policy",
    paginator = list()
  )
  # Serialize caller arguments and resolve the expected output shape, then
  # build the service client from the current config and dispatch the call.
  serialized_input <- .schemas$get_resource_policy_input(RegistryName = RegistryName)
  output_shape <- .schemas$get_resource_policy_output()
  client <- .schemas$service(get_config())
  send_request(new_request(client, operation, serialized_input, output_shape))
}
.schemas$operations$get_resource_policy <- schemas_get_resource_policy
#' List the discoverers
#'
#' @description
#' List the discoverers.
#'
#' @usage
#' schemas_list_discoverers(DiscovererIdPrefix, Limit, NextToken,
#' SourceArnPrefix)
#'
#' @param DiscovererIdPrefix Specifying this limits the results to only those discoverer IDs that
#' start with the specified prefix.
#' @param Limit
#' @param NextToken The token that specifies the next page of results to return. To request
#' the first page, leave NextToken empty. The token will expire in 24
#' hours, and cannot be shared with other accounts.
#' @param SourceArnPrefix Specifying this limits the results to only those ARNs that start with
#' the specified prefix.
#'
#' @section Request syntax:
#' ```
#' svc$list_discoverers(
#' DiscovererIdPrefix = "string",
#' Limit = 123,
#' NextToken = "string",
#' SourceArnPrefix = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_list_discoverers
schemas_list_discoverers <- function(DiscovererIdPrefix = NULL, Limit = NULL, NextToken = NULL, SourceArnPrefix = NULL) {
  # Describe the ListDiscoverers REST operation (no pagination).
  operation <- new_operation(
    name = "ListDiscoverers",
    http_method = "GET",
    http_path = "/v1/discoverers",
    paginator = list()
  )
  # Serialize caller arguments and resolve the expected output shape, then
  # build the service client from the current config and dispatch the call.
  serialized_input <- .schemas$list_discoverers_input(DiscovererIdPrefix = DiscovererIdPrefix, Limit = Limit, NextToken = NextToken, SourceArnPrefix = SourceArnPrefix)
  output_shape <- .schemas$list_discoverers_output()
  client <- .schemas$service(get_config())
  send_request(new_request(client, operation, serialized_input, output_shape))
}
.schemas$operations$list_discoverers <- schemas_list_discoverers
#' List the registries
#'
#' @description
#' List the registries.
#'
#' @usage
#' schemas_list_registries(Limit, NextToken, RegistryNamePrefix, Scope)
#'
#' @param Limit
#' @param NextToken The token that specifies the next page of results to return. To request
#' the first page, leave NextToken empty. The token will expire in 24
#' hours, and cannot be shared with other accounts.
#' @param RegistryNamePrefix Specifying this limits the results to only those registry names that
#' start with the specified prefix.
#' @param Scope Can be set to Local or AWS to limit responses to your custom registries,
#' or the ones provided by AWS.
#'
#' @section Request syntax:
#' ```
#' svc$list_registries(
#' Limit = 123,
#' NextToken = "string",
#' RegistryNamePrefix = "string",
#' Scope = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_list_registries
schemas_list_registries <- function(Limit = NULL, NextToken = NULL, RegistryNamePrefix = NULL, Scope = NULL) {
  # Describe the ListRegistries REST operation (no pagination).
  operation <- new_operation(
    name = "ListRegistries",
    http_method = "GET",
    http_path = "/v1/registries",
    paginator = list()
  )
  # Serialize caller arguments and resolve the expected output shape, then
  # build the service client from the current config and dispatch the call.
  serialized_input <- .schemas$list_registries_input(Limit = Limit, NextToken = NextToken, RegistryNamePrefix = RegistryNamePrefix, Scope = Scope)
  output_shape <- .schemas$list_registries_output()
  client <- .schemas$service(get_config())
  send_request(new_request(client, operation, serialized_input, output_shape))
}
.schemas$operations$list_registries <- schemas_list_registries
#' Provides a list of the schema versions and related information
#'
#' @description
#' Provides a list of the schema versions and related information.
#'
#' @usage
#' schemas_list_schema_versions(Limit, NextToken, RegistryName, SchemaName)
#'
#' @param Limit
#' @param NextToken The token that specifies the next page of results to return. To request
#' the first page, leave NextToken empty. The token will expire in 24
#' hours, and cannot be shared with other accounts.
#' @param RegistryName [required] The name of the registry.
#' @param SchemaName [required] The name of the schema.
#'
#' @section Request syntax:
#' ```
#' svc$list_schema_versions(
#' Limit = 123,
#' NextToken = "string",
#' RegistryName = "string",
#' SchemaName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_list_schema_versions
schemas_list_schema_versions <- function(Limit = NULL, NextToken = NULL, RegistryName, SchemaName) {
  # Describe the ListSchemaVersions REST operation (no pagination).
  operation <- new_operation(
    name = "ListSchemaVersions",
    http_method = "GET",
    http_path = "/v1/registries/name/{registryName}/schemas/name/{schemaName}/versions",
    paginator = list()
  )
  # Serialize caller arguments and resolve the expected output shape, then
  # build the service client from the current config and dispatch the call.
  serialized_input <- .schemas$list_schema_versions_input(Limit = Limit, NextToken = NextToken, RegistryName = RegistryName, SchemaName = SchemaName)
  output_shape <- .schemas$list_schema_versions_output()
  client <- .schemas$service(get_config())
  send_request(new_request(client, operation, serialized_input, output_shape))
}
.schemas$operations$list_schema_versions <- schemas_list_schema_versions
#' List the schemas
#'
#' @description
#' List the schemas.
#'
#' @usage
#' schemas_list_schemas(Limit, NextToken, RegistryName, SchemaNamePrefix)
#'
#' @param Limit
#' @param NextToken The token that specifies the next page of results to return. To request
#' the first page, leave NextToken empty. The token will expire in 24
#' hours, and cannot be shared with other accounts.
#' @param RegistryName [required] The name of the registry.
#' @param SchemaNamePrefix Specifying this limits the results to only those schema names that start
#' with the specified prefix.
#'
#' @section Request syntax:
#' ```
#' svc$list_schemas(
#' Limit = 123,
#' NextToken = "string",
#' RegistryName = "string",
#' SchemaNamePrefix = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_list_schemas
schemas_list_schemas <- function(Limit = NULL, NextToken = NULL, RegistryName, SchemaNamePrefix = NULL) {
  # Describe the ListSchemas REST operation (no pagination).
  operation <- new_operation(
    name = "ListSchemas",
    http_method = "GET",
    http_path = "/v1/registries/name/{registryName}/schemas",
    paginator = list()
  )
  # Serialize caller arguments and resolve the expected output shape, then
  # build the service client from the current config and dispatch the call.
  serialized_input <- .schemas$list_schemas_input(Limit = Limit, NextToken = NextToken, RegistryName = RegistryName, SchemaNamePrefix = SchemaNamePrefix)
  output_shape <- .schemas$list_schemas_output()
  client <- .schemas$service(get_config())
  send_request(new_request(client, operation, serialized_input, output_shape))
}
.schemas$operations$list_schemas <- schemas_list_schemas
#' Get tags for resource
#'
#' @description
#' Get tags for resource.
#'
#' @usage
#' schemas_list_tags_for_resource(ResourceArn)
#'
#' @param ResourceArn [required] The ARN of the resource.
#'
#' @section Request syntax:
#' ```
#' svc$list_tags_for_resource(
#' ResourceArn = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_list_tags_for_resource
schemas_list_tags_for_resource <- function(ResourceArn) {
  # Describe the ListTagsForResource REST operation (no pagination).
  operation <- new_operation(
    name = "ListTagsForResource",
    http_method = "GET",
    http_path = "/tags/{resource-arn}",
    paginator = list()
  )
  # Serialize caller arguments and resolve the expected output shape, then
  # build the service client from the current config and dispatch the call.
  serialized_input <- .schemas$list_tags_for_resource_input(ResourceArn = ResourceArn)
  output_shape <- .schemas$list_tags_for_resource_output()
  client <- .schemas$service(get_config())
  send_request(new_request(client, operation, serialized_input, output_shape))
}
.schemas$operations$list_tags_for_resource <- schemas_list_tags_for_resource
#' Put code binding URI
#'
#' @description
#' Put code binding URI
#'
#' @usage
#' schemas_put_code_binding(Language, RegistryName, SchemaName,
#' SchemaVersion)
#'
#' @param Language [required] The language of the code binding.
#' @param RegistryName [required] The name of the registry.
#' @param SchemaName [required] The name of the schema.
#' @param SchemaVersion Specifying this limits the results to only this schema version.
#'
#' @section Request syntax:
#' ```
#' svc$put_code_binding(
#' Language = "string",
#' RegistryName = "string",
#' SchemaName = "string",
#' SchemaVersion = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_put_code_binding
schemas_put_code_binding <- function(Language, RegistryName, SchemaName, SchemaVersion = NULL) {
  # Describe the PutCodeBinding REST operation (no pagination).
  operation <- new_operation(
    name = "PutCodeBinding",
    http_method = "POST",
    http_path = "/v1/registries/name/{registryName}/schemas/name/{schemaName}/language/{language}",
    paginator = list()
  )
  # Serialize caller arguments and resolve the expected output shape, then
  # build the service client from the current config and dispatch the call.
  serialized_input <- .schemas$put_code_binding_input(Language = Language, RegistryName = RegistryName, SchemaName = SchemaName, SchemaVersion = SchemaVersion)
  output_shape <- .schemas$put_code_binding_output()
  client <- .schemas$service(get_config())
  send_request(new_request(client, operation, serialized_input, output_shape))
}
.schemas$operations$put_code_binding <- schemas_put_code_binding
#' The name of the policy
#'
#' @description
#' The name of the policy.
#'
#' @usage
#' schemas_put_resource_policy(Policy, RegistryName, RevisionId)
#'
#' @param Policy [required] The resource-based policy.
#' @param RegistryName The name of the registry.
#' @param RevisionId The revision ID of the policy.
#'
#' @section Request syntax:
#' ```
#' svc$put_resource_policy(
#' Policy = "string",
#' RegistryName = "string",
#' RevisionId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_put_resource_policy
schemas_put_resource_policy <- function(Policy, RegistryName = NULL, RevisionId = NULL) {
  # Describe the PutResourcePolicy REST operation (no pagination).
  operation <- new_operation(
    name = "PutResourcePolicy",
    http_method = "PUT",
    http_path = "/v1/policy",
    paginator = list()
  )
  # Serialize caller arguments and resolve the expected output shape, then
  # build the service client from the current config and dispatch the call.
  serialized_input <- .schemas$put_resource_policy_input(Policy = Policy, RegistryName = RegistryName, RevisionId = RevisionId)
  output_shape <- .schemas$put_resource_policy_output()
  client <- .schemas$service(get_config())
  send_request(new_request(client, operation, serialized_input, output_shape))
}
.schemas$operations$put_resource_policy <- schemas_put_resource_policy
#' Search the schemas
#'
#' @description
#' Search the schemas
#'
#' @usage
#' schemas_search_schemas(Keywords, Limit, NextToken, RegistryName)
#'
#' @param Keywords [required] Specifying this limits the results to only schemas that include the
#' provided keywords.
#' @param Limit
#' @param NextToken The token that specifies the next page of results to return. To request
#' the first page, leave NextToken empty. The token will expire in 24
#' hours, and cannot be shared with other accounts.
#' @param RegistryName [required] The name of the registry.
#'
#' @section Request syntax:
#' ```
#' svc$search_schemas(
#' Keywords = "string",
#' Limit = 123,
#' NextToken = "string",
#' RegistryName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_search_schemas
schemas_search_schemas <- function(Keywords, Limit = NULL, NextToken = NULL, RegistryName) {
  # Describe the SearchSchemas REST operation (no pagination).
  operation <- new_operation(
    name = "SearchSchemas",
    http_method = "GET",
    http_path = "/v1/registries/name/{registryName}/schemas/search",
    paginator = list()
  )
  # Serialize caller arguments and resolve the expected output shape, then
  # build the service client from the current config and dispatch the call.
  serialized_input <- .schemas$search_schemas_input(Keywords = Keywords, Limit = Limit, NextToken = NextToken, RegistryName = RegistryName)
  output_shape <- .schemas$search_schemas_output()
  client <- .schemas$service(get_config())
  send_request(new_request(client, operation, serialized_input, output_shape))
}
.schemas$operations$search_schemas <- schemas_search_schemas
#' Starts the discoverer
#'
#' @description
#' Starts the discoverer
#'
#' @usage
#' schemas_start_discoverer(DiscovererId)
#'
#' @param DiscovererId [required] The ID of the discoverer.
#'
#' @section Request syntax:
#' ```
#' svc$start_discoverer(
#' DiscovererId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_start_discoverer
schemas_start_discoverer <- function(DiscovererId) {
  # Describe the StartDiscoverer REST operation (no pagination).
  operation <- new_operation(
    name = "StartDiscoverer",
    http_method = "POST",
    http_path = "/v1/discoverers/id/{discovererId}/start",
    paginator = list()
  )
  # Serialize caller arguments and resolve the expected output shape, then
  # build the service client from the current config and dispatch the call.
  serialized_input <- .schemas$start_discoverer_input(DiscovererId = DiscovererId)
  output_shape <- .schemas$start_discoverer_output()
  client <- .schemas$service(get_config())
  send_request(new_request(client, operation, serialized_input, output_shape))
}
.schemas$operations$start_discoverer <- schemas_start_discoverer
#' Stops the discoverer
#'
#' @description
#' Stops the discoverer
#'
#' @usage
#' schemas_stop_discoverer(DiscovererId)
#'
#' @param DiscovererId [required] The ID of the discoverer.
#'
#' @section Request syntax:
#' ```
#' svc$stop_discoverer(
#' DiscovererId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_stop_discoverer
schemas_stop_discoverer <- function(DiscovererId) {
  # Describe the StopDiscoverer REST operation (no pagination).
  operation <- new_operation(
    name = "StopDiscoverer",
    http_method = "POST",
    http_path = "/v1/discoverers/id/{discovererId}/stop",
    paginator = list()
  )
  # Serialize caller arguments and resolve the expected output shape, then
  # build the service client from the current config and dispatch the call.
  serialized_input <- .schemas$stop_discoverer_input(DiscovererId = DiscovererId)
  output_shape <- .schemas$stop_discoverer_output()
  client <- .schemas$service(get_config())
  send_request(new_request(client, operation, serialized_input, output_shape))
}
.schemas$operations$stop_discoverer <- schemas_stop_discoverer
#' Add tags to a resource
#'
#' @description
#' Add tags to a resource.
#'
#' @usage
#' schemas_tag_resource(ResourceArn, Tags)
#'
#' @param ResourceArn [required] The ARN of the resource.
#' @param Tags [required] Tags associated with the resource.
#'
#' @section Request syntax:
#' ```
#' svc$tag_resource(
#' ResourceArn = "string",
#' Tags = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_tag_resource
schemas_tag_resource <- function(ResourceArn, Tags) {
  # Describe the TagResource REST operation (no pagination).
  operation <- new_operation(
    name = "TagResource",
    http_method = "POST",
    http_path = "/tags/{resource-arn}",
    paginator = list()
  )
  # Serialize caller arguments and resolve the expected output shape, then
  # build the service client from the current config and dispatch the call.
  serialized_input <- .schemas$tag_resource_input(ResourceArn = ResourceArn, Tags = Tags)
  output_shape <- .schemas$tag_resource_output()
  client <- .schemas$service(get_config())
  send_request(new_request(client, operation, serialized_input, output_shape))
}
.schemas$operations$tag_resource <- schemas_tag_resource
#' Removes tags from a resource
#'
#' @description
#' Removes tags from a resource.
#'
#' @usage
#' schemas_untag_resource(ResourceArn, TagKeys)
#'
#' @param ResourceArn [required] The ARN of the resource.
#' @param TagKeys [required] Keys of key-value pairs.
#'
#' @section Request syntax:
#' ```
#' svc$untag_resource(
#' ResourceArn = "string",
#' TagKeys = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_untag_resource
schemas_untag_resource <- function(ResourceArn, TagKeys) {
  # Describe the UntagResource REST operation (no pagination).
  operation <- new_operation(
    name = "UntagResource",
    http_method = "DELETE",
    http_path = "/tags/{resource-arn}",
    paginator = list()
  )
  # Serialize caller arguments and resolve the expected output shape, then
  # build the service client from the current config and dispatch the call.
  serialized_input <- .schemas$untag_resource_input(ResourceArn = ResourceArn, TagKeys = TagKeys)
  output_shape <- .schemas$untag_resource_output()
  client <- .schemas$service(get_config())
  send_request(new_request(client, operation, serialized_input, output_shape))
}
.schemas$operations$untag_resource <- schemas_untag_resource
#' Updates the discoverer
#'
#' @description
#' Updates the discoverer
#'
#' @usage
#' schemas_update_discoverer(Description, DiscovererId)
#'
#' @param Description The description of the discoverer to update.
#' @param DiscovererId [required] The ID of the discoverer.
#'
#' @section Request syntax:
#' ```
#' svc$update_discoverer(
#' Description = "string",
#' DiscovererId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_update_discoverer
schemas_update_discoverer <- function(Description = NULL, DiscovererId) {
  # Describe the UpdateDiscoverer REST operation (no pagination).
  operation <- new_operation(
    name = "UpdateDiscoverer",
    http_method = "PUT",
    http_path = "/v1/discoverers/id/{discovererId}",
    paginator = list()
  )
  # Serialize caller arguments and resolve the expected output shape, then
  # build the service client from the current config and dispatch the call.
  serialized_input <- .schemas$update_discoverer_input(Description = Description, DiscovererId = DiscovererId)
  output_shape <- .schemas$update_discoverer_output()
  client <- .schemas$service(get_config())
  send_request(new_request(client, operation, serialized_input, output_shape))
}
.schemas$operations$update_discoverer <- schemas_update_discoverer
#' Updates a registry
#'
#' @description
#' Updates a registry.
#'
#' @usage
#' schemas_update_registry(Description, RegistryName)
#'
#' @param Description The description of the registry to update.
#' @param RegistryName [required] The name of the registry.
#'
#' @section Request syntax:
#' ```
#' svc$update_registry(
#' Description = "string",
#' RegistryName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_update_registry
schemas_update_registry <- function(Description = NULL, RegistryName) {
  # Describe the UpdateRegistry REST operation (no pagination).
  operation <- new_operation(
    name = "UpdateRegistry",
    http_method = "PUT",
    http_path = "/v1/registries/name/{registryName}",
    paginator = list()
  )
  # Serialize caller arguments and resolve the expected output shape, then
  # build the service client from the current config and dispatch the call.
  serialized_input <- .schemas$update_registry_input(Description = Description, RegistryName = RegistryName)
  output_shape <- .schemas$update_registry_output()
  client <- .schemas$service(get_config())
  send_request(new_request(client, operation, serialized_input, output_shape))
}
.schemas$operations$update_registry <- schemas_update_registry
#' Updates the schema definition Inactive schemas will be deleted after two
#' years
#'
#' @description
#' Updates the schema definition
#'
#' Inactive schemas will be deleted after two years.
#'
#' @usage
#' schemas_update_schema(ClientTokenId, Content, Description, RegistryName,
#' SchemaName, Type)
#'
#' @param ClientTokenId The ID of the client token.
#' @param Content The source of the schema definition.
#' @param Description The description of the schema.
#' @param RegistryName [required] The name of the registry.
#' @param SchemaName [required] The name of the schema.
#' @param Type The schema type for the events schema.
#'
#' @section Request syntax:
#' ```
#' svc$update_schema(
#' ClientTokenId = "string",
#' Content = "string",
#' Description = "string",
#' RegistryName = "string",
#' SchemaName = "string",
#' Type = "OpenApi3"|"JSONSchemaDraft4"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname schemas_update_schema
schemas_update_schema <- function(ClientTokenId = NULL, Content = NULL, Description = NULL, RegistryName, SchemaName, Type = NULL) {
  # Describe the UpdateSchema REST operation (no pagination).
  operation <- new_operation(
    name = "UpdateSchema",
    http_method = "PUT",
    http_path = "/v1/registries/name/{registryName}/schemas/name/{schemaName}",
    paginator = list()
  )
  # Serialize caller arguments and resolve the expected output shape, then
  # build the service client from the current config and dispatch the call.
  serialized_input <- .schemas$update_schema_input(ClientTokenId = ClientTokenId, Content = Content, Description = Description, RegistryName = RegistryName, SchemaName = SchemaName, Type = Type)
  output_shape <- .schemas$update_schema_output()
  client <- .schemas$service(get_config())
  send_request(new_request(client, operation, serialized_input, output_shape))
}
.schemas$operations$update_schema <- schemas_update_schema
|
# Convert a BibTeX file into Hugo Academic ("academic-kickstart") publication
# pages: one TOML-front-matter .md file per entry, plus (optionally) one .bib
# file per entry and any linked PDFs moved into a sibling folder.
#
# Args:
#   bibfile   Path to the .bib file to convert. "" or a missing file aborts
#             with a message (not an error).
#   copybib   If TRUE, also write a per-entry .bib file into "my-bib-folder".
#   abstract  If TRUE, include the entry's abstract in the front matter.
#   overwrite If TRUE, regenerate .md/.bib files that already exist.
#
# Side effects: creates "my-md-folder", "my-pdf-folder" and "my-bib-folder"
# under the current working directory, writes files into them, and moves
# PDFs referenced by each entry's `file` field (file.rename).
bib2acad2 <-function (bibfile = "", copybib = TRUE, abstract = TRUE, overwrite = FALSE)
{
    msg1 <- "You must specify a .bib file as input for the conversion."
    msg2 <- paste0("Cannot find file '", bibfile, "'. Check path and/or file name.")
    # Early exits: signal via message() and return its (NULL) value.
    if (bibfile == "") {
        return(message(msg1))
    }
    if (!file.exists(bibfile)) {
        return(message(msg2))
    }
    outfold <- "my-md-folder"
    pubfold <- "my-pdf-folder"
    if (copybib) {
        bibfold <- "my-bib-folder"
    }
    # NOTE(review): the folder names are repeated as literals here instead of
    # reusing outfold/pubfold/bibfold; keep the two in sync if renaming.
    dir.create("my-md-folder", showWarnings = FALSE)
    dir.create("my-pdf-folder", showWarnings = FALSE)
    dir.create("my-bib-folder", showWarnings = FALSE)
    mypubs <- RefManageR::ReadBib(bibfile, check = "warn", .Encoding = "UTF-8")
    # One row per entry; the BibTeX citation key becomes the `key` column and
    # later the output file name.
    mypubs <- as.data.frame(mypubs)
    mypubs$key <- rownames(mypubs)
    # Map each bibtype onto Hugo Academic's numeric publication_types code
    # (2 = journal article, 1 = conference, 3 = thesis, 4 = report/manual,
    # 5 = book, 6 = book section, 0 = other/unknown).
    mypubs <- dplyr::mutate(mypubs, pubtype = dplyr::case_when(bibtype ==
        "Article" ~ "2", bibtype == "Article in Press" ~ "2",
        bibtype == "InProceedings" ~ "1", bibtype == "Proceedings" ~
            "1", bibtype == "Conference" ~ "1", bibtype == "Conference Paper" ~
            "1", bibtype == "MastersThesis" ~ "3", bibtype ==
            "PhdThesis" ~ "3", bibtype == "Manual" ~ "4", bibtype ==
            "TechReport" ~ "4", bibtype == "Book" ~ "5", bibtype ==
            "InCollection" ~ "6", bibtype == "InBook" ~ "6",
        bibtype == "Book Chapter" ~ "6", bibtype == "Misc" ~ "0", TRUE ~ "0"))
    # Writes one Markdown page for a single entry `x` (a named character
    # vector, one row of `mypubs` as delivered by pbapply over MARGIN = 1).
    create_md <- function(x) {
        # Normalise the date to YYYY-MM-DD: pad year-only and year-month
        # values; entries without a date sort last via the 2999 sentinel.
        if (!is.na(x[["date"]])) {
            if(nchar(x[["date"]]) == 4){
                x[["date"]] <- paste0(substr(x[["date"]],1,4), "-01-01")
            }
            if(nchar(x[["date"]]) == 7){
                x[["date"]] <- paste0(substr(x[["date"]],1,7), "-01")
            }
            if(nchar(x[["date"]]) == 10){
                x[["date"]] <- x[["date"]]
            }
        }
        else {
            x[["date"]] <- "2999-01-01"
        }
        filename_md <- paste0(x[["key"]], ".md")
        # Skip entries whose page already exists unless overwrite is set.
        if (!file.exists(file.path(outfold, filename_md)) | overwrite) {
            # fileConn is a path; the first write() truncates, later ones append.
            fileConn <- file.path(outfold, filename_md)
            write("+++", fileConn)
            write(paste0("title = \"", cleanStr(x[["title"]]),
                "\""), fileConn, append = T)
            write(paste0("date = \"", x[["date"]], "\""), fileConn,
                append = T)
            write(paste0("publication_types = [\"", x[["pubtype"]],
                "\"]"), fileConn, append = T)
            if (!is.na(x[["author"]])) {
                # Turn BibTeX "A and B and C" into a TOML string list and
                # bold this site owner's name variants.
                authors <- stringr::str_replace_all(stringr::str_squish(x["author"]),
                  " and ", "\", \"")
                #authors <- stringr::str_remove_all(authors, "{")
                #authors <- stringr::str_remove_all(authors, "}")
                authors <- stringr::str_replace(authors, "Thomas P. C. Dorlo",
                  "**Thomas P. C. Dorlo**")
                authors <- stringr::str_replace(authors, "T. P. C. Dorlo",
                  "**T. P. C. Dorlo**")
                # Transliterate accented characters to plain ASCII.
                authors <- stringi::stri_trans_general(authors,
                  "latin-ascii")
                write(paste0("authors = [\"", cleanStrA(authors), "\"]"),
                  fileConn, append = T)
            }
            else {
                # No authors: fall back to the editor field (e.g. edited volumes).
                editors <- stringr::str_replace_all(stringr::str_squish(x["editor"]),
                  " and ", "\", \"")
                editors <- stringi::stri_trans_general(editors,
                  "latin-ascii")
                write(paste0("editors = [\"", editors, "\"]"),
                  fileConn, append = T)
            }
            # Assemble the venue string from booktitle (+ publisher, address,
            # pages) and/or journaltitle; paste0(NULL, ...) drops the NULL.
            publication <- NULL
            if ("booktitle" %in% names(x) && !is.na(x[["booktitle"]])) {
                publication <- paste0(publication, "In: ", cleanStr(x[["booktitle"]]))
                if ("publisher" %in% names(x) && !is.na(x[["publisher"]])) {
                  publication <- paste0(publication, ", ", cleanStr(x[["publisher"]]))
                }
                if ("address" %in% names(x) && !is.na(x[["address"]])) {
                  publication <- paste0(publication, ", ", cleanStr(x[["address"]]))
                }
                if ("pages" %in% names(x) && !is.na(x[["pages"]])) {
                  publication <- paste0(publication, ", _pp. ",
                    cleanStr(x[["pages"]]), "_")
                }
            }
            if ("journaltitle" %in% names(x) && !is.na(x[["journaltitle"]])) {
                publication <- paste0(publication, "_", cleanStr(x[["journaltitle"]]), "_")
                # Volume/number/pages/doi/url rendering intentionally disabled;
                # DOI and links are emitted as separate front-matter keys below.
                #if ("number" %in% names(x) && !is.na(x[["number"]])) {
                #    publication <- paste0(publication, " ", cleanStr(x[["number"]]))
                #}
                #if ("volume" %in% names(x) && !is.na(x[["volume"]])) {
                #    publication <- paste0(publication, " (", cleanStr(x[["volume"]]),
                #      ") ")
                #}
                #if ("pages" %in% names(x) && !is.na(x[["pages"]])) {
                #    publication <- paste0(publication, ": ",
                #      cleanStr(x[["pages"]]), "_")
                #}
                #if ("doi" %in% names(x) && !is.na(x[["doi"]])) {
                #    publication <- paste0(publication, ", ", paste0("https://doi.org/",
                #      cleanStr(x[["doi"]])))
                #}
                #if ("url" %in% names(x) && !is.na(x[["url"]])) {
                #    publication <- paste0(publication, ", ", cleanStr(x[["url"]]))
                #}
            }
            write(paste0("publication = \"", publication, "\""),
                fileConn, append = T)
            if ((abstract) && "abstract" %in% names(x) && !is.na(x[["abstract"]])) {
                write(paste0("abstract = \"", cleanStr(x[["abstract"]]),
                  "\""), fileConn, append = T)
            }
            else {
                write("abstract = \"\"", fileConn, append = T)
            }
            if ("doi" %in% names(x) && !is.na(x[["doi"]])) {
                write(paste0("doi = \"", cleanStr(x[["doi"]]),
                  "\""), fileConn, append = T)
            }
            else {
                write("doi = \"\"", fileConn, append = T)
            }
            # NOTE(review): if an entry has both pmid and url, `links` is
            # written twice — a duplicate TOML key; confirm intended behavior.
            if ("pmid" %in% names(x) && !is.na(x[["pmid"]])) {
                write(paste0("links = [{name = \"PubMed\", url = \"https://www.ncbi.nlm.nih.gov/pubmed/", cleanStr(x[["pmid"]]),
                  "\"}]"), fileConn, append = T)
            }
            if ("url" %in% names(x) && !is.na(x[["url"]])) {
                write(paste0("links = [{name = \"Web\", url = \"",x[["url"]],
                  "\"}]"), fileConn, append = T)
            }
            # url_custom = [{name = "Custom Link", url = "http://example.org"}]
            # Point url_pdf at pdf/<key>.pdf when a PDF is already deployed
            # under static/pdf or the entry carries a `file` field.
            filename_pdf <- (gsub(".md", ".pdf", filename_md))
            if (file.exists(file.path("static/pdf", filename_pdf))) {
                write(paste0("url_pdf = \"pdf/", filename_pdf,"\""), fileConn, append = T)
            }
            else if ("file" %in% names(x) && !is.na(x[["file"]])) {
                write(paste0("url_pdf = \"pdf/", filename_pdf,"\""), fileConn, append = T)
            }
            else {
                write("url_pdf = \"\"", fileConn, append = T)
            }
            # Remaining front-matter keys are fixed boilerplate defaults.
            write(paste0("abstract_short = \"", "\""), fileConn,
                append = T)
            write("image_preview = \"\"", fileConn, append = T)
            write("selected = false", fileConn, append = T)
            write("projects = []", fileConn, append = T)
            write("tags = []", fileConn, append = T)
            write("url_preprint = \"\"", fileConn, append = T)
            write("url_code = \"\"", fileConn, append = T)
            write("url_dataset = \"\"", fileConn, append = T)
            write("url_project = \"\"", fileConn, append = T)
            write("url_slides = \"\"", fileConn, append = T)
            write("url_video = \"\"", fileConn, append = T)
            write("url_poster = \"\"", fileConn, append = T)
            write("url_source = \"\"", fileConn, append = T)
            write("math = true", fileConn, append = T)
            write("highlight = true", fileConn, append = T)
            write("[header]", fileConn, append = T)
            write("image = \"\"", fileConn, append = T)
            write("caption = \"\"", fileConn, append = T)
            write("+++", fileConn, append = T)
        }
        if (copybib) {
            # Round-trip the row back into a BibEntry (minus the synthetic
            # pubtype column) and write a single-entry .bib next to the page.
            filename_bib <- (gsub(".md", ".bib", filename_md))
            y <- as.list(x)
            y["pubtype"] <- NULL
            y <- RefManageR::as.BibEntry(y)
            if (!file.exists(file.path(bibfold, filename_bib)) |
                overwrite) {
                RefManageR::WriteBib(y, file = file.path(bibfold,
                  filename_bib), verbose = FALSE)
            }
        }
        if ("file" %in% names(x) && !is.na(x[["file"]])) {
            # Strip Zotero-style decorations from the `file` field to recover
            # a plain path, then MOVE the PDF into pubfold as <key>.pdf.
            filename_pdf <- (gsub(".md", ".pdf", filename_md))
            pdfloc <- gsub("Full Text:", "", x[["file"]])
            pdfloc <- gsub("Accepted Version:", "", pdfloc, fixed=TRUE)
            pdfloc <- gsub("C\\:", "C:", pdfloc, fixed=TRUE)
            pdfloc <- gsub(":application/pdf","", pdfloc, fixed=TRUE)
            file.rename(from = pdfloc, to = file.path(pubfold, filename_pdf))
        }
    }
    # Row-wise apply with a progress bar; results are discarded, only the
    # file-writing side effects matter.
    pb <- pbapply::startpb(min = 0, max = nrow(mypubs))
    pbapply::pbapply(mypubs, FUN = function(x) create_md(x),
        MARGIN = 1)
    pbapply::closepb(pb)
}
# Sanitise a BibTeX field value for use inside a double-quoted TOML string:
# escape backslashes and quotes, drop protective braces, collapse whitespace.
cleanStr <- function(str) {
  # Double literal backslashes first, so the quote-escaping below is not
  # itself re-escaped.
  out <- gsub('\\', '\\\\', str, fixed = TRUE)
  # Drop the protective braces old BibTeX files wrap around words.
  out <- gsub("[{}]", '', out)
  # Escape embedded double quotes for the TOML front matter.
  out <- gsub('"', '\\\\"', out)
  # Collapse runs of whitespace/newlines (abstracts often contain them).
  stringr::str_squish(out)
}
# Variant of cleanStr used for the authors list: identical sanitising except
# that embedded double quotes are deliberately left alone (the caller has
# already inserted the TOML list separators '", "').
cleanStrA <- function(str) {
  # Double literal backslashes so they survive TOML parsing.
  out <- gsub('\\', '\\\\', str, fixed = TRUE)
  # Drop the protective braces old BibTeX files wrap around words.
  out <- gsub("[{}]", '', out)
  #str <- gsub('"', '\\\\"', str)
  # Collapse runs of whitespace and trim the ends.
  stringr::str_squish(out)
}
# Open a directory in the platform's file browser.
#
# @param dir Directory to open; defaults to the current working directory.
# On Windows this delegates to shell.exec(); elsewhere it launches the
# browser named in the R_BROWSER environment variable.
opendir <- function(dir = getwd()){
  # Use `$` extraction: `.Platform['OS.type']` returns a length-1 *list*,
  # and comparing a list with `==` only works via implicit coercion.
  if (.Platform$OS.type == "windows"){
    shell.exec(dir)
  } else {
    # shQuote so paths containing spaces survive the shell command line.
    system(paste(Sys.getenv("R_BROWSER"), shQuote(dir)))
  }
}
#bib2acad2(bibfile="C:/Users/thoma/Dropbox/Site/academic-kickstart-master/content/publication/bibtex/Dorlo10.bib", overwrite=T)
#f <- "Full Text:C\:\\Users\\thoma\\Zotero\\storage\\8PE97TAN\\de Souza and Dorlo - 2018 - Safe mass drug administration for neglected tropic.pdf:application\pdf"
| /r/bibtex_alt.r | permissive | thomasdorlo/thomasdorlo | R | false | false | 10,594 | r | bib2acad2 <-function (bibfile = "", copybib = TRUE, abstract = TRUE, overwrite = FALSE)
{
msg1 <- "You must specify a .bib file as input for the conversion."
msg2 <- paste0("Cannot find file '", bibfile, "'. Check path and/or file name.")
if (bibfile == "") {
return(message(msg1))
}
if (!file.exists(bibfile)) {
return(message(msg2))
}
outfold <- "my-md-folder"
pubfold <- "my-pdf-folder"
if (copybib) {
bibfold <- "my-bib-folder"
}
dir.create("my-md-folder", showWarnings = FALSE)
dir.create("my-pdf-folder", showWarnings = FALSE)
dir.create("my-bib-folder", showWarnings = FALSE)
mypubs <- RefManageR::ReadBib(bibfile, check = "warn", .Encoding = "UTF-8")
mypubs <- as.data.frame(mypubs)
mypubs$key <- rownames(mypubs)
mypubs <- dplyr::mutate(mypubs, pubtype = dplyr::case_when(bibtype ==
"Article" ~ "2", bibtype == "Article in Press" ~ "2",
bibtype == "InProceedings" ~ "1", bibtype == "Proceedings" ~
"1", bibtype == "Conference" ~ "1", bibtype == "Conference Paper" ~
"1", bibtype == "MastersThesis" ~ "3", bibtype ==
"PhdThesis" ~ "3", bibtype == "Manual" ~ "4", bibtype ==
"TechReport" ~ "4", bibtype == "Book" ~ "5", bibtype ==
"InCollection" ~ "6", bibtype == "InBook" ~ "6",
bibtype == "Book Chapter" ~ "6", bibtype == "Misc" ~ "0", TRUE ~ "0"))
create_md <- function(x) {
if (!is.na(x[["date"]])) {
if(nchar(x[["date"]]) == 4){
x[["date"]] <- paste0(substr(x[["date"]],1,4), "-01-01")
}
if(nchar(x[["date"]]) == 7){
x[["date"]] <- paste0(substr(x[["date"]],1,7), "-01")
}
if(nchar(x[["date"]]) == 10){
x[["date"]] <- x[["date"]]
}
}
else {
x[["date"]] <- "2999-01-01"
}
filename_md <- paste0(x[["key"]], ".md")
if (!file.exists(file.path(outfold, filename_md)) | overwrite) {
fileConn <- file.path(outfold, filename_md)
write("+++", fileConn)
write(paste0("title = \"", cleanStr(x[["title"]]),
"\""), fileConn, append = T)
write(paste0("date = \"", x[["date"]], "\""), fileConn,
append = T)
write(paste0("publication_types = [\"", x[["pubtype"]],
"\"]"), fileConn, append = T)
if (!is.na(x[["author"]])) {
authors <- stringr::str_replace_all(stringr::str_squish(x["author"]),
" and ", "\", \"")
#authors <- stringr::str_remove_all(authors, "{")
#authors <- stringr::str_remove_all(authors, "}")
authors <- stringr::str_replace(authors, "Thomas P. C. Dorlo",
"**Thomas P. C. Dorlo**")
authors <- stringr::str_replace(authors, "T. P. C. Dorlo",
"**T. P. C. Dorlo**")
authors <- stringi::stri_trans_general(authors,
"latin-ascii")
write(paste0("authors = [\"", cleanStrA(authors), "\"]"),
fileConn, append = T)
}
else {
editors <- stringr::str_replace_all(stringr::str_squish(x["editor"]),
" and ", "\", \"")
editors <- stringi::stri_trans_general(editors,
"latin-ascii")
write(paste0("editors = [\"", editors, "\"]"),
fileConn, append = T)
}
publication <- NULL
if ("booktitle" %in% names(x) && !is.na(x[["booktitle"]])) {
publication <- paste0(publication, "In: ", cleanStr(x[["booktitle"]]))
if ("publisher" %in% names(x) && !is.na(x[["publisher"]])) {
publication <- paste0(publication, ", ", cleanStr(x[["publisher"]]))
}
if ("address" %in% names(x) && !is.na(x[["address"]])) {
publication <- paste0(publication, ", ", cleanStr(x[["address"]]))
}
if ("pages" %in% names(x) && !is.na(x[["pages"]])) {
publication <- paste0(publication, ", _pp. ",
cleanStr(x[["pages"]]), "_")
}
}
if ("journaltitle" %in% names(x) && !is.na(x[["journaltitle"]])) {
publication <- paste0(publication, "_", cleanStr(x[["journaltitle"]]), "_")
#if ("number" %in% names(x) && !is.na(x[["number"]])) {
# publication <- paste0(publication, " ", cleanStr(x[["number"]]))
#}
#if ("volume" %in% names(x) && !is.na(x[["volume"]])) {
# publication <- paste0(publication, " (", cleanStr(x[["volume"]]),
# ") ")
#}
#if ("pages" %in% names(x) && !is.na(x[["pages"]])) {
# publication <- paste0(publication, ": ",
# cleanStr(x[["pages"]]), "_")
#}
#if ("doi" %in% names(x) && !is.na(x[["doi"]])) {
# publication <- paste0(publication, ", ", paste0("https://doi.org/",
# cleanStr(x[["doi"]])))
#}
#if ("url" %in% names(x) && !is.na(x[["url"]])) {
# publication <- paste0(publication, ", ", cleanStr(x[["url"]]))
#}
}
write(paste0("publication = \"", publication, "\""),
fileConn, append = T)
if ((abstract) && "abstract" %in% names(x) && !is.na(x[["abstract"]])) {
write(paste0("abstract = \"", cleanStr(x[["abstract"]]),
"\""), fileConn, append = T)
}
else {
write("abstract = \"\"", fileConn, append = T)
}
if ("doi" %in% names(x) && !is.na(x[["doi"]])) {
write(paste0("doi = \"", cleanStr(x[["doi"]]),
"\""), fileConn, append = T)
}
else {
write("doi = \"\"", fileConn, append = T)
}
if ("pmid" %in% names(x) && !is.na(x[["pmid"]])) {
write(paste0("links = [{name = \"PubMed\", url = \"https://www.ncbi.nlm.nih.gov/pubmed/", cleanStr(x[["pmid"]]),
"\"}]"), fileConn, append = T)
}
if ("url" %in% names(x) && !is.na(x[["url"]])) {
write(paste0("links = [{name = \"Web\", url = \"",x[["url"]],
"\"}]"), fileConn, append = T)
}
# url_custom = [{name = "Custom Link", url = "http://example.org"}]
filename_pdf <- (gsub(".md", ".pdf", filename_md))
if (file.exists(file.path("static/pdf", filename_pdf))) {
write(paste0("url_pdf = \"pdf/", filename_pdf,"\""), fileConn, append = T)
}
else if ("file" %in% names(x) && !is.na(x[["file"]])) {
write(paste0("url_pdf = \"pdf/", filename_pdf,"\""), fileConn, append = T)
}
else {
write("url_pdf = \"\"", fileConn, append = T)
}
write(paste0("abstract_short = \"", "\""), fileConn,
append = T)
write("image_preview = \"\"", fileConn, append = T)
write("selected = false", fileConn, append = T)
write("projects = []", fileConn, append = T)
write("tags = []", fileConn, append = T)
write("url_preprint = \"\"", fileConn, append = T)
write("url_code = \"\"", fileConn, append = T)
write("url_dataset = \"\"", fileConn, append = T)
write("url_project = \"\"", fileConn, append = T)
write("url_slides = \"\"", fileConn, append = T)
write("url_video = \"\"", fileConn, append = T)
write("url_poster = \"\"", fileConn, append = T)
write("url_source = \"\"", fileConn, append = T)
write("math = true", fileConn, append = T)
write("highlight = true", fileConn, append = T)
write("[header]", fileConn, append = T)
write("image = \"\"", fileConn, append = T)
write("caption = \"\"", fileConn, append = T)
write("+++", fileConn, append = T)
}
if (copybib) {
filename_bib <- (gsub(".md", ".bib", filename_md))
y <- as.list(x)
y["pubtype"] <- NULL
y <- RefManageR::as.BibEntry(y)
if (!file.exists(file.path(bibfold, filename_bib)) |
overwrite) {
RefManageR::WriteBib(y, file = file.path(bibfold,
filename_bib), verbose = FALSE)
}
}
if ("file" %in% names(x) && !is.na(x[["file"]])) {
filename_pdf <- (gsub(".md", ".pdf", filename_md))
pdfloc <- gsub("Full Text:", "", x[["file"]])
pdfloc <- gsub("Accepted Version:", "", pdfloc, fixed=TRUE)
pdfloc <- gsub("C\\:", "C:", pdfloc, fixed=TRUE)
pdfloc <- gsub(":application/pdf","", pdfloc, fixed=TRUE)
file.rename(from = pdfloc, to = file.path(pubfold, filename_pdf))
}
}
pb <- pbapply::startpb(min = 0, max = nrow(mypubs))
pbapply::pbapply(mypubs, FUN = function(x) create_md(x),
MARGIN = 1)
pbapply::closepb(pb)
}
# Clean a BibTeX field value for embedding inside a double-quoted TOML string:
# escape backslashes and double quotes, strip BibTeX braces, and collapse
# runs of whitespace (important for multi-line fields such as 'abstract').
cleanStr <- function(str) {
  # Escape literal backslashes first, so the quote-escaping below is not
  # itself double-escaped.
  escaped <- gsub('\\', '\\\\', str, fixed = TRUE)
  # Remove the brace protection ("{...}") found in older BibTeX files.
  escaped <- gsub("[{}]", '', escaped)
  # Turn embedded double quotes into \" so the TOML value stays well formed.
  escaped <- gsub('"', '\\\\"', escaped)
  # Squeeze newlines/tabs/repeated blanks into single spaces and return.
  stringr::str_squish(escaped)
}
# Clean a BibTeX field for TOML output, like cleanStr() but WITHOUT escaping
# embedded double quotes (used for the author list, which is split and quoted
# per-name by the caller, so extra escaping would corrupt it).
#
# str: character vector. Returns the cleaned, whitespace-squished string.
cleanStrA <- function(str) {
  # Escape literal backslashes first so later replacements are not double-escaped.
  str <- gsub('\\', '\\\\', str, fixed = TRUE)
  # Drop the brace protection ("{...}") used in older BibTeX files.
  str <- gsub("[{}]", '', str)
  # Unlike cleanStr(), inline double quotes are deliberately left untouched here:
  #str <- gsub('"', '\\\\"', str)
  # Collapse newlines/tabs/repeated spaces (important for 'abstract') and return.
  return(stringr::str_squish(str))
}
# Open a directory in the system file browser.
#
# dir: path to open (defaults to the current working directory).
# Side effects only: launches an external program; returns NULL invisibly.
opendir <- function(dir = getwd()){
  # .Platform$OS.type (not ['OS.type']) yields the plain string "windows"/"unix";
  # subsetting with [ returns a one-element list, which only compared correctly
  # by accident of coercion.
  if (.Platform$OS.type == "windows"){
    shell.exec(dir)
  } else {
    # shQuote() so paths containing spaces survive the shell.
    system(paste(Sys.getenv("R_BROWSER"), shQuote(dir)))
  }
  invisible(NULL)
}
#bib2acad2(bibfile="C:/Users/thoma/Dropbox/Site/academic-kickstart-master/content/publication/bibtex/Dorlo10.bib", overwrite=T)
#f <- "Full Text:C\:\\Users\\thoma\\Zotero\\storage\\8PE97TAN\\de Souza and Dorlo - 2018 - Safe mass drug administration for neglected tropic.pdf:application\pdf"
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gamesManagement_functions.R
\docType{package}
\name{gamesManagement_googleAuthR}
\alias{gamesManagement_googleAuthR}
\alias{gamesManagement_googleAuthR-package}
\title{Google Play Game Services Management API
The Management API for Google Play Game Services.}
\description{
Auto-generated code by googleAuthR::gar_create_api_skeleton
at 2017-03-05 19:52:53
filename: /Users/mark/dev/R/autoGoogleAPI/googlegamesManagementv1management.auto/R/gamesManagement_functions.R
api_json: api_json
}
\details{
Authentication scopes used are:
\itemize{
\item https://www.googleapis.com/auth/games
\item https://www.googleapis.com/auth/plus.login
}
}
| /googlegamesManagementv1management.auto/man/gamesManagement_googleAuthR.Rd | permissive | GVersteeg/autoGoogleAPI | R | false | true | 719 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gamesManagement_functions.R
\docType{package}
\name{gamesManagement_googleAuthR}
\alias{gamesManagement_googleAuthR}
\alias{gamesManagement_googleAuthR-package}
\title{Google Play Game Services Management API
The Management API for Google Play Game Services.}
\description{
Auto-generated code by googleAuthR::gar_create_api_skeleton
at 2017-03-05 19:52:53
filename: /Users/mark/dev/R/autoGoogleAPI/googlegamesManagementv1management.auto/R/gamesManagement_functions.R
api_json: api_json
}
\details{
Authentication scopes used are:
\itemize{
\item https://www.googleapis.com/auth/games
\item https://www.googleapis.com/auth/plus.login
}
}
|
# plot3.R — plot the three sub-metering series for 1-2 Feb 2007 and save a PNG.
# Read a small sample first so column classes can be detected cheaply.
data<-read.table("household_power_consumption.txt",header=TRUE,nrows=50,na.strings="?",sep=";")
classes<-sapply(data,class)
# Re-read the full file with explicit colClasses (faster than re-inferring types).
dataAll<-read.table("household_power_consumption.txt",header=TRUE, na.strings="?", sep=";",comment.char="",colClasses=classes)
# Keep only the two days of interest; dates are stored as d/m/Y strings.
dataSub<-dataAll[dataAll$Date %in% c("1/2/2007","2/2/2007"),]
# Combine date and time, then parse to POSIXlt so the x axis is a real time scale.
datesStrSub<-paste(dataSub$Date,dataSub$Time,sep=' ')
datesFullSub<-strptime(datesStrSub,format = "%d/%m/%Y %T")
# Line plot: black/red/blue for Sub_metering_1/2/3; shrink text slightly.
txt_reduce = 0.8
plot(datesFullSub,dataSub$Sub_metering_1,type="l",xlab="",ylab="Energy sub metering", cex.axis=txt_reduce, cex.lab=txt_reduce)
points(datesFullSub,dataSub$Sub_metering_2,type="l",col="red")
points(datesFullSub,dataSub$Sub_metering_3,type="l",col="blue")
# The three sub-metering series are the last three columns of the data frame.
dataSubColNamesLength<-length(colnames(dataSub))
legendNames<-colnames(dataSub)[(dataSubColNamesLength-2):dataSubColNamesLength]
legend("topright",col=c("black","red","blue"), legend=legendNames, lty = 1, cex = txt_reduce)
# Copy the screen device to plot3.png (dev.off() follows on the next line).
dev.copy(png,"plot3.png")
dev.off() | /plot3.R | no_license | omaksymov/ExData_Plotting1 | R | false | false | 1,171 | r | #Reading the data
#Read partial sample for identifying classes by columns
data<-read.table("household_power_consumption.txt",header=TRUE,nrows=50,na.strings="?",sep=";")
classes<-sapply(data,class)
#Reading the whole sample using obtained classes (for less time)
dataAll<-read.table("household_power_consumption.txt",header=TRUE, na.strings="?", sep=";",comment.char="",colClasses=classes)
dataSub<-dataAll[dataAll$Date %in% c("1/2/2007","2/2/2007"),]
#Converting dates to POSIXlt for appropriate x axis values
datesStrSub<-paste(dataSub$Date,dataSub$Time,sep=' ')
datesFullSub<-strptime(datesStrSub,format = "%d/%m/%Y %T")
#Building graphic
txt_reduce = 0.8
plot(datesFullSub,dataSub$Sub_metering_1,type="l",xlab="",ylab="Energy sub metering", cex.axis=txt_reduce, cex.lab=txt_reduce)
points(datesFullSub,dataSub$Sub_metering_2,type="l",col="red")
points(datesFullSub,dataSub$Sub_metering_3,type="l",col="blue")
dataSubColNamesLength<-length(colnames(dataSub))
legendNames<-colnames(dataSub)[(dataSubColNamesLength-2):dataSubColNamesLength]
legend("topright",col=c("black","red","blue"), legend=legendNames, lty = 1, cex = txt_reduce)
dev.copy(png,"plot3.png")
dev.off() |
# Setup: load IHC (immunohistochemistry) scores for CBX2/EZH2 and derive
# per-sample mean and max scores; then split into tumor and normal subsets.
.libPaths("E:/library")
library(magrittr)
# Machine-specific data locations — only the LAST uncommented assignment wins;
# comment out the blocks that do not apply to the current machine.
# HOME -----
data_path<-"Z:/WD Backup.swstor/MyPC/MDNkNjQ2ZjE0ZTcwNGM0Mz/Volume{3cf9130b-f942-4f48-a322-418d1c20f05f}/study/ENCODE-TCGA-LUAD/芯片-免疫组化/data"
# E Zhou -----
data_path<-"H:/WD Backup.swstor/MyPC/MDNkNjQ2ZjE0ZTcwNGM0Mz/Volume{3cf9130b-f942-4f48-a322-418d1c20f05f}/study/ENCODE-TCGA-LUAD/芯片-免疫组化/data"
result_path<-"H:/WD Backup.swstor/MyPC/MDNkNjQ2ZjE0ZTcwNGM0Mz/Volume{3cf9130b-f942-4f48-a322-418d1c20f05f}/study/ENCODE-TCGA-LUAD/芯片-免疫组化/result"
# HUST ----
data_path<-"G:/WD Backup.swstor/MyPC/MDNkNjQ2ZjE0ZTcwNGM0Mz/Volume{3cf9130b-f942-4f48-a322-418d1c20f05f}/study/ENCODE-TCGA-LUAD/芯片-免疫组化/data"
result_path<-"G:/WD Backup.swstor/MyPC/MDNkNjQ2ZjE0ZTcwNGM0Mz/Volume{3cf9130b-f942-4f48-a322-418d1c20f05f}/study/ENCODE-TCGA-LUAD/芯片-免疫组化/result"
# Missing stage is recoded "N" BEFORE drop_na(), so only rows with missing
# scores are removed; mean/max combine the cytoplasm and nucleus (karyon) scores.
immune_histone <- read.table(file.path(data_path,"immune_histone.txt"),sep = "\t",header = T) %>%
  dplyr::mutate(stage = as.character(stage)) %>%
  dplyr::mutate(stage = ifelse(is.na(stage),"N",stage)) %>%
  tidyr::drop_na() %>%
  dplyr::mutate(sample_type=ifelse(sample_type=="T","Tumor","Normal")) %>%
  # dplyr::mutate(EZH2_cytoplsm=ifelse(is.na(EZH2_cytoplsm),0,EZH2_cytoplsm)) %>%
  # dplyr::mutate(EZH2_karyon=ifelse(is.na(EZH2_karyon),0,EZH2_karyon)) %>%
  # dplyr::mutate(CBX2_cytoplsm=ifelse(is.na(CBX2_cytoplsm),0,CBX2_cytoplsm)) %>%
  # dplyr::mutate(CBX2_karyon=ifelse(is.na(CBX2_karyon),0,CBX2_karyon)) %>%
  dplyr::mutate(CBX2_mean = (CBX2_cytoplsm+CBX2_karyon)/2) %>%
  dplyr::mutate(EZH2_mean = (EZH2_karyon+EZH2_cytoplsm)/2) %>%
  dplyr::mutate(CBX2_max = ifelse(CBX2_cytoplsm > CBX2_karyon, CBX2_cytoplsm, CBX2_karyon)) %>%
  dplyr::mutate(EZH2_max = ifelse(EZH2_cytoplsm > EZH2_karyon, EZH2_cytoplsm, EZH2_karyon))
# Preliminary normality check for the correlation tests below.
shapiro.test(immune_histone$EZH2_mean) # p-value < 0.05, don't follow a normal distribution.
shapiro.test(immune_histone$CBX2_mean) # p-value < 0.05, don't follow a normal distribution.
# Correlations are computed separately for tumor and normal samples.
## data preprocessing
immune_histone %>%
  dplyr::filter(sample_type=="Tumor") -> immune_histone.T
immune_histone %>%
  dplyr::filter(sample_type=="Normal") -> immune_histone.N
## Format a p-value for display: two significant digits for values >= 0.1,
## one significant digit in (0.001, 0.1), scientific notation at or below 0.001.
##
## .x: a single non-negative number (a p-value). Returns a character scalar.
human_read <- function(.x){
  if (.x >= 0.1) {
    # BUG FIX: was `.x > 0.1`, which sent exactly 0.1 to the scientific branch
    # ("1e-01"); also dropped the magrittr pipe so this helper is pure base R.
    toString(signif(.x, digits = 2))
  } else if (.x > 0.001) {
    # The `.x < 0.1` half of the old condition is implied by the failed test above.
    toString(signif(.x, digits = 1))
  } else {
    format(.x, digits = 2, scientific = TRUE)
  }
}
## Fit and annotate the correlations.
## Tumor samples: Pearson correlation of the *mean* CBX2/EZH2 IHC scores.
## NOTE(review): the Shapiro tests above rejected normality, yet Pearson is
## used here (and Kendall below for normals, on the *max* scores) — confirm
## this tumor/normal asymmetry is intentional.
broom::tidy(
  cor.test(immune_histone.T$CBX2_mean,immune_histone.T$EZH2_mean,method = "pearson")) %>%
  dplyr::as_tibble() %>%
  # FDR-adjusting a single p-value is a no-op; presumably kept for uniformity.
  dplyr::mutate(fdr=p.adjust(p.value,method = "fdr")) %>%
  dplyr::mutate(p.value = purrr::map_chr(p.value,human_read)) %>%
  # x/y are plot coordinates for the annotation; sample_type keys the facet.
  dplyr::mutate(x=1,y=1.8,sample_type="1Tumor",n=nrow(immune_histone.T), estimate = signif(estimate,2)) %>%
  # Build a TeX label "r = ..., p = ..., n = ...". `.z=n` is forwarded to .f
  # through map2's `...` — works here only because n is a length-one constant.
  dplyr::mutate(label=purrr::map2(
    .x=p.value,
    .y=estimate,
    .z=n,
    .f=function(.x,.y,.z){
      if(grepl(pattern = "e",x=.x)){
        # Scientific p-values are typeset as mantissa × 10^exponent.
        sub("-0", "-", strsplit(split = "e", x = .x, fixed = TRUE)[[1]]) -> .xx
        latex2exp::TeX(glue::glue("r = <<.y>>, p = $<<.xx[1]>> \\times 10^{<<.xx[2]>>}$, n = <<.z>>", .open = "<<", .close = ">>"))
      } else {
        latex2exp::TeX(glue::glue("r = {.y}, p = {.x}, n = {.z}"))
      }
    }
  )) ->CBX2_EZH2.T
## Normal samples: Kendall correlation of the *max* CBX2/EZH2 scores.
broom::tidy(
  cor.test(immune_histone.N$CBX2_max,immune_histone.N$EZH2_max,method = "kendall")) %>%
  dplyr::as_tibble() %>%
  dplyr::mutate(fdr=p.adjust(p.value,method = "fdr")) %>%
  dplyr::mutate(p.value = purrr::map_chr(p.value,human_read)) %>%
  dplyr::mutate(x=0.41,y=0.8,sample_type="2Normal",n=nrow(immune_histone.N), estimate = signif(estimate,2)) %>%
  dplyr::mutate(label=purrr::map2(
    .x=p.value,
    .y=estimate,
    .z=n,
    .f=function(.x,.y,.z){
      if(grepl(pattern = "e",x=.x)){
        sub("-0", "-", strsplit(split = "e", x = .x, fixed = TRUE)[[1]]) -> .xx
        latex2exp::TeX(glue::glue("r = <<.y>>, p = $<<.xx[1]>> \\times 10^{<<.xx[2]>>}$, n = <<.z>>", .open = "<<", .close = ">>"))
      } else {
        latex2exp::TeX(glue::glue("r = {.y}, p = {.x}, n = {.z}"))
      }
    }
  )) ->CBX2_EZH2.N
# Stack the tumor and normal annotation rows; printed for inspection.
# NOTE(review): dplyr::as.tbl() is deprecated — as_tibble() is the replacement.
rbind(CBX2_EZH2.T,CBX2_EZH2.N) %>%
  dplyr::as.tbl() ->CBX2_EZH2;CBX2_EZH2
# Mapping from the internal facet keys (numeric prefix forces tumor-first
# ordering) to the labels shown on the facet strips.
facet_names <- list(
  '1Tumor' = "Tumor",
  '2Normal' = "Normal"
)
# Labeller for facet_wrap(): translate factor values to display labels.
facet_labeller <- function(variable, value) {
  facet_names[value]
}
# Scatter plot of mean EZH2 vs mean CBX2 scores, faceted by sample type, with
# a smoother and correlation stats shown (via the color legend labels).
# NOTE(review): axis labels say "cytoplsm" but the plotted values are the
# cytoplasm/nucleus *means* — confirm the intended wording (and spelling).
# Assumes ggplot2 is attached elsewhere; only magrittr is loaded above.
immune_histone %>%
  ggplot(aes(x=EZH2_mean,y=CBX2_mean)) +
  geom_point(aes(color = sample_type)) +
  geom_smooth(se = FALSE, fullrange=TRUE, color = "#039BE5") +
  theme_bw() +
  theme(panel.border = element_blank(),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        axis.line = element_line(color = "black")
  ) +
  # Trick: the legend labels are replaced with the TeX correlation strings
  # built above (r, p, n per sample type), so the stats ride on the legend.
  scale_color_manual(
    values = c("#EE6363","#00C5CD"),
    labels = CBX2_EZH2$label
  ) +
  labs(
    x = "EZH2 cytoplsm",
    y = "CBX2 cytoplsm"
  ) + facet_wrap(~sample_type,scales = "free",labeller=facet_labeller) -> p1;p1
| /7.immune_histochemical/correlation_new.R | no_license | Huffyphenix/LUAD_pic_code | R | false | false | 5,035 | r | .libPaths("E:/library")
library(magrittr)
# HOME -----
data_path<-"Z:/WD Backup.swstor/MyPC/MDNkNjQ2ZjE0ZTcwNGM0Mz/Volume{3cf9130b-f942-4f48-a322-418d1c20f05f}/study/ENCODE-TCGA-LUAD/芯片-免疫组化/data"
# E Zhou -----
data_path<-"H:/WD Backup.swstor/MyPC/MDNkNjQ2ZjE0ZTcwNGM0Mz/Volume{3cf9130b-f942-4f48-a322-418d1c20f05f}/study/ENCODE-TCGA-LUAD/芯片-免疫组化/data"
result_path<-"H:/WD Backup.swstor/MyPC/MDNkNjQ2ZjE0ZTcwNGM0Mz/Volume{3cf9130b-f942-4f48-a322-418d1c20f05f}/study/ENCODE-TCGA-LUAD/芯片-免疫组化/result"
# HUST ----
data_path<-"G:/WD Backup.swstor/MyPC/MDNkNjQ2ZjE0ZTcwNGM0Mz/Volume{3cf9130b-f942-4f48-a322-418d1c20f05f}/study/ENCODE-TCGA-LUAD/芯片-免疫组化/data"
result_path<-"G:/WD Backup.swstor/MyPC/MDNkNjQ2ZjE0ZTcwNGM0Mz/Volume{3cf9130b-f942-4f48-a322-418d1c20f05f}/study/ENCODE-TCGA-LUAD/芯片-免疫组化/result"
immune_histone <- read.table(file.path(data_path,"immune_histone.txt"),sep = "\t",header = T) %>%
dplyr::mutate(stage = as.character(stage)) %>%
dplyr::mutate(stage = ifelse(is.na(stage),"N",stage)) %>%
tidyr::drop_na() %>%
dplyr::mutate(sample_type=ifelse(sample_type=="T","Tumor","Normal")) %>%
# dplyr::mutate(EZH2_cytoplsm=ifelse(is.na(EZH2_cytoplsm),0,EZH2_cytoplsm)) %>%
# dplyr::mutate(EZH2_karyon=ifelse(is.na(EZH2_karyon),0,EZH2_karyon)) %>%
# dplyr::mutate(CBX2_cytoplsm=ifelse(is.na(CBX2_cytoplsm),0,CBX2_cytoplsm)) %>%
# dplyr::mutate(CBX2_karyon=ifelse(is.na(CBX2_karyon),0,CBX2_karyon)) %>%
dplyr::mutate(CBX2_mean = (CBX2_cytoplsm+CBX2_karyon)/2) %>%
dplyr::mutate(EZH2_mean = (EZH2_karyon+EZH2_cytoplsm)/2) %>%
dplyr::mutate(CBX2_max = ifelse(CBX2_cytoplsm > CBX2_karyon, CBX2_cytoplsm, CBX2_karyon)) %>%
dplyr::mutate(EZH2_max = ifelse(EZH2_cytoplsm > EZH2_karyon, EZH2_cytoplsm, EZH2_karyon))
# Preleminary test to check the test assumptions
shapiro.test(immune_histone$EZH2_mean) # p-value < 0.05, don't follow a normal distribution.
shapiro.test(immune_histone$CBX2_mean) # p-value < 0.05, don't follow a normal distribution.
# do correlation for tumor and normal samples, respectively
## data precessing
immune_histone %>%
dplyr::filter(sample_type=="Tumor") -> immune_histone.T
immune_histone %>%
dplyr::filter(sample_type=="Normal") -> immune_histone.N
## function to get scientific numeric
human_read <- function(.x){
if (.x > 0.1) {
.x %>% signif(digits = 2) %>% toString()
} else if (.x < 0.1 && .x > 0.001 ) {
.x %>% signif(digits = 1) %>% toString()
} else {
.x %>% format(digits = 2, scientific = TRUE)
}
}
## do
broom::tidy(
cor.test(immune_histone.T$CBX2_mean,immune_histone.T$EZH2_mean,method = "pearson")) %>%
dplyr::as_tibble() %>%
dplyr::mutate(fdr=p.adjust(p.value,method = "fdr")) %>%
dplyr::mutate(p.value = purrr::map_chr(p.value,human_read)) %>%
dplyr::mutate(x=1,y=1.8,sample_type="1Tumor",n=nrow(immune_histone.T), estimate = signif(estimate,2)) %>%
dplyr::mutate(label=purrr::map2(
.x=p.value,
.y=estimate,
.z=n,
.f=function(.x,.y,.z){
if(grepl(pattern = "e",x=.x)){
sub("-0", "-", strsplit(split = "e", x = .x, fixed = TRUE)[[1]]) -> .xx
latex2exp::TeX(glue::glue("r = <<.y>>, p = $<<.xx[1]>> \\times 10^{<<.xx[2]>>}$, n = <<.z>>", .open = "<<", .close = ">>"))
} else {
latex2exp::TeX(glue::glue("r = {.y}, p = {.x}, n = {.z}"))
}
}
)) ->CBX2_EZH2.T
broom::tidy(
cor.test(immune_histone.N$CBX2_max,immune_histone.N$EZH2_max,method = "kendall")) %>%
dplyr::as_tibble() %>%
dplyr::mutate(fdr=p.adjust(p.value,method = "fdr")) %>%
dplyr::mutate(p.value = purrr::map_chr(p.value,human_read)) %>%
dplyr::mutate(x=0.41,y=0.8,sample_type="2Normal",n=nrow(immune_histone.N), estimate = signif(estimate,2)) %>%
dplyr::mutate(label=purrr::map2(
.x=p.value,
.y=estimate,
.z=n,
.f=function(.x,.y,.z){
if(grepl(pattern = "e",x=.x)){
sub("-0", "-", strsplit(split = "e", x = .x, fixed = TRUE)[[1]]) -> .xx
latex2exp::TeX(glue::glue("r = <<.y>>, p = $<<.xx[1]>> \\times 10^{<<.xx[2]>>}$, n = <<.z>>", .open = "<<", .close = ">>"))
} else {
latex2exp::TeX(glue::glue("r = {.y}, p = {.x}, n = {.z}"))
}
}
)) ->CBX2_EZH2.N
rbind(CBX2_EZH2.T,CBX2_EZH2.N) %>%
dplyr::as.tbl() ->CBX2_EZH2;CBX2_EZH2
facet_names <- list(
'1Tumor'="Tumor",
'2Normal'="Normal"
)
facet_labeller <- function(variable,value){
return(facet_names[value])
}
immune_histone %>%
ggplot(aes(x=EZH2_mean,y=CBX2_mean)) +
geom_point(aes(color = sample_type)) +
geom_smooth(se = FALSE, fullrange=TRUE, color = "#039BE5") +
theme_bw() +
theme(panel.border = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
axis.line = element_line(color = "black")
) +
scale_color_manual(
values = c("#EE6363","#00C5CD"),
labels = CBX2_EZH2$label
) +
labs(
x = "EZH2 cytoplsm",
y = "CBX2 cytoplsm"
) + facet_wrap(~sample_type,scales = "free",labeller=facet_labeller) -> p1;p1
|
# Auto-generated fuzz-test fixture: calls the package-internal evapotranspiration
# wrapper with extreme/denormal numeric inputs (str(result) follows below).
testlist <- list(doy = -1.72131968218895e+83, latitude = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 1.73250065464873e+162))
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result) | /meteor/inst/testfiles/ET0_ThornthwaiteWilmott/AFL_ET0_ThornthwaiteWilmott/ET0_ThornthwaiteWilmott_valgrind_files/1615828645-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 734 | r | testlist <- list(doy = -1.72131968218895e+83, latitude = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 1.73250065464873e+162))
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result) |
# Convert a raster/spatial object's bounding box into the nested
# list(p1, p2) long/lat form used by the download helpers below.
extent_to_bbox <- function(ras){
  corners <- bbox(ras)  # 2x2 coordinate matrix from the spatial object
  list(
    p1 = list(long = corners[1, 1], lat = corners[2, 1]),
    p2 = list(long = corners[1, 2], lat = corners[2, 2])
  )
}
# Reproject a long/lat extent to UTM and return the resulting extent.
#
# longlat_extent: coordinates accepted by sp::SpatialPoints (WGS84 long/lat).
# zone:           UTM zone string, e.g. "18T".
# NOTE(review): the whole string is interpolated into the proj4 "+zone=" term;
# confirm proj tolerates the trailing latitude-band letter ("T").
convert_extent_to_utm <- function(longlat_extent,zone = "18T"){
  sp_geo <- SpatialPoints(longlat_extent, CRS("+proj=longlat +datum=WGS84"))
  sp_utm <- spTransform(sp_geo,CRS(glue::glue("+proj=utm +zone={zone} +datum=WGS84")))
  return(extent(sp_utm))
}
# Zero out the elevations in the map border: treat the matrix as a raster over
# borderless_extent, then pad it back out to full_extent with 0-valued cells.
# NOTE(review): the original author flagged this as unreliable ("totally
# screwed up" — top/bottom orientation may come back flipped from the raster
# round-trip); verify against a known raster before trusting the output.
zero_out_border <- function(elev_matrix,full_extent,borderless_extent){
  ras1 <- raster::raster(elev_matrix)
  # Attach map coordinates so crop/extend operate in extent space.
  extent(ras1) <- borderless_extent
  # crop() here is effectively a no-op (extent was just set to the same value);
  # presumably kept defensively. extend() adds the zero border.
  ras2 <- raster::crop(ras1,borderless_extent) %>%
    raster::extend(full_extent,value=0)
  return(as.matrix(ras2))
}
# # convert a lat/long to pixels on an image
# house_pos <- find_image_coordinates(latlon_house$long,
# latlon_house$lat,
# bbox = bbox,
# image_width=image_size$width,
# image_height=image_size$height)
# Ratio of two bounding-box spans, returned as c(x_ratio, y_ratio).
# Both boxes use the list(p1 = list(long, lat), p2 = list(long, lat)) layout.
bbox_size_ratio <- function(bbox1, bbox2) {
  long_span1 <- bbox1$p1$long - bbox1$p2$long
  lat_span1  <- bbox1$p1$lat - bbox1$p2$lat
  long_span2 <- bbox2$p1$long - bbox2$p2$long
  lat_span2  <- bbox2$p1$lat - bbox2$p2$lat
  c(long_span1 / long_span2, lat_span1 / lat_span2)
}
# Embed matrix m in the centre of a larger matrix whose dimensions are the
# original ones scaled by size_ratio; the surrounding border is init_value.
#
# m:          matrix to embed.
# size_ratio: length-2 numeric (row factor, column factor), each >= 1.
# init_value: fill value for the border (default 0).
border_matrix <-function(m,size_ratio=c(x=1,y=1),init_value=0){
  d <- dim(m)
  new_nrow <- round(d[1] * size_ratio[1])
  new_ncol <- round(d[2] * size_ratio[2])
  new_m <- matrix(data = init_value, nrow = new_nrow, ncol = new_ncol)
  # BUG FIX: the row offset used round(gap/2 + 1) while the column offset used
  # round(gap/2) + 1; under R's round-half-to-even these disagree whenever the
  # gap is odd, centring rows and columns inconsistently. floor() for both.
  row_start <- floor((new_nrow - d[1]) / 2) + 1
  col_start <- floor((new_ncol - d[2]) / 2) + 1
  new_m[row_start:(row_start + d[1] - 1),
        col_start:(col_start + d[2] - 1)] <- m
  return(new_m)
}
# Pad an elevation matrix with a zero-height border so it can sit under an
# image (e.g. a map overlay) covering a larger lat/long bounding box.
#
# elev_matrix: elevation matrix covering bbox_sm.
# bbox_big:    lat/long bbox of the larger image.
# bbox_sm:     lat/long bbox of elev_matrix.
make_elev_matrix_border <- function(elev_matrix, bbox_big,bbox_sm) {
  # BUG FIX: original called enlarge_matrix(m, ...) — neither name exists in
  # this file; the helper defined above is border_matrix() and the argument
  # being padded is elev_matrix.
  new_m <- border_matrix(elev_matrix,
                         bbox_size_ratio(bbox_big,bbox_sm))
  return(new_m)
}
# Rescale a numeric object linearly so its values span [0, 1].
# (NAs propagate; a constant input divides by zero, as in the original.)
normalize <- function(x) {
  rng <- range(x)
  (x - rng[1]) / (rng[2] - rng[1])
}
# Degrees/minutes/seconds to decimal degrees (vectorized; all parts optional).
dms_to_dec <- function(deg = 0, min = 0, sec = 0) {
  sec / 3600 + min / 60 + deg
}
# Replace zero depths (water of unknown depth) with a synthetic depth
# proportional to the distance to the nearest positive ("shore") cell,
# searched along the four axis directions at increasing radius.
#
# elev_depth_matrix: numeric matrix; > 0 means land, 0 means unknown water.
# depth_step:        depth assigned per unit distance to shore (default 5).
# Returns the matrix with each zero replaced by -depth_step * distance.
#
# NOTE: cells are filled in place while iterating, so a later zero cell can
# see an already-faked (negative) neighbour and keeps searching for land.
fake_depth <- function(elev_depth_matrix, depth_step = 5) {
  zeroes <- which(elev_depth_matrix == 0, arr.ind = T)
  # Without any land cell the shore search below could never terminate.
  if (nrow(zeroes) > 0 && !any(elev_depth_matrix > 0)) {
    stop("fake_depth: matrix contains no cell above water level")
  }
  maxrow <- dim(elev_depth_matrix)[1]
  maxcol <- dim(elev_depth_matrix)[2]
  # BUG FIX: seq_len() instead of 1:nrow(zeroes); the old form errored on a
  # matrix with no zero cells (1:0 iterates over c(1, 0)).
  for (i in seq_len(nrow(zeroes))) {
    row <- zeroes[i, 1]
    col <- zeroes[i, 2]
    found_shore <- FALSE
    distance_to_shore <- 1
    # Probe values persist across radii; out-of-bounds directions keep their
    # previous (initially 0) value, matching the original behaviour.
    adjacent_level <- c(0, 0, 0, 0)
    while (!found_shore) {
      if (row > distance_to_shore)
        adjacent_level[1] <-
          elev_depth_matrix[row - distance_to_shore, col] # south
      if (col > distance_to_shore)
        adjacent_level[2] <-
          elev_depth_matrix[row , col - distance_to_shore] # west
      # BUG FIX: the north/east bounds used `<` against maxrow/maxcol minus the
      # radius, skipping the last valid in-bounds neighbour; a zero cell whose
      # only land lay at the matrix edge therefore looped forever.
      if (row + distance_to_shore <= maxrow)
        adjacent_level[3] <-
          elev_depth_matrix[row + distance_to_shore, col] # north
      if (col + distance_to_shore <= maxcol)
        adjacent_level[4] <-
          elev_depth_matrix[row , col + distance_to_shore] # east
      found_shore <- (max(adjacent_level) > 0)
      if (found_shore) {
        elev_depth_matrix[row, col] <- -depth_step * distance_to_shore
      } else {
        distance_to_shore <- distance_to_shore + 1
      }
    }
  }
  return(elev_depth_matrix)
}
# -------------------------------------------------------------------
# Crop a raster image to a long/lat bounding box.
#
# elev_img: a raster object.
# bbox:     list(p1 = list(long, lat), p2 = list(long, lat)).
# Returns the cropped raster.
crop_img <- function(elev_img, bbox) {
  # unlist() yields c(p1$long, p1$lat, p2$long, p2$lat); filled column-wise,
  # row 1 holds the longitudes and row 2 the latitudes, which is then handed
  # to raster::extent(). NOTE(review): relies on the bbox element order and on
  # extent()'s matrix interpretation — confirm against the raster docs.
  new_extent <- unlist(bbox) %>%
    matrix(nrow = 2, ncol = 2) %>%
    extent()
  elev_img <- elev_img %>%
    crop(new_extent)
  return(elev_img)
}
# Downsample an elevation matrix to roughly target_image_size points per axis
# by keeping rows/columns at (rounded) regular intervals.
#
# target_image_size: list with $width and $height (pixels).
# NOTE(review): rounding can keep one point more than the target; this breaks
# if the rounded index sequences come out SMALLER than the matrix allows
# (comment inherited from the original).
downscale_elev <- function(elev_matrix, target_image_size) {
  n_row <- dim(elev_matrix)[1]
  n_col <- dim(elev_matrix)[2]
  step_rows <- n_row / target_image_size$width
  step_cols <- n_col / target_image_size$height
  keep_rows <- round(seq(1, n_row, by = step_rows))
  keep_cols <- round(seq(1, n_col, by = step_cols))
  elev_matrix[keep_rows, keep_cols]
}
#rayshader utilities from Will Bishop @wcmbiship
#' Translate the given long/lat coordinates into an image position (x, y).
#'
#' @param long longitude value
#' @param lat latitude value
#' @param bbox bounding box coordinates (list of 2 points with long/lat values)
#' @param image_width image width, in pixels
#' @param image_height image height, in pixels
#'
#' @return named list with elements "x" and "y" defining an image position
#'
find_image_coordinates <-
  function(long, lat, bbox, image_width, image_height) {
    # Fraction of the bounding-box span covered by the point, scaled to pixels.
    long_span <- abs(bbox$p1$long - bbox$p2$long)
    lat_span <- abs(bbox$p1$lat - bbox$p2$lat)
    x_img <- round(image_width * (long - min(bbox$p1$long, bbox$p2$long)) / long_span)
    y_img <- round(image_height * (lat - min(bbox$p1$lat, bbox$p2$lat)) / lat_span)
    # Pixel position as a named list, origin at the bbox minimum corner.
    list(x = x_img, y = y_img)
  }
#' Define image size variables from the given bounding box coordinates.
#'
#' @param bbox bounding box coordinates (list of 2 points with long/lat values)
#' @param major_dim major image dimension, in pixels.
#' Default is 400 (meaning larger dimension will be 400 pixels)
#'
#' @return list with items "width", "height", and "size" (string of format "<width>,<height>")
#'
#' @examples
#' bbox <- list(
#' p1 = list(long = -122.522, lat = 37.707),
#' p2 = list(long = -122.354, lat = 37.84)
#' )
#' image_size <- define_image_size(bbox, 600)
#'
define_image_size <- function(bbox, major_dim = 400) {
  # Derive image dimensions whose aspect ratio matches the lat/long bounding
  # box, with the larger dimension set to major_dim pixels.
  #
  # FIX: dropped the magrittr pipe (`%>% round()`) — this pure-base utility
  # previously failed whenever magrittr was not attached.
  aspect_ratio <-
    abs((bbox$p1$long - bbox$p2$long) / (bbox$p1$lat - bbox$p2$lat))
  # Wide boxes pin the width to major_dim; tall boxes pin the height.
  img_width <-
    round(if (aspect_ratio > 1) major_dim else major_dim * aspect_ratio)
  img_height <-
    round(if (aspect_ratio < 1) major_dim else major_dim / aspect_ratio)
  # size is the "<width>,<height>" string expected by the REST endpoints below.
  size_str <- paste(img_width, img_height, sep = ",")
  list(height = img_height,
       width = img_width,
       size = size_str)
}
#' Download USGS elevation data from the ArcGIS REST API.
#'
#' @param bbox bounding box coordinates (list of 2 points with long/lat values)
#' @param size image size as a string with format "<width>,<height>"
#' @param file file path to save to. Default is NULL, which will create a temp file.
#' @param sr_bbox Spatial Reference code for bounding box
#' @param sr_image Spatial Reference code for elevation image
#'
#' @details This function uses the ArcGIS REST API, specifically the
#' exportImage task. You can find links below to a web UI for this
#' rest endpoint and API documentation.
#'
#' Web UI: https://elevation.nationalmap.gov/arcgis/rest/services/3DEPElevation/ImageServer/exportImage
#' API docs: https://developers.arcgis.com/rest/services-reference/export-image.htm
#'
#' @return file path for downloaded elevation .tif file. This can be read with
#' \code{read_elevation_file()}.
#'
#' @examples
#' bbox <- list(
#' p1 = list(long = -122.522, lat = 37.707),
#' p2 = list(long = -122.354, lat = 37.84)
#' )
#' image_size <- define_image_size(bbox, 600)
#' elev_file <- get_usgs_elevation_data(bbox, size = image_size$size)
#'
get_usgs_elevation_data <-
  function(bbox,
           size = "400,400",
           file = NULL,
           sr_bbox = 4326,
           sr_image = 4326) {
    # Download a USGS 3DEP elevation GeoTIFF for `bbox` and return (invisibly)
    # the path of the saved .tif file; see the roxygen block above for details.
    # NOTE(review): require() returns FALSE instead of erroring when httr is
    # missing — library()/requireNamespace() would fail louder.
    require(httr)
    # TODO - validate inputs
    url <-
      parse_url(
        "https://elevation.nationalmap.gov/arcgis/rest/services/3DEPElevation/ImageServer/exportImage"
      )
    # First request (f = "json"): ask the service where the rendered image lives.
    res <- GET(
      url,
      query = list(
        bbox = paste(bbox$p1$long, bbox$p1$lat, bbox$p2$long, bbox$p2$lat,
                     sep = ","),
        bboxSR = sr_bbox,
        imageSR = sr_image,
        size = size,
        format = "tiff",
        pixelType = "F32",
        noDataInterpretation = "esriNoDataMatchAny",
        interpolation = "+RSP_BilinearInterpolation",
        f = "json"
      )
    )
    if (status_code(res) == 200) {
      body <- content(res, type = "application/json")
      # TODO - check that bbox values are correct
      # message(jsonlite::toJSON(body, auto_unbox = TRUE, pretty = TRUE))
      # Second request: fetch the raster bytes from the href returned above.
      img_res <- GET(body$href)
      img_bin <- content(img_res, "raw")
      if (is.null(file))
        file <- tempfile("elev_matrix", fileext = ".tif")
      writeBin(img_bin, file)
      message(paste("image saved to file:", file))
    } else {
      # Non-200: warn with the response object; `file` may still be NULL here.
      warning(res)
    }
    # Invisible so the path assigns/pipes cleanly without printing.
    invisible(file)
  }
#' Download a map image from the ArcGIS REST API
#'
#' @param bbox bounding box coordinates (list of 2 points with long/lat values)
#' @param map_type map type to download - options are World_Street_Map, World_Imagery, World_Topo_Map
#' @param file file path to save to. Default is NULL, which will create a temp file.
#' @param width image width (pixels)
#' @param height image height (pixels)
#' @param sr_bbox Spatial Reference code for bounding box
#'
#' @details This function uses the ArcGIS REST API, specifically the
#' "Execute Web Map Task" task. You can find links below to a web UI for this
#' rest endpoint and API documentation.
#'
#' Web UI: https://utility.arcgisonline.com/arcgis/rest/services/Utilities/PrintingTools/GPServer/Export%20Web%20Map%20Task/execute
#' API docs: https://developers.arcgis.com/rest/services-reference/export-web-map-task.htm
#'
#' @return file path for the downloaded .png map image
#'
#' @examples
#' bbox <- list(
#' p1 = list(long = -122.522, lat = 37.707),
#' p2 = list(long = -122.354, lat = 37.84)
#' )
#' image_size <- define_image_size(bbox, 600)
#' overlay_file <- get_arcgis_map_image(bbox, width = image_size$width,
#' height = image_size$height)
#'
get_arcgis_map_image <-
  function(bbox,
           map_type = "World_Street_Map",
           file = NULL,
           width = 400,
           height = 400,
           sr_bbox = 4326) {
    # Download a basemap PNG for `bbox` via the ArcGIS "Export Web Map Task"
    # and return (invisibly) the path of the saved .png; see roxygen above.
    # NOTE(review): require() silently returns FALSE when a package is missing.
    require(httr)
    require(glue)
    require(jsonlite)
    url <-
      parse_url(
        "https://utility.arcgisonline.com/arcgis/rest/services/Utilities/PrintingTools/GPServer/Export%20Web%20Map%20Task/execute"
      )
    # Build the Web_Map_as_JSON payload: basemap layer, output size in pixels,
    # and the map extent. unbox() keeps scalars from becoming JSON arrays.
    web_map_param <- list(
      baseMap = list(baseMapLayers = list(list(
        url = jsonlite::unbox(
          glue(
            "https://services.arcgisonline.com/ArcGIS/rest/services/{map_type}/MapServer",
            map_type = map_type
          )
        )
      ))),
      exportOptions = list(outputSize = c(width, height)),
      mapOptions = list(
        extent = list(
          spatialReference = list(wkid = jsonlite::unbox(sr_bbox)),
          xmax = jsonlite::unbox(max(bbox$p1$long, bbox$p2$long)),
          xmin = jsonlite::unbox(min(bbox$p1$long, bbox$p2$long)),
          ymax = jsonlite::unbox(max(bbox$p1$lat, bbox$p2$lat)),
          ymin = jsonlite::unbox(min(bbox$p1$lat, bbox$p2$lat))
        )
      )
    )
    # First request: submit the export task; the JSON response points at the
    # rendered image.
    res <- GET(
      url,
      query = list(
        f = "json",
        Format = "PNG32",
        Layout_Template = "MAP_ONLY",
        Web_Map_as_JSON = jsonlite::toJSON(web_map_param)
      )
    )
    if (status_code(res) == 200) {
      body <- content(res, type = "application/json")
      message(jsonlite::toJSON(body, auto_unbox = TRUE, pretty = TRUE))
      if (is.null(file))
        file <- tempfile("overlay_img", fileext = ".png")
      # Second request: fetch the actual PNG bytes from the task result URL.
      img_res <- GET(body$results[[1]]$value$url)
      img_bin <- content(img_res, "raw")
      writeBin(img_bin, file)
      message(paste("image saved to file:", file))
    } else {
      message(res)
    }
    # Invisible so the path assigns/pipes cleanly without printing.
    invisible(file)
  }
| /utilities_ray.R | no_license | apsteinmetz/rays | R | false | false | 12,205 | r | extent_to_bbox <- function(ras){
bb <- bbox(ras)
bbx <- list(p1 = list(long=bb[1,1],lat=bb[2,1]),
p2 = list(long=bb[1,2],lat=bb[2,2]))
return(bbx)
}
convert_extent_to_utm <- function(longlat_extent,zone = "18T"){
sp_geo <- SpatialPoints(longlat_extent, CRS("+proj=longlat +datum=WGS84"))
sp_utm <- spTransform(sp_geo,CRS(glue::glue("+proj=utm +zone={zone} +datum=WGS84")))
return(extent(sp_utm))
}
# reduce elevations in map border to zero
# totally screwed kup
# I do not know why I have to transpose matrix back and forth
# top and and bottom are still backwards
zero_out_border <- function(elev_matrix,full_extent,borderless_extent){
ras1 <- raster::raster(elev_matrix)
extent(ras1) <- borderless_extent
ras2 <- raster::crop(ras1,borderless_extent) %>%
raster::extend(full_extent,value=0)
return(as.matrix(ras2))
}
# # convert a lat/long to pixels on an image
# house_pos <- find_image_coordinates(latlon_house$long,
# latlon_house$lat,
# bbox = bbox,
# image_width=image_size$width,
# image_height=image_size$height)
# Ratio of bounding-box sizes (bbox1 relative to bbox2) along x and y.
#
# @param bbox1,bbox2 lists of 2 points (`p1`, `p2`) with `long`/`lat` values
# @return numeric vector c(x_ratio, y_ratio); signed spans, so matching
#   point orderings in both boxes give positive ratios
bbox_size_ratio <- function(bbox1, bbox2) {
  x_ratio <- (bbox1$p1$long - bbox1$p2$long) / (bbox2$p1$long - bbox2$p2$long)
  y_ratio <- (bbox1$p1$lat - bbox1$p2$lat) / (bbox2$p1$lat - bbox2$p2$lat)
  c(x_ratio, y_ratio)
}
# Embed matrix `m` in the centre of a larger matrix whose dimensions are
# dim(m) scaled by `size_ratio`; all border cells are set to `init_value`.
#
# @param m matrix to embed
# @param size_ratio length-2 numeric c(row_ratio, col_ratio)
# @param init_value fill value for the new border cells
# @return the enlarged matrix with `m` centred inside it
border_matrix <- function(m, size_ratio = c(x = 1, y = 1), init_value = 0){
  old_dim <- dim(m)
  out <- matrix(init_value,
                nrow = round(old_dim[1] * size_ratio[1]),
                ncol = round(old_dim[2] * size_ratio[2]))
  new_dim <- dim(out)
  # NOTE(review): row start rounds (gap/2 + 1) while col start rounds gap/2
  # then adds 1 -- these differ for odd gaps; kept verbatim to preserve the
  # original placement behaviour.
  row_start <- round((new_dim[1] - old_dim[1]) / 2 + 1)
  col_start <- round((new_dim[2] - old_dim[2]) / 2) + 1
  out[row_start:(row_start + old_dim[1] - 1),
      col_start:(col_start + old_dim[2] - 1)] <- m
  return(out)
}
# Pad an elevation matrix with a zero-height border so it matches a larger
# bounding box -- e.g. to overlay a map image larger than the raster.
#
# @param elev_matrix numeric elevation matrix covering `bbox_sm`
# @param bbox_big outer lat/long bbox (list of 2 points with long/lat)
# @param bbox_sm lat/long bbox actually covered by `elev_matrix`
# @return enlarged matrix with `elev_matrix` centred and zeros elsewhere
make_elev_matrix_border <- function(elev_matrix, bbox_big, bbox_sm) {
  # BUG FIX: the original called the non-existent `enlarge_matrix()` on the
  # undefined variable `m`; the helper defined above is `border_matrix()`
  # and the input is `elev_matrix`.
  new_m <- border_matrix(elev_matrix,
                         bbox_size_ratio(bbox_big, bbox_sm))
  return(new_m)
}
# Linearly rescale a numeric object to the [0, 1] range.
# NA values propagate (range() is called without na.rm), matching the
# behaviour of the min/max form this replaces.
normalize <- function(x) {
  rng <- range(x)
  (x - rng[1]) / (rng[2] - rng[1])
}
# Convert degrees / minutes / seconds to decimal degrees.
# All three arguments default to 0, so partial forms like dms_to_dec(10, 30)
# work as expected.
dms_to_dec <- function(deg = 0, min = 0, sec = 0) {
  deg + min / 60 + sec / 3600
}
# Replace zero ("unknown water") cells with a fake depth proportional to the
# distance to the nearest positive-elevation ("shore") cell, searching along
# the four cardinal directions only.
#
# @param elev_depth_matrix numeric matrix; > 0 = land, 0 = water of unknown depth
# @param depth_step fake depth (positive number) assigned per cell of
#   distance from shore
# @return matrix with every 0 replaced by -depth_step * distance_to_shore
fake_depth <- function(elev_depth_matrix, depth_step = 5) {
  zeroes <- which(elev_depth_matrix == 0, arr.ind = TRUE)
  maxrow <- nrow(elev_depth_matrix)
  maxcol <- ncol(elev_depth_matrix)
  # FIX: seq_len() instead of 1:nrow(zeroes) -- a matrix with no zeros is now
  # a no-op; the old 1:0 sequence indexed out of bounds and errored.
  for (i in seq_len(nrow(zeroes))) {
    row <- zeroes[i, 1]
    col <- zeroes[i, 2]
    found_shore <- FALSE
    distance_to_shore <- 1
    adjacent_level <- c(0, 0, 0, 0)
    while (!found_shore) {
      # sample the four cardinal neighbours at the current search distance
      if (row > distance_to_shore)
        adjacent_level[1] <-
          elev_depth_matrix[row - distance_to_shore, col] # south
      if (col > distance_to_shore)
        adjacent_level[2] <-
          elev_depth_matrix[row, col - distance_to_shore] # west
      # FIX: bounds were `row < maxrow - distance_to_shore` (and the column
      # analogue), which made shore cells in the last row/column unreachable.
      if (row + distance_to_shore <= maxrow)
        adjacent_level[3] <-
          elev_depth_matrix[row + distance_to_shore, col] # north
      if (col + distance_to_shore <= maxcol)
        adjacent_level[4] <-
          elev_depth_matrix[row, col + distance_to_shore] # east
      found_shore <- (max(adjacent_level) > 0)
      if (found_shore) {
        elev_depth_matrix[row, col] <- -depth_step * distance_to_shore
      } else {
        distance_to_shore <- distance_to_shore + 1
        # FIX: fail loudly instead of looping forever when the matrix
        # contains no positive cell at all.
        if (distance_to_shore > max(maxrow, maxcol))
          stop("fake_depth(): matrix contains no positive 'shore' cell")
      }
    }
  }
  return(elev_depth_matrix)
}
# -------------------------------------------------------------------
# Crop a raster image to the given lat/long bounding box.
#
# @param elev_img raster object
# @param bbox list of 2 points (`p1`, `p2`) with long/lat values
# @return the cropped raster
crop_img <- function(elev_img, bbox) {
  corner_mat <- matrix(unlist(bbox), nrow = 2, ncol = 2)
  new_extent <- extent(corner_mat)
  return(crop(elev_img, new_extent))
}
# Downscale an elevation matrix to an exact target size by regular
# subsampling of rows and columns.
#
# @param elev_matrix numeric matrix (rows correspond to width, columns to
#   height, matching the original code's convention)
# @param target_image_size list with `width` and `height` elements
# @return matrix of dimension c(width, height)
downscale_elev <- function(elev_matrix, target_image_size) {
  # FIX: seq(..., length.out = n) guarantees exactly n sample indices.
  # The previous `by = spacing` form could return one index too many or too
  # few after rounding -- a defect its own comment acknowledged.
  sample_w <- round(seq(1, nrow(elev_matrix),
                        length.out = target_image_size$width))
  sample_h <- round(seq(1, ncol(elev_matrix),
                        length.out = target_image_size$height))
  # drop = FALSE keeps a matrix even when a target dimension is 1
  return(elev_matrix[sample_w, sample_h, drop = FALSE])
}
# rayshader utilities from Will Bishop @wcmbiship
#' Translate the given long/lat coordinates into an image position (x, y).
#'
#' @param long longitude value
#' @param lat latitude value
#' @param bbox bounding box coordinates (list of 2 points with long/lat values)
#' @param image_width image width, in pixels
#' @param image_height image height, in pixels
#'
#' @return named list with elements "x" and "y" defining an image position
#'
find_image_coordinates <-
  function(long, lat, bbox, image_width, image_height) {
    lon_min <- min(bbox$p1$long, bbox$p2$long)
    lat_min <- min(bbox$p1$lat, bbox$p2$lat)
    lon_span <- abs(bbox$p1$long - bbox$p2$long)
    lat_span <- abs(bbox$p1$lat - bbox$p2$lat)
    # linear interpolation of the point within the box, rounded to pixels
    list(x = round(image_width * (long - lon_min) / lon_span),
         y = round(image_height * (lat - lat_min) / lat_span))
  }
#' Define image size variables from the given bounding box coordinates.
#'
#' @param bbox bounding box coordinates (list of 2 points with long/lat values)
#' @param major_dim major image dimension, in pixels.
#'   Default is 400 (meaning larger dimension will be 400 pixels)
#'
#' @return list with items "width", "height", and "size" (string of format "<width>,<height>")
#'
#' @examples
#' bbox <- list(
#'   p1 = list(long = -122.522, lat = 37.707),
#'   p2 = list(long = -122.354, lat = 37.84)
#' )
#' image_size <- define_image_size(bbox, 600)
#'
define_image_size <- function(bbox, major_dim = 400) {
  # aspect ratio (width / height) implied by the lat/long bounding box
  aspect_ratio <-
    abs((bbox$p1$long - bbox$p2$long) / (bbox$p1$lat - bbox$p2$lat))
  # the larger dimension gets major_dim pixels; the other scales to match
  if (aspect_ratio > 1) {
    img_width  <- round(major_dim)
    img_height <- round(major_dim / aspect_ratio)
  } else {
    img_width  <- round(major_dim * aspect_ratio)
    img_height <- round(major_dim)
  }
  list(height = img_height,
       width  = img_width,
       size   = paste(img_width, img_height, sep = ","))
}
#' Download USGS elevation data from the ArcGIS REST API.
#'
#' @param bbox bounding box coordinates (list of 2 points with long/lat values)
#' @param size image size as a string with format "<width>,<height>"
#' @param file file path to save to. Default is NULL, which will create a temp file.
#' @param sr_bbox Spatial Reference code for bounding box
#' @param sr_image Spatial Reference code for elevation image
#'
#' @details This function uses the ArcGIS REST API, specifically the
#' exportImage task. You can find links below to a web UI for this
#' rest endpoint and API documentation.
#'
#' Web UI: https://elevation.nationalmap.gov/arcgis/rest/services/3DEPElevation/ImageServer/exportImage
#' API docs: https://developers.arcgis.com/rest/services-reference/export-image.htm
#'
#' @return file path for downloaded elevation .tif file. This can be read with
#' \code{read_elevation_file()}.
#'
#' @examples
#' bbox <- list(
#'   p1 = list(long = -122.522, lat = 37.707),
#'   p2 = list(long = -122.354, lat = 37.84)
#' )
#' image_size <- define_image_size(bbox, 600)
#' elev_file <- get_usgs_elevation_data(bbox, size = image_size$size)
#'
get_usgs_elevation_data <-
  function(bbox,
           size = "400,400",
           file = NULL,
           sr_bbox = 4326,
           sr_image = 4326) {
    require(httr)
    # TODO - validate inputs
    url <-
      parse_url(
        "https://elevation.nationalmap.gov/arcgis/rest/services/3DEPElevation/ImageServer/exportImage"
      )
    # First request: ask the exportImage task to render a GeoTIFF; the JSON
    # response contains an href pointing at the generated file.
    res <- GET(
      url,
      query = list(
        bbox = paste(bbox$p1$long, bbox$p1$lat, bbox$p2$long, bbox$p2$lat,
                     sep = ","),
        bboxSR = sr_bbox,
        imageSR = sr_image,
        size = size,
        format = "tiff",
        pixelType = "F32",
        noDataInterpretation = "esriNoDataMatchAny",
        interpolation = "+RSP_BilinearInterpolation",
        f = "json"
      )
    )
    if (status_code(res) == 200) {
      body <- content(res, type = "application/json")
      # TODO - check that bbox values are correct
      # message(jsonlite::toJSON(body, auto_unbox = TRUE, pretty = TRUE))
      # Second request: fetch the actual .tif bytes referenced by the JSON.
      img_res <- GET(body$href)
      img_bin <- content(img_res, "raw")
      if (is.null(file))
        file <- tempfile("elev_matrix", fileext = ".tif")
      writeBin(img_bin, file)
      message(paste("image saved to file:", file))
    } else {
      warning(res)
    }
    # Returned invisibly so callers can capture the path without printing.
    invisible(file)
  }
#' Download a map image from the ArcGIS REST API
#'
#' @param bbox bounding box coordinates (list of 2 points with long/lat values)
#' @param map_type map type to download - options are World_Street_Map, World_Imagery, World_Topo_Map
#' @param file file path to save to. Default is NULL, which will create a temp file.
#' @param width image width (pixels)
#' @param height image height (pixels)
#' @param sr_bbox Spatial Reference code for bounding box
#'
#' @details This function uses the ArcGIS REST API, specifically the
#' "Execute Web Map Task" task. You can find links below to a web UI for this
#' rest endpoint and API documentation.
#'
#' Web UI: https://utility.arcgisonline.com/arcgis/rest/services/Utilities/PrintingTools/GPServer/Export%20Web%20Map%20Task/execute
#' API docs: https://developers.arcgis.com/rest/services-reference/export-web-map-task.htm
#'
#' @return file path for the downloaded .png map image
#'
#' @examples
#' bbox <- list(
#'   p1 = list(long = -122.522, lat = 37.707),
#'   p2 = list(long = -122.354, lat = 37.84)
#' )
#' image_size <- define_image_size(bbox, 600)
#' overlay_file <- get_arcgis_map_image(bbox, width = image_size$width,
#'                                      height = image_size$height)
#'
get_arcgis_map_image <-
  function(bbox,
           map_type = "World_Street_Map",
           file = NULL,
           width = 400,
           height = 400,
           sr_bbox = 4326) {
    require(httr)
    require(glue)
    require(jsonlite)
    url <-
      parse_url(
        "https://utility.arcgisonline.com/arcgis/rest/services/Utilities/PrintingTools/GPServer/Export%20Web%20Map%20Task/execute"
      )
    # define JSON query parameter describing the basemap and export extent
    web_map_param <- list(
      baseMap = list(baseMapLayers = list(list(
        url = jsonlite::unbox(
          glue(
            "https://services.arcgisonline.com/ArcGIS/rest/services/{map_type}/MapServer",
            map_type = map_type
          )
        )
      ))),
      exportOptions = list(outputSize = c(width, height)),
      mapOptions = list(
        extent = list(
          spatialReference = list(wkid = jsonlite::unbox(sr_bbox)),
          xmax = jsonlite::unbox(max(bbox$p1$long, bbox$p2$long)),
          xmin = jsonlite::unbox(min(bbox$p1$long, bbox$p2$long)),
          ymax = jsonlite::unbox(max(bbox$p1$lat, bbox$p2$lat)),
          ymin = jsonlite::unbox(min(bbox$p1$lat, bbox$p2$lat))
        )
      )
    )
    res <- GET(
      url,
      query = list(
        f = "json",
        Format = "PNG32",
        Layout_Template = "MAP_ONLY",
        Web_Map_as_JSON = jsonlite::toJSON(web_map_param)
      )
    )
    if (status_code(res) == 200) {
      body <- content(res, type = "application/json")
      message(jsonlite::toJSON(body, auto_unbox = TRUE, pretty = TRUE))
      if (is.null(file))
        file <- tempfile("overlay_img", fileext = ".png")
      # the JSON response references the rendered PNG; fetch and save it
      img_res <- GET(body$results[[1]]$value$url)
      img_bin <- content(img_res, "raw")
      writeBin(img_bin, file)
      message(paste("image saved to file:", file))
    } else {
      # CONSISTENCY FIX: signal failure with warning() (as
      # get_usgs_elevation_data() does) instead of message(), so callers can
      # detect the failure programmatically.
      warning(res)
    }
    # Returned invisibly so callers can capture the path without printing.
    invisible(file)
  }
|
library(lubridate)
## Read the data: ";"-separated household power consumption file, "?" = NA
ds <- read.table("household_power_consumption.txt",header = T,sep = ";", na.strings = "?",colClasses = c("character","character","numeric","numeric","numeric","numeric","numeric","numeric","numeric"))
## Keep only the two days of interest (1-2 Feb 2007; Date is d/m/Y text)
myData <- ds[ds$Date %in% c("1/2/2007","2/2/2007"),]
## Making the plot: x is epoch seconds from dmy_hms(); axes drawn manually
par(mfrow = c(1,1))
with(myData,plot(as.integer(dmy_hms(paste(Date, Time))),Global_active_power,type = "l", xaxt = "n", yaxt = "n", xlab = "",ylab = "Global Active Power (kilowatts)"))
## Tick positions are hard-coded epoch seconds for the Thu/Fri/Sat midnights
## (assumes dmy_hms parsed in the same timezone -- confirm if ticks look off)
axis(1, at=c(1170288000,1170374400,1170460800), labels = c("Thu","Fri","Sat"))
axis(2, at=c(0,2,4,6),labels = c(0,2,4,6))
## Copy the on-screen plot to a png file
dev.copy(png,file = "plot2.png")
dev.off()
| /plot2.R | no_license | quietseason/ExData_Plotting1 | R | false | false | 684 | r | library(lubridate)
##Read the data
ds <- read.table("household_power_consumption.txt",header = T,sep = ";", na.strings = "?",colClasses = c("character","character","numeric","numeric","numeric","numeric","numeric","numeric","numeric"))
myData <- ds[ds$Date %in% c("1/2/2007","2/2/2007"),]
##Making plot
par(mfrow = c(1,1))
with(myData,plot(as.integer(dmy_hms(paste(Date, Time))),Global_active_power,type = "l", xaxt = "n", yaxt = "n", xlab = "",ylab = "Global Active Power (kilowatts)"))
axis(1, at=c(1170288000,1170374400,1170460800), labels = c("Thu","Fri","Sat"))
axis(2, at=c(0,2,4,6),labels = c(0,2,4,6))
##Copy the plot to png file
dev.copy(png,file = "plot2.png")
dev.off()
|
#' Encode a local favicon image to be passed to the API
#'
#' Reads the file as raw bytes and base64-encodes it into the structure
#' expected by the JSON request.
#' @param image_path character; path to the image
#' @return list; list containing the embedded image and required extra parameters needed for the JSON request
#' @export
local_image <- function(image_path) {
  # base file.size() replaces fs::file_info()$size -- same byte count, one
  # fewer package dependency for this helper.
  image <- readBin(image_path, what = "raw", n = file.size(image_path))
  list(
    type = "inline",
    content = openssl::base64_encode(image)
  )
}
#' Provide an image via url to be passed to the API
#'
#' Wraps a URL in the structure expected by the JSON request.
#' @param url character; url to image
#' @return list; list containing the url and required extra parameters needed for the JSON request
#' @export
url_image <- function(url) {
  list(type = "url", url = url)
}
#' Create a list and remove NULLs
#'
#' Useful to remove NULL values when creating the favicon_design
#' @param ... values to be added to list
#' @return list; list without NULL values (names of kept elements preserved)
#' @export
remove_null_list <- function(...) {
  raw_list <- list(...)
  # base Filter() replaces plyr::compact() -- identical result (drops NULL
  # elements, keeps names) without depending on the retired plyr package.
  Filter(Negate(is.null), raw_list)
}
#' Create a list excluding empty elements
#'
#' An element is dropped when its length is zero (e.g. NULL, integer(0)).
#' @param ... values to be coerced to a list
#' @return list; list without empty values
#' @export
remove_empty_list <- function(...) {
  elements <- list(...)
  keep <- vapply(elements, function(el) length(el) > 0, logical(1))
  elements[keep]
}
| /R/utility.R | no_license | ARawles/faviconR | R | false | false | 1,449 | r | #' Encode a local favicon image to be passed to the API
#'
#' This function takes a local image path and returns a formatted list expected by the JSON request
#' @param image_path character; path to the image
#' @return list; list containing the embedded image and required extra parameters needed for the JSON request
#' @export
local_image <- function(image_path) {
image <- readBin(image_path, what = "raw", n = fs::file_info(image_path)$size)
list(
type = "inline",
content = openssl::base64_encode(image)
)
}
#' Provide an image via url to be passed to the API
#'
#' This function takes a url to an image and returns a formatted list needed by the JSON request
#' @param url character; url to image
#' @return list; list containing the url and required extra parameters needed for the JSON request
#' @export
url_image <- function(url) {
list(
type = "url",
url = url
)
}
#' Create a list and remove NULLs
#'
#' Useful to remove NULL values when creating the favicon_design
#' @param ... values to be added to list
#' @return list; list without NULL values
#' @export
remove_null_list <- function(...) {
raw_list <- list(...)
plyr::compact(raw_list)
}
#' Create a list excluding empty elements
#'
#' @param ... values to be coerced to a list
#' @return list; list without empty values
#' @export
remove_empty_list <- function(...) {
raw_list <- list(...)
raw_list[!vapply(raw_list, Negate(length), NA)]
}
|
rm(list=ls())
source("function_DY.R")
# Set data --------------------------------------------------------------
# Each data row: columns 1:128 = choices, 129:256 = reinforcements,
# 257 = subject label, 258 = group.
datname="managed sample data.txt"
rawdatamat=read.table(datname,encoding="UTF-8")
subjlabels=rawdatamat[,257] #reads extra information in datafile if available
subjgroup= rawdatamat[,258]
# Set simulation conditions ---------------------------------------------
maxiter <- 50   # random restarts per subject/model
maxsubj <- 88   # subjects 1:49 are control, 50:88 SDI (see summary section)
# trials per subject; unused choice slots are coded 0
lengthvec <- 128-rowSums(rawdatamat[,1:128]==0)
modelstorun <- 5
parbounds <- c(0,0,.01,.01,1,1,5,5) #boundaries for r, p, d, i
lb=parbounds[1:4]
ub=parbounds[5:8]
# Logistic re-parameterisation between bounded (original) and unbounded
# (stretched) parameter scales for the optimizer.
stretchpars=function(opars) -log((ub-lb)/(opars-lb)-1) #opars=original pars
contractpars=function(spars) (ub-lb)/(exp(-spars)+1)+lb #spars=stretched pars
# Generate model names ---------------------------------------------------
# Each of the 24 models frees or fixes some of r, p, d, i.
freeparsmat <- expand.grid(r=c("r",""),i=c("i","0","1"),d=c("d",""),p=c("p",""))
freeparsmat <- as.matrix(freeparsmat)
freeparsmat <- freeparsmat[,c(1,4,3,2)] #Needed to match the sequence as the original one.
fixedvalsmat <- expand.grid(r=c(-1,1),i=c(-1,0.0001,1-1e-8),d=c(-1,1),p=c(-1,1)) # -1 means free parameter
fixedvalsmat <- as.matrix(fixedvalsmat)
fixedvalsmat <- fixedvalsmat[,c(1,4,3,2)]
pequalsrmat <- fixedvalsmat[,"p"]
pequalsrmat[pequalsrmat==-1] <- 0
modnames <- apply(freeparsmat, 1, paste, collapse="")
# Initialize result stacks: dimensions are (subject, iteration, model) ----
twoLLstack <- array(rep(NA,(maxsubj*maxiter*24)),dim=c(maxsubj,maxiter,24)) #row is subj, col is LL, dim is number of models
BICstack=array(rep(NA,(maxsubj*maxiter*24)),dim=c(maxsubj,maxiter,24))
parstack <- array(NA, c(maxsubj, 4, 24)) #col is parameters
finalLLstack <- array(NA, dim=c(maxsubj,1,24))
finalBICstack <- array(NA, dim=c(maxsubj,1,24))
## Fit the hypothesized models ------------------------------------------
# NOTE(review): the subject loop starts at 50 (the SDI group only), not 1 --
# confirm whether the control subjects were fit in a separate run.
for (cursubj in 50:88){ #for 88 subjects
  curlength=lengthvec[cursubj]
  curchoices=data.frame(rawdatamat[cursubj,1:curlength])
  curreinf=data.frame(rawdatamat[cursubj,129:(128+curlength)])
  for (curmod in modelstorun){ #for 24 models
    # One initial fit, then maxiter random restarts; keep the best (setmod).
    temppars <- runif(4)*ub
    setmod <- optim(temppars, vattG2overarchfun, freeletters=freeparsmat[curmod,],fixedvals=fixedvalsmat[curmod,],
                    pequalsr=pequalsrmat[curmod],tempchoices=curchoices,tempreinf=curreinf,predpfun=vattpredpfun9,
                    method="Nelder-Mead") #abnormal termination happens with L-BFGS-B. have to manually re-range parameters
    for (curiter in 1:maxiter){ #run restarts; optimize the -LL from the MLE model
      temppars <- runif(4)*ub
      tempmod <- optim(temppars, vattG2overarchfun, freeletters=freeparsmat[curmod,],fixedvals=fixedvalsmat[curmod,],
                       pequalsr=pequalsrmat[curmod],tempchoices=curchoices,tempreinf=curreinf,predpfun=vattpredpfun9,
                       method="Nelder-Mead") #abnormal termination happens with L-BFGS-B. have to manually re-range parameters
      twoLLstack[cursubj,curiter,curmod] <- tempmod$value
      # BIC = -2LL + k*log(n); k = number of free parameters
      BICstack[cursubj,curiter,curmod] <- tempmod$value+sum(freeparsmat[curmod,]!="")*log(curlength-1)
      if (tempmod$value < setmod$value){ #keep the best restart
        setmod <- tempmod}
      roundpars <- round(contractpars(tempmod$par),3)
      print(noquote(c("subj#=",cursubj," iter=",curiter," model=",modnames[curmod], " -2LL=",round(tempmod$value,3) )))
      print(noquote(c("r=",roundpars[1]," p=",roundpars[2]," d=",roundpars[3]," i=",roundpars[4])))
      print(noquote(""))
      flush.console()
    } #iteration loop
    # Calculate information criteria for the best fit
    parstack[cursubj,,curmod] <- contractpars(setmod$par)
    finalLLstack[cursubj,,curmod] <- setmod$value
    # NOTE(review): this line uses tempmod$value (the LAST restart) rather
    # than setmod$value (the BEST restart) -- looks like a bug; confirm.
    finalBICstack[cursubj,,curmod] <- tempmod$value+sum(freeparsmat[curmod,]!="")*log(curlength-1)
  } #model loop
} #subject loop
## Baseline model ---------------------------------------------------------
# Calculate information criteria for the baseline model.
# (translated from Korean: "let's try this with case 3!")
deckbaseG2 <- c()
deckbaseG2_DY <- c()
catt33G2 <- c()
deckbaseBIC <- c()
for(cursubj in 1:maxsubj){
  curlength=lengthvec[cursubj]
  curchoices=data.frame(rawdatamat[cursubj,1:curlength])
  curreinf=data.frame(rawdatamat[cursubj,129:(128+curlength)])
  # Observed choice frequency for each of the 4 decks
  deckobsf <- c()
  for (i in 1:4){deckobsf[i] <- c(sum(curchoices==i))}
  deckexpf <- sum(deckobsf)*c(1/4,1/4,1/4,1/4) #Expected frequency assuming independence
  deckobsp <- deckobsf/lengthvec[cursubj]
  deckbaseG2[cursubj]=-2*sum(deckobsf*log(deckobsp)) #original code; (translated) this seems to mix log-likelihood and G2...
  # Alternative baseline: -2LL of uniform (p = 0.25) choice on every trial
  deckbaseG2_DY[cursubj] <- -2*lengthvec[cursubj]*log(0.25)
  catt33G2[cursubj]=cattG2fun(rep((1/3),3),curchoices) #(translated) not G2 but 2LL: deck-choice probabilities come from equal attention weights, then used as in case 3
  deckbaseBIC[cursubj]=deckbaseG2[cursubj]+3*log(curlength-1) #(translated) why subtract 1? cattG2fun does too... shouldn't deckbaseG2 subtract it as well?
}
######## (translated) My own practice / scratch section starts here ########
# Works through subject 1 by hand to compare the three baseline variants.
cursubj <- 1
curlength=lengthvec[cursubj]
curchoices=data.frame(rawdatamat[cursubj,1:curlength])
curreinf=data.frame(rawdatamat[cursubj,129:(128+curlength)])
deckbaseG2 <- c()
catt33G2 <- c()
deckbaseBIC <- c()
deckobsf <- c()
for (i in 1:4){deckobsf[i] <- c(sum(curchoices==i))}
deckexpf <- sum(deckobsf)*c(1/4,1/4,1/4,1/4) #Expected frequency assuming independence
deckobsp <- deckobsf/lengthvec[cursubj]
## Case 1: original code
deckbaseG2[cursubj]=-2*sum(deckobsf*log(deckobsp)) #original code; (translated) this seems to mix log-likelihood and G2...
catt33G2[cursubj]=cattG2fun(rep((1/3),3),curchoices) #(translated) not G2 but 2LL: deck-choice probabilities come from equal attention weights, then used as in case 3
deckbaseBIC[cursubj]=deckbaseG2[cursubj]+3*log(curlength-1) #(translated) why subtract 1? cattG2fun does too... shouldn't deckbaseG2 subtract it as well?
## Case 3: probability sum over choices. (translated) lengthvec*log(0.25)
## appears to use the single-trial multinomial as the likelihood function.
-2*lengthvec[cursubj]*log(0.25)
curmod <- 5
catt33G2 <- array(NA, c(maxsubj, 1, 24))
catt33BIC <- array(NA, c(maxsubj, 1, 24))
for(cursubj in 1:88){
curlength=lengthvec[cursubj]
curchoices=data.frame(rawdatamat[cursubj,1:curlength])
curreinf=data.frame(rawdatamat[cursubj,129:(128+curlength)])
catt33G2[cursubj,1,curmod] <- cattG2fun(rep(1/3,3),curchoices)
catt33BIC[cursubj,1,curmod] <-catt33G2[cursubj,1,curmod]+3*log(curlength-1)
}
####summarize tables 1:49, 50:88 control/sdi
r_BIC <- finalBICstack[,,5]
r_LL <- finalLLstack[,,5]
r_par <- parstack[,,5]
r_baseLL <- catt33G2[,,5]
r_baseBIc <- catt33BIC[,,5]
mean_par <- rbind(control=colMeans(r_par[1:49,]), sdi=colMeans(r_par[50:88,]))
median_par <- rbind(control=apply(r_par[1:49,],2,median), sdi=apply(r_par[50:88,],2,median))
sd_par <- rbind(control=apply(r_par[1:49,],2,sd), sdi=apply(r_par[50:88,],2,sd))
BIC_5 <- mean(r_BIC)
LL_base <- mean(r_baseLL)
BIC_base <- mean(r_baseBIc)
| /code/body_rpd1_DY.R | no_license | mindy2801/WCST | R | false | false | 6,777 | r |
rm(list=ls())
source("function_DY.R")
#Set data
datname="managed sample data.txt"
rawdatamat=read.table(datname,encoding="UTF-8")
subjlabels=rawdatamat[,257] #reads extra information in datafile if available
subjgroup= rawdatamat[,258]
#Set simulation conditions
maxiter <- 50
maxsubj <- 88
lengthvec <- 128-rowSums(rawdatamat[,1:128]==0)
modelstorun <- 5
parbounds <- c(0,0,.01,.01,1,1,5,5) #boundaries for r, p, d, i
lb=parbounds[1:4]
ub=parbounds[5:8]
stretchpars=function(opars) -log((ub-lb)/(opars-lb)-1) #opars=original pars
contractpars=function(spars) (ub-lb)/(exp(-spars)+1)+lb #spars=stretched pars
#Generate Model Names
freeparsmat <- expand.grid(r=c("r",""),i=c("i","0","1"),d=c("d",""),p=c("p",""))
freeparsmat <- as.matrix(freeparsmat)
freeparsmat <- freeparsmat[,c(1,4,3,2)] #Needed to match the sequence as the original one.
fixedvalsmat <- expand.grid(r=c(-1,1),i=c(-1,0.0001,1-1e-8),d=c(-1,1),p=c(-1,1)) # -1 means free parameter
fixedvalsmat <- as.matrix(fixedvalsmat)
fixedvalsmat <- fixedvalsmat[,c(1,4,3,2)]
pequalsrmat <- fixedvalsmat[,"p"]
pequalsrmat[pequalsrmat==-1] <- 0
modnames <- apply(freeparsmat, 1, paste, collapse="")
#Initailize stacks
twoLLstack <- array(rep(NA,(maxsubj*maxiter*24)),dim=c(maxsubj,maxiter,24)) #row is subj, col is LL, dim is number of models
BICstack=array(rep(NA,(maxsubj*maxiter*24)),dim=c(maxsubj,maxiter,24))
parstack <- array(NA, c(maxsubj, 4, 24)) #col is parameters
finalLLstack <- array(NA, dim=c(maxsubj,1,24))
finalBICstack <- array(NA, dim=c(maxsubj,1,24))
##For hypothesized models
#Run simulations
for (cursubj in 50:88){ #for 88 subjects
curlength=lengthvec[cursubj]
curchoices=data.frame(rawdatamat[cursubj,1:curlength])
curreinf=data.frame(rawdatamat[cursubj,129:(128+curlength)])
for (curmod in modelstorun){ #for 24 models
temppars <- runif(4)*ub
setmod <- optim(temppars, vattG2overarchfun, freeletters=freeparsmat[curmod,],fixedvals=fixedvalsmat[curmod,],
pequalsr=pequalsrmat[curmod],tempchoices=curchoices,tempreinf=curreinf,predpfun=vattpredpfun9,
method="Nelder-Mead") #abnormal termination happens with L-BFGS-B. have to manually re-range parameters
for (curiter in 1:maxiter){ #run 100 iterations. Optimize the -LL from MLE model
temppars <- runif(4)*ub
tempmod <- optim(temppars, vattG2overarchfun, freeletters=freeparsmat[curmod,],fixedvals=fixedvalsmat[curmod,],
pequalsr=pequalsrmat[curmod],tempchoices=curchoices,tempreinf=curreinf,predpfun=vattpredpfun9,
method="Nelder-Mead") #abnormal termination happens with L-BFGS-B. have to manually re-range parameters
twoLLstack[cursubj,curiter,curmod] <- tempmod$value
BICstack[cursubj,curiter,curmod] <- tempmod$value+sum(freeparsmat[curmod,]!="")*log(curlength-1)
if (tempmod$value < setmod$value){ #Stack parameters
setmod <- tempmod}
roundpars <- round(contractpars(tempmod$par),3)
print(noquote(c("subj#=",cursubj," iter=",curiter," model=",modnames[curmod], " -2LL=",round(tempmod$value,3) )))
print(noquote(c("r=",roundpars[1]," p=",roundpars[2]," d=",roundpars[3]," i=",roundpars[4])))
print(noquote(""))
flush.console()
} #iteration loop
#Calculate information criteria
parstack[cursubj,,curmod] <- contractpars(setmod$par)
finalLLstack[cursubj,,curmod] <- setmod$value
finalBICstack[cursubj,,curmod] <- tempmod$value+sum(freeparsmat[curmod,]!="")*log(curlength-1)
} #model loop
} #subject loop
##For baseline model
#Calculate information criteria for baseline model. case3로 해보자!
deckbaseG2 <- c()
deckbaseG2_DY <- c()
catt33G2 <- c()
deckbaseBIC <- c()
for(cursubj in 1:maxsubj){
curlength=lengthvec[cursubj]
curchoices=data.frame(rawdatamat[cursubj,1:curlength])
curreinf=data.frame(rawdatamat[cursubj,129:(128+curlength)])
deckobsf <- c()
for (i in 1:4){deckobsf[i] <- c(sum(curchoices==i))}
deckexpf <- sum(deckobsf)*c(1/4,1/4,1/4,1/4) #Expected frequency assuming independence
deckobsp <- deckobsf/lengthvec[cursubj]
deckbaseG2[cursubj]=-2*sum(deckobsf*log(deckobsp)) #original code 지금 이건 loglikelihood랑 g2를 섞은것 같은데...
deckbaseG2_DY[cursubj] <- -2*lengthvec[cursubj]*log(0.25)
catt33G2[cursubj]=cattG2fun(rep((1/3),3),curchoices) #G2 아니고 2LL임. attention에 따라 deckchoice probability를 준 뒤, 그 probability를 case3
deckbaseBIC[cursubj]=deckbaseG2[cursubj]+3*log(curlength-1) #왜 1개 빼지? cattg2fun에서도 그러던데..그럼 deckbaseg2에서도 빼야하는거 아님?
}
########여기서부터 내 실습
cursubj <- 1
curlength=lengthvec[cursubj]
curchoices=data.frame(rawdatamat[cursubj,1:curlength])
curreinf=data.frame(rawdatamat[cursubj,129:(128+curlength)])
deckbaseG2 <- c()
catt33G2 <- c()
deckbaseBIC <- c()
deckobsf <- c()
for (i in 1:4){deckobsf[i] <- c(sum(curchoices==i))}
deckexpf <- sum(deckobsf)*c(1/4,1/4,1/4,1/4) #Expected frequency assuming independence
deckobsp <- deckobsf/lengthvec[cursubj]
##Case1. original code
deckbaseG2[cursubj]=-2*sum(deckobsf*log(deckobsp)) #original code 지금 이건 loglikelihood랑 g2를 섞은것 같은데...
catt33G2[cursubj]=cattG2fun(rep((1/3),3),curchoices) #G2 아니고 2LL임. attention에 따라 deckchoice probability를 준 뒤, 그 probability를 case3
deckbaseBIC[cursubj]=deckbaseG2[cursubj]+3*log(curlength-1) #왜 1개 빼지? cattg2fun에서도 그러던데..그럼 deckbaseg2에서도 빼야하는거 아님?
##Case3. choice에 대한 probability sum. lengthvec*log(0.25) 이게 single trial에 대한 multinomial을 우도함수로 사용한 것인듯?
-2*lengthvec[cursubj]*log(0.25)
##
curmod <- 5
catt33G2 <- array(NA, c(maxsubj, 1, 24))
catt33BIC <- array(NA, c(maxsubj, 1, 24))
for(cursubj in 1:88){
curlength=lengthvec[cursubj]
curchoices=data.frame(rawdatamat[cursubj,1:curlength])
curreinf=data.frame(rawdatamat[cursubj,129:(128+curlength)])
catt33G2[cursubj,1,curmod] <- cattG2fun(rep(1/3,3),curchoices)
catt33BIC[cursubj,1,curmod] <-catt33G2[cursubj,1,curmod]+3*log(curlength-1)
}
####summarize tables 1:49, 50:88 control/sdi
r_BIC <- finalBICstack[,,5]
r_LL <- finalLLstack[,,5]
r_par <- parstack[,,5]
r_baseLL <- catt33G2[,,5]
r_baseBIc <- catt33BIC[,,5]
mean_par <- rbind(control=colMeans(r_par[1:49,]), sdi=colMeans(r_par[50:88,]))
median_par <- rbind(control=apply(r_par[1:49,],2,median), sdi=apply(r_par[50:88,],2,median))
sd_par <- rbind(control=apply(r_par[1:49,],2,sd), sdi=apply(r_par[50:88,],2,sd))
BIC_5 <- mean(r_BIC)
LL_base <- mean(r_baseLL)
BIC_base <- mean(r_baseBIc)
|
#-----------------------------------------------------------------------------#
#
# Author: Logan Stundal
# Date: April 09, 2021
# Purpose: 5.0: Table Construction
#
#
# Copyright (c): Logan Stundal, 2021
# Email: stund005@umn.edu
#
#-----------------------------------------------------------------------------#
#
# Notes:
# Create tables for SPDE model results
#
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
# ADMINISTRATIVE ----
#-----------------------------------------------------------------------------#
#---------------------------#
# Clear working environment
# NOTE(review): rm(list = ls()) wipes the caller's workspace if this script
# is sourced; acceptable only as a standalone analysis script.
#---------------------------#
rm(list = ls())
#---------------------------#
# Load required packages
#---------------------------#
library(tidyverse)
library(magrittr)
# library(sf)
library(INLA)
library(kableExtra)
#---------------------------#
# Load data
# Presumably provides inla_mods, yr_grp, dvs, spde, and model_colors used
# below -- confirm against the object that built this .Rdata.
#---------------------------#
load("Results/inla-mods.Rdata")
qoi <- function(mod_list, centrality = "mean"){
# Takes an inla model list and returns a list containing quantities
# of interest
# ----------------------------------- #
# Extract partials from structural model
# ----------------------------------- #
inla_betas <- lapply(mod_list, function(mod){
tmp <- round(mod$summary.fixed[,c(ifelse(centrality == "mean", "mean", "0.5quant"),
"0.025quant","0.975quant")],3) %>%
as.data.frame()
if(centrality == "mean"){
tmp %<>% rename(mean = `mean`,
lb = `0.025quant`,
ub = `0.975quant`) %>%
rownames_to_column(var = "variable")
} else if(centrality == "median"){
tmp %<>% rename(median = `0.5quant`,
lb = `0.025quant`,
ub = `0.975quant`) %>%
rownames_to_column(var = "variable")
} else{
stop("Centrality parameter must be one of: 'median' or 'mean'.")
}
})
# ----------------------------------- #
# ----------------------------------- #
# Extract hyper-parameters
# ----------------------------------- #
inla_hyper <- lapply(mod_list, function(mod, round_digits = 3){
spde_pars <- inla.spde2.result(inla = mod,
name = "spatial.field",
spde,do.transform = TRUE)
# ----------------------------------- #
# ----------------------------------- #
# Tidy hyper-parameter centrality measures
# ----------------------------------- #
if(centrality == "median"){
Kappa <- inla.qmarginal(0.50, spde_pars$marginals.kappa[[1]]) # kappa (median)
Sigma <- inla.qmarginal(0.50, spde_pars$marginals.variance.nominal[[1]]) # variance (median)
Range <- inla.qmarginal(0.50, spde_pars$marginals.range.nominal[[1]]) # range (median)
} else if(centrality == "mean"){
Kappa <- inla.emarginal(function(x) x, spde_pars$marginals.kappa[[1]]) # kappa (mean)
Sigma <- inla.emarginal(function(x) x, spde_pars$marginals.variance.nominal[[1]]) # variance (mean)
Range <- inla.emarginal(function(x) x, spde_pars$marginals.range.nominal[[1]]) # range (mean)
} else{
stop("Centrality parameter must be one of: 'median' or 'mean'.")
}
# ----------------------------------- #
# ----------------------------------- #
# Extract HPDs
# ----------------------------------- #
Kappahpd <- inla.hpdmarginal(0.95, spde_pars$marginals.kappa[[1]]) # kappa (hpd 95%)
Sigmahpd <- inla.hpdmarginal(0.95, spde_pars$marginals.variance.nominal[[1]]) # variance (hpd 95%)
Rangehpd <- inla.hpdmarginal(0.95, spde_pars$marginals.range.nominal[[1]]) # range (hpd 95%)
# ----------------------------------- #
# ----------------------------------- #
# Convert range to km (degrees = 2*pi*6371/360)
# ----------------------------------- #
Range <- Range * 2*pi*6371/360
Rangehpd <- Rangehpd * 2*pi*6371/360
# ----------------------------------- #
# ----------------------------------- #
# Tidy up return object
# ----------------------------------- #
df <- rbind(cbind(Kappa, Kappahpd),
cbind(Sigma, Sigmahpd),
cbind(Range, Rangehpd)) %>%
as.data.frame()
colnames(df) <- c(centrality,"lb","ub")
rownames(df) <- 1:nrow(df)
df$variable <- c("Kappa","Sigma","Range")
# ----------------------------------- #
return(df)
})
inla_lliks <- lapply(mod_list, function(mod){
mod$mlik[1]
})
return(list("betas" = inla_betas,
"hyper" = inla_hyper,
"lliks" = inla_lliks))
}
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
# SPECIFY PREFERRED CENTRALITY MEASURE ----
#-----------------------------------------------------------------------------#
cent <- "median"
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
# QOI ----
#-----------------------------------------------------------------------------#
# Extract quantities-of-interest: regression coefficients and hyperparameters
# (one qoi() result per year group)
res_vals <- sapply(yr_grp, function(x){
  qoi(inla_mods[[x]], centrality = cent)
}, simplify = F)
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
# TIDY PARAMS ----
#-----------------------------------------------------------------------------#
# Flatten the nested (year group x dependent variable) results into one data
# frame: one row per parameter per model per year group.
tidy_vals <- list()
for(yr in yr_grp){
  for(dv in dvs){
    bs <- res_vals[[yr]][["betas"]][[dv]]
    hy <- res_vals[[yr]][["hyper"]][[dv]]
    llik <- res_vals[[yr]][["lliks"]][[dv]]
    vl <- bind_rows(bs, hy)
    # NOTE(review): n is hard-coded to "1116" observations per model -- confirm.
    vl <- bind_cols(vl,
                    "model" = dv,
                    "years" = yr,
                    "lliks" = llik,
                    "n" = "1116")
    id <- paste(dv, yr, sep = "_._")
    tidy_vals[[id]] <- vl
  }
};rm(yr, dv, bs, hy, llik, vl, id)
# Bind parameters to DF
tidy_vals <- bind_rows(tidy_vals)
#-----------------------------------------------------------------------------#
# MODEL TIDY PARAMS ----
#-----------------------------------------------------------------------------#
tab_vals <- tidy_vals %>%
mutate(across(c(!!cent, lb, ub, lliks),
~format(round(.x, 3), nsmall = 3))) %>%
mutate(hpd = sprintf("[%s, %s]", lb, ub)) %>%
dplyr::select(variable, !!cent, hpd, model, years) %>%
pivot_longer(.,
cols = c(!!cent, hpd),
names_to = "type") %>%
pivot_wider(.,
id_cols = c(variable, type, years),
names_from = model,
values_from = value)
lliks_n <- tidy_vals %>%
group_by(model, years) %>%
summarize(lliks = lliks[1],
n = n[1],
.groups = "keep") %>%
ungroup() %>%
mutate(across(c(lliks),
~format(round(.x, 3), nsmall = 3))) %>%
dplyr::select(lliks, n, model, years) %>%
pivot_longer(.,
cols = c(lliks, n),
names_to = "variable") %>%
pivot_wider(.,
id_cols = c(variable, years),
names_from = model,
values_from = value) %>%
mutate(type = NA)
tab_vals <- bind_rows(tab_vals, lliks_n) %>%
dplyr::select(-type) %>%
mutate(variable = case_when(variable == "intercept" ~ "Intercept",
variable == "dist" ~ "Dist. Bogota, km (log)",
variable == "pop" ~ "Population (log)",
variable == "tri" ~ "TRI",
variable == "lliks" ~ "LogLik",
variable == "n" ~ "N",
TRUE ~ variable))
rm(lliks_n)
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
# SAVE ----
#-----------------------------------------------------------------------------#
save(tab_vals, cent, model_colors, file = "Results/Tables/tidy-mods.Rdata")
rm(list = ls())
#-----------------------------------------------------------------------------#
| /Scripts/5.0-Results-Tables-Continuous.R | no_license | loganstundal/EventData-Space-Colombia | R | false | false | 9,024 | r | #-----------------------------------------------------------------------------#
#
# Author: Logan Stundal
# Date: April 09, 2021
# Purpose: 5.0: Table Construction
#
#
# Copyright (c): Logan Stundal, 2021
# Email: stund005@umn.edu
#
#-----------------------------------------------------------------------------#
#
# Notes:
# Create tables for SPDE model results
#
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
# ADMINISTRATIVE ----
#-----------------------------------------------------------------------------#
#---------------------------#
# Clear working environment
#---------------------------#
rm(list = ls())
#---------------------------#
# Load required packages
#---------------------------#
library(tidyverse)
library(magrittr)
# library(sf)
library(INLA)
library(kableExtra)
#---------------------------#
# Load data
#---------------------------#
load("Results/inla-mods.Rdata")
#---------------------------#
# Functions
#---------------------------#
# qoi(): extract quantities of interest from a list of fitted INLA models.
#
# Args:
#   mod_list   - list of INLA model objects (one per dependent variable).
#   centrality - "mean" or "median"; which posterior summary to report.
#
# Returns a list of three parallel lists (one element per model):
#   "betas" - fixed-effect posterior summaries (centrality, lb, ub, variable),
#   "hyper" - SPDE hyperparameters Kappa / Sigma / Range with 95% HPD bounds,
#   "lliks" - marginal log-likelihoods.
#
# NOTE(review): `spde` (passed to inla.spde2.result below) is a free
# variable resolved from the calling environment, not an argument --
# confirm it is defined before qoi() is called.
qoi <- function(mod_list, centrality = "mean"){
  # ----------------------------------- #
  # Extract partials from structural model
  # ----------------------------------- #
  inla_betas <- lapply(mod_list, function(mod){
    # Pull the requested centrality column plus the 2.5% / 97.5% quantiles,
    # rounded to 3 decimals.
    tmp <- round(mod$summary.fixed[,c(ifelse(centrality == "mean", "mean", "0.5quant"),
                                      "0.025quant","0.975quant")],3) %>%
      as.data.frame()
    # Standardize column names to <centrality>/lb/ub and move the row names
    # (covariate names) into a "variable" column.
    if(centrality == "mean"){
      tmp %<>% rename(mean = `mean`,
                      lb = `0.025quant`,
                      ub = `0.975quant`) %>%
        rownames_to_column(var = "variable")
    } else if(centrality == "median"){
      tmp %<>% rename(median = `0.5quant`,
                      lb = `0.025quant`,
                      ub = `0.975quant`) %>%
        rownames_to_column(var = "variable")
    } else{
      stop("Centrality parameter must be one of: 'median' or 'mean'.")
    }
  })
  # ----------------------------------- #
  # ----------------------------------- #
  # Extract hyper-parameters
  # ----------------------------------- #
  # NOTE(review): round_digits is accepted but never used in this function;
  # rounding of hyperparameters happens downstream in the script.
  inla_hyper <- lapply(mod_list, function(mod, round_digits = 3){
    spde_pars <- inla.spde2.result(inla = mod,
                                   name = "spatial.field",
                                   spde,do.transform = TRUE)
    # ----------------------------------- #
    # ----------------------------------- #
    # Tidy hyper-parameter centrality measures
    # ----------------------------------- #
    if(centrality == "median"){
      Kappa <- inla.qmarginal(0.50, spde_pars$marginals.kappa[[1]])            # kappa (median)
      Sigma <- inla.qmarginal(0.50, spde_pars$marginals.variance.nominal[[1]]) # variance (median)
      Range <- inla.qmarginal(0.50, spde_pars$marginals.range.nominal[[1]])    # range (median)
    } else if(centrality == "mean"){
      Kappa <- inla.emarginal(function(x) x, spde_pars$marginals.kappa[[1]])            # kappa (mean)
      Sigma <- inla.emarginal(function(x) x, spde_pars$marginals.variance.nominal[[1]]) # variance (mean)
      Range <- inla.emarginal(function(x) x, spde_pars$marginals.range.nominal[[1]])    # range (mean)
    } else{
      stop("Centrality parameter must be one of: 'median' or 'mean'.")
    }
    # ----------------------------------- #
    # ----------------------------------- #
    # Extract HPDs
    # ----------------------------------- #
    Kappahpd <- inla.hpdmarginal(0.95, spde_pars$marginals.kappa[[1]])            # kappa (hpd 95%)
    Sigmahpd <- inla.hpdmarginal(0.95, spde_pars$marginals.variance.nominal[[1]]) # variance (hpd 95%)
    Rangehpd <- inla.hpdmarginal(0.95, spde_pars$marginals.range.nominal[[1]])    # range (hpd 95%)
    # ----------------------------------- #
    # ----------------------------------- #
    # Convert range to km (degrees = 2*pi*6371/360)
    # ----------------------------------- #
    Range    <- Range    * 2*pi*6371/360
    Rangehpd <- Rangehpd * 2*pi*6371/360
    # ----------------------------------- #
    # ----------------------------------- #
    # Tidy up return object: one row per hyperparameter,
    # columns = <centrality>, lb, ub, variable.
    # ----------------------------------- #
    df <- rbind(cbind(Kappa, Kappahpd),
                cbind(Sigma, Sigmahpd),
                cbind(Range, Rangehpd)) %>%
      as.data.frame()
    colnames(df) <- c(centrality,"lb","ub")
    rownames(df) <- 1:nrow(df)
    df$variable <- c("Kappa","Sigma","Range")
    # ----------------------------------- #
    return(df)
  })
  # Marginal log-likelihood of each model.
  inla_lliks <- lapply(mod_list, function(mod){
    mod$mlik[1]
  })
  return(list("betas" = inla_betas,
              "hyper" = inla_hyper,
              "lliks" = inla_lliks))
}
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
# SPECIFY PREFERRED CENTRALITY MEASURE ----
#-----------------------------------------------------------------------------#
# Posterior summary used throughout the tables: "median" or "mean".
cent <- "median"
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
# QOI ----
#-----------------------------------------------------------------------------#
# Extract quantities-of-interest: regression coefficients and hyperparameters
# One res_vals element per year group.  (simplify = F keeps a named list;
# style guides prefer the spelled-out FALSE.)
res_vals <- sapply(yr_grp, function(x){
  qoi(inla_mods[[x]], centrality = cent)
}, simplify = F)
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
# TIDY PARAMS ----
#-----------------------------------------------------------------------------#
# Stack betas + hyperparameters for each (dv, year) combination into one
# data frame, tagged with model / years / log-likelihood / n.
tidy_vals <- list()
for(yr in yr_grp){
  for(dv in dvs){
    bs   <- res_vals[[yr]][["betas"]][[dv]]
    hy   <- res_vals[[yr]][["hyper"]][[dv]]
    llik <- res_vals[[yr]][["lliks"]][[dv]]
    vl <- bind_rows(bs, hy)
    # NOTE(review): n is hard-coded as the string "1116" -- confirm this
    # matches the estimation sample size.
    vl <- bind_cols(vl,
                    "model" = dv,
                    "years" = yr,
                    "lliks" = llik,
                    "n" = "1116")
    id <- paste(dv, yr, sep = "_._")
    tidy_vals[[id]] <- vl
  }
};rm(yr, dv, bs, hy, llik, vl, id)
# Bind parameters to DF
tidy_vals <- bind_rows(tidy_vals)
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
# MODEL TIDY PARAMS ----
#-----------------------------------------------------------------------------#
# Build the display table: rows are (variable, type, years), one column per
# model; point estimates and "[lb, ub]" HPD strings are interleaved.
tab_vals <- tidy_vals %>%
  mutate(across(c(!!cent, lb, ub, lliks),
                ~format(round(.x, 3), nsmall = 3))) %>%
  mutate(hpd = sprintf("[%s, %s]", lb, ub)) %>%
  dplyr::select(variable, !!cent, hpd, model, years) %>%
  pivot_longer(.,
               cols = c(!!cent, hpd),
               names_to = "type") %>%
  pivot_wider(.,
              id_cols = c(variable, type, years),
              names_from = model,
              values_from = value)
# Log-likelihood and N rows, appended beneath the coefficient rows.
lliks_n <- tidy_vals %>%
  group_by(model, years) %>%
  summarize(lliks = lliks[1],
            n = n[1],
            .groups = "keep") %>%
  ungroup() %>%
  mutate(across(c(lliks),
                ~format(round(.x, 3), nsmall = 3))) %>%
  dplyr::select(lliks, n, model, years) %>%
  pivot_longer(.,
               cols = c(lliks, n),
               names_to = "variable") %>%
  pivot_wider(.,
              id_cols = c(variable, years),
              names_from = model,
              values_from = value) %>%
  mutate(type = NA)
# Combine and map raw variable names to human-readable table labels.
tab_vals <- bind_rows(tab_vals, lliks_n) %>%
  dplyr::select(-type) %>%
  mutate(variable = case_when(variable == "intercept" ~ "Intercept",
                              variable == "dist" ~ "Dist. Bogota, km (log)",
                              variable == "pop" ~ "Population (log)",
                              variable == "tri" ~ "TRI",
                              variable == "lliks" ~ "LogLik",
                              variable == "n" ~ "N",
                              TRUE ~ variable))
rm(lliks_n)
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
# SAVE ----
#-----------------------------------------------------------------------------#
# model_colors comes from the inla-mods.Rdata workspace loaded at the top.
save(tab_vals, cent, model_colors, file = "Results/Tables/tidy-mods.Rdata")
# Clear the workspace (standalone script; intentional).
rm(list = ls())
#-----------------------------------------------------------------------------#
|
# Demonstration of maRketSim capabilities on the bond side
# - Simple bonds - #
mkt1 <- market(market.bond(i=.05),t=0) # All that is required to specify a bond market is an interest rate
mkt1C <- market(market.bond(i=.1),t=0)
bnd.A <- bond(mkt=mkt1,mat=5) # Bonds can be specified by maturity
bnd.B <- bond(mkt=mkt1,dur=2.5) # or duration, in which case the maturity under prevailing interest rates is calculated
bnd.C <- bond(mkt=mkt1C,mat=15)
bnd.A # You can display the basic characteristics of a bond
summary(bnd.A,mkt1) # Or more sophisticated information like duration and convexity
# - Bonds in time and yield curves - #
mkt2 <- market(market.bond(yield.curve=quote(0.01 + log10( mat + 1 )/ 20),MMrate=.01),t=2) #yield curve must be in format specified here. t=2 implies this is a rate change in the future
mkt1B <- market(market.bond(yield.curve=quote(0.01 + log10( mat + 1 )/ 20),MMrate=.01),t=0) # we'll need this guy later to demonstrate automatic portfolio generation
sum.bnd.A <- summary(bnd.A,mkt2) # Now we're evaluating the same bond two years later, with the intervening coupon payments disappearing into the ether (accounts will address that)
str(sum.bnd.A) # The summary object has structure with useful quantities to be extracted
# Example of extracting duration
# Evaluate bnd.C every half-year from t = 0 to 15 and pull the duration
# from each summary.  vapply() preallocates the result instead of growing
# a vector with c() inside a loop, and guarantees one numeric per step.
ts <- seq(0,15,.5)
durs <- vapply(ts, function(t) {
  summary(bnd.C, market(market.bond(i=.1), t=t))$dur
}, numeric(1))
plot(durs~ts,main="Duration vs. the passage of time",xlab="Time",ylab="Duration")
# - Portfolios of bonds - #
prt1 <- portfolio(name="bond1",bonds=list(bnd.A,bnd.B,bnd.C),mkt=mkt1)
prt1 # Display the bonds in the portfolio
summary(prt1) # Display the portfolio's characteristics under its original market conditions
summary(prt1,mkt=mkt2) # Display the portfolio's characteristics under new market conditions
as.data.frame(prt1) #Another way of looking at the portfolio. Useful for exporting to user-written functions or spreadsheets.
# Create random portfolios of bonds with certain portfolio characteristics
prt2 <- genPortfolio.bond(10,mkt=mkt1B,dur=5,dur.sd=2,name="bond2",type="random.constrained")
prt2
summary(prt2)
cat("Duration of our generated portfolio is",round(abs(5-summary(prt2)$portfolio.sum$dur),2),"away from 5.\n")
# - Market histories - #
mkt3 <- market(market.bond(yield.curve=quote(mat/75+0.02),MMrate=.02),t=3)
h.mkt.simple <- history.market(list(mkt1,mkt2,mkt3))
plot(h.mkt.simple) # Shows how yield curves are handled
plot(h.mkt.simple,plot.mats=c(1,3,5),start.t=0,end.t=5,plot.MMrate=FALSE) # Shows how to change time period plotted, how to change the maturities plotted, and how to turn off plotting the money market rate
h.mkt.updown <- genHistory.market(
i.fxn=quote(1/(10*exp(t))*t^2+.02),
start.t=0,end.t=5,f=.5
)
plot(h.mkt.updown) # Note that it automatically jitters the coordinates so they are visible
# - Accounts - #
# Creating accounts
prts <- list(prt1,prt2,cash(name="cash1",value=300,mkt=mkt1))
acct1 <- account(prts=prts,hist.mkt=h.mkt.updown)
acct2 <- account(prts=prts,hist.mkt=h.mkt.simple)
# Looking at account
acct1
cat("Observe that the value invested doesn't equal the sum of the par values of the bonds plus the cash holdings!\n")
cat("What happened? Recall that bnd.C we created somewhat nonsensibly with a different prevailing interest rate.\n")
cat("Therefore, although its par is $1000, its coupon is higher, and it immediately became worth more.\n")
cat("pv() will calculate the present value of a particular object, such as our bond: $",pv(bnd.C,mkt=mkt1),"\n")
cat("It works for other objects too. Here's the present value of prt1: $",pv(prt1,mkt=mkt1),"\n")
# Seeing what happens in the future
summary(acct1,t=5,rebal.function.args=list(min.bond.size=1000,new.bond.dur=5,new.bond.mat=NA)) # this is using the default rebalance function
### More examples ###
# Flat yield curve
mkt1 <- market(market.bond(i=.05),t=0)
mkt2 <- market(market.bond(i=.07),t=0)
bonds.ladder <- list(
bond(mkt=mkt1,mat=1),
bond(mkt=mkt1,mat=2),
bond(mkt=mkt1,mat=3),
bond(mkt=mkt1,mat=4),
bond(mkt=mkt1,mat=5),
bond(mkt=mkt1,mat=6),
bond(mkt=mkt1,mat=7),
bond(mkt=mkt1,mat=8),
bond(mkt=mkt1,mat=9),
bond(mkt=mkt1,mat=10)
)
prt.ladder <- portfolio(name="Ladder",bonds=bonds.ladder,mkt=mkt1)
prt.bul <- portfolio(name="Bullet",bonds=list(
bond(mkt=mkt1,mat=4),
bond(mkt=mkt1,mat=4),
bond(mkt=mkt1,mat=5),
bond(mkt=mkt1,mat=5),
bond(mkt=mkt1,mat=6),
bond(mkt=mkt1,mat=6),
bond(mkt=mkt1,mat=7),
bond(mkt=mkt1,mat=7),
bond(mkt=mkt1,mat=4),
bond(mkt=mkt1,mat=5)
),mkt=mkt1)
prt.ladder
prt.bul
summary(prt.ladder)
summary(prt.bul)
cat("After changing interest rates from 5% to 7%:\n")
summary(prt.ladder,mkt=mkt2)
summary(prt.bul,mkt=mkt2)
# Sharply upward-sloping yield curve (MMrate=.026, 1-year 5%, 5-year 9.5%, 10-year 12%, 20-year 15%)
mkt.bond.up <- market.bond(yield.curve=quote(log(mat+1.7)/20))
##mkt.bond.up <- market.bond(i=.05)
mkt.up0 <- market(mkt.bond.up,t=0)
prt.ladder <- portfolio(name="Ladder",bonds=list(
bond(mkt=mkt.up0,mat=1),
bond(mkt=mkt.up0,mat=2),
bond(mkt=mkt.up0,mat=3),
bond(mkt=mkt.up0,mat=4),
bond(mkt=mkt.up0,mat=5),
bond(mkt=mkt.up0,mat=6),
bond(mkt=mkt.up0,mat=7),
bond(mkt=mkt.up0,mat=8),
bond(mkt=mkt.up0,mat=9),
bond(mkt=mkt.up0,mat=10)
),mkt=mkt.up0)
prt.bul <- portfolio(name="Bullet",bonds=list(
bond(mkt=mkt.up0,mat=4),
bond(mkt=mkt.up0,mat=4),
bond(mkt=mkt.up0,mat=5),
bond(mkt=mkt.up0,mat=5),
bond(mkt=mkt.up0,mat=6),
bond(mkt=mkt.up0,mat=6),
bond(mkt=mkt.up0,mat=5),
bond(mkt=mkt.up0,mat=5),
bond(mkt=mkt.up0,mat=4),
bond(mkt=mkt.up0,mat=5)
),mkt=mkt.up0)
mkt.up.hist <- history.market(list(
market(mkt.bond.up,t=0),
market(mkt.bond.up,t=40)
))
#plot(mkt.up.hist,end.t=50)
acct.ladder <- account(prts=list(prt.ladder),hist.mkt=mkt.up.hist)
acct.bul <- account(prts=list(prt.bul),hist.mkt=mkt.up.hist)
#sum.ladder <- summary(acct.ladder,t=20,rebal.function.args=list(min.bond.size=1000,new.bond.mat=10,new.bond.dur=NA,sell.mat=NA))
sum.bul <- summary(acct.bul,t=20,rebal.function.args=list(min.bond.size=1000,new.bond.mat=NA,new.bond.dur=3.8,sell.mat=0))
#plot(sum.ladder,main="Ladder")
plot(sum.bul,main="Bullet", which="pv")
plot(sum.bul,main="Bullet", which="duration")
plot(sum.bul,main="Bullet", which=c("pv","duration"))
## Still another example: Portfolio values under rising interest rates
# Generate a market history with rising interest rate (10 percentage point total gain over 20 years), starting at a low value, with parallel yield curve shifts
# Yield curve from 7/1/13, fit with lognormal regression with no intercept
mkt3070113list <- lapply(
seq(0,19),
function(t) { market(market.bond(
yield.curve=eval(substitute(quote(.005*t + log(mat)*1.005 / 100),list(t=t))),
MMrate=.0001+0.005*t
),t=t) }
)
h.mkt.risingParallelYC <- history.market(mkt3070113list)
plot(h.mkt.risingParallelYC)
prt.ladder <- portfolio(bonds=bonds.ladder,mkt=mkt3070113list[[1]])
dur.ladder <- duration(prt.ladder,mkt=mkt3070113list[[1]])
acct.ladder <- account(
list(
prt.ladder,
cash(name="cash1",value=0,mkt=mkt3070113list[[1]])
), hist.mkt=h.mkt.risingParallelYC
)
sum.acct.risingParallelYC <- summary(acct.ladder,t=20,rebal.function.args=list(min.bond.size=1000,new.bond.mat=NA,new.bond.dur=dur.ladder,sell.mat=0))
# With falling interest rate
mkt3070113listRev <- lapply(
seq(0,19),
function(t) { market(market.bond(
yield.curve=eval(substitute(quote(.005*(19-t) + log(mat)*1.005 / 100),list(t=t))),
MMrate=.0001+0.005*(19-t)
),t=t) }
)
h.mkt.fallingParallelYC <- history.market(mkt3070113listRev)
plot(h.mkt.fallingParallelYC)
prt.ladder <- portfolio(bonds=bonds.ladder,mkt=mkt3070113listRev[[1]])
dur.ladder <- duration(prt.ladder,mkt=mkt3070113listRev[[1]])
acct.ladder <- account(
list(
prt.ladder,
cash(name="cash1",value=0,mkt=mkt3070113listRev[[1]])
), hist.mkt=h.mkt.fallingParallelYC
)
sum.acct.fallingParallelYC <- summary(acct.ladder,t=1,rebal.function.args=list(min.bond.size=1000,new.bond.mat=NA,new.bond.dur=dur.ladder,sell.mat=0))
sum.acct.fallingParallelYC | /demo/demo_bond.R | no_license | arturochian/maRketSim | R | false | false | 8,128 | r | # Demonstration of maRketSim capabilities on the bond side
# - Simple bonds - #
mkt1 <- market(market.bond(i=.05),t=0) # All that is required to specify a bond market is an interest rate
mkt1C <- market(market.bond(i=.1),t=0)
bnd.A <- bond(mkt=mkt1,mat=5) # Bonds can be specified by maturity
bnd.B <- bond(mkt=mkt1,dur=2.5) # or duration, in which case the maturity under prevailing interest rates is calculated
bnd.C <- bond(mkt=mkt1C,mat=15)
bnd.A # You can display the basic characteristics of a bond
summary(bnd.A,mkt1) # Or more sophisticated information like duration and convexity
# - Bonds in time and yield curves - #
mkt2 <- market(market.bond(yield.curve=quote(0.01 + log10( mat + 1 )/ 20),MMrate=.01),t=2) #yield curve must be in format specified here. t=2 implies this is a rate change in the future
mkt1B <- market(market.bond(yield.curve=quote(0.01 + log10( mat + 1 )/ 20),MMrate=.01),t=0) # we'll need this guy later to demonstrate automatic portfolio generation
sum.bnd.A <- summary(bnd.A,mkt2) # Now we're evaluating the same bond two years later, with the intervening coupon payments disappearing into the ether (accounts will address that)
str(sum.bnd.A) # The summary object has structure with useful quantities to be extracted
# Example of extracting duration
# Evaluate bnd.C every half-year from t = 0 to 15 and pull the duration
# from each summary.  vapply() preallocates the result instead of growing
# a vector with c() inside a loop, and guarantees one numeric per step.
ts <- seq(0,15,.5)
durs <- vapply(ts, function(t) {
  summary(bnd.C, market(market.bond(i=.1), t=t))$dur
}, numeric(1))
plot(durs~ts,main="Duration vs. the passage of time",xlab="Time",ylab="Duration")
# - Portfolios of bonds - #
prt1 <- portfolio(name="bond1",bonds=list(bnd.A,bnd.B,bnd.C),mkt=mkt1)
prt1 # Display the bonds in the portfolio
summary(prt1) # Display the portfolio's characteristics under its original market conditions
summary(prt1,mkt=mkt2) # Display the portfolio's characteristics under new market conditions
as.data.frame(prt1) #Another way of looking at the portfolio. Useful for exporting to user-written functions or spreadsheets.
# Create random portfolios of bonds with certain portfolio characteristics
prt2 <- genPortfolio.bond(10,mkt=mkt1B,dur=5,dur.sd=2,name="bond2",type="random.constrained")
prt2
summary(prt2)
cat("Duration of our generated portfolio is",round(abs(5-summary(prt2)$portfolio.sum$dur),2),"away from 5.\n")
# - Market histories - #
mkt3 <- market(market.bond(yield.curve=quote(mat/75+0.02),MMrate=.02),t=3)
h.mkt.simple <- history.market(list(mkt1,mkt2,mkt3))
plot(h.mkt.simple) # Shows how yield curves are handled
plot(h.mkt.simple,plot.mats=c(1,3,5),start.t=0,end.t=5,plot.MMrate=FALSE) # Shows how to change time period plotted, how to change the maturities plotted, and how to turn off plotting the money market rate
h.mkt.updown <- genHistory.market(
i.fxn=quote(1/(10*exp(t))*t^2+.02),
start.t=0,end.t=5,f=.5
)
plot(h.mkt.updown) # Note that it automatically jitters the coordinates so they are visible
# - Accounts - #
# Creating accounts
prts <- list(prt1,prt2,cash(name="cash1",value=300,mkt=mkt1))
acct1 <- account(prts=prts,hist.mkt=h.mkt.updown)
acct2 <- account(prts=prts,hist.mkt=h.mkt.simple)
# Looking at account
acct1
cat("Observe that the value invested doesn't equal the sum of the par values of the bonds plus the cash holdings!\n")
cat("What happened? Recall that bnd.C we created somewhat nonsensibly with a different prevailing interest rate.\n")
cat("Therefore, although its par is $1000, its coupon is higher, and it immediately became worth more.\n")
cat("pv() will calculate the present value of a particular object, such as our bond: $",pv(bnd.C,mkt=mkt1),"\n")
cat("It works for other objects too. Here's the present value of prt1: $",pv(prt1,mkt=mkt1),"\n")
# Seeing what happens in the future
summary(acct1,t=5,rebal.function.args=list(min.bond.size=1000,new.bond.dur=5,new.bond.mat=NA)) # this is using the default rebalance function
### More examples ###
# Flat yield curve
mkt1 <- market(market.bond(i=.05),t=0)
mkt2 <- market(market.bond(i=.07),t=0)
bonds.ladder <- list(
bond(mkt=mkt1,mat=1),
bond(mkt=mkt1,mat=2),
bond(mkt=mkt1,mat=3),
bond(mkt=mkt1,mat=4),
bond(mkt=mkt1,mat=5),
bond(mkt=mkt1,mat=6),
bond(mkt=mkt1,mat=7),
bond(mkt=mkt1,mat=8),
bond(mkt=mkt1,mat=9),
bond(mkt=mkt1,mat=10)
)
prt.ladder <- portfolio(name="Ladder",bonds=bonds.ladder,mkt=mkt1)
prt.bul <- portfolio(name="Bullet",bonds=list(
bond(mkt=mkt1,mat=4),
bond(mkt=mkt1,mat=4),
bond(mkt=mkt1,mat=5),
bond(mkt=mkt1,mat=5),
bond(mkt=mkt1,mat=6),
bond(mkt=mkt1,mat=6),
bond(mkt=mkt1,mat=7),
bond(mkt=mkt1,mat=7),
bond(mkt=mkt1,mat=4),
bond(mkt=mkt1,mat=5)
),mkt=mkt1)
prt.ladder
prt.bul
summary(prt.ladder)
summary(prt.bul)
cat("After changing interest rates from 5% to 7%:\n")
summary(prt.ladder,mkt=mkt2)
summary(prt.bul,mkt=mkt2)
# Sharply upward-sloping yield curve (MMrate=.026, 1-year 5%, 5-year 9.5%, 10-year 12%, 20-year 15%)
mkt.bond.up <- market.bond(yield.curve=quote(log(mat+1.7)/20))
##mkt.bond.up <- market.bond(i=.05)
mkt.up0 <- market(mkt.bond.up,t=0)
prt.ladder <- portfolio(name="Ladder",bonds=list(
bond(mkt=mkt.up0,mat=1),
bond(mkt=mkt.up0,mat=2),
bond(mkt=mkt.up0,mat=3),
bond(mkt=mkt.up0,mat=4),
bond(mkt=mkt.up0,mat=5),
bond(mkt=mkt.up0,mat=6),
bond(mkt=mkt.up0,mat=7),
bond(mkt=mkt.up0,mat=8),
bond(mkt=mkt.up0,mat=9),
bond(mkt=mkt.up0,mat=10)
),mkt=mkt.up0)
prt.bul <- portfolio(name="Bullet",bonds=list(
bond(mkt=mkt.up0,mat=4),
bond(mkt=mkt.up0,mat=4),
bond(mkt=mkt.up0,mat=5),
bond(mkt=mkt.up0,mat=5),
bond(mkt=mkt.up0,mat=6),
bond(mkt=mkt.up0,mat=6),
bond(mkt=mkt.up0,mat=5),
bond(mkt=mkt.up0,mat=5),
bond(mkt=mkt.up0,mat=4),
bond(mkt=mkt.up0,mat=5)
),mkt=mkt.up0)
mkt.up.hist <- history.market(list(
market(mkt.bond.up,t=0),
market(mkt.bond.up,t=40)
))
#plot(mkt.up.hist,end.t=50)
acct.ladder <- account(prts=list(prt.ladder),hist.mkt=mkt.up.hist)
acct.bul <- account(prts=list(prt.bul),hist.mkt=mkt.up.hist)
#sum.ladder <- summary(acct.ladder,t=20,rebal.function.args=list(min.bond.size=1000,new.bond.mat=10,new.bond.dur=NA,sell.mat=NA))
sum.bul <- summary(acct.bul,t=20,rebal.function.args=list(min.bond.size=1000,new.bond.mat=NA,new.bond.dur=3.8,sell.mat=0))
#plot(sum.ladder,main="Ladder")
plot(sum.bul,main="Bullet", which="pv")
plot(sum.bul,main="Bullet", which="duration")
plot(sum.bul,main="Bullet", which=c("pv","duration"))
## Still another example: Portfolio values under rising interest rates
# Generate a market history with rising interest rate (10 percentage point total gain over 20 years), starting at a low value, with parallel yield curve shifts
# Yield curve from 7/1/13, fit with lognormal regression with no intercept
mkt3070113list <- lapply(
seq(0,19),
function(t) { market(market.bond(
yield.curve=eval(substitute(quote(.005*t + log(mat)*1.005 / 100),list(t=t))),
MMrate=.0001+0.005*t
),t=t) }
)
h.mkt.risingParallelYC <- history.market(mkt3070113list)
plot(h.mkt.risingParallelYC)
prt.ladder <- portfolio(bonds=bonds.ladder,mkt=mkt3070113list[[1]])
dur.ladder <- duration(prt.ladder,mkt=mkt3070113list[[1]])
acct.ladder <- account(
list(
prt.ladder,
cash(name="cash1",value=0,mkt=mkt3070113list[[1]])
), hist.mkt=h.mkt.risingParallelYC
)
sum.acct.risingParallelYC <- summary(acct.ladder,t=20,rebal.function.args=list(min.bond.size=1000,new.bond.mat=NA,new.bond.dur=dur.ladder,sell.mat=0))
# With falling interest rate
mkt3070113listRev <- lapply(
seq(0,19),
function(t) { market(market.bond(
yield.curve=eval(substitute(quote(.005*(19-t) + log(mat)*1.005 / 100),list(t=t))),
MMrate=.0001+0.005*(19-t)
),t=t) }
)
h.mkt.fallingParallelYC <- history.market(mkt3070113listRev)
plot(h.mkt.fallingParallelYC)
prt.ladder <- portfolio(bonds=bonds.ladder,mkt=mkt3070113listRev[[1]])
dur.ladder <- duration(prt.ladder,mkt=mkt3070113listRev[[1]])
acct.ladder <- account(
list(
prt.ladder,
cash(name="cash1",value=0,mkt=mkt3070113listRev[[1]])
), hist.mkt=h.mkt.fallingParallelYC
)
sum.acct.fallingParallelYC <- summary(acct.ladder,t=1,rebal.function.args=list(min.bond.size=1000,new.bond.mat=NA,new.bond.dur=dur.ladder,sell.mat=0))
sum.acct.fallingParallelYC |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/covariance.R
\name{getCormatFirstOrder}
\alias{getCormatFirstOrder}
\title{get rho matrix first order}
\usage{
getCormatFirstOrder(rho, time.step = as.difftime(1, units = "hours"),
max.tao = as.difftime(1, units = "days"))
}
\arguments{
\item{rho}{the covariance, defined as the difference between the times divided by the time step}
\item{time.step}{default is 1 hour}
\item{max.tao}{don't consider covariance for values further apart than this.}
}
\value{
Covariance defined as the difference between the times divided by the time step; covariance is not
calculated for values further apart than max.tao.
}
\description{
get rho matrix first order
}
| /man/getCormatFirstOrder.Rd | no_license | jordansread/loadflex | R | false | true | 734 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/covariance.R
\name{getCormatFirstOrder}
\alias{getCormatFirstOrder}
\title{get rho matrix first order}
\usage{
getCormatFirstOrder(rho, time.step = as.difftime(1, units = "hours"),
max.tao = as.difftime(1, units = "days"))
}
\arguments{
\item{rho}{the covariance, defined as the difference between the times divided by the time step}
\item{time.step}{default is 1 hour}
\item{max.tao}{don't consider covariance for values further apart than this.}
}
\value{
Covariance defined as the difference between the times divided by the time step; covariance is not
calculated for values further apart than max.tao.
}
\description{
get rho matrix first order
}
|
plot4 <- function() {
  ## Project 1, Exploratory Data Analysis -- Plot 4.
  ##
  ## Reads the UC Irvine household power consumption data set
  ## (household_power_consumption.txt, ";"-separated, missing values coded
  ## as "?"), subsets it to 2007-02-01 and 2007-02-02, and writes a 2x2
  ## panel -- global active power, energy sub-metering, voltage and global
  ## reactive power versus date/time -- to ./plot4.png.
  ##
  ## Returns the string "done plot4" on completion.
  library(dplyr)
  ## Source file (downloaded 2015-06-06), expected in the working directory.
  fileUrl <- "./household_power_consumption.txt"
  ## colClasses = "character" suppresses automatic type conversion;
  ## na.strings = "?" matches the data set's missing-value code.
  dataPlotTotal <- read.table(fileUrl, header=TRUE, sep=";", na.strings="?",
                              colClasses = "character")
  ## Remove incomplete rows.
  dataPlotTotal <- dataPlotTotal[complete.cases(dataPlotTotal), ]
  ## Subset to the dates requested in Project 1 (dd/mm/yyyy in the raw file).
  dataPlotTarget <- dataPlotTotal[((dataPlotTotal$Date == "1/2/2007") |
                                     (dataPlotTotal$Date == "2/2/2007")), ]
  ## Merge Date & Time into a single Date column ...
  dataPlotTarget <- mutate(dataPlotTarget, Date = paste(Date, Time, sep=' '))
  ## ... and parse it into a date-time type.
  dataPlotTarget$Date <- strptime(dataPlotTarget$Date, format="%d/%m/%Y %H:%M:%S",
                                  tz="America/Los_Angeles")
  ## Drop the now-redundant Time variable.
  dataPlotTarget <- dataPlotTarget[ , c(1, 3:9)]
  date <- dataPlotTarget$Date
  lineColors <- c("black", "red", "blue")
  ## Open the png device: 2x2 layout filled column-wise.
  png(filename="./plot4.png", width=480, height=480, units="px")
  par(mfcol=c(2,2), mar=c(5,4,2,2), oma=c(2,2,2,2))
  ## First plot in layout - same as Prog Assignment 1 - Plot 2
  with(dataPlotTarget, plot(date, Global_active_power, type="l", xlab="",
                            ylab="Global Active Power"))
  ## Second plot in layout - same as Prog Assignment 1 - Plot 3
  ## NOTE(review): `date` below is the full-length vector from this
  ## function's environment while Sub_metering_* comes from the subset;
  ## lengths can differ if any values are <= 0 -- confirm against the data.
  with(dataPlotTarget, plot(date, Sub_metering_1, type="n", xlab="", ylab="Energy sub metering"))
  with(subset(dataPlotTarget, dataPlotTarget$Sub_metering_1>0), points(date,
       Sub_metering_1, col="black", type="l"))
  with(subset(dataPlotTarget, dataPlotTarget$Sub_metering_2>0), points(date,
       Sub_metering_2, col="red", type="l"))
  with(subset(dataPlotTarget, dataPlotTarget$Sub_metering_3>0), points(date,
       Sub_metering_3, col="blue", type="l"))
  legend("topright", col=lineColors, lty=1, bty="n", cex=0.70, legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
  ## Third plot in layout: voltage over time.
  with(dataPlotTarget, plot(date, Voltage, type="l", xlab="date/time"))
  ## Fourth plot in layout: global reactive power over time.
  with(dataPlotTarget, plot(date, Global_reactive_power, type="l", xlab="date/time"))
  ## Close the png device.
  dev.off()
  return("done plot4")
}
| /plot4.R | no_license | wiju/ExData_Plotting1 | R | false | false | 3,487 | r | plot4 <- function() {
## Project 1 Exploratory Data Analysis
## Use strptime() and as.Date() to convert the text entries
## to date data types
## Note that in this dataset missing values are coded as ?.
library(lubridate)
library(dplyr)
## Reads whole UC Irvine householdx power data set in &
## reeadies data for plotting
dataPlotTotal <- data.frame()
dataPlotTarget <- data.frame()
dataSubs <- vector()
## Read file
fileUrl <- "./household_power_consumption.txt"
dateDownloaded <- "2015-06-06"
list.files()
## Read data into table; colClasses = char to suppress conversions
dataPlotTotal <- read.table(fileUrl, header=TRUE, sep=";", na.strings="?",
colClasses = "character")
## Remove incomplete data
dataPlotTotal <- dataPlotTotal[complete.cases(dataPlotTotal), ]
## Subset data to just the dates requested in Project 1: 2/01-2/02/2007
dataPlotTarget <- dataPlotTotal[((dataPlotTotal$Date == "1/2/2007") |
(dataPlotTotal$Date == "2/2/2007")), ]
## Merge Date & Time variables into Date
dataPlotTarget <- mutate(dataPlotTarget, Date = paste(Date, Time, sep=' '))
## Convert Date from text to date type
dataPlotTarget$Date <- strptime(dataPlotTarget$Date, format="%d/%m/%Y %H:%M:%S",
tz="America/Los_Angeles")
## Remove unneeded Time variable
dataPlotTarget <- dataPlotTarget[ , c(1, 3:9)]
date <- dataPlotTarget$Date
weekday <- wday(date, label=TRUE, abbr=TRUE)
lineColors <- c("black", "red", "blue")
plotLayout <- c(2,2)
innerMargins <- c(5,4,2,2)
outerMargins <- c(2,2,2,2)
## open connection to png file device
png(filename="./plot4.png", width=480, height=480, units="px")
par(mfcol=plotLayout, mar=innerMargins, oma=outerMargins)
## First plot in layout - same as Prog Assignment 1 - Plot 2
with(dataPlotTarget, plot(date, Global_active_power, type="l", xlab="",
ylab="Global Active Power"))
## Second plot in layout - same as Prog Assignment 1 - Plot 3
with(dataPlotTarget, plot(date, Sub_metering_1, type="n", xlab="", ylab="Energy sub metering"))
with(subset(dataPlotTarget, dataPlotTarget$Sub_metering_1>0), points(date,
Sub_metering_1, col="black", type="l"))
with(subset(dataPlotTarget, dataPlotTarget$Sub_metering_2>0), points(date,
Sub_metering_2, col="red", type="l"))
with(subset(dataPlotTarget, dataPlotTarget$Sub_metering_3>0), points(date,
Sub_metering_3, col="blue", type="l"))
legend("topright", col=lineColors, lty=1, bty="n", cex=0.70, legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
## Third plot in layout
with(dataPlotTarget, plot(date, Voltage, type="l", xlab="date/time"))
## Fourth plot in layout
with(dataPlotTarget, plot(date, Global_reactive_power, type="l", xlab="date/time"))
## close connection to png file device
dev.off()
## print(head(dataPlotTarget[1:4], n=5)) ## test
## print(tail(dataPlotTarget[1:4], n=5)) ## test
return("done plot4")
}
|
## Quick demo script: plot z = t^2 for t in 0..10.
## Fixed: use the idiomatic assignment operator `<-` instead of `=`.
t <- 0:10
z <- t * t    # integer vector; t^2 would coerce to double
plot(t, z)
| /19_n^2.r | no_license | maxangelo987/CCS614-PL-MSCS2-19-20 | R | false | false | 24 | r | t=0:10
z= t*t
plot(t,z)
|
## Interactive console script: asks how many numbers will be entered,
## reads them from stdin, and prints their sum.
cat("==========================================\n")
cat(" SUMA DE NÚMEROS \n")
cat("==========================================\n\n")
cat("¿Cuántos números va a ingresar?\n")
# number of values the user will type
n <- scan("stdin", n = 1, quiet = TRUE)
cat("\nIngrese los números, presionando Enter luego de cada uno:\n")
v <- scan("stdin", n = n, quiet = TRUE)
# accumulate the total; seq_along() is safe when v is empty,
# whereas 1:length(v) would wrongly iterate over c(1, 0)
suma <- 0
for (i in seq_along(v)) {
  suma <- suma + v[i]
}
cat("\nLa suma de los números es:", suma, "\n")
| /archivos/suma.R | no_license | mpru/introprog | R | false | false | 460 | r | cat("==========================================\n")
cat(" SUMA DE NÚMEROS \n")
cat("==========================================\n\n")
cat("¿Cuántos números va a ingresar?\n")
n <- scan("stdin", n = 1, quiet = TRUE)
cat("\nIngrese los números, presionando Enter luego de cada uno:\n")
v <- scan("stdin", n = n, quiet = TRUE)
suma <- 0
for (i in 1:length(v)) {
suma <- suma + v[i]
}
cat("\nLa suma de los números es:", suma, "\n")
|
#' @title Function for DIb
#' @description The function computes the binomial dispersion index for a given number of trials \eqn{N\in \{1,2,\ldots\}}.
#' @param X A count random variable
#' @param N The number of trials of binomial distribution
#' @details
#' \code{dib.fun} computes the dispersion index with respect to the binomial distribution. See Touré et al. (2020) and Weiss (2018) for more details.
#' @importFrom stats var
#' @return Returns
#' \item{dib}{The binomial dispersion index}
#' @author
#' Aboubacar Y. Touré and Célestin C. Kokonendji
#' @references
#' Touré, A.Y., Dossou-Gbété, S. and Kokonendji, C.C. (2020). Asymptotic normality of the test statistics for relative dispersion and relative variation indexes, \emph{Journal of Applied Statistics} \bold{47}, 2479-2491.\cr
#' \cr
#' Weiss, C.H. (2018). An Introduction to Discrete-Valued Time Series. \emph{Wiley}, Hoboken NJ.
#' @export dib.fun
#'
#' @examples
#' X<-c(12,9,0,8,5,7,6,5,3,4,9,4)
#' dib.fun(X,12)
#' Y<-c(0,0,1,1,0,1,1)
#' dib.fun(Y,7)
dib.fun <- function(X, N) {
  ## Binomial dispersion index: empirical variance of X relative to the
  ## binomial variance m * (1 - m / N), where m is the sample mean.
  m <- mean(X)
  data.frame(dib = var(X) / (m * (1 - m / N)))
}
| /R/diB.R | no_license | cran/GWI | R | false | false | 1,133 | r | #' @title Function for DIb
#' @description The function computes the binomial dispersion index for a given number of trials \eqn{N\in \{1,2,\ldots\}}.
#' @param X A count random variable
#' @param N The number of trials of binomial distribution
#' @details
#' \code{dib.fun} computes the dispersion index with respect to the binomial distribution. See Touré et al. (2020) and Weiss (2018) for more details.
#' @importFrom stats var
#' @return Returns
#' \item{dib}{The binomial dispersion index}
#' @author
#' Aboubacar Y. Touré and Célestin C. Kokonendji
#' @references
#' Touré, A.Y., Dossou-Gbété, S. and Kokonendji, C.C. (2020). Asymptotic normality of the test statistics for relative dispersion and relative variation indexes, \emph{Journal of Applied Statistics} \bold{47}, 2479-2491.\cr
#' \cr
#' Weiss, C.H. (2018). An Introduction to Discrete-Valued Times Series. \emph{Wiley}, Hoboken NJ.
#' @export dib.fun
#'
#' @examples
#' X<-c(12,9,0,8,5,7,6,5,3,4,9,4)
#' dib.fun(X,12)
#' Y<-c(0,0,1,1,0,1,1)
#' dib.fun(Y,7)
dib.fun<-function(X,N){
data.frame(dib=var(X)/(mean(X)*(1-mean(X)/N)))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pred.acc.R
\name{pred.acc}
\alias{pred.acc}
\title{Predictive error and accuracy measures for predictive models based on cross-validation}
\usage{
pred.acc(obs, pred)
}
\arguments{
\item{obs}{a vector of observation values of validation samples.}
\item{pred}{a vector of prediction values of predictive models for validation samples.}
}
\value{
A list with the following components:
me, rme, mae, rmae, mse, rmse, rrmse, vecv and e1 for numerical data;
ccr, kappa, sens, spec and tss for categorical data with two levels; and
ccr, kappa for categorical data with more than two levels.
}
\description{
This function is used to calculate the mean error (me), mean absolute error
(mae), mean squared error (mse), relative me (rme), relative mae (rmae),
root mse (rmse), relative rmse (rrmse), variance explained by predictive
models based on cross-validation (vecv), and Legates and McCabe's E1 (e1) for numerical data; and
it also calculates correct classification rate (ccr), kappa (kappa), sensitivity (sens), specificity
(spec), and true skill statistic (tss) for categorical data with the observed (obs) data specified
as factor. They are based on the differences between the predicted values for and the observed values
of validation samples for cross-validation. For 0 and 1 data, the observed values need to be specified
as factor in order to use accuracy measures for categorical data. Moreover, sens, spec, tss and rmse are
for categorical data with two levels (e.g. presence and absence data).
}
\examples{
set.seed(1234)
x <- sample(1:30, 30)
e <- rnorm(30, 1)
y <- x + e
pred.acc(x, y)
y <- 0.8 * x + e
pred.acc(x, y)
}
\references{
Li, J., 2016. Assessing spatial predictive models in the environmental sciences: accuracy
measures, data variation and variance explained. Environmental Modelling & Software 80 1-8.
Li, J., 2017. Assessing the accuracy of predictive models for numerical data: Not r nor r2, why not?
Then what? PLOS ONE 12 (8): e0183250.
Allouche, O., Tsoar, A., Kadmon, R., 2006. Assessing the accuracy of species distribution models:
prevalence, kappa and true skill statistic (TSS). Journal of Applied Ecology 43 1223-1232.
}
\author{
Jin Li
}
| /man/pred.acc.Rd | no_license | cran/spm | R | false | true | 2,311 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pred.acc.R
\name{pred.acc}
\alias{pred.acc}
\title{Predictive error and accuracy measures for predictive models based on cross-validation}
\usage{
pred.acc(obs, pred)
}
\arguments{
\item{obs}{a vector of observation values of validation samples.}
\item{pred}{a vector of prediction values of predictive models for validation samples.}
}
\value{
A list with the following components:
me, rme, mae, rmae, mse, rmse, rrmse, vecv and e1 for numerical data;
ccr, kappa, sens, spec and tss for categorical data with two levels; and
ccr, kappa for categorical data with more than two levels.
}
\description{
This function is used to calculate the mean error (me), mean absolute error
(mae), mean squared error (mse), relative me (rme), relative mae (rmae),
root mse (rmse), relative rmse (rrmse), variance explained by predictive
models based on cross-validation (vecv), and Legates and McCabe's E1 (e1) for numerical data; and
it also calculates correct classification rate (ccr), kappa (kappa), sensitivity (sens), specificity
(spec), and true skill statistic (tss) for categorical data with the observed (obs) data specified
as factor. They are based on the differences between the predicted values for and the observed values
of validation samples for cross-validation. For 0 and 1 data, the observed values need to be specified
as factor in order to use accuracy measures for categorical data. Moreover, sens, spec, tss and rmse are
for categorical data with two levels (e.g. presence and absence data).
}
\examples{
set.seed(1234)
x <- sample(1:30, 30)
e <- rnorm(30, 1)
y <- x + e
pred.acc(x, y)
y <- 0.8 * x + e
pred.acc(x, y)
}
\references{
Li, J., 2016. Assessing spatial predictive models in the environmental sciences: accuracy
measures, data variation and variance explained. Environmental Modelling & Software 80 1-8.
Li, J., 2017. Assessing the accuracy of predictive models for numerical data: Not r nor r2, why not?
Then what? PLOS ONE 12 (8): e0183250.
Allouche, O., Tsoar, A., Kadmon, R., 2006. Assessing the accuracy of species distribution models:
prevalence, kappa and true skill statistic (TSS). Journal of Applied Ecology 43 1223-1232.
}
\author{
Jin Li
}
|
## This file will read in each of the source corpus files
## and then split them into Training and Test datasets
## as well as making some smaller sets for development.
##
## intermediate datasets will be saved into ./datawork
##
## files will be named source-group.txt
##   where source is the tag for the source file - blog, news, twit
##   and group is the subset of the original data
##     train100 - 100% of training portion - 70% of original
##     train50  - 50% of training portion
##     train10  - 10% of training portion
##     train1   - 1% of training portion
##     test1    - 50% of test portion - 30% of original
##     test2    - other 50% of test portion
##
# set up vectors for data loading: tag[i] names the variable that will hold
# the corpus read from file[i]
tag  <- c("news", "blog", "twit")
path <- "./Coursera-SwiftKey/final/en_US/"
file <- c("en_US.news.txt", "en_US.blogs.txt", "en_US.twitter.txt")
# load each corpus into a character vector named after its tag;
# paste(tag[i]) was a single-argument no-op, so use tag[i] directly,
# and seq_along() replaces the fragile 1:length() pattern
for (i in seq_along(file)) {
  assign(tag[i], readLines(con = paste0(path, file[i]), warn = FALSE, encoding = "UTF-8"))
}
# directory where all split datasets are written
spath <- "./datawork"
## Returns a length-n character vector that randomly assigns each index to
## one of the named groups in `blocks` (proportions), e.g. 70% "train",
## 15% "test1", 15% "test2".
splits <- function(n, blocks = c(train = 0.70, test1 = 0.15, test2 = 0.15)) {
  counts <- as.integer(n * blocks)
  # fold the integer-truncation remainder into the first group so that
  # the group sizes always sum to exactly n
  counts[1] <- counts[1] + (n - sum(counts))
  names(counts) <- names(blocks)
  # expand each group label to its count, then shuffle the assignments
  sample(rep(names(counts), counts))
}
# create split vector for each dataset
news.s <- splits(length(news))
blog.s <- splits(length(blog))
twit.s <- splits(length(twit))
bl <- c("train", "test1", "test2")
# split and write
for (i in 1:length(tag)){
for (j in 1:length(bl)){
fname <- paste(spath,"/",tag[i],"-",bl[j],".txt", sep="")
dataf <- paste(tag[i],"[",tag[i],".s=='",bl[j],"']",sep="")
datas <- eval(parse(text=dataf))
writeLines(datas, fname)
close(file(fname))
}
}
# now create the training subsets
for (i in 1:length(tag)){
assign(paste(tag[i],".train",sep=""), eval(parse(text=paste(tag[i],"[",tag[i],".s=='train']",sep=""))) )
}
# set up the split block
bl2 <- c(train50=0.5, train25=0.25, train10=0.10, train01=0.01, trainrest=0.14)
bl2n <- names(bl2)
# create the split vectors for each dataset
for (i in 1:length(tag)){
expr <- paste(tag[i],".t <- splits(length(",tag[i],".train), bl2)", sep="")
print(expr)
eval(parse(text=expr))
}
# split and write
for (i in 1:length(tag)){
for (j in 1:length(bl2n)){
fname <- paste(spath,"/",tag[i],"-",bl2n[j],".txt", sep="")
dataf <- paste(tag[i],".train[",tag[i],".t=='",bl2n[j],"']",sep="")
datas <- eval(parse(text=dataf))
writeLines(datas, fname)
close(file(fname))
}
}
| /3-SourceFileSplitting.R | no_license | MrCheerful/NLP-Project | R | false | false | 2,757 | r | ## This file will read in each of the source corpus files
## and then split them into Training and Test datasets
## as well as making some smaller sets for development.
##
## intermediate datasets will be saved into ./datawork
##
## files will be named source-group.txt
## where source is the tag for the source file - blog, news, twit
## and group is the subset of the original data
## train100 - 100% of training portion - 70% of original
## train50 - 50% of training portion
## train10 - 10% of training portion
## train1 - 1% of training portion
## test1 - 50% of test portion - 30% of original
## test2 - other 50% of test portion
##
# set up vectors for data loading
tag <- c("news", "blog", "twit")
path <- "./Coursera-SwiftKey/final/en_US/"
file <- c("en_US.news.txt", "en_US.blogs.txt", "en_US.twitter.txt")
# load the files
for (i in 1:length(file)){
assign(paste(tag[i]), readLines(con=paste(path,file[i],sep=""), warn=FALSE, encoding='UTF-8' ))
}
# set up vectors for splitting and saving
spath <- "./datawork"
# set up function for making a vector with splits to the dataset
splits <- function(n, blocks=c(train=0.70, test1=0.15, test2=0.15) ){
sp <- as.integer(n*blocks)
err <- n - sum(sp)
sp[1] <- sp[1] + err
names(sp) <- names(blocks)
sample(c(rep(names(sp),sp)))
}
# create split vector for each dataset
news.s <- splits(length(news))
blog.s <- splits(length(blog))
twit.s <- splits(length(twit))
bl <- c("train", "test1", "test2")
# split and write
for (i in 1:length(tag)){
for (j in 1:length(bl)){
fname <- paste(spath,"/",tag[i],"-",bl[j],".txt", sep="")
dataf <- paste(tag[i],"[",tag[i],".s=='",bl[j],"']",sep="")
datas <- eval(parse(text=dataf))
writeLines(datas, fname)
close(file(fname))
}
}
# now create the training subsets
for (i in 1:length(tag)){
assign(paste(tag[i],".train",sep=""), eval(parse(text=paste(tag[i],"[",tag[i],".s=='train']",sep=""))) )
}
# set up the split block
bl2 <- c(train50=0.5, train25=0.25, train10=0.10, train01=0.01, trainrest=0.14)
bl2n <- names(bl2)
# create the split vectors for each dataset
for (i in 1:length(tag)){
expr <- paste(tag[i],".t <- splits(length(",tag[i],".train), bl2)", sep="")
print(expr)
eval(parse(text=expr))
}
# split and write
for (i in 1:length(tag)){
for (j in 1:length(bl2n)){
fname <- paste(spath,"/",tag[i],"-",bl2n[j],".txt", sep="")
dataf <- paste(tag[i],".train[",tag[i],".t=='",bl2n[j],"']",sep="")
datas <- eval(parse(text=dataf))
writeLines(datas, fname)
close(file(fname))
}
}
|
## Simulates a relaxed random walk (RRW) of 2-D coordinates along a
## time-scaled phylogeny: every branch receives one bivariate Brownian
## displacement whose standard deviation is derived from the branch length
## and the branch-specific rate, optionally constrained to land on the
## non-NA cells of a set of rasters.
##
## Arguments:
##   tree                    - phylogeny (ape "phylo"-style: $edge,
##                             $edge.length, $tip.label, $Nnode); for nodes
##                             listed in fixedNodes it must also carry
##                             $annotations[[i]]$location (presumably added
##                             by a tree-reading helper - TODO confirm)
##   rates                   - numeric vector of branch rates, one per row
##                             of tree$edge (same order)
##   sigmas                  - per-axis scaling of the Brownian increments;
##                             sigmas[1] scales the t1 axis and sigmas[2]
##                             the t2 axis (t1/t2 hold latitude/longitude,
##                             given ancestPosition is read as c(lon, lat))
##   cor                     - correlation imposed between the two axes
##   envVariables            - list of rasters; a simulated position must
##                             fall on a non-NA cell of every raster
##   mostRecentSamplingDatum - calendar date of the most recent tip, used
##                             to convert root-to-node lengths into years
##   ancestPosition          - root position as c(longitude, latitude)
##   reciprocalRates         - TRUE: branch SD = sqrt(length/rate) (BEAST
##                             reciprocalRates="true"); FALSE: sqrt(length*rate)
##   n1                      - rotations tried before a displacement is re-drawn
##   n2                      - re-drawn displacements tried before the whole
##                             simulation is restarted from the root
##   showingPlots, newPlot   - optional live plotting of the simulation
##   fixedNodes              - node indices whose positions are read from
##                             tree$annotations instead of being simulated
##
## Returns: a numeric matrix with one row per tree branch and the columns in
## colNames (node indices, branch length, start/end coordinates, root-to-node
## path lengths, calendar years, great-circle distance in km).
##
## NOTE(review): relies on node.depth() (ape), raster::extract(),
## rdist.earth() (fields) and plotRaster() (package-internal) being available.
simulatorRRW1 = function(tree, rates, sigmas=c(0.1,0.1), cor=0, envVariables=list(), mostRecentSamplingDatum,
                         ancestPosition, reciprocalRates=TRUE, n1=100, n2=0, showingPlots=FALSE, newPlot=TRUE, fixedNodes=c()) {
    ## Rotates point pt2 around pt1 by "angle" radians (used to re-orient a
    ## proposed displacement that landed outside the accessible area while
    ## preserving its length)
    rotation = function(pt1, pt2, angle)
        {
            s = sin(angle); c = cos(angle)
            x = pt2[1]-pt1[1]; y = pt2[2]-pt1[2]
            x_new = (x*c)-(y*s); y_new = (x*s)+(y*c)
            x_new = x_new+pt1[1]; y_new = y_new+pt1[2]
            return(c(x_new,y_new))
        }
    nodesOnly = FALSE; pointCol = "red"
    colNames= c("node1","node2","length","startLat","startLon","endLat","endLon",
                "endNodeL","startNodeL","startYear","endYear","greatCircleDist_km")
    ## one row per branch of the tree
    simulation = matrix(nrow=length(tree$edge.length), ncol=length(colNames))
    colnames(simulation) = colNames
    # if (model == "fixed") phi_b = rep(1, length(tree$edge.length))
    # if (model == "cauchy") phi_b = rgamma(length(tree$edge.length), shape=0.5, scale=0.5)
    # if (model == "gamma") phi_b = rgamma(length(tree$edge.length), shape=halfDF, scale=halfDF)
    # if (model == "logN") phi_b = rlnorm(length(tree$edge.length), meanlog=1, sdlog=sdLogN)
    # if (model == "cauchy") sd_BM = sqrt(tree$edge.length/phi_b) # corresponds to BEAST reciprocalRates="true"
    # if (model == "gamma") sd_BM = sqrt(tree$edge.length/phi_b) # corresponds to BEAST reciprocalRates="true"
    # if (model == "logN") sd_BM = sqrt(tree$edge.length*phi_b) # corresponds to BEAST reciprocalRates="false"
    phi_b = rates
    ## branch-specific Brownian standard deviations, rate parameterisation
    ## matching BEAST's reciprocalRates flag
    if (reciprocalRates == FALSE) sd_BM = sqrt(tree$edge.length*phi_b)
    if (reciprocalRates == TRUE) sd_BM = sqrt(tree$edge.length/phi_b)
    ## node depths are used to visit nodes from the root downwards
    nd = node.depth(tree); nd_max = max(nd)
    ## t1/t2 hold the per-node coordinates, initialised at the root position
    t1 = rep(ancestPosition[2], length(tree$tip.label)+tree$Nnode)
    t2 = rep(ancestPosition[1], length(tree$tip.label)+tree$Nnode)
    if (showingPlots == TRUE)
        {
            if ((newPlot == TRUE)&(length(envVariables) > 0))
                {
                    plotRaster(rast=envVariables[[1]], cols="gray90", colNA="white", addBox=F)
                }
            points(cbind(ancestPosition[1],ancestPosition[2]), pch=16, col=pointCol, cex=0.5)
        }
    ## main simulation loop: i walks through depth levels; i is reset to 0
    ## deep inside the retry logic to signal "restart the whole simulation"
    i = 0
    while (i != (nd_max-1))
        {
            i = i+1
            my_nodes = which(nd==nd_max-i)
            if (length(my_nodes) > 0)
                {
                    for (j in 1:length(my_nodes))
                        {
                            my_node = my_nodes[j]; # print(c(i,my_nodes[j]))
                            parent_branch = match(my_node, tree$edge[,2])
                            parent_node = tree$edge[parent_branch,1]
                            simulatingNode = TRUE
                            ## fixed nodes take their position from the tree annotations
                            if (my_node%in%fixedNodes)
                                {
                                    simulatingNode = FALSE
                                    index = which(tree$edge[,2] == my_node)
                                    new_t1 = tree$annotations[[index]]$location[[1]]
                                    new_t2 = tree$annotations[[index]]$location[[2]]
                                }
                            if (simulatingNode == TRUE)
                                {
                                    onTheArea = FALSE
                                    ## draw a correlated bivariate Brownian displacement
                                    sd_bm = sd_BM[parent_branch]
                                    increment1 = rnorm(1)*sd_bm
                                    increment2 = rnorm(1)*sd_bm
                                    increment2 = sigmas[2]*((cor*increment1)+(sqrt(1-cor^2)*increment2))
                                    increment1 = sigmas[1]*increment1
                                    new_t1 = t1[parent_node] + increment1
                                    new_t2 = t2[parent_node] + increment2
                                    ## accept only positions on non-NA cells of every raster
                                    if (length(envVariables) > 0)
                                        {
                                            onTheArea = TRUE
                                            for (k in 1:length(envVariables))
                                                {
                                                    if (is.na(raster::extract(envVariables[[k]],cbind(new_t2,new_t1))))
                                                        {
                                                            onTheArea = FALSE
                                                        }
                                                }
                                        } else {
                                            onTheArea = TRUE
                                        }
                                    ## rejection loop: rotate the displacement up to n1 times,
                                    ## then re-draw it up to n2 times, then give up (i = 0
                                    ## triggers a full restart of the simulation)
                                    if (onTheArea == FALSE)
                                        {
                                            c2 = 0; c1 = 0
                                            pt1 = cbind(t2[parent_node],t1[parent_node])
                                            pt2 = cbind(new_t2,new_t1)
                                            while (onTheArea == FALSE)
                                                {
                                                    c2 = c2+1; # print(c(c2,c1))
                                                    if (n1 > 0)
                                                        {
                                                            angle = (2*pi)*runif(1)
                                                            pt2_rotated = rotation(pt1, pt2, angle)
                                                            onTheArea = TRUE
                                                            for (k in 1:length(envVariables))
                                                                {
                                                                    if (is.na(raster::extract(envVariables[[k]],cbind(pt2_rotated[1],pt2_rotated[2]))))
                                                                        {
                                                                            onTheArea = FALSE
                                                                        } else {
                                                                            new_t1 = pt2_rotated[2]
                                                                            new_t2 = pt2_rotated[1]
                                                                        }
                                                                }
                                                        }
                                                    if (c2 > n1)
                                                        {
                                                            c2 = 0; c1 = c1+1
                                                            if (c1 > n2)
                                                                {
                                                                    onTheArea = TRUE; i = 0
                                                                } else {
                                                                    # print(paste0("...re-simulating a branch - node:", my_node))
                                                                    increment1 = rnorm(1)*sd_bm
                                                                    increment2 = rnorm(1)*sd_bm
                                                                    increment2 = sigmas[2]*((cor*increment1)+(sqrt(1-cor^2)*increment2))
                                                                    increment1 = sigmas[1]*increment1
                                                                    new_t1 = t1[parent_node] + increment1
                                                                    new_t2 = t2[parent_node] + increment2
                                                                    onTheArea = TRUE
                                                                    for (k in 1:length(envVariables))
                                                                        {
                                                                            if (is.na(raster::extract(envVariables[[k]],cbind(new_t2,new_t1))))
                                                                                {
                                                                                    onTheArea = FALSE
                                                                                }
                                                                        }
                                                                }
                                                        }
                                                }
                                        }
                                }
                            t1[my_node] = new_t1
                            t2[my_node] = new_t2
                            if (showingPlots == TRUE)
                                {
                                    if (nodesOnly == FALSE)
                                        {
                                            segments(t2[parent_node], t1[parent_node], new_t2, new_t1, col=pointCol, lwd=0.2)
                                            points(cbind(new_t2,new_t1), pch=16, col=pointCol, cex=0.25)
                                        } else {
                                            points(cbind(new_t2,new_t1), pch=16, col=pointCol, cex=0.25)
                                        }
                                }
                        }
                    ## i == 0 means a branch could not be placed: restart from the root
                    if (i == 0)
                        {
                            cat(paste0("...re-starting the simulation\n"))
                            t1 = rep(ancestPosition[2], length(tree$tip.label)+tree$Nnode)
                            t2 = rep(ancestPosition[1], length(tree$tip.label)+tree$Nnode)
                            if (showingPlots == TRUE)
                                {
                                    plotRaster(rast=envVariables[[1]], cols="gray90", colNA="white", addBox=F, new=F)
                                    points(cbind(ancestPosition[1],ancestPosition[2]), pch=16, col=pointCol, cex=0.5)
                                }
                        }
                }
        }
    ## fill the per-branch output matrix with coordinates and distances
    x = t2; y = t1
    for (i in 1:dim(tree$edge)[1])
        {
            node_i = tree$edge[i,1]
            node_f = tree$edge[i,2]
            simulation[i,"node1"] = node_i
            simulation[i,"node2"] = node_f
            simulation[i,"length"] = tree$edge.length[i]
            simulation[i,"startLat"] = y[node_i]
            simulation[i,"startLon"] = x[node_i]
            simulation[i,"endLat"] = y[node_f]
            simulation[i,"endLon"] = x[node_f]
            x1 = cbind(x[node_i],y[node_i]); x2 = cbind(x[node_f],y[node_f])
            simulation[i,"greatCircleDist_km"] = rdist.earth(x1, x2, miles=FALSE, R=NULL)
        }
    ## accumulate root-to-node path lengths by walking each branch back to the
    ## root: row j of ll collects the branch lengths on the path of branch j
    ## NOTE(review): ll[j,k+1] can reach column l+1 if a root-to-node path had
    ## l branches; in practice paths are shorter, so subMat1 empties first -
    ## TODO confirm for degenerate (fully pectinate) trees
    l = length(simulation[,1])
    ll = matrix(1:l,nrow=l,ncol=l); ll[] = 0
    for (j in 1:l)
        {
            subMat = simulation[j,2]
            subMat = subset(simulation,simulation[,2]==subMat)
            ll[j,1] = subMat[,3]
            subMat = subMat[1,1]
            subMat1 = subset(simulation,simulation[,2]==subMat)
            for (k in 1:l)
                {
                    if (nrow(subMat1) > 0)
                        {
                            ll[j,k+1] = subMat1[,3]
                            subMat2 = subMat1[1,1]
                            subMat1 = subset(simulation,simulation[,2]==subMat2)
                        }
                }
        }
    endNodeL = rowSums(ll) # root to node distance for each node
    simulation[,"endNodeL"] = endNodeL
    ## the start-node distance of a branch is the end-node distance of its
    ## parent branch (0 for branches starting at the root)
    startNodeL = matrix(1:l,nrow=l,ncol=1)
    startNodeL[] = 0
    for (j in 1:l)
        {
            r = simulation[j,1]
            s = subset(simulation,simulation[,2]==r)
            for (k in 1:l)
                {
                    if (nrow(s) > 0)
                        {
                            startNodeL[j,1] = s[,"endNodeL"]
                        }
                }
        }
    simulation[,"startNodeL"] = startNodeL
    ## convert path lengths to calendar years: the deepest node is anchored
    ## at mostRecentSamplingDatum
    maxEndLIndice = which.max(simulation[,"endNodeL"])
    maxEndL = simulation[maxEndLIndice,"endNodeL"]
    endYear = matrix(simulation[,"endNodeL"]-maxEndL)
    endYear = matrix(mostRecentSamplingDatum+(endYear[,1]))
    startYear = matrix(simulation[,"startNodeL"]-maxEndL)
    startYear = matrix(mostRecentSamplingDatum+(startYear[,1]))
    simulation[,c("startYear","endYear")] = cbind(startYear,endYear)
    if (showingPlots == TRUE) dev.off()
    return(simulation)
}
| /windows/R/simulatorRRW1.r | no_license | sdellicour/seraphim | R | false | false | 7,914 | r | simulatorRRW1 = function(tree, rates, sigmas=c(0.1,0.1), cor=0, envVariables=list(), mostRecentSamplingDatum,
ancestPosition, reciprocalRates=TRUE, n1=100, n2=0, showingPlots=FALSE, newPlot=TRUE, fixedNodes=c()) {
rotation = function(pt1, pt2, angle)
{
s = sin(angle); c = cos(angle)
x = pt2[1]-pt1[1]; y = pt2[2]-pt1[2]
x_new = (x*c)-(y*s); y_new = (x*s)+(y*c)
x_new = x_new+pt1[1]; y_new = y_new+pt1[2]
return(c(x_new,y_new))
}
nodesOnly = FALSE; pointCol = "red"
colNames= c("node1","node2","length","startLat","startLon","endLat","endLon",
"endNodeL","startNodeL","startYear","endYear","greatCircleDist_km")
simulation = matrix(nrow=length(tree$edge.length), ncol=length(colNames))
colnames(simulation) = colNames
# if (model == "fixed") phi_b = rep(1, length(tree$edge.length))
# if (model == "cauchy") phi_b = rgamma(length(tree$edge.length), shape=0.5, scale=0.5)
# if (model == "gamma") phi_b = rgamma(length(tree$edge.length), shape=halfDF, scale=halfDF)
# if (model == "logN") phi_b = rlnorm(length(tree$edge.length), meanlog=1, sdlog=sdLogN)
# if (model == "cauchy") sd_BM = sqrt(tree$edge.length/phi_b) # corresponds to BEAST reciprocalRates="true"
# if (model == "gamma") sd_BM = sqrt(tree$edge.length/phi_b) # corresponds to BEAST reciprocalRates="true"
# if (model == "logN") sd_BM = sqrt(tree$edge.length*phi_b) # corresponds to BEAST reciprocalRates="false"
phi_b = rates
if (reciprocalRates == FALSE) sd_BM = sqrt(tree$edge.length*phi_b)
if (reciprocalRates == TRUE) sd_BM = sqrt(tree$edge.length/phi_b)
nd = node.depth(tree); nd_max = max(nd)
t1 = rep(ancestPosition[2], length(tree$tip.label)+tree$Nnode)
t2 = rep(ancestPosition[1], length(tree$tip.label)+tree$Nnode)
if (showingPlots == TRUE)
{
if ((newPlot == TRUE)&(length(envVariables) > 0))
{
plotRaster(rast=envVariables[[1]], cols="gray90", colNA="white", addBox=F)
}
points(cbind(ancestPosition[1],ancestPosition[2]), pch=16, col=pointCol, cex=0.5)
}
i = 0
while (i != (nd_max-1))
{
i = i+1
my_nodes = which(nd==nd_max-i)
if (length(my_nodes) > 0)
{
for (j in 1:length(my_nodes))
{
my_node = my_nodes[j]; # print(c(i,my_nodes[j]))
parent_branch = match(my_node, tree$edge[,2])
parent_node = tree$edge[parent_branch,1]
simulatingNode = TRUE
if (my_node%in%fixedNodes)
{
simulatingNode = FALSE
index = which(tree$edge[,2] == my_node)
new_t1 = tree$annotations[[index]]$location[[1]]
new_t2 = tree$annotations[[index]]$location[[2]]
}
if (simulatingNode == TRUE)
{
onTheArea = FALSE
sd_bm = sd_BM[parent_branch]
increment1 = rnorm(1)*sd_bm
increment2 = rnorm(1)*sd_bm
increment2 = sigmas[2]*((cor*increment1)+(sqrt(1-cor^2)*increment2))
increment1 = sigmas[1]*increment1
new_t1 = t1[parent_node] + increment1
new_t2 = t2[parent_node] + increment2
if (length(envVariables) > 0)
{
onTheArea = TRUE
for (k in 1:length(envVariables))
{
if (is.na(raster::extract(envVariables[[k]],cbind(new_t2,new_t1))))
{
onTheArea = FALSE
}
}
} else {
onTheArea = TRUE
}
if (onTheArea == FALSE)
{
c2 = 0; c1 = 0
pt1 = cbind(t2[parent_node],t1[parent_node])
pt2 = cbind(new_t2,new_t1)
while (onTheArea == FALSE)
{
c2 = c2+1; # print(c(c2,c1))
if (n1 > 0)
{
angle = (2*pi)*runif(1)
pt2_rotated = rotation(pt1, pt2, angle)
onTheArea = TRUE
for (k in 1:length(envVariables))
{
if (is.na(raster::extract(envVariables[[k]],cbind(pt2_rotated[1],pt2_rotated[2]))))
{
onTheArea = FALSE
} else {
new_t1 = pt2_rotated[2]
new_t2 = pt2_rotated[1]
}
}
}
if (c2 > n1)
{
c2 = 0; c1 = c1+1
if (c1 > n2)
{
onTheArea = TRUE; i = 0
} else {
# print(paste0("...re-simulating a branch - node:", my_node))
increment1 = rnorm(1)*sd_bm
increment2 = rnorm(1)*sd_bm
increment2 = sigmas[2]*((cor*increment1)+(sqrt(1-cor^2)*increment2))
increment1 = sigmas[1]*increment1
new_t1 = t1[parent_node] + increment1
new_t2 = t2[parent_node] + increment2
onTheArea = TRUE
for (k in 1:length(envVariables))
{
if (is.na(raster::extract(envVariables[[k]],cbind(new_t2,new_t1))))
{
onTheArea = FALSE
}
}
}
}
}
}
}
t1[my_node] = new_t1
t2[my_node] = new_t2
if (showingPlots == TRUE)
{
if (nodesOnly == FALSE)
{
segments(t2[parent_node], t1[parent_node], new_t2, new_t1, col=pointCol, lwd=0.2)
points(cbind(new_t2,new_t1), pch=16, col=pointCol, cex=0.25)
} else {
points(cbind(new_t2,new_t1), pch=16, col=pointCol, cex=0.25)
}
}
}
if (i == 0)
{
cat(paste0("...re-starting the simulation\n"))
t1 = rep(ancestPosition[2], length(tree$tip.label)+tree$Nnode)
t2 = rep(ancestPosition[1], length(tree$tip.label)+tree$Nnode)
if (showingPlots == TRUE)
{
plotRaster(rast=envVariables[[1]], cols="gray90", colNA="white", addBox=F, new=F)
points(cbind(ancestPosition[1],ancestPosition[2]), pch=16, col=pointCol, cex=0.5)
}
}
}
}
x = t2; y = t1
for (i in 1:dim(tree$edge)[1])
{
node_i = tree$edge[i,1]
node_f = tree$edge[i,2]
simulation[i,"node1"] = node_i
simulation[i,"node2"] = node_f
simulation[i,"length"] = tree$edge.length[i]
simulation[i,"startLat"] = y[node_i]
simulation[i,"startLon"] = x[node_i]
simulation[i,"endLat"] = y[node_f]
simulation[i,"endLon"] = x[node_f]
x1 = cbind(x[node_i],y[node_i]); x2 = cbind(x[node_f],y[node_f])
simulation[i,"greatCircleDist_km"] = rdist.earth(x1, x2, miles=FALSE, R=NULL)
}
l = length(simulation[,1])
ll = matrix(1:l,nrow=l,ncol=l); ll[] = 0
for (j in 1:l)
{
subMat = simulation[j,2]
subMat = subset(simulation,simulation[,2]==subMat)
ll[j,1] = subMat[,3]
subMat = subMat[1,1]
subMat1 = subset(simulation,simulation[,2]==subMat)
for (k in 1:l)
{
if (nrow(subMat1) > 0)
{
ll[j,k+1] = subMat1[,3]
subMat2 = subMat1[1,1]
subMat1 = subset(simulation,simulation[,2]==subMat2)
}
}
}
endNodeL = rowSums(ll) # root to node distance for each node
simulation[,"endNodeL"] = endNodeL
startNodeL = matrix(1:l,nrow=l,ncol=1)
startNodeL[] = 0
for (j in 1:l)
{
r = simulation[j,1]
s = subset(simulation,simulation[,2]==r)
for (k in 1:l)
{
if (nrow(s) > 0)
{
startNodeL[j,1] = s[,"endNodeL"]
}
}
}
simulation[,"startNodeL"] = startNodeL
maxEndLIndice = which.max(simulation[,"endNodeL"])
maxEndL = simulation[maxEndLIndice,"endNodeL"]
endYear = matrix(simulation[,"endNodeL"]-maxEndL)
endYear = matrix(mostRecentSamplingDatum+(endYear[,1]))
startYear = matrix(simulation[,"startNodeL"]-maxEndL)
startYear = matrix(mostRecentSamplingDatum+(startYear[,1]))
simulation[,c("startYear","endYear")] = cbind(startYear,endYear)
if (showingPlots == TRUE) dev.off()
return(simulation)
}
|
\name{DistanceMatrix}
\alias{DistanceMatrix}
\title{Pairwise distances between points in X and X.out}
\usage{
DistanceMatrix(X, X.out = X)
}
\arguments{
\item{X}{A numeric matrix of input points.}
\item{X.out}{A matrix of output points, whose distance to
every point in 'X' is desired.}
}
\value{
A matrix whose [i, j] component gives the Euclidean
distance from X.out[i, ] to X[j, ].
}
\description{
Computes the distance from every point in X to every
point in X.out. Both arguments are assumed to be numeric
matrices with as many columns as the dimensionality of
the space. (i.e., N 2D points would be represented by an
(N x 2) matrix, etc.) Vector arguments are assumed to be
1D points, and are automatically converted to matrices.
}
| /man/DistanceMatrix.Rd | no_license | chiphogg/gppois | R | false | false | 767 | rd | \name{DistanceMatrix}
\alias{DistanceMatrix}
\title{Pairwise distances between points in X and X.out}
\usage{
DistanceMatrix(X, X.out = X)
}
\arguments{
\item{X}{A numeric matrix of input points.}
\item{X.out}{A matrix of output points, whose distance to
every point in 'X' is desired.}
}
\value{
A matrix whose [i, j] component gives the Euclidean
distance from X.out[i, ] to X[j, ].
}
\description{
Computes the distance from every point in X to every
point in X.out. Both arguments are assumed to be numeric
matrices with as many columns as the dimensionality of
the space. (i.e., N 2D points would be represented by an
(N x 2) matrix, etc.) Vector arguments are assumed to be
1D points, and are automatically converted to matrices.
}
|
library(tidyverse)
## Download the extended gapminder dataset (expects an existing ./Data folder)
download.file(url = "https://raw.githubusercontent.com/dmi3kno/SWC-tidyverse/master/data/gapminder_plus.csv",
              destfile = "Data/gapminder_plus.csv")
# gapminder_plus is the gapminder data joined with the fertility and
# infant-mortality files
gapminder_plus <- read_csv(file = 'Data/gapminder_plus.csv')
## Identify the African countries with more than 2 million infant deaths in
## 2007, then plot the full time series of every variable for those
## countries, one facet per variable, labelling each facet's 2007 maximum.
## NOTE(review): gather() is superseded - consider pivot_longer() if the
## installed tidyr supports it.
gapminder_plus %>%
  filter(continent=='Africa',year==2007) %>%
  mutate(babies_dead=infantMort*pop/10^3) %>%          # infantMort is per 1,000 births - TODO confirm
  filter(babies_dead>2*10^6) %>%
  select(country) %>%
  left_join(gapminder_plus) %>%                        # bring back all years for the selected countries
  mutate(babies_dead=infantMort*pop/10^3,gdp_bln=gdpPercap/1e9,pop_mln=pop/1e6) %>%
  select(-c(continent,pop,babies_dead)) %>%
  gather(key=variable,value=values, -c(country,year)) %>%   # long format: one row per country/year/variable
  ggplot()+ # inside geom_text(), "." is the data coming down the magrittr pipe, filtered to each facet's 2007 maximum
  geom_text(data=. %>% filter(year==2007) %>% group_by(variable) %>%
              mutate(max_value=max(values)) %>%
              filter(values==max_value),mapping=aes(x=year-10,y=values,color=country,label=country))+#the shift is for the sake of visualisation
  geom_line(mapping=aes(x=year,y=values,color=country))+
  facet_wrap(~variable,scales = 'free_y')+
  labs(title='adfd',subtitle='xc',caption='gdhs',y=NULL,x='Year')+
  theme_bw()+
  theme(legend.position = 'none')
| /Scripts/ExerciseBeginningSecondDay.R | no_license | alecapg/R_tidyverse_Workshop_06_06_2017 | R | false | false | 1,373 | r | library(tidyverse)
download.file(url = "https://raw.githubusercontent.com/dmi3kno/SWC-tidyverse/master/data/gapminder_plus.csv",
destfile = "Data/gapminder_plus.csv")
#this containes the joint data of the files on fertility and mortality
gapminder_plus <- read_csv(file = 'Data/gapminder_plus.csv')
gapminder_plus %>%
filter(continent=='Africa',year==2007) %>%
mutate(babies_dead=infantMort*pop/10^3) %>%
filter(babies_dead>2*10^6) %>%
select(country) %>%
left_join(gapminder_plus) %>%
mutate(babies_dead=infantMort*pop/10^3,gdp_bln=gdpPercap/1e9,pop_mln=pop/1e6) %>%
select(-c(continent,pop,babies_dead)) %>%
gather(key=variable,value=values, -c(country,year)) %>%
ggplot()+ #. plece the data dropped by the pipe in that position. In this case is superfluos since if absent it always drop it in the first position, that in this case is data
geom_text(data=. %>% filter(year==2007) %>% group_by(variable) %>%
mutate(max_value=max(values)) %>%
filter(values==max_value),mapping=aes(x=year-10,y=values,color=country,label=country))+#the shift is for the sake of visualisation
geom_line(mapping=aes(x=year,y=values,color=country))+
facet_wrap(~variable,scales = 'free_y')+
labs(title='adfd',subtitle='xc',caption='gdhs',y=NULL,x='Year')+
theme_bw()+
theme(legend.position = 'none')
|
library(shiny)
library(dplyr)
library(readr)
library(ggplot2)
library(ggvis)

# Trade observations; ID gives each row a stable key for tooltip lookups.
tradeA <- read_csv('tradeA.csv')
tradeA$ID <- as.character(seq(1, length(tradeA$ccode)))
tradeA$tau_imput <- as.factor(tradeA$tau_imput)

# Country mapping: ccode -> display name.
C <- read_csv('IDE_ISIC.csv')
c <- C %>% group_by(ccode, name) %>%
  summarise(n = n())

shinyServer(function(input, output) {

  # Data filtered and augmented according to the UI controls:
  #  - maxxval caps inverse import penetration * inverse elasticity
  #  - imp_comp == 'yes' restricts to net importers
  #  - tar_type / ntb select how the barrier magnitude tauhat is computed
  trade_dat <- reactive({
    t <- filter(tradeA, inv_imp_pen_elast < input$maxxval)
    if (input$imp_comp == 'yes') {
      t <- filter(t, net_imp_tv > 0)
    }
    if (input$tar_type == 'MFN') {
      if (input$ntb == 'yes') {
        t$tauhat <- (t$tar_iwmfn + t$ave_core_wgt - 2) / (t$tar_iwmfn + t$ave_core_wgt - 1)
      } else {
        t$tauhat <- (t$tar_iwmfn - 1) / t$tar_iwmfn
      }
    }
    if (input$tar_type == 'Applied') {
      if (input$ntb == 'yes') {
        t$tauhat <- (t$tar_iwahs + t$ave_core_wgt - 2) / (t$tar_iwahs + t$ave_core_wgt - 1)
      } else {
        t$tauhat <- (t$tar_iwahs - 1) / t$tar_iwahs
      }
    }
    # Flag rows for the currently selected country; colour imputed tariffs.
    t$indicator <- as.factor(ifelse(t$ccode == input$country, 1, 0))
    t$imput_col <- ifelse(t$tau_imput == 0, 'darkorange', 'red')
    t <- as.data.frame(t)
    t
  })

  # HTML tooltip shown on hover: country code, year, industry name, alpha.
  trade_tooltip <- function(x) {
    if (is.null(x)) return(NULL)
    ttemp <- isolate(trade_dat())
    obs <- ttemp[ttemp$ID == x$ID, ]
    paste0("<b>", obs$ccode, "</b>", ", ", obs$year, "<br>",
           obs$isicnames, "<br>",
           paste('alpha = ', round(obs$alpha,3), sep = "")
    )
  }

  # Scatter of all observations; the selected country is drawn in orange.
  tradePlotA <- reactive({
    # BUG FIX: a reactive must be *called*; the original passed the reactive
    # object itself to filter()/ggvis(), which fails at runtime.
    tdat <- trade_dat()
    tradeC <- filter(tdat, indicator == 1)
    tradeNC <- filter(tdat, indicator == 0)
    tdat %>% ggvis(x = ~inv_imp_pen_elast, y = ~tauhat) %>%
      layer_points(size = ~alpha, size.hover := 200,
                   fillOpacity := 0.2, fillOpacity.hover := 0.5,
                   stroke := 'steelblue',
                   key := ~ID, data = tradeNC) %>%
      layer_points(size = ~alpha, size.hover := 200,
                   fillOpacity := 0.2, fillOpacity.hover := 0.5,
                   stroke := 'darkorange',
                   key := ~ID, data = tradeC) %>%
      add_tooltip(trade_tooltip, "hover") %>%
      add_axis("x", title = 'Inverse Import Penetration * Inverse Elasticity') %>%
      add_axis("y", title = 'Magnitude of Tariff and Nontariff Barriers')
  })
  output$plot1head <- renderText({'All Observations'})
  tradePlotA %>% bind_shiny('all')

  # Scatter restricted to the selected country, with a through-the-origin
  # linear fit; imputed tariff observations are stroked in red.
  tradePlotC <- reactive({
    tradec <- filter(trade_dat(), indicator == 1)
    tradec %>% ggvis(x = ~inv_imp_pen_elast, y = ~tauhat) %>%
      layer_points(size = ~alpha, size.hover := 500,
                   fillOpacity := 0.2, fillOpacity.hover := 0.5,
                   stroke := ~imput_col,
                   key := ~ID) %>%
      layer_model_predictions(model = 'lm', formula = tauhat ~ inv_imp_pen - 1) %>%
      add_tooltip(trade_tooltip, "hover") %>%
      add_axis("x", title = 'Inverse Import Penetration * Inverse Elasticity') %>%
      add_axis("y", title = 'Magnitude of Tariff and Nontariff Barriers') %>%
      hide_legend('stroke')
  })

  # Display name for the selected country code (header of the country panel).
  country <- reactive({
    n <- filter(c, ccode == input$country)
    n$name
  })
  output$plot2head <- renderText({country()})
  tradePlotC %>% bind_shiny('n')
})
## next steps:
# 1) allow for elasticity adjustment
# 2) look at different combinations of tariff types | /server.R | no_license | brendancooley/tpp-explorer | R | false | false | 3,418 | r | library(shiny)
library(dplyr)
library(readr)
library(ggplot2)
library(ggvis)
tradeA <- read_csv('tradeA.csv')
tradeA$ID <- as.character(seq(1, length(tradeA$ccode)))
tradeA$tau_imput <- as.factor(tradeA$tau_imput)
# Country mapping
C <- read_csv('IDE_ISIC.csv')
c <- C %>% group_by(ccode, name) %>%
summarise(n = n())
shinyServer(function(input, output) {
trade_dat <- reactive({
t <- tradeA
t <- filter(tradeA, inv_imp_pen_elast < input$maxxval)
if (input$imp_comp == 'yes') {
t <- filter(t, net_imp_tv > 0)
}
if (input$tar_type == 'MFN') {
if (input$ntb == 'yes') {
t$tauhat <- (t$tar_iwmfn + t$ave_core_wgt - 2) / (t$tar_iwmfn + t$ave_core_wgt - 1)
}
else {
t$tauhat <- (t$tar_iwmfn - 1) / t$tar_iwmfn
}
}
if (input$tar_type == 'Applied') {
if (input$ntb == 'yes') {
t$tauhat <- (t$tar_iwahs + t$ave_core_wgt - 2) / (t$tar_iwahs + t$ave_core_wgt - 1)
}
else {
t$tauhat <- (t$tar_iwahs - 1) / t$tar_iwahs
}
}
t$indicator <- as.factor(ifelse(t$ccode == input$country, 1, 0))
t$imput_col <- ifelse(t$tau_imput == 0, 'darkorange', 'red')
t <- as.data.frame(t)
t
})
country <- reactive ({
})
trade_tooltip <- function(x) {
if (is.null(x)) return(NULL)
ttemp <- isolate(trade_dat())
obs <- ttemp[ttemp$ID == x$ID, ]
paste0("<b>", obs$ccode, "</b>", ", ", obs$year, "<br>",
obs$isicnames, "<br>",
paste('alpha = ', round(obs$alpha,3), sep = "")
)
}
tradePlotA <- reactive({
tradeC <- filter(trade_dat, indicator == 1)
tradeNC <- filter(trade_dat, indicator == 0)
trade_dat %>% ggvis(x = ~inv_imp_pen_elast, y = ~tauhat) %>%
layer_points(size = ~alpha, size.hover := 200,
fillOpacity := 0.2, fillOpacity.hover := 0.5,
stroke := 'steelblue',
key := ~ID, data = tradeNC) %>%
layer_points(size = ~alpha, size.hover := 200,
fillOpacity := 0.2, fillOpacity.hover := 0.5,
stroke := 'darkorange',
key := ~ID, data = tradeC) %>%
add_tooltip(trade_tooltip, "hover") %>%
add_axis("x", title = 'Inverse Import Penetration * Inverse Elasticity') %>%
add_axis("y", title = 'Magnitude of Tariff and Nontariff Barriers')
})
output$plot1head <- renderText({'All Observations'})
tradePlotA %>% bind_shiny('all')
tradePlotC <- reactive({
tradec <- filter(trade_dat, indicator == 1)
tradec %>% ggvis(x = ~inv_imp_pen_elast, y = ~tauhat) %>%
layer_points(size = ~alpha, size.hover := 500,
fillOpacity := 0.2, fillOpacity.hover := 0.5,
stroke := ~imput_col,
key := ~ID) %>%
layer_model_predictions(model = 'lm', formula = tauhat ~ inv_imp_pen - 1) %>%
add_tooltip(trade_tooltip, "hover") %>%
add_axis("x", title = 'Inverse Import Penetration * Inverse Elasticity') %>%
add_axis("y", title = 'Magnitude of Tariff and Nontariff Barriers') %>%
hide_legend('stroke')
})
country <- reactive({
n <- filter(c, ccode == input$country)
n$name
})
output$plot2head <- renderText({country()})
tradePlotC %>% bind_shiny('n')
})
## next steps:
# 1) allow for elasticity adjustment
# 2) look at different combinations of tariff types |
# Floor a date to the first day of its month: builds the string
# "1 <month number> <year>" and re-parses it with dmy().
# NOTE(review): dmy()/month()/year() are not namespaced, so this assumes
# lubridate is attached by the caller -- TODO confirm.
mes <- function(x) {
dmy(paste("1", month(x), year(x)))
}
# Scratch / exploratory lines below: lll, fff and hhh are not defined in
# this file and are expected to exist in the calling environment.
p<-ggplot(lll, aes(x=fch_a1, y=total)) + geom_line()
fff %>% group_by(fch_a1) %>% summarise(total=sum(a1))
# Keep the id columns plus every column named a<digit>... or fch_*.
select(hhh, matches("(^a\\d)|(^fch_)"), Sexo, edad, UAP, grupo, Identificador_de_Paciente)->sss
# Reshape to long format, excluding the id columns and the a<digit> columns.
gather(sss, key, val, -c(Sexo, edad, UAP, grupo, Identificador_de_Paciente), -matches("^a\\d"))->ddd
| /PVS/mes.R | no_license | arturiax/shiny-server | R | false | false | 368 | r | mes <- function(x) {
dmy(paste("1", month(x), year(x)))
}
p<-ggplot(lll, aes(x=fch_a1, y=total)) + geom_line()
fff %>% group_by(fch_a1) %>% summarise(total=sum(a1))
select(hhh, matches("(^a\\d)|(^fch_)"), Sexo, edad, UAP, grupo, Identificador_de_Paciente)->sss
gather(sss, key, val, -c(Sexo, edad, UAP, grupo, Identificador_de_Paciente), -matches("^a\\d"))->ddd
|
# Download the annotation data files (CDF, ACS, UGP, UFL) for the
# Mapping10K_Xba142 chip type from the Aroma repository, logging progress
# through the verbose object.
library("aroma.core")

verbose <- Arguments$getVerbose(-8, timestamp = TRUE)
ar <- AromaRepository(verbose = TRUE)

verbose && enter(verbose, "Downloading annotation data")

chipType <- "Mapping10K_Xba142"
verbose && cat(verbose, "Chip type: ", chipType)

# Chip definition file.
path <- downloadCDF(ar, chipType)
verbose && cat(verbose, "CDF: ", path)

# Aroma cell sequence file (any tags).
path <- downloadACS(ar, chipType, tags = ".*")
verbose && cat(verbose, "ACS: ", path)

# Unit genome position file (any tags).
path <- downloadUGP(ar, chipType, tags = ".*")
verbose && cat(verbose, "UGP: ", path)

# Unit fragment length file (any tags).
path <- downloadUFL(ar, chipType, tags = ".*")
verbose && cat(verbose, "UFL: ", path)

verbose && exit(verbose)
| /aroma.affymetrix/inst/testScripts/robustness/chipTypes/Mapping10K_Xba142/01a.downloadAnnotData.R | no_license | ingted/R-Examples | R | false | false | 674 | r | library("aroma.core");
verbose <- Arguments$getVerbose(-8, timestamp=TRUE);
ar <- AromaRepository(verbose=TRUE);
verbose && enter(verbose, "Downloading annotation data");
chipType <- "Mapping10K_Xba142";
verbose && cat(verbose, "Chip type: ", chipType);
pathname <- downloadCDF(ar, chipType);
verbose && cat(verbose, "CDF: ", pathname);
pathname <- downloadACS(ar, chipType, tags=".*");
verbose && cat(verbose, "ACS: ", pathname);
pathname <- downloadUGP(ar, chipType, tags=".*");
verbose && cat(verbose, "UGP: ", pathname);
pathname <- downloadUFL(ar, chipType, tags=".*");
verbose && cat(verbose, "UFL: ", pathname);
verbose && exit(verbose);
|
# Copyright 2022 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
# ================
#' Retrieve daily data from ASWE sites, including archived and current-year data
#'
#' Downloads the relevant archived resources from the BC Data Catalogue for
#' the requested parameter and, when the current water year is requested,
#' appends this year's data via \code{daily_current()}. Hourly archive
#' resources are collapsed to daily values; the result is filtered to the
#' 16:00 UTC daily observation.
#' @param parameter Defines the parameter (type of data) you want to retrieve:
#'   one of "swe", "snow_depth", "precipitation" or "temperature".
#' @param get_year Define the year(s) that you want to retrieve. Defaults to "All".
#' @param id Station ID you are looking for.
#' @return A data frame with columns \code{id}, \code{date_utc},
#'   \code{parameter} and \code{value} (temperature comes back as
#'   \code{t_max}/\code{t_min} rows).
#' @keywords internal
#' @importFrom magrittr %>%
#' @importFrom bcdata bcdc_get_data
#' @export
#' @examples \dontrun{
#' daily_archive(parameter = "swe", get_year = "All", id = "2F05P")
#' }
daily_archive <- function(parameter = c("swe", "snow_depth", "precipitation", "temperature"), get_year = "All", id) {

  yr <- get_year

  # Knit the current year with past year data if you need both current and
  # archived data. (Scalar condition, so use the short-circuiting ||.)
  if (any(yr %in% c("all", "All", "ALL")) || any(yr %in% wtr_yr(Sys.Date()))) {

    if (parameter == "swe") {
      # Knit the daily SWE archive with daily SWE for this water year.
      data_i <- bcdata::bcdc_get_data("5e7acd31-b242-4f09-8a64-000af872d68f", resource = "666b7263-6111-488c-89aa-7480031f74cd") %>%
        dplyr::select(contains(c("DATE(UTC)", id)))
      # Column headers are "<id> <station name>"; keep only the id token.
      colnames(data_i) <- gsub(" .*$", "", colnames(data_i))
      # Melt to long format (one row per station/date).
      data <- data.frame(data_i, check.names = FALSE) %>%
        reshape::melt(id = "DATE(UTC)") %>%
        dplyr::mutate(parameter = parameter) %>%
        dplyr::rename(date_utc = "DATE(UTC)")
      # "variable" is absent when the requested station matched no columns.
      if ("variable" %in% colnames(data)) {
        data <- data %>%
          dplyr::rename(id = "variable") %>%
          dplyr::full_join(daily_current(parameter, id)) %>%
          dplyr::arrange(id, date_utc) %>%
          dplyr::filter(!is.na(value))
      } else {
        data <- data %>%
          dplyr::mutate(value = NA) %>%
          dplyr::full_join(daily_current(parameter, id)) %>%
          dplyr::arrange(id, date_utc) %>%
          dplyr::filter(!is.na(value))
      }

    } else if (parameter == "snow_depth") {
      # Snow depth from historic daily data - not always complete to the
      # present water year.
      historic_daily <- bcdata::bcdc_get_data("5e7acd31-b242-4f09-8a64-000af872d68f", resource = "945c144a-d094-4a20-a3c6-9fe74cad368a") %>%
        dplyr::filter(variable == "SD", Pillow_ID %in% paste0("_", id)) %>%
        dplyr::rename(id = "Pillow_ID", date_utc = "Date") %>%
        dplyr::mutate(parameter = "snow_depth", id = stringr::str_replace(id, "_", "")) %>%
        dplyr::select(-code, -variable)
      # Knit the daily snow depth available pre 2003 with hourly 2003-current.
      data <- bcdata::bcdc_get_data("5e7acd31-b242-4f09-8a64-000af872d68f", resource = "204f91d4-b136-41d2-98b3-125ecefd6887") %>%
        dplyr::select(contains(c(id, "DATE(UTC)")))
      colnames(data) <- gsub(" .*$", "", colnames(data))
      # Needs to be a data frame to melt.
      data <- data.frame(data, check.names = FALSE) %>%
        reshape::melt(id = "DATE(UTC)") %>%
        dplyr::mutate(parameter = parameter) %>%
        dplyr::rename(date_utc = "DATE(UTC)")
      if ("variable" %in% colnames(data)) {
        data <- data %>%
          dplyr::rename(id = "variable") %>%
          dplyr::mutate(date = as.Date(date_utc)) %>%
          # Collapse hourly values to a daily mean.
          dplyr::group_by(id, date, parameter) %>%
          dplyr::summarise(value = mean(value, na.rm = TRUE)) %>%
          # Cut out the data already covered by the daily archive and knit together.
          dplyr::rename(date_utc = "date") %>%
          dplyr::filter(date_utc > max(historic_daily$date_utc, na.rm = TRUE)) %>%
          dplyr::full_join(historic_daily) %>%
          dplyr::arrange(id, date_utc) %>%
          # Append current-year snow depth.
          dplyr::full_join(daily_current(parameter = parameter, id = id)) %>%
          dplyr::arrange(id, date_utc) %>%
          unique() %>%
          dplyr::filter(!is.na(value))
      } else {
        data <- data %>%
          dplyr::mutate(value = NA) %>%
          dplyr::mutate(date = as.Date(date_utc), parameter = "snow_depth") %>%
          dplyr::group_by(date, parameter) %>%
          dplyr::summarise(value = mean(value, na.rm = TRUE)) %>%
          # Cut out the data already covered by the daily archive and knit together.
          dplyr::rename(date_utc = "date") %>%
          dplyr::filter(date_utc > max(historic_daily$date_utc)) %>%
          dplyr::full_join(historic_daily) %>%
          dplyr::arrange(date_utc) %>%
          # Append current-year snow depth.
          dplyr::full_join(daily_current(parameter = parameter, id = id)) %>%
          dplyr::arrange(date_utc) %>%
          unique() %>%
          dplyr::filter(!is.na(value))
      }

    } else if (parameter == "precipitation") {
      # Accumulated precipitation from historic daily data - not always
      # complete to the present water year.
      historic_daily <- bcdata::bcdc_get_data("5e7acd31-b242-4f09-8a64-000af872d68f", resource = "945c144a-d094-4a20-a3c6-9fe74cad368a") %>%
        dplyr::filter(variable %in% c("AccumP"), Pillow_ID %in% paste0("_", id)) %>%
        dplyr::rename(id = "Pillow_ID", date_utc = "Date") %>%
        dplyr::mutate(parameter = "cum_precip", id = stringr::str_replace(id, "_", "")) %>%
        dplyr::select(-code, -variable)
      # Knit the precipitation available until 2003 to the current year data.
      # Note that precip data is only hourly from the data catalogue.
      # ************* WILL NEED TO CHANGE UTC BEFORE TAKING DAILY MEAN********************
      data <- bcdata::bcdc_get_data("5e7acd31-b242-4f09-8a64-000af872d68f", resource = "371a0479-1c6a-4f15-a456-11d778824f38") %>%
        dplyr::select(contains(c(id, "DATE(UTC)")))
      colnames(data) <- gsub(" .*$", "", colnames(data))
      data <- data.frame(data, check.names = FALSE) %>%
        reshape::melt(id = "DATE(UTC)") %>%
        dplyr::mutate(parameter = "cum_precip") %>%
        dplyr::rename(date_utc = "DATE(UTC)")
      if ("variable" %in% colnames(data)) {
        data <- data %>%
          dplyr::rename(id = "variable") %>%
          dplyr::mutate(date = as.Date(date_utc)) %>%
          dplyr::filter(date > max(historic_daily$date_utc)) %>%
          dplyr::group_by(id, date, parameter) %>%
          dplyr::summarise(value = mean(value, na.rm = TRUE)) %>%
          dplyr::rename(date_utc = date) %>%
          # Join with the daily mean.
          dplyr::full_join(historic_daily) %>%
          # Join with current-year daily mean precip.
          dplyr::full_join(daily_current(parameter = parameter, id = id)) %>%
          dplyr::arrange(id, date_utc) %>%
          unique() %>%
          dplyr::filter(!is.na(value))
      } else {
        data <- data %>%
          dplyr::mutate(value = NA) %>%
          dplyr::mutate(date = as.Date(date_utc)) %>%
          dplyr::filter(date > max(historic_daily$date_utc)) %>%
          dplyr::group_by(date, parameter) %>%
          dplyr::summarise(value = mean(value, na.rm = TRUE)) %>%
          dplyr::rename(date_utc = date) %>%
          # Join with the daily mean.
          dplyr::full_join(historic_daily) %>%
          # Join with current-year daily mean precip.
          dplyr::full_join(daily_current(parameter = parameter, id = id)) %>%
          dplyr::arrange(date_utc) %>%
          unique() %>%
          dplyr::filter(!is.na(value))
      }

    } else if (parameter == "temperature") {
      # T max and T min from historic daily data - not always complete to the
      # present water year.
      historic_daily <- bcdata::bcdc_get_data("5e7acd31-b242-4f09-8a64-000af872d68f", resource = "945c144a-d094-4a20-a3c6-9fe74cad368a") %>%
        dplyr::filter(variable %in% c("T_Max", "T_Min"), Pillow_ID %in% paste0("_", id)) %>%
        dplyr::rename(id = "Pillow_ID", date_utc = "Date") %>%
        dplyr::mutate(parameter = as.character(ifelse(variable == "T_Max", "t_max", "t_min")), id = as.character(stringr::str_replace(id, "_", ""))) %>%
        dplyr::select(-code, -variable)
      # Knit the hourly 2003-current temperatures onto the daily archive.
      data <- bcdata::bcdc_get_data(record = "5e7acd31-b242-4f09-8a64-000af872d68f", resource = "fba88311-34b9-4422-b5ae-572fd23b2a00") %>%
        dplyr::select(contains(c(id, "DATE(UTC)")))
      colnames(data) <- gsub(" .*$", "", colnames(data))
      # Needs to be a data frame to melt.
      data <- data.frame(data, check.names = FALSE) %>%
        reshape::melt(id = "DATE(UTC)") %>%
        dplyr::mutate(parameter = parameter) %>%
        dplyr::rename(date_utc = "DATE(UTC)")
      if ("variable" %in% colnames(data)) {
        data <- data %>%
          dplyr::rename(id = "variable") %>%
          dplyr::arrange(id, date_utc) %>%
          dplyr::mutate(date = as.Date(date_utc), "id" = id) %>%
          dplyr::filter(date > max(historic_daily$date_utc)) %>%
          # Daily extremes from the hourly values.
          dplyr::group_by(id, date) %>%
          dplyr::summarise(t_max = max(value, na.rm = TRUE),
                           t_min = min(value, na.rm = TRUE)) %>%
          dplyr::rename(date_utc = date) %>%
          reshape2::melt(id = c("date_utc", "id")) %>%
          dplyr::rename(parameter = "variable") %>%
          dplyr::full_join(historic_daily) %>%
          # Append current-year temperature.
          dplyr::full_join(daily_current(parameter = parameter, id = id)) %>%
          dplyr::arrange(id, date_utc) %>%
          unique() %>%
          dplyr::filter(!is.na(value)) %>%
          dplyr::filter(!is.infinite(value))
      } else {
        data <- data %>%
          dplyr::mutate(value = NA) %>%
          dplyr::mutate(date = as.Date(date_utc), "id" = id) %>%
          dplyr::filter(date > max(historic_daily$date_utc)) %>%
          dplyr::group_by(id, date) %>%
          dplyr::summarise(t_max = max(value, na.rm = TRUE),
                           t_min = min(value, na.rm = TRUE)) %>%
          dplyr::rename(date_utc = date) %>%
          reshape2::melt(id = c("date_utc", "id")) %>%
          dplyr::rename(parameter = "variable") %>%
          dplyr::full_join(historic_daily) %>%
          # Append current-year temperature.
          dplyr::full_join(daily_current(parameter = parameter, id = id)) %>%
          dplyr::arrange(date_utc) %>%
          unique() %>%
          dplyr::filter(!is.na(value)) %>%
          # FIX: drop the +/-Inf rows produced by max()/min() on all-NA days,
          # matching the sibling branch above.
          dplyr::filter(!is.infinite(value))
      }
    }

  } else {
    # Archived data only (no current water year requested).

    if (parameter == "swe") {
      data_i <- bcdata::bcdc_get_data("5e7acd31-b242-4f09-8a64-000af872d68f", resource = "666b7263-6111-488c-89aa-7480031f74cd") %>%
        dplyr::select(contains(c(id, "DATE(UTC)")))
      colnames(data_i) <- gsub(" .*$", "", colnames(data_i))
      # Melt to long format.
      data <- data.frame(data_i, check.names = FALSE) %>%
        reshape::melt(id = "DATE(UTC)") %>%
        dplyr::mutate(parameter = parameter) %>%
        dplyr::rename(date_utc = "DATE(UTC)")
      if ("variable" %in% colnames(data)) {
        data <- data %>%
          dplyr::filter(!is.na(value)) %>%
          dplyr::rename(id = "variable")
      } else {
        data <- data %>%
          dplyr::mutate(value = NA) %>%
          dplyr::filter(!is.na(value))
      }

    } else if (parameter == "snow_depth") {
      # Snow depth from historic daily data - not always complete to the
      # present water year.
      historic_daily <- bcdata::bcdc_get_data("5e7acd31-b242-4f09-8a64-000af872d68f", resource = "945c144a-d094-4a20-a3c6-9fe74cad368a") %>%
        dplyr::filter(variable == "SD", Pillow_ID %in% paste0("_", id)) %>%
        dplyr::rename(id = "Pillow_ID", date_utc = "Date") %>%
        dplyr::mutate(parameter = "snow_depth", id = stringr::str_replace(id, "_", "")) %>%
        dplyr::select(-code, -variable)
      # Knit the daily snow depth available pre 2003 with hourly 2003-current.
      data <- bcdata::bcdc_get_data("5e7acd31-b242-4f09-8a64-000af872d68f", resource = "204f91d4-b136-41d2-98b3-125ecefd6887") %>%
        dplyr::select(contains(c(id, "DATE(UTC)")))
      colnames(data) <- gsub(" .*$", "", colnames(data))
      # NOTE(review): unlike the branch above, "variable" is renamed before the
      # guard, so a station with no matching columns errors here rather than
      # taking the empty path -- consider aligning the guards.
      data <- data.frame(data, check.names = FALSE) %>%
        reshape::melt(id = "DATE(UTC)") %>%
        dplyr::mutate(parameter = parameter) %>%
        dplyr::rename(date_utc = "DATE(UTC)", id = "variable")
      if ("value" %in% colnames(data)) {
        data <- data %>%
          dplyr::mutate(date = as.Date(date_utc)) %>%
          dplyr::group_by(id, date, parameter) %>%
          dplyr::summarise(value = mean(value, na.rm = TRUE)) %>%
          # Cut out the data already covered by the daily archive and knit together.
          dplyr::rename(date_utc = "date") %>%
          dplyr::filter(date_utc > max(historic_daily$date_utc, na.rm = TRUE)) %>%
          dplyr::full_join(historic_daily) %>%
          dplyr::arrange(id, date_utc) %>%
          unique() %>%
          dplyr::filter(!is.na(value))
      } else {
        data <- data %>%
          dplyr::mutate(value = NA) %>%
          dplyr::mutate(date = as.Date(date_utc), parameter = "snow_depth") %>%
          dplyr::group_by(id, date, parameter) %>%
          dplyr::summarise(value = mean(value, na.rm = TRUE)) %>%
          # Cut out the data already covered by the daily archive and knit together.
          dplyr::rename(date_utc = "date") %>%
          dplyr::filter(date_utc > max(historic_daily$date_utc)) %>%
          dplyr::full_join(historic_daily) %>%
          dplyr::arrange(id, date_utc) %>%
          unique() %>%
          dplyr::filter(!is.na(value))
      }

    } else if (parameter == "precipitation") {
      # Precipitation from historic daily data - not always complete to the
      # present water year.
      historic_daily <- bcdata::bcdc_get_data("5e7acd31-b242-4f09-8a64-000af872d68f", resource = "945c144a-d094-4a20-a3c6-9fe74cad368a") %>%
        dplyr::filter(variable %in% c("AccumP"), Pillow_ID %in% paste0("_", id)) %>%
        dplyr::rename(id = "Pillow_ID", date_utc = "Date") %>%
        dplyr::mutate(parameter = "cum_precip", id = stringr::str_replace(id, "_", "")) %>%
        dplyr::select(-code, -variable)
      # Knit the precipitation available until 2003 to the current year data.
      # Note that precip data is only hourly from the data catalogue.
      # ************* WILL NEED TO CHANGE UTC BEFORE TAKING DAILY MEAN********************
      data <- bcdata::bcdc_get_data("5e7acd31-b242-4f09-8a64-000af872d68f", resource = "371a0479-1c6a-4f15-a456-11d778824f38") %>%
        dplyr::select(contains(c(id, "DATE(UTC)")))
      colnames(data) <- gsub(" .*$", "", colnames(data))
      # Needs to be a data frame to melt.
      # FIX: the original renamed from "DATE(UTC" (missing closing paren),
      # which fails because the melted column is named "DATE(UTC)".
      data <- data.frame(data, check.names = FALSE) %>%
        reshape::melt(id = "DATE(UTC)") %>%
        dplyr::mutate(parameter = "cum_precip") %>%
        dplyr::rename(date_utc = "DATE(UTC)", id = "variable")
      if ("value" %in% colnames(data)) {
        data <- data %>%
          dplyr::mutate(date = as.Date(date_utc)) %>%
          dplyr::filter(date > max(historic_daily$date_utc)) %>%
          dplyr::group_by(id, date, parameter) %>%
          dplyr::summarise(value = mean(value, na.rm = TRUE)) %>%
          dplyr::rename(date_utc = date) %>%
          # Join with the daily mean.
          dplyr::full_join(historic_daily) %>%
          dplyr::arrange(id, date_utc) %>%
          unique() %>%
          dplyr::filter(!is.na(value))
      } else {
        data <- data %>%
          dplyr::mutate(value = NA) %>%
          dplyr::mutate(date = as.Date(date_utc)) %>%
          dplyr::filter(date > max(historic_daily$date_utc)) %>%
          dplyr::group_by(id, date, parameter) %>%
          dplyr::summarise(value = mean(value, na.rm = TRUE)) %>%
          dplyr::rename(date_utc = date) %>%
          # Join with the daily mean.
          dplyr::full_join(historic_daily) %>%
          dplyr::arrange(id, date_utc) %>%
          unique() %>%
          dplyr::filter(!is.na(value))
      }

    } else if (parameter == "temperature") {
      # T max and T min from historic daily data - not always complete to the
      # present water year.
      historic_daily <- bcdata::bcdc_get_data("5e7acd31-b242-4f09-8a64-000af872d68f", resource = "945c144a-d094-4a20-a3c6-9fe74cad368a") %>%
        dplyr::filter(variable %in% c("T_Max", "T_Min"), Pillow_ID %in% paste0("_", id)) %>%
        dplyr::rename(id = "Pillow_ID", date_utc = "Date") %>%
        dplyr::mutate(parameter = as.character(ifelse(variable == "T_Max", "t_max", "t_min")), id = as.character(stringr::str_replace(id, "_", ""))) %>%
        dplyr::select(-code, -variable)
      # Knit the hourly 2003-current temperatures onto the daily archive.
      data <- bcdata::bcdc_get_data(record = "5e7acd31-b242-4f09-8a64-000af872d68f", resource = "fba88311-34b9-4422-b5ae-572fd23b2a00") %>%
        dplyr::select(contains(c(id, "DATE(UTC)")))
      colnames(data) <- gsub(" .*$", "", colnames(data))
      # Needs to be a data frame to melt.
      data <- data.frame(data, check.names = FALSE) %>%
        reshape::melt(id = "DATE(UTC)") %>%
        dplyr::mutate(parameter = parameter) %>%
        dplyr::rename(date_utc = "DATE(UTC)", id = "variable")
      if ("value" %in% colnames(data)) {
        data <- data %>%
          dplyr::mutate(date = as.Date(date_utc), "id" = id) %>%
          dplyr::filter(date > max(historic_daily$date_utc)) %>%
          # Daily extremes from the hourly values.
          dplyr::group_by(id, date) %>%
          dplyr::summarise(t_max = max(value, na.rm = TRUE),
                           t_min = min(value, na.rm = TRUE)) %>%
          dplyr::rename(date_utc = date) %>%
          reshape2::melt(id = c("date_utc", "id")) %>%
          dplyr::rename(parameter = "variable") %>%
          dplyr::full_join(historic_daily) %>%
          dplyr::arrange(id, date_utc) %>%
          unique() %>%
          dplyr::filter(!is.na(value)) %>%
          dplyr::filter(!is.infinite(value))
      } else {
        data <- data %>%
          dplyr::mutate(value = NA) %>%
          dplyr::mutate(date = as.Date(date_utc), "id" = id) %>%
          dplyr::filter(date > max(historic_daily$date_utc)) %>%
          dplyr::group_by(id, date) %>%
          dplyr::summarise(t_max = max(value, na.rm = TRUE),
                           t_min = min(value, na.rm = TRUE)) %>%
          dplyr::rename(date_utc = date) %>%
          reshape2::melt(id = c("date_utc", "id")) %>%
          dplyr::rename(parameter = "variable") %>%
          dplyr::full_join(historic_daily) %>%
          dplyr::arrange(date_utc) %>%
          unique() %>%
          dplyr::filter(!is.na(value)) %>%
          # FIX: drop +/-Inf rows from max()/min() on all-NA days, matching
          # the sibling branch above.
          dplyr::filter(!is.infinite(value))
      }
    }
  }

  # Filter for specified years and check that DAILY data only is present -
  # slight glitch in some stations where the data catalogue has hourly data.
  # (hour() returns an integer, so compare to 16, not the string "16".)
  if (any(yr %in% c("ALL", "all", "All"))) {
    data_o <- data %>%
      dplyr::filter(lubridate::hour(date_utc) == 16)
  } else {
    # Filter for the years you specify.
    data_o <- data %>%
      dplyr::filter(lubridate::year(date_utc) %in% yr) %>%
      dplyr::filter(lubridate::hour(date_utc) == 16)
  }
  return(data_o)
}
| /R/daily_archive_function.R | permissive | bcgov/bcsnowdata | R | false | false | 20,032 | r | # Copyright 2022 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
# ================
#' This function retrieves hourly data from aswe sites, including both archived and current year data
#' @param parameter Defines the parameter (type of data) your want to retrieve
#' @param get_year Define the year that you want to retrieve. Defaults to "All"
#' @param id Station ID you are looking for
#' @keywords internal
#' @importFrom magrittr %>%
#' @importFrom bcdata bcdc_get_data
#' @export
#' @examples \dontrun{}
daily_archive <- function(parameter = c("swe", "snow_depth", "precipitation", "temperature"), get_year = "All", id) {
yr <- get_year
# Knit the current year with past year data if you need both current and archived data
if (any(yr %in% c("all", "All", "ALL")) | any(yr %in% wtr_yr(Sys.Date()))) {
if (parameter == "swe") {
# knit the daily swe archive with daily SWE for this water year
data_i <- bcdata::bcdc_get_data("5e7acd31-b242-4f09-8a64-000af872d68f", resource = "666b7263-6111-488c-89aa-7480031f74cd") %>%
dplyr::select(contains(c("DATE(UTC)", id)))
colnames(data_i) <- gsub( " .*$", "", colnames(data_i))
# Melt dataframe
data <- data.frame(data_i, check.names = FALSE) %>%
reshape::melt(id = "DATE(UTC)") %>%
dplyr::mutate(parameter = parameter) %>%
dplyr::rename(date_utc = "DATE(UTC)")
if ("variable" %in% colnames(data)) {
data <- data %>%
dplyr::rename(id = "variable") %>%
dplyr::full_join(daily_current(parameter, id)) %>%
dplyr::arrange(id, date_utc) %>%
dplyr::filter(!is.na(value))
} else {
data <- data %>%
dplyr::mutate(value = NA) %>%
dplyr::full_join(daily_current(parameter, id)) %>%
dplyr::arrange(id, date_utc) %>%
dplyr::filter(!is.na(value))
}
} else if (parameter == "snow_depth") {
# Get snow depth from historic daily data - not always complete to present water year
historic_daily <- bcdata::bcdc_get_data("5e7acd31-b242-4f09-8a64-000af872d68f", resource = "945c144a-d094-4a20-a3c6-9fe74cad368a") %>%
dplyr::filter(variable == "SD", Pillow_ID %in% paste0("_", id)) %>%
dplyr::rename(id = "Pillow_ID", date_utc = "Date") %>%
dplyr::mutate(parameter = "snow_depth", id = stringr::str_replace(id, "_", "")) %>%
dplyr::select(-code, -variable)
# knit the daily snow depth available pre 2003 with hourly 2003-current
data <- bcdata::bcdc_get_data("5e7acd31-b242-4f09-8a64-000af872d68f", resource = "204f91d4-b136-41d2-98b3-125ecefd6887") %>%
dplyr::select(contains(c(id, "DATE(UTC)")))
colnames(data) <- gsub( " .*$", "", colnames(data))
# Needs to be a dataframe to melt + melt dataframe
data <- data.frame(data, check.names = FALSE) %>%
reshape::melt(id = "DATE(UTC)") %>%
dplyr::mutate(parameter = parameter) %>%
dplyr::rename(date_utc = "DATE(UTC)")
if ("variable" %in% colnames(data)) {
data <- data %>%
dplyr::rename(id = "variable") %>%
dplyr::mutate(date = as.Date(date_utc)) %>%
dplyr::group_by(id, date, parameter) %>%
dplyr::summarise(value = mean(value, na.rm = TRUE)) %>%
# cut out the data that is available within daily archive and knit together
dplyr::rename(date_utc = "date") %>%
dplyr::filter(date_utc > max(historic_daily$date_utc, na.rm = TRUE)) %>%
dplyr::full_join(historic_daily) %>%
dplyr::arrange(id, date_utc) %>%
# get current year sd
dplyr::full_join(daily_current(parameter = parameter, id = id)) %>%
dplyr::arrange(id, date_utc) %>%
unique() %>%
dplyr::filter(!is.na(value))
} else (
data <- data %>%
dplyr::mutate(value = NA) %>%
dplyr::mutate(date = as.Date(date_utc), parameter = "snow_depth") %>%
dplyr::group_by(date, parameter) %>%
dplyr::summarise(value = mean(value, na.rm = TRUE)) %>%
# cut out the data that is available within daily archive and knit together
dplyr::rename(date_utc = "date") %>%
dplyr::filter(date_utc > max(historic_daily$date_utc)) %>%
dplyr::full_join(historic_daily) %>%
dplyr::arrange(date_utc) %>%
# get current year sd
dplyr::full_join(daily_current(parameter = parameter, id = id)) %>%
dplyr::arrange(date_utc) %>%
unique() %>%
dplyr::filter(!is.na(value))
)
} else if (parameter == "precipitation") {
# Get t max and t min from historic daily data - not always complete to present water year
historic_daily <- bcdata::bcdc_get_data("5e7acd31-b242-4f09-8a64-000af872d68f", resource = "945c144a-d094-4a20-a3c6-9fe74cad368a") %>%
dplyr::filter(variable %in% c("AccumP"), Pillow_ID %in% paste0("_", id)) %>%
dplyr::rename(id = "Pillow_ID", date_utc = "Date") %>%
dplyr::mutate(parameter = "cum_precip", id = stringr::str_replace(id, "_", "")) %>%
dplyr::select(-code, -variable)
# knit the precipitation available until 2003 to the current year data.
# Note that precip data is only hourly from the data catalog.
# ************* WILL NEED TO CHANGE UTC BEFORE TAKING DAILY MEAN********************
data <- bcdata::bcdc_get_data("5e7acd31-b242-4f09-8a64-000af872d68f", resource = "371a0479-1c6a-4f15-a456-11d778824f38") %>%
dplyr::select(contains(c(id, "DATE(UTC)")))
colnames(data) <- gsub( " .*$", "", colnames(data))
data <- data.frame(data, check.names = FALSE) %>%
reshape::melt(id = "DATE(UTC)") %>%
dplyr::mutate(parameter = "cum_precip") %>%
dplyr::rename(date_utc = "DATE(UTC)")
if ("variable" %in% colnames(data)) {
data <- data %>%
dplyr::rename(id = "variable") %>%
dplyr::mutate(date = as.Date(date_utc)) %>%
dplyr::filter(date > max(historic_daily$date_utc)) %>%
dplyr::group_by(id, date, parameter) %>%
dplyr::summarise(value = mean(value, na.rm = TRUE)) %>%
dplyr::rename(date_utc = date) %>%
# Join with the daily mean
dplyr::full_join(historic_daily) %>%
# join with current year daily mean precip
dplyr::full_join(daily_current(parameter = parameter, id = id)) %>%
dplyr::arrange(id, date_utc) %>%
unique() %>%
dplyr::filter(!is.na(value))
} else (
data <- data %>%
dplyr::mutate(value = NA) %>%
dplyr::mutate(date = as.Date(date_utc)) %>%
dplyr::filter(date > max(historic_daily$date_utc)) %>%
dplyr::group_by(date, parameter) %>%
dplyr::summarise(value = mean(value, na.rm = TRUE)) %>%
dplyr::rename(date_utc = date) %>%
# Join with the daily mean
dplyr::full_join(historic_daily) %>%
# join with current year daily mean precip
dplyr::full_join(daily_current(parameter = parameter, id = id)) %>%
dplyr::arrange(date_utc) %>%
unique() %>%
dplyr::filter(!is.na(value))
)
} else if (parameter == "temperature") {
# Get t max and t min from historic daily data - not always complete to present water year
historic_daily <- bcdata::bcdc_get_data("5e7acd31-b242-4f09-8a64-000af872d68f", resource = "945c144a-d094-4a20-a3c6-9fe74cad368a") %>%
dplyr::filter(variable %in% c("T_Max", "T_Min"), Pillow_ID %in% paste0("_", id)) %>%
dplyr::rename(id = "Pillow_ID", date_utc = "Date") %>%
dplyr::mutate(parameter = as.character(ifelse(variable == "T_Max", "t_max", "t_min")), id = as.character(stringr::str_replace(id, "_", ""))) %>%
dplyr::select(-code, -variable)
# knit the daily snow depth available pre 2003 with hourly 2003-current
data <- bcdata::bcdc_get_data(record = "5e7acd31-b242-4f09-8a64-000af872d68f", resource = "fba88311-34b9-4422-b5ae-572fd23b2a00") %>%
dplyr::select(contains(c(id, "DATE(UTC)")))
colnames(data) <- gsub( " .*$", "", colnames(data))
# Needs to be a dataframe to melt
data <- data.frame(data, check.names = FALSE) %>%
reshape::melt(id = "DATE(UTC)") %>%
dplyr::mutate(parameter = parameter) %>%
dplyr::rename(date_utc = "DATE(UTC)")
if ("variable" %in% colnames(data)) {
data <- data %>%
dplyr::rename(id = "variable") %>%
dplyr::arrange(id, date_utc) %>%
dplyr::mutate(date = as.Date(date_utc), "id" = id) %>%
dplyr::filter(date > max(historic_daily$date_utc)) %>%
dplyr::group_by(id, date) %>%
dplyr::summarise(t_max = max(value, na.rm = TRUE),
t_min = min(value, na.rm = TRUE)) %>%
dplyr::rename(date_utc = date) %>%
reshape2::melt(id = c("date_utc", "id")) %>%
dplyr::rename(parameter = "variable") %>%
dplyr::full_join(historic_daily) %>%
# get current year temperature
dplyr::full_join(daily_current(parameter = parameter, id = id)) %>%
dplyr::arrange(id, date_utc) %>%
unique() %>%
dplyr::filter(!is.na(value)) %>%
dplyr::filter(!is.infinite(value))
} else (
data <- data %>%
dplyr::mutate(value = NA) %>%
dplyr::mutate(date = as.Date(date_utc), "id" = id) %>%
dplyr::filter(date > max(historic_daily$date_utc)) %>%
dplyr::group_by(id, date) %>%
dplyr::summarise(t_max = max(value, na.rm = TRUE),
t_min = min(value, na.rm = TRUE)) %>%
dplyr::rename(date_utc = date) %>%
reshape2::melt(id = c("date_utc", "id")) %>%
dplyr::rename(parameter = "variable") %>%
dplyr::full_join(historic_daily) %>%
# get current year temperature
dplyr::full_join(daily_current(parameter = parameter, id = id)) %>%
dplyr::arrange(date_utc) %>%
unique() %>%
dplyr::filter(!is.na(value))
)
}
} else {
if (parameter == "swe") {
# get only the archived data
# knit the daily swe archive with daily SWE for this water year
data_i <- bcdata::bcdc_get_data("5e7acd31-b242-4f09-8a64-000af872d68f", resource = "666b7263-6111-488c-89aa-7480031f74cd") %>%
dplyr::select(contains(c(id, "DATE(UTC)")))
colnames(data_i) <- gsub( " .*$", "", colnames(data_i))
# Melt dataframe
data <- data.frame(data_i, check.names = FALSE) %>%
reshape::melt(id = "DATE(UTC)") %>%
dplyr::mutate(parameter = parameter) %>%
dplyr::rename(date_utc = "DATE(UTC)")
if ("variable" %in% colnames(data)) {
data <- data %>%
dplyr::filter(!is.na(value)) %>%
dplyr::rename(id = "variable")
} else (
data <- data %>%
dplyr::mutate(value = NA) %>%
dplyr::filter(!is.na(value))
)
} else if (parameter == "snow_depth") {
# Get snow depth from historic daily data - not always complete to present water year
historic_daily <- bcdata::bcdc_get_data("5e7acd31-b242-4f09-8a64-000af872d68f", resource = "945c144a-d094-4a20-a3c6-9fe74cad368a") %>%
dplyr::filter(variable == "SD", Pillow_ID %in% paste0("_", id)) %>%
dplyr::rename(id = "Pillow_ID", date_utc = "Date") %>%
dplyr::mutate(parameter = "snow_depth", id = stringr::str_replace(id, "_", "")) %>%
dplyr::select(-code, -variable)
# knit the daily snow depth available pre 2003 with hourly 2003-current
data <- bcdata::bcdc_get_data("5e7acd31-b242-4f09-8a64-000af872d68f", resource = "204f91d4-b136-41d2-98b3-125ecefd6887") %>%
dplyr::select(contains(c(id, "DATE(UTC)")))
#dplyr::mutate(date = as.Date(`DATE(UTC)`)) %>%
#dplyr::rename(value = contains(id), date_utc = "DATE(UTC)")
colnames(data) <- gsub( " .*$", "", colnames(data))
# Needs to be a dataframe to melt
data <- data.frame(data, check.names = FALSE) %>%
reshape::melt(id = "DATE(UTC)") %>%
dplyr::mutate(parameter = parameter) %>%
dplyr::rename(date_utc = "DATE(UTC)", id = "variable")
if ("value" %in% colnames(data)) {
data <- data %>%
dplyr::mutate(date = as.Date(date_utc)) %>%
dplyr::group_by(id, date, parameter) %>%
dplyr::summarise(value = mean(value, na.rm = TRUE)) %>%
# cut out the data that is available within daily archive and knit together
dplyr::rename(date_utc = "date") %>%
dplyr::filter(date_utc > max(historic_daily$date_utc, na.rm = TRUE)) %>%
dplyr::full_join(historic_daily) %>%
dplyr::arrange(id, date_utc) %>%
unique() %>%
dplyr::filter(!is.na(value))
} else (
data <- data %>%
dplyr::mutate(value = NA) %>%
dplyr::mutate(date = as.Date(date_utc), parameter = "snow_depth") %>%
dplyr::group_by(id, date, parameter) %>%
dplyr::summarise(value = mean(value, na.rm = TRUE)) %>%
# cut out the data that is available within daily archive and knit together
dplyr::rename(date_utc = "date") %>%
dplyr::filter(date_utc > max(historic_daily$date_utc)) %>%
dplyr::full_join(historic_daily) %>%
dplyr::arrange(id, date_utc) %>%
unique() %>%
dplyr::filter(!is.na(value))
)
} else if (parameter == "precipitation") {
# Get precipitation from historic daily data - not always complete to present water year
historic_daily <- bcdata::bcdc_get_data("5e7acd31-b242-4f09-8a64-000af872d68f", resource = "945c144a-d094-4a20-a3c6-9fe74cad368a") %>%
dplyr::filter(variable %in% c("AccumP"), Pillow_ID %in% paste0("_", id)) %>%
dplyr::rename(id = "Pillow_ID", date_utc = "Date") %>%
dplyr::mutate(parameter = "cum_precip", id = stringr::str_replace(id, "_", "")) %>%
dplyr::select(-code, -variable)
# knit the precipitation available until 2003 to the current year data.
# Note that precip data is only hourly from the data catalog.
# ************* WILL NEED TO CHANGE UTC BEFORE TAKING DAILY MEAN********************
data <- bcdata::bcdc_get_data("5e7acd31-b242-4f09-8a64-000af872d68f", resource = "371a0479-1c6a-4f15-a456-11d778824f38") %>%
dplyr::select(contains(c(id, "DATE(UTC)")))
colnames(data) <- gsub( " .*$", "", colnames(data))
# Needs to be a dataframe to melt
data <- data.frame(data, check.names = FALSE) %>%
reshape::melt(id = "DATE(UTC)") %>%
dplyr::mutate(parameter = "cum_precip") %>%
dplyr::rename(date_utc = "DATE(UTC", id = "variable")
if ("value" %in% colnames(data)) {
data <- data %>%
dplyr::mutate(date = as.Date(date_utc)) %>%
dplyr::filter(date > max(historic_daily$date_utc)) %>%
dplyr::group_by(id, date, parameter) %>%
dplyr::summarise(value = mean(value, na.rm = TRUE)) %>%
dplyr::rename(date_utc = date) %>%
# Join with the daily mean
dplyr::full_join(historic_daily) %>%
dplyr::arrange(id, date_utc) %>%
unique() %>%
dplyr::filter(!is.na(value))
} else (
data <- data %>%
dplyr::mutate(value = NA) %>%
dplyr::mutate(date = as.Date(date_utc)) %>%
dplyr::filter(date > max(historic_daily$date_utc)) %>%
dplyr::group_by(id, date, parameter) %>%
dplyr::summarise(value = mean(value, na.rm = TRUE)) %>%
dplyr::rename(date_utc = date) %>%
# Join with the daily mean
dplyr::full_join(historic_daily) %>%
# join with current year daily mean precip
dplyr::arrange(id, date_utc) %>%
unique() %>%
dplyr::filter(!is.na(value))
)
} else if (parameter == "temperature") {
# Get t max and t min from historic daily data - not always complete to present water year
historic_daily <- bcdata::bcdc_get_data("5e7acd31-b242-4f09-8a64-000af872d68f", resource = "945c144a-d094-4a20-a3c6-9fe74cad368a") %>%
dplyr::filter(variable %in% c("T_Max", "T_Min"), Pillow_ID %in% paste0("_", id)) %>%
dplyr::rename(id = "Pillow_ID", date_utc = "Date") %>%
dplyr::mutate(parameter = as.character(ifelse(variable == "T_Max", "t_max", "t_min")), id = as.character(stringr::str_replace(id, "_", ""))) %>%
dplyr::select(-code, -variable)
# knit the daily snow depth available pre 2003 with hourly 2003-current
data <- bcdata::bcdc_get_data(record = "5e7acd31-b242-4f09-8a64-000af872d68f", resource = "fba88311-34b9-4422-b5ae-572fd23b2a00") %>%
dplyr::select(contains(c(id, "DATE(UTC)")))
colnames(data) <- gsub( " .*$", "", colnames(data))
# Needs to be a dataframe to melt
data <- data.frame(data, check.names = FALSE) %>%
reshape::melt(id = "DATE(UTC)") %>%
dplyr::mutate(parameter = parameter) %>%
dplyr::rename(date_utc = "DATE(UTC)", id = "variable")
if ("value" %in% colnames(data)) {
data <- data %>%
dplyr::mutate(date = as.Date(date_utc), "id" = id) %>%
dplyr::filter(date > max(historic_daily$date_utc)) %>%
dplyr::group_by(id, date) %>%
dplyr::summarise(t_max = max(value, na.rm = TRUE),
t_min = min(value, na.rm = TRUE)) %>%
dplyr::rename(date_utc = date) %>%
reshape2::melt(id = c("date_utc", "id")) %>%
dplyr::rename(parameter = "variable") %>%
dplyr::full_join(historic_daily) %>%
# get current year temperature
dplyr::arrange(id, date_utc) %>%
unique() %>%
dplyr::filter(!is.na(value)) %>%
dplyr::filter(!is.infinite(value))
} else (
data <- data %>%
dplyr::mutate(value = NA) %>%
dplyr::mutate(date = as.Date(date_utc), "id" = id) %>%
dplyr::filter(date > max(historic_daily$date_utc)) %>%
dplyr::group_by(id, date) %>%
dplyr::summarise(t_max = max(value, na.rm = TRUE),
t_min = min(value, na.rm = TRUE)) %>%
dplyr::rename(date_utc = date) %>%
reshape2::melt(id = c("date_utc", "id")) %>%
dplyr::rename(parameter = "variable") %>%
dplyr::full_join(historic_daily) %>%
# get current year temperature
dplyr::arrange(date_utc) %>%
unique() %>%
dplyr::filter(!is.na(value))
)
}
}
# filter for specified years and check that the DAILY data only is present - slight glitch in some stations that data catalogue has some hourly data
if (any(yr %in% c("ALL", "all", "All"))) {
data_o <- data %>%
dplyr::filter(lubridate::hour(date_utc) == "16")
} else {
# Filter for the years your specify
data_o <- data %>%
dplyr::filter(lubridate::year(date_utc) %in% yr) %>%
dplyr::filter(lubridate::hour(date_utc) == "16")
}
return(data_o)
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/bess.sph.r
\name{bess.sph}
\alias{bess.sph}
\title{Calculates Spherical Bessel functions from 0 to nmax.}
\usage{
bess.sph(nmax, x, code = "C")
}
\arguments{
\item{nmax}{The maximum order of \eqn{j_n(x)}}
\item{x}{The argument of \eqn{j_n(x)}}
\item{code}{If you prefer to use native R or C language.
The algorithm is the same.}
}
\value{
An array of Spherical Bessel functions and its derivatives
from 0 to \code{nmax} at point \code{x}, and also the logarithmic
derivative \eqn{c_n=j_n'/j_n} and the ratio \eqn{\rho_n=j_n/j_{n+1}}.
}
\description{
Calculates Spherical Bessel functions from 0 to nmax.
}
\details{
\code{bess.sph} calculates the Spherical Bessel
functions using downward recurrence, from \eqn{j_{nmax}(x)} to \eqn{j_0(x)}.
The system of equations is given by \eqn{S_n(x)=n/x},
\eqn{\rho_n=j_n(x)/j_{n+1}(x)}{r[n]=j_n/j_{n+1}} and
\eqn{c_n=j_n'(x)/j_n(x)}. The system can be solved by means of
the recurrence relations of the Spherical Bessel functions
\deqn{ \rho_{n-1}+\frac{1 }{\rho_n}=S_{2n+1} }{ r[n-1]+ 1/r[n]=S[2n+1]}
\deqn{n\rho_{n-1}-\frac{n+1}{\rho_n}=(2n+1)c_{n}}{nr[n-1]-(n+1)/r[n]=(2n+1)c[n]}
that can be rewritten
\deqn{\rho_n=S_{n+2}+c_{n+1} }{ r[n]=S[n+2]+c[n+1]}
\deqn{\frac{1}{\rho_n}=S_n-c_n. }{1/r[n]=S[n ]-c[n ].}
The logarithmic derivatives obey the relation,
\deqn{(S_{n+2}-c_{n+1})(S_n+c_n)=1. }{(S[n+2]-c[n+1])(S[n]+c[n])=1.}
The values can be calculated upward or downward.
}
\examples{
x<-30
nmax<-50
a<-bess.sph(nmax,x,code="C")
b<-bess.sph(nmax,x,code="R")
d<-sqrt(pi/(2*x))*besselJ(x=x,nu=.5+(0:nmax))
plot(a$jn,type='b')
points(b$jn,col='red',pch=4)
points(d,col='blue',pch=3)
}
| /man/bess.sph.Rd | no_license | wendellopes/rvswf | R | false | false | 1,738 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/bess.sph.r
\name{bess.sph}
\alias{bess.sph}
\title{Calculates Spherical Bessel functions from 0 to nmax.}
\usage{
bess.sph(nmax, x, code = "C")
}
\arguments{
\item{nmax}{The maximum order of \eqn{j_n(x)}}
\item{x}{The argument of \eqn{j_n(x)}}
\item{code}{If you prefer to use native R or C language.
The algorithm is the same.}
}
\value{
An array of Spherical Bessel functions and its derivatives
from 0 to \code{nmax} at point \code{x}, and also the logarithmic
derivative \eqn{c_n=j_n'/j_n} and the ratio \eqn{\rho_n=j_n/j_{n+1}}.
}
\description{
Calculates Spherical Bessel functions from 0 to nmax.
}
\details{
\code{bess.sph} calculates the Spherical Bessel
functions using downward recurrence, from \eqn{j_{nmax}(x)} to \eqn{j_0(x)}.
The system of equations is given by \eqn{S_n(x)=n/x},
\eqn{\rho_n=j_n(x)/j_{n+1}(x)}{r[n]=j_n/j_{n+1}} and
\eqn{c_n=j_n'(x)/j_n(x)}. The system can be solved by means of
the recurrence relations of the Spherical Bessel functions
\deqn{ \rho_{n-1}+\frac{1 }{\rho_n}=S_{2n+1} }{ r[n-1]+ 1/r[n]=S[2n+1]}
\deqn{n\rho_{n-1}-\frac{n+1}{\rho_n}=(2n+1)c_{n}}{nr[n-1]-(n+1)/r[n]=(2n+1)c[n]}
that can be rewritten
\deqn{\rho_n=S_{n+2}+c_{n+1} }{ r[n]=S[n+2]+c[n+1]}
\deqn{\frac{1}{\rho_n}=S_n-c_n. }{1/r[n]=S[n ]-c[n ].}
The logarithmic derivatives obey the relation,
\deqn{(S_{n+2}-c_{n+1})(S_n+c_n)=1. }{(S[n+2]-c[n+1])(S[n]+c[n])=1.}
The values can be calculated upward or downward.
}
\examples{
x<-30
nmax<-50
a<-bess.sph(nmax,x,code="C")
b<-bess.sph(nmax,x,code="R")
d<-sqrt(pi/(2*x))*besselJ(x=x,nu=.5+(0:nmax))
plot(a$jn,type='b')
points(b$jn,col='red',pch=4)
points(d,col='blue',pch=3)
}
|
# plot3.R -- Coursera Exploratory Data Analysis, plot 3.
# Reads the household power consumption data, keeps the two days of
# interest (1-2 Feb 2007), draws the three sub-metering series with a
# legend on the screen device, and copies the result to plot3.png.
setwd("/Users/Dipendra/Desktop/Coursera/ExData_Plotting1")

# lubridate supplies dmy() and hms() for the date-time conversion below
library(lubridate)

# First two columns (Date/Time) are read as character, the remaining
# seven as numeric; "?" marks missing observations in the raw file.
col_types <- c(rep('character', 2), rep('numeric', 7))
hpc <- read.table("household_power_consumption.txt",
                  header = TRUE, sep = ";", na.strings = "?",
                  colClasses = col_types)

# Restrict to the two days of interest
hpc <- hpc[hpc$Date == "1/2/2007" | hpc$Date == "2/2/2007", ]

# Tidy the variable names, then build a single date-time column and keep
# DateTime (column 10) plus the seven measurement columns.
colnames(hpc) <- c('Date', 'Time', 'GlobalActivePower', 'GlobalReactivePower',
                   'Voltage', 'GlobalIntensity',
                   'SubMetering1', 'SubMetering2', 'SubMetering3')
hpc$DateTime <- dmy(hpc$Date) + hms(hpc$Time)
hpc <- hpc[, c(10, 3:9)]

# Draw the three sub-metering traces on one set of axes
plot(hpc$DateTime, hpc$SubMetering1,
     type = 'l', col = 'black', xlab = '', ylab = 'Energy sub metering')
lines(hpc$DateTime, hpc$SubMetering2, col = 'Red')
lines(hpc$DateTime, hpc$SubMetering3, col = 'Blue')
legend('topright',
       legend = c('Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3'),
       col = c('black', 'red', 'blue'), lty = 'solid')

# Copy the screen device to a 480x480 px PNG, then close the copy
dev.copy(png, file = 'plot3.png', width = 480, height = 480, units = 'px')
dev.off()
| /plot3.R | no_license | kcdipendra/ExData_Plotting1 | R | false | false | 1,339 | r | setwd("/Users/Dipendra/Desktop/Coursera/ExData_Plotting1")
# plot3.R: plot the three energy sub-metering series for 1-2 Feb 2007
# (Coursera Exploratory Data Analysis, plot 3). Assumes
# household_power_consumption.txt is in the working directory.
# Loading lubridate package to library (supplies dmy() and hms())
library(lubridate)
# Segregating the data of two days from the entire dataset.
# First two columns (Date/Time) are character, the remaining seven numeric;
# "?" marks missing values in the raw file.
varclass <- c(rep('character',2),rep('numeric',7))
pwrconsumption <-read.table("household_power_consumption.txt",header=TRUE, sep = ";", na.strings = "?",colClasses=varclass)
pwrconsumption <- pwrconsumption[pwrconsumption$Date=="1/2/2007" | pwrconsumption$Date=="2/2/2007",]
# clean up the variable names and convert date/time fields
cols <-c('Date','Time','GlobalActivePower','GlobalReactivePower','Voltage','GlobalIntensity', 'SubMetering1','SubMetering2','SubMetering3')
colnames(pwrconsumption) <- cols
# Combine Date + Time into a single date-time column, then keep
# DateTime (column 10) plus the seven measurement columns.
pwrconsumption$DateTime <- dmy(pwrconsumption$Date)+hms(pwrconsumption$Time)
pwrconsumption <- pwrconsumption[,c(10,3:9)]
# Plot data: sub-metering 1 in black, 2 in red, 3 in blue, with a legend
plot(pwrconsumption$DateTime, pwrconsumption$SubMetering1,type='l', col='black',xlab='',ylab='Energy sub metering')
lines (pwrconsumption$DateTime, pwrconsumption$SubMetering2, col='Red')
lines(pwrconsumption$DateTime, pwrconsumption$SubMetering3,col='Blue')
legend('topright',legend=c('Sub_metering_1','Sub_metering_2','Sub_metering_3'),col= c('black','red','blue') ,lty='solid')
# Copy from screen to the file (480x480 px PNG)
dev.copy(png, file='plot3.png', width = 480, height = 480, units = 'px')
# Turn off device (closes the PNG copy and flushes the file)
dev.off()
|
#' @title Plot the water deficit ("déficit hydrique") measured at the
#'   Ernage station for the period of interest
#' @param data dataframe containing at least the columns \code{Décade},
#'   \code{Déficit} and \code{Année} (decade index, water deficit in mm,
#'   and year), as mapped in the aes() calls below
#' @param int boolean; if TRUE the plot is returned as an interactive
#'   plotly widget, otherwise as a static ggplot object
#' @import ggplot2
#' @return ggplot graph, or a plotly htmlwidget when \code{int} is TRUE
#' @export
dhw_plot <- function(data, int){
# Build the static plot: one coloured line/point series per year, with
# fixed axis ranges (decades 0-40, deficit -160..0 mm) and the legend
# anchored inside the bottom-left corner of the panel.
plot = ggplot(data, aes(x = Décade, y = Déficit)) +
geom_line(aes(color = Année), na.rm = TRUE) +
geom_point(aes(color = Année, shape = Année), na.rm = TRUE) +
scale_color_manual(values = c("#377eb8", "#4daf4a", "#984ea3", "#ff7f00", "#e41a1c")) +
scale_x_continuous(breaks = round(seq(0, 40, by = 4)), limits = c(0,40)) +
scale_y_continuous(breaks = round(seq(-160,0, by = 20)), limits = c(-160,0)) +
ylab("Déficit hydrique (mm)") +
ggtitle("Déficit hydrique (Station Ernage, Belgique)") +
theme(panel.background = element_rect(fill = NA),
panel.border = element_rect(color = "black", fill = NA),
legend.justification = c(0, 0),
legend.position = c(0.01, 0.02),
legend.background = element_rect(color = "black"))
# Static ggplot by default; convert to an interactive plotly widget on request
if (int == FALSE) {
return(plot)
}else {
return(plotly::ggplotly(plot))
}
} | /R/dhw_plot.R | no_license | pokyah/defHydWal | R | false | false | 1,082 | r | #' @title plot the deficit hydrique from Ernage data for the period of interest
#' @param data data frame containing at least the columns \code{Décade}
#'   (decade index), \code{Déficit} (water deficit, mm) and \code{Année}
#'   (year), as mapped in the aes() calls below.
#' @param int logical; if \code{TRUE} the plot is returned as an
#'   interactive plotly widget, otherwise as a static ggplot object.
#' @import ggplot2
#' @importFrom plotly ggplotly
#' @return A ggplot object, or a plotly htmlwidget when \code{int} is TRUE.
#' @export
dhw_plot <- function(data, int) {
  # Build the static plot: one coloured line/point series per year, with
  # fixed axis ranges (decades 0-40, deficit -160..0 mm) and the legend
  # anchored inside the bottom-left corner of the panel.
  p <- ggplot(data, aes(x = Décade, y = Déficit)) +
    geom_line(aes(color = Année), na.rm = TRUE) +
    geom_point(aes(color = Année, shape = Année), na.rm = TRUE) +
    scale_color_manual(values = c("#377eb8", "#4daf4a", "#984ea3", "#ff7f00", "#e41a1c")) +
    scale_x_continuous(breaks = round(seq(0, 40, by = 4)), limits = c(0, 40)) +
    scale_y_continuous(breaks = round(seq(-160, 0, by = 20)), limits = c(-160, 0)) +
    ylab("Déficit hydrique (mm)") +
    ggtitle("Déficit hydrique (Station Ernage, Belgique)") +
    theme(panel.background = element_rect(fill = NA),
          panel.border = element_rect(color = "black", fill = NA),
          legend.justification = c(0, 0),
          legend.position = c(0.01, 0.02),
          legend.background = element_rect(color = "black"))
  # Branch on the logical directly rather than comparing with `== FALSE`;
  # behaviour is unchanged (TRUE -> plotly widget, FALSE -> static ggplot).
  if (int) {
    plotly::ggplotly(p)
  } else {
    p
  }
}
#' @name nh_analysis_generateR
#' @title NewHybrids analysis file maker
#'
#' @description \code{nh_analysis_GenerateR} Merges simulated genotypes with the genotypes of unknown/experimental individuals, producing a file to be analyzed by NewHybrids. Will also output a dataframe containing the names of the individuals (including those that were simulated) in the NewHybrids formatted file.
#' @param ReferencePopsData A file path to a either a NewHybrids or GENEPOP formatted file containing genotypes from the simulated ancestral populations. This can be the result of any of the freqbasedsim functions, or a file created using the function genepop_newhybrids from the package genepopedit
#' @param UnknownIndivs A file path to a file containing the genotypes of the individuals to be analyzed for possible hybrid ancestry. This can either be a genepop format file, or a NewHybrids format file. Note - the number of loci and the names of the loci in ReferencePopsData and UnknownIndivs must be the same
#' @param sim.pops.include Optional character vector list denoting which hybrid categories from the simulatedd data should be included in the output. The default is Pure Population 1 and Pure Population 2.
#' @param outputName A character vector to be applied as the name of the output.
#' @export
#' @importFrom genepopedit subset_genepop genepop_flatten genepop_detective subset_genepop_aggregate
#' @importFrom stringr str_split str_detect
#' @import plyr
nh_analysis_generateR <- function(ReferencePopsData, UnknownIndivs, sim.pops.include = c("Pure1", "Pure2"), output.name){
### read in the simulated data
sim.file <- read.table(ReferencePopsData, header = FALSE, sep = "\t", stringsAsFactors = FALSE)
path.start <- getwd()
## check if the simulated data is GENEPOP or NewHybrids format. This will make a difference.
header.sim <- sim.file[1,] ## if it is genepop, it will have a single entry in the first position
if(str_detect(string = header.sim, pattern = "NumIndivs")==FALSE){
cats <- genepopedit::genepop_detective(GenePop = ReferencePopsData) ### get the names of the populations -- not sure if strictly needed - ask Ryan if can use numeric pop ID
writeLines("GENEPOP format detected for SIMULATED DATA. Assuming hybrid category order = Pure 1, Pure 2, F1, F2, Back Cross to Pure 1, Back Cross to Pure 2") ### warn that assuming this order
pop.no.convert <- c("Pure1", "Pure2", "F1", "F2", "BC1", "BC2") ### make a dataframe that matches up to the order of hybrid categories assumed
inds.get <- which(pop.no.convert %in% sim.pops.include) ### numeric value of which pops assumed match those requested
genepopedit::subset_genepop(GenePop = ReferencePopsData, keep = TRUE, sPop = inds.get, path = paste0(path.start, "/", "sim.subset.txt")) ## subset
sim.inds.include <- genepopedit::genepop_flatten(GenePop = paste0(path.start, "/", "sim.subset.txt")) ### read back in and flatten
sim.inds.include <- sim.inds.include[,-c(2,3)]
file.remove(paste0(path.start, "/", "sim.subset.txt")) ### remove the file that was made by subset_genepop
sim.inds.include.vector <- sim.inds.include[,1] ### get a vector of individual IDs
sim.inds.Loci <- colnames(sim.inds.include)
}
## if the input file is NewHybrids format, it should have two items in the first row
if(str_detect(string = header.sim, pattern = "NumIndivs")==TRUE){
sim.file <- read.table(ReferencePopsData, header = FALSE, skip = 4, stringsAsFactors = FALSE) ## read it in, but skip the first 4 rows because these are not needed - makes a flattened DF
sim.inds.Loci <- sim.file[1,] ### the first row will ahve the loci names,
sim.file <- sim.file[-1,] ## remove the loci names
colnames(sim.file) <- sim.inds.Loci ## add them back in as column names
NHResultsDir_Split <- unlist(str_split(string = ReferencePopsData, pattern = "/")) ### need to get the directory in which the file is so can get the idnvidual file to get the number of inds in each cat
NHResultsDir_Split <- NHResultsDir_Split[-grep(x = NHResultsDir_Split, pattern = ".txt")]
NHResultsDir <- paste0(paste(NHResultsDir_Split, collapse = "/"), "/")
get.files.list <- list.files(NHResultsDir)
indiv.file <- read.table(paste0(NHResultsDir, "/", get.files.list[grep(x = get.files.list, pattern = "individuals")])) ## read in the individual file
Output <- n_class(x = paste0(NHResultsDir, "/", get.files.list[grep(x = get.files.list, pattern = "individuals")])) ## get the # of inds in each cat
### Need to determine the range of rows that represent each hybrid category, the subset the requested individuals
Pure1 <- Output[1,2]
Pure2 <- Output[2,2]
F1 <- Output[3,2]
F2 <- Output[4,2]
BC1 <- Output[5,2]
BC2 <- Output[6,2]
Pure1.inds <- 1:Pure1
Pure2.inds <- (Pure1 + 1):(Pure1 + Pure2)
F1.inds <- (Pure1 + Pure2 + 1):(Pure1 + Pure2 + F1)
F2.inds <- (Pure1 + Pure2 + F1 + 1):(Pure1 + Pure2 + F1 + F2)
BC1.inds <- (Pure1 + Pure2 + F1 + F2 + 1):(Pure1 + Pure2 + F1 + F2 + BC1)
BC2.inds <- (Pure1 + Pure2 + F1 + F2 + BC1 + 1):sum(Output$n)
pop.location.vec <- list(Pure1.inds, Pure2.inds, F1.inds, F2.inds, BC1.inds, BC2.inds)
Output$Class <- c("Pure1", "Pure2", "F1", "F2", "BC1", "BC2")
inds.get <- which(Output$Class %in% sim.pops.include)
inds.get.subset.vec <- unlist(pop.location.vec[inds.get])
sim.inds.include <- sim.file[inds.get.subset.vec,]
sim.inds.include.vector <- indiv.file[inds.get.subset.vec, 1]
} ## END IF Simulated data is NH format
### end of input section for simulated data
### meow read in the unknown/experimental data
## as was done for the simulated data, need to check if entry is a NewHybrids or GENEPOP format file
unknown.file <- read.table(UnknownIndivs, header = FALSE, sep = "\t", stringsAsFactors = FALSE)
header.unknown <- unknown.file[1,]
if(stringr::str_detect(string = header.unknown, pattern = "NumIndivs")==FALSE){ ### if a GenePop format file then will have a single entry in the first row
unknown.indivs.exist <- genepopedit::genepop_detective(GenePop = UnknownIndivs, variable = "Inds") ## get a list of individuals
pops.exist <- genepopedit::genepop_detective(GenePop = UnknownIndivs) ##
ag.frame <- data.frame(Exits=pops.exist, ag.to = rep("Pop1", times = length(pops.exist)))
genepopedit::subset_genepop_aggregate(GenePop = UnknownIndivs, keep = TRUE, agPopFrame = ag.frame, path = paste0(path.start, "/", "unknown.agged.txt"))
unknown.flattened <- genepopedit::genepop_flatten(GenePop = paste0(path.start, "/", "unknown.agged.txt"))
unknown.flattened <- unknown.flattened[,-c(2,3)]
unknown.inds.include <- unknown.flattened
unknown.Loci <- colnames(unknown.flattened)
file.remove(paste0(path.start, "/", "unknown.agged.txt"))
}
#### if it is a NewHybrids format file
if(stringr::str_detect(string = header.unknown, pattern = "NumIndivs")==TRUE){
unknown.file <- read.table(UnknownIndivs, header = FALSE, skip = 4, stringsAsFactors = FALSE) ## skip the first 4 lines, will build these after anyways
unknown.Loci <- unknown.file[1,] ## the loci are in the first row
unknown.file <- unknown.file[-1,] ### remove the first row, these are the loci names - not needed here
colnames(unknown.file) <- unknown.Loci ## now make them the column names
unknown.inds.include <- unknown.file ### data to include
## if the data are read in as a NH file, then there should be an associated individual file - modify the path to the NH file to get the individual file
NHResultsDir_Split <- unlist(stringr::str_split(string = ReferencePopsData, pattern = "/"))
NHResultsDir_Split <- NHResultsDir_Split[-grep(x = NHResultsDir_Split, pattern = ".txt")]
NHResultsDir <- paste0(paste(NHResultsDir_Split, collapse = "/"), "/")
get.files.list <- list.files(NHResultsDir)
unknown.indivs.exist <- as.matrix(read.table(paste0(NHResultsDir, "/", get.files.list[grep(x = get.files.list, pattern = "individuals")]))) ### hold the individual file to appened to teh simulated individuals
Output <- n_class(x = paste0(NHResultsDir, "/", get.files.list[grep(x = get.files.list, pattern = "individuals")])) ## also want to have the numbers of individuals in each population
}
### error check that the simulated individuals and the unknown individuals have the same number of alleles - if not, fail and return error message
if(length(setdiff(unknown.Loci[-1], sim.inds.Loci[-1])) > 0){stop("The Simulated and Unknown datasets must contain the same marker names.")}
###
indivs.in.dataset <- c(as.character(sim.inds.include.vector), unknown.indivs.exist)
insertNumIndivs <- paste("NumIndivs", length(indivs.in.dataset))
insertNumLoci <- paste("NumLoci", length(sim.inds.Loci[-1])) ## will probably have to be -1
### hard coded stuff
insertDigits <- "Digits 3"
insertFormat <- "Format Lumped"
LociNames <- paste(sim.inds.Loci[-1], collapse = " ")
insertLociName <- paste("LocusNames", LociNames)
insert.meta.data <- c(insertNumIndivs, insertNumLoci, insertDigits, insertFormat, insertLociName)
sim.unknown.combined <- rbind(sim.inds.include[,-1], unknown.inds.include[,-1])
sim.ind.renameforNH <- c(1:nrow(sim.unknown.combined))
sim.unknown.combined <- data.frame(sim.ind.renameforNH, sim.unknown.combined)
sim.unknown.output <- do.call(paste, c(data.frame(sim.unknown.combined[,]), sep = " "))
data.out <- c(insert.meta.data, sim.unknown.output)
write(x = data.out, file = output.name)
indivs.out.file <- gsub(x = output.name, pattern = ".txt", replacement = "")
indivs.out.file <- paste0(indivs.out.file, "_individuals.txt")
write(x = indivs.in.dataset, file = indivs.out.file)
} | /R/nh_analysis_GenerateR.R | no_license | anne-laureferchaud/hybriddetective | R | false | false | 9,814 | r | #' @name nh_analysis_generateR
#' @title NewHybrids analysis file maker
#'
#' @description \code{nh_analysis_GenerateR} Merges simulated genotypes with the genotypes of unknown/experimental individuals, producing a file to be analyzed by NewHybrids. Will also output a dataframe containing the names of the individuals (including those that were simulated) in the NewHybrids formatted file.
#' @param ReferencePopsData A file path to a either a NewHybrids or GENEPOP formatted file containing genotypes from the simulated ancestral populations. This can be the result of any of the freqbasedsim functions, or a file created using the function genepop_newhybrids from the package genepopedit
#' @param UnknownIndivs A file path to a file containing the genotypes of the individuals to be analyzed for possible hybrid ancestry. This can either be a genepop format file, or a NewHybrids format file. Note - the number of loci and the names of the loci in ReferencePopsData and UnknownIndivs must be the same
#' @param sim.pops.include Optional character vector list denoting which hybrid categories from the simulatedd data should be included in the output. The default is Pure Population 1 and Pure Population 2.
#' @param outputName A character vector to be applied as the name of the output.
#' @export
#' Combine simulated reference data with unknown individuals for NewHybrids
#'
#' Builds a single NewHybrids-format input file from (a) simulated reference
#' populations and (b) unknown/experimental individuals. Each input may be in
#' GENEPOP or NewHybrids format; the format is detected from the first line
#' (a NewHybrids header begins with "NumIndivs"). The combined genotypes are
#' written to \code{output.name} and the matching individual IDs to
#' \code{<output.name>_individuals.txt}.
#'
#' @param ReferencePopsData File path to the simulated reference data
#'   (GENEPOP or NewHybrids format).
#' @param UnknownIndivs File path to the unknown/experimental data
#'   (GENEPOP or NewHybrids format).
#' @param sim.pops.include Character vector of simulated hybrid categories to
#'   retain; subset of c("Pure1", "Pure2", "F1", "F2", "BC1", "BC2").
#' @param output.name File path for the combined NewHybrids-format output.
#' @importFrom genepopedit subset_genepop genepop_flatten genepop_detective subset_genepop_aggregate
#' @importFrom stringr str_split str_detect
#' @import plyr
nh_analysis_generateR <- function(ReferencePopsData, UnknownIndivs, sim.pops.include = c("Pure1", "Pure2"), output.name){
  ### ---- simulated (reference) data ----
  sim.file <- read.table(ReferencePopsData, header = FALSE, sep = "\t", stringsAsFactors = FALSE)
  path.start <- getwd()
  ## GENEPOP vs NewHybrids: a NewHybrids file's first row contains "NumIndivs".
  header.sim <- sim.file[1,]
  if(stringr::str_detect(string = header.sim, pattern = "NumIndivs")==FALSE){
    cats <- genepopedit::genepop_detective(GenePop = ReferencePopsData) ## population names present in the GENEPOP file
    writeLines("GENEPOP format detected for SIMULATED DATA. Assuming hybrid category order = Pure 1, Pure 2, F1, F2, Back Cross to Pure 1, Back Cross to Pure 2")
    pop.no.convert <- c("Pure1", "Pure2", "F1", "F2", "BC1", "BC2") ## assumed order of hybrid categories in the file
    inds.get <- which(pop.no.convert %in% sim.pops.include) ## numeric pop IDs matching the requested categories
    genepopedit::subset_genepop(GenePop = ReferencePopsData, keep = TRUE, sPop = inds.get, path = paste0(path.start, "/", "sim.subset.txt")) ## subset to requested categories
    sim.inds.include <- genepopedit::genepop_flatten(GenePop = paste0(path.start, "/", "sim.subset.txt")) ## read back in and flatten
    sim.inds.include <- sim.inds.include[,-c(2,3)] ## drop the two metadata columns added by genepop_flatten
    file.remove(paste0(path.start, "/", "sim.subset.txt")) ## clean up the temporary file written by subset_genepop
    sim.inds.include.vector <- sim.inds.include[,1] ## vector of individual IDs
    sim.inds.Loci <- colnames(sim.inds.include)
  }
  ## NewHybrids format: the first row holds "NumIndivs"
  if(stringr::str_detect(string = header.sim, pattern = "NumIndivs")==TRUE){
    sim.file <- read.table(ReferencePopsData, header = FALSE, skip = 4, stringsAsFactors = FALSE) ## skip the 4 metadata rows - makes a flattened DF
    sim.inds.Loci <- sim.file[1,] ## the first remaining row holds the locus names
    sim.file <- sim.file[-1,] ## remove the locus-name row
    colnames(sim.file) <- sim.inds.Loci ## add the names back as column names
    ## locate the companion "*individuals*" file in the same directory to get
    ## the number of individuals in each category
    NHResultsDir_Split <- unlist(stringr::str_split(string = ReferencePopsData, pattern = "/"))
    NHResultsDir_Split <- NHResultsDir_Split[-grep(x = NHResultsDir_Split, pattern = ".txt")]
    NHResultsDir <- paste0(paste(NHResultsDir_Split, collapse = "/"), "/")
    get.files.list <- list.files(NHResultsDir)
    indiv.file <- read.table(paste0(NHResultsDir, "/", get.files.list[grep(x = get.files.list, pattern = "individuals")])) ## the individual-ID file
    Output <- n_class(x = paste0(NHResultsDir, "/", get.files.list[grep(x = get.files.list, pattern = "individuals")])) ## counts per hybrid category
    ## Row ranges of each hybrid category, in the fixed NewHybrids order
    Pure1 <- Output[1,2]
    Pure2 <- Output[2,2]
    F1 <- Output[3,2]
    F2 <- Output[4,2]
    BC1 <- Output[5,2]
    BC2 <- Output[6,2]
    Pure1.inds <- 1:Pure1
    Pure2.inds <- (Pure1 + 1):(Pure1 + Pure2)
    F1.inds <- (Pure1 + Pure2 + 1):(Pure1 + Pure2 + F1)
    F2.inds <- (Pure1 + Pure2 + F1 + 1):(Pure1 + Pure2 + F1 + F2)
    BC1.inds <- (Pure1 + Pure2 + F1 + F2 + 1):(Pure1 + Pure2 + F1 + F2 + BC1)
    BC2.inds <- (Pure1 + Pure2 + F1 + F2 + BC1 + 1):sum(Output$n)
    pop.location.vec <- list(Pure1.inds, Pure2.inds, F1.inds, F2.inds, BC1.inds, BC2.inds)
    Output$Class <- c("Pure1", "Pure2", "F1", "F2", "BC1", "BC2")
    inds.get <- which(Output$Class %in% sim.pops.include)
    inds.get.subset.vec <- unlist(pop.location.vec[inds.get])
    sim.inds.include <- sim.file[inds.get.subset.vec,]
    sim.inds.include.vector <- indiv.file[inds.get.subset.vec, 1]
  } ## END IF simulated data is NH format
  ### ---- unknown / experimental data ----
  unknown.file <- read.table(UnknownIndivs, header = FALSE, sep = "\t", stringsAsFactors = FALSE)
  header.unknown <- unknown.file[1,]
  ## GENEPOP format: aggregate all populations into one, then flatten.
  if(stringr::str_detect(string = header.unknown, pattern = "NumIndivs")==FALSE){
    unknown.indivs.exist <- genepopedit::genepop_detective(GenePop = UnknownIndivs, variable = "Inds") ## list of individual IDs
    pops.exist <- genepopedit::genepop_detective(GenePop = UnknownIndivs)
    ag.frame <- data.frame(Exits=pops.exist, ag.to = rep("Pop1", times = length(pops.exist)))
    genepopedit::subset_genepop_aggregate(GenePop = UnknownIndivs, keep = TRUE, agPopFrame = ag.frame, path = paste0(path.start, "/", "unknown.agged.txt"))
    unknown.flattened <- genepopedit::genepop_flatten(GenePop = paste0(path.start, "/", "unknown.agged.txt"))
    unknown.flattened <- unknown.flattened[,-c(2,3)] ## drop the two metadata columns added by genepop_flatten
    unknown.inds.include <- unknown.flattened
    unknown.Loci <- colnames(unknown.flattened)
    file.remove(paste0(path.start, "/", "unknown.agged.txt")) ## clean up temporary file
  }
  #### NewHybrids format
  if(stringr::str_detect(string = header.unknown, pattern = "NumIndivs")==TRUE){
    unknown.file <- read.table(UnknownIndivs, header = FALSE, skip = 4, stringsAsFactors = FALSE) ## skip the 4 metadata rows; the header is rebuilt later
    unknown.Loci <- unknown.file[1,] ## locus names are in the first row
    unknown.file <- unknown.file[-1,] ## remove the locus-name row
    colnames(unknown.file) <- unknown.Loci ## make them the column names
    unknown.inds.include <- unknown.file ## data to include
    ## The companion "*individuals*" file lives beside the unknown data file.
    ## BUG FIX: this previously split ReferencePopsData, so the individual IDs
    ## were read from the simulated data's directory instead of the unknowns'.
    NHResultsDir_Split <- unlist(stringr::str_split(string = UnknownIndivs, pattern = "/"))
    NHResultsDir_Split <- NHResultsDir_Split[-grep(x = NHResultsDir_Split, pattern = ".txt")]
    NHResultsDir <- paste0(paste(NHResultsDir_Split, collapse = "/"), "/")
    get.files.list <- list.files(NHResultsDir)
    unknown.indivs.exist <- as.matrix(read.table(paste0(NHResultsDir, "/", get.files.list[grep(x = get.files.list, pattern = "individuals")]))) ## IDs appended after the simulated individuals
    Output <- n_class(x = paste0(NHResultsDir, "/", get.files.list[grep(x = get.files.list, pattern = "individuals")])) ## counts per category
  }
  ### Error check: both datasets must contain the same marker (locus) names.
  if(length(setdiff(unknown.Loci[-1], sim.inds.Loci[-1])) > 0){stop("The Simulated and Unknown datasets must contain the same marker names.")}
  ### ---- write the combined NewHybrids input ----
  indivs.in.dataset <- c(as.character(sim.inds.include.vector), unknown.indivs.exist)
  insertNumIndivs <- paste("NumIndivs", length(indivs.in.dataset))
  insertNumLoci <- paste("NumLoci", length(sim.inds.Loci[-1])) ## first column is the individual ID, hence the -1
  ### hard-coded NewHybrids header values
  insertDigits <- "Digits 3"
  insertFormat <- "Format Lumped"
  LociNames <- paste(sim.inds.Loci[-1], collapse = " ")
  insertLociName <- paste("LocusNames", LociNames)
  insert.meta.data <- c(insertNumIndivs, insertNumLoci, insertDigits, insertFormat, insertLociName)
  sim.unknown.combined <- rbind(sim.inds.include[,-1], unknown.inds.include[,-1])
  sim.ind.renameforNH <- c(1:nrow(sim.unknown.combined)) ## NewHybrids expects sequential numeric individual IDs
  sim.unknown.combined <- data.frame(sim.ind.renameforNH, sim.unknown.combined)
  sim.unknown.output <- do.call(paste, c(data.frame(sim.unknown.combined[,]), sep = " "))
  data.out <- c(insert.meta.data, sim.unknown.output)
  write(x = data.out, file = output.name)
  ## matching individual-ID file: "<output>_individuals.txt"
  indivs.out.file <- gsub(x = output.name, pattern = ".txt", replacement = "")
  indivs.out.file <- paste0(indivs.out.file, "_individuals.txt")
  write(x = indivs.in.dataset, file = indivs.out.file)
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/toy.R
\name{toy_example}
\alias{toy_example}
\title{Access to toy examples bundled in this package}
\usage{
toy_example(name = NULL)
}
\arguments{
\item{name}{Name of the example, default: return all}
}
\value{
A named vector of file system paths.
}
\description{
Returns the paths to all available toy examples, or to a specific toy
example. Load via \code{\link[=readRDS]{readRDS()}}.
}
| /man/toy_example.Rd | no_license | castillag/MultiLevelIPF | R | false | true | 468 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/toy.R
\name{toy_example}
\alias{toy_example}
\title{Access to toy examples bundled in this package}
\usage{
toy_example(name = NULL)
}
\arguments{
\item{name}{Name of the example, default: return all}
}
\value{
A named vector of file system paths.
}
\description{
Returns the paths to all available toy examples, or to a specific toy
example. Load via \code{\link[=readRDS]{readRDS()}}.
}
|
## Phantom - Data
## Phantom - VMatrix
## Active V-skills: the four job-exclusive attacks plus the shared
## Thief/Heroes common V-skills (slots 2-5 of CommonV).
phantom.act.skills <- c("Joker", "BlackJack", "MarkofPhantom", "LiftBreak",
                        CommonV("Thief", "Heroes")[2:5])
phantom.act.lvs <- c(25, 25, 25, 25, 25, 1, 25, 25)
PhantomCoreBase <- CoreBuilder(ActSkills=phantom.act.skills,
                               ActSkillsLv=phantom.act.lvs,
                               UsefulSkills=c("SharpEyes", "CombatOrders"),
                               SpecSet=get(DPMCalcOption$SpecSet),
                               VPassiveList=PhantomVPassive,
                               VPassivePrior=PhantomVPrior,
                               SelfBind=F)
rm(phantom.act.skills, phantom.act.lvs)
## Translate the CoreBuilder output into the matrix layout used by the
## downstream skill-level lookups (GetCoreLv etc.).
phantom.core.args <- list(PasSkills=PhantomCoreBase$PasSkills$Skills,
                          PasLvs=PhantomCoreBase$PasSkills$Lv,
                          PasMP=PhantomCoreBase$PasSkills$MP,
                          ActSkills=PhantomCoreBase$ActSkills$Skills,
                          ActLvs=PhantomCoreBase$ActSkills$Lv,
                          ActMP=PhantomCoreBase$ActSkills$MP,
                          UsefulSkills=PhantomCoreBase$UsefulSkills,
                          UsefulLvs=20,
                          UsefulMP=0,
                          SpecSet=get(DPMCalcOption$SpecSet),
                          SpecialCore=PhantomCoreBase$SpecialCoreUse)
PhantomCore <- do.call(MatrixSet, phantom.core.args)
rm(phantom.core.args)
## Phantom - Basic Info
## Character/job base: level, skill levels, stat percentages etc., resolved
## from the globally selected spec set (DPMCalcOption$SpecSet) and mob set.
## NOTE(review): the positional arguments of WeaponUpgrade (star-force,
## scroll counts, ...) are defined elsewhere -- confirm against WeaponUpgrade.
PhantomBase <- JobBase(ChrInfo=ChrInfo,
                       MobInfo=get(DPMCalcOption$MobSet),
                       SpecSet=get(DPMCalcOption$SpecSet),
                       Job="Phantom",
                       CoreData=PhantomCore,
                       BuffDurationNeeded=57,  # extra buff-duration stat assumed for this job's cycle
                       AbilList=FindJob(get(paste(DPMCalcOption$SpecSet, "Ability", sep="")), "Phantom"),
                       LinkList=FindJob(get(paste(DPMCalcOption$SpecSet, "Link", sep="")), "Phantom"),
                       MonsterLife=get(FindJob(MonsterLifePreSet, "Phantom")[DPMCalcOption$MonsterLifeLevel][1, 1]),
                       Weapon=WeaponUpgrade(1, DPMCalcOption$WeaponSF, 4, 0, 0, 0, 0, 3, 0, 0, "Cane", get(DPMCalcOption$SpecSet)$WeaponType)[, 1:16],
                       WeaponType=get(DPMCalcOption$SpecSet)$WeaponType,
                       SubWeapon=SubWeapon[rownames(SubWeapon)=="Card", ],
                       Emblem=Emblem[rownames(Emblem)=="Heroes", ],
                       CoolReduceHat=as.logical(FindJob(get(paste(DPMCalcOption$SpecSet, "CoolReduceHat", sep="")), "Phantom")))
## Phantom - Passive
## Every passive is a two-column data.frame (option, value); option levels
## come from the global PSkill vector.  A throwaway helper keeps each entry
## on one line; it is removed again so the environment ends up clean.
ps.entry <- function(option, value) {
  data.frame(option=factor(option, levels=PSkill), value=value)
}
HighDexterity <- ps.entry("SubStat1", 40)
LuckMonopoly <- ps.entry("MainStat", 60)
CaneAcceleration <- ps.entry(c("ATKSpeed", "MainStat"), c(2, 20))
LuckofPhantomThief <- ps.entry("MainStat", 60)
MoonLight <- ps.entry("ATK", 40)
AcuteSense <- ps.entry(c("FDR", "CRR"), c(30, 35))
## PrayofAria and CaneExpert scale with the passive-skill level bonus.
PrayofAria <- ps.entry(c("BDR", "IGR"),
                       c(30 + PhantomBase$PSkillLv, 30 + PhantomBase$PSkillLv))
CaneExpert <- ps.entry(c("Mastery", "ATK", "CDMR", "FDR"),
                       c(70 + ceiling(PhantomBase$PSkillLv/2), 40 + PhantomBase$PSkillLv,
                         15, 25 + floor(PhantomBase$PSkillLv/2)))
## Passive portions of V-matrix skills scale with their core level.
ReadytoDiePassive <- ps.entry("ATK", GetCoreLv(PhantomCore, "ReadyToDie"))
BlinkPassive <- ps.entry("ATK", GetCoreLv(PhantomCore, "Blink"))
RopeConnectPassive <- ps.entry(c("MainStat", "SubStat1"),
                               rep(GetCoreLv(PhantomCore, "RopeConnect"), 2))
rm(ps.entry)
PhantomPassive <- Passive(list(HighDexterity=HighDexterity, LuckMonopoly=LuckMonopoly, CaneAcceleration=CaneAcceleration, LuckofPhantomThief=LuckofPhantomThief, MoonLight=MoonLight, AcuteSense=AcuteSense,
                               PrayofAria=PrayofAria, CaneExpert=CaneExpert, ReadytoDiePassive=ReadytoDiePassive, BlinkPassive=BlinkPassive, RopeConnectPassive=RopeConnectPassive))
## Phantom - Buff
## Each buff is a small two-column frame: stat rows (option from the BSkill
## level set, value), stacked via rbind on top of an info frame whose row
## names come from BInfo.  The info vector appears to follow the layout
## (Duration, CoolTime, Delay, then four flags such as BuffDurationAvailable /
## CoolReduceAvailable / ServerLag) -- confirm against the BInfo definition
## and the Buff() constructor; only Duration/CoolTime/Delay and the flags
## accessed later (BuffDurationAvailable, ServerLag, CoolReduceAvailable) are
## demonstrably used in this file.
{option <- factor("ATK", levels=BSkill)
value <- c(30)
## Fury: +30 ATK; duration scaled by the buff-duration requirement (+10)
info <- c(180 * (100 + PhantomBase$BuffDurationNeeded + 10) / 100, NA, 0, F, NA, NA, T)
info <- data.frame(BInfo, info)
colnames(info) <- c("option", "value")
Fury <- rbind(data.frame(option, value), info)
## CrossOverChain: +20% final damage
option <- factor("FDR", levels=BSkill)
value <- c(20)
info <- c(200 * (100 + PhantomBase$BuffDurationNeeded + 10) / 100, NA, 0, F, NA, NA, T)
info <- data.frame(BInfo, info)
colnames(info) <- c("option", "value")
CrossOverChain <- rbind(data.frame(option, value), info)
## MapleSoldier (Maple Warrior): % main-stat buff scaling with skill level
option <- factor("MainStat", levels=BSkill)
value <- c(floor((PhantomBase$ChrLv * 5 + 18) * (0.15 + 0.01 * ceiling(PhantomBase$SkillLv/2))))
info <- c(900 + 30 * PhantomBase$SkillLv, NA, 0, T, NA, NA, T)
info <- data.frame(BInfo, info)
colnames(info) <- c("option", "value")
MapleSoldier <- rbind(data.frame(option, value), info)
## FinalCutBuff: final-damage buff granted by the Final Cut attack
option <- factor("FDR", levels=BSkill)
value <- c(40 + PhantomBase$SkillLv)
info <- c(60 * (100 + PhantomBase$BuffDurationNeeded + 10) / 100, 90, 0, F, NA, NA, T)
info <- data.frame(BInfo, info)
colnames(info) <- c("option", "value")
FinalCutBuff <- rbind(data.frame(option, value), info)
## TwilightDebuff: ignore-guard debuff applied by Twilight
option <- factor(c("IGR"), levels=BSkill)
value <- c(20 + floor(PhantomBase$SkillLv/2))
info <- c(15, 240, 0, F, NA, NA, F)
info <- data.frame(BInfo, info)
colnames(info) <- c("option", "value")
TwilightDebuff <- rbind(data.frame(option, value), info)
## HeroesOath: +5% boss damage
option <- factor("BDR", levels=BSkill)
value <- c(5)
info <- c(120, 120, 0, F, F, F, T)
info <- data.frame(BInfo, info)
colnames(info) <- c("option", "value")
HeroesOath <- rbind(data.frame(option, value), info)
## Bullseye: multi-stat burst buff
option <- factor(c("CRR", "CDMR", "IGR", "BDR"), levels=BSkill)
value <- c(20, 10, 20, 20)
info <- c(30, 180, 960, F, F, F, T)
info <- data.frame(BInfo, info)
colnames(info) <- c("option", "value")
Bullseye <- rbind(data.frame(option, value), info)
## Shared party buffs resolved from the core setup; AdvancedBless is optional.
Useful <- UsefulSkills(PhantomCore)
UsefulSharpEyes <- Useful$UsefulSharpEyes
UsefulCombatOrders <- Useful$UsefulCombatOrders
if(sum(names(Useful)=="UsefulAdvancedBless") >= 1) {
  UsefulAdvancedBless <- Useful$UsefulAdvancedBless
}
## JokerBuff: final-damage bonus on a successful Joker draw
option <- factor(c("FDR"), levels=BSkill)
value <- c(ceiling(GetCoreLv(PhantomCore, "Joker")/5))
info <- c(30, 150, 1620, F, T, F, T)
info <- data.frame(BInfo, info)
colnames(info) <- c("option", "value")
JokerBuff <- rbind(data.frame(option, value), info)
## JokerBuffFail: same cast, no stat rows (failed draw)
option <- factor(levels=BSkill)
value <- c()
info <- c(30, 150, 1620, F, T, F, T)
info <- data.frame(BInfo, info)
colnames(info) <- c("option", "value")
JokerBuffFail <- rbind(data.frame(option, value), info)
## ReadyToDie, 1-stack and 2-stack variants (different FDR and durations)
option <- factor("FDR", levels=BSkill)
value <- c(10 + floor(GetCoreLv(PhantomCore, "ReadyToDie")/10))
info <- c(30, 90 - floor(GetCoreLv(PhantomCore, "ReadyToDie")/2), 780, F, T, F, T)
info <- data.frame(BInfo, info)
colnames(info) <- c("option", "value")
ReadyToDie1Stack <- rbind(data.frame(option, value), info)
option <- factor("FDR", levels=BSkill)
value <- c(30 + floor(GetCoreLv(PhantomCore, "ReadyToDie")/5))
info <- c((30 - 0.78)/2 + 0.78, 90 - floor(GetCoreLv(PhantomCore, "ReadyToDie")/2), 1560, F, T, F, T)
info <- data.frame(BInfo, info)
colnames(info) <- c("option", "value")
ReadyToDie2Stack <- rbind(data.frame(option, value), info)
## MapleWarriors2 (V upgrade): additional main stat on top of MapleSoldier
option <- factor(c("MainStat", "BDR"), levels=BSkill)
value <- c(floor(((1 + 0.1 * GetCoreLv(PhantomCore, "MapleWarriors2")) * MapleSoldier[1, 2]) * PhantomBase$MainStatP), 5 + floor(GetCoreLv(PhantomCore, "MapleWarriors2")/2))
info <- c(60, 180, 630, F, T, F, T)
info <- data.frame(BInfo, info)
colnames(info) <- c("option", "value")
MapleWarriors2 <- rbind(data.frame(option, value), info)
## NoirCarteStack: stat-less tracker entry (stack bookkeeping only)
option <- factor(levels=BSkill)
value <- c()
info <- c(0, 1, 0, F, F, F, F)
info <- data.frame(BInfo, info)
colnames(info) <- c("option", "value")
NoirCarteStack <- rbind(data.frame(option, value), info)}
PhantomBuff <- list(Fury=Fury, CrossOverChain=CrossOverChain, MapleSoldier=MapleSoldier, FinalCutBuff=FinalCutBuff,
                    TwilightDebuff=TwilightDebuff, HeroesOath=HeroesOath, Bullseye=Bullseye, UsefulSharpEyes=UsefulSharpEyes, UsefulCombatOrders=UsefulCombatOrders,
                    JokerBuff=JokerBuff, JokerBuffFail=JokerBuffFail, ReadyToDie1Stack=ReadyToDie1Stack, ReadyToDie2Stack=ReadyToDie2Stack, MapleWarriors2=MapleWarriors2,
                    NoirCarteStack=NoirCarteStack, Restraint4=Restraint4, SoulContractLink=SoulContractLink)
## Append the optional AdvancedBless buff before freezing the buff table.
if(sum(names(Useful)=="UsefulAdvancedBless") >= 1) {
  PhantomBuff[[length(PhantomBuff)+1]] <- UsefulAdvancedBless
  names(PhantomBuff)[[length(PhantomBuff)]] <- "UsefulAdvancedBless"
}
PhantomBuff <- Buff(PhantomBuff)
PhantomAllTimeBuff <- AllTimeBuff(PhantomBuff)
## PetBuff : Fury(1080ms), CrossOverChain(720ms), MapleSoldier(0ms), UsefulCombatOrders(1500ms), UsefulSharpEyes(900ms), (UsefulAdvancedBless)
## Phantom - Union & HyperStat & SoulWeapon
## Resolve union/hyper-stat allocation for this job, then unpack the parts
## that the rest of the script uses; the intermediate is dropped afterwards.
phantom.jobspec <- JobSpec(JobBase=PhantomBase,
                           Passive=PhantomPassive,
                           AllTimeBuff=PhantomAllTimeBuff,
                           MobInfo=get(DPMCalcOption$MobSet),
                           SpecSet=get(DPMCalcOption$SpecSet),
                           WeaponName="Cane",
                           UnionStance=0)
PhantomUnionRemained <- phantom.jobspec$UnionRemained
PhantomHyperStatBase <- phantom.jobspec$HyperStatBase
PhantomCoolReduceType <- phantom.jobspec$CoolReduceType
PhantomSpec <- phantom.jobspec$Spec
rm(phantom.jobspec)
## Phantom - Spider In Mirror
## SIMData returns a named list; lift each named component into the
## environment under the same name the rest of the script expects.
SIM <- SIMData(GetCoreLv(PhantomCore, "SpiderInMirror"))
for (sim.part in c("SpiderInMirror", "SpiderInMirrorStart",
                   paste0("SpiderInMirror", 1:5), "SpiderInMirrorWait")) {
  assign(sim.part, SIM[[sim.part]])
}
rm(sim.part)
## Phantom - Attacks
## Each attack is built like the buffs: stat rows (option from ASkill, value)
## stacked on an info frame whose row names come from AInfo.  The info vector
## appears to follow (Damage%, Hits, Delay, ..., CoolTime, flags) -- the exact
## AInfo layout is defined elsewhere; confirm against the Attack() constructor.
{option <- factor(c("IGR", "BDR", "FDR"), levels=ASkill)
## UltimateDrive: basic attack; extra 20 IGR once its core reaches level 40
value <- c(IGRCalc(c(20, ifelse(GetCoreLv(PhantomCore, "UltimateDrive")>=40, 20, 0))), 20, 2 * GetCoreLv(PhantomCore, "UltimateDrive"))
info <- c(140 + PhantomSpec$SkillLv, 3, 150, NA, NA, NA, NA, F)
info <- data.frame(AInfo, info)
colnames(info) <- c("option", "value")
UltimateDrive <- rbind(data.frame(option, value), info)
## TempestofCard: periodic multi-hit skill with its own cooldown
option <- factor(c("IGR", "BDR", "FDR"), levels=ASkill)
value <- c(ifelse(GetCoreLv(PhantomCore, "TempestofCard")>=40, 20, 0), 20, 2 * GetCoreLv(PhantomCore, "TempestofCard"))
info <- c(200 + 2 * PhantomSpec$SkillLv, 3, 10000, 180, 10 + Cooldown(18, T, 20 + PhantomSpec$CoolReduceP, PhantomSpec$CoolReduce), F, T, F)
info <- data.frame(AInfo, info)
colnames(info) <- c("option", "value")
TempestofCard <- rbind(data.frame(option, value), info)
## NoirCarte: passive proc hit (single), plus a 10-hit Judgement variant
option <- factor(c("IGR", "FDR"), levels=ASkill)
value <- c(ifelse(GetCoreLv(PhantomCore, "NoirCarte")>=40, 20, 0), 2 * GetCoreLv(PhantomCore, "NoirCarte"))
info <- c(270, 1, 0, NA, NA, NA, NA, F)
info <- data.frame(AInfo, info)
colnames(info) <- c("option", "value")
NoirCarte <- rbind(data.frame(option, value), info)
option <- factor(c("IGR", "FDR"), levels=ASkill)
value <- c(ifelse(GetCoreLv(PhantomCore, "NoirCarte")>=40, 20, 0), 2 * GetCoreLv(PhantomCore, "NoirCarte"))
info <- c(270, 10, 0, NA, NA, NA, NA, F)
info <- data.frame(AInfo, info)
colnames(info) <- c("option", "value")
NoirCarteJudgement <- rbind(data.frame(option, value), info)
## Twilight, split into two delay segments (Twilight2 carries no damage rows)
## NOTE(review): Twilight1 uses PhantomBase$SkillLv while neighbouring attacks
## use PhantomSpec$SkillLv -- confirm whether that is intended.
option <- factor(c("IGR", "FDR"), levels=ASkill)
value <- c(ifelse(GetCoreLv(PhantomCore, "Twilight")>=40, 20, 0), 2 * GetCoreLv(PhantomCore, "Twilight"))
info <- c(450 + 3 * PhantomBase$SkillLv, 3, 180, NA, NA, NA, NA, F)
info <- data.frame(AInfo, info)
colnames(info) <- c("option", "value")
Twilight1 <- rbind(data.frame(option, value), info)
option <- factor(levels=ASkill)
value <- c()
info <- c(0, 0, 540, NA, NA, NA, NA, F)
info <- data.frame(AInfo, info)
colnames(info) <- c("option", "value")
Twilight2 <- rbind(data.frame(option, value), info)
## RoseCarteFinale: cooldown skill plus its additional-hit component
option <- factor(c("IGR", "FDR"), levels=ASkill)
value <- c(ifelse(GetCoreLv(PhantomCore, "RoseCarteFinale")>=40, 20, 0), 2 * GetCoreLv(PhantomCore, "RoseCarteFinale"))
info <- c(700, 6, 1200, NA, 30, F, F, F)
info <- data.frame(AInfo, info)
colnames(info) <- c("option", "value")
RoseCarteFinale <- rbind(data.frame(option, value), info)
option <- factor(c("IGR", "FDR"), levels=ASkill)
value <- c(ifelse(GetCoreLv(PhantomCore, "RoseCarteFinale")>=40, 20, 0), 2 * GetCoreLv(PhantomCore, "RoseCarteFinale"))
info <- c(200, 2, 0, 930, 30, F, F, F)
info <- data.frame(AInfo, info)
colnames(info) <- c("option", "value")
RoseCarteFinaleAdd <- rbind(data.frame(option, value), info) ## 12 Hits, FirstATK : 2400
## FinalCut: pre-cast delay row plus the actual single hit
option <- factor(levels=ASkill)
value <- c()
info <- c(0, 0, 1000, NA, NA, NA, NA, F)
info <- data.frame(AInfo, info)
colnames(info) <- c("option", "value")
FinalCutPre <- rbind(data.frame(option, value), info)
option <- factor(c("IGR", "FDR"), levels=ASkill)
value <- c(ifelse(GetCoreLv(PhantomCore, "TalentofPhantomThief4")>=40, 20, 0), 2 * GetCoreLv(PhantomCore, "TalentofPhantomThief4"))
info <- c((2000 + 20 * PhantomSpec$SkillLv)/ 1.3 * 1.2, 1, 180, NA, NA, NA, NA, F)
info <- data.frame(AInfo, info)
colnames(info) <- c("option", "value")
FinalCut <- rbind(data.frame(option, value), info)
## V-matrix attacks; damage scales with core level (stat rows omitted)
option <- factor(levels=ASkill)
value <- c()
info <- c(240 + 9 * GetCoreLv(PhantomCore, "Joker"), 3, 6000 + floor(GetCoreLv(PhantomCore, "Joker")/25) * 1000, 50, 150, T, F, F)
info <- data.frame(AInfo, info)
colnames(info) <- c("option", "value")
Joker <- rbind(data.frame(option, value), info)
option <- factor(levels=ASkill)
value <- c()
info <- c(600 + 24 * GetCoreLv(PhantomCore, "BlackJack"), 3, 760, 450, 15, T, F, F)
info <- data.frame(AInfo, info)
colnames(info) <- c("option", "value")
BlackJack <- rbind(data.frame(option, value), info) ## FirstATK : 1200
option <- factor(levels=ASkill)
value <- c()
info <- c(800 + 32 * GetCoreLv(PhantomCore, "BlackJack"), 18, 0, 0, 15, T, F, F)
info <- data.frame(AInfo, info)
colnames(info) <- c("option", "value")
BlackJackLast <- rbind(data.frame(option, value), info)
option <- factor(levels=ASkill)
value <- c()
info <- c(300 + 12 * GetCoreLv(PhantomCore, "MarkofPhantom"), 6, 900, 75, 30, T, F, F)
info <- data.frame(AInfo, info)
colnames(info) <- c("option", "value")
MarkofPhantom <- rbind(data.frame(option, value), info) ## FirstATK : 660
option <- factor(levels=ASkill)
value <- c()
info <- c(485 + 19 * GetCoreLv(PhantomCore, "MarkofPhantom"), 15, 0, 30, 30, T, F, F)
info <- data.frame(AInfo, info)
colnames(info) <- c("option", "value")
MarkofPhantomFinal <- rbind(data.frame(option, value), info) ## FirstATK : 1440
option <- factor(levels=ASkill)
value <- c()
info <- c(400 + 16 * GetCoreLv(PhantomCore, "LiftBreak"), 7, 990, 0, 30, T, F, F)
info <- data.frame(AInfo, info)
colnames(info) <- c("option", "value")
LiftBreak <- rbind(data.frame(option, value), info) ## FirstATK : 0, Delays : 270, 270, 1230, 30, 30, 30
}
## Assemble the attack and summon tables.  NOTE: keep this list order --
## the delay adjustment below indexes ATKFinal rows by position.
phantom.attacks <- list(UltimateDrive=UltimateDrive, TempestofCard=TempestofCard, NoirCarte=NoirCarte,
                        NoirCarteJudgement=NoirCarteJudgement, Twilight1=Twilight1, Twilight2=Twilight2,
                        RoseCarteFinale=RoseCarteFinale, RoseCarteFinaleAdd=RoseCarteFinaleAdd,
                        FinalCutPre=FinalCutPre, FinalCut=FinalCut, Joker=Joker,
                        BlackJack=BlackJack, BlackJackLast=BlackJackLast,
                        MarkofPhantom=MarkofPhantom, MarkofPhantomFinal=MarkofPhantomFinal,
                        LiftBreak=LiftBreak, SpiderInMirror=SpiderInMirror)
PhantomATK <- Attack(phantom.attacks)
## Phantom - Summoned
phantom.summons <- list(SpiderInMirrorStart=SpiderInMirrorStart, SpiderInMirror1=SpiderInMirror1,
                        SpiderInMirror2=SpiderInMirror2, SpiderInMirror3=SpiderInMirror3,
                        SpiderInMirror4=SpiderInMirror4, SpiderInMirror5=SpiderInMirror5,
                        SpiderInMirrorWait=SpiderInMirrorWait)
PhantomSummoned <- Summoned(phantom.summons)
rm(phantom.attacks, phantom.summons)
## Phantom - DealCycle & Deal Calculation
## Apply attack speed to skill delays.  Rows 1, 2, 9, 11 of PhantomATK
## (UltimateDrive, TempestofCard, FinalCutPre, Joker -- see the list order
## above) keep their raw delays, i.e. they are excluded from the attack-speed
## scaling; presumably these delays are fixed in-game -- confirm.
ATKFinal <- data.frame(PhantomATK)
ATKFinal$Delay[c(-1, -2, -9, -11)] <- Delay(ATKFinal$Delay, PhantomSpec$ATKSpeed)[c(-1, -2, -9, -11)]
## Apply cooldown reduction where each skill allows it.
ATKFinal$CoolTime <- Cooldown(ATKFinal$CoolTime, ATKFinal$CoolReduceAvailable, PhantomSpec$CoolReduceP, PhantomSpec$CoolReduce)
BuffFinal <- data.frame(PhantomBuff)
BuffFinal$CoolTime <- Cooldown(BuffFinal$CoolTime, BuffFinal$CoolReduceAvailable, PhantomSpec$CoolReduceP, PhantomSpec$CoolReduce)
## Buff-duration bonus applies only where BuffDurationAvailable; server lag
## is added as a flat extension for buffs flagged ServerLag.
BuffFinal$Duration <- BuffFinal$Duration + BuffFinal$Duration * ifelse(BuffFinal$BuffDurationAvailable==T, PhantomSpec$BuffDuration / 100, 0) +
  ifelse(BuffFinal$ServerLag==T, General$General$Serverlag, 0)
SummonedFinal <- data.frame(PhantomSummoned)
SummonedFinal$CoolTime <- Cooldown(SummonedFinal$CoolTime, SummonedFinal$CoolReduceAvailable, PhantomSpec$CoolReduceP, PhantomSpec$CoolReduce)
SummonedFinal$Duration <- SummonedFinal$Duration + ifelse(SummonedFinal$SummonedDurationAvailable==T, SummonedFinal$Duration * PhantomSpec$SummonedDuration / 100, 0)
## Phantom - DealCycle
## Empty single-row deal-cycle frame: columns are Skills, Time, plus one
## tracking column per buff.
DealCycle <- c("Skills", "Time", rownames(PhantomBuff))
PhantomDealCycle <- t(rep(0, length(DealCycle)))
colnames(PhantomDealCycle) <- DealCycle
PhantomDealCycle <- data.frame(PhantomDealCycle)
## Build the full Phantom deal cycle.
##   PreDealCycle : empty DealCycle frame (Skills/Time + one column per buff)
##   ATKFinal / BuffFinal / SummonedFinal : delay/cooldown-adjusted tables
##   Spec : job spec (passed through; not read directly in this body)
##   Period : buff re-cast period in seconds; CycleTime : total cycle seconds
## Returns the DealCycle frame with one row per cast.
PhantomCycle <- function(PreDealCycle, ATKFinal, BuffFinal, SummonedFinal, Spec,
                         Period=180, CycleTime=360) {
  ## Buffs/summons in cast-priority order.  Times180 is the number of
  ## scheduled re-casts of each within one Period; 0 means the buff is
  ## instead kept up continuously (re-cast on expiry in the filler loop).
  BuffSummonedPrior <- c("Fury", "CrossOverChain", "UsefulSharpEyes", "UsefulCombatOrders", "UsefulAdvancedBless", "HeroesOath",
                         "FinalCutBuff", "MapleWarriors2", "Bullseye", "ReadyToDie2Stack", "SoulContractLink", "Restraint4")
  Times180 <- c(0, 0, 0, 0, 0, 0,
                2, 1, 1, 2, 2, 1)
  ## Drop AdvancedBless when the current spec does not provide it.
  if(nrow(BuffFinal[rownames(BuffFinal)=="UsefulAdvancedBless", ]) == 0) {
    Times180 <- Times180[BuffSummonedPrior!="UsefulAdvancedBless"]
    BuffSummonedPrior <- BuffSummonedPrior[BuffSummonedPrior!="UsefulAdvancedBless"]
  }
  ## SubTime = seconds between scheduled re-casts (0 for keep-up-always buffs).
  SubTime <- rep(Period, length(BuffSummonedPrior))
  TotalTime <- CycleTime
  for(i in 1:length(BuffSummonedPrior)) {
    SubTime[i] <- SubTime[i] / ifelse(Times180[i]==0, Inf, Times180[i])
  }
  SubTimeUniques <- unique(SubTime)
  SubTimeUniques <- SubTimeUniques[SubTimeUniques > 0]
  ## TimeTypes: every scheduled re-cast time point inside the cycle, sorted.
  TimeTypes <- c()
  for(i in 1:length(SubTimeUniques)) {
    Time <- 0 ; r <- 1
    while(Time < TotalTime) {
      Time <- SubTimeUniques[i] * r
      r <- r + 1
      TimeTypes <- c(TimeTypes, Time)
    }
  }
  TimeTypes <- TimeTypes[TimeTypes < TotalTime]
  TimeTypes <- unique(TimeTypes)
  TimeTypes <- TimeTypes[order(TimeTypes)]
  Buffs <- data.frame(Buff=BuffSummonedPrior, SubTime=SubTime, stringsAsFactors=F)
  Buffs <- subset(Buffs, Buffs$SubTime > 0)
  ## BuffList[[1]]: everything cast at t=0.
  ## BuffList[[k+1]]: buffs whose re-cast interval divides TimeTypes[k].
  BuffList <- list()
  BuffList[[1]] <- BuffSummonedPrior
  for(i in 1:length(TimeTypes)) {
    s <- c()
    for(j in 1:nrow(Buffs)) {
      if(round(TimeTypes[i] / Buffs[j, 2]) == TimeTypes[i] / Buffs[j, 2]) {
        s <- c(s, Buffs[j, 1])
      }
    }
    BuffList[[i+1]] <- s
  }
  ## Cast delays for each buff/summon, aligned with the BuffList entries.
  DelayDataB <- data.frame(Name=rownames(BuffFinal), Delay=BuffFinal$Delay)
  DelayDataS <- data.frame(Name=rownames(SummonedFinal), Delay=SummonedFinal$Delay)
  DelayData <- rbind(DelayDataB, DelayDataS)
  BuffDelays <- list()
  for(i in 1:length(BuffList)) {
    t <- c()
    for(j in 1:length(BuffList[[i]])) {
      for(k in 1:nrow(DelayData)) {
        if(DelayData$Name[k]==BuffList[[i]][j]) {
          t <- c(t, k)
        }
      }
    }
    BuffDelays[[i]] <- DelayData$Delay[t]
  }
  TotalTime <- TotalTime * 1000  ## seconds -> milliseconds from here on
  DealCycle <- PreDealCycle
  ## Opening sequence: cast every buff/summon once.  FinalCut's attack wraps
  ## its buff (pre-delay + buff + hit); HeroesOath is immediately followed by
  ## the SpiderInMirror cast.
  for(i in 1:length(BuffList[[1]])) {
    if(sum(rownames(BuffFinal)==BuffList[[1]][i]) > 0) {
      if(BuffList[[1]][i]=="FinalCutBuff") {
        DealCycle <- DCATK(DealCycle, "FinalCutPre", ATKFinal)
      }
      DealCycle <- DCBuff(DealCycle, BuffList[[1]][i], BuffFinal)
      if(DealCycle$Skills[nrow(DealCycle)] == "FinalCutBuff") {
        DealCycle <- DCATK(DealCycle, "FinalCut", ATKFinal)
      } else if(DealCycle$Skills[nrow(DealCycle)] == "HeroesOath") {
        DealCycle <- DCATK(DealCycle, "SpiderInMirror", ATKFinal)
      }
    } else {
      DealCycle <- DCSummoned(DealCycle, BuffList[[1]][i], SummonedFinal)
    }
  }
  ## Column indices (ColNums) of the always-on buffs, re-cast on expiry
  ## inside the filler loop below.
  SubTimeList <- data.frame(Skills=BuffSummonedPrior, SubTime=SubTime, stringsAsFactors=F)
  NoSubTime <- subset(SubTimeList, SubTimeList$SubTime==0)$Skills
  NoSubTimeBuff <- c()
  for(i in 1:length(NoSubTime)) {
    NoSubTimeBuff <- c(NoSubTimeBuff, NoSubTime[i])
  }
  ColNums <- c()
  for(i in 1:length(NoSubTimeBuff)) {
    for(j in 1:length(colnames(DealCycle))) {
      if(NoSubTimeBuff[i]==colnames(DealCycle)[j]) {
        ColNums[i] <- j
      }
    }
  }
  ## Append a final segment that re-casts the opening buffs at cycle end.
  BuffList[[length(BuffList)+1]] <- BuffList[[1]]
  BuffDelays[[length(BuffDelays)+1]] <- BuffDelays[[1]]
  TimeTypes <- c(0, TimeTypes, TotalTime/1000)
  ## Cooldowns (ms) used to alternate BlackJack / MarkofPhantom / RoseCarte.
  BJCool <- subset(ATKFinal, rownames(ATKFinal)=="MarkofPhantom")$CoolTime * 1000 / 2
  MPCool <- subset(ATKFinal, rownames(ATKFinal)=="MarkofPhantom")$CoolTime * 1000
  RCCool <- subset(ATKFinal, rownames(ATKFinal)=="RoseCarteFinale")$CoolTime * 1000
  BJRemain <- 0 ; RCRemain <- 0 ; MOPDummy <- 0 ; TOCDummy <- 0
  ## Main loop: segment k runs up to the k-th buff re-cast point.
  for(k in 2:length(BuffList)) {
    CycleBuffList <- data.frame(Skills=BuffList[[k]], Delay=BuffDelays[[k]])
    BuffEndTime <- c()
    ## Earliest time each buff in this segment can be re-cast (last cast +
    ## min(sub-period, buff cooldown, summon cooldown) + remaining cast delays).
    for(i in 1:length(BuffList[[k]])) {
      a <- subset(DealCycle, BuffList[[k]][i]==DealCycle$Skills)
      a <- rbind(a, subset(DealCycle, paste(BuffList[[k]][i], "Summoned", sep="")==DealCycle$Skills))
      for(j in 1:nrow(CycleBuffList)) {
        if(CycleBuffList$Skills[j]==BuffList[[k]][i]) {
          Idx <- j
          break
        }
      }
      BuffEndTime[i] <- max(a$Time) +
        min(subset(SubTimeList, SubTimeList$Skills==BuffList[[k]][i])$SubTime * 1000, subset(BuffFinal, rownames(BuffFinal)==BuffList[[k]][i])$CoolTime * 1000,
            subset(SummonedFinal, rownames(SummonedFinal)==BuffList[[k]][i])$CoolTime * 1000) +
        sum(CycleBuffList$Delay[Idx:nrow(CycleBuffList)])
    }
    BuffEndTime <- max(BuffEndTime)
    BuffEndTime <- max(BuffEndTime, TimeTypes[k] * 1000)
    BuffStartTime <- BuffEndTime - sum(CycleBuffList$Delay)
    ## Fill the segment with attacks until the buff re-cast window opens.
    while(DealCycle$Time[nrow(DealCycle)] + DealCycle$Time[1] < BuffStartTime) {
      ## Re-cast any always-on buff with < 3 s of duration remaining.
      for(i in 1:length(ColNums)) {
        if(DealCycle[nrow(DealCycle), ColNums[i]] - DealCycle$Time[1] < 3000) {
          DealCycle <- DCBuff(DealCycle, colnames(DealCycle)[ColNums[i]], BuffFinal)
          BJRemain <- max(0, BJRemain - DealCycle$Time[1])
          RCRemain <- max(0, RCRemain - DealCycle$Time[1])
        }
      }
      ## BlackJack, Mark of Phantom, Lift Break
      ## MOPDummy alternates the two halves of the combo; in the final segment
      ## (k == length(BuffList)) the combo is only started if its cooldown fits
      ## before the closing buff window (+8 s slack).
      if(BJRemain == 0 & MOPDummy == 0 & k==length(BuffList) & DealCycle$Time[nrow(DealCycle)] + DealCycle$Time[1] + MPCool <= BuffStartTime + 8000 |
         BJRemain == 0 & MOPDummy == 0 & k!=length(BuffList)) {
        DealCycle <- DCATK(DealCycle, "BlackJack", ATKFinal)
        BJRemain <- BJCool - DealCycle$Time[1]
        RCRemain <- max(0, RCRemain - DealCycle$Time[1])
        DealCycle <- DCBuff(DealCycle, "TwilightDebuff", BuffFinal)
        BJRemain <- max(0, BJRemain - DealCycle$Time[1])
        RCRemain <- max(0, RCRemain - DealCycle$Time[1])
        DealCycle <- DCATK(DealCycle, "Twilight1", ATKFinal)
        BJRemain <- max(0, BJRemain - DealCycle$Time[1])
        RCRemain <- max(0, RCRemain - DealCycle$Time[1])
        DealCycle <- DCATK(DealCycle, "Twilight2", ATKFinal)
        BJRemain <- max(0, BJRemain - DealCycle$Time[1])
        RCRemain <- max(0, RCRemain - DealCycle$Time[1])
        DealCycle <- DCATK(DealCycle, "MarkofPhantomFinal", ATKFinal)
        BJRemain <- max(0, BJRemain - DealCycle$Time[1])
        RCRemain <- max(0, RCRemain - DealCycle$Time[1])
        DealCycle <- DCATK(DealCycle, "MarkofPhantom", ATKFinal)
        BJRemain <- max(0, BJRemain - DealCycle$Time[1])
        RCRemain <- max(0, RCRemain - DealCycle$Time[1])
        DealCycle <- DCATK(DealCycle, "LiftBreak", ATKFinal)
        BJRemain <- max(0, BJRemain - DealCycle$Time[1])
        RCRemain <- max(0, RCRemain - DealCycle$Time[1])
        ## Joker only while Restraint4 still has >= 7 s left; otherwise try
        ## one TempestofCard per combo if far enough from the buff window.
        if(DealCycle$Restraint4[nrow(DealCycle)] >= 7000) {
          DealCycle <- DCATK(DealCycle, "Joker", ATKFinal)
          BJRemain <- max(0, BJRemain - DealCycle$Time[1])
          RCRemain <- max(0, RCRemain - DealCycle$Time[1])
          DealCycle <- DCBuff(DealCycle, "JokerBuffFail", BuffFinal)
          BJRemain <- max(0, BJRemain - DealCycle$Time[1])
          RCRemain <- max(0, RCRemain - DealCycle$Time[1])
          TOCDummy <- 0
        } else if(TOCDummy == 0 & DealCycle$Time[nrow(DealCycle)] + DealCycle$Time[1] <= BuffStartTime - 10000) {
          DealCycle <- DCATK(DealCycle, "TempestofCard", ATKFinal)
          BJRemain <- max(0, BJRemain - DealCycle$Time[1])
          RCRemain <- max(0, RCRemain - DealCycle$Time[1])
          TOCDummy <- 1
        } else {
          TOCDummy <- 0
        }
        MOPDummy <- 1
      } else if(BJRemain == 0 & MOPDummy == 1 & k==length(BuffList) & DealCycle$Time[nrow(DealCycle)] + DealCycle$Time[1] + BJCool <= BuffStartTime + 8000 |
                BJRemain == 0 & MOPDummy == 1 & k!=length(BuffList)) {
        ## Second half of the combo: no MarkofPhantom / LiftBreak this time.
        DealCycle <- DCATK(DealCycle, "BlackJack", ATKFinal)
        BJRemain <- BJCool - DealCycle$Time[1]
        RCRemain <- max(0, RCRemain - DealCycle$Time[1])
        DealCycle <- DCBuff(DealCycle, "TwilightDebuff", BuffFinal)
        BJRemain <- max(0, BJRemain - DealCycle$Time[1])
        RCRemain <- max(0, RCRemain - DealCycle$Time[1])
        DealCycle <- DCATK(DealCycle, "Twilight1", ATKFinal)
        BJRemain <- max(0, BJRemain - DealCycle$Time[1])
        RCRemain <- max(0, RCRemain - DealCycle$Time[1])
        DealCycle <- DCATK(DealCycle, "Twilight2", ATKFinal)
        BJRemain <- max(0, BJRemain - DealCycle$Time[1])
        RCRemain <- max(0, RCRemain - DealCycle$Time[1])
        if(DealCycle$Restraint4[nrow(DealCycle)] >= 7000) {
          DealCycle <- DCATK(DealCycle, "Joker", ATKFinal)
          BJRemain <- max(0, BJRemain - DealCycle$Time[1])
          RCRemain <- max(0, RCRemain - DealCycle$Time[1])
          DealCycle <- DCBuff(DealCycle, "JokerBuffFail", BuffFinal)
          BJRemain <- max(0, BJRemain - DealCycle$Time[1])
          RCRemain <- max(0, RCRemain - DealCycle$Time[1])
          TOCDummy <- 0
        } else if(TOCDummy == 0 & DealCycle$Time[nrow(DealCycle)] + DealCycle$Time[1] <= BuffStartTime - 10000) {
          DealCycle <- DCATK(DealCycle, "TempestofCard", ATKFinal)
          BJRemain <- max(0, BJRemain - DealCycle$Time[1])
          RCRemain <- max(0, RCRemain - DealCycle$Time[1])
          TOCDummy <- 1
        } else {
          TOCDummy <- 0
        }
        MOPDummy <- 0
      }
      ## Rose Carte Finale (off cooldown; not in the last 10 s of the cycle)
      else if(RCRemain == 0 & DealCycle$Time[nrow(DealCycle)] + DealCycle$Time[1] <= 350000) {
        DealCycle <- DCATK(DealCycle, "RoseCarteFinaleAdd", ATKFinal)
        BJRemain <- max(0, BJRemain - DealCycle$Time[1])
        RCRemain <- RCCool - DealCycle$Time[1]
        DealCycle <- DCATK(DealCycle, "RoseCarteFinale", ATKFinal)
        BJRemain <- max(0, BJRemain - DealCycle$Time[1])
        RCRemain <- max(0, RCRemain - DealCycle$Time[1])
      }
      ## Ultimate Drive (filler)
      else {
        DealCycle <- DCATK(DealCycle, c("UltimateDrive"), ATKFinal)
        BJRemain <- max(0, BJRemain - DealCycle$Time[1])
        RCRemain <- max(0, RCRemain - DealCycle$Time[1])
      }
    }
    ## Re-cast this segment's buffs (skipped for the terminal dummy segment).
    if(k != length(BuffList)) {
      for(i in 1:length(BuffList[[k]])) {
        if(sum(rownames(BuffFinal)==BuffList[[k]][i]) > 0) {
          if(BuffList[[k]][i]=="FinalCutBuff") {
            DealCycle <- DCATK(DealCycle, "FinalCutPre", ATKFinal)
            BJRemain <- max(0, BJRemain - DealCycle$Time[1])
            RCRemain <- max(0, RCRemain - DealCycle$Time[1])
          }
          DealCycle <- DCBuff(DealCycle, BuffList[[k]][i], BuffFinal)
          BJRemain <- max(0, BJRemain - DealCycle$Time[1])
          RCRemain <- max(0, RCRemain - DealCycle$Time[1])
          if(DealCycle$Skills[nrow(DealCycle)] == "FinalCutBuff") {
            DealCycle <- DCATK(DealCycle, "FinalCut", ATKFinal)
            BJRemain <- max(0, BJRemain - DealCycle$Time[1])
            RCRemain <- max(0, RCRemain - DealCycle$Time[1])
          }
        } else {
          DealCycle <- DCSummoned(DealCycle, BuffList[[k]][i], SummonedFinal)
          BJRemain <- max(0, BJRemain - DealCycle$Time[1])
          RCRemain <- max(0, RCRemain - DealCycle$Time[1])
        }
      }
    }
  }
  return(DealCycle)
}
## Post-processes a finished Phantom deal cycle: expands multi-hit attacks into
## individual hit rows and inserts Noir Carte stack procs.
##   DealCycle      - data.frame timeline; col 1 = Skills, col 2 = Time (ms),
##                    cols 3+ = per-buff remaining-time counters.
##   ATKFinal       - finalized attack table consumed by RepATKCycle/DCATK.
##   BuffFinal, SummonedFinal - finalized buff/summon tables (SummonedFinal
##                    feeds DCSpiderInMirror).
##   Spec           - job spec; not referenced in this function's body.
## Returns the expanded DealCycle, re-sorted by Time.
PhantomAddATK <- function(DealCycle, ATKFinal, BuffFinal, SummonedFinal, Spec) {
## Joker
DealCycle <- RepATKCycle(DealCycle, "Joker", 140, 0, ATKFinal)
## BlackJack
DealCycle <- RepATKCycle(DealCycle, "BlackJack", 7, 1200, ATKFinal)
## BlackJack (Last)
## Relabel every 7th BlackJack hit as the distinct "BlackJackLast" hit.
BJ <- 1
for(i in 1:nrow(DealCycle)) {
if(DealCycle$Skills[i]=="BlackJack") {
if(BJ==7) {
DealCycle$Skills[i] <- "BlackJackLast"
BJ <- 1
} else {
BJ <- BJ + 1
}
}
}
## Mark of Phantom
DealCycle <- RepATKCycle(DealCycle, "MarkofPhantom", 7, 660, ATKFinal)
DealCycle <- RepATKCycle(DealCycle, "MarkofPhantomFinal", 2, 1440, ATKFinal)
## Lift Break
## Each cast (renamed LiftBreakStart) is expanded into 7 "LiftBreak" hit rows
## at these fixed offsets (ms) from the cast time.
LiftBreakTime <- c(0, 270, 540, 1770, 1800, 1830, 1860)
DealCycle[DealCycle$Skills=="LiftBreak", ]$Skills <- "LiftBreakStart"
DC2 <- subset(DealCycle, DealCycle$Skills=="LiftBreakStart")
## 1:nrow(DC2) is evaluated once, so rows appended below are not re-expanded.
for(i in 1:nrow(DC2)) {
for(j in 1:length(LiftBreakTime)) {
DC2 <- rbind(DC2, DC2[i, ])
DC2$Time[nrow(DC2)] <- DC2$Time[i] + LiftBreakTime[j]
DC2$Skills[nrow(DC2)] <- "LiftBreak"
}
}
DC2 <- subset(DC2, DC2$Skills=="LiftBreak")
DC2 <- subset(DC2, DC2$Time <= max(DealCycle$Time))
DealCycle <- rbind(DealCycle, DC2)
DealCycle <- DealCycle[order(DealCycle$Time), ]
rownames(DealCycle) <- 1:nrow(DealCycle)
## Recompute the buff-remaining columns (cols 3+) of each inserted LiftBreak
## row from the preceding row, decremented by the elapsed time, floored at 0.
for(i in 3:nrow(DealCycle)) {
if("LiftBreak"==DealCycle[i, 1]) {
DealCycle[i, 3:ncol(DealCycle)] <- DealCycle[i-1, 3:ncol(DealCycle)] - (DealCycle[i, 2] - DealCycle[i-1, 2])
DealCycle[i, 3:ncol(DealCycle)] <- ifelse(DealCycle[i, 3:ncol(DealCycle)]<0, 0, DealCycle[i, 3:ncol(DealCycle)])
}
}
## Rose Carte Finale (AddATK)
DealCycle <- RepATKCycle(DealCycle, "RoseCarteFinaleAdd", 12, 2400, ATKFinal)
## Tempest of Card
DealCycle <- RepATKCycle(DealCycle, "TempestofCard", 56, 0, ATKFinal)
## Spider In Mirror
DealCycle <- DCSpiderInMirror(DealCycle, SummonedFinal)
## Noir Carte
## Listed single-hit skills add 1 stack; BlackJack/BlackJackLast add 3 (and
## spawn 3 NoirCarte rows). At 40 stacks a NoirCarteJudgement row is appended
## and the counter resets (carrying the >40 overflow in the 3-stack branches).
## NOTE(review): 2:nrow(DealCycle) is fixed up-front, so rows appended inside
## this loop are not themselves revisited.
DealCycle$NoirCarteStack[1] <- 0
for(i in 2:nrow(DealCycle)) {
if(sum(DealCycle$Skills[i]==c("UltimateDrive", "TempestofCard", "Joker", "LiftBreak", "MarkofPhantom", "MarkofPhantomFinal",
"RoseCarteFinale", "Twilight1", "FinalCut", "SpiderInMirror"))==1) {
DealCycle$NoirCarteStack[i] <- DealCycle$NoirCarteStack[i-1] + 1
DealCycle <- rbind(DealCycle, DealCycle[i, ])
DealCycle$Skills[nrow(DealCycle)] <- "NoirCarte"
if(DealCycle$NoirCarteStack[i] == 40) {
DealCycle <- rbind(DealCycle, DealCycle[i, ])
DealCycle$Skills[nrow(DealCycle)] <- "NoirCarteJudgement"
DealCycle$NoirCarteStack[i] <- 0
}
} else if(DealCycle$Skills[i]=="BlackJack") {
DealCycle$NoirCarteStack[i] <- DealCycle$NoirCarteStack[i-1] + 3
DealCycle <- rbind(DealCycle, DealCycle[i, ])
DealCycle$Skills[nrow(DealCycle)] <- "NoirCarte"
DealCycle <- rbind(DealCycle, DealCycle[i, ])
DealCycle$Skills[nrow(DealCycle)] <- "NoirCarte"
DealCycle <- rbind(DealCycle, DealCycle[i, ])
DealCycle$Skills[nrow(DealCycle)] <- "NoirCarte"
if(DealCycle$NoirCarteStack[i] >= 40) {
DealCycle <- rbind(DealCycle, DealCycle[i, ])
DealCycle$Skills[nrow(DealCycle)] <- "NoirCarteJudgement"
DealCycle$NoirCarteStack[i] <- DealCycle$NoirCarteStack[i] - 40
}
} else if(DealCycle$Skills[i]=="BlackJackLast") {
DealCycle$NoirCarteStack[i] <- DealCycle$NoirCarteStack[i-1] + 3
DealCycle <- rbind(DealCycle, DealCycle[i, ])
DealCycle$Skills[nrow(DealCycle)] <- "NoirCarte"
DealCycle <- rbind(DealCycle, DealCycle[i, ])
DealCycle$Skills[nrow(DealCycle)] <- "NoirCarte"
DealCycle <- rbind(DealCycle, DealCycle[i, ])
DealCycle$Skills[nrow(DealCycle)] <- "NoirCarte"
if(DealCycle$NoirCarteStack[i] >= 40) {
DealCycle <- rbind(DealCycle, DealCycle[i, ])
DealCycle$Skills[nrow(DealCycle)] <- "NoirCarteJudgement"
DealCycle$NoirCarteStack[i] <- DealCycle$NoirCarteStack[i] - 40
}
} else {
DealCycle$NoirCarteStack[i] <- DealCycle$NoirCarteStack[i-1]
}
}
DealCycle <- DealCycle[order(DealCycle$Time), ]
rownames(DealCycle) <- 1:nrow(DealCycle)
## The stack counter is zeroed once the NoirCarte/Judgement rows exist --
## presumably only the inserted rows matter downstream. TODO confirm.
DealCycle$NoirCarteStack <- 0
return(DealCycle)
}
## Build the full Phantom timeline: 180 s rebuff period over a 360 s cycle,
## then finalize it and expand multi-hit attacks / Noir Carte procs.
PhantomDealCycle <- PhantomCycle(PreDealCycle=PhantomDealCycle,
ATKFinal=ATKFinal,
BuffFinal=BuffFinal,
SummonedFinal=SummonedFinal,
Spec=PhantomSpec,
Period=180,
CycleTime=360)
PhantomDealCycle <- DealCycleFinal(PhantomDealCycle)
PhantomDealCycle <- PhantomAddATK(PhantomDealCycle, ATKFinal, BuffFinal, SummonedFinal, PhantomSpec)
PhantomDealCycleReduction1 <- DealCycleReduction(PhantomDealCycle)
## Variant 2: treat the Joker buff as succeeding where cycle 1 assumed failure
## (JokerBuff takes JokerBuffFail's timing; Fail column cleared). The two
## variants are blended 60/40 below.
PhantomDealCycle2 <- PhantomDealCycle
PhantomDealCycle2$JokerBuff <- PhantomDealCycle2$JokerBuffFail
PhantomDealCycle2$JokerBuffFail <- 0
PhantomDealCycleReduction2 <- DealCycleReduction(PhantomDealCycle2)
## Locate this spec set (Idx1) and the Phantom row (Idx2) in the cached
## optimization tables.
Idx1 <- c() ; Idx2 <- c()
for(i in 1:length(PotentialOpt)) {
if(names(PotentialOpt)[i]==DPMCalcOption$SpecSet) {
Idx1 <- i
}
}
for(i in 1:nrow(PotentialOpt[[Idx1]])) {
if(rownames(PotentialOpt[[Idx1]])[i]=="Phantom") {
Idx2 <- i
}
}
## Potential optimization: recompute when requested, otherwise reuse the cache.
if(DPMCalcOption$Optimization==T) {
PhantomSpecOpt1 <- ResetOptimization1(list(PhantomDealCycleReduction1, PhantomDealCycleReduction2), ATKFinal, BuffFinal, SummonedFinal, PhantomSpec, PhantomUnionRemained,
rep(max(PhantomDealCycle$Time), 2), c(0.6, 0.4))
PotentialOpt[[Idx1]][Idx2, ] <- PhantomSpecOpt1[1, 1:3]
} else {
PhantomSpecOpt1 <- PotentialOpt[[Idx1]][Idx2, ]
}
PhantomSpecOpt <- OptDataAdd(PhantomSpec, PhantomSpecOpt1, "Potential", PhantomBase$CRROver, DemonAvenger=F)
## Hyper-stat optimization, same cache-or-recompute pattern.
if(DPMCalcOption$Optimization==T) {
PhantomSpecOpt2 <- ResetOptimization2(list(PhantomDealCycleReduction1, PhantomDealCycleReduction2), ATKFinal, BuffFinal, SummonedFinal, PhantomSpecOpt, PhantomHyperStatBase, PhantomBase$ChrLv, PhantomBase$CRROver,
HyperStanceLv=0, rep(max(PhantomDealCycle$Time), 2), c(0.6, 0.4))
HyperStatOpt[[Idx1]][Idx2, c(1, 3:10)] <- PhantomSpecOpt2[1, ]
} else {
PhantomSpecOpt2 <- HyperStatOpt[[Idx1]][Idx2, ]
}
PhantomSpecOpt <- OptDataAdd(PhantomSpecOpt, PhantomSpecOpt2, "HyperStat", PhantomBase$CRROver, DemonAvenger=F)
## Final DPM: both cycle variants weighted 0.6 / 0.4, normalized to per-minute
## (Time is in ms; /60000 converts to minutes).
PhantomFinalDPM <- ResetDealCalc(DealCycles=list(PhantomDealCycleReduction1, PhantomDealCycleReduction2),
ATKFinal, BuffFinal, SummonedFinal, PhantomSpecOpt, rep(max(PhantomDealCycle$Time), 2), c(0.6, 0.4))
PhantomFinalDPMwithMax <- ResetDealCalcWithMaxDMR(DealCycles=list(PhantomDealCycleReduction1, PhantomDealCycleReduction2),
ATKFinal, BuffFinal, SummonedFinal, PhantomSpecOpt, rep(max(PhantomDealCycle$Time), 2), c(0.6, 0.4))
PhantomDeal1 <- DealCalcWithMaxDMR(PhantomDealCycle, ATKFinal, BuffFinal, SummonedFinal, PhantomSpecOpt)
PhantomDeal2 <- DealCalcWithMaxDMR(PhantomDealCycle2, ATKFinal, BuffFinal, SummonedFinal, PhantomSpecOpt)
## Write results into the shared output table (data.table::set by row index):
## row 1 = DPM at max damage cap, row 2 = the uncapped surplus.
set(get(DPMCalcOption$DataName), as.integer(1), "Phantom", sum(na.omit(PhantomFinalDPMwithMax)) / (max(PhantomDealCycle$Time) / 60000))
set(get(DPMCalcOption$DataName), as.integer(2), "Phantom", sum(na.omit(PhantomFinalDPM)) / (max(PhantomDealCycle$Time) / 60000) - sum(na.omit(PhantomFinalDPMwithMax)) / (max(PhantomDealCycle$Time) / 60000))
PhantomDealRatio <- ResetDealRatio(DealCycles=list(PhantomDealCycle, PhantomDealCycle2), DealDatas=list(PhantomDeal1, PhantomDeal2),
rep(max(PhantomDealCycle$Time), 2), c(0.6, 0.4))
## Per-skill deal table used for the burst metrics written below.
PhantomDealData <- data.frame(PhantomDealCycle$Skills, PhantomDealCycle$Time, PhantomDealCycle$Restraint4, PhantomDeal1)
colnames(PhantomDealData) <- c("Skills", "Time", "R4", "Deal")
set(get(DPMCalcOption$DataName), as.integer(3), "Phantom", Deal_RR(PhantomDealData))
set(get(DPMCalcOption$DataName), as.integer(4), "Phantom", Deal_40s(PhantomDealData)) | /job/Phantom.R | no_license | SouICry/Maplestory_DPM | R | false | false | 36,656 | r | ## Phantom - Data
## Phantom - VMatrix
## V-matrix core selection: enhanced active skills at level 25 (one at 1) plus
## the shared Thief/Heroes common V skills, and useful-skill cores.
PhantomCoreBase <- CoreBuilder(ActSkills=c("Joker", "BlackJack", "MarkofPhantom", "LiftBreak",
CommonV("Thief", "Heroes")[2:5]),
ActSkillsLv=c(25, 25, 25, 25, 25, 1, 25, 25),
UsefulSkills=c("SharpEyes", "CombatOrders"),
SpecSet=get(DPMCalcOption$SpecSet),
VPassiveList=PhantomVPassive,
VPassivePrior=PhantomVPrior,
SelfBind=F)
## Assemble the matrix (passive/active/useful cores) for this spec set.
PhantomCore <- MatrixSet(PasSkills=PhantomCoreBase$PasSkills$Skills,
PasLvs=PhantomCoreBase$PasSkills$Lv,
PasMP=PhantomCoreBase$PasSkills$MP,
ActSkills=PhantomCoreBase$ActSkills$Skills,
ActLvs=PhantomCoreBase$ActSkills$Lv,
ActMP=PhantomCoreBase$ActSkills$MP,
UsefulSkills=PhantomCoreBase$UsefulSkills,
UsefulLvs=20,
UsefulMP=0,
SpecSet=get(DPMCalcOption$SpecSet),
SpecialCore=PhantomCoreBase$SpecialCoreUse)
## Phantom - Basic Info
## Character baseline: Cane weapon, Card sub-weapon, Heroes emblem; ability,
## link and monster-life presets are looked up per spec set.
PhantomBase <- JobBase(ChrInfo=ChrInfo,
MobInfo=get(DPMCalcOption$MobSet),
SpecSet=get(DPMCalcOption$SpecSet),
Job="Phantom",
CoreData=PhantomCore,
BuffDurationNeeded=57,
AbilList=FindJob(get(paste(DPMCalcOption$SpecSet, "Ability", sep="")), "Phantom"),
LinkList=FindJob(get(paste(DPMCalcOption$SpecSet, "Link", sep="")), "Phantom"),
MonsterLife=get(FindJob(MonsterLifePreSet, "Phantom")[DPMCalcOption$MonsterLifeLevel][1, 1]),
Weapon=WeaponUpgrade(1, DPMCalcOption$WeaponSF, 4, 0, 0, 0, 0, 3, 0, 0, "Cane", get(DPMCalcOption$SpecSet)$WeaponType)[, 1:16],
WeaponType=get(DPMCalcOption$SpecSet)$WeaponType,
SubWeapon=SubWeapon[rownames(SubWeapon)=="Card", ],
Emblem=Emblem[rownames(Emblem)=="Heroes", ],
CoolReduceHat=as.logical(FindJob(get(paste(DPMCalcOption$SpecSet, "CoolReduceHat", sep="")), "Phantom")))
## Phantom - Passive
{option <- factor(c("SubStat1"), levels=PSkill)
value <- c(40)
HighDexterity <- data.frame(option, value)
option <- factor(c("MainStat"), levels=PSkill)
value <- c(60)
LuckMonopoly <- data.frame(option, value)
option <- factor(c("ATKSpeed", "MainStat"), levels=PSkill)
value <- c(2, 20)
CaneAcceleration <- data.frame(option, value)
option <- factor(c("MainStat"), levels=PSkill)
value <- c(60)
LuckofPhantomThief <- data.frame(option, value)
option <- factor(c("ATK"), levels=PSkill)
value <- c(40)
MoonLight <- data.frame(option, value)
option <- factor(c("FDR", "CRR"), levels=PSkill)
value <- c(30, 35)
AcuteSense <- data.frame(option, value)
option <- factor(c("BDR", "IGR"), levels=PSkill)
value <- c(30 + PhantomBase$PSkillLv, 30 + PhantomBase$PSkillLv)
PrayofAria <- data.frame(option, value)
option <- factor(c("Mastery", "ATK", "CDMR", "FDR"), levels=PSkill)
value <- c(70 + ceiling(PhantomBase$PSkillLv/2), 40 + PhantomBase$PSkillLv, 15, 25 + floor(PhantomBase$PSkillLv/2))
CaneExpert <- data.frame(option, value)
option <- factor(c("ATK"), levels=PSkill)
value <- c(GetCoreLv(PhantomCore, "ReadyToDie"))
ReadytoDiePassive <- data.frame(option, value)
option <- factor(c("ATK"), levels=PSkill)
value <- c(GetCoreLv(PhantomCore, "Blink"))
BlinkPassive <- data.frame(option, value)
option <- factor(c("MainStat", "SubStat1"), levels=PSkill)
value <- c(rep(GetCoreLv(PhantomCore, "RopeConnect"), 2))
RopeConnectPassive <- data.frame(option, value)}
PhantomPassive <- Passive(list(HighDexterity=HighDexterity, LuckMonopoly=LuckMonopoly, CaneAcceleration=CaneAcceleration, LuckofPhantomThief=LuckofPhantomThief, MoonLight=MoonLight, AcuteSense=AcuteSense,
PrayofAria=PrayofAria, CaneExpert=CaneExpert, ReadytoDiePassive=ReadytoDiePassive, BlinkPassive=BlinkPassive, RopeConnectPassive=RopeConnectPassive))
## Phantom - Buff
{option <- factor("ATK", levels=BSkill)
value <- c(30)
info <- c(180 * (100 + PhantomBase$BuffDurationNeeded + 10) / 100, NA, 0, F, NA, NA, T)
info <- data.frame(BInfo, info)
colnames(info) <- c("option", "value")
Fury <- rbind(data.frame(option, value), info)
option <- factor("FDR", levels=BSkill)
value <- c(20)
info <- c(200 * (100 + PhantomBase$BuffDurationNeeded + 10) / 100, NA, 0, F, NA, NA, T)
info <- data.frame(BInfo, info)
colnames(info) <- c("option", "value")
CrossOverChain <- rbind(data.frame(option, value), info)
option <- factor("MainStat", levels=BSkill)
value <- c(floor((PhantomBase$ChrLv * 5 + 18) * (0.15 + 0.01 * ceiling(PhantomBase$SkillLv/2))))
info <- c(900 + 30 * PhantomBase$SkillLv, NA, 0, T, NA, NA, T)
info <- data.frame(BInfo, info)
colnames(info) <- c("option", "value")
MapleSoldier <- rbind(data.frame(option, value), info)
option <- factor("FDR", levels=BSkill)
value <- c(40 + PhantomBase$SkillLv)
info <- c(60 * (100 + PhantomBase$BuffDurationNeeded + 10) / 100, 90, 0, F, NA, NA, T)
info <- data.frame(BInfo, info)
colnames(info) <- c("option", "value")
FinalCutBuff <- rbind(data.frame(option, value), info)
option <- factor(c("IGR"), levels=BSkill)
value <- c(20 + floor(PhantomBase$SkillLv/2))
info <- c(15, 240, 0, F, NA, NA, F)
info <- data.frame(BInfo, info)
colnames(info) <- c("option", "value")
TwilightDebuff <- rbind(data.frame(option, value), info)
option <- factor("BDR", levels=BSkill)
value <- c(5)
info <- c(120, 120, 0, F, F, F, T)
info <- data.frame(BInfo, info)
colnames(info) <- c("option", "value")
HeroesOath <- rbind(data.frame(option, value), info)
option <- factor(c("CRR", "CDMR", "IGR", "BDR"), levels=BSkill)
value <- c(20, 10, 20, 20)
info <- c(30, 180, 960, F, F, F, T)
info <- data.frame(BInfo, info)
colnames(info) <- c("option", "value")
Bullseye <- rbind(data.frame(option, value), info)
Useful <- UsefulSkills(PhantomCore)
UsefulSharpEyes <- Useful$UsefulSharpEyes
UsefulCombatOrders <- Useful$UsefulCombatOrders
if(sum(names(Useful)=="UsefulAdvancedBless") >= 1) {
UsefulAdvancedBless <- Useful$UsefulAdvancedBless
}
option <- factor(c("FDR"), levels=BSkill)
value <- c(ceiling(GetCoreLv(PhantomCore, "Joker")/5))
info <- c(30, 150, 1620, F, T, F, T)
info <- data.frame(BInfo, info)
colnames(info) <- c("option", "value")
JokerBuff <- rbind(data.frame(option, value), info)
option <- factor(levels=BSkill)
value <- c()
info <- c(30, 150, 1620, F, T, F, T)
info <- data.frame(BInfo, info)
colnames(info) <- c("option", "value")
JokerBuffFail <- rbind(data.frame(option, value), info)
option <- factor("FDR", levels=BSkill)
value <- c(10 + floor(GetCoreLv(PhantomCore, "ReadyToDie")/10))
info <- c(30, 90 - floor(GetCoreLv(PhantomCore, "ReadyToDie")/2), 780, F, T, F, T)
info <- data.frame(BInfo, info)
colnames(info) <- c("option", "value")
ReadyToDie1Stack <- rbind(data.frame(option, value), info)
option <- factor("FDR", levels=BSkill)
value <- c(30 + floor(GetCoreLv(PhantomCore, "ReadyToDie")/5))
info <- c((30 - 0.78)/2 + 0.78, 90 - floor(GetCoreLv(PhantomCore, "ReadyToDie")/2), 1560, F, T, F, T)
info <- data.frame(BInfo, info)
colnames(info) <- c("option", "value")
ReadyToDie2Stack <- rbind(data.frame(option, value), info)
option <- factor(c("MainStat", "BDR"), levels=BSkill)
value <- c(floor(((1 + 0.1 * GetCoreLv(PhantomCore, "MapleWarriors2")) * MapleSoldier[1, 2]) * PhantomBase$MainStatP), 5 + floor(GetCoreLv(PhantomCore, "MapleWarriors2")/2))
info <- c(60, 180, 630, F, T, F, T)
info <- data.frame(BInfo, info)
colnames(info) <- c("option", "value")
MapleWarriors2 <- rbind(data.frame(option, value), info)
option <- factor(levels=BSkill)
value <- c()
info <- c(0, 1, 0, F, F, F, F)
info <- data.frame(BInfo, info)
colnames(info) <- c("option", "value")
NoirCarteStack <- rbind(data.frame(option, value), info)}
PhantomBuff <- list(Fury=Fury, CrossOverChain=CrossOverChain, MapleSoldier=MapleSoldier, FinalCutBuff=FinalCutBuff,
TwilightDebuff=TwilightDebuff, HeroesOath=HeroesOath, Bullseye=Bullseye, UsefulSharpEyes=UsefulSharpEyes, UsefulCombatOrders=UsefulCombatOrders,
JokerBuff=JokerBuff, JokerBuffFail=JokerBuffFail, ReadyToDie1Stack=ReadyToDie1Stack, ReadyToDie2Stack=ReadyToDie2Stack, MapleWarriors2=MapleWarriors2,
NoirCarteStack=NoirCarteStack, Restraint4=Restraint4, SoulContractLink=SoulContractLink)
if(sum(names(Useful)=="UsefulAdvancedBless") >= 1) {
PhantomBuff[[length(PhantomBuff)+1]] <- UsefulAdvancedBless
names(PhantomBuff)[[length(PhantomBuff)]] <- "UsefulAdvancedBless"
}
PhantomBuff <- Buff(PhantomBuff)
PhantomAllTimeBuff <- AllTimeBuff(PhantomBuff)
## PetBuff : Fury(1080ms), CrossOverChain(720ms), MapleSoldier(0ms), UsefulCombatOrders(1500ms), UsefulSharpEyes(900ms), (UsefulAdvancedBless)
## Phantom - Union & HyperStat & SoulWeapon
## Combine base, passives and always-on buffs into the final spec, then unpack
## the auxiliary pieces before overwriting PhantomSpec with the Spec slot.
PhantomSpec <- JobSpec(JobBase=PhantomBase,
Passive=PhantomPassive,
AllTimeBuff=PhantomAllTimeBuff,
MobInfo=get(DPMCalcOption$MobSet),
SpecSet=get(DPMCalcOption$SpecSet),
WeaponName="Cane",
UnionStance=0)
PhantomUnionRemained <- PhantomSpec$UnionRemained
PhantomHyperStatBase <- PhantomSpec$HyperStatBase
PhantomCoolReduceType <- PhantomSpec$CoolReduceType
PhantomSpec <- PhantomSpec$Spec
## Phantom - Spider In Mirror
## Shared V-skill data keyed by core level; unpacked into the individual
## attack/summon tables referenced by Attack()/Summoned() below.
SIM <- SIMData(GetCoreLv(PhantomCore, "SpiderInMirror"))
SpiderInMirror <- SIM$SpiderInMirror
SpiderInMirrorStart <- SIM$SpiderInMirrorStart
SpiderInMirror1 <- SIM$SpiderInMirror1
SpiderInMirror2 <- SIM$SpiderInMirror2
SpiderInMirror3 <- SIM$SpiderInMirror3
SpiderInMirror4 <- SIM$SpiderInMirror4
SpiderInMirror5 <- SIM$SpiderInMirror5
SpiderInMirrorWait <- SIM$SpiderInMirrorWait
## Phantom - Attacks
{option <- factor(c("IGR", "BDR", "FDR"), levels=ASkill)
value <- c(IGRCalc(c(20, ifelse(GetCoreLv(PhantomCore, "UltimateDrive")>=40, 20, 0))), 20, 2 * GetCoreLv(PhantomCore, "UltimateDrive"))
info <- c(140 + PhantomSpec$SkillLv, 3, 150, NA, NA, NA, NA, F)
info <- data.frame(AInfo, info)
colnames(info) <- c("option", "value")
UltimateDrive <- rbind(data.frame(option, value), info)
option <- factor(c("IGR", "BDR", "FDR"), levels=ASkill)
value <- c(ifelse(GetCoreLv(PhantomCore, "TempestofCard")>=40, 20, 0), 20, 2 * GetCoreLv(PhantomCore, "TempestofCard"))
info <- c(200 + 2 * PhantomSpec$SkillLv, 3, 10000, 180, 10 + Cooldown(18, T, 20 + PhantomSpec$CoolReduceP, PhantomSpec$CoolReduce), F, T, F)
info <- data.frame(AInfo, info)
colnames(info) <- c("option", "value")
TempestofCard <- rbind(data.frame(option, value), info)
option <- factor(c("IGR", "FDR"), levels=ASkill)
value <- c(ifelse(GetCoreLv(PhantomCore, "NoirCarte")>=40, 20, 0), 2 * GetCoreLv(PhantomCore, "NoirCarte"))
info <- c(270, 1, 0, NA, NA, NA, NA, F)
info <- data.frame(AInfo, info)
colnames(info) <- c("option", "value")
NoirCarte <- rbind(data.frame(option, value), info)
option <- factor(c("IGR", "FDR"), levels=ASkill)
value <- c(ifelse(GetCoreLv(PhantomCore, "NoirCarte")>=40, 20, 0), 2 * GetCoreLv(PhantomCore, "NoirCarte"))
info <- c(270, 10, 0, NA, NA, NA, NA, F)
info <- data.frame(AInfo, info)
colnames(info) <- c("option", "value")
NoirCarteJudgement <- rbind(data.frame(option, value), info)
option <- factor(c("IGR", "FDR"), levels=ASkill)
value <- c(ifelse(GetCoreLv(PhantomCore, "Twilight")>=40, 20, 0), 2 * GetCoreLv(PhantomCore, "Twilight"))
info <- c(450 + 3 * PhantomBase$SkillLv, 3, 180, NA, NA, NA, NA, F)
info <- data.frame(AInfo, info)
colnames(info) <- c("option", "value")
Twilight1 <- rbind(data.frame(option, value), info)
option <- factor(levels=ASkill)
value <- c()
info <- c(0, 0, 540, NA, NA, NA, NA, F)
info <- data.frame(AInfo, info)
colnames(info) <- c("option", "value")
Twilight2 <- rbind(data.frame(option, value), info)
option <- factor(c("IGR", "FDR"), levels=ASkill)
value <- c(ifelse(GetCoreLv(PhantomCore, "RoseCarteFinale")>=40, 20, 0), 2 * GetCoreLv(PhantomCore, "RoseCarteFinale"))
info <- c(700, 6, 1200, NA, 30, F, F, F)
info <- data.frame(AInfo, info)
colnames(info) <- c("option", "value")
RoseCarteFinale <- rbind(data.frame(option, value), info)
option <- factor(c("IGR", "FDR"), levels=ASkill)
value <- c(ifelse(GetCoreLv(PhantomCore, "RoseCarteFinale")>=40, 20, 0), 2 * GetCoreLv(PhantomCore, "RoseCarteFinale"))
info <- c(200, 2, 0, 930, 30, F, F, F)
info <- data.frame(AInfo, info)
colnames(info) <- c("option", "value")
RoseCarteFinaleAdd <- rbind(data.frame(option, value), info) ## 12 Hits, FirstATK : 2400
option <- factor(levels=ASkill)
value <- c()
info <- c(0, 0, 1000, NA, NA, NA, NA, F)
info <- data.frame(AInfo, info)
colnames(info) <- c("option", "value")
FinalCutPre <- rbind(data.frame(option, value), info)
option <- factor(c("IGR", "FDR"), levels=ASkill)
value <- c(ifelse(GetCoreLv(PhantomCore, "TalentofPhantomThief4")>=40, 20, 0), 2 * GetCoreLv(PhantomCore, "TalentofPhantomThief4"))
info <- c((2000 + 20 * PhantomSpec$SkillLv)/ 1.3 * 1.2, 1, 180, NA, NA, NA, NA, F)
info <- data.frame(AInfo, info)
colnames(info) <- c("option", "value")
FinalCut <- rbind(data.frame(option, value), info)
option <- factor(levels=ASkill)
value <- c()
info <- c(240 + 9 * GetCoreLv(PhantomCore, "Joker"), 3, 6000 + floor(GetCoreLv(PhantomCore, "Joker")/25) * 1000, 50, 150, T, F, F)
info <- data.frame(AInfo, info)
colnames(info) <- c("option", "value")
Joker <- rbind(data.frame(option, value), info)
option <- factor(levels=ASkill)
value <- c()
info <- c(600 + 24 * GetCoreLv(PhantomCore, "BlackJack"), 3, 760, 450, 15, T, F, F)
info <- data.frame(AInfo, info)
colnames(info) <- c("option", "value")
BlackJack <- rbind(data.frame(option, value), info) ## FirstATK : 1200
option <- factor(levels=ASkill)
value <- c()
info <- c(800 + 32 * GetCoreLv(PhantomCore, "BlackJack"), 18, 0, 0, 15, T, F, F)
info <- data.frame(AInfo, info)
colnames(info) <- c("option", "value")
BlackJackLast <- rbind(data.frame(option, value), info)
option <- factor(levels=ASkill)
value <- c()
info <- c(300 + 12 * GetCoreLv(PhantomCore, "MarkofPhantom"), 6, 900, 75, 30, T, F, F)
info <- data.frame(AInfo, info)
colnames(info) <- c("option", "value")
MarkofPhantom <- rbind(data.frame(option, value), info) ## FirstATK : 660
option <- factor(levels=ASkill)
value <- c()
info <- c(485 + 19 * GetCoreLv(PhantomCore, "MarkofPhantom"), 15, 0, 30, 30, T, F, F)
info <- data.frame(AInfo, info)
colnames(info) <- c("option", "value")
MarkofPhantomFinal <- rbind(data.frame(option, value), info) ## FirstATK : 1440
option <- factor(levels=ASkill)
value <- c()
info <- c(400 + 16 * GetCoreLv(PhantomCore, "LiftBreak"), 7, 990, 0, 30, T, F, F)
info <- data.frame(AInfo, info)
colnames(info) <- c("option", "value")
LiftBreak <- rbind(data.frame(option, value), info) ## FirstATK : 0, Delays : 270, 270, 1230, 30, 30, 30
}
PhantomATK <- Attack(list(UltimateDrive=UltimateDrive, TempestofCard=TempestofCard, NoirCarte=NoirCarte, NoirCarteJudgement=NoirCarteJudgement, Twilight1=Twilight1, Twilight2=Twilight2,
RoseCarteFinale=RoseCarteFinale, RoseCarteFinaleAdd=RoseCarteFinaleAdd, FinalCutPre=FinalCutPre, FinalCut=FinalCut, Joker=Joker, BlackJack=BlackJack, BlackJackLast=BlackJackLast,
MarkofPhantom=MarkofPhantom, MarkofPhantomFinal=MarkofPhantomFinal, LiftBreak=LiftBreak, SpiderInMirror=SpiderInMirror))
## Phantom - Summoned
PhantomSummoned <- Summoned(list(SpiderInMirrorStart=SpiderInMirrorStart, SpiderInMirror1=SpiderInMirror1, SpiderInMirror2=SpiderInMirror2, SpiderInMirror3=SpiderInMirror3,
SpiderInMirror4=SpiderInMirror4, SpiderInMirror5=SpiderInMirror5, SpiderInMirrorWait=SpiderInMirrorWait))
## Phantom - DealCycle & Deal Calculation
## Finalize the tables: apply attack-speed scaling to delays (skipping rows
## 1, 2, 9, 11 -- presumably fixed-delay skills, TODO confirm which), apply
## cooldown reduction, and extend buff/summon durations.
ATKFinal <- data.frame(PhantomATK)
ATKFinal$Delay[c(-1, -2, -9, -11)] <- Delay(ATKFinal$Delay, PhantomSpec$ATKSpeed)[c(-1, -2, -9, -11)]
ATKFinal$CoolTime <- Cooldown(ATKFinal$CoolTime, ATKFinal$CoolReduceAvailable, PhantomSpec$CoolReduceP, PhantomSpec$CoolReduce)
BuffFinal <- data.frame(PhantomBuff)
BuffFinal$CoolTime <- Cooldown(BuffFinal$CoolTime, BuffFinal$CoolReduceAvailable, PhantomSpec$CoolReduceP, PhantomSpec$CoolReduce)
## Duration is stretched by buff-duration %, plus server lag where flagged.
BuffFinal$Duration <- BuffFinal$Duration + BuffFinal$Duration * ifelse(BuffFinal$BuffDurationAvailable==T, PhantomSpec$BuffDuration / 100, 0) +
ifelse(BuffFinal$ServerLag==T, General$General$Serverlag, 0)
SummonedFinal <- data.frame(PhantomSummoned)
SummonedFinal$CoolTime <- Cooldown(SummonedFinal$CoolTime, SummonedFinal$CoolReduceAvailable, PhantomSpec$CoolReduceP, PhantomSpec$CoolReduce)
SummonedFinal$Duration <- SummonedFinal$Duration + ifelse(SummonedFinal$SummonedDurationAvailable==T, SummonedFinal$Duration * PhantomSpec$SummonedDuration / 100, 0)
## Phantom - DealCycle
## Seed timeline: a single all-zero row with one column per buff.
DealCycle <- c("Skills", "Time", rownames(PhantomBuff))
PhantomDealCycle <- t(rep(0, length(DealCycle)))
colnames(PhantomDealCycle) <- DealCycle
PhantomDealCycle <- data.frame(PhantomDealCycle)
## Builds the Phantom skill/buff timeline ("deal cycle").
##   PreDealCycle - seed data.frame (Skills, Time, one column per buff).
##   ATKFinal/BuffFinal/SummonedFinal - finalized stat tables (ms units after
##                  the *1000 conversions below).
##   Spec         - job spec; not referenced in this function's body.
##   Period       - rebuff period in seconds (default 180).
##   CycleTime    - total cycle length in seconds (default 360).
## Returns the completed DealCycle data.frame.
PhantomCycle <- function(PreDealCycle, ATKFinal, BuffFinal, SummonedFinal, Spec,
Period=180, CycleTime=360) {
## Buffs/summons cast at each rebuff window, in cast order. Times180[i] is the
## number of casts of BuffSummonedPrior[i] per Period; 0 = kept up permanently
## via the opportunistic refresh loop below.
BuffSummonedPrior <- c("Fury", "CrossOverChain", "UsefulSharpEyes", "UsefulCombatOrders", "UsefulAdvancedBless", "HeroesOath",
"FinalCutBuff", "MapleWarriors2", "Bullseye", "ReadyToDie2Stack", "SoulContractLink", "Restraint4")
Times180 <- c(0, 0, 0, 0, 0, 0,
2, 1, 1, 2, 2, 1)
## Drop AdvancedBless when its core is absent from this build.
if(nrow(BuffFinal[rownames(BuffFinal)=="UsefulAdvancedBless", ]) == 0) {
Times180 <- Times180[BuffSummonedPrior!="UsefulAdvancedBless"]
BuffSummonedPrior <- BuffSummonedPrior[BuffSummonedPrior!="UsefulAdvancedBless"]
}
## SubTime[i] = seconds between casts of buff i (Inf-division yields 0 for
## permanent buffs, which are filtered out of the schedule below).
SubTime <- rep(Period, length(BuffSummonedPrior))
TotalTime <- CycleTime
for(i in 1:length(BuffSummonedPrior)) {
SubTime[i] <- SubTime[i] / ifelse(Times180[i]==0, Inf, Times180[i])
}
## Enumerate every distinct rebuff instant inside the cycle.
SubTimeUniques <- unique(SubTime)
SubTimeUniques <- SubTimeUniques[SubTimeUniques > 0]
TimeTypes <- c()
for(i in 1:length(SubTimeUniques)) {
Time <- 0 ; r <- 1
while(Time < TotalTime) {
Time <- SubTimeUniques[i] * r
r <- r + 1
TimeTypes <- c(TimeTypes, Time)
}
}
TimeTypes <- TimeTypes[TimeTypes < TotalTime]
TimeTypes <- unique(TimeTypes)
TimeTypes <- TimeTypes[order(TimeTypes)]
## BuffList[[1]] = opening full cast; BuffList[[i+1]] = buffs whose interval
## divides rebuff instant TimeTypes[i] exactly.
Buffs <- data.frame(Buff=BuffSummonedPrior, SubTime=SubTime, stringsAsFactors=F)
Buffs <- subset(Buffs, Buffs$SubTime > 0)
BuffList <- list()
BuffList[[1]] <- BuffSummonedPrior
for(i in 1:length(TimeTypes)) {
s <- c()
for(j in 1:nrow(Buffs)) {
if(round(TimeTypes[i] / Buffs[j, 2]) == TimeTypes[i] / Buffs[j, 2]) {
s <- c(s, Buffs[j, 1])
}
}
BuffList[[i+1]] <- s
}
## Look up each scheduled buff's cast delay (buff table first, then summons).
DelayDataB <- data.frame(Name=rownames(BuffFinal), Delay=BuffFinal$Delay)
DelayDataS <- data.frame(Name=rownames(SummonedFinal), Delay=SummonedFinal$Delay)
DelayData <- rbind(DelayDataB, DelayDataS)
BuffDelays <- list()
for(i in 1:length(BuffList)) {
t <- c()
for(j in 1:length(BuffList[[i]])) {
for(k in 1:nrow(DelayData)) {
if(DelayData$Name[k]==BuffList[[i]][j]) {
t <- c(t, k)
}
}
}
BuffDelays[[i]] <- DelayData$Delay[t]
}
TotalTime <- TotalTime * 1000
DealCycle <- PreDealCycle
## Opening sequence: cast everything once, with FinalCut's pre-delay and the
## Spider In Mirror trigger attached at their usual anchor points.
for(i in 1:length(BuffList[[1]])) {
if(sum(rownames(BuffFinal)==BuffList[[1]][i]) > 0) {
if(BuffList[[1]][i]=="FinalCutBuff") {
DealCycle <- DCATK(DealCycle, "FinalCutPre", ATKFinal)
}
DealCycle <- DCBuff(DealCycle, BuffList[[1]][i], BuffFinal)
if(DealCycle$Skills[nrow(DealCycle)] == "FinalCutBuff") {
DealCycle <- DCATK(DealCycle, "FinalCut", ATKFinal)
} else if(DealCycle$Skills[nrow(DealCycle)] == "HeroesOath") {
DealCycle <- DCATK(DealCycle, "SpiderInMirror", ATKFinal)
}
} else {
DealCycle <- DCSummoned(DealCycle, BuffList[[1]][i], SummonedFinal)
}
}
## ColNums = DealCycle column indices of the permanent (SubTime==0) buffs,
## refreshed opportunistically whenever they are about to fall off.
SubTimeList <- data.frame(Skills=BuffSummonedPrior, SubTime=SubTime, stringsAsFactors=F)
NoSubTime <- subset(SubTimeList, SubTimeList$SubTime==0)$Skills
NoSubTimeBuff <- c()
for(i in 1:length(NoSubTime)) {
NoSubTimeBuff <- c(NoSubTimeBuff, NoSubTime[i])
}
ColNums <- c()
for(i in 1:length(NoSubTimeBuff)) {
for(j in 1:length(colnames(DealCycle))) {
if(NoSubTimeBuff[i]==colnames(DealCycle)[j]) {
ColNums[i] <- j
}
}
}
## Append a final full-rebuff window at the end of the cycle.
BuffList[[length(BuffList)+1]] <- BuffList[[1]]
BuffDelays[[length(BuffDelays)+1]] <- BuffDelays[[1]]
TimeTypes <- c(0, TimeTypes, TotalTime/1000)
## NOTE(review): BJRemain is seeded from HALF the MarkofPhantom cooldown --
## looks intentional (BlackJack alternates with the MoP combo) but confirm.
BJCool <- subset(ATKFinal, rownames(ATKFinal)=="MarkofPhantom")$CoolTime * 1000 / 2
MPCool <- subset(ATKFinal, rownames(ATKFinal)=="MarkofPhantom")$CoolTime * 1000
RCCool <- subset(ATKFinal, rownames(ATKFinal)=="RoseCarteFinale")$CoolTime * 1000
## Remaining-cooldown trackers and alternation flags for the filler rotation.
BJRemain <- 0 ; RCRemain <- 0 ; MOPDummy <- 0 ; TOCDummy <- 0
for(k in 2:length(BuffList)) {
CycleBuffList <- data.frame(Skills=BuffList[[k]], Delay=BuffDelays[[k]])
## Work out when this rebuff window must start so every buff in it comes off
## cooldown (or expires) exactly as its turn in the cast sequence arrives.
BuffEndTime <- c()
for(i in 1:length(BuffList[[k]])) {
a <- subset(DealCycle, BuffList[[k]][i]==DealCycle$Skills)
a <- rbind(a, subset(DealCycle, paste(BuffList[[k]][i], "Summoned", sep="")==DealCycle$Skills))
for(j in 1:nrow(CycleBuffList)) {
if(CycleBuffList$Skills[j]==BuffList[[k]][i]) {
Idx <- j
break
}
}
BuffEndTime[i] <- max(a$Time) +
min(subset(SubTimeList, SubTimeList$Skills==BuffList[[k]][i])$SubTime * 1000, subset(BuffFinal, rownames(BuffFinal)==BuffList[[k]][i])$CoolTime * 1000,
subset(SummonedFinal, rownames(SummonedFinal)==BuffList[[k]][i])$CoolTime * 1000) +
sum(CycleBuffList$Delay[Idx:nrow(CycleBuffList)])
}
BuffEndTime <- max(BuffEndTime)
BuffEndTime <- max(BuffEndTime, TimeTypes[k] * 1000)
BuffStartTime <- BuffEndTime - sum(CycleBuffList$Delay)
## Filler rotation until the window opens. DealCycle$Time[1] is the running
## elapsed-time accumulator; Time[nrow] is the last event's timestamp.
while(DealCycle$Time[nrow(DealCycle)] + DealCycle$Time[1] < BuffStartTime) {
## Refresh any permanent buff with <3 s left.
for(i in 1:length(ColNums)) {
if(DealCycle[nrow(DealCycle), ColNums[i]] - DealCycle$Time[1] < 3000) {
DealCycle <- DCBuff(DealCycle, colnames(DealCycle)[ColNums[i]], BuffFinal)
BJRemain <- max(0, BJRemain - DealCycle$Time[1])
RCRemain <- max(0, RCRemain - DealCycle$Time[1])
}
}
## BlackJack, Mark of Phantom, Lift Break
## NOTE(review): `&` binds tighter than `|`, so each condition reads
## (ready & last-window & fits-before-window+8s) OR (ready & not-last-window).
## MOPDummy alternates the full MoP/LiftBreak combo with the shorter combo.
if(BJRemain == 0 & MOPDummy == 0 & k==length(BuffList) & DealCycle$Time[nrow(DealCycle)] + DealCycle$Time[1] + MPCool <= BuffStartTime + 8000 |
BJRemain == 0 & MOPDummy == 0 & k!=length(BuffList)) {
DealCycle <- DCATK(DealCycle, "BlackJack", ATKFinal)
BJRemain <- BJCool - DealCycle$Time[1]
RCRemain <- max(0, RCRemain - DealCycle$Time[1])
DealCycle <- DCBuff(DealCycle, "TwilightDebuff", BuffFinal)
BJRemain <- max(0, BJRemain - DealCycle$Time[1])
RCRemain <- max(0, RCRemain - DealCycle$Time[1])
DealCycle <- DCATK(DealCycle, "Twilight1", ATKFinal)
BJRemain <- max(0, BJRemain - DealCycle$Time[1])
RCRemain <- max(0, RCRemain - DealCycle$Time[1])
DealCycle <- DCATK(DealCycle, "Twilight2", ATKFinal)
BJRemain <- max(0, BJRemain - DealCycle$Time[1])
RCRemain <- max(0, RCRemain - DealCycle$Time[1])
DealCycle <- DCATK(DealCycle, "MarkofPhantomFinal", ATKFinal)
BJRemain <- max(0, BJRemain - DealCycle$Time[1])
RCRemain <- max(0, RCRemain - DealCycle$Time[1])
DealCycle <- DCATK(DealCycle, "MarkofPhantom", ATKFinal)
BJRemain <- max(0, BJRemain - DealCycle$Time[1])
RCRemain <- max(0, RCRemain - DealCycle$Time[1])
DealCycle <- DCATK(DealCycle, "LiftBreak", ATKFinal)
BJRemain <- max(0, BJRemain - DealCycle$Time[1])
RCRemain <- max(0, RCRemain - DealCycle$Time[1])
## Joker only under an active Restraint4 window (>=7 s left); otherwise
## weave in Tempest of Card when far enough from the rebuff window.
if(DealCycle$Restraint4[nrow(DealCycle)] >= 7000) {
DealCycle <- DCATK(DealCycle, "Joker", ATKFinal)
BJRemain <- max(0, BJRemain - DealCycle$Time[1])
RCRemain <- max(0, RCRemain - DealCycle$Time[1])
DealCycle <- DCBuff(DealCycle, "JokerBuffFail", BuffFinal)
BJRemain <- max(0, BJRemain - DealCycle$Time[1])
RCRemain <- max(0, RCRemain - DealCycle$Time[1])
TOCDummy <- 0
} else if(TOCDummy == 0 & DealCycle$Time[nrow(DealCycle)] + DealCycle$Time[1] <= BuffStartTime - 10000) {
DealCycle <- DCATK(DealCycle, "TempestofCard", ATKFinal)
BJRemain <- max(0, BJRemain - DealCycle$Time[1])
RCRemain <- max(0, RCRemain - DealCycle$Time[1])
TOCDummy <- 1
} else {
TOCDummy <- 0
}
MOPDummy <- 1
} else if(BJRemain == 0 & MOPDummy == 1 & k==length(BuffList) & DealCycle$Time[nrow(DealCycle)] + DealCycle$Time[1] + BJCool <= BuffStartTime + 8000 |
BJRemain == 0 & MOPDummy == 1 & k!=length(BuffList)) {
## Alternate combo: BlackJack + Twilight without Mark of Phantom/LiftBreak.
DealCycle <- DCATK(DealCycle, "BlackJack", ATKFinal)
BJRemain <- BJCool - DealCycle$Time[1]
RCRemain <- max(0, RCRemain - DealCycle$Time[1])
DealCycle <- DCBuff(DealCycle, "TwilightDebuff", BuffFinal)
BJRemain <- max(0, BJRemain - DealCycle$Time[1])
RCRemain <- max(0, RCRemain - DealCycle$Time[1])
DealCycle <- DCATK(DealCycle, "Twilight1", ATKFinal)
BJRemain <- max(0, BJRemain - DealCycle$Time[1])
RCRemain <- max(0, RCRemain - DealCycle$Time[1])
DealCycle <- DCATK(DealCycle, "Twilight2", ATKFinal)
BJRemain <- max(0, BJRemain - DealCycle$Time[1])
RCRemain <- max(0, RCRemain - DealCycle$Time[1])
if(DealCycle$Restraint4[nrow(DealCycle)] >= 7000) {
DealCycle <- DCATK(DealCycle, "Joker", ATKFinal)
BJRemain <- max(0, BJRemain - DealCycle$Time[1])
RCRemain <- max(0, RCRemain - DealCycle$Time[1])
DealCycle <- DCBuff(DealCycle, "JokerBuffFail", BuffFinal)
BJRemain <- max(0, BJRemain - DealCycle$Time[1])
RCRemain <- max(0, RCRemain - DealCycle$Time[1])
TOCDummy <- 0
} else if(TOCDummy == 0 & DealCycle$Time[nrow(DealCycle)] + DealCycle$Time[1] <= BuffStartTime - 10000) {
DealCycle <- DCATK(DealCycle, "TempestofCard", ATKFinal)
BJRemain <- max(0, BJRemain - DealCycle$Time[1])
RCRemain <- max(0, RCRemain - DealCycle$Time[1])
TOCDummy <- 1
} else {
TOCDummy <- 0
}
MOPDummy <- 0
}
## Rose Carte Finale
## Cast off cooldown, but not within the last ~10 s of the 360 s cycle.
else if(RCRemain == 0 & DealCycle$Time[nrow(DealCycle)] + DealCycle$Time[1] <= 350000) {
DealCycle <- DCATK(DealCycle, "RoseCarteFinaleAdd", ATKFinal)
BJRemain <- max(0, BJRemain - DealCycle$Time[1])
RCRemain <- RCCool - DealCycle$Time[1]
DealCycle <- DCATK(DealCycle, "RoseCarteFinale", ATKFinal)
BJRemain <- max(0, BJRemain - DealCycle$Time[1])
RCRemain <- max(0, RCRemain - DealCycle$Time[1])
}
## Ultimate Drive
else {
DealCycle <- DCATK(DealCycle, c("UltimateDrive"), ATKFinal)
BJRemain <- max(0, BJRemain - DealCycle$Time[1])
RCRemain <- max(0, RCRemain - DealCycle$Time[1])
}
}
## Cast this window's buffs (the appended final window k==length is skipped:
## it only serves as a scheduling horizon for the filler loop above).
if(k != length(BuffList)) {
for(i in 1:length(BuffList[[k]])) {
if(sum(rownames(BuffFinal)==BuffList[[k]][i]) > 0) {
if(BuffList[[k]][i]=="FinalCutBuff") {
DealCycle <- DCATK(DealCycle, "FinalCutPre", ATKFinal)
BJRemain <- max(0, BJRemain - DealCycle$Time[1])
RCRemain <- max(0, RCRemain - DealCycle$Time[1])
}
DealCycle <- DCBuff(DealCycle, BuffList[[k]][i], BuffFinal)
BJRemain <- max(0, BJRemain - DealCycle$Time[1])
RCRemain <- max(0, RCRemain - DealCycle$Time[1])
if(DealCycle$Skills[nrow(DealCycle)] == "FinalCutBuff") {
DealCycle <- DCATK(DealCycle, "FinalCut", ATKFinal)
BJRemain <- max(0, BJRemain - DealCycle$Time[1])
RCRemain <- max(0, RCRemain - DealCycle$Time[1])
}
} else {
DealCycle <- DCSummoned(DealCycle, BuffList[[k]][i], SummonedFinal)
BJRemain <- max(0, BJRemain - DealCycle$Time[1])
RCRemain <- max(0, RCRemain - DealCycle$Time[1])
}
}
}
}
return(DealCycle)
}
# Post-process a Phantom deal cycle: expand multi-hit / periodic skills into
# individual attack rows and simulate the Noir Carte stack mechanic.
#
# Args:
#   DealCycle     - data.frame of skill events. Column 1 is Skills, column 2 is
#                   Time (ms); columns 3:ncol hold cooldown trackers; a
#                   NoirCarteStack column is used as scratch space below.
#   ATKFinal      - attack lookup table passed through to RepATKCycle().
#   BuffFinal     - unused here; kept so the signature matches the other
#                   *AddATK() functions.
#   SummonedFinal - summon lookup table passed to DCSpiderInMirror().
#   Spec          - unused here; kept for signature compatibility.
#
# Returns: the expanded DealCycle, ordered by Time, with NoirCarteStack zeroed.
PhantomAddATK <- function(DealCycle, ATKFinal, BuffFinal, SummonedFinal, Spec) {
    # Append a copy of `template` to DC and relabel it with `skill`.
    # Used for the extra Noir Carte hits triggered by other attacks.
    append_skill_row <- function(DC, template, skill) {
        DC <- rbind(DC, template)
        DC$Skills[nrow(DC)] <- skill
        DC
    }
    ## Joker
    DealCycle <- RepATKCycle(DealCycle, "Joker", 140, 0, ATKFinal)
    ## BlackJack
    DealCycle <- RepATKCycle(DealCycle, "BlackJack", 7, 1200, ATKFinal)
    ## BlackJack (Last): every 7th BlackJack hit is relabelled as the final hit.
    BJ <- 1
    for(i in seq_len(nrow(DealCycle))) {
        if(DealCycle$Skills[i]=="BlackJack") {
            if(BJ==7) {
                DealCycle$Skills[i] <- "BlackJackLast"
                BJ <- 1
            } else {
                BJ <- BJ + 1
            }
        }
    }
    ## Mark of Phantom
    DealCycle <- RepATKCycle(DealCycle, "MarkofPhantom", 7, 660, ATKFinal)
    DealCycle <- RepATKCycle(DealCycle, "MarkofPhantomFinal", 2, 1440, ATKFinal)
    ## Lift Break: each cast ("LiftBreakStart") spawns hits at fixed offsets (ms).
    LiftBreakTime <- c(0, 270, 540, 1770, 1800, 1830, 1860)
    DealCycle[DealCycle$Skills=="LiftBreak", ]$Skills <- "LiftBreakStart"
    DC2 <- subset(DealCycle, DealCycle$Skills=="LiftBreakStart")
    # Capture the cast count up-front: rows are appended inside the loop, and
    # seq_len() also behaves correctly when there are zero casts (1:nrow would
    # iterate c(1, 0) and fail).
    n_casts <- nrow(DC2)
    for(i in seq_len(n_casts)) {
        for(j in seq_along(LiftBreakTime)) {
            DC2 <- rbind(DC2, DC2[i, ])
            DC2$Time[nrow(DC2)] <- DC2$Time[i] + LiftBreakTime[j]
            DC2$Skills[nrow(DC2)] <- "LiftBreak"
        }
    }
    DC2 <- subset(DC2, DC2$Skills=="LiftBreak")
    DC2 <- subset(DC2, DC2$Time <= max(DealCycle$Time))
    DealCycle <- rbind(DealCycle, DC2)
    DealCycle <- DealCycle[order(DealCycle$Time), ]
    rownames(DealCycle) <- seq_len(nrow(DealCycle))
    # Re-derive the cooldown columns (3:ncol) for the inserted LiftBreak rows by
    # counting down from the previous row, clamped at zero.
    for(i in 3:nrow(DealCycle)) {
        if("LiftBreak"==DealCycle[i, 1]) {
            DealCycle[i, 3:ncol(DealCycle)] <- DealCycle[i-1, 3:ncol(DealCycle)] - (DealCycle[i, 2] - DealCycle[i-1, 2])
            DealCycle[i, 3:ncol(DealCycle)] <- ifelse(DealCycle[i, 3:ncol(DealCycle)]<0, 0, DealCycle[i, 3:ncol(DealCycle)])
        }
    }
    ## Rose Carte Finale (AddATK)
    DealCycle <- RepATKCycle(DealCycle, "RoseCarteFinaleAdd", 12, 2400, ATKFinal)
    ## Tempest of Card
    DealCycle <- RepATKCycle(DealCycle, "TempestofCard", 56, 0, ATKFinal)
    ## Spider In Mirror
    DealCycle <- DCSpiderInMirror(DealCycle, SummonedFinal)
    ## Noir Carte: +1 stack per listed attack, +3 per BlackJack(Last); every
    ## stack fires a NoirCarte hit and each 40 stacks triggers NoirCarteJudgement.
    ## NOTE(review): Twilight2 is not a trigger (only Twilight1) - confirm intended.
    NoirCarteTriggers <- c("UltimateDrive", "TempestofCard", "Joker", "LiftBreak",
                           "MarkofPhantom", "MarkofPhantomFinal", "RoseCarteFinale",
                           "Twilight1", "FinalCut", "SpiderInMirror")
    DealCycle$NoirCarteStack[1] <- 0
    # 2:nrow(DealCycle) is evaluated once, so rows appended below are not revisited.
    for(i in 2:nrow(DealCycle)) {
        if(DealCycle$Skills[i] %in% NoirCarteTriggers) {
            DealCycle$NoirCarteStack[i] <- DealCycle$NoirCarteStack[i-1] + 1
            DealCycle <- append_skill_row(DealCycle, DealCycle[i, ], "NoirCarte")
            if(DealCycle$NoirCarteStack[i] == 40) {
                DealCycle <- append_skill_row(DealCycle, DealCycle[i, ], "NoirCarteJudgement")
                DealCycle$NoirCarteStack[i] <- 0
            }
        } else if(DealCycle$Skills[i] %in% c("BlackJack", "BlackJackLast")) {
            # Both BlackJack variants behave identically here (the original had
            # the two branches duplicated verbatim): +3 stacks, three hits.
            DealCycle$NoirCarteStack[i] <- DealCycle$NoirCarteStack[i-1] + 3
            for(hit in 1:3) {
                DealCycle <- append_skill_row(DealCycle, DealCycle[i, ], "NoirCarte")
            }
            if(DealCycle$NoirCarteStack[i] >= 40) {
                DealCycle <- append_skill_row(DealCycle, DealCycle[i, ], "NoirCarteJudgement")
                DealCycle$NoirCarteStack[i] <- DealCycle$NoirCarteStack[i] - 40
            }
        } else {
            DealCycle$NoirCarteStack[i] <- DealCycle$NoirCarteStack[i-1]
        }
    }
    DealCycle <- DealCycle[order(DealCycle$Time), ]
    rownames(DealCycle) <- seq_len(nrow(DealCycle))
    # The running stack was only needed to place the extra hits; zero it out.
    DealCycle$NoirCarteStack <- 0
    return(DealCycle)
}
## Phantom: build the full deal cycle and derive DPM / contribution statistics.
PhantomDealCycle <- PhantomCycle(PreDealCycle=PhantomDealCycle,
                                 ATKFinal=ATKFinal,
                                 BuffFinal=BuffFinal,
                                 SummonedFinal=SummonedFinal,
                                 Spec=PhantomSpec,
                                 Period=180,
                                 CycleTime=360)
PhantomDealCycle <- DealCycleFinal(PhantomDealCycle)
PhantomDealCycle <- PhantomAddATK(PhantomDealCycle, ATKFinal, BuffFinal, SummonedFinal, PhantomSpec)
PhantomDealCycleReduction1 <- DealCycleReduction(PhantomDealCycle)

# Scenario 2: fold JokerBuffFail into JokerBuff (presumably modelling
# successful Joker procs - confirm); the two scenarios are averaged with
# weights 0.6 / 0.4 everywhere below.
PhantomDealCycle2 <- PhantomDealCycle
PhantomDealCycle2$JokerBuff <- PhantomDealCycle2$JokerBuffFail
PhantomDealCycle2$JokerBuffFail <- 0
PhantomDealCycleReduction2 <- DealCycleReduction(PhantomDealCycle2)

# Locate the active spec set in PotentialOpt and the Phantom row within it.
# tail(which(...), 1) keeps the last match, exactly like the original loops.
Idx1 <- tail(which(names(PotentialOpt) == DPMCalcOption$SpecSet), 1)
Idx2 <- tail(which(rownames(PotentialOpt[[Idx1]]) == "Phantom"), 1)

# Potential optimization (or reuse the cached result)
if(isTRUE(DPMCalcOption$Optimization)) {
    PhantomSpecOpt1 <- ResetOptimization1(list(PhantomDealCycleReduction1, PhantomDealCycleReduction2), ATKFinal, BuffFinal, SummonedFinal, PhantomSpec, PhantomUnionRemained,
                                          rep(max(PhantomDealCycle$Time), 2), c(0.6, 0.4))
    PotentialOpt[[Idx1]][Idx2, ] <- PhantomSpecOpt1[1, 1:3]
} else {
    PhantomSpecOpt1 <- PotentialOpt[[Idx1]][Idx2, ]
}
PhantomSpecOpt <- OptDataAdd(PhantomSpec, PhantomSpecOpt1, "Potential", PhantomBase$CRROver, DemonAvenger=FALSE)

# Hyper stat optimization (or reuse the cached result)
if(isTRUE(DPMCalcOption$Optimization)) {
    PhantomSpecOpt2 <- ResetOptimization2(list(PhantomDealCycleReduction1, PhantomDealCycleReduction2), ATKFinal, BuffFinal, SummonedFinal, PhantomSpecOpt, PhantomHyperStatBase, PhantomBase$ChrLv, PhantomBase$CRROver,
                                          HyperStanceLv=0, rep(max(PhantomDealCycle$Time), 2), c(0.6, 0.4))
    HyperStatOpt[[Idx1]][Idx2, c(1, 3:10)] <- PhantomSpecOpt2[1, ]
} else {
    PhantomSpecOpt2 <- HyperStatOpt[[Idx1]][Idx2, ]
}
PhantomSpecOpt <- OptDataAdd(PhantomSpecOpt, PhantomSpecOpt2, "HyperStat", PhantomBase$CRROver, DemonAvenger=FALSE)

# Final DPM with and without the maximum damage-reduction cap
PhantomFinalDPM <- ResetDealCalc(DealCycles=list(PhantomDealCycleReduction1, PhantomDealCycleReduction2),
                                 ATKFinal, BuffFinal, SummonedFinal, PhantomSpecOpt, rep(max(PhantomDealCycle$Time), 2), c(0.6, 0.4))
PhantomFinalDPMwithMax <- ResetDealCalcWithMaxDMR(DealCycles=list(PhantomDealCycleReduction1, PhantomDealCycleReduction2),
                                 ATKFinal, BuffFinal, SummonedFinal, PhantomSpecOpt, rep(max(PhantomDealCycle$Time), 2), c(0.6, 0.4))
PhantomDeal1 <- DealCalcWithMaxDMR(PhantomDealCycle, ATKFinal, BuffFinal, SummonedFinal, PhantomSpecOpt)
PhantomDeal2 <- DealCalcWithMaxDMR(PhantomDealCycle2, ATKFinal, BuffFinal, SummonedFinal, PhantomSpecOpt)

# Rows 1/2 of the results table: capped DPM, and the DPM lost to the cap.
# set() mutates the table named by DataName by reference - presumably
# data.table::set(); confirm the target is a data.table.
set(get(DPMCalcOption$DataName), as.integer(1), "Phantom", sum(na.omit(PhantomFinalDPMwithMax)) / (max(PhantomDealCycle$Time) / 60000))
set(get(DPMCalcOption$DataName), as.integer(2), "Phantom", sum(na.omit(PhantomFinalDPM)) / (max(PhantomDealCycle$Time) / 60000) - sum(na.omit(PhantomFinalDPMwithMax)) / (max(PhantomDealCycle$Time) / 60000))

PhantomDealRatio <- ResetDealRatio(DealCycles=list(PhantomDealCycle, PhantomDealCycle2), DealDatas=list(PhantomDeal1, PhantomDeal2),
                                   rep(max(PhantomDealCycle$Time), 2), c(0.6, 0.4))
PhantomDealData <- data.frame(PhantomDealCycle$Skills, PhantomDealCycle$Time, PhantomDealCycle$Restraint4, PhantomDeal1)
colnames(PhantomDealData) <- c("Skills", "Time", "R4", "Deal")

# Rows 3/4: deal inside the Restraint-Ring window and the best 40s window
set(get(DPMCalcOption$DataName), as.integer(3), "Phantom", Deal_RR(PhantomDealData))
set(get(DPMCalcOption$DataName), as.integer(4), "Phantom", Deal_40s(PhantomDealData))
# Instrumented wrapper around the compiled blocksums_i_max routine: records
# each (x, window) argument pair in the global `data.env` environment, then
# delegates to the registered native entry point and returns its result.
function(x, window) {
    log_env <- get("data.env", .GlobalEnv)
    next_slot <- length(log_env[["blocksums_i_max"]]) + 1L
    log_env[["blocksums_i_max"]][[next_slot]] <- list(x = x, window = window)
    .Call("_accelerometry_blocksums_i_max", x, window)
}
| /valgrind_test_dir/blocksums_i_max-test.R | no_license | akhikolla/RcppDeepStateTest | R | false | false | 224 | r | function (x, window)
{
e <- get("data.env", .GlobalEnv)
e[["blocksums_i_max"]][[length(e[["blocksums_i_max"]]) +
1]] <- list(x = x, window = window)
.Call("_accelerometry_blocksums_i_max", x, window)
}
|
\name{estimated.sGD}
\alias{estimated.sGD}
\title{Calculated Sum Activity of Step-Up Deiodinases (SPINA-GD) in standardised form}
\description{Calculate total step-up deiodinase activity (SPINA-GD) from equilibrium free hormone concentrations in standardised form resulting from z-transformation. }
\usage{
estimated.sGD(FT4, FT3, mean = 30, sd = 5)
}
\arguments{
\item{FT4}{Free thyroxine (FT4) concentration in pmol/L.}
\item{FT3}{Free triiodothyronine (FT3) concentration in pmol/L.}
\item{mean}{mean value of population sample for standardised (z-transformed)
tests}
\item{sd}{standard deviation of population sample for standardised (z-transformed)
tests}
}
\details{This function is able to do vectorised calculations. }
\value{This function returns step-up deiodinase activity in standardised form representing a single value or a vector, depending on the vector length of the arguments. Results are z-transformed and therefore without unit of measurement. }
\references{
Dietrich J. W., Landgrafe G., Fotiadou E. H. 2012 TSH and Thyrotropic Agonists: Key Actors in Thyroid Homeostasis. \emph{J Thyroid Res}. 2012;2012:351864. doi: 10.1155/2012/351864. PMID: 23365787; PMCID: PMC3544290.
Dietrich J. W., Landgrafe-Mende G., Wiora E., Chatzitomaris A., Klein H. H., Midgley J. E., Hoermann R. 2016 Calculated Parameters of Thyroid Homeostasis: Emerging Tools for Differential Diagnosis and Clinical Research. \emph{Front Endocrinol (Lausanne)}. 2016 Jun 9;7:57. doi: 10.3389/fendo.2016.00057. PMID: 27375554; PMCID: PMC4899439.
}
\author{Johannes W. Dietrich}
\note{The software functions described in this document are intended for research
use only. Hormone levels should have been obtained simultaneously in order to avoid
bias by transition effects.}
\seealso{
\code{\link{SPINA.GT}}, \code{\link{estimated.GT}}, \code{\link{SPINA.GTT}}, \code{\link{estimated.GTT}}, \code{\link{SPINA.GD}}, \code{\link{estimated.GD}}, \code{\link{SPINA.GDTT}}, \code{\link{estimated.GDTT}}, \code{\link{SPINA.sGD}}, \code{\link{estimated.TSHI}}, \code{\link{estimated.sTSHI}}, \code{\link{estimated.TTSI}}
}
\examples{
TSH <- c(1, 3.24, 0.7);
FT4 <- c(16.5, 7.7, 9);
FT3 <- c(4.5, 28, 6.2);
print(paste("GT^:", SPINA.GT(TSH, FT4)));
print(paste("GD^:", SPINA.GD(FT4, FT3)));
print(paste("sGD^:", SPINA.sGD(FT4, FT3)));
}
\keyword{SPINA} | /S functions/R Package/SPINA/man/estimated.sGD.Rd | no_license | jwdietrich21/spina | R | false | false | 2,341 | rd | \name{estimated.sGD}
\alias{estimated.sGD}
\title{Calculated Sum Activity of Step-Up Deiodinases (SPINA-GD) in standardised form}
\description{Calculate total step-up deiodinase activity (SPINA-GD) from equilibrium free hormone concentrations in standardised form resulting from z-transformation. }
\usage{
estimated.sGD(FT4, FT3, mean = 30, sd = 5)
}
\arguments{
\item{FT4}{Free thyroxine (FT4) concentration in pmol/L.}
\item{FT3}{Free triiodothyronine (FT3) concentration in pmol/L.}
\item{mean}{mean value of population sample for standardised (z-transformed)
tests}
\item{sd}{standard deviation of population sample for standardised (z-transformed)
tests}
}
\details{This function is able to do vectorised calculations. }
\value{This function returns step-up deiodinase activity in standardised form representing a single value or a vector, depending on the vector length of the arguments. Results are z-transformed and therefore without unit of measurement. }
\references{
Dietrich J. W., Landgrafe G., Fotiadou E. H. 2012 TSH and Thyrotropic Agonists: Key Actors in Thyroid Homeostasis. \emph{J Thyroid Res}. 2012;2012:351864. doi: 10.1155/2012/351864. PMID: 23365787; PMCID: PMC3544290.
Dietrich J. W., Landgrafe-Mende G., Wiora E., Chatzitomaris A., Klein H. H., Midgley J. E., Hoermann R. 2016 Calculated Parameters of Thyroid Homeostasis: Emerging Tools for Differential Diagnosis and Clinical Research. \emph{Front Endocrinol (Lausanne)}. 2016 Jun 9;7:57. doi: 10.3389/fendo.2016.00057. PMID: 27375554; PMCID: PMC4899439.
}
\author{Johannes W. Dietrich}
\note{The software functions described in this document are intended for research
use only. Hormone levels should have been obtained simultaneously in order to avoid
bias by transition effects.}
\seealso{
\code{\link{SPINA.GT}}, \code{\link{estimated.GT}}, \code{\link{SPINA.GTT}}, \code{\link{estimated.GTT}}, \code{\link{SPINA.GD}}, \code{\link{estimated.GD}}, \code{\link{SPINA.GDTT}}, \code{\link{estimated.GDTT}}, \code{\link{SPINA.sGD}}, \code{\link{estimated.TSHI}}, \code{\link{estimated.sTSHI}}, \code{\link{estimated.TTSI}}
}
\examples{
TSH <- c(1, 3.24, 0.7);
FT4 <- c(16.5, 7.7, 9);
FT3 <- c(4.5, 28, 6.2);
print(paste("GT^:", SPINA.GT(TSH, FT4)));
print(paste("GD^:", SPINA.GD(FT4, FT3)));
print(paste("sGD^:", SPINA.sGD(FT4, FT3)));
}
\keyword{SPINA} |
# dplyr practice script: basic verbs on mtcars, then homework ("deberes")
# exercises on the mpg data set.
library(tidyverse)

## Filtering and ordering
mtcars %>% filter(mpg >= 24.4) %>% arrange(desc(mpg))
arrange(mtcars, mpg)

## Column selection
mtcars %>% select(mpg, disp)
mtcars %>% select(wt:gear)

## Derived column (wt is in 1000 lb; 0.48 approximates the lb->kg factor
## 0.4536 - TODO confirm the intended conversion)
mtcars %>% mutate(wtkg = wt * .48)

## Grouped summary
mtcars %>% group_by(cyl) %>% summarise(cyl_n = n(), cyl_mean = mean(mpg))

# deberes (homework)
mpg %>% filter(manufacturer == 'toyota' & model == 'camry')

# BUG FIX: the original used mutate(sdm = 'sd(cty)'), which stored the literal
# string "sd(cty)" rather than the standard deviation (and cty is no longer
# available after summarise()). Compute sd(cty) inside summarise() instead.
mpg %>% group_by(manufacturer) %>%
    summarise(prom = mean(cty), sdm = sd(cty)) %>%
    arrange(prom)

mpg %>% group_by(manufacturer) %>%
    summarise(prom = mean(cty), sd = sd(cty), n = n(), rango = max(cty) - min(cty)) %>%
    mutate(sdm = sd / sqrt(n)) %>%
    select(manufacturer, prom, sdm, rango)

# Improvement in city mpg per manufacturer between the two model years.
# NOTE: cbind() relies on both summaries yielding the same manufacturers in
# the same order; this holds for mpg, but a join would be more robust.
cbind(
    mpg %>% filter(year < 2004) %>% group_by(manufacturer) %>% summarise(prom99 = mean(cty)),
    mpg %>% filter(year > 2004) %>% group_by(manufacturer) %>% summarise(prom08 = mean(cty)) %>% select(prom08)) %>%
    mutate(mejora = prom08 - prom99) %>% arrange(mejora)
| /tidyverse-dplyr/1-dplyr.r | no_license | jjgoye/cursoIESTA | R | false | false | 884 | r | library(tidyverse)
mtcars %>% filter(mpg >= 24.4) %>% arrange(desc(mpg))
arrange(mtcars, mpg)
mtcars %>% select(mpg,disp)
mtcars %>% select(wt:gear)
mtcars %>% mutate(wtkg=wt*.48)
mtcars %>% group_by(cyl) %>% summarise(cyl_n = n(), cyl_mean=mean(mpg))
# deberes
mpg %>% filter(manufacturer == 'toyota' & model == 'camry')
mpg %>% group_by(manufacturer) %>% summarise(prom=mean(cty)) %>% arrange(prom) %>%
mutate(sdm='sd(cty)')
mpg %>% group_by(manufacturer) %>% summarise(prom=mean(cty), sd=sd(cty), n=n(), rango=max(cty)-min(cty)) %>%
mutate(sdm = sd/sqrt(n)) %>% select(manufacturer, prom, sdm, rango)
cbind(
mpg %>% filter(year<2004) %>% group_by(manufacturer) %>% summarise(prom99=mean(cty)),
mpg %>% filter(year>2004) %>% group_by(manufacturer) %>% summarise(prom08=mean(cty)) %>% select(prom08)) %>%
mutate(mejora=prom08-prom99) %>% arrange(mejora)
|
## ---- initialise ----
# Main 'driver' script to create the unit sources
#
# Gareth Davies, Geoscience Australia 2015/16
#
library(rptha)
library(raster)

###############################################################################
#
# Main input parameters
#
###############################################################################

# config.R defines the inputs used throughout this script: the sourcezone
# shapefile vectors, sourcezone_rake, output_base_dir, the discretisation
# parameters, tsunami_source_cellsize, MC_CORES, etc. - see config.R.
source('config.R')

## ---- takeCommandLineParameter ----

if(!interactive()){
    #
    # Optionally take an input argument when run from Rscript. This should be
    # an integer giving the index of the shapefile we want to run
    #
    # This can be useful to allow the code to be run in batch on NCI
    # with 1 job per shapefile.
    #
    input_arguments <- commandArgs(trailingOnly=TRUE)
    if(length(input_arguments) != 1){
        print('Problem with input arguments')
        print(input_arguments)
        stop()
    }else{
        source_index <- as.numeric(input_arguments)
    }

    # Restrict the run to the single source zone selected on the command line
    all_sourcezone_shapefiles <- all_sourcezone_shapefiles[source_index]
    all_sourcezone_downdip_shapefiles <- all_sourcezone_downdip_shapefiles[source_index]
    sourcezone_rake <- sourcezone_rake[source_index]
}
## ---- makeDiscretizedSources ----

# Capture plots that occur as each source is made in a pdf
pdf('UnitSources.pdf', width=10, height=10)

# Loop over all source contour shapefiles, and make the discretized source zone
discretized_sources <- list()
discretized_sources_statistics <- list()

print('Making discretized sources ...')

for(source_shapefile_index in seq_along(all_sourcezone_shapefiles)){

    source_shapefile <- all_sourcezone_shapefiles[source_shapefile_index]
    source_downdip_lines <- all_sourcezone_downdip_shapefiles[source_shapefile_index]

    # Extract a name for the source: the shapefile basename without its
    # extension (anchored pattern so only a trailing '.shp' is removed)
    sourcename <- gsub('\\.shp$', '', basename(source_shapefile))

    # Create unit sources for source_shapefile
    discretized_sources[[sourcename]] <-
        discretized_source_from_source_contours(source_shapefile,
            desired_subfault_length, desired_subfault_width, make_plot=TRUE,
            downdip_lines = source_downdip_lines)

    # Get unit source summary stats
    #discretized_sources_statistics[[sourcename]] =
    # #discretized_source_approximate_summary_statistics(
    # discretized_source_summary_statistics(
    # discretized_sources[[sourcename]],
    # default_rake = sourcezone_rake[source_shapefile_index],
    # make_plot=TRUE)

    # Export the unit-source grid outline as an ESRI shapefile in WGS84
    usg <- unit_source_grid_to_SpatialPolygonsDataFrame(
        discretized_sources[[sourcename]]$unit_source_grid)
    proj4string(usg) <- '+init=epsg:4326'
    writeOGR(usg, dsn=paste0(output_base_dir, 'unit_source_grid'),
        layer=sourcename, driver='ESRI Shapefile', overwrite=TRUE)
}

# Persist all discretized sources for downstream scripts
saveRDS(discretized_sources, paste0(output_base_dir, 'all_discretized_sources.RDS'))

dev.off() # Save pdf plot
## ---- makeTsunamiSources ----
###############################################################################
#
# Step 2: Make tsunami unit sources
#
###############################################################################

dir.create('Unit_source_data', showWarnings=FALSE)

for(sourcename_index in seq_along(discretized_sources)){

    sourcename <- names(discretized_sources)[sourcename_index]

    # Get the discretized source
    ds1 <- discretized_sources[[sourcename]]

    ## Get surface points for tsunami source
    source_lonlat_extent <- extent(ds1$depth_contours)

    # Ensure tsunami extent exactly aligns with a degree
    # (in practice this will help us align pixels with our propagation model)
    tsunami_extent <- rbind(floor(source_lonlat_extent[c(1,3)] - c(2,2)),
        ceiling(source_lonlat_extent[c(2,4)] + c(2,2)))

    tsunami_surface_points_lonlat <- expand.grid(
        seq(tsunami_extent[1,1], tsunami_extent[2,1], by = tsunami_source_cellsize),
        seq(tsunami_extent[1,2], tsunami_extent[2,2], by = tsunami_source_cellsize))

    # If elevation data is provided, lookup the depths at the tsunami surface points.
    if(!is.null(elevation_raster)){

        use_kajiura_filter <- TRUE

        # Need to ensure that we look up points at longitudes which are within
        # the raster longitude range
        raster_longitude_midpoint <- 0.5 *
            (extent(elevation_raster)@xmin + extent(elevation_raster)@xmax)
        ltspl <- nrow(tsunami_surface_points_lonlat)
        tmp_tsp <- adjust_longitude_by_360_deg(tsunami_surface_points_lonlat,
            matrix(raster_longitude_midpoint, ncol=2, nrow=ltspl))

        # Process in chunks to reduce memory usage
        chunk_inds <- floor(seq(1, ltspl + 1, len=10))
        surface_point_ocean_depths <- tmp_tsp[,1]*NA
        for(i in seq_len(length(chunk_inds) - 1)){
            inds <- chunk_inds[i]:(chunk_inds[i+1]-1)
            surface_point_ocean_depths[inds] <- extract(elevation_raster, tmp_tsp[inds,1:2])
            gc()
        }

        # Convert negative elevation to depth, and ensure a minimum depth of 10m
        # for Kajiura filter. NOTE: When sources are on-land it may be better to increase
        # this 10m limit to avoid running out of memory (because it affects the spacing of points
        # in the kajiura filter). The only time I saw this was the 'makran2' source in PTHA18
        surface_point_ocean_depths <- pmax(-surface_point_ocean_depths, 10)

        rm(tmp_tsp); gc()

    }else{
        # In this case depths are not provided, and Kajiura filtering is not used
        use_kajiura_filter <- FALSE
        surface_point_ocean_depths <- NULL
    }

    # Make indices for unit sources in parallel computation.
    # If j varies fastest then the shallow unit sources
    # will be submitted early, which will be efficient if
    # they have more interior points (if the spacing is based on the depth)
    ij <- expand.grid(j = seq_len(ds1$discretized_source_dim[2]),
        i = seq_len(ds1$discretized_source_dim[1]))

    print('Making tsunami sources in parallel...')

    myrake <- sourcezone_rake[sourcename_index]
    gc()

    source_output_dir <- paste0(output_base_dir, 'Unit_source_data/', sourcename, '/')
    dir.create(source_output_dir, showWarnings=FALSE, recursive=TRUE)

    library(parallel)

    # Build the tsunami unit source for one (down-dip, along-strike) index
    # pair; writes a .RDS and a .tif, and returns the .RDS file path.
    parallel_fun <- function(ind){

        down_dip_index <- ij$i[ind]
        along_strike_index <- ij$j[ind]

        # Set the sub-unit-source point spacing based on the minimum sourcezone depth
        di <- down_dip_index:(down_dip_index+1)
        sj <- along_strike_index:(along_strike_index+1)
        depth_range <- range(ds1$unit_source_grid[di,3,sj])*1000
        approx_dx <- min(
            max(shallow_subunitsource_point_spacing, min(depth_range)),
            deep_subunitsource_point_spacing)
        approx_dy <- approx_dx

        # Use within-pixel integration for Okada along the top-row of unit-sources
        local_cell_integration_scale <- cell_integration_scale * (down_dip_index == 1)

        tsunami_ <- make_tsunami_unit_source(
            down_dip_index,
            along_strike_index,
            discrete_source=ds1,
            rake=myrake,
            tsunami_surface_points_lonlat = tsunami_surface_points_lonlat,
            approx_dx = approx_dx,
            approx_dy = approx_dy,
            depths_in_km=TRUE,
            kajiura_smooth=use_kajiura_filter,
            surface_point_ocean_depths=surface_point_ocean_depths,
            kajiura_grid_spacing=kajiura_grid_spacing,
            kajiura_where_deformation_exceeds_threshold=kajiura_use_threshold,
            minimal_output=minimise_tsunami_unit_source_output,
            verbose=FALSE,
            dstmx=okada_distance_factor,
            edge_taper_width=slip_edge_taper_width,
            cell_integration_scale=local_cell_integration_scale)

        # Save as RDS
        output_RDS_file <- paste0(source_output_dir, sourcename, '_',
            down_dip_index, '_', along_strike_index, '.RDS')
        saveRDS(tsunami_, file = output_RDS_file)

        tsunami_source_raster_filename <- paste0(source_output_dir, sourcename, '_',
            down_dip_index, '_', along_strike_index, '.tif')

        # Make a raster
        tsunami_unit_source_2_raster(
            tsunami_, tsunami_source_raster_filename, saveonly=TRUE,
            tsunami_surface_points_lonlat = tsunami_surface_points_lonlat,
            res=c(tsunami_source_cellsize, tsunami_source_cellsize))

        gc()
        return(output_RDS_file)
    }

    if(MC_CORES > 1){
        all_tsunami_files <- mcmapply(parallel_fun, ind=as.list(seq_len(nrow(ij))),
            mc.cores=MC_CORES, mc.preschedule=TRUE, SIMPLIFY=FALSE)
    }else{
        all_tsunami_files <- mapply(parallel_fun, ind=as.list(seq_len(nrow(ij))),
            SIMPLIFY=FALSE)
    }

    if(make_pdf_plot){
        # Finally -- read in all the results and make some plots
        all_tsunami <- lapply(all_tsunami_files, readRDS)
        # Anchored pattern so only the trailing '.RDS' extension is removed
        all_rasters <- paste0(source_output_dir, '/',
            gsub('\\.RDS$', '', basename(unlist(all_tsunami_files))), '.tif')
        all_tsunami_rast <- lapply(all_rasters, raster)

        # Plotting -- make a pdf for checking the sources
        plot_all_tsunami_unit_sources(sourcename, all_tsunami, all_tsunami_rast, ds1)

        rm(all_tsunami, all_tsunami_rast); gc()
    }
}
###############################################################################
#
# Optional plotting (interactive)
#
###############################################################################
# 3D scatter plot (via rgl::plot3d) with points coloured by z-value using a
# rainbow colour ramp. `add=TRUE` overlays on an existing rgl scene; any extra
# arguments are forwarded to plot3d().
scatter3d <- function(x, y, z, add=FALSE, ...){
    library(rgl)
    # Map z onto [0, 1]; the tiny epsilon avoids 0/0 when z is constant.
    ramp <- colorRamp(rainbow(255))
    z_scaled <- (z - min(z)) / (max(z) - min(z) + 1.0e-20)
    rgb_mat <- ramp(z_scaled)
    point_cols <- rgb(rgb_mat[, 1], rgb_mat[, 2], rgb_mat[, 3], maxColorValue=255)
    plot3d(x, y, z, col = point_cols, add=add, ...)
}
# Interactive rgl visualisation: overlays the summed unit-source sea-surface
# deformation on the unit-source interior points. Relies on objects created
# earlier in this script (discretized_sources, config variables, scatter3d).
if(make_3d_interactive_plot){
# NOTE: The next line will need to be changed interactively
sourcename = site_name #### 'alaska'
# Read back every unit-source RDS written in Step 2 for this source zone
all_tsunami = lapply(
Sys.glob(paste0(output_base_dir, sourcename, '/Unit_source_data/',
sourcename , '/', sourcename, '*.RDS')),
readRDS)
print('Computing unit sources for plotting in parallel...')
ds1 = discretized_sources[[sourcename]]
# Origin (lon, lat) of the local cartesian coordinate system
origin = ds1$unit_source_grid[1,1:2,1]
source_lonlat_extent = extent(ds1$depth_contours)
## Get surface points for tsunami source
tsunami_extent = rbind(floor(source_lonlat_extent[c(1,3)] - c(2,2)),
ceiling(source_lonlat_extent[c(2,4)] + c(2,2)))
tsunami_surface_points_lonlat = expand.grid(
seq(tsunami_extent[1,1], tsunami_extent[2,1], by = tsunami_source_cellsize),
seq(tsunami_extent[1,2], tsunami_extent[2,2], by = tsunami_source_cellsize))
## Compute interior points for all unit sources for plotting purposes
unit_source_indices = expand.grid(1:ds1$discretized_source_dim[1],
1:ds1$discretized_source_dim[2])
unit_source_index_list = list()
for(i in 1:length(unit_source_indices[,1])){
unit_source_index_list[[i]] = c(unit_source_indices[i,1],
unit_source_indices[i,2])
}
library(parallel)
# Make extra unit source points 'for show'
us = mcmapply(unit_source_interior_points_cartesian,
discretized_source=list(ds1),
unit_source_index = unit_source_index_list,
origin=list(origin),
approx_dx = list(5000), approx_dy = list(5000),
mc.preschedule=TRUE, mc.cores=MC_CORES, SIMPLIFY=FALSE)
## Make a 3D plot of the points inside the unit source
#for(i in 1:length(us)){
# plot3d_unit_source_interior_points_cartesian(us[[i]], add=(i>1))
#}
## Make points of tsunami source FOR PLOTTING.
## Origin is the same as unit sources above
tsunami_source_points_4plot = spherical_to_cartesian2d_coordinates(
tsunami_surface_points_lonlat, origin_lonlat = origin)
## Combine all unit sources
# Sum the smoothed displacement of every unit source onto one surface
zstore = all_tsunami[[1]]$smooth_tsunami_displacement*0
for(i in 1:length(all_tsunami)){
zstore = zstore + all_tsunami[[i]]$smooth_tsunami_displacement
}
# Make a 3D plot of the points inside the unit source
for(i in 1:length(us)){
plot3d_unit_source_interior_points_cartesian(us[[i]], add=(i>1),
add_zero_plane=FALSE)
}
#ti = 1
# Overlay the summed displacement, vertically exaggerated by 1e+05
scatter3d(tsunami_source_points_4plot[,1], tsunami_source_points_4plot[,2],
zstore*1.0e+05, add=TRUE, size=7)
}
| /R/examples/austptha_template/SOURCE_ZONES/TEMPLATE/EQ_SOURCE/produce_unit_sources.R | permissive | GeoscienceAustralia/ptha | R | false | false | 12,713 | r | ## ---- initialise ----
# Main 'driver' script to create the unit sources
#
# Gareth Davies, Geoscience Australia 2015/16
#
library(rptha)
library(raster)
###############################################################################
#
# Main input parameters
#
###############################################################################
# config.R defines the sourcezone shapefile vectors, sourcezone_rake, output
# directories and discretisation parameters used below - see config.R.
source('config.R')
## ---- takeCommandLineParameter ----
if(interactive() == FALSE){
#
# Optionally take an input argument when run from Rscript. This should be
# an integer giving the index of the shapefile we want to run
#
# This can be useful to allow the code to be run in batch on NCI
# with 1 job per shapefile.
#
input_arguments = commandArgs(trailingOnly=TRUE)
if(length(input_arguments) != 1){
print('Problem with input arguments')
print(input_arguments)
stop()
}else{
source_index = as.numeric(input_arguments)
}
# Get a vector with all contours that we want to convert to unit sources
# (i.e. subset the config vectors to the single selected source zone)
all_sourcezone_shapefiles = all_sourcezone_shapefiles[source_index]
all_sourcezone_downdip_shapefiles = all_sourcezone_downdip_shapefiles[source_index]
sourcezone_rake = sourcezone_rake[source_index]
}
## ---- makeDiscretizedSources ----
# Capture plots that occur as source is made in pdf
pdf('UnitSources.pdf', width=10, height=10)
# Loop over all source contour shapefiles, and make the discretized source zone
discretized_sources = list()
discretized_sources_statistics = list()
print('Making discretized sources ...')
for(source_shapefile_index in 1:length(all_sourcezone_shapefiles)){
source_shapefile = all_sourcezone_shapefiles[source_shapefile_index]
source_downdip_lines = all_sourcezone_downdip_shapefiles[source_shapefile_index]
# Extract a name for the source (shapefile basename without extension)
sourcename = gsub('.shp', '', basename(source_shapefile))
# Create unit sources for source_shapefile
discretized_sources[[sourcename]] =
discretized_source_from_source_contours(source_shapefile,
desired_subfault_length, desired_subfault_width, make_plot=TRUE,
downdip_lines = source_downdip_lines)
# Get unit source summary stats
#discretized_sources_statistics[[sourcename]] =
# #discretized_source_approximate_summary_statistics(
# discretized_source_summary_statistics(
# discretized_sources[[sourcename]],
# default_rake = sourcezone_rake[source_shapefile_index],
# make_plot=TRUE)
# Export the unit-source grid outline as an ESRI shapefile in WGS84
usg = unit_source_grid_to_SpatialPolygonsDataFrame(
discretized_sources[[sourcename]]$unit_source_grid)
proj4string(usg) = '+init=epsg:4326'
writeOGR(usg, dsn=paste0(output_base_dir, 'unit_source_grid'),
layer=sourcename, driver='ESRI Shapefile', overwrite=TRUE)
}
# Persist all discretized sources for the downstream scripts
saveRDS(discretized_sources, paste0(output_base_dir, 'all_discretized_sources.RDS'))
dev.off() # Save pdf plot
## ---- makeTsunamiSources ----
###############################################################################
#
# Step 2: Make tsunami unit sources
#
###############################################################################
# NOTE(review): this creates 'Unit_source_data' in the current working
# directory, but all outputs below are written under output_base_dir -- the
# two only coincide when output_base_dir is './'. Confirm this dir.create is
# still needed.
dir.create('Unit_source_data', showWarnings=FALSE)
# For every discretized source zone, build all of its tsunami unit sources.
for(sourcename_index in 1:length(names(discretized_sources))){
sourcename = names(discretized_sources)[sourcename_index]
# Get the discretized source
ds1 = discretized_sources[[sourcename]]
## Get surface points for tsunami source
source_lonlat_extent = extent(ds1$depth_contours)
# Ensure tsunami extent exactly aligns with a degree
# (in practice this will help us align pixels with our propagation model)
# Buffer the source extent by 2 degrees on all sides; row 1 holds the
# floored (xmin, ymin), row 2 the ceiled (xmax, ymax).
tsunami_extent = rbind(floor(source_lonlat_extent[c(1,3)] - c(2,2)),
ceiling(source_lonlat_extent[c(2,4)] + c(2,2)))
# Regular lon/lat grid of output surface points covering the buffered extent
tsunami_surface_points_lonlat = expand.grid(
seq(tsunami_extent[1,1], tsunami_extent[2,1], by = tsunami_source_cellsize),
seq(tsunami_extent[1,2], tsunami_extent[2,2], by = tsunami_source_cellsize))
# If elevation data is provided, lookup the depths at the tsunami surface points.
if(!is.null(elevation_raster)){
use_kajiura_filter = TRUE
# Need to ensure that we look up points at longitudes which are within
# the raster longitude range
raster_longitude_midpoint = 0.5 *
(extent(elevation_raster)@xmin + extent(elevation_raster)@xmax)
ltspl = length(tsunami_surface_points_lonlat[,1])
# Shift point longitudes by +/-360 degrees where needed so they match the
# raster's longitude convention.
tmp_tsp = adjust_longitude_by_360_deg(tsunami_surface_points_lonlat,
matrix(raster_longitude_midpoint, ncol=2, nrow=ltspl))
# Process in chunks to reduce memory usage
chunk_inds = floor(seq(1, ltspl + 1, len=10))
surface_point_ocean_depths = tmp_tsp[,1]*NA
for(i in 1:(length(chunk_inds)-1)){
inds = chunk_inds[i]:(chunk_inds[i+1]-1)
surface_point_ocean_depths[inds] = extract(elevation_raster, tmp_tsp[inds,1:2])
gc()
}
# Convert negative elevation to depth, and ensure a minimum depth of 10m
# for Kajiura filter. NOTE: When sources are on-land it may be better to increase
# this 10m limit to avoid running out of memory (because it affects the spacing of points
# in the kajiura filter). The only time I saw this was the 'makran2' source in PTHA18
surface_point_ocean_depths = pmax(-surface_point_ocean_depths, 10)
rm(tmp_tsp); gc()
}else{
# In this case depths are not provided, and Kajiura filtering is not used
use_kajiura_filter = FALSE
surface_point_ocean_depths = NULL
}
# Make indices for unit sources in parallel computation.
# If j varies fastest then the shallow unit sources
# will be submitted early, which will be efficient if
# they have more interior points (if the spacing is based on the depth)
ij = expand.grid(j = 1:ds1$discretized_source_dim[2],
i = 1:ds1$discretized_source_dim[1])
print('Making tsunami sources in parallel...')
myrake = sourcezone_rake[sourcename_index]
gc()
source_output_dir = paste0(output_base_dir, 'Unit_source_data/', sourcename, '/')
dir.create(source_output_dir, showWarnings=FALSE, recursive=TRUE)
library(parallel)
# Function to facilitate running in parallel with mcmapply
# Builds the single unit source indexed by ij[ind, ], saves it as both an RDS
# file and a GeoTiff raster, and returns the RDS filename. It reads (but must
# not modify) the surrounding workspace, since it runs in a fork.
parallel_fun<-function(ind){
# Make a single tsunami unit source
down_dip_index = ij$i[ind]
along_strike_index = ij$j[ind]
# Set the sub-unit-source point spacing based on the minimum sourcezone depth
di = down_dip_index:(down_dip_index+1)
sj = along_strike_index:(along_strike_index+1)
# Depth range in metres over the 4 corners of this unit source (the grid
# stores km, hence the *1000).
depth_range = range(ds1$unit_source_grid[di,3,sj])*1000
# Point spacing = min corner depth, clamped between the shallow and deep
# spacing limits from config.R.
approx_dx = min(
max(shallow_subunitsource_point_spacing, min(depth_range)),
deep_subunitsource_point_spacing)
approx_dy = approx_dx
# Use within-pixel integration for Okada along the top-row of unit-sources
local_cell_integration_scale = cell_integration_scale * (down_dip_index == 1)
tsunami_ = make_tsunami_unit_source(
down_dip_index,
along_strike_index,
discrete_source=ds1,
rake=myrake,
tsunami_surface_points_lonlat = tsunami_surface_points_lonlat,
approx_dx = approx_dx,
approx_dy = approx_dy,
depths_in_km=TRUE,
kajiura_smooth=use_kajiura_filter,
surface_point_ocean_depths=surface_point_ocean_depths,
kajiura_grid_spacing=kajiura_grid_spacing,
kajiura_where_deformation_exceeds_threshold=kajiura_use_threshold,
minimal_output=minimise_tsunami_unit_source_output,
verbose=FALSE,
dstmx=okada_distance_factor,
edge_taper_width=slip_edge_taper_width,
cell_integration_scale=local_cell_integration_scale)
# Save as RDS
output_RDS_file = paste0(source_output_dir, sourcename, '_',
down_dip_index, '_', along_strike_index, '.RDS')
saveRDS(tsunami_, file = output_RDS_file)
tsunami_source_raster_filename = paste0(source_output_dir, sourcename, '_',
down_dip_index, '_', along_strike_index, '.tif')
# Make a raster
tsunami_unit_source_2_raster(
tsunami_, tsunami_source_raster_filename, saveonly=TRUE,
tsunami_surface_points_lonlat = tsunami_surface_points_lonlat,
res=c(tsunami_source_cellsize, tsunami_source_cellsize))
gc()
return(output_RDS_file)
}
# Run over all unit sources; mcmapply uses fork-based parallelism (not
# available on Windows -- presumably this is run on Linux/NCI; confirm).
if(MC_CORES > 1){
all_tsunami_files = mcmapply(parallel_fun, ind=as.list(1:length(ij[,1])),
mc.cores=MC_CORES, mc.preschedule=TRUE, SIMPLIFY=FALSE)
}else{
all_tsunami_files = mapply(parallel_fun, ind=as.list(1:length(ij[,1])),
SIMPLIFY=FALSE)
}
if(make_pdf_plot){
# Finally -- read in all the results and make some plots
# NOTE(review): 'f<-function(x)' assigns a global 'f' as a side effect; an
# anonymous function would suffice here.
all_tsunami = lapply(as.list(all_tsunami_files), f<-function(x) readRDS(x))
all_rasters = paste0(source_output_dir, '/',
gsub('.RDS', '', basename(unlist(all_tsunami_files))), '.tif')
all_tsunami_rast = lapply(as.list(all_rasters), f<-function(x) raster(x))
# Plotting -- make a pdf for checking the sources
if(make_pdf_plot) plot_all_tsunami_unit_sources(sourcename, all_tsunami, all_tsunami_rast, ds1)
rm(all_tsunami, all_tsunami_rast); gc()
}
}
###############################################################################
#
# Optional plotting (interactive)
#
###############################################################################
# 3D scatter plot with points coloured by their z value using a rainbow ramp.
#
# x, y, z: point coordinates; the colour is mapped from z.
# add:     if TRUE, add the points to an existing rgl scene.
# ...:     further arguments passed through to rgl::plot3d.
scatter3d<-function(x, y, z, add=FALSE, ...){
    library(rgl)
    # Normalise z onto [0, 1]; the tiny constant prevents division by zero
    # when all z values are equal.
    z_scaled = (z - min(z))/(max(z) - min(z) + 1.0e-20)
    ramp = colorRamp(rainbow(255))
    rgb_mat = ramp(z_scaled)
    point_col = rgb(rgb_mat[,1], rgb_mat[,2], rgb_mat[,3], maxColorValue=255)
    plot3d(x, y, z, col = point_col, add=add, ...)
}
if(make_3d_interactive_plot){
# NOTE: The next line will need to be changed interactively
sourcename = site_name #### 'alaska'
# Re-read every unit-source RDS file written in Step 2 for this source zone.
all_tsunami = lapply(
Sys.glob(paste0(output_base_dir, sourcename, '/Unit_source_data/',
sourcename , '/', sourcename, '*.RDS')),
readRDS)
print('Computing unit sources for plotting in parallel...')
ds1 = discretized_sources[[sourcename]]
# Use the first grid point as the origin of the local cartesian coordinates.
origin = ds1$unit_source_grid[1,1:2,1]
source_lonlat_extent = extent(ds1$depth_contours)
## Get surface points for tsunami source
# (same 2-degree buffered, degree-aligned extent as used in Step 2 above)
tsunami_extent = rbind(floor(source_lonlat_extent[c(1,3)] - c(2,2)),
ceiling(source_lonlat_extent[c(2,4)] + c(2,2)))
tsunami_surface_points_lonlat = expand.grid(
seq(tsunami_extent[1,1], tsunami_extent[2,1], by = tsunami_source_cellsize),
seq(tsunami_extent[1,2], tsunami_extent[2,2], by = tsunami_source_cellsize))
## Compute interior points for all unit sources for plotting purposes
unit_source_indices = expand.grid(1:ds1$discretized_source_dim[1],
1:ds1$discretized_source_dim[2])
# Convert each row of the index grid to a length-2 vector, as expected by
# unit_source_interior_points_cartesian below.
unit_source_index_list = list()
for(i in 1:length(unit_source_indices[,1])){
unit_source_index_list[[i]] = c(unit_source_indices[i,1],
unit_source_indices[i,2])
}
library(parallel)
# Make extra unit source points 'for show'
us = mcmapply(unit_source_interior_points_cartesian,
discretized_source=list(ds1),
unit_source_index = unit_source_index_list,
origin=list(origin),
approx_dx = list(5000), approx_dy = list(5000),
mc.preschedule=TRUE, mc.cores=MC_CORES, SIMPLIFY=FALSE)
## Make a 3D plot of the points inside the unit source
#for(i in 1:length(us)){
# plot3d_unit_source_interior_points_cartesian(us[[i]], add=(i>1))
#}
## Make points of tsunami source FOR PLOTTING.
## Origin is the same as unit sources above
tsunami_source_points_4plot = spherical_to_cartesian2d_coordinates(
tsunami_surface_points_lonlat, origin_lonlat = origin)
## Combine all unit sources
# Sum the smoothed displacement of every unit source at the surface points.
zstore = all_tsunami[[1]]$smooth_tsunami_displacement*0
for(i in 1:length(all_tsunami)){
zstore = zstore + all_tsunami[[i]]$smooth_tsunami_displacement
}
# Make a 3D plot of the points inside the unit source
for(i in 1:length(us)){
plot3d_unit_source_interior_points_cartesian(us[[i]], add=(i>1),
add_zero_plane=FALSE)
}
#ti = 1
# Overlay the summed displacement, vertically exaggerated (x 1e+05) so it is
# visible at the scale of the source geometry.
scatter3d(tsunami_source_points_4plot[,1], tsunami_source_points_4plot[,2],
zstore*1.0e+05, add=TRUE, size=7)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hierarchy.R
\name{hierarchy}
\alias{hierarchy}
\title{The Hierarchy class}
\usage{
hierarchy(..., .list = NULL)
}
\arguments{
\item{...}{Any number of objects of class \code{Taxon} or taxonomic names as
character strings}
\item{.list}{An alternative to the \code{...} input. Any number of objects of class
\code{\link[=taxon]{taxon()}} or character vectors in a list. Cannot be used with \code{...}.}
}
\value{
An \code{R6Class} object of class \code{Hierarchy}
}
\description{
A class containing an ordered list of \code{\link[=taxon]{taxon()}} objects that represent a
hierarchical classification.
}
\details{
On initialization, taxa are sorted if they have ranks with a known
order.
\strong{Methods}
\describe{
\item{\code{pop(rank_names)}}{
Remove \code{Taxon} elements by rank name, taxon name or taxon ID. The
change happens in place, so you don't need to assign output to a new
object. returns self - rank_names (character) a vector of rank names
}
\item{\code{pick(rank_names)}}{
Select \code{Taxon} elements by rank name, taxon name or taxon ID. The
change happens in place, so you don't need to assign output to a new
object. returns self - rank_names (character) a vector of rank names
}
}
}
\examples{
(x <- taxon(
name = taxon_name("Poaceae"),
rank = taxon_rank("family"),
id = taxon_id(4479)
))
(y <- taxon(
name = taxon_name("Poa"),
rank = taxon_rank("genus"),
id = taxon_id(4544)
))
(z <- taxon(
name = taxon_name("Poa annua"),
rank = taxon_rank("species"),
id = taxon_id(93036)
))
(res <- hierarchy(z, y, x))
res$taxa
res$ranklist
# pop off a rank
pop(res, ranks("family"))
# pick a rank
(res <- hierarchy(z, y, x))
pick(res, ranks("family"))
# null taxa
x <- taxon(NULL)
(res <- hierarchy(x, x, x))
## similar to hierarchy(), but `taxa` slot is not empty
}
\seealso{
Other classes: \code{\link{hierarchies}},
\code{\link{taxa}}, \code{\link{taxmap}},
\code{\link{taxon_database}}, \code{\link{taxon_id}},
\code{\link{taxon_name}}, \code{\link{taxon_rank}},
\code{\link{taxonomy}}, \code{\link{taxon}}
}
| /man/hierarchy.Rd | permissive | xpingli/taxa | R | false | true | 2,131 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hierarchy.R
\name{hierarchy}
\alias{hierarchy}
\title{The Hierarchy class}
\usage{
hierarchy(..., .list = NULL)
}
\arguments{
\item{...}{Any number of objects of class \code{Taxon} or taxonomic names as
character strings}
\item{.list}{An alternative to the \code{...} input. Any number of objects of class
\code{\link[=taxon]{taxon()}} or character vectors in a list. Cannot be used with \code{...}.}
}
\value{
An \code{R6Class} object of class \code{Hierarchy}
}
\description{
A class containing an ordered list of \code{\link[=taxon]{taxon()}} objects that represent a
hierarchical classification.
}
\details{
On initialization, taxa are sorted if they have ranks with a known
order.
\strong{Methods}
\describe{
\item{\code{pop(rank_names)}}{
Remove \code{Taxon} elements by rank name, taxon name or taxon ID. The
change happens in place, so you don't need to assign output to a new
object. returns self - rank_names (character) a vector of rank names
}
\item{\code{pick(rank_names)}}{
Select \code{Taxon} elements by rank name, taxon name or taxon ID. The
change happens in place, so you don't need to assign output to a new
object. returns self - rank_names (character) a vector of rank names
}
}
}
\examples{
(x <- taxon(
name = taxon_name("Poaceae"),
rank = taxon_rank("family"),
id = taxon_id(4479)
))
(y <- taxon(
name = taxon_name("Poa"),
rank = taxon_rank("genus"),
id = taxon_id(4544)
))
(z <- taxon(
name = taxon_name("Poa annua"),
rank = taxon_rank("species"),
id = taxon_id(93036)
))
(res <- hierarchy(z, y, x))
res$taxa
res$ranklist
# pop off a rank
pop(res, ranks("family"))
# pick a rank
(res <- hierarchy(z, y, x))
pick(res, ranks("family"))
# null taxa
x <- taxon(NULL)
(res <- hierarchy(x, x, x))
## similar to hierarchy(), but `taxa` slot is not empty
}
\seealso{
Other classes: \code{\link{hierarchies}},
\code{\link{taxa}}, \code{\link{taxmap}},
\code{\link{taxon_database}}, \code{\link{taxon_id}},
\code{\link{taxon_name}}, \code{\link{taxon_rank}},
\code{\link{taxonomy}}, \code{\link{taxon}}
}
|
### Name: USIncExp
### Title: Income and Expenditures in the US
### Aliases: USIncExp
### Keywords: datasets
### ** Examples
## These examples are presented in the vignette distributed with this
## package, the code was generated by Stangle("strucchange-intro.Rnw")
###################################################
### chunk number 1: data
###################################################
# Plot the monthly US income and expenditure series on a common axis.
library(strucchange)
data(USIncExp)
plot(USIncExp, plot.type = "single", col = 1:2, ylab = "billion US$")
legend(1960, max(USIncExp), c("income", "expenditures"),
lty = c(1,1), col = 1:2, bty = "n")
###################################################
### chunk number 2: subset
###################################################
library(strucchange)
data(USIncExp)
# NOTE: the original vignette code called library(ts) here. The 'ts' package
# was merged into the base 'stats' package in R 1.9.0, so that call now fails
# and is unnecessary: window() is provided by 'stats', which is attached by
# default.
# Restrict the series to December 1985 onwards.
USIncExp2 <- window(USIncExp, start = c(1985,12))
###################################################
### chunk number 3: ecm-setup
###################################################
# Error-correction model (ECM) setup: regress expenditure on income, lag the
# cointegration residuals by one period (k = -1), and model differenced
# expenditure from the lagged residual and differenced income.
coint.res <- residuals(lm(expenditure ~ income, data = USIncExp2))
coint.res <- lag(ts(coint.res, start = c(1985,12), freq = 12), k = -1)
USIncExp2 <- cbind(USIncExp2, diff(USIncExp2), coint.res)
USIncExp2 <- window(USIncExp2, start = c(1986,1), end = c(2001,2))
colnames(USIncExp2) <- c("income", "expenditure", "diff.income",
"diff.expenditure", "coint.res")
ecm.model <- diff.expenditure ~ coint.res + diff.income
###################################################
### chunk number 4: ts-used
###################################################
# Plot the three series that enter the ECM.
plot(USIncExp2[,3:5], main = "")
###################################################
### chunk number 5: efp
###################################################
# Empirical fluctuation processes: OLS-based CUSUM, and moving estimates (ME)
# with bandwidth h = 0.2.
ocus <- efp(ecm.model, type="OLS-CUSUM", data=USIncExp2)
me <- efp(ecm.model, type="ME", data=USIncExp2, h=0.2)
###################################################
### chunk number 6: efp-boundary
###################################################
# 5% critical boundary for the OLS-CUSUM process.
bound.ocus <- boundary(ocus, alpha=0.05)
###################################################
### chunk number 7: OLS-CUSUM
###################################################
plot(ocus)
###################################################
### chunk number 8: efp-boundary2
###################################################
# Same plot, drawing the boundary lines manually.
plot(ocus, boundary = FALSE)
lines(bound.ocus, col = 4)
lines(-bound.ocus, col = 4)
###################################################
### chunk number 9: ME-null
###################################################
plot(me, functional = NULL)
###################################################
### chunk number 10: efp-sctest
###################################################
# Significance test for structural change, from the fluctuation process.
sctest(ocus)
###################################################
### chunk number 11: efp-sctest2
###################################################
# Equivalent test, specified directly via the model formula.
sctest(ecm.model, type="OLS-CUSUM", data=USIncExp2)
###################################################
### chunk number 12: Fstats
###################################################
# F statistics for a break at each candidate time between Jan 1990 and Jun 1999.
fs <- Fstats(ecm.model, from = c(1990, 1), to = c(1999,6), data = USIncExp2)
###################################################
### chunk number 13: Fstats-plot
###################################################
plot(fs)
###################################################
### chunk number 14: pval-plot
###################################################
plot(fs, pval=TRUE)
###################################################
### chunk number 15: aveF-plot
###################################################
plot(fs, aveF=TRUE)
###################################################
### chunk number 16: Fstats-sctest
###################################################
# expF-type structural change test based on the F statistics.
sctest(fs, type="expF")
###################################################
### chunk number 17: Fstats-sctest2
###################################################
sctest(ecm.model, type = "expF", from = 49, to = 162, data = USIncExp2)
###################################################
### chunk number 18: mefp
###################################################
# Online monitoring: initialise on the 1986-1989 history period ...
USIncExp3 <- window(USIncExp2, start = c(1986, 1), end = c(1989,12))
me.mefp <- mefp(ecm.model, type = "ME", data = USIncExp3, alpha = 0.05)
###################################################
### chunk number 19: monitor1
###################################################
# ... then monitor as new observations arrive (through 1990) ...
USIncExp3 <- window(USIncExp2, start = c(1986, 1), end = c(1990,12))
me.mefp <- monitor(me.mefp)
###################################################
### chunk number 20: monitor2
###################################################
# ... and finally with all remaining data.
USIncExp3 <- window(USIncExp2, start = c(1986, 1))
me.mefp <- monitor(me.mefp)
me.mefp
###################################################
### chunk number 21: monitor-plot
###################################################
plot(me.mefp)
###################################################
### chunk number 22: mefp2
###################################################
# Alternative monitoring setup built from an existing ME fluctuation process.
USIncExp3 <- window(USIncExp2, start = c(1986, 1), end = c(1989,12))
me.efp <- efp(ecm.model, type = "ME", data = USIncExp3, h = 0.5)
me.mefp <- mefp(me.efp, alpha=0.05)
###################################################
### chunk number 23: monitor3
###################################################
USIncExp3 <- window(USIncExp2, start = c(1986, 1))
me.mefp <- monitor(me.mefp)
###################################################
### chunk number 24: monitor-plot2
###################################################
plot(me.mefp)
| /code/strucchange_1.1-1/strucchange/R-ex/USIncExp.R | no_license | ethorondor/BSPT | R | false | false | 5,621 | r | ### Name: USIncExp
### Title: Income and Expenditures in the US
### Aliases: USIncExp
### Keywords: datasets
### ** Examples
## These examples are presented in the vignette distributed with this
## package, the code was generated by Stangle("strucchange-intro.Rnw")
###################################################
### chunk number 1: data
###################################################
# Plot the monthly US income and expenditure series on a common axis.
library(strucchange)
data(USIncExp)
plot(USIncExp, plot.type = "single", col = 1:2, ylab = "billion US$")
legend(1960, max(USIncExp), c("income", "expenditures"),
lty = c(1,1), col = 1:2, bty = "n")
###################################################
### chunk number 2: subset
###################################################
library(strucchange)
data(USIncExp)
# NOTE: the original vignette code called library(ts) here. The 'ts' package
# was merged into the base 'stats' package in R 1.9.0, so that call now fails
# and is unnecessary: window() is provided by 'stats', which is attached by
# default.
# Restrict the series to December 1985 onwards.
USIncExp2 <- window(USIncExp, start = c(1985,12))
###################################################
### chunk number 3: ecm-setup
###################################################
# Error-correction model (ECM) setup: regress expenditure on income, lag the
# cointegration residuals by one period (k = -1), and model differenced
# expenditure from the lagged residual and differenced income.
coint.res <- residuals(lm(expenditure ~ income, data = USIncExp2))
coint.res <- lag(ts(coint.res, start = c(1985,12), freq = 12), k = -1)
USIncExp2 <- cbind(USIncExp2, diff(USIncExp2), coint.res)
USIncExp2 <- window(USIncExp2, start = c(1986,1), end = c(2001,2))
colnames(USIncExp2) <- c("income", "expenditure", "diff.income",
"diff.expenditure", "coint.res")
ecm.model <- diff.expenditure ~ coint.res + diff.income
###################################################
### chunk number 4: ts-used
###################################################
# Plot the three series that enter the ECM.
plot(USIncExp2[,3:5], main = "")
###################################################
### chunk number 5: efp
###################################################
# Empirical fluctuation processes: OLS-based CUSUM, and moving estimates (ME)
# with bandwidth h = 0.2.
ocus <- efp(ecm.model, type="OLS-CUSUM", data=USIncExp2)
me <- efp(ecm.model, type="ME", data=USIncExp2, h=0.2)
###################################################
### chunk number 6: efp-boundary
###################################################
# 5% critical boundary for the OLS-CUSUM process.
bound.ocus <- boundary(ocus, alpha=0.05)
###################################################
### chunk number 7: OLS-CUSUM
###################################################
plot(ocus)
###################################################
### chunk number 8: efp-boundary2
###################################################
# Same plot, drawing the boundary lines manually.
plot(ocus, boundary = FALSE)
lines(bound.ocus, col = 4)
lines(-bound.ocus, col = 4)
###################################################
### chunk number 9: ME-null
###################################################
plot(me, functional = NULL)
###################################################
### chunk number 10: efp-sctest
###################################################
# Significance test for structural change, from the fluctuation process.
sctest(ocus)
###################################################
### chunk number 11: efp-sctest2
###################################################
# Equivalent test, specified directly via the model formula.
sctest(ecm.model, type="OLS-CUSUM", data=USIncExp2)
###################################################
### chunk number 12: Fstats
###################################################
# F statistics for a break at each candidate time between Jan 1990 and Jun 1999.
fs <- Fstats(ecm.model, from = c(1990, 1), to = c(1999,6), data = USIncExp2)
###################################################
### chunk number 13: Fstats-plot
###################################################
plot(fs)
###################################################
### chunk number 14: pval-plot
###################################################
plot(fs, pval=TRUE)
###################################################
### chunk number 15: aveF-plot
###################################################
plot(fs, aveF=TRUE)
###################################################
### chunk number 16: Fstats-sctest
###################################################
# expF-type structural change test based on the F statistics.
sctest(fs, type="expF")
###################################################
### chunk number 17: Fstats-sctest2
###################################################
sctest(ecm.model, type = "expF", from = 49, to = 162, data = USIncExp2)
###################################################
### chunk number 18: mefp
###################################################
# Online monitoring: initialise on the 1986-1989 history period ...
USIncExp3 <- window(USIncExp2, start = c(1986, 1), end = c(1989,12))
me.mefp <- mefp(ecm.model, type = "ME", data = USIncExp3, alpha = 0.05)
###################################################
### chunk number 19: monitor1
###################################################
# ... then monitor as new observations arrive (through 1990) ...
USIncExp3 <- window(USIncExp2, start = c(1986, 1), end = c(1990,12))
me.mefp <- monitor(me.mefp)
###################################################
### chunk number 20: monitor2
###################################################
# ... and finally with all remaining data.
USIncExp3 <- window(USIncExp2, start = c(1986, 1))
me.mefp <- monitor(me.mefp)
me.mefp
###################################################
### chunk number 21: monitor-plot
###################################################
plot(me.mefp)
###################################################
### chunk number 22: mefp2
###################################################
# Alternative monitoring setup built from an existing ME fluctuation process.
USIncExp3 <- window(USIncExp2, start = c(1986, 1), end = c(1989,12))
me.efp <- efp(ecm.model, type = "ME", data = USIncExp3, h = 0.5)
me.mefp <- mefp(me.efp, alpha=0.05)
###################################################
### chunk number 23: monitor3
###################################################
USIncExp3 <- window(USIncExp2, start = c(1986, 1))
me.mefp <- monitor(me.mefp)
###################################################
### chunk number 24: monitor-plot2
###################################################
plot(me.mefp)
|
library(qfa)
library(data.table)
# Standardise FIT data
######################
# Combine all per-plate QFA output files under <folder>/ANALYSISOUT into one
# standardised <folder>_FIT.out table with fitness estimates appended.
#
# folder:      experiment directory containing an ANALYSISOUT subdirectory
# skip:        number of lines to skip at the top of each raw .txt file
# use_trimmed: if TRUE, copy the "Trimmed" growth-curve parameters into the
#              canonical g/r/K columns and drop the redundant Area/Greyscale/
#              Trimmed estimate columns (required for the BIR1_17 data)
standardise_fit<-function(folder, skip, use_trimmed=FALSE){
	adir=file.path(folder,"ANALYSISOUT")
	flist=file.path(adir,list.files(adir,pattern="*.txt"))
	fdf=do.call(rbind, lapply(flist, data.table::fread,header=TRUE,sep="\t",skip=skip,stringsAsFactors=FALSE))
	if(use_trimmed){
		fdf$g=fdf$"Trimmed G(0)"
		fdf$r=fdf$"Trimmed r"
		fdf$K=fdf$"Trimmed K"
		fdf[,c("Area G(0)", "Area r", "Area K", "Area Error", "Greyscale G(0)", "Greyscale r", "Greyscale K", "Greyscale Error", "Trimmed G(0)", "Trimmed r", "Trimmed K", "Trimmed Error")]=NULL
	}
	fdf$v=1                # single viability value for all colonies
	fdf=makeFitness(fdf)   # qfa: append derived fitness measures (MDR, MDP, AUC, ...)
	write.table(fdf,file.path(adir,paste(folder,"_FIT.out",sep="")),quote=FALSE,row.names=FALSE,sep="\t")
	invisible(fdf)
}
# bir1-17 query screen: use trimmed estimates; one header line to skip
standardise_fit("BIR1_17", skip=1, use_trimmed=TRUE)
# control SGA screen: columns already standard; no header line to skip
standardise_fit("cSGA", skip=0)
# Create GIS.txt files
######################
# Lookup table mapping systematic ORF names to standard gene names.
# NOTE(review): hard-coded Windows path -- this only runs on the original
# analysis machine; consider a relative/configurable path.
o2g=fread("F:\\LOGS3\\CommonAUXILIARY\\ORF2GENEv2.txt",header=TRUE,stringsAsFactors=FALSE)
genes=o2g$Gene
names(genes)=o2g$ORF
# ORFs excluded from every screen (presumably known problematic strains --
# confirm against lab records).
commonStrip=c("YDR173C","YER069W","YHR018C","YJL071W","YJL088W","YML099C","YMR042W","YMR062C","YOL058W","YOL140W","YBR248C","YCL030C","YFR025C","YER055C",
"YIL020C","YIL116W","YCL018W","YGL009C","YHR002W","YLR451W","YNL104C","YOR108W","YBR115C","YDL131W","YDL182W","YDR034C","YDR234W","YGL154C",
"YIL094C","YIR034C","YNR050C","YMR038C")
sgd=readSGD()
# Also exclude the 20 genes either side of YJR089W and YEL021W (presumably a
# linkage-group strip around the query loci -- confirm).
neighbs=getNeighbours(c("YJR089W","YEL021W"),20,sgd)
StripListLink=unique(neighbs$FName)
strip=c(commonStrip,StripListLink)
# Load the standardised fitness tables written above (query and control screens).
bdf=data.table::fread("BIR1_17/ANALYSISOUT/BIR1_17_FIT.out",header=TRUE,stringsAsFactors=FALSE,sep="\t")
cdf=data.table::fread("cSGA/ANALYSISOUT/cSGA_FIT.out",header=TRUE,stringsAsFactors=FALSE,sep="\t")
# Drop stripped ORFs and attach gene names.
bdf=bdf[!bdf$ORF%in%strip,]
cdf=cdf[!cdf$ORF%in%strip,]
bdf$Gene=genes[bdf$ORF]
cdf$Gene=genes[cdf$ORF]
# Attach experiment metadata columns used by the qfa epistasis functions below.
bdf$Medium="SDM_rhl_CNGHT"
cdf$Medium="SDM_rhl_CNGT"
bdf$ScreenID="bir1-17"
cdf$ScreenID="cSGA"
bdf$PI="DAL"
cdf$PI="DAL"
bdf$Client="MS"
cdf$Client="MS"
bdf$Inoc="DIL"
cdf$Inoc="DIL"
bdf$Screen.Name="bir1-17"
cdf$Screen.Name="cSGA"
bdf$Library="SDLv2"
cdf$Library="SDLv2"
bdf$User="AC"
cdf$User="SGA"
bdf$ExptDate="2010"
cdf$ExptDate="2009"
# Combined treatment+medium label used to pair query/control conditions below.
bdf$TrtMed=paste(bdf$Treatment,bdf$Medium,sep="_")
cdf$TrtMed=paste(cdf$Treatment,cdf$Medium,sep="_")
# Inspect available condition labels (interactive sanity check).
unique(cdf$TrtMed)
unique(bdf$TrtMed)
# Matched condition labels: ctms[i] (control) is compared against btms[i] (query).
ctms=c("20_SDM_rhl_CNGT","27_SDM_rhl_CNGT","37_SDM_rhl_CNGT")
btms=c("20_SDM_rhl_CNGHT","27_SDM_rhl_CNGHT","37_SDM_rhl_CNGHT")
# Settings for the genetic-interaction estimation.
bootstrap=NULL
reg="lmreg"
normalised=c("FALSE")
fdef="MDRMDP"
pdf("FitnessPlots.pdf")
# For each fitness definition, test type and matched condition pair: estimate
# genetic interactions, plot them, and write a *_GIS.txt report.
for(fdef in c("MDRMDP","r","K","AUC","MDR","MDP")){
# Use the current fitness definition as the working 'fit' column.
bdf$fit=bdf[[fdef]]
cdf$fit=cdf[[fdef]]
for(wctest in c(TRUE,FALSE)){
if(wctest) {tlab="WILCOX"}else{tlab="TTEST"}
for(i in seq_along(ctms)){
# a = query screen rows, b = control screen rows, for the matched condition.
a=bdf[bdf$TrtMed==btms[i],]
b=cdf[cdf$TrtMed==ctms[i],]
clab = paste(unique(b$ScreenID),unique(b$Inoc),unique(b$Library),unique(b$User),unique(b$Screen.Name),unique(b$ExptDate),unique(b$TrtMed),collapse=" ")
qlab = paste(unique(a$ScreenID),unique(a$Inoc),unique(a$Library),unique(a$User),unique(a$Screen.Name),unique(a$ExptDate),unique(a$TrtMed),collapse=" ")
#root = paste(unique(QUER$Client),qfolder[1],unique(QUER$Screen.Name),qTrtMed,"vs",cfolder[1],unique(CONT$Screen.Name),cTrtMed,sep="_")
#if (fileID!="") root=paste(root,fileID,sep="_")
if (wctest) {testlab="Wilcoxon"}else{testlab="t-test"}
#if (!is.null(bootstrap)) testlab="bootstrap"
# Calculate genetic interactions and produce epistasis plot
epi=qfa.epi(a,b,0.05,plot=FALSE,wctest=wctest,bootstrap=bootstrap,modcheck=FALSE,reg=reg)
flab=paste("Fitness plot (",testlab,")",sep="")
mmain=paste("Normalised =",normalised[1],fdef,flab,sep=" ")
qfa.epiplot(epi,0.05,xxlab=clab,yylab=qlab,mmain=mmain,fmax=0)
# Write the genetic-interaction report for this combination.
report.epi(epi$Results,file=paste(fdef,ctms[i],tlab,"GIS.txt",sep="_"))
}
}
}
dev.off()
| /updateFiles.R | no_license | CnrLwlss/BIR1_17 | R | false | false | 4,365 | r | library(qfa)
library(data.table)
# Standardise FIT data
######################
# Query screen (bir1-17): combine the per-plate files, promote the "Trimmed"
# growth parameters to the canonical g/r/K columns, drop redundant estimate
# columns, then append qfa fitness measures and write BIR1_17_FIT.out.
folder="BIR1_17"
skip=1
flist=file.path(folder,"ANALYSISOUT",list.files(file.path(folder,"ANALYSISOUT"),pattern="*.txt"))
fdf=do.call(rbind, lapply(flist, data.table::fread,header=TRUE,sep="\t",skip=skip,stringsAsFactors=FALSE))
fdf$g=fdf$"Trimmed G(0)"
fdf$r=fdf$"Trimmed r"
fdf$K=fdf$"Trimmed K"
fdf$v=1
fdf[,c("Area G(0)", "Area r", "Area K", "Area Error", "Greyscale G(0)", "Greyscale r", "Greyscale K", "Greyscale Error", "Trimmed G(0)", "Trimmed r", "Trimmed K", "Trimmed Error")]=NULL
fdf=makeFitness(fdf)
write.table(fdf,file.path(folder,"ANALYSISOUT",paste(folder,"_FIT.out",sep="")),quote=FALSE,row.names=FALSE,sep="\t")
# Control screen (cSGA): columns are already standard; just add v and fitness.
folder="cSGA"
skip=0
flist=file.path(folder,"ANALYSISOUT",list.files(file.path(folder,"ANALYSISOUT"),pattern="*.txt"))
fdf=do.call(rbind, lapply(flist, data.table::fread,header=TRUE,sep="\t",skip=skip,stringsAsFactors=FALSE))
fdf$v=1
fdf=makeFitness(fdf)
write.table(fdf,file.path(folder,"ANALYSISOUT",paste(folder,"_FIT.out",sep="")),quote=FALSE,row.names=FALSE,sep="\t")
# Create GIS.txt files
######################
# ORF -> gene name lookup.
# NOTE(review): hard-coded Windows path -- only runs on the original machine.
o2g=fread("F:\\LOGS3\\CommonAUXILIARY\\ORF2GENEv2.txt",header=TRUE,stringsAsFactors=FALSE)
genes=o2g$Gene
names(genes)=o2g$ORF
# ORFs excluded from every screen (presumably known problematic strains).
commonStrip=c("YDR173C","YER069W","YHR018C","YJL071W","YJL088W","YML099C","YMR042W","YMR062C","YOL058W","YOL140W","YBR248C","YCL030C","YFR025C","YER055C",
"YIL020C","YIL116W","YCL018W","YGL009C","YHR002W","YLR451W","YNL104C","YOR108W","YBR115C","YDL131W","YDL182W","YDR034C","YDR234W","YGL154C",
"YIL094C","YIR034C","YNR050C","YMR038C")
sgd=readSGD()
# Also exclude the 20 genes either side of YJR089W and YEL021W (presumably a
# linkage-group strip around the query loci -- confirm).
neighbs=getNeighbours(c("YJR089W","YEL021W"),20,sgd)
StripListLink=unique(neighbs$FName)
strip=c(commonStrip,StripListLink)
# Load the standardised fitness tables written above, strip excluded ORFs,
# attach gene names and the screen metadata expected by the qfa functions.
bdf=data.table::fread("BIR1_17/ANALYSISOUT/BIR1_17_FIT.out",header=TRUE,stringsAsFactors=FALSE,sep="\t")
cdf=data.table::fread("cSGA/ANALYSISOUT/cSGA_FIT.out",header=TRUE,stringsAsFactors=FALSE,sep="\t")
bdf=bdf[!bdf$ORF%in%strip,]
cdf=cdf[!cdf$ORF%in%strip,]
bdf$Gene=genes[bdf$ORF]
cdf$Gene=genes[cdf$ORF]
bdf$Medium="SDM_rhl_CNGHT"
cdf$Medium="SDM_rhl_CNGT"
bdf$ScreenID="bir1-17"
cdf$ScreenID="cSGA"
bdf$PI="DAL"
cdf$PI="DAL"
bdf$Client="MS"
cdf$Client="MS"
bdf$Inoc="DIL"
cdf$Inoc="DIL"
bdf$Screen.Name="bir1-17"
cdf$Screen.Name="cSGA"
bdf$Library="SDLv2"
cdf$Library="SDLv2"
bdf$User="AC"
cdf$User="SGA"
bdf$ExptDate="2010"
cdf$ExptDate="2009"
# Combined treatment+medium label used to pair query/control conditions below.
bdf$TrtMed=paste(bdf$Treatment,bdf$Medium,sep="_")
cdf$TrtMed=paste(cdf$Treatment,cdf$Medium,sep="_")
unique(cdf$TrtMed)
unique(bdf$TrtMed)
# Matched condition labels: ctms[i] (control) is compared against btms[i] (query).
ctms=c("20_SDM_rhl_CNGT","27_SDM_rhl_CNGT","37_SDM_rhl_CNGT")
btms=c("20_SDM_rhl_CNGHT","27_SDM_rhl_CNGHT","37_SDM_rhl_CNGHT")
bootstrap=NULL
reg="lmreg"
normalised=c("FALSE")
fdef="MDRMDP"
pdf("FitnessPlots.pdf")
# For each fitness definition, test type and condition pair: estimate genetic
# interactions, plot them, and write a *_GIS.txt report.
for(fdef in c("MDRMDP","r","K","AUC","MDR","MDP")){
bdf$fit=bdf[[fdef]]
cdf$fit=cdf[[fdef]]
for(wctest in c(TRUE,FALSE)){
if(wctest) {tlab="WILCOX"}else{tlab="TTEST"}
for(i in seq_along(ctms)){
# a = query screen rows, b = control screen rows, for the matched condition.
a=bdf[bdf$TrtMed==btms[i],]
b=cdf[cdf$TrtMed==ctms[i],]
clab = paste(unique(b$ScreenID),unique(b$Inoc),unique(b$Library),unique(b$User),unique(b$Screen.Name),unique(b$ExptDate),unique(b$TrtMed),collapse=" ")
qlab = paste(unique(a$ScreenID),unique(a$Inoc),unique(a$Library),unique(a$User),unique(a$Screen.Name),unique(a$ExptDate),unique(a$TrtMed),collapse=" ")
#root = paste(unique(QUER$Client),qfolder[1],unique(QUER$Screen.Name),qTrtMed,"vs",cfolder[1],unique(CONT$Screen.Name),cTrtMed,sep="_")
#if (fileID!="") root=paste(root,fileID,sep="_")
if (wctest) {testlab="Wilcoxon"}else{testlab="t-test"}
#if (!is.null(bootstrap)) testlab="bootstrap"
# Calculate genetic interactions and produce epistasis plot
epi=qfa.epi(a,b,0.05,plot=FALSE,wctest=wctest,bootstrap=bootstrap,modcheck=FALSE,reg=reg)
flab=paste("Fitness plot (",testlab,")",sep="")
mmain=paste("Normalised =",normalised[1],fdef,flab,sep=" ")
qfa.epiplot(epi,0.05,xxlab=clab,yylab=qlab,mmain=mmain,fmax=0)
report.epi(epi$Results,file=paste(fdef,ctms[i],tlab,"GIS.txt",sep="_"))
}
}
}
dev.off()
|
#' Determine temporal resolution
#'
#' @description Performs minimum time step standardization,
#' gap filling and start/end time selection. This function
#' provides the option to select the minimum temporal step size of an
#' \code{\link{is.trex}} object. Additionally, the user can define the
#' start and end time of the series and select the minimum size under
#' which gaps should be filled, using linear interpolation.
#'
#' @usage dt.steps(input, start,
#' end, time.int = 10, max.gap = 60,
#' decimals = 10, df = FALSE)
#'
#' @param input An \code{\link{is.trex}}-compliant (output) object
#' @param start Character string providing the start time for the series.
#' Format has to be provided in "UTC" (e.g., "2012-05-28 00:00" or
#' Year-Month-Day Hour:Minute). Starting time should not be earlier
#' than the start of the series.
#' @param end Character string providing the end time for the series.
#' Format has to be provided in "UTC" (e.g., "2012-06-28 00:50" or
#' Year-Month-Day Hour:Minute). End time should be later than the
#' start time and not later than the end of the series.
#' @param time.int Numeric value providing the number of minutes for the
#' minimum time step. When \code{time.int} is smaller than the minimum time step
#' of the series, a linear interpolation is applied. If \code{time.int} is
#' larger than the minimum time step of the series values are averaged to the chosen
#' value of \code{time.int} (after performing a linear interpolation
#' to obtain a one-minute resolution).
#' @param max.gap Numeric value providing the maximum size of a gap in minutes,
#' which can be filled by performing a linear interpolation.
#' @param decimals Integer value defining the number of decimals of the output
#' (default = 10).
#' @param df Logical; if \code{TRUE}, output is provided in a \code{data.frame}
#' format with a timestamp and a value (\eqn{\Delta T} or \eqn{\Delta V}) column.
#' If \code{FALSE}, output is provided as a \code{zoo} object (default = FALSE).
#'
#' @details Time series have different temporal resolutions.
#' This function provides the option to standardize the minimum time step by
#' either performing a linear interpolation when the requested time step
#' is smaller than the minimum time step of the series or average values when
#' the requested time step is larger than the minimum time step of the series.
#' Before this process, the entire time series is converted to a one-minute time
#' step by applying a linear interpolation (excluding gap \eqn{periods > \code{max.gap}}).
#'
#' @return A \code{zoo} object or \code{data.frame} in the appropriate
#' format for further processing.
#'
#' @export
#'
#' @examples
#' \dontrun{
#' input <- is.trex(example.data(type="doy"),
#' tz="GMT",time.format="%H:%M", solar.time=TRUE,
#' long.deg=7.7459,ref.add=FALSE)
#' in.ts <- dt.steps(input=input,start='2012-06-28 00:00',end='2012-07-28 00:00',
#' time.int=60,max.gap=120,decimals=6,df=FALSE)
#' plot(in.ts)
#' head(in.ts)
#' }
dt.steps <-
  function(input,
           start,
           end,
           time.int = 10,
           max.gap = 60,
           decimals = 10,
           df = FALSE) {
    # Standardize the temporal resolution of an is.trex()-compliant series
    # (see roxygen header above): trim to [start, end], fill gaps of up to
    # max.gap minutes by linear interpolation, and resample onto a regular
    # time.int-minute grid (interpolating when time.int is finer than the
    # native step, averaging when it is coarser).
    #
    # Args:
    #   input    : zoo series, or data.frame with 'timestamp'/'value' columns
    #   start/end: "%Y-%m-%d %H:%M[:%S]" strings (UTC) bounding the output
    #   time.int : output step in minutes (default 10)
    #   max.gap  : largest gap (minutes) that is still interpolated
    #   decimals : rounding applied to the output values (3-15)
    #   df       : if TRUE return a data.frame instead of a zoo object
    # Returns: zoo series (or data.frame when df = TRUE) on the regular grid.

    # Coerce a data.frame input into a UTC-indexed zoo series.
    if (attributes(input)$class == "data.frame") {
      if (is.numeric(input$value) == FALSE)
        stop("Invalid input data, values within the data.frame are not numeric.")
      if (is.character(input$timestamp) == FALSE)
        stop("Invalid input data, timestamp within the data.frame are not numeric.")
      input <-
        zoo::zoo(
          input$value,
          order.by = base::as.POSIXct(input$timestamp, format = "%Y-%m-%d %H:%M:%S", tz = "UTC")
        )
      # A failed POSIXct parse yields NA indices.
      if (as.character(zoo::index(input)[1]) == "(NA NA)" |
          is.na(zoo::index(input)[1]) == TRUE)
        stop("No timestamp present, time.format is likely incorrect.")
    }

    # Defaults: start/end fall back to the full extent of the series.
    # time.int, max.gap, decimals and df take their signature defaults; the
    # previous missing()-reassignment block was removed because it silently
    # changed the effective time.int default to 15, contradicting the
    # documented default of 10 (missing() is TRUE even when an argument has
    # a signature default).
    if (missing(start)) {
      start = as.character(zoo::index(input))[1]
    }
    if (missing(end)) {
      end = as.character(zoo::index(input))[length(input)]
    }

    # Argument validation.
    if (df != TRUE &
        df != FALSE)
      stop("Unused argument, df needs to be TRUE|FALSE.")
    if (zoo::is.zoo(input) == FALSE)
      stop(
        "Invalid input data, use a zoo file from is.trex or a zoo vector containing numeric values (tz= UTC)."
      )
    if (is.numeric(input) == FALSE)
      stop("Invalid input data, values within the vector are not numeric.")
    if (is.character(start) == FALSE)
      stop("Unused argument, start is not a character (format= %Y-%m-%d %H:%M:%S).")
    if (is.character(end) == FALSE)
      stop("Unused argument, end is not a character (format= %Y-%m-%d %H:%M:%S).")
    if (is.numeric(max.gap) == FALSE)
      stop("Unused argument, max.gap is not numeric.")
    if (is.numeric(time.int) == FALSE)
      stop("Unused argument, time.int is not numeric.")
    if (is.numeric(decimals) == FALSE)
      stop("Unused argument, decimals is not numeric.")
    if (decimals < 3 |
        decimals > 15)
      stop("Unused argument, decimals can only fall between 3-15.")

    # Accept "YYYY-mm-dd HH:MM" input by appending the seconds field.
    if (nchar(start) == 16) { start <- base::paste0(start, ":00") }
    if (nchar(end) == 16) { end <- base::paste0(end, ":00") }

    # Widen the window by one second on each side so window() keeps the
    # boundary samples themselves.
    ts.start <-
      as.POSIXct(as.character(base::paste0(start)),
                 format = "%Y-%m-%d %H:%M:%S",
                 tz = "UTC") - 1
    ts.end <-
      as.POSIXct(as.character(base::paste0(end)),
                 format = "%Y-%m-%d %H:%M:%S",
                 tz = "UTC") + 1
    if (is.na(ts.start) == TRUE)
      stop("Unused argument, start is not in the correct format (%Y-%m-%d %H:%M:%S).")
    if (is.na(ts.end) == TRUE)
      stop("Unused argument, end is not in the correct format (%Y-%m-%d %H:%M:%S).")
    if (round(as.numeric(ts.start - zoo::index(input[1]))) < -1)
      stop("Unused argument, start is earlier than start of the timestamp.")
    if (round(as.numeric(zoo::index(input[length(input)]) - ts.end)) < -1)
      stop("Unused argument, end is later than end of the timestamp.")

    # Restrict to the requested window and drop missing values.
    value <-
      stats::na.omit(stats::window(input, start = ts.start, end = ts.end))
    value <- (stats::na.omit(value))
    # Minutes between consecutive observations; padded with a trailing NA so
    # 'gap' aligns element-wise with 'value'.
    raw.gap <-
      as.numeric(difftime(
        zoo::index(value)[-1],
        zoo::index(value[-length(value)]),
        tz = "UTC",
        units = c("mins")
      ))
    gap <- c(raw.gap, NA) #minimum gap in minutes
    # The series' native (minimum) step must not exceed max.gap, otherwise
    # nothing could be interpolated.  (Message fixed: the old text had the
    # comparison backwards; a dead missing(max.gap) re-default that could
    # never fire was also removed here.)
    if (min(gap, na.rm = TRUE) > max.gap)
      stop("Unused argument, max.gap is smaller than the minimum timestep.")
    if (time.int > (60 * 24)) {
      warning("Selected time.int is larger than a day.")
    }

    if (time.int > min(gap, na.rm = TRUE)) {
      # Requested step is coarser than the native step: first interpolate
      # onto a one-minute grid, then average over time.int-minute windows.
      gap <- zoo::zoo(gap, order.by = zoo::index(value))
      dummy <-
        zoo::zoo(NA, order.by = seq(
          from = ts.start + 1,
          to = ts.end - 1,
          by = (60 * 1)   # one-minute grid (intentionally not time.int)
        ))
      proc.1 <- zoo::cbind.zoo(value, gap, dummy)
      # 'dummy' == 0 marks observed samples; NA marks inserted grid points.
      proc.1[which(is.na(proc.1$value) == FALSE), "dummy"] <- 0
      proc.1$value <- zoo::na.approx(proc.1$value, na.rm = FALSE)
      # Carry each gap length forward across the points interpolated in it,
      # then zero the gap at observed samples.
      proc.1$gap <- zoo::na.locf(proc.1$gap, na.rm = FALSE)
      proc.1[which(is.na(proc.1$value) == TRUE), "gap"] <- NA
      proc.1[which(proc.1$dummy == 0), "gap"] <- 0
      # Centered rolling mean over time.int one-minute samples.
      add <-
        zoo::rollapply(
          proc.1$value,
          time.int,
          align = "center",
          FUN = mean,
          na.rm = TRUE,
          partial = TRUE
        )
      add[which(as.character(add) == "NaN")] <- NA
      proc.1$value <- add
    } else {
      # Requested step is finer than (or equal to) the native step:
      # interpolate directly onto the time.int-minute grid.
      gap <- zoo::zoo(gap, order.by = zoo::index(value))
      dummy <-
        zoo::zoo(NA, order.by = seq(
          from = ts.start + 1,
          to = ts.end - 1,
          by = (60 * time.int)
        ))
      proc.1 <- zoo::cbind.zoo(value, gap, dummy)
      proc.1[which(is.na(proc.1$value) == FALSE), "dummy"] <- 0
      proc.1$value <- zoo::na.approx(proc.1$value, na.rm = FALSE)
      proc.1$gap <- zoo::na.locf(proc.1$gap, na.rm = FALSE)
      proc.1[which(is.na(proc.1$value) == TRUE), "gap"] <- NA
      proc.1[which(proc.1$dummy == 0), "gap"] <- 0
    }

    # Pad remaining leading/trailing NAs, blank out values that fall inside
    # gaps larger than max.gap, and round.
    proc.1$value <-
      zoo::na.locf(zoo::na.locf(proc.1$value, na.rm = FALSE), fromLast = TRUE)
    proc.1[which(proc.1$gap > max.gap), "value"] <- NA
    proc.1$value <- round(proc.1$value, decimals)

    # Keep only the points on the time.int-minute output grid.
    output.data <- proc.1[which(as.character(zoo::index(proc.1)) %in% as.character(seq(
      from = ts.start + 1,
      to = ts.end - 1,
      by = (60 * time.int)
    ))), "value"]

    # Mask output that falls outside the observed range of the input (the
    # locf padding above may have extrapolated past the first/last sample).
    start.input <- zoo::index(stats::na.omit(input))[1] - 1
    start.output <- zoo::index(stats::na.omit(output.data))[1]
    if (start.input != start.output) {
      stats::window(output.data, start = start.output, end = start.input) <- NA
    }
    end.input <- zoo::index(stats::na.omit(input))[length(zoo::index(stats::na.omit(input)))] + 1
    end.output <- zoo::index(stats::na.omit(output.data))[length(zoo::index(stats::na.omit(output.data)))]
    if (end.input != end.output) {
      stats::window(output.data, start = end.input, end = end.output) <- NA
    }

    # Optional data.frame output with character timestamps.
    if (df == TRUE) {
      output.data <-
        data.frame(timestamp = as.character(zoo::index(output.data)),
                   value = as.numeric(as.character(output.data)))
      output.data$timestamp <- as.character(output.data$timestamp)
      output.data$value <- as.numeric(output.data$value)
    }
    return(output.data)
  }
| /R/dt.steps.R | permissive | the-Hull/TREX | R | false | false | 11,118 | r | #' Determine temporal resolution
#'
#' @description Performs minimum time step standardization,
#' gap filling and start/end time selection. This function
#' provides the option to select the minimum temporal step size of an
#' \code{\link{is.trex}} object. Additionally, the user can define the
#' start and end time of the series and select the minimum size under
#' which gaps should be filled, using linear interpolation.
#'
#' @usage dt.steps(input, start,
#' end, time.int = 10, max.gap = 60,
#' decimals = 10, df = FALSE)
#'
#' @param input An \code{\link{is.trex}}-compliant (output) object
#' @param start Character string providing the start time for the series.
#' Format has to be provided in "UTC" (e.g., "2012-05-28 00:00" or
#' Year-Month-Day Hour:Minute). Starting time should not be earlier
#' than the start of the series.
#' @param end Character string providing the end time for the series.
#' Format has to be provided in "UTC" (e.g., "2012-06-28 00:50" or
#' Year-Month-Day Hour:Minute). End time should be later than the
#' start time and not later than the end of the series.
#' @param time.int Numeric value providing the number of minutes for the
#' minimum time step. When \code{time.int} is smaller than the minimum time step
#' of the series, a linear interpolation is applied. If \code{time.int} is
#' larger than the minimum time step of the series values are averaged to the chosen
#' value of \code{time.int} (after performing a linear interpolation
#' to obtain a one-minute resolution).
#' @param max.gap Numeric value providing the maximum size of a gap in minutes,
#' which can be filled by performing a linear interpolation.
#' @param decimals Integer value defining the number of decimals of the output
#' (default = 10).
#' @param df Logical; if \code{TRUE}, output is provided in a \code{data.frame}
#' format with a timestamp and a value (\eqn{\Delta T} or \eqn{\Delta V}) column.
#' If \code{FALSE}, output is provided as a \code{zoo} object (default = FALSE).
#'
#' @details Time series have different temporal resolutions.
#' This function provides the option to standardize the minimum time step by
#' either performing a linear interpolation when the requested time step
#' is smaller than the minimum time step of the series or average values when
#' the requested time step is larger than the minimum time step of the series.
#' Before this process, the entire time series is converted to a one-minute time
#' step by applying a linear interpolation (excluding gap \eqn{periods > \code{max.gap}}).
#'
#' @return A \code{zoo} object or \code{data.frame} in the appropriate
#' format for further processing.
#'
#' @export
#'
#' @examples
#' \dontrun{
#' input <- is.trex(example.data(type="doy"),
#' tz="GMT",time.format="%H:%M", solar.time=TRUE,
#' long.deg=7.7459,ref.add=FALSE)
#' in.ts <- dt.steps(input=input,start='2012-06-28 00:00',end='2012-07-28 00:00',
#' time.int=60,max.gap=120,decimals=6,df=FALSE)
#' plot(in.ts)
#' head(in.ts)
#' }
dt.steps <-
  function(input,
           start,
           end,
           time.int = 10,
           max.gap = 60,
           decimals = 10,
           df = FALSE) {
    # Standardize the temporal resolution of an is.trex()-compliant series
    # (see roxygen header above): trim to [start, end], fill gaps of up to
    # max.gap minutes by linear interpolation, and resample onto a regular
    # time.int-minute grid (interpolating when time.int is finer than the
    # native step, averaging when it is coarser).
    #
    # Args:
    #   input    : zoo series, or data.frame with 'timestamp'/'value' columns
    #   start/end: "%Y-%m-%d %H:%M[:%S]" strings (UTC) bounding the output
    #   time.int : output step in minutes (default 10)
    #   max.gap  : largest gap (minutes) that is still interpolated
    #   decimals : rounding applied to the output values (3-15)
    #   df       : if TRUE return a data.frame instead of a zoo object
    # Returns: zoo series (or data.frame when df = TRUE) on the regular grid.

    # Coerce a data.frame input into a UTC-indexed zoo series.
    if (attributes(input)$class == "data.frame") {
      if (is.numeric(input$value) == FALSE)
        stop("Invalid input data, values within the data.frame are not numeric.")
      if (is.character(input$timestamp) == FALSE)
        stop("Invalid input data, timestamp within the data.frame are not numeric.")
      input <-
        zoo::zoo(
          input$value,
          order.by = base::as.POSIXct(input$timestamp, format = "%Y-%m-%d %H:%M:%S", tz = "UTC")
        )
      # A failed POSIXct parse yields NA indices.
      if (as.character(zoo::index(input)[1]) == "(NA NA)" |
          is.na(zoo::index(input)[1]) == TRUE)
        stop("No timestamp present, time.format is likely incorrect.")
    }

    # Defaults: start/end fall back to the full extent of the series.
    # time.int, max.gap, decimals and df take their signature defaults; the
    # previous missing()-reassignment block was removed because it silently
    # changed the effective time.int default to 15, contradicting the
    # documented default of 10 (missing() is TRUE even when an argument has
    # a signature default).
    if (missing(start)) {
      start = as.character(zoo::index(input))[1]
    }
    if (missing(end)) {
      end = as.character(zoo::index(input))[length(input)]
    }

    # Argument validation.
    if (df != TRUE &
        df != FALSE)
      stop("Unused argument, df needs to be TRUE|FALSE.")
    if (zoo::is.zoo(input) == FALSE)
      stop(
        "Invalid input data, use a zoo file from is.trex or a zoo vector containing numeric values (tz= UTC)."
      )
    if (is.numeric(input) == FALSE)
      stop("Invalid input data, values within the vector are not numeric.")
    if (is.character(start) == FALSE)
      stop("Unused argument, start is not a character (format= %Y-%m-%d %H:%M:%S).")
    if (is.character(end) == FALSE)
      stop("Unused argument, end is not a character (format= %Y-%m-%d %H:%M:%S).")
    if (is.numeric(max.gap) == FALSE)
      stop("Unused argument, max.gap is not numeric.")
    if (is.numeric(time.int) == FALSE)
      stop("Unused argument, time.int is not numeric.")
    if (is.numeric(decimals) == FALSE)
      stop("Unused argument, decimals is not numeric.")
    if (decimals < 3 |
        decimals > 15)
      stop("Unused argument, decimals can only fall between 3-15.")

    # Accept "YYYY-mm-dd HH:MM" input by appending the seconds field.
    if (nchar(start) == 16) { start <- base::paste0(start, ":00") }
    if (nchar(end) == 16) { end <- base::paste0(end, ":00") }

    # Widen the window by one second on each side so window() keeps the
    # boundary samples themselves.
    ts.start <-
      as.POSIXct(as.character(base::paste0(start)),
                 format = "%Y-%m-%d %H:%M:%S",
                 tz = "UTC") - 1
    ts.end <-
      as.POSIXct(as.character(base::paste0(end)),
                 format = "%Y-%m-%d %H:%M:%S",
                 tz = "UTC") + 1
    if (is.na(ts.start) == TRUE)
      stop("Unused argument, start is not in the correct format (%Y-%m-%d %H:%M:%S).")
    if (is.na(ts.end) == TRUE)
      stop("Unused argument, end is not in the correct format (%Y-%m-%d %H:%M:%S).")
    if (round(as.numeric(ts.start - zoo::index(input[1]))) < -1)
      stop("Unused argument, start is earlier than start of the timestamp.")
    if (round(as.numeric(zoo::index(input[length(input)]) - ts.end)) < -1)
      stop("Unused argument, end is later than end of the timestamp.")

    # Restrict to the requested window and drop missing values.
    value <-
      stats::na.omit(stats::window(input, start = ts.start, end = ts.end))
    value <- (stats::na.omit(value))
    # Minutes between consecutive observations; padded with a trailing NA so
    # 'gap' aligns element-wise with 'value'.
    raw.gap <-
      as.numeric(difftime(
        zoo::index(value)[-1],
        zoo::index(value[-length(value)]),
        tz = "UTC",
        units = c("mins")
      ))
    gap <- c(raw.gap, NA) #minimum gap in minutes
    # The series' native (minimum) step must not exceed max.gap, otherwise
    # nothing could be interpolated.  (Message fixed: the old text had the
    # comparison backwards; a dead missing(max.gap) re-default that could
    # never fire was also removed here.)
    if (min(gap, na.rm = TRUE) > max.gap)
      stop("Unused argument, max.gap is smaller than the minimum timestep.")
    if (time.int > (60 * 24)) {
      warning("Selected time.int is larger than a day.")
    }

    if (time.int > min(gap, na.rm = TRUE)) {
      # Requested step is coarser than the native step: first interpolate
      # onto a one-minute grid, then average over time.int-minute windows.
      gap <- zoo::zoo(gap, order.by = zoo::index(value))
      dummy <-
        zoo::zoo(NA, order.by = seq(
          from = ts.start + 1,
          to = ts.end - 1,
          by = (60 * 1)   # one-minute grid (intentionally not time.int)
        ))
      proc.1 <- zoo::cbind.zoo(value, gap, dummy)
      # 'dummy' == 0 marks observed samples; NA marks inserted grid points.
      proc.1[which(is.na(proc.1$value) == FALSE), "dummy"] <- 0
      proc.1$value <- zoo::na.approx(proc.1$value, na.rm = FALSE)
      # Carry each gap length forward across the points interpolated in it,
      # then zero the gap at observed samples.
      proc.1$gap <- zoo::na.locf(proc.1$gap, na.rm = FALSE)
      proc.1[which(is.na(proc.1$value) == TRUE), "gap"] <- NA
      proc.1[which(proc.1$dummy == 0), "gap"] <- 0
      # Centered rolling mean over time.int one-minute samples.
      add <-
        zoo::rollapply(
          proc.1$value,
          time.int,
          align = "center",
          FUN = mean,
          na.rm = TRUE,
          partial = TRUE
        )
      add[which(as.character(add) == "NaN")] <- NA
      proc.1$value <- add
    } else {
      # Requested step is finer than (or equal to) the native step:
      # interpolate directly onto the time.int-minute grid.
      gap <- zoo::zoo(gap, order.by = zoo::index(value))
      dummy <-
        zoo::zoo(NA, order.by = seq(
          from = ts.start + 1,
          to = ts.end - 1,
          by = (60 * time.int)
        ))
      proc.1 <- zoo::cbind.zoo(value, gap, dummy)
      proc.1[which(is.na(proc.1$value) == FALSE), "dummy"] <- 0
      proc.1$value <- zoo::na.approx(proc.1$value, na.rm = FALSE)
      proc.1$gap <- zoo::na.locf(proc.1$gap, na.rm = FALSE)
      proc.1[which(is.na(proc.1$value) == TRUE), "gap"] <- NA
      proc.1[which(proc.1$dummy == 0), "gap"] <- 0
    }

    # Pad remaining leading/trailing NAs, blank out values that fall inside
    # gaps larger than max.gap, and round.
    proc.1$value <-
      zoo::na.locf(zoo::na.locf(proc.1$value, na.rm = FALSE), fromLast = TRUE)
    proc.1[which(proc.1$gap > max.gap), "value"] <- NA
    proc.1$value <- round(proc.1$value, decimals)

    # Keep only the points on the time.int-minute output grid.
    output.data <- proc.1[which(as.character(zoo::index(proc.1)) %in% as.character(seq(
      from = ts.start + 1,
      to = ts.end - 1,
      by = (60 * time.int)
    ))), "value"]

    # Mask output that falls outside the observed range of the input (the
    # locf padding above may have extrapolated past the first/last sample).
    start.input <- zoo::index(stats::na.omit(input))[1] - 1
    start.output <- zoo::index(stats::na.omit(output.data))[1]
    if (start.input != start.output) {
      stats::window(output.data, start = start.output, end = start.input) <- NA
    }
    end.input <- zoo::index(stats::na.omit(input))[length(zoo::index(stats::na.omit(input)))] + 1
    end.output <- zoo::index(stats::na.omit(output.data))[length(zoo::index(stats::na.omit(output.data)))]
    if (end.input != end.output) {
      stats::window(output.data, start = end.input, end = end.output) <- NA
    }

    # Optional data.frame output with character timestamps.
    if (df == TRUE) {
      output.data <-
        data.frame(timestamp = as.character(zoo::index(output.data)),
                   value = as.numeric(as.character(output.data)))
      output.data$timestamp <- as.character(output.data$timestamp)
      output.data$value <- as.numeric(output.data$value)
    }
    return(output.data)
  }
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/czas.R
\name{czas}
\alias{czas}
\title{Funkcja sprawdza czy dwie wartosci liczbowe sa "podobne"}
\usage{
czas(czas_glowny, czas_por)
}
\arguments{
\item{czas_glowny}{pierwsza wartosc numeryczna, liczba naturalna}
\item{czas_por}{druga wartosc numeryczna, liczba naturalna}
}
\value{
wartosc numeryczna 1, jesli istnieje istotne podobienstwo miedzy wartosciami, 0 wpp
}
\description{
Funkcja \code{czas} sprawdza czy dwie wartosci liczbowe sa "podobne" (w domysle
chodzi o czas trwania filmu)
}
\author{
Emilia Momotko
}
| /faza2JMS/pakiet/FilmyJMS/man/czas.Rd | no_license | Emomotko/Filmy | R | false | false | 608 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/czas.R
\name{czas}
\alias{czas}
\title{Funkcja sprawdza czy dwie wartosci liczbowe sa "podobne"}
\usage{
czas(czas_glowny, czas_por)
}
\arguments{
\item{czas_glowny}{pierwsza wartosc numeryczna, liczba naturalna}
\item{czas_por}{druga wartosc numeryczna, liczba naturalna}
}
\value{
wartosc numeryczna 1, jesli istnieje istotne podobienstwo miedzy wartosciami, 0 wpp
}
\description{
Funkcja \code{czas} sprawdza czy dwie wartosci liczbowe sa "podobne" (w domysle
chodzi o czas trwania filmu)
}
\author{
Emilia Momotko
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.