content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
#' Serve phantasus.
#'
#' \code{servePhantasus} starts http server handling phantasus static files
#' and opencpu server.
#'
#' @param host Host to listen.
#'
#' @param port Port to listen.
#'
#' @param staticRoot Path to static files with phantasus.js
#' (on local file system).
#'
#' @param cacheDir Full path to cache directory.
#'
#' @return Running instance of phantasus application.
#'
#' @import opencpu
#' @import httpuv
#' @import Rook
#' @export
#'
#' @examples
#' \dontrun{
#' servePhantasus('0.0.0.0', 8000, cacheDir=file.path(getwd(), 'cache'))
#' }
servePhantasus <- function(host, port,
                        staticRoot = system.file("www/phantasus.js",
                                                    package = "phantasus"),
                        cacheDir = tempdir()) {
    # Record the cache location so the rest of the package can find it.
    options(phantasusCacheDir = cacheDir)

    # Requests under /ocpu are handed to the OpenCPU API handler; every
    # other path is served as static content from staticRoot.
    ocpu_handler <- opencpu:::rookhandler("/ocpu")
    static_handler <- Rook::Static$new(urls = c("/"), root = staticRoot)
    app <- Rook::URLMap$new(`/ocpu` = ocpu_handler, `/?` = static_handler)

    # Blocks for as long as the server is running.
    httpuv::runServer(host, port, app = app)
}
| /R/serve.R | no_license | assaron/phantasus | R | false | false | 1,058 | r | #' Serve phantasus.
#'
#' \code{servePhantasus} starts http server handling phantasus static files
#' and opencpu server.
#'
#' @param host Host to listen.
#'
#' @param port Port to listen.
#'
#' @param staticRoot Path to static files with phantasus.js
#' (on local file system).
#'
#' @param cacheDir Full path to cache directory.
#'
#' @return Running instance of phantasus application.
#'
#' @import opencpu
#' @import httpuv
#' @import Rook
#' @export
#'
#' @examples
#' \dontrun{
#' servePhantasus('0.0.0.0', 8000, cacheDir=file.path(getwd(), 'cache'))
#' }
servePhantasus <- function(host, port,
staticRoot = system.file("www/phantasus.js",
package = "phantasus"),
cacheDir = tempdir()) {
options(phantasusCacheDir = cacheDir)
app <- Rook::URLMap$new(`/ocpu` = opencpu:::rookhandler("/ocpu"),
`/?` = Rook::Static$new(urls = c("/"),
root = staticRoot))
httpuv::runServer(host, port, app = app)
}
|
# Extracted example for ggplotAssist::textInput4: side-by-side shiny text
# inputs with browser spell checking disabled.
library(ggplotAssist)
### Name: textInput4
### Title: Create side-by side textInput with disabled spell check
### Aliases: textInput4
### ** Examples
library(shiny)
# Only run examples in interactive R sessions
if (interactive()) {
# UI: two textInput4 widgets rendered side by side on one row.
ui <- fluidPage(
textInput4("id", "id", ""),
textInput4("name","name","")
)
# Server: no reactive logic is needed for this display-only demo.
server <- function(input, output) {
}
# Launch the demo app; blocks until the app window is closed.
shinyApp(ui, server)
}
| /data/genthat_extracted_code/ggplotAssist/examples/textInput4.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 434 | r | library(ggplotAssist)
### Name: textInput4
### Title: Create side-by side textInput with disabled spell check
### Aliases: textInput4
### ** Examples
library(shiny)
# Only run examples in interactive R sessions
if (interactive()) {
ui <- fluidPage(
textInput4("id", "id", ""),
textInput4("name","name","")
)
server <- function(input, output) {
}
shinyApp(ui, server)
}
|
#
# IFPUG-COSMIC-conv.R, 15 Sep 16
#
# Data from:
# Javad Mohammadian Amiri and Venkata Vinod Kumar Padmanabhuni
# A Comprehensive Evaluation of Conversion Approaches for Different Function Points
#
# Example from:
# Empirical Software Engineering using R
# Derek M. Jones
source("ESEUR_config.r")
library("plyr")
plot_a_fit <- function(subj) {
  # Fit a log-log linear model (FP as a function of CFP) to one subset of
  # the data and overlay the back-transformed fitted curve on the current
  # plot, using the subset's plotting colour. Relies on the global CFP_seq
  # for the x-values of the drawn line.
  # (A Poisson fit, glm(FP ~ log(CFP), family = poisson), was tried
  # previously and is kept here for reference.)
  fit <- glm(log(FP) ~ log(CFP), data = subj)
  log_pred <- predict(fit, newdata = data.frame(CFP = CFP_seq))
  lines(CFP_seq, exp(log_pred), col = subj$col)
}
# Split `who` by plotting colour (one colour per source dataset) and draw
# a fitted curve for each subset via plot_a_fit. plyr::d_ply is used purely
# for its side effects (drawing); nothing useful is returned.
plot_fit=function(who)
{
d_ply(who, .(col), plot_a_fit)
}
plot_points <- function(who) {
  # Add one point per project to the current plot, coloured by dataset.
  with(who, points(CFP, FP, col = col))
  # A loess smoother was tried earlier and is kept here for reference:
  # lines(loess.smooth(who$CFP, who$FP, span=0.5, family="gaussian"), col=line_col)
}
# Load the IFPUG/COSMIC function-point benchmark (path is rooted at the
# ESEUR_dir set by ESEUR_config.r).
bench=read.csv(paste0(ESEUR_dir, "statistics/BTH2011Padmanabhuni.csv.xz"), as.is=TRUE)
# conv_mod=glm(FP ~ (who_FP+CFP)^2+kind+Dataset, data=bench)
# conv_mod=glm(FP ~ (who_FP+CFP)^2+kind, data=bench)
# summary(conv_mod)
# Drop the student-measured datasets; keep only professional measurements.
no_students=subset(bench, Dataset != "Cuadtado_jj07" & Dataset != "Cuadtado_jj06")
no_Cuadtado=subset(no_students, Dataset != "Cuadtado_2007")
# table(no_students$who_CFP, no_students$who_FP)
# Candidate conversion models; note conv_mod is fitted twice and the
# log-log form on the second line is the one kept for the summaries below.
conv_mod=glm(FP ~ (who_FP+CFP)^2+kind+Dataset, data=no_students)
conv_pmod=glm(FP ~ (who_FP+who_CFP+log(CFP))^2-who_FP:who_CFP+kind, family=poisson, data=no_students)
conv_mod=glm(log(FP) ~ (who_FP+log(CFP))^2+kind, data=no_students)
conv_cmod=glm(log(CFP) ~ (who_FP+log(FP))^2+kind, data=no_students)
summary(conv_pmod)
summary(conv_mod)
summary(conv_cmod)
# x-values used by plot_a_fit when drawing fitted curves.
CFP_seq=seq(20, 2000, by=5)
pal_col=rainbow(4)
# One colour per source dataset, attached as a column for the plot helpers.
D_names=unique(no_students$Dataset)
D_cols=rainbow(length(D_names))
no_students$col=mapvalues(no_students$Dataset, D_names, D_cols)
# Split into industry ("ind") and academic measurements.
ind=subset(no_students, who_CFP == "ind")
aca=subset(no_students, who_CFP != "ind")
# Left panel: raw points; right panel: fitted conversion curves.
plot_layout(1, 2)
plot(1, type="n", log="xy",
xlim=range(no_students$CFP), ylim=range(no_students$FP),
xlab="COSMIC", ylab="FPA\n")
plot_points(ind)
plot_points(aca)
legend(x="topleft", legend=D_names, bty="n", fill=D_cols, cex=1.2)
# Remove what data that has a different slope to everything else
# aca=subset(no_Cuadtado, who_CFP != "ind")
plot(1, type="n", log="xy",
xlim=range(no_students$CFP), ylim=range(no_students$FP),
xlab="COSMIC", ylab="FPA\n")
plot_fit(ind)
plot_fit(aca)
# Remove what data that has a different slope to everything else
# aca=subset(no_Cuadtado, who_CFP != "ind")
# SIMEX: correct the regression for measurement error in the FP counts
# (assumed error scale is log(FP)/30 -- TODO confirm against the paper).
library("simex")
no_students=subset(no_students, !is.na(kind))
no_students$l_CFP=log(no_students$CFP)
no_students$l_FP=log(no_students$FP)
conv_cmod=glm(l_CFP ~ (who_FP+l_FP)^2+kind, data=no_students)
conv_simex=simex(conv_cmod, SIMEXvariable="l_FP", measurement.error=no_students$l_FP/30, asymptotic=FALSE)
summary(conv_simex)
| /regression/IFPUG-COSMIC-conv.R | no_license | montahdaya/ESEUR-code-data | R | false | false | 2,735 | r | #
# IFPUG-COSMIC-conv.R, 15 Sep 16
#
# Data from:
# Javad Mohammadian Amiri and Venkata Vinod Kumar Padmanabhuni
# A Comprehensive Evaluation of Conversion Approaches for Different Function Points
#
# Example from:
# Empirical Software Engineering using R
# Derek M. Jones
source("ESEUR_config.r")
library("plyr")
plot_a_fit=function(subj)
{
# w_mod=glm(FP ~ log(CFP), data=subj, family=poisson)
w_mod=glm(log(FP) ~ log(CFP), data=subj)
pred=predict(w_mod, newdata=data.frame(CFP=CFP_seq))
lines(CFP_seq, exp(pred), col=subj$col)
}
plot_fit=function(who)
{
d_ply(who, .(col), plot_a_fit)
}
plot_points=function(who)
{
points(who$CFP, who$FP, col=who$col)
#lines(loess.smooth(who$CFP, who$FP, span=0.5, family="gaussian"), col=line_col)
}
bench=read.csv(paste0(ESEUR_dir, "statistics/BTH2011Padmanabhuni.csv.xz"), as.is=TRUE)
# conv_mod=glm(FP ~ (who_FP+CFP)^2+kind+Dataset, data=bench)
# conv_mod=glm(FP ~ (who_FP+CFP)^2+kind, data=bench)
# summary(conv_mod)
no_students=subset(bench, Dataset != "Cuadtado_jj07" & Dataset != "Cuadtado_jj06")
no_Cuadtado=subset(no_students, Dataset != "Cuadtado_2007")
# table(no_students$who_CFP, no_students$who_FP)
conv_mod=glm(FP ~ (who_FP+CFP)^2+kind+Dataset, data=no_students)
conv_pmod=glm(FP ~ (who_FP+who_CFP+log(CFP))^2-who_FP:who_CFP+kind, family=poisson, data=no_students)
conv_mod=glm(log(FP) ~ (who_FP+log(CFP))^2+kind, data=no_students)
conv_cmod=glm(log(CFP) ~ (who_FP+log(FP))^2+kind, data=no_students)
summary(conv_pmod)
summary(conv_mod)
summary(conv_cmod)
CFP_seq=seq(20, 2000, by=5)
pal_col=rainbow(4)
D_names=unique(no_students$Dataset)
D_cols=rainbow(length(D_names))
no_students$col=mapvalues(no_students$Dataset, D_names, D_cols)
ind=subset(no_students, who_CFP == "ind")
aca=subset(no_students, who_CFP != "ind")
plot_layout(1, 2)
plot(1, type="n", log="xy",
xlim=range(no_students$CFP), ylim=range(no_students$FP),
xlab="COSMIC", ylab="FPA\n")
plot_points(ind)
plot_points(aca)
legend(x="topleft", legend=D_names, bty="n", fill=D_cols, cex=1.2)
# Remove what data that has a different slope to everything else
# aca=subset(no_Cuadtado, who_CFP != "ind")
plot(1, type="n", log="xy",
xlim=range(no_students$CFP), ylim=range(no_students$FP),
xlab="COSMIC", ylab="FPA\n")
plot_fit(ind)
plot_fit(aca)
# Remove what data that has a different slope to everything else
# aca=subset(no_Cuadtado, who_CFP != "ind")
library("simex")
no_students=subset(no_students, !is.na(kind))
no_students$l_CFP=log(no_students$CFP)
no_students$l_FP=log(no_students$FP)
conv_cmod=glm(l_CFP ~ (who_FP+l_FP)^2+kind, data=no_students)
conv_simex=simex(conv_cmod, SIMEXvariable="l_FP", measurement.error=no_students$l_FP/30, asymptotic=FALSE)
summary(conv_simex)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simple_slopes.R
\name{as_huxtable.sim_slopes}
\alias{as_huxtable.sim_slopes}
\title{Create tabular output for simple slopes analysis}
\usage{
as_huxtable.sim_slopes(x, format = "{estimate} ({std.error})",
sig.levels = c(`***` = 0.001, `**` = 0.01, `*` = 0.05, `#` = 0.1),
digits = getOption("jtools-digits", 2), conf.level = 0.95,
intercept = attr(x, "cond.int"), int.format = format, ...)
}
\arguments{
\item{x}{The \code{\link[=sim_slopes]{sim_slopes()}} object.}
\item{format}{The method for sharing the slope and associated uncertainty.
Default is \code{"{estimate} ({std.error})"}. See the instructions for the
\code{error_format} argument of \code{\link[=export_summs]{export_summs()}} for more on your
options.}
\item{sig.levels}{A named vector in which the values are potential p value
thresholds and the names are significance markers (e.g., "*") for when
p values are below the threshold. Default is
\code{c(`***` = .001, `**` = .01, `*` = .05, `#` = .1)}.}
\item{digits}{How many digits should the outputted table round to? Default
is 2.}
\item{conf.level}{How wide the confidence interval should be, if it
is used. .95 (95\% interval) is the default.}
\item{intercept}{Should conditional intercepts be included? Default is
whatever the \code{cond.int} argument to \code{x} was.}
\item{int.format}{If conditional intercepts were requested, how should
they be formatted? Default is the same as \code{format}.}
\item{...}{Ignored.}
}
\description{
This function converts a \code{sim_slopes} object into a
\code{huxtable} object, making it suitable for use in external documents.
}
\details{
For more on what you can do with a \code{huxtable}, see \pkg{huxtable}.
}
| /man/as_huxtable.sim_slopes.Rd | permissive | mychan24/interactions | R | false | true | 1,765 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simple_slopes.R
\name{as_huxtable.sim_slopes}
\alias{as_huxtable.sim_slopes}
\title{Create tabular output for simple slopes analysis}
\usage{
as_huxtable.sim_slopes(x, format = "{estimate} ({std.error})",
sig.levels = c(`***` = 0.001, `**` = 0.01, `*` = 0.05, `#` = 0.1),
digits = getOption("jtools-digits", 2), conf.level = 0.95,
intercept = attr(x, "cond.int"), int.format = format, ...)
}
\arguments{
\item{x}{The \code{\link[=sim_slopes]{sim_slopes()}} object.}
\item{format}{The method for sharing the slope and associated uncertainty.
Default is \code{"{estimate} ({std.error})"}. See the instructions for the
\code{error_format} argument of \code{\link[=export_summs]{export_summs()}} for more on your
options.}
\item{sig.levels}{A named vector in which the values are potential p value
thresholds and the names are significance markers (e.g., "*") for when
p values are below the threshold. Default is
\code{c(`***` = .001, `**` = .01, `*` = .05, `#` = .1)}.}
\item{digits}{How many digits should the outputted table round to? Default
is 2.}
\item{conf.level}{How wide the confidence interval should be, if it
is used. .95 (95\% interval) is the default.}
\item{intercept}{Should conditional intercepts be included? Default is
whatever the \code{cond.int} argument to \code{x} was.}
\item{int.format}{If conditional intercepts were requested, how should
they be formatted? Default is the same as \code{format}.}
\item{...}{Ignored.}
}
\description{
This function converts a \code{sim_slopes} object into a
\code{huxtable} object, making it suitable for use in external documents.
}
\details{
For more on what you can do with a \code{huxtable}, see \pkg{huxtable}.
}
|
source("https://bioconductor.org/biocLite.R")
biocLite("ShortRead")#install the package
library(ShortRead) #load the package
# NOTE(review): clears the whole workspace and uses absolute Windows paths;
# this script is single-machine analysis code.
rm(list=ls())
setwd("D:/OneDrive/Results/Oligo transformation/HTS_new/1_2") #set way to the folder with a fastq file
reads=readFastq("G33_C33_OligoTransformation_Rep1") #upload the fastq file
length(reads) ###initial number of reads
# Output directory named after the filtering parameters (Phred >= 14,
# zero Ns allowed, reads longer than 105 nt).
dir.create("PQ14 0N_longer105")
setwd("D:/OneDrive/Results/Oligo transformation/HTS_new/1_2/PQ14 0N_longer105")
width<-as.numeric(width(reads))
barplot(table(width)) #shows distribution of read lengths
log<-width>=105 #check if the read length>=105nt (Leader(54nt)+Repeat(28nt)+1st spacer (at least 23nt))
table(log)#TRUE = how many reads have length>=105nt
reads_long<-reads[log] #keep only reads >=105nt
###Determine base quality at each position, subsitute nucleotides with quality less than the quality cutoff (PQcutoff) with "N" (We used PQcutoff=14 for this HTS run)
PQcutoff<-14
length(reads_long)
# Tally reads per length class; the masking loop below processes one
# length class at a time (equal widths are required by replaceLetterAt).
width_F<-width(reads_long)
stat<-as.data.frame(table(width_F))
stat$Freq<-as.numeric(as.character(stat$Freq))
stat$width_F<-as.numeric(as.character(stat$width_F))
# Replace every base whose Phred quality is below PQcutoff with "N".
# Reads are processed one length class at a time (replaceLetterAt needs an
# equal-width set) and in chunks of at most 300,000 reads to bound memory.
#
# mask_low_quality: quality-mask one equal-width chunk of reads.
#   reads_chunk : a ShortReadQ subset whose reads all share one width.
#   returns     : a DNAStringSet with low-quality bases replaced by "N".
mask_low_quality <- function(reads_chunk) {
  seqs <- sread(reads_chunk)
  q <- PhredQuality(quality(quality(reads_chunk)))
  # One row per read; TRUE where the base quality is below the cutoff.
  MATR <- matrix(charToRaw(as.character(unlist(q))), nrow=length(q), byrow=TRUE)
  positions <- MATR < charToRaw(as.character(PhredQuality(as.integer(PQcutoff))))
  # A pool of "N"s long enough to replace every flagged base in a read.
  N <- DNAString(paste(rep.int("N", width(seqs)[1]), collapse=""))
  N_tr <- as(Views(N, start=1, end=rowSums(positions)), "DNAStringSet")
  replaceLetterAt(seqs, positions, N_tr)
}
chunk_size <- 300000
For <- c()
for (i in seq_len(nrow(stat))) {
  reads <- reads_long[width(reads_long) == stat[i, 1]]
  if (length(reads) == 0) next
  # Chunk start offsets. Unlike the original b-indexed loop, this is also
  # correct when length(reads) is a nonzero exact multiple of chunk_size
  # (the old final iteration then indexed past the end of `reads`).
  starts <- seq(1, length(reads), by = chunk_size)
  for (s in starts) {
    e <- min(s + chunk_size - 1, length(reads))
    For <- append(For, mask_low_quality(reads[s:e]))
  }
  gc()
}
M1<-For #M1 contains your reads
rm(list=setdiff(ls(),c("M1")))
# Collapse every IUPAC ambiguity code to "N" so the N-count below captures
# all uncertain positions.
M1<-gsub("R","N",M1)
M1<-gsub("Y","N",M1)
M1<-gsub("M","N",M1)
M1<-gsub("K","N",M1)
M1<-gsub("S","N",M1)
M1<-gsub("W","N",M1)
M1<-gsub("H","N",M1)
M1<-gsub("B","N",M1)
M1<-gsub("V","N",M1)
M1<-gsub("D","N",M1)
table(vcountPattern("N",M1,max.mismatch=0,with.indels=F)) #shows how many reads have 0, 1, 2.... Ns per read
log<-vcountPattern("N",M1,max.mismatch=0,with.indels=F)<=0 #we will keep reads with no more than 0 Ns
table(log) #no more than 0N per read
M1<-DNAStringSet(M1[log])
#####How many reads contain at least 1 repeat?
# CRISPR direct repeat (DR); matched with up to 3 mismatches, no indels.
# NOTE: this shadows base::rep for the rest of the script.
rep="GTGTTCCCCGCGCCAGCGGGGATAAACC"
LOG1<-(vcountPattern(rep,M1,max.mismatch=3,with.indels=F)>0)
M1_r<-M1[LOG1] ###the reads with repeats
M1_NOT_r<-M1[!LOG1] ###the reads without repeats
length(M1_r)
#where does the first DR stop??
end<-endIndex(vmatchPattern(rep,M1_r,max.mismatch=3,with.indels=F))
end<-unlist(lapply(end, function(x) x[1])) #take the end coordinate of the first repeat only
# Keep only reads with at least one base after the first repeat.
log<-(end+1)<(width(M1_r))
M1_r<-M1_r[log]
end<-end[log]
M1_trimmed<-narrow(M1_r, end+1, width(M1_r)) ###trimm reads so they now start with the first nucleotide of a spacer (G in the case of a typial E.coli spacer)
log<-width(M1_trimmed)>=24#(at least 24 nucleotides of the first spacer)
table(log)
M1_trimmed<-M1_trimmed[log]
#####how many reads contain second repeat? (these are the reads with one additional spacer)
LOG1<-vcountPattern(rep,M1_trimmed,max.mismatch=3,with.indels=F)==1
table(LOG1)
adapted<-M1_trimmed[LOG1] ###these are the reads with additional repeat
nonadapted<-M1_trimmed[!LOG1]
# First 24 nt after the repeat: enough to recognise the pre-existing spacer.
nonadapted_24<-narrow(nonadapted, 1,24)
#####How many nonadapted reads contain preexisting spacer?
sp0<-"GAGCACAAATATCATCGCTCAAAC"
log_sp<-vcountPattern(sp0,nonadapted_24,max.mismatch=2,with.indels=F)==1
table(log_sp)
length(log_sp[log_sp==TRUE]) #Nonadapted (Contain old spacer)
length(log_sp[log_sp!=TRUE]) #Nonadapted (Smth else instead of the old spacer)
maybe_adapted<-nonadapted_24[!log_sp]
length(adapted) #these are reads with one additional spacer
length(maybe_adapted) #not clear what follows the repeat
length(nonadapted) #nonadapted
##determine the position of the second repeat and extract new spacers
start<-startIndex(vmatchPattern(rep,adapted,max.mismatch=3,with.indels=F))
start<-as.vector(unlist(start))
sp1<-narrow(adapted, 1, start-1) #trimm by the start coordinate of the second repeat to keep just the spacer sequence
spacers<-sp1 #these are the sequences of new spacers
hist(width(spacers)) #shows distribution of spacer lengths
table(width(spacers))#shows distribution of spacer lengths
writeFasta(DNAStringSet(spacers),file="spacers.fasta") #writes spacer sequences into the file "spacers.fasta"
writeFasta(DNAStringSet(maybe_adapted),file="maybe_spacers.fasta") #writes spacer sequences into the file "maybe_spacers.fasta"
| /Prespacer_Efficiency (OligoTransformation)/Spacer_extraction.R | permissive | AnnaBioLogic/Shiriaeva_et_al_2019 | R | false | false | 6,260 | r | source("https://bioconductor.org/biocLite.R")
biocLite("ShortRead")#install the package
library(ShortRead) #load the package
rm(list=ls())
setwd("D:/OneDrive/Results/Oligo transformation/HTS_new/1_2") #set way to the folder with a fastq file
reads=readFastq("G33_C33_OligoTransformation_Rep1") #upload the fastq file
length(reads) ###initial number of reads
dir.create("PQ14 0N_longer105")
setwd("D:/OneDrive/Results/Oligo transformation/HTS_new/1_2/PQ14 0N_longer105")
width<-as.numeric(width(reads))
barplot(table(width)) #shows distribution of read lengths
log<-width>=105 #check if the read length>=105nt (Leader(54nt)+Repeat(28nt)+1st spacer (at least 23nt))
table(log)#TRUE = how many reads have length>=105nt
reads_long<-reads[log] #keep only reads >=105nt
###Determine base quality at each position, subsitute nucleotides with quality less than the quality cutoff (PQcutoff) with "N" (We used PQcutoff=14 for this HTS run)
PQcutoff<-14
length(reads_long)
width_F<-width(reads_long)
stat<-as.data.frame(table(width_F))
stat$Freq<-as.numeric(as.character(stat$Freq))
stat$width_F<-as.numeric(as.character(stat$width_F))
For<-c()
for (i in 1:nrow(stat)){
width<-stat[i,1]
reads<-reads_long[width(reads_long)==width]
l<-length(reads)
if (l<=300000) {
reads_F1<-reads
seq_R1 <- sread(reads_F1)
q <- PhredQuality(quality(quality(reads_F1)))
MATR <- matrix(charToRaw(as.character(unlist(q))), nrow=length(q), byrow=TRUE)
positions <- MATR < charToRaw(as.character(PhredQuality(as.integer(PQcutoff))))
N <- DNAString(paste(rep.int("N", width(seq_R1)[1]), collapse=""))
N_tr <- as(Views(N, start=1, end=rowSums(positions)), "DNAStringSet")
rm(reads_F1)
gc()
seq_R <- replaceLetterAt(seq_R1, positions, N_tr)
For<-append(For, seq_R)
}
else {
whole<-l%/%300000
rest<-l%%300000
for (b in 1:(whole+1)) {
if (b<(whole+1)) {
reads_F1<-reads[(b*300000-299999):(b*300000)]
seq_R1 <- sread(reads_F1)
q <- PhredQuality(quality(quality(reads_F1)))
MATR <- matrix(charToRaw(as.character(unlist(q))), nrow=length(q), byrow=TRUE)
positions <- MATR < charToRaw(as.character(PhredQuality(as.integer(PQcutoff))))
N <- DNAString(paste(rep.int("N", width(seq_R1)[1]), collapse=""))
N_tr <- as(Views(N, start=1, end=rowSums(positions)), "DNAStringSet")
rm(reads_F1)
gc()
seq_R <- replaceLetterAt(seq_R1, positions, N_tr)
For<-append(For, seq_R)}
else {
reads_F1<-reads[(b*300000-299999):length(reads)]
seq_R1 <- sread(reads_F1)
q <- PhredQuality(quality(quality(reads_F1)))
MATR <- matrix(charToRaw(as.character(unlist(q))), nrow=length(q), byrow=TRUE)
positions <- MATR < charToRaw(as.character(PhredQuality(as.integer(PQcutoff))))
N <- DNAString(paste(rep.int("N", width(seq_R1)[1]), collapse=""))
N_tr <- as(Views(N, start=1, end=rowSums(positions)), "DNAStringSet")
rm(reads_F1)
gc()
seq_R <- replaceLetterAt(seq_R1, positions, N_tr)
For<-append(For, seq_R)
}
}
}
}
M1<-For #M1 contains your reads
rm(list=setdiff(ls(),c("M1")))
M1<-gsub("R","N",M1)
M1<-gsub("Y","N",M1)
M1<-gsub("M","N",M1)
M1<-gsub("K","N",M1)
M1<-gsub("S","N",M1)
M1<-gsub("W","N",M1)
M1<-gsub("H","N",M1)
M1<-gsub("B","N",M1)
M1<-gsub("V","N",M1)
M1<-gsub("D","N",M1)
table(vcountPattern("N",M1,max.mismatch=0,with.indels=F)) #shows how many reads have 0, 1, 2.... Ns per read
log<-vcountPattern("N",M1,max.mismatch=0,with.indels=F)<=0 #we will keep reads with no more than 0 Ns
table(log) #no more than 0N per read
M1<-DNAStringSet(M1[log])
#####How many reads contain at least 1 repeat?
rep="GTGTTCCCCGCGCCAGCGGGGATAAACC"
LOG1<-(vcountPattern(rep,M1,max.mismatch=3,with.indels=F)>0)
M1_r<-M1[LOG1] ###the reads with repeats
M1_NOT_r<-M1[!LOG1] ###the reads without repeats
length(M1_r)
#where does the first DR stop??
end<-endIndex(vmatchPattern(rep,M1_r,max.mismatch=3,with.indels=F))
end<-unlist(lapply(end, function(x) x[1])) #take the end coordinate of the first repeat only
log<-(end+1)<(width(M1_r))
M1_r<-M1_r[log]
end<-end[log]
M1_trimmed<-narrow(M1_r, end+1, width(M1_r)) ###trimm reads so they now start with the first nucleotide of a spacer (G in the case of a typial E.coli spacer)
log<-width(M1_trimmed)>=24#(at least 24 nucleotides of the first spacer)
table(log)
M1_trimmed<-M1_trimmed[log]
#####how many reads contain second repeat? (these are the reads with one additional spacer)
LOG1<-vcountPattern(rep,M1_trimmed,max.mismatch=3,with.indels=F)==1
table(LOG1)
adapted<-M1_trimmed[LOG1] ###these are the reads with additional repeat
nonadapted<-M1_trimmed[!LOG1]
nonadapted_24<-narrow(nonadapted, 1,24)
#####How many nonadapted reads contain preexisting spacer?
sp0<-"GAGCACAAATATCATCGCTCAAAC"
log_sp<-vcountPattern(sp0,nonadapted_24,max.mismatch=2,with.indels=F)==1
table(log_sp)
length(log_sp[log_sp==TRUE]) #Nonadapted (Contain old spacer)
length(log_sp[log_sp!=TRUE]) #Nonadapted (Smth else instead of the old spacer)
maybe_adapted<-nonadapted_24[!log_sp]
length(adapted) #these are reads with one additional spacer
length(maybe_adapted) #not clear what follows the repeat
length(nonadapted) #nonadapted
##determine the position of the second repeat and extract new spacers
start<-startIndex(vmatchPattern(rep,adapted,max.mismatch=3,with.indels=F))
start<-as.vector(unlist(start))
sp1<-narrow(adapted, 1, start-1) #trimm by the start coordinate of the second repeat to keep just the spacer sequence
spacers<-sp1 #these are the sequences of new spacers
hist(width(spacers)) #shows distribution of spacer lengths
table(width(spacers))#shows distribution of spacer lengths
writeFasta(DNAStringSet(spacers),file="spacers.fasta") #writes spacer sequences into the file "spacers.fasta"
writeFasta(DNAStringSet(maybe_adapted),file="maybe_spacers.fasta") #writes spacer sequences into the file "maybe_spacers.fasta"
|
# Return the inverse of the special "cache matrix" object `x` (as created
# by makeCacheMatrix). The inverse is computed at most once: if a cached
# value already exists it is returned immediately, otherwise the inverse
# is computed with solve(), stored in the cache, and returned.
#
# x   : cache-matrix object exposing get(), getInverse(), setInverse()
# ... : further arguments passed on to solve()
# Returns the matrix inverse of x$get().
cacheSolve <- function(x, ...) {
        inv <- x$getInverse()
        if(!is.null(inv)) {
                message("getting cached data")
                return(inv)
        }
        data <- x$get()
        # Bug fix: invert the matrix held by `x`; the original called
        # solve(mat, ...) on an undefined variable `mat`.
        inv <- solve(data, ...)
        x$setInverse(inv)
        inv
}
inv <- x$getInverse()
if(!is.null(inv)) {
message("getting cached data")
return(inv)
}
data <- x$get()
inv <- solve(mat, ...)
x$setInverse(inv)
inv
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/multkw_m.R
\name{multkw.m}
\alias{multkw.m}
\title{Multivariate Kruskal-Wallis test with missing data}
\usage{
multkw.m(group, y, r, weight, print = TRUE)
}
\arguments{
\item{group}{The factorial variable that subsets the dataset in groups. Can be a character vector, a factorial vector or an integer/numeric vector.}
\item{y}{The dataset of n numeric(or integer) variables.}
\item{r}{Optional. The missing data pattern to be applied. If dataset has \code{NA} and if the missing data pattern is the distribution of the \code{NA}'s in the dataset, \code{r} is optional and is automatically computed.}
\item{weight}{Optional. The weighting scheme to be used to compute the final value of the test statistic. As test statistics are calculated for each pattern of missingness, there are as statistics as patterns. The final test statistic can thus be the arithmetic mean of each statistic (\code{weight="equal"}) or the ponderated mean of each statistic relative to the proportion of each missing pattern (\code{weight="prop"}).}
\item{print}{Whether the test should be printed (\code{TRUE}, the default) or not (e.g., to be stored in an object)}
}
\value{
Returns a list of results of the various multivariate Kruskal-Wallis tests that have been computed.
The results are the test statistics (W2), the degrees of freedom (df) and the p-value of the test statistic.
These three results are given for (1) a "classical" multivariate Kruskal-Wallis test, i.e. on data without missing values; each test statistic is thus followed by a .c for "complete" and (2) a global multivariate Kruskal-Wallis test that takes into account missing values (see details); each test statistic is thus followed by a .m for "missing".
}
\description{
This function computes a multivariate Kruskal-Wallis test for n numeric variables (which can contain NA's) relative to one factorial variable (that subsets the dataset in groups)
}
\details{
A "likelihood-based" multivariate Kruskal-Wallis test is computed; in large samples, the test statistic is approximately chi-squared distributed.
A first "classical" multivariate Kruskal-Wallis test is computed on "complete" data (i.e. removing the rows with at least one missing value).
A second test is computed that includes missing values: the test is computed for each "missing pattern" (i.e. missing pattern 1 = no missing data; missing pattern 2 = missing data only in the first variable, etc.) and a general test statistic is thus obtained from the "partial" test statistics. See also the "weight" option.
Finally, the outputs allow comparing results with complete data only and with missing data.
As the test statistic is approximately chi-squared distributed (in large samples), p-values are based on chi-squared distributions.
Degrees of freedom are not the same for the "complete" data test and for the "missing" data test; see the See Also section.
}
\examples{
data(airquality)
datamkw<-airquality[,1:4]
multkw(y=datamkw,airquality$Month)
multkw.m(y=datamkw,airquality$Month)
}
\references{
\insertRef{He.etal.2017}{ULT}
}
\seealso{
See chapter 2.2.2 and 4.2 of the \href{http://d-scholarship.pitt.edu/19411/1/Fanyin_ETD_draft_08-06-2013.pdf}{PhD manuscript of Fanyin He} and 'Methodology' of \insertCite{He.etal.2017;textual}{ULT} for more details.
}
\author{
Fanyin He (most of the statistical function)
Jacob Maugoust (packaging)
}
| /man/multkw.m.Rd | no_license | jacobmaugoust/ULT | R | false | true | 3,420 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/multkw_m.R
\name{multkw.m}
\alias{multkw.m}
\title{Multivariate Kruskal-Wallis test with missing data}
\usage{
multkw.m(group, y, r, weight, print = TRUE)
}
\arguments{
\item{group}{The factorial variable that subsets the dataset in groups. Can be a character vector, a factorial vector or an integer/numeric vector.}
\item{y}{The dataset of n numeric(or integer) variables.}
\item{r}{Optional. The missing data pattern to be applied. If dataset has \code{NA} and if the missing data pattern is the distribution of the \code{NA}'s in the dataset, \code{r} is optional and is automatically computed.}
\item{weight}{Optional. The weighting scheme to be used to compute the final value of the test statistic. As test statistics are calculated for each pattern of missingness, there are as statistics as patterns. The final test statistic can thus be the arithmetic mean of each statistic (\code{weight="equal"}) or the ponderated mean of each statistic relative to the proportion of each missing pattern (\code{weight="prop"}).}
\item{print}{Whether the test should be printed (\code{TRUE}, the default) or not (e.g., to be stored in an object)}
}
\value{
Returns a list of results of the various multivariate Kruskal-Wallis tests that have been computed.
The results are the test statistics (W2), the degrees of freedom (df) and the p-value of the test statistic.
These three results are given for (1) a "classical" multivariate Kruskal-Wallis test, i.e. on data without missing values; each test statistic is thus followed by a .c for "complete" and (2) a global multivariate Kruskal-Wallis test that takes into account missing values (see details); each test statistic is thus followed by a .m for "missing".
}
\description{
This function computes a multivariate Kruskal-Wallis test for n numeric variables (which can contain NA's) relative to one factorial variable (that subsets the dataset in groups)
}
\details{
A "likelihood-based" multivariate Kruskal-Wallis test is computed ; in large samples, the test statistic is approximately khi² distributed.
A first "classic" multivariate Kruskal-Wallis test is computed on "complete" data (i.e. removing the rows with at least one missing value).
A second test is computed and include missing values: the test is computed for each "missing pattern" (i.e. missing pattern 1 = no missing data ; missing pattern 2 = missing data only in the first variable, etc) and a general test statistic is thus obtained from the "partial" test statistics. See also option "weight".
Finally, outputs allow to compare results with complete data only and with missing data.
As the test statistic is approximately khi² distributed (in large samples), p-values are based on khi² distributions.
Degrees of freedom ar not the same for the "complete" data test and for the "missing" data test, see the See Also section.
}
\examples{
data(airquality)
datamkw<-airquality[,1:4]
multkw(y=datamkw,airquality$Month)
multkw.m(y=datamkw,airquality$Month)
}
\references{
\insertRef{He.etal.2017}{ULT}
}
\seealso{
See chapter 2.2.2 and 4.2 of the \href{http://d-scholarship.pitt.edu/19411/1/Fanyin_ETD_draft_08-06-2013.pdf}{PhD manuscript of Fanyin He} and 'Methodology' of \insertCite{He.etal.2017;textual}{ULT} for more details.
}
\author{
Fanyin He (most of the statistical function)
Jacob Maugoust (packaging)
}
|
computeCentroids <- function(X, idx, K) {
  #COMPUTECENTROIDS returns the new centroids by computing the means of the
  #data points assigned to each centroid.
  #   centroids <- COMPUTECENTROIDS(X, idx, K) returns the new centroids by
  #   computing the means of the data points assigned to each centroid. It is
  #   given a dataset X where each row is a single data point, a vector
  #   idx of centroid assignments (i.e. each entry in range [1..K]) for each
  #   example, and K, the number of centroids. Returns a matrix centroids,
  #   where each row of centroids is the mean of the data points assigned
  #   to it.
  #
  n <- dim(X)[2]

  # One row per centroid; a centroid with no assigned points keeps its
  # zero row (matches the stub's initialisation).
  centroids <- matrix(0, K, n)

  for (k in seq_len(K)) {
    members <- which(idx == k)
    if (length(members) > 0) {
      # drop = FALSE keeps X[members, ] a matrix even for a single member.
      centroids[k, ] <- colMeans(X[members, , drop = FALSE])
    }
  }

  centroids
}
| /starter/7/computeCentroids.R | no_license | faridcher/ml-course | R | false | false | 1,277 | r | computeCentroids <- function(X, idx, K) {
#COMPUTECENTROIDS returs the new centroids by computing the means of the
#data points assigned to each centroid.
# centroids <- COMPUTECENTROIDS(X, idx, K) returns the new centroids by
# computing the means of the data points assigned to each centroid. It is
# given a dataset X where each row is a single data point, a vector
# idx of centroid assignments (i.e. each entry in range [1..K]) for each
# example, and K, the number of centroids. You should return a matrix
# centroids, where each row of centroids is the mean of the data points
# assigned to it.
#
# Useful variables
m <- dim(X)[1]
n <- dim(X)[2]
# You need to return the following variables correctly.
centroids <- matrix(0,K,n)
# ----------------------- YOUR CODE HERE -----------------------
# Instructions: Go over every centroid and compute mean of all points that
# belong to it. Concretely, the row vector centroids[i, ]
# should contain the mean of the data points assigned to
# centroid i.
#
# Note: You can use a for-loop over the centroids to compute this.
#
centroids
# --------------------------------------------------------------
}
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 1723132
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 1723132
c
c Input Parameter (command line, file):
c input filename QBFLIB/Miller-Marin/fpu/fpu-10Xh-error01-nonuniform-depth-24.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 645201
c no.of clauses 1723132
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 1723132
c
c QBFLIB/Miller-Marin/fpu/fpu-10Xh-error01-nonuniform-depth-24.qdimacs 645201 1723132 E1 [] 0 3525 641338 1723132 NONE
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Miller-Marin/fpu/fpu-10Xh-error01-nonuniform-depth-24/fpu-10Xh-error01-nonuniform-depth-24.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 693 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 1723132
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 1723132
c
c Input Parameter (command line, file):
c input filename QBFLIB/Miller-Marin/fpu/fpu-10Xh-error01-nonuniform-depth-24.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 645201
c no.of clauses 1723132
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 1723132
c
c QBFLIB/Miller-Marin/fpu/fpu-10Xh-error01-nonuniform-depth-24.qdimacs 645201 1723132 E1 [] 0 3525 641338 1723132 NONE
|
\name{H3K4me3.TDH.other.chunk8}
\alias{H3K4me3.TDH.other.chunk8}
\docType{data}
\title{
H3K4me3_TDH_other chunk 8 subset
}
\description{
It should be easy to recover a joint peak in at least 8 of the 10 samples.
}
\usage{data("H3K4me3.TDH.other.chunk8")}
\format{
A profile data frame (sample.id, chromStart, chromEnd, count)
with 19866 observations.
}
\source{
http://cbio.ensmp.fr/~thocking/chip-seq-chunk-db/ data set
H3K4me3_TDH_other, chunk 8.
http://cbio.ensmp.fr/~thocking/chip-seq-chunk-db/H3K4me3_TDH_other/8/regions.png
}
\keyword{datasets}
| /man/H3K4me3.TDH.other.chunk8.Rd | no_license | tdhock/PeakSegJoint | R | false | false | 566 | rd | \name{H3K4me3.TDH.other.chunk8}
\alias{H3K4me3.TDH.other.chunk8}
\docType{data}
\title{
H3K4me3_TDH_other chunk 8 subset
}
\description{
It should be easy to recover a joint peak in at least 8 of the 10 samples.
}
\usage{data("H3K4me3.TDH.other.chunk8")}
\format{
A profile data frame (sample.id, chromStart, chromEnd, count)
with 19866 observations.
}
\source{
http://cbio.ensmp.fr/~thocking/chip-seq-chunk-db/ data set
H3K4me3_TDH_other, chunk 8.
http://cbio.ensmp.fr/~thocking/chip-seq-chunk-db/H3K4me3_TDH_other/8/regions.png
}
\keyword{datasets}
|
# 0_initialize.r
# Last updated 20190822
# checkpoint will look for (and, if necessary, install)
# packages from the snapshot date
library(checkpoint)
checkpoint("2018-11-25")
library(here) # set top level of current project
library(tidyverse) # mutate, filter, etc
library(broom) # tidy
library(geepack) #geeglm
library(imputeTS) # na.replace
library(conflicted) # conflict_prefer
library(arm) # rescale
# This function is adapted from Dustin Fife's "fifer" package, which is not
# on MRAN/CRAN but is available at https://github.com/dustinfife/fifer
chisq.post.hoc <- function(
  tbl,
  test=c("fisher.test"),
  popsInRows=TRUE,
  control=c("fdr","BH","BY","bonferroni","holm","hochberg","hommel"),
  digits=4, ...) {
  # Pairwise post-hoc comparisons between the rows (or columns) of a
  # contingency table, with multiple-testing correction.
  #
  # Args:
  #   tbl: contingency table / matrix; groups in rows (or columns, see below).
  #   test: name of the test function applied to each 2-group sub-table
  #     (default "fisher.test"; any function returning a $p.value works).
  #   popsInRows: if FALSE, the table is transposed so groups are compared
  #     across what were originally the columns.
  #   control: p-value adjustment method passed to p.adjust().
  #   digits: rounding applied to the reported p-values.
  #   ...: forwarded to the test function.
  #
  # Returns:
  #   data.frame with columns comparison, raw.p, adj.p (one row per pair).
  control <- match.arg(control)          # validate/expand correction method
  test <- match.fun(test)                # resolve test name to a function
  if (!popsInRows) tbl <- t(tbl)
  popsNames <- rownames(tbl)
  prs <- combn(seq_len(nrow(tbl)), 2)    # all pairwise row comparisons
  tests <- ncol(prs)
  pvals <- numeric(tests)                # preallocate result vectors
  lbls <- character(tests)
  for (i in seq_len(tests)) {
    pvals[i] <- test(tbl[prs[, i], ], ...)$p.value
    lbls[i] <- paste(popsNames[prs[, i]], collapse = " vs. ")
  }
  adj.pvals <- p.adjust(pvals, method = control)
  cat("Adjusted p-values used the", control, "method.\n\n")
  # stringsAsFactors = FALSE keeps the labels as character on R < 4.0 too
  data.frame(
    comparison = lbls,
    raw.p = round(pvals, digits),
    adj.p = round(adj.pvals, digits),
    stringsAsFactors = FALSE)
}
# These scripts pull data from two sources. "SVIP_CLEAN" is the
# location of the extracted `Simons_VIP__16p11.2_Dataset_v10.0`
# archive downloadable from the SFARI Base portal. "SVIP_RAW"
# is the location of a single raw CSV file containing M-SOPS data,
# which was not part of the "clean" data release, but which we received
# by request from the Simons Foundation.
# NOTE(review): both paths are resolved relative to the project root via
# here(); the referenced data must exist before sourcing the scripts below.
SVIP_CLEAN <- here("data","clean","Simons_VIP__16p11.2_Dataset_v10.0")
SVIP_RAW <- here("data","raw","sops.csv")
# Give dplyr namespace priority (masks stats::filter / MASS::select clashes)
conflict_prefer("filter", "dplyr")
conflict_prefer("select", "dplyr")
# The scripts below run in order and communicate through the global
# environment; each step presumably depends on objects created by the
# previous one — do not reorder.
# Pull in data
source(here("r","1_import.r"))
# Derive ADOS RRB and SA Calibrated Severity Scores
source(here("r","2_derive_ados_domain_css.r"))
# Derive index of positive psychotic symptoms
source(here("r","3_derive_psychosis_index.r"))
# Create groups for regressions
source(here("r","4_create_analysis_groups.r"))
# Run analyses and save data as series of CSV files
source(here("r","5_run_analyses.r")) | /r/0_initialize.r | permissive | Andreas222222/2019-16p11-psychosis | R | false | false | 2,606 | r | # 0_initialize.r
# Last updated 20190822
# checkpoint will look for (and, if necessary, install)
# packages from the snapshot date
library(checkpoint)
checkpoint("2018-11-25")
library(here) # set top level of current project
library(tidyverse) # mutate, filter, etc
library(broom) # tidy
library(geepack) #geeglm
library(imputeTS) # na.replace
library(conflicted) # conflict_prefer
library(arm) # rescale
# This function is adapted from Dustin Fife's "fifer" package, which is not
# on MRAN/CRAN but is available at https://github.com/dustinfife/fifer
chisq.post.hoc <- function(
tbl,
test=c("fisher.test"),
popsInRows=TRUE,
control=c("fdr","BH","BY","bonferroni","holm","hochberg","hommel"),
digits=4, ...) {
control <- match.arg(control) # extract correction method
test = match.fun(test) # extract which test (fisher or chi square)
if (!popsInRows) tbl <- t(tbl) # test rows or columns
popsNames <- rownames(tbl)
prs <- combn(1:nrow(tbl),2) # come up with all possible comparisons
tests <- ncol(prs) # preallocate
pvals <- numeric(tests)
lbls <- character(tests)
for (i in 1:tests) {
pvals[i] <- test(tbl[prs[,i],], ...)$p.value
lbls[i] <- paste(popsNames[prs[,i]],collapse=" vs. ")
}
adj.pvals <- p.adjust(pvals,method=control)
cat("Adjusted p-values used the",control,"method.\n\n")
data.frame(
comparison=lbls,
raw.p=round(pvals,digits),
adj.p=round(adj.pvals,digits))
}
# These scripts pull data from two sources. "SVIP_CLEAN" is the
# location of the extracted `Simons_VIP__16p11.2_Dataset_v10.0`
# archive downloadable from the SFARI Base portal. "SVIP_RAW"
# is the location of a single raw CSV file containing M-SOPS data,
# which was not part of the "clean" data release, but which we received
# by request from the Simons Foundation.
SVIP_CLEAN <- here("data","clean","Simons_VIP__16p11.2_Dataset_v10.0")
SVIP_RAW <- here("data","raw","sops.csv")
# Give dplyr namespace priority
conflict_prefer("filter", "dplyr")
conflict_prefer("select", "dplyr")
# Pull in data
source(here("r","1_import.r"))
# Derive ADOS RRB and SA Calibrated Severity Scores
source(here("r","2_derive_ados_domain_css.r"))
# Derive index of positive psychotic symptoms
source(here("r","3_derive_psychosis_index.r"))
# Create groups for regressions
source(here("r","4_create_analysis_groups.r"))
# Run analyses and save data as series of CSV files
source(here("r","5_run_analyses.r")) |
#==============================================================================
# Working directory
#==============================================================================
setwd("C:\\Users\\ma.bolivar643\\Dropbox\\Accelerometria\\MARA")
#==============================================================================
# Required libraries
#==============================================================================
library("RSQLite")
library("sqldf")
library("xlsx")
library("Hmisc")
library("scales")
source(".\\R\\ADPP.R")
#==============================================================================
# Functions
#==============================================================================
##Compares the dates in the .agd file against the PACK
qualityControlCheck <- function(dbDir,data, settings, pack){
#Compares the dates/serial in the .agd file against the PACK
#
# Args:
# dbDir: The name of the file which the data was read from.
# data: The data frame with accelerometry data (datetime, axis1, axis2, axis3, etc)
# settings: The data frame constaining the accelerometer settings parameters
# pack: The data frame containing the participant checklist to validate the information
# Returns:
# Logical. True, if the info in the data.frame is consistent with th PACK
#Auxiliar var. True, if the info in the data.frame is consistent with th PACK
valid <- TRUE
#Extract the participant id, first or second use (A or B), accelerometer serial and download date
m<-regexec("\\\\[[:print:]]+\\\\([[:digit:]]+)([A-Z])([[:digit:]]).([[:digit:]]+).([[:digit:]]+)",dbDir)
use <- regmatches(dbDir, m)[[1]][3] #If it is the first time of use -> A, else B.
id <- regmatches(dbDir, m)[[1]][2] #Participant ID
acserial <- regmatches(dbDir, m)[[1]][5] #Accelerometer serial number
downloadDay <- regmatches(dbDir, m)[[1]][6] #Download day
row <- match(id,table=pack$ID.Participante,nomatch=-1) #row in the PACK
days <- unique(as.Date(data$datetime))
packserial <- ifelse(use == "A",pack[row, 1],pack[row, 9])
epoch <- data[2,1]-data[1,1]
if(acserial != packserial){
cat("\nThe serial contained in the pack mismatch the serial in the .agd file")
valid <- FALSE
}
if(row == -1){
cat(paste("\nParticipant ",as.character(id)," doesn't have a correponding record in the PACK", sep=" "))
valid = FALSE
}else{
#Compare the serial in the .agd file with the serial in the name file
dsindx <- match("deviceserial",settings$settingName)
if(acserial != substr(settings$settingValue[dsindx], 9,13)){
cat("\nThe serial contained in the file name mismatch the serial in the .agd file")
valid <- FALSE
}
if(min(days)!=ifelse(use == "A",pack[row, 2],pack[row, 10])){
cat("\nInitialization day mismatch the initialization day in the pack")
cat(paste("\n\tInitialization day .agd: ",min(days), sep=" "))
cat(paste("\n\tInitialization day PACK: ",(as.Date("1970-01-01")+ifelse(use == "A",pack[row, 2],pack[row, 10])), sep=" "))
valid <- FALSE
}
if(max(days)<=ifelse(use == "A",pack[row, 2],pack[row, 10])+6){
cat("\nRetrieval day is within the 7 days of data collection")
cat(paste("\n\tRetrieval day .agd: ",max(days), sep=" "))
cat(paste("\n\tRetrieval day PACK: ",(as.Date("1970-01-01")+ifelse(use == "A",pack[row, 6],pack[row, 14])), sep=" "))
valid = FALSE
}
}
return (valid)
}
#Checks for unusual values in wear labeled observation
checkWearPeriods <- function(data,maxc = 20000){
  # Checks for unusual values in wear labeled observation and
  # classifies the leading run of "wear" epochs of the first day as sleep
  # (at most the first 160 epochs; NOTE(review): the original comment said
  # "20 minutes" — confirm whether 160 epochs is the intended window).
  #
  # Args:
  #   data: The data frame with accelerometry data (datetime, axis1, axis2, axis3, etc) plus the activity column.
  #   maxc: Maximum count limit per epoch.
  #
  # Returns:
  #   The data frame with all the accelerometry data plus the updated activity column.
  i <- 1
  # Bound check added so short frames cannot index past the last row (the
  # old condition read data$activity[nrow+1] and failed on while(NA)).
  while(i <= nrow(data) && i <= 160 && data$activity[i]=="wear"){
    data$activity[i]<-"sleep"
    i <- i+1
  }
  # BUGFIX: the original loop used seq(2:(nrow(data)-1)), which expands to
  # 1:(nrow-2) — it touched index 0 and skipped the last interior row.
  # The documented intent is to scan interior epochs 2..(nrow-1).
  if(nrow(data) >= 3){
    for( i in 2:(nrow(data)-1)){
      if(data$axis1[i]>maxc){
        # Implausibly high counts next to sleep are treated as sleep,
        # otherwise as non-wear artifacts.
        if(data$activity[i-1]=="sleep" || data$activity[i+1]=="sleep"){
          data$activity[i]="sleep"
        }else{
          data$activity[i]="non-wear"
        }
      }
    }
  }
  return(data)
}
#Set activity in each epoch
setActivitySNW <- function(x,label,intv, minlength=20){
  # Relabels the "wear" epochs inside each interval of intv with `label`.
  #
  # Args:
  #   x: Activity vector ("wear", "sleep", "non-wear").
  #   label: Replacement label for qualifying "wear" epochs.
  #   intv: Vector/matrix coercible to a two-column matrix; each row is a
  #     [start, end] index interval to process.
  #   minlength: An interval is relabeled only if it contains at least this
  #     many "wear" epochs.
  #
  # Returns:
  #   The updated activity vector.
  bounds <- matrix(intv, ncol = 2)
  n_intervals <- nrow(bounds)
  if (n_intervals > 0) {
    for (k in seq_len(n_intervals)) {
      lo <- bounds[k, 1]
      hi <- bounds[k, 2]
      segment <- x[lo:hi]
      is_wear <- segment == "wear"
      # Skip intervals with too few uninterrupted wear epochs
      if (sum(is_wear) >= minlength) {
        segment[is_wear] <- label
        x[lo:hi] <- segment
      }
    }
  }
  x
}
#Left joins the 15sec data frame with 60secc data frame (i.e. adds data15 data frame the activity column)
mergingActivity <- function(data15,data60){
  # Left joins the 15-sec data frame with the 60-sec data frame, i.e. adds
  # the per-minute activity label to every 15-sec epoch within that minute.
  #
  # Args:
  #   data15: Accelerometry data frame aggregated in 15-sec epochs.
  #   data60: Accelerometry data frame aggregated in 60-sec epochs plus the
  #     activity column ("sleep","non-wear","wear").
  #
  # Returns:
  #   The 15-sec data frame plus the activity column ("sleep","non-wear","wear").
  # NOTE(review): warnings are disabled globally here and reset to 0 below,
  # not restored to the caller's previous setting; the bare tryCatch has no
  # handlers and so is effectively a no-op wrapper.
  options(warn=-1)
  # Truncate each 15-sec timestamp to its containing minute (join key)
  data15$minute <- tryCatch(as.POSIXct(trunc.POSIXt(data15$datetime,units ="mins")))
  options(warn=0)
  # sqldf resolves data15/data60 by name from the calling environment;
  # LEFT JOIN keeps every 15-sec row even if its minute has no 60-sec match.
  q <- "SELECT d15.datetime, d15.axis1, d15.axis2, d15.axis3, d15.steps,
            d15.lux, d15.incline, d60.activity,d15.day_n2n, d15.day_m2m
       FROM data15 as d15 LEFT JOIN data60 as d60
       ON d15.minute = d60.datetime"
  data15 <- (sqldf(q, method ="raw"))
  # method = "raw" returns bare numerics; rebuild date/time classes (GMT)
  data15$datetime <- as.POSIXct(data15$datetime,origin="1970-01-01 00:00:00", tz = "GMT")
  data15$day_n2n <- as.Date(data15$day_n2n,origin="1970-01-01 00:00:00")
  data15$day_m2m <- as.Date(data15$day_m2m,origin="1970-01-01 00:00:00")
  return(data15)
}
#Extracts a substring from a string, starting from the right-most character.
substrRight <- function(x, n){
  # Returns the last n characters of each element of x.
  #
  # Args:
  #   x: Character vector to extract from.
  #   n: Number of trailing characters to keep.
  #
  # Returns:
  #   Character vector of the same length as x.
  len <- nchar(x)
  substring(x, len - n + 1, len)
}
createQueryIntensity <- function(colName = "intensityEV", intensities = c("moderate","vigorous"), dayType = "all",tomin = 4, q_data){
  # Builds a SQL query returning the average daily minutes and counts spent
  # in the requested physical-activity intensity levels.
  #
  # Args:
  #   colName: Column holding the intensity levels; its last two characters
  #     name the cut-point scheme (e.g. "EV" for Evenson).
  #   intensities: Intensity levels included in the WHERE clause.
  #   dayType: Prefix for the output column names (must match q_data's scope,
  #     e.g. "all", "WKDAY", "WKEND").
  #   tomin: Epochs per output time unit (e.g. 60s/15s = 4).
  #   q_data: Sub-query providing the epoch-level rows to aggregate.
  #
  # Returns:
  #   The SQL query as a single string.
  suffix <- substrRight(colName,2)
  # One "(intensityXX = '<level>')" predicate per requested level
  predicates <- vapply(intensities,
                       function(lvl) paste("(intensity",suffix," = '",lvl,"')",sep = ""),
                       character(1))
  where_clause <- paste(predicates, collapse = " OR ")
  # Column-name abbreviation: first letter of each level, concatenated
  initials <- vapply(intensities, substr, character(1), 1, 1)
  abbrev <- paste(initials, collapse = "", sep ="")
  q <- paste("SELECT avg(duration",suffix,") as ",dayType,"mean_",abbrev,"_",suffix,", avg(countsEV) as ",dayType,"mean_cnt",abbrev,"_",suffix,"
            FROM
            (SELECT day_m2m, count(axis1)*1.0/",tomin," as duration",suffix,", sum(axis1) as counts",suffix,"
            FROM (",q_data,")
            WHERE ",where_clause,"
            GROUP BY day_m2m)", sep ="")
  q
}
# Builds a set of queries to extract physical activity related variables given a accelerometry data frame.
getobs <- function(dbDir, data, timeunit = "min"){
  # Builds a set of queries to extract physical activity related variables given a accelerometry data frame.
  # Args:
  #   dbDir: The name of the file which the data were read from.
  #   data: Accelerometry data frame plus activity and physical intensity columns.
  #   timeunit: Time units for the time-derived variables ('sec','min','hour').
  #
  # Returns:
  #   A one-row data frame with per-day-type averages of wear time and
  #   intensity minutes/counts, plus participant identifiers and validity flags.
  #Default time unit
  tu <- 60
  if(timeunit =="sec"){
    tu <- 1
  }else if(timeunit == "min"){
    tu <- 60
  }else if(timeunit == "hour"){
    tu <- 3600
  }else{
    # Unknown unit: warn and fall through with the default of minutes
    print("Wrong time unit. Valid parameters: 'sec','min','hour'")
    print("default time unit:'min'")
  }
  # epoch length in seconds inferred from the first two timestamps
  epoch <- as.double(data[2,1])-as.double(data[1,1])
  tomin <- as.numeric(tu/epoch)*1.0
  # NOTE(review): weekdays() output is locale-dependent; the weekend names
  # derived from the fixed 2013-07-13/14 dates match only if the locale at
  # run time equals the locale the data's weekday column was built with.
  data$weekday <- weekdays(data$day_m2m, abbreviate=TRUE)
  saturday <- weekdays(as.Date(c("2013-07-13")),abbreviate=TRUE)
  saturday <- as.data.frame(saturday)
  sunday <- weekdays(as.Date(c("2013-07-14")),abbreviate=TRUE)
  sunday <- as.data.frame(sunday)
  weekend <- weekdays(as.Date(c("2013-07-13","2013-07-14")),abbreviate=TRUE) #Saturday and sunday
  midweek <- unique(data$weekday)
  midweek <- subset(midweek, subset= !(midweek%in%weekend) )
  # sqldf resolves these helper data frames by name in the queries below
  weekend <- as.data.frame(weekend)
  midweek <- as.data.frame(midweek)
  #Extract the participant id, measure number and use (A/B) from the file name
  m<-regexec("\\\\[[:print:]]+\\\\([[:digit:]]+)([A-Z])([[:digit:]]).([[:digit:]]+).([[:digit:]]+)",dbDir)
  PID <- regmatches(dbDir, m)[[1]][2]
  PID <- as.data.frame(PID)
  Measure <- regmatches(dbDir, m)[[1]][4]
  Measure <- as.data.frame(Measure)
  use <- regmatches(dbDir, m)[[1]][3]
  use <- as.data.frame(use)
  #Query: Wearing time (min) per day
  q_wtpd <- paste("SELECT day_m2m, count(axis1)*1.0/",tomin," as wearTime, weekday
           FROM data WHERE activity = 'wear'
           GROUP BY day_m2m", sep="")
  #Query: valid days (>= 600 wear minutes)
  q_vd <- paste("SELECT day_m2m, weekday FROM (",q_wtpd,") WHERE wearTime >= 600")
  #Query: number of valid days
  q_nvd <- paste("SELECT count(day_m2m) as valdays FROM (",q_vd,")")
  #Query: number of valid days (midweek)
  q_nvd_wk <- paste("SELECT count(day_m2m) as valwkdays FROM (",q_vd,"), midweek WHERE weekday = midweek")
  #Query: number of valid days (weekend)
  q_nvd_wd <- paste("SELECT count(day_m2m) as valwkend FROM (",q_vd,"), weekend WHERE weekday = weekend")
  #Extract only obs classified as wear and valid
  q_wearobs <- paste("SELECT * FROM data as d JOIN (",q_vd,") as vd
                     WHERE d.activity = 'wear' AND d.day_m2m = vd.day_m2m ")
  q_sleepNWobs <- paste("SELECT * FROM data as d JOIN (",q_vd,") as vd
                     WHERE (d.activity = 'sleep' OR d.activity = 'non-wear') AND d.day_m2m = vd.day_m2m ")
  dt <- "all"
  #Query: mean wake/wear time per day
  q_MeanWakeWear <- paste("SELECT avg(Time) as ", dt,"MeanWakeWear
                          FROM
                          (SELECT day_m2m, count(axis1)*1.0/",tomin," as Time, weekday FROM
                          (",q_wearobs,") GROUP BY day_m2m)",sep = "")
  #Query: mean sleep and nonwear time per day
  q_MeanSleepNW <- paste("SELECT avg(Time) as ", dt,"MeanSleepNW
                          FROM
                          (SELECT day_m2m, count(axis1)*1.0/",tomin," as Time, weekday FROM
                          (",q_sleepNWobs,") GROUP BY day_m2m)",sep = "")
  #Queries: physical activity intensities
  q_mv <- createQueryIntensity(colName = "intensityEV", intensities = c("moderate","vigorous"),dayType=dt,tomin, q_data=q_wearobs)
  q_v <- createQueryIntensity(colName = "intensityEV", intensities = c("vigorous"),dayType=dt,tomin, q_data=q_wearobs)
  q_m <- createQueryIntensity(colName = "intensityEV", intensities = c("moderate"),dayType=dt,tomin, q_data=q_wearobs)
  q_l <- createQueryIntensity(colName = "intensityEV", intensities = c("light"),dayType=dt,tomin, q_data=q_wearobs)
  q_s <- createQueryIntensity(colName = "intensityEV", intensities = c("sedentary"),dayType=dt,tomin, q_data=q_wearobs)
  #Query: Mean daily total intensity counts
  q_MeanActCounts <- paste("SELECT avg(Counts) as ", dt,"MeanActCounts
                          FROM
                          (SELECT day_m2m, sum(axis1) as Counts, weekday FROM
                          (",q_wearobs,") GROUP BY day_m2m)",sep = "")
  #Query: Mean intensity count per minute
  q_MeanIntenPerMin <- paste("SELECT avg(axis1)*1.0*",tomin," as ", dt,"MeanIntenPerMin FROM
                             (",q_wearobs,")",sep = "")
  #Extract only obs classified as wear and valid (MIDWEEK)
  q_wearobs <- paste("SELECT *
                     FROM midweek as w, data as d JOIN (",q_vd,") as vd
                     WHERE d.activity = 'wear' AND d.day_m2m = vd.day_m2m AND (d.weekday = w.midweek)")
  q_sleepNWobs <- paste("SELECT *
                        FROM midweek as w,data as d JOIN (",q_vd,") as vd
                        WHERE (d.activity = 'sleep' OR d.activity = 'non-wear')
                        AND (d.day_m2m = vd.day_m2m) AND (d.weekday = w.midweek) ")
  dt <- "WKDAY"
  #Query: mean wake/wear time per day
  q_MeanWakeWear_wk <- paste("SELECT avg(Time) as ", dt,"MeanWakeWear
                          FROM
                          (SELECT day_m2m, count(axis1)*1.0/",tomin," as Time, weekday FROM
                          (",q_wearobs,") GROUP BY day_m2m)",sep = "")
  #Query: mean sleep and nonwear time per day
  q_MeanSleepNW_wk <- paste("SELECT avg(Time) as ", dt,"MeanSleepNW
                          FROM
                          (SELECT day_m2m, count(axis1)*1.0/",tomin," as Time, weekday FROM
                          (",q_sleepNWobs,") GROUP BY day_m2m)",sep = "")
  #Queries: physical activity intensities (MIDWEEK)
  q_mv_wk <- createQueryIntensity(colName = "intensityEV", intensities = c("moderate","vigorous"),dayType=dt, tomin,q_data=q_wearobs)
  q_v_wk <- createQueryIntensity(colName = "intensityEV", intensities = c("vigorous"),dayType=dt, tomin,q_data=q_wearobs)
  q_m_wk <- createQueryIntensity(colName = "intensityEV", intensities = c("moderate"),dayType=dt, tomin,q_data=q_wearobs)
  q_l_wk <- createQueryIntensity(colName = "intensityEV", intensities = c("light"),dayType=dt, tomin,q_data=q_wearobs)
  q_s_wk <- createQueryIntensity(colName = "intensityEV", intensities = c("sedentary"),dayType=dt, tomin,q_data=q_wearobs)
  #Query: Mean daily total intensity counts (midweek)
  q_MeanActCounts_wk <- paste("SELECT avg(Counts) as ", dt,"MeanActCounts
                          FROM
                          (SELECT day_m2m, sum(axis1) as Counts, weekday FROM
                          (",q_wearobs,") GROUP BY day_m2m)",sep = "")
  #Query: Mean intensity count per minute (midweek)
  q_MeanIntenPerMin_wk <- paste("SELECT avg(axis1)*1.0*",tomin," as ", dt,"MeanIntenPerMin FROM
                             (",q_wearobs,")",sep = "")
  #Extract only obs classified as wear and valid (WEEKEND)
  q_wearobs <- paste("SELECT *
                     FROM weekend as w, data as d JOIN (",q_vd,") as vd
                     WHERE d.activity = 'wear' AND d.day_m2m = vd.day_m2m AND (d.weekday = w.weekend)")
  q_sleepNWobs <- paste("SELECT * FROM weekend as w, data as d JOIN (",q_vd,") as vd
                        WHERE (d.activity = 'sleep' OR d.activity = 'non-wear')
                        AND (d.day_m2m = vd.day_m2m) AND (d.weekday = w.weekend) ")
  dt <- "WKEND"
  #Query: mean wake/wear time per day
  q_MeanWakeWear_wd <- paste("SELECT avg(Time) as ", dt,"MeanWakeWear
                          FROM
                          (SELECT day_m2m, count(axis1)*1.0/",tomin," as Time, weekday FROM
                          (",q_wearobs,") GROUP BY day_m2m)",sep = "")
  #Query: mean sleep and nonwear time per day
  q_MeanSleepNW_wd <- paste("SELECT avg(Time) as ", dt,"MeanSleepNW
                          FROM
                          (SELECT day_m2m, count(axis1)*1.0/",tomin," as Time, weekday FROM
                          (",q_sleepNWobs,") GROUP BY day_m2m)",sep = "")
  #Queries: physical activity intensities (WEEKEND)
  q_mv_wd <- createQueryIntensity(colName = "intensityEV", intensities = c("moderate","vigorous"),dayType=dt, tomin,q_data=q_wearobs)
  q_v_wd <- createQueryIntensity(colName = "intensityEV", intensities = c("vigorous"),dayType=dt, tomin,q_data=q_wearobs)
  q_m_wd <- createQueryIntensity(colName = "intensityEV", intensities = c("moderate"),dayType=dt, tomin,q_data=q_wearobs)
  q_l_wd <- createQueryIntensity(colName = "intensityEV", intensities = c("light"),dayType=dt, tomin,q_data=q_wearobs)
  q_s_wd <- createQueryIntensity(colName = "intensityEV", intensities = c("sedentary"),dayType=dt, tomin,q_data=q_wearobs)
  #Query: Mean daily total intensity counts (weekend)
  q_MeanActCounts_wd <- paste("SELECT avg(Counts) as ", dt,"MeanActCounts
                          FROM
                          (SELECT day_m2m, sum(axis1) as Counts, weekday FROM
                          (",q_wearobs,") GROUP BY day_m2m)",sep = "")
  #Query: Mean intensity count per minute (weekend)
  q_MeanIntenPerMin_wd <- paste("SELECT avg(axis1)*1.0*",tomin," as ", dt,"MeanIntenPerMin FROM
                             (",q_wearobs,")",sep = "")
  #Extract only obs classified as wear and valid (SUNDAY)
  q_wearobs <- paste("SELECT *
                     FROM sunday as s, data as d JOIN (",q_vd,") as vd
                     WHERE d.activity = 'wear' AND d.day_m2m = vd.day_m2m AND (d.weekday = s.sunday)")
  q_sleepNWobs <- paste("SELECT * FROM sunday as s, data as d JOIN (",q_vd,") as vd
                        WHERE (d.activity = 'sleep' OR d.activity = 'non-wear')
                        AND (d.day_m2m = vd.day_m2m) AND (d.weekday = s.sunday) ")
  dt <- "SUN"
  #Query: mean wake/wear time per day
  q_MeanWakeWear_s <- paste("SELECT avg(Time) as ", dt,"MeanWakeWear
                          FROM
                          (SELECT day_m2m, count(axis1)*1.0/",tomin," as Time, weekday FROM
                          (",q_wearobs,") GROUP BY day_m2m)",sep = "")
  #Query: mean sleep and nonwear time per day
  q_MeanSleepNW_s <- paste("SELECT avg(Time) as ", dt,"MeanSleepNW
                          FROM
                          (SELECT day_m2m, count(axis1)*1.0/",tomin," as Time, weekday FROM
                          (",q_sleepNWobs,") GROUP BY day_m2m)",sep = "")
  #Queries: physical activity intensities (SUNDAY)
  q_mv_s <- createQueryIntensity(colName = "intensityEV", intensities = c("moderate","vigorous"),dayType=dt, tomin,q_data=q_wearobs)
  q_v_s <- createQueryIntensity(colName = "intensityEV", intensities = c("vigorous"),dayType=dt, tomin,q_data=q_wearobs)
  q_m_s <- createQueryIntensity(colName = "intensityEV", intensities = c("moderate"),dayType=dt, tomin,q_data=q_wearobs)
  q_l_s <- createQueryIntensity(colName = "intensityEV", intensities = c("light"),dayType=dt, tomin,q_data=q_wearobs)
  q_s_s <- createQueryIntensity(colName = "intensityEV", intensities = c("sedentary"),dayType=dt, tomin,q_data=q_wearobs)
  #Query: Mean daily total intensity counts (sunday)
  q_MeanActCounts_s <- paste("SELECT avg(Counts) as ", dt,"MeanActCounts
                          FROM
                          (SELECT day_m2m, sum(axis1) as Counts, weekday FROM
                          (",q_wearobs,") GROUP BY day_m2m)",sep = "")
  #Query: Mean intensity count per minute (sunday)
  q_MeanIntenPerMin_s <- paste("SELECT avg(axis1)*1.0*",tomin," as ", dt,"MeanIntenPerMin FROM
                             (",q_wearobs,")",sep = "")
  # Combine every scalar sub-query into one SELECT so sqldf runs a single pass
  allqueries <- c(q_nvd,q_nvd_wk,q_nvd_wd,
                  q_MeanWakeWear,q_MeanSleepNW,q_mv,q_v,q_m,q_l,q_s,q_MeanActCounts,q_MeanIntenPerMin,
                  q_MeanWakeWear_wk,q_MeanSleepNW_wk,q_mv_wk,q_v_wk,q_m_wk,q_l_wk,q_s_wk,q_MeanActCounts_wk,q_MeanIntenPerMin_wk,
                  q_MeanWakeWear_wd,q_MeanSleepNW_wd,q_mv_wd,q_v_wd,q_m_wd,q_l_wd,q_s_wd,q_MeanActCounts_wd,q_MeanIntenPerMin_wd,
                  q_MeanWakeWear_s,q_MeanSleepNW_s,q_mv_s,q_v_s,q_m_s,q_l_s,q_s_s,q_MeanActCounts_s,q_MeanIntenPerMin_s)
  allqueries <- sapply(allqueries, FUN = function(i) (paste("(",i,")", sep ="")))
  allqueries <- paste(allqueries, collapse=" , ", sep=" , ")
  q_full <- paste("SELECT * FROM", allqueries)
  obs <- sqldf(q_full)
  # Validity rule: at least 3 valid midweek days and 1 valid weekend day
  valid <- ifelse(obs$valwkdays>=3 && obs$valwkend>=1,1,0)
  valid <- as.data.frame(valid)
  obs <- cbind(Measure,PID,use, valid, obs)
  return(obs)
  # BUGFIX: removed two unreachable `q <- ...` assignments that followed
  # this return in the original (dead code, second overwrote the first).
}
#Read PACK
readPack <- function(name,sheets){
  # Read the xlsx file of the participant checklist (PACK)
  # Args:
  #   name: The name of the file which the data is read from (Complete file path).
  #   sheets: Workbook sheets to be readed.
  #
  # Returns:
  #   A data frame with the pack information (all sheets stacked).
  # Read each sheet once, then bind in a single step; the original grew the
  # data frame with rbind() inside the loop (quadratic copying).
  sheet_list <- lapply(sheets, function(sheet) {
    read.xlsx(name, sheetName = sheet, colClasses = "character",
              startRow = 2, as.data.frame = TRUE,
              stringsAsFactors = FALSE)
  })
  pack <- do.call(rbind, sheet_list)
  #Extract the first digits of the device serial
  pack[,1]<- substr(pack[,1], 1, 5)
  return(pack)
}
#=============================================================================
# Main routine
#=============================================================================
# Input/output locations are relative to the working directory set at the top
# of the script.
inputdir <- ".\\data_MARA"
outputdir <- ".\\output"
outputfile <- paste("COL PA MARA var_",as.Date(Sys.time()),".csv", sep="")
packdir <- "PACK-MARA.xlsx"
sheets <- c("COLEGIO 20 DE JULIO", "COLEGIO MANUELITA SAENZ",
            "COLEGIO MONTEBELLO", "ISCOLE")
# NOTE(review): the assignments below look like interactive debugging
# leftovers — `d` is reassigned five times and then overwritten by the
# for-loop inside main(); they have no effect on the batch run.
d <- "2103A3_10503_011120131sec.agd"
d <- "2210A3_10500_051120131sec.agd"
d <- "2228A3_10459_121120131sec.agd"
d <- "2227A3_10476_051120131sec.agd"
d <- "1117A3_10463_231020131sec.agd"
main <- function(){
  # Batch-processes every .agd accelerometer file in inputdir: quality
  # control against the PACK checklist, sleep/non-wear/wear classification,
  # intensity scoring, and one summary row per file appended to `adata`,
  # which is written to outputdir after every file (checkpointing) and again
  # at the end. Progress/problems are logged to outputdir\log.txt via sink().
  #Time
  tnow <- Sys.time()
  #Final data set
  adata <- data.frame()
  #Get datafiles names
  dataFiles <- dir(inputdir)
  #Read the PACK
  pack <- readPack(packdir,sheets)
  #Open Log output
  # NOTE(review): sink() redirects all console output; if an error escapes
  # this function the caller's tryCatch(finally = sink()) restores it.
  sink(paste(outputdir,"\\log.txt",collapse="",sep=""))
  cat("Log output:\n")
  for (d in dataFiles){
    #Database location
    dbDir <- paste(inputdir,"\\",d,sep="")
    cat(d)
    #Checks file size (skip files under ~8 MB; presumably truncated recordings)
    if(file.info(dbDir)$size/1000<8000){
      cat("...........Wrong file size\n")
    }else{
      #0. Read data form the .agd files (SQLite database)
      db <- readDatabase(dbDir)
      data <- db$data
      settings <- db$settings
      #1. Quality control checks (serial/date consistency with the PACK)
      valid <- qualityControlCheck(dbDir,data, settings, pack)
      epoch <- as.numeric(data[2,1]-data[1,1], units="secs")
      # Only raw 1-second recordings are processed; others are skipped below
      if(epoch == 1){
        #2 Data aggregation (reaggregate 1-sec epochs to 15 and 60 sec)
        data15 <- aggregation(15, data)
        data60 <- aggregation(60, data)
        data15$activity <- rep("wear",nrow(data15)) #Activity (sleep, non-wear, wear/wake)
        data60$activity <- rep("wear",nrow(data60)) #Activity (sleep, non-wear, wear/wake)
        #2.5. Cleaning (Remove last day of data)
        udays <- unique(as.Date(data60$datetime))
        lastday <- max(udays)
        data15 <- removeData(data15,toremove=c(lastday),units="days")
        data60 <- removeData(data60,toremove=c(lastday),units="days")
        #3. Sleep period (detected on the 60-sec series, then labeled)
        sleep <- sleepPeriods(data=data60,sleepOnsetHours= c(19,5), bedTimemin = 5,
                              tolBsleepMatrix = matrix(c(0,24,10),1,3,byrow=T),
                              tolMatrix = matrix(c(0,5,20,
                                                   5,19,10,
                                                   19,24,20),3,3,byrow=T),
                              minSleepTime = 160, scanParam = "axis1",
                              nonWearPSleep = T, nonWearInact = 90, nonWearTol = 2,
                              nonWearscanParam = "axis1",
                              overlap_frac = 0.9)
        data60$activity <- setActivitySNW(data60$activity,label="sleep",intv=sleep$sleep,minlength = 20)
        data60$activity <- setActivitySNW(data60$activity,label="non-wear",intv=sleep$sleepnw, minlength = 20)
        #4. Non-wear period
        nWP <- nonWearPeriods(data60, scanParam="axis1",innactivity=20,tolerance=0) #nonWearPeriods. Innactivity and tolerance in minutes
        data60$activity <- setActivitySNW(data60$activity,label="non-wear",nWP, minlength = 20)
        #5. Wear periods (spike cleanup on remaining wear epochs)
        data60 <- checkWearPeriods(data60,maxc = 20000)
        #6. Cleaning (Remove last day of data and more than 7 days of data)
        udays <- unique(as.Date(data60$datetime))
        firstday <- min(udays)
        lastday <- max(udays)
        validdays <- seq(firstday,firstday+6,by=1)
        daystoremove <- udays[!(udays%in%validdays)]
        data15 <- removeData(data15,toremove=c(daystoremove),units="days")
        data60 <- removeData(data60,toremove=c(daystoremove),units="days")
        #7. Add intensity physical activity (Evenson cut points per epoch)
        data15 <- mergingActivity(data15,data60)
        data15$intensityEV <- mapply(cut_points_evenson,epoch=15, data15$axis1)
        data60$intensityEV <- mapply(cut_points_evenson,epoch=60, data60$axis1)
        #8. Get the datafile observation for the final data frame(only wear periods)
        ob <- getobs(dbDir,data15, timeunit="min")
        #Copies the final data frame in the clipboard
        #write.csv(ob,file="clipboard", row.names=F)
        adata<-rbind(adata,ob)
        # Checkpoint: the cumulative output file is rewritten after every file
        write.csv(adata,file=paste(outputdir,"\\",outputfile, sep="",collapse=""), row.names=F)
        # NOTE(review): `valid` is only reported here; invalid files are still
        # included in the output data set.
        if(valid==T){
          cat("........OK\n")
        }else{
          cat("........INVALID\n")
        }
      }else{
        cat("........SKIPPED (Wrong epoch) \n")
      }
    }
  }
  sink()
  write.csv(adata,file=paste(outputdir,"\\",outputfile, sep="",collapse=""), row.names=F)
  print(paste("Total time:", as.numeric(Sys.time()-tnow,units="mins")," mins"))
}
#=============================================================================
# Run the whole batch pipeline; the finally clause tries to close the log
# diversion even if main() aborts mid-run.
# NOTE(review): when main() completes normally it already calls sink() itself,
# so this second sink() may signal "no sink to remove" -- confirm intended.
tryCatch(main(),finally=sink())
#=============================================================================
# ---------------------------------------------------------------------------
# Ad-hoc exploratory plotting script: compares the smoothed activity counts of
# two weekdays between 7:00 and 13:00. Relies on the global data60 left over
# from a run of main(), and on chron helpers hours()/times()/as.chron()
# -- assumes the chron package is attached; TODO confirm.
# ---------------------------------------------------------------------------
data60$activity <- as.factor(data60$activity)
data60$intensityEV <- as.factor(data60$intensityEV)
# weekdays() is locale dependent; day1/day2 below are Spanish day names.
data60$weekday <- weekdays(data60$datetime)
data60$hour <- hours(data60$datetime)
# Clock time of each epoch (HH:MM:SS part of the chron representation).
data60$time <- times(substr(as.character(as.chron(data60$datetime)),11,18))
# Quick interactive checks: day <-> weekday mapping and activity tabulation.
data.frame(unique(data60$day_m2m),weekdays(unique(data60$day_m2m)))
table(data60$activity, weekdays(data60$day_m2m))
par(mfrow=c(1,1))
# Centered moving average of width 2*n+1 over axis1.
# NOTE(review): the result vector grows inside the loop (O(n^2)) and the last
# iteration indexes one past the end, so the final smoothed value is NA.
n <- 1
smoothed <- c()
for(i in 1:nrow(data60)){
smoothed <- c(smoothed,mean(data60$axis1[(i-n):(i+n)]))
}
data60$smooth <- smoothed
# Plot window (clock time) and the two weekdays to compare.
stime <- times("7:00:00")
etime <- times("13:00:00")
day1 <- "jueves"
day2 <- "viernes"
dd <- subset(data60, data60$hour >= hours(stime) & data60$hour<hours(etime) & data60$weekday == day1)
dd2 <- subset(data60, data60$hour >=hours(stime) & data60$hour<hours(etime) & data60$weekday == day2)
# Smoothed counts for both days in one panel.
# NOTE(review): rgb(166/250, ...) is presumably a typo for 166/255 -- confirm.
plot(dd$time,dd$smooth, type="l", lwd=3,col=rgb(166/250,34/255,30/255),
xlab="Time", ylab="Counts per minute (cpm)",
xlim = c(stime,etime),ylim=c(0,6000), axes = F)
# Reference lines for the intensity cut points (counts per minute).
abline(h=c(100,573*4,1002*4), col = "gray", lty = "dotted",lwd = 3)
text(x=rep(stime-times("00:10:00"),3),y=c(573*4,1002*4,6150)-150,adj = c(0,1),
labels=c("Light","Moderate","Vigorous"))
lines(dd$time,dd$smooth,lwd=3,col=rgb(166/250,34/255,30/255))
lines(dd2$time,dd2$smooth,lwd=3,col=rgb(49/250,76/255,117/255))
axis(side=1,at=seq(stime,etime,times("01:00:00")),
labels= paste(hours(stime):hours(etime),":00",sep=""))
axis(2)
legend(x= c(etime-times("01:15:00")*etime/times("14:00:00"),etime-times("00:30:00")*etime/times("14:00:00")),y=c(4800,6000),
legend=c("Thursday","Friday"),lty=1, lwd=3,
col=c(rgb(166/250,34/255,30/255),rgb(49/250,76/255,117/255)),seg.len = 0.5,
cex=1, x.intersp=0.1, xjust=0, yjust=0, bty="n")
box()
#===========================================================================
#===========================================================================
# Same comparison as the block above, zoomed into the 9:00-10:00 window.
# Relies on data60$smooth computed by the previous script section.
stime <- times("9:00:00")
etime <- times("10:00:00")
day1 <- "jueves"
day2 <- "viernes"
dd <- subset(data60, data60$hour >= hours(stime) & data60$hour<hours(etime) & data60$weekday == day1)
dd2 <- subset(data60, data60$hour >=hours(stime) & data60$hour<hours(etime) & data60$weekday == day2)
# NOTE(review): rgb(166/250, ...) is presumably a typo for 166/255 -- confirm.
plot(dd$time,dd$smooth, type="l", lwd=3,col=rgb(166/250,34/255,30/255),
xlab="Time", ylab="Counts per minute (cpm)",
xlim = c(stime,etime),ylim=c(0,6000), axes = F)
# Reference lines for the intensity cut points (counts per minute).
abline(h=c(100,573*4,1002*4), col = "gray", lty = "dotted",lwd = 3)
text(x=rep(stime-times("00:02:00"),3),y=c(573*4,1002*4,6150)-150,adj = c(0,1),
labels=c("Light","Moderate","Vigorous"))
lines(dd$time,dd$smooth,lwd=3,col=rgb(166/250,34/255,30/255))
lines(dd2$time,dd2$smooth,lwd=3,col=rgb(49/250,76/255,117/255))
axis(side=1,at=seq(stime,etime,times("00:15:00")),
labels= seq(stime,etime,times("00:15:00")))
axis(2)
legend(x= c(etime-times("00:00:00"),etime-times("00:15:00")*etime/times("14:00:00")),y=c(4800,6000),
legend=c("Thursday","Friday"),lty=1, lwd=3,
col=c(rgb(166/250,34/255,30/255),rgb(49/250,76/255,117/255)),seg.len = 0.5,
cex=1, x.intersp=0.1, xjust=0, yjust=0, bty="n")
box()
#=====================================================================00
# Smooth and plot the raw 1-second data frame `data` (global left over from a
# run of main()). Same moving-average caveats as above: the loop grows the
# vector (O(n^2)) and the last value is NA from indexing past the end.
n <- 1
smoothed <- c()
for(i in 1:nrow(data)){
cat(i, "\n")
smoothed <- c(smoothed,mean(data$axis1[(i-n):(i+n)]))
}
data$smooth <- smoothed
dd<- data
plot(dd$datetime,dd$smooth, type="l", lwd=1,col=rgb(166/250,34/255,30/255),
xlab="Date/Time", ylab="Counts per second (cps)")
| /MARA_ac_protocol.R | no_license | mabolivar/Physical_Activity_R | R | false | false | 30,648 | r | #==============================================================================
# Working directory
#==============================================================================
setwd("C:\\Users\\ma.bolivar643\\Dropbox\\Accelerometria\\MARA")
#==============================================================================
# Required libraries
#==============================================================================
library("RSQLite")
library("sqldf")
library("xlsx")
library("Hmisc")
library("scales")
source(".\\R\\ADPP.R")
#==============================================================================
# Functions
#==============================================================================
##Compares the dates in the .agd file against the PACK
qualityControlCheck <- function(dbDir,data, settings, pack){
  # Compares the dates/serial encoded in the .agd file name against the PACK
  # (participant checklist) and against the device settings stored in the file.
  #
  # Args:
  #   dbDir: Path of the .agd file the data was read from. The file name is
  #          expected to look like "<id><use><measure>_<serial>_<date>...",
  #          e.g. "1117A3_10463_231020131sec.agd" (Windows-style path).
  #   data: Data frame with accelerometry data (datetime, axis1, axis2, ...).
  #   settings: Data frame with accelerometer settings (settingName,
  #             settingValue), including a "deviceserial" row whose value
  #             carries the serial in characters 9-13.
  #   pack: Participant checklist. By position: col 1/9 = device serial
  #         (use A/B), col 2/10 = initialization day, col 6/14 = retrieval
  #         day; plus an ID.Participante column.
  #
  # Returns:
  #   Logical. TRUE if the info in the file is consistent with the PACK.
  valid <- TRUE
  # Extract participant id, use (A = first wear, B = second wear) and the
  # accelerometer serial from the file name.
  m<-regexec("\\\\[[:print:]]+\\\\([[:digit:]]+)([A-Z])([[:digit:]]).([[:digit:]]+).([[:digit:]]+)",dbDir)
  use <- regmatches(dbDir, m)[[1]][3]       #If it is the first time of use -> A, else B.
  id <- regmatches(dbDir, m)[[1]][2]        #Participant ID
  acserial <- regmatches(dbDir, m)[[1]][5]  #Accelerometer serial number
  row <- match(id,table=pack$ID.Participante,nomatch=-1) #row in the PACK
  days <- unique(as.Date(data$datetime))
  if(row == -1){
    # BUG FIX: the original code indexed pack[row, ] with row == -1 *before*
    # this guard, which crashed for participants missing from the PACK.
    cat(paste("\nParticipant ",as.character(id)," doesn't have a correponding record in the PACK", sep=" "))
    valid <- FALSE
  }else{
    # Serial in the file name vs. serial registered in the PACK.
    packserial <- ifelse(use == "A",pack[row, 1],pack[row, 9])
    if(acserial != packserial){
      cat("\nThe serial contained in the pack mismatch the serial in the .agd file")
      valid <- FALSE
    }
    # Serial in the file name vs. serial stored in the .agd settings.
    dsindx <- match("deviceserial",settings$settingName)
    if(acserial != substr(settings$settingValue[dsindx], 9,13)){
      cat("\nThe serial contained in the file name mismatch the serial in the .agd file")
      valid <- FALSE
    }
    # First recorded day must equal the initialization day in the PACK.
    if(min(days)!=ifelse(use == "A",pack[row, 2],pack[row, 10])){
      cat("\nInitialization day mismatch the initialization day in the pack")
      cat(paste("\n\tInitialization day .agd: ",min(days), sep=" "))
      cat(paste("\n\tInitialization day PACK: ",(as.Date("1970-01-01")+ifelse(use == "A",pack[row, 2],pack[row, 10])), sep=" "))
      valid <- FALSE
    }
    # The last recorded day must fall after the 7-day collection window.
    if(max(days)<=ifelse(use == "A",pack[row, 2],pack[row, 10])+6){
      cat("\nRetrieval day is within the 7 days of data collection")
      cat(paste("\n\tRetrieval day .agd: ",max(days), sep=" "))
      cat(paste("\n\tRetrieval day PACK: ",(as.Date("1970-01-01")+ifelse(use == "A",pack[row, 6],pack[row, 14])), sep=" "))
      valid <- FALSE
    }
  }
  return (valid)
}
#Checks for unusual values in wear labeled observation
checkWearPeriods <- function(data,maxc = 20000){
  # Checks for unusual count values in wear-labeled observations and
  # classifies the leading run of "wear" epochs of the recording as "sleep"
  # (at most the first 160 epochs; the original comment said "20 minutes",
  # which does not match the 160 cap -- TODO confirm intended limit).
  #
  # Args:
  #   data: Data frame with accelerometry data (axis1, ...) plus an
  #         activity column ("sleep", "non-wear", "wear").
  #   maxc: Maximum plausible count per epoch; larger values are treated as
  #         artifacts and relabeled using the neighboring epochs.
  #
  # Returns:
  #   The data frame with the updated activity column.
  i <- 1
  # Guard on i first so short recordings do not index past the last row
  # (the original order crashed when nrow(data) < 160 and all rows were "wear").
  while(i <= 160 && i <= nrow(data) && data$activity[i] == "wear"){
    data$activity[i] <- "sleep"
    i <- i + 1
  }
  # Relabel implausible spikes, inheriting "sleep" from either neighbor.
  # BUG FIX: the original loop used seq(2:(nrow(data)-1)), which iterates
  # over 1:(nrow(data)-2) -- it read index 0 at i = 1 and never visited the
  # next-to-last row. The intended range is 2:(nrow(data)-1).
  if(nrow(data) >= 3){
    for(i in 2:(nrow(data) - 1)){
      if(data$axis1[i] > maxc){
        if(data$activity[i - 1] == "sleep" || data$activity[i + 1] == "sleep"){
          data$activity[i] <- "sleep"
        }else{
          data$activity[i] <- "non-wear"
        }
      }
    }
  }
  return(data)
}
#Set activity in each epoch
setActivitySNW <- function(x,label,intv, minlength=20){
  # Relabels the "wear" epochs that fall inside the given intervals.
  #
  # Args:
  #   x: Character vector of activity labels, one per epoch.
  #   label: Label to assign to the wear epochs ("sleep" or "non-wear").
  #   intv: Matrix (or vector coercible to a 2-column matrix) whose rows are
  #         [start, end] epoch indices of the intervals to relabel.
  #   minlength: Minimum number of "wear" epochs inside an interval required
  #              before any relabeling is applied to it.
  #
  # Returns:
  #   The updated activity vector.
  bounds <- matrix(intv, ncol = 2)
  for (k in seq_len(nrow(bounds))) {
    lo <- bounds[k, 1]
    hi <- bounds[k, 2]
    segment <- x[lo:hi]
    is_wear <- segment == "wear"
    # Only relabel intervals with enough uninterrupted wear evidence.
    if (sum(is_wear) >= minlength) {
      segment[is_wear] <- label
      x[lo:hi] <- segment
    }
  }
  return(x)
}
#Left joins the 15sec data frame with 60secc data frame (i.e. adds data15 data frame the activity column)
mergingActivity <- function(data15,data60){
# Left joins the 15-sec data frame with the 60-sec data frame, i.e. copies the
# per-minute activity labels onto each 15-sec epoch of the same minute.
#
# Args:
#   data15: Accelerometry data frame aggregated in 15 sec epochs.
#   data60: Accelerometry data frame aggregated in 60 sec epochs plus the
#           activity column ("sleep","non-wear","wear").
#
# Returns:
#   The 15-sec data frame plus the activity column ("sleep","non-wear","wear").
#
# Warnings are silenced around trunc.POSIXt; presumably to hide timezone
# truncation warnings -- TODO confirm why this was needed.
options(warn=-1)
# Minute each 15-sec epoch belongs to; used as the join key against
# data60$datetime below.
data15$minute <- tryCatch(as.POSIXct(trunc.POSIXt(data15$datetime,units ="mins")))
options(warn=0)
q <- "SELECT d15.datetime, d15.axis1, d15.axis2, d15.axis3, d15.steps,
d15.lux, d15.incline, d60.activity,d15.day_n2n, d15.day_m2m
FROM data15 as d15 LEFT JOIN data60 as d60
ON d15.minute = d60.datetime"
# sqldf(method = "raw") returns raw column types, so datetime/date columns
# come back numeric and are rebuilt below.
data15 <- (sqldf(q, method ="raw"))
data15$datetime <- as.POSIXct(data15$datetime,origin="1970-01-01 00:00:00", tz = "GMT")
data15$day_n2n <- as.Date(data15$day_n2n,origin="1970-01-01 00:00:00")
data15$day_m2m <- as.Date(data15$day_m2m,origin="1970-01-01 00:00:00")
return(data15)
}
#Extracts a substring from a string, starting from the left-most character.
substrRight <- function(x, n){
  # Returns the trailing substring of a string.
  #
  # Args:
  #   x: The string to extract from.
  #   n: Number of characters to take, counted from the right-most character.
  #
  # Returns:
  #   The last n characters of x.
  total <- nchar(x)
  substr(x, total - n + 1, total)
}
createQueryIntensity <- function(colName = "intensityEV", intensities = c("moderate","vigorous"), dayType = "all",tomin = 4, q_data){
# Builds a SQL query to extract the average daily minutes and counts for a
# given set of intensity levels from a given subquery "q_data".
# Args:
#   colName: Column which contains the physical intensity levels (i.e.
#            vigorous, moderate, light, sedentary). Only its LAST TWO
#            characters (e.g. "EV") are used to name the output columns and
#            the WHERE column, so the subquery must expose intensity<suffix>.
#   intensities: Physical intensity levels to be taken into account.
#   dayType: Prefix indicating which days are used ("all", "WKDAY", ...);
#            must match the q_data subquery.
#   tomin: Ratio between the desired output epoch and the actual epoch
#          (e.g. 15-sec epochs reported per minute -> 60/15 = 4).
#   q_data: SQL string selecting the observations to summarise (e.g. midweek,
#           weekend or all-days wear observations).
#
# Returns:
#   Query string.
cutsname <- substrRight(colName,2)
# One "(intensityXX = '<level>')" clause per requested level, OR-joined.
WHEREcond <- sapply(intensities, FUN = function(i) (paste("(intensity",cutsname," = '",i,"')",sep = "")))
WHEREcond <- paste(WHEREcond, collapse = " OR ")
# Output column suffix: first letter of each level, e.g. c("moderate",
# "vigorous") -> "mv".
ints_name <- sapply(intensities, substr,1,1)
ints_name <- paste(ints_name, collapse = "", sep ="")
# NOTE(review): the inner alias is countsXX but the outer query averages
# countsEV -- this only works while colName ends in "EV"; confirm.
q <- paste("SELECT avg(duration",cutsname,") as ",dayType,"mean_",ints_name,"_",cutsname,", avg(countsEV) as ",dayType,"mean_cnt",ints_name,"_",cutsname,"
FROM
(SELECT day_m2m, count(axis1)*1.0/",tomin," as duration",cutsname,", sum(axis1) as counts",cutsname,"
FROM (",q_data,")
WHERE ",WHEREcond,"
GROUP BY day_m2m)", sep ="")
return(q)
}
# Builds a set of queries to extract physical activity related variables given a accelerometry data frame.
getobs <- function(dbDir, data, timeunit = "min"){
# Builds a set of SQL queries to extract physical-activity summary variables
# from an accelerometry data frame and evaluates them with sqldf.
# Args:
#   dbDir: The name of the file which the data were read from; the
#          participant id, measure and use (A/B) are parsed from it.
#   data: Accelerometry data frame plus activity and physical intensity
#         columns (day_m2m, axis1, activity, intensityEV, ...).
#   timeunit: Time unit for the time-derived variables ('sec','min','hour').
#
# Returns:
#   A one-row data frame: id columns, a validity flag (>=3 valid midweek days
#   and >=1 valid weekend day) and, for all days / midweek / weekend / Sunday,
#   mean wear time, sleep+non-wear time, intensity durations and counts.
#Default time unit
tu <- 60
if(timeunit =="sec"){
tu <- 1
}else if(timeunit == "min"){
tu <- 60
}else if(timeunit == "hour"){
tu <- 3600
}else{
print("Wrong time unit. Valid parameters: 'sec','min','hour'")
print("default time unit:'min'")
}
#Epoch length in seconds and epochs-per-time-unit conversion factor
epoch <- as.double(data[2,1])-as.double(data[1,1])
tomin <- as.numeric(tu/epoch)*1.0
#NOTE(review): weekdays() output is locale dependent; the fixed 2013-07-13/14
#dates are only used to obtain the local names of Saturday and Sunday.
data$weekday <- weekdays(data$day_m2m, abbreviate=T)
saturday <- weekdays(as.Date(c("2013-07-13")),abbreviate=T)
saturday <- as.data.frame(saturday)
sunday <- weekdays(as.Date(c("2013-07-14")),abbreviate=T)
sunday <- as.data.frame(sunday)
weekend <- weekdays(as.Date(c("2013-07-13","2013-07-14")),abbreviate=T) #Saturday and sunday
midweek <- unique(data$weekday)
midweek <- subset(midweek, subset= !(midweek%in%weekend) )
#Single-column data frames so sqldf can use them as lookup tables.
weekend <- as.data.frame(weekend)
midweek <- as.data.frame(midweek)
#Extract the participant id
m<-regexec("\\\\[[:print:]]+\\\\([[:digit:]]+)([A-Z])([[:digit:]]).([[:digit:]]+).([[:digit:]]+)",dbDir)
PID <- regmatches(dbDir, m)[[1]][2]
PID <- as.data.frame(PID)
Measure <- regmatches(dbDir, m)[[1]][4]
Measure <- as.data.frame(Measure)
use <- regmatches(dbDir, m)[[1]][3]
use <- as.data.frame(use)
#Query: Wearing time (min) per day
q_wtpd <- paste("SELECT day_m2m, count(axis1)*1.0/",tomin," as wearTime, weekday
FROM data WHERE activity = 'wear'
GROUP BY day_m2m", sep="")
#Query: valid days (>= 600 minutes of wear time)
q_vd <- paste("SELECT day_m2m, weekday FROM (",q_wtpd,") WHERE wearTime >= 600")
#Query: number of valid days
q_nvd <- paste("SELECT count(day_m2m) as valdays FROM (",q_vd,")")
#Query: number of valid days (midweek)
q_nvd_wk <- paste("SELECT count(day_m2m) as valwkdays FROM (",q_vd,"), midweek WHERE weekday = midweek")
#Query: number of valid days (weekend)
q_nvd_wd <- paste("SELECT count(day_m2m) as valwkend FROM (",q_vd,"), weekend WHERE weekday = weekend")
#Extract only obs classified as wear and valid
q_wearobs <- paste("SELECT * FROM data as d JOIN (",q_vd,") as vd
WHERE d.activity = 'wear' AND d.day_m2m = vd.day_m2m ")
q_sleepNWobs <- paste("SELECT * FROM data as d JOIN (",q_vd,") as vd
WHERE (d.activity = 'sleep' OR d.activity = 'non-wear') AND d.day_m2m = vd.day_m2m ")
dt <- "all"
#Query: mean wake/wear time per day
q_MeanWakeWear <- paste("SELECT avg(Time) as ", dt,"MeanWakeWear
FROM
(SELECT day_m2m, count(axis1)*1.0/",tomin," as Time, weekday FROM
(",q_wearobs,") GROUP BY day_m2m)",sep = "")
#Query: mean sleep and nonwear time per day
q_MeanSleepNW <- paste("SELECT avg(Time) as ", dt,"MeanSleepNW
FROM
(SELECT day_m2m, count(axis1)*1.0/",tomin," as Time, weekday FROM
(",q_sleepNWobs,") GROUP BY day_m2m)",sep = "")
#Queries: physical activity intensities
q_mv <- createQueryIntensity(colName = "intensityEV", intensities = c("moderate","vigorous"),dayType=dt,tomin, q_data=q_wearobs)
q_v <- createQueryIntensity(colName = "intensityEV", intensities = c("vigorous"),dayType=dt,tomin, q_data=q_wearobs)
q_m <- createQueryIntensity(colName = "intensityEV", intensities = c("moderate"),dayType=dt,tomin, q_data=q_wearobs)
q_l <- createQueryIntensity(colName = "intensityEV", intensities = c("light"),dayType=dt,tomin, q_data=q_wearobs)
q_s <- createQueryIntensity(colName = "intensityEV", intensities = c("sedentary"),dayType=dt,tomin, q_data=q_wearobs)
#Query: Mean daily total intensity counts
q_MeanActCounts <- paste("SELECT avg(Counts) as ", dt,"MeanActCounts
FROM
(SELECT day_m2m, sum(axis1) as Counts, weekday FROM
(",q_wearobs,") GROUP BY day_m2m)",sep = "")
#Query: Mean intensity count per minute
q_MeanIntenPerMin <- paste("SELECT avg(axis1)*1.0*",tomin," as ", dt,"MeanIntenPerMin FROM
(",q_wearobs,")",sep = "")
#Extract only obs classified as wear and valid (MIDWEEK)
q_wearobs <- paste("SELECT *
FROM midweek as w, data as d JOIN (",q_vd,") as vd
WHERE d.activity = 'wear' AND d.day_m2m = vd.day_m2m AND (d.weekday = w.midweek)")
q_sleepNWobs <- paste("SELECT *
FROM midweek as w,data as d JOIN (",q_vd,") as vd
WHERE (d.activity = 'sleep' OR d.activity = 'non-wear')
AND (d.day_m2m = vd.day_m2m) AND (d.weekday = w.midweek) ")
dt <- "WKDAY"
#Query: mean wake/wear time per day
q_MeanWakeWear_wk <- paste("SELECT avg(Time) as ", dt,"MeanWakeWear
FROM
(SELECT day_m2m, count(axis1)*1.0/",tomin," as Time, weekday FROM
(",q_wearobs,") GROUP BY day_m2m)",sep = "")
#Query: mean sleep and nonwear time per day
q_MeanSleepNW_wk <- paste("SELECT avg(Time) as ", dt,"MeanSleepNW
FROM
(SELECT day_m2m, count(axis1)*1.0/",tomin," as Time, weekday FROM
(",q_sleepNWobs,") GROUP BY day_m2m)",sep = "")
#Queries: physical activity intensities (MIDWEEK)
q_mv_wk <- createQueryIntensity(colName = "intensityEV", intensities = c("moderate","vigorous"),dayType=dt, tomin,q_data=q_wearobs)
q_v_wk <- createQueryIntensity(colName = "intensityEV", intensities = c("vigorous"),dayType=dt, tomin,q_data=q_wearobs)
q_m_wk <- createQueryIntensity(colName = "intensityEV", intensities = c("moderate"),dayType=dt, tomin,q_data=q_wearobs)
q_l_wk <- createQueryIntensity(colName = "intensityEV", intensities = c("light"),dayType=dt, tomin,q_data=q_wearobs)
q_s_wk <- createQueryIntensity(colName = "intensityEV", intensities = c("sedentary"),dayType=dt, tomin,q_data=q_wearobs)
#Query: Mean daily total intensity counts (midweek)
q_MeanActCounts_wk <- paste("SELECT avg(Counts) as ", dt,"MeanActCounts
FROM
(SELECT day_m2m, sum(axis1) as Counts, weekday FROM
(",q_wearobs,") GROUP BY day_m2m)",sep = "")
#Query: Mean intensity count per minute (midweek)
q_MeanIntenPerMin_wk <- paste("SELECT avg(axis1)*1.0*",tomin," as ", dt,"MeanIntenPerMin FROM
(",q_wearobs,")",sep = "")
#Extract only obs classified as wear and valid (WEEKEND)
q_wearobs <- paste("SELECT *
FROM weekend as w, data as d JOIN (",q_vd,") as vd
WHERE d.activity = 'wear' AND d.day_m2m = vd.day_m2m AND (d.weekday = w.weekend)")
q_sleepNWobs <- paste("SELECT * FROM weekend as w, data as d JOIN (",q_vd,") as vd
WHERE (d.activity = 'sleep' OR d.activity = 'non-wear')
AND (d.day_m2m = vd.day_m2m) AND (d.weekday = w.weekend) ")
dt <- "WKEND"
#Query: mean wake/wear time per day
q_MeanWakeWear_wd <- paste("SELECT avg(Time) as ", dt,"MeanWakeWear
FROM
(SELECT day_m2m, count(axis1)*1.0/",tomin," as Time, weekday FROM
(",q_wearobs,") GROUP BY day_m2m)",sep = "")
#Query: mean sleep and nonwear time per day
q_MeanSleepNW_wd <- paste("SELECT avg(Time) as ", dt,"MeanSleepNW
FROM
(SELECT day_m2m, count(axis1)*1.0/",tomin," as Time, weekday FROM
(",q_sleepNWobs,") GROUP BY day_m2m)",sep = "")
#Queries: physical activity intensities (WEEKEND)
q_mv_wd <- createQueryIntensity(colName = "intensityEV", intensities = c("moderate","vigorous"),dayType=dt, tomin,q_data=q_wearobs)
q_v_wd <- createQueryIntensity(colName = "intensityEV", intensities = c("vigorous"),dayType=dt, tomin,q_data=q_wearobs)
q_m_wd <- createQueryIntensity(colName = "intensityEV", intensities = c("moderate"),dayType=dt, tomin,q_data=q_wearobs)
q_l_wd <- createQueryIntensity(colName = "intensityEV", intensities = c("light"),dayType=dt, tomin,q_data=q_wearobs)
q_s_wd <- createQueryIntensity(colName = "intensityEV", intensities = c("sedentary"),dayType=dt, tomin,q_data=q_wearobs)
#Query: Mean daily total intensity counts (weekend)
q_MeanActCounts_wd <- paste("SELECT avg(Counts) as ", dt,"MeanActCounts
FROM
(SELECT day_m2m, sum(axis1) as Counts, weekday FROM
(",q_wearobs,") GROUP BY day_m2m)",sep = "")
#Query: Mean intensity count per minute (weekend)
q_MeanIntenPerMin_wd <- paste("SELECT avg(axis1)*1.0*",tomin," as ", dt,"MeanIntenPerMin FROM
(",q_wearobs,")",sep = "")
#Extract only obs classified as wear and valid (SUNDAY)
q_wearobs <- paste("SELECT *
FROM sunday as s, data as d JOIN (",q_vd,") as vd
WHERE d.activity = 'wear' AND d.day_m2m = vd.day_m2m AND (d.weekday = s.sunday)")
q_sleepNWobs <- paste("SELECT * FROM sunday as s, data as d JOIN (",q_vd,") as vd
WHERE (d.activity = 'sleep' OR d.activity = 'non-wear')
AND (d.day_m2m = vd.day_m2m) AND (d.weekday = s.sunday) ")
dt <- "SUN"
#Query: mean wake/wear time per day
q_MeanWakeWear_s <- paste("SELECT avg(Time) as ", dt,"MeanWakeWear
FROM
(SELECT day_m2m, count(axis1)*1.0/",tomin," as Time, weekday FROM
(",q_wearobs,") GROUP BY day_m2m)",sep = "")
#Query: mean sleep and nonwear time per day
q_MeanSleepNW_s <- paste("SELECT avg(Time) as ", dt,"MeanSleepNW
FROM
(SELECT day_m2m, count(axis1)*1.0/",tomin," as Time, weekday FROM
(",q_sleepNWobs,") GROUP BY day_m2m)",sep = "")
#Queries: physical activity intensities (SUNDAY)
q_mv_s <- createQueryIntensity(colName = "intensityEV", intensities = c("moderate","vigorous"),dayType=dt, tomin,q_data=q_wearobs)
q_v_s <- createQueryIntensity(colName = "intensityEV", intensities = c("vigorous"),dayType=dt, tomin,q_data=q_wearobs)
q_m_s <- createQueryIntensity(colName = "intensityEV", intensities = c("moderate"),dayType=dt, tomin,q_data=q_wearobs)
q_l_s <- createQueryIntensity(colName = "intensityEV", intensities = c("light"),dayType=dt, tomin,q_data=q_wearobs)
q_s_s <- createQueryIntensity(colName = "intensityEV", intensities = c("sedentary"),dayType=dt, tomin,q_data=q_wearobs)
#Query: Mean daily total intensity counts (Sunday)
q_MeanActCounts_s <- paste("SELECT avg(Counts) as ", dt,"MeanActCounts
FROM
(SELECT day_m2m, sum(axis1) as Counts, weekday FROM
(",q_wearobs,") GROUP BY day_m2m)",sep = "")
#Query: Mean intensity count per minute (Sunday)
q_MeanIntenPerMin_s <- paste("SELECT avg(axis1)*1.0*",tomin," as ", dt,"MeanIntenPerMin FROM
(",q_wearobs,")",sep = "")
#Combine all sub-queries into a single one-row SELECT and evaluate it.
allqueries <- c(q_nvd,q_nvd_wk,q_nvd_wd,
q_MeanWakeWear,q_MeanSleepNW,q_mv,q_v,q_m,q_l,q_s,q_MeanActCounts,q_MeanIntenPerMin,
q_MeanWakeWear_wk,q_MeanSleepNW_wk,q_mv_wk,q_v_wk,q_m_wk,q_l_wk,q_s_wk,q_MeanActCounts_wk,q_MeanIntenPerMin_wk,
q_MeanWakeWear_wd,q_MeanSleepNW_wd,q_mv_wd,q_v_wd,q_m_wd,q_l_wd,q_s_wd,q_MeanActCounts_wd,q_MeanIntenPerMin_wd,
q_MeanWakeWear_s,q_MeanSleepNW_s,q_mv_s,q_v_s,q_m_s,q_l_s,q_s_s,q_MeanActCounts_s,q_MeanIntenPerMin_s)
allqueries <- sapply(allqueries, FUN = function(i) (paste("(",i,")", sep ="")))
allqueries <- paste(allqueries, collapse=" , ", sep=" , ")
q_full <- paste("SELECT * FROM", allqueries)
obs <- sqldf(q_full)
#A participant is valid with >= 3 valid midweek days and >= 1 valid weekend day.
valid <- ifelse(obs$valwkdays>=3 && obs$valwkend>=1,1,0)
valid <- as.data.frame(valid)
obs <- cbind(Measure,PID,use, valid, obs)
return(obs)
# NOTE: unreachable scratch queries that followed this return in the original
# version were removed.
}
#Read PACK
readPack <- function(name,sheets){
# Read the xlsx file of the participant checklist (PACK).
# Args:
#   name: The name of the file which the data is read from (complete file path).
#   sheets: Workbook sheets to be read; all sheets are stacked into one
#           data frame, so they are assumed to share the same columns
#           -- rbind() fails otherwise.
#
# Returns:
#   A data frame with the pack information.
pack <- data.frame()
for(sheet in sheets){
pack <- rbind(pack, (read.xlsx(name,sheetName=sheet,colClasses= "character",
startRow=2,as.data.frame=T,
stringsAsFactors=F)))
}
#Keep only the first 5 characters of the device serial (column 1), to match
#the serial parsed from the .agd file names.
pack[,1]<- substr(pack[,1], 1, 5)
return(pack)
}
#=============================================================================
# Main routine: input/output locations used by main() below.
#=============================================================================
inputdir <- ".\\data_MARA"
outputdir <- ".\\output"
# Output file name is stamped with the run date.
outputfile <- paste("COL PA MARA var_",as.Date(Sys.time()),".csv", sep="")
packdir <- "PACK-MARA.xlsx"
# PACK workbook sheets to stack (one per school).
sheets <- c("COLEGIO 20 DE JULIO", "COLEGIO MANUELITA SAENZ",
"COLEGIO MONTEBELLO", "ISCOLE")
# NOTE(review): the assignments below look like interactive-debugging
# leftovers; only the last one survives, and `d` is reassigned by the loop
# inside main() anyway.
d <- "2103A3_10503_011120131sec.agd"
d <- "2210A3_10500_051120131sec.agd"
d <- "2228A3_10459_121120131sec.agd"
d <- "2227A3_10476_051120131sec.agd"
d <- "1117A3_10463_231020131sec.agd"
main <- function(){
# Batch driver: reads every .agd file in inputdir, runs the full processing
# pipeline (quality control -> aggregation -> sleep/non-wear/wear labeling ->
# intensity cut points -> per-participant summary) and appends one summary
# row per file to the output CSV. Progress is logged to <outputdir>\log.txt
# through sink(). Relies on module-level inputdir/outputdir/outputfile/
# packdir/sheets and on helpers from ADPP.R (readDatabase, aggregation,
# removeData, sleepPeriods, nonWearPeriods, cut_points_evenson).
#Time
tnow <- Sys.time()
#Final data set
adata <- data.frame()
#Get datafiles names
dataFiles <- dir(inputdir)
#Read the PACK
pack <- readPack(packdir,sheets)
#Open Log output (all cat() output below goes to the log file)
sink(paste(outputdir,"\\log.txt",collapse="",sep=""))
cat("Log output:\n")
for (d in dataFiles){
#Database location
dbDir <- paste(inputdir,"\\",d,sep="")
cat(d)
#Checks file size: files under ~8 MB cannot hold a full recording.
if(file.info(dbDir)$size/1000<8000){
cat("...........Wrong file size\n")
}else{
#0. Read data from the .agd files (SQLite database)
db <- readDatabase(dbDir)
data <- db$data
settings <- db$settings
#1. Quality control checks
valid <- qualityControlCheck(dbDir,data, settings, pack)
#Epoch length in seconds; only 1-sec recordings are processed.
epoch <- as.numeric(data[2,1]-data[1,1], units="secs")
if(epoch == 1){
#2 Data aggregation (15-sec and 60-sec epochs)
data15 <- aggregation(15, data)
data60 <- aggregation(60, data)
data15$activity <- rep("wear",nrow(data15)) #Activity (sleep, non-wear, wear/wake)
data60$activity <- rep("wear",nrow(data60)) #Activity (sleep, non-wear, wear/wake)
#2.5. Cleaning (Remove last day of data)
udays <- unique(as.Date(data60$datetime))
lastday <- max(udays)
data15 <- removeData(data15,toremove=c(lastday),units="days")
data60 <- removeData(data60,toremove=c(lastday),units="days")
#3. Sleep period detection on the 60-sec series
sleep <- sleepPeriods(data=data60,sleepOnsetHours= c(19,5), bedTimemin = 5,
tolBsleepMatrix = matrix(c(0,24,10),1,3,byrow=T),
tolMatrix = matrix(c(0,5,20,
5,19,10,
19,24,20),3,3,byrow=T),
minSleepTime = 160, scanParam = "axis1",
nonWearPSleep = T, nonWearInact = 90, nonWearTol = 2,
nonWearscanParam = "axis1",
overlap_frac = 0.9)
data60$activity <- setActivitySNW(data60$activity,label="sleep",intv=sleep$sleep,minlength = 20)
data60$activity <- setActivitySNW(data60$activity,label="non-wear",intv=sleep$sleepnw, minlength = 20)
#4. Non-wear period
nWP <- nonWearPeriods(data60, scanParam="axis1",innactivity=20,tolerance=0) #nonWearPeriods. Innactivity and tolerance in minutes
data60$activity <- setActivitySNW(data60$activity,label="non-wear",nWP, minlength = 20)
#5. Wear periods (spike relabeling)
data60 <- checkWearPeriods(data60,maxc = 20000)
#6. Cleaning (Remove last day of data and more than 7 days of data)
udays <- unique(as.Date(data60$datetime))
firstday <- min(udays)
lastday <- max(udays)
validdays <- seq(firstday,firstday+6,by=1)
daystoremove <- udays[!(udays%in%validdays)]
data15 <- removeData(data15,toremove=c(daystoremove),units="days")
data60 <- removeData(data60,toremove=c(daystoremove),units="days")
#7. Add intensity physical activity (Evenson cut points per epoch length)
data15 <- mergingActivity(data15,data60)
data15$intensityEV <- mapply(cut_points_evenson,epoch=15, data15$axis1)
data60$intensityEV <- mapply(cut_points_evenson,epoch=60, data60$axis1)
#8. Get the datafile observation for the final data frame(only wear periods)
ob <- getobs(dbDir,data15, timeunit="min")
#Copies the final data frame in the clipboard
#write.csv(ob,file="clipboard", row.names=F)
#Append this participant and re-write the CSV so partial results survive a
#crash part-way through the batch.
adata<-rbind(adata,ob)
write.csv(adata,file=paste(outputdir,"\\",outputfile, sep="",collapse=""), row.names=F)
#NOTE(review): comparison against T (reassignable) rather than TRUE.
if(valid==T){
cat("........OK\n")
}else{
cat("........INVALID\n")
}
}else{
cat("........SKIPPED (Wrong epoch) \n")
}
}
}
#Close the log diversion and write the final CSV one last time.
sink()
write.csv(adata,file=paste(outputdir,"\\",outputfile, sep="",collapse=""), row.names=F)
print(paste("Total time:", as.numeric(Sys.time()-tnow,units="mins")," mins"))
}
#=============================================================================
# Run the whole batch pipeline; the finally clause tries to close the log
# diversion even if main() aborts mid-run.
# NOTE(review): when main() completes normally it already calls sink() itself,
# so this second sink() may signal "no sink to remove" -- confirm intended.
tryCatch(main(),finally=sink())
#=============================================================================
# ---------------------------------------------------------------------------
# Ad-hoc exploratory plotting script: compares the smoothed activity counts of
# two weekdays between 7:00 and 13:00. Relies on the global data60 left over
# from a run of main(), and on chron helpers hours()/times()/as.chron()
# -- assumes the chron package is attached; TODO confirm.
# ---------------------------------------------------------------------------
data60$activity <- as.factor(data60$activity)
data60$intensityEV <- as.factor(data60$intensityEV)
# weekdays() is locale dependent; day1/day2 below are Spanish day names.
data60$weekday <- weekdays(data60$datetime)
data60$hour <- hours(data60$datetime)
# Clock time of each epoch (HH:MM:SS part of the chron representation).
data60$time <- times(substr(as.character(as.chron(data60$datetime)),11,18))
# Quick interactive checks: day <-> weekday mapping and activity tabulation.
data.frame(unique(data60$day_m2m),weekdays(unique(data60$day_m2m)))
table(data60$activity, weekdays(data60$day_m2m))
par(mfrow=c(1,1))
# Centered moving average of width 2*n+1 over axis1.
# NOTE(review): the vector grows inside the loop (O(n^2)) and the last
# iteration indexes one past the end, making the final smoothed value NA.
n <- 1
smoothed <- c()
for(i in 1:nrow(data60)){
smoothed <- c(smoothed,mean(data60$axis1[(i-n):(i+n)]))
}
data60$smooth <- smoothed
# Plot window (clock time) and the two weekdays to compare.
stime <- times("7:00:00")
etime <- times("13:00:00")
day1 <- "jueves"
day2 <- "viernes"
dd <- subset(data60, data60$hour >= hours(stime) & data60$hour<hours(etime) & data60$weekday == day1)
dd2 <- subset(data60, data60$hour >=hours(stime) & data60$hour<hours(etime) & data60$weekday == day2)
# Smoothed counts for both days in one panel.
# NOTE(review): rgb(166/250, ...) is presumably a typo for 166/255 -- confirm.
plot(dd$time,dd$smooth, type="l", lwd=3,col=rgb(166/250,34/255,30/255),
xlab="Time", ylab="Counts per minute (cpm)",
xlim = c(stime,etime),ylim=c(0,6000), axes = F)
# Reference lines for the intensity cut points (counts per minute).
abline(h=c(100,573*4,1002*4), col = "gray", lty = "dotted",lwd = 3)
text(x=rep(stime-times("00:10:00"),3),y=c(573*4,1002*4,6150)-150,adj = c(0,1),
labels=c("Light","Moderate","Vigorous"))
lines(dd$time,dd$smooth,lwd=3,col=rgb(166/250,34/255,30/255))
lines(dd2$time,dd2$smooth,lwd=3,col=rgb(49/250,76/255,117/255))
axis(side=1,at=seq(stime,etime,times("01:00:00")),
labels= paste(hours(stime):hours(etime),":00",sep=""))
axis(2)
legend(x= c(etime-times("01:15:00")*etime/times("14:00:00"),etime-times("00:30:00")*etime/times("14:00:00")),y=c(4800,6000),
legend=c("Thursday","Friday"),lty=1, lwd=3,
col=c(rgb(166/250,34/255,30/255),rgb(49/250,76/255,117/255)),seg.len = 0.5,
cex=1, x.intersp=0.1, xjust=0, yjust=0, bty="n")
box()
#===========================================================================
#===========================================================================
# Same comparison as the block above, zoomed into the 9:00-10:00 window.
# Relies on data60$smooth computed by the previous script section.
stime <- times("9:00:00")
etime <- times("10:00:00")
day1 <- "jueves"
day2 <- "viernes"
dd <- subset(data60, data60$hour >= hours(stime) & data60$hour<hours(etime) & data60$weekday == day1)
dd2 <- subset(data60, data60$hour >=hours(stime) & data60$hour<hours(etime) & data60$weekday == day2)
# NOTE(review): rgb(166/250, ...) is presumably a typo for 166/255 -- confirm.
plot(dd$time,dd$smooth, type="l", lwd=3,col=rgb(166/250,34/255,30/255),
xlab="Time", ylab="Counts per minute (cpm)",
xlim = c(stime,etime),ylim=c(0,6000), axes = F)
# Reference lines for the intensity cut points (counts per minute).
abline(h=c(100,573*4,1002*4), col = "gray", lty = "dotted",lwd = 3)
text(x=rep(stime-times("00:02:00"),3),y=c(573*4,1002*4,6150)-150,adj = c(0,1),
labels=c("Light","Moderate","Vigorous"))
lines(dd$time,dd$smooth,lwd=3,col=rgb(166/250,34/255,30/255))
lines(dd2$time,dd2$smooth,lwd=3,col=rgb(49/250,76/255,117/255))
axis(side=1,at=seq(stime,etime,times("00:15:00")),
labels= seq(stime,etime,times("00:15:00")))
axis(2)
legend(x= c(etime-times("00:00:00"),etime-times("00:15:00")*etime/times("14:00:00")),y=c(4800,6000),
legend=c("Thursday","Friday"),lty=1, lwd=3,
col=c(rgb(166/250,34/255,30/255),rgb(49/250,76/255,117/255)),seg.len = 0.5,
cex=1, x.intersp=0.1, xjust=0, yjust=0, bty="n")
box()
#=====================================================================00
# Smooth and plot the raw 1-second data frame `data` (global left over from a
# run of main()). Same moving-average caveats as above: the loop grows the
# vector (O(n^2)) and the last value is NA from indexing past the end.
n <- 1
smoothed <- c()
for(i in 1:nrow(data)){
cat(i, "\n")
smoothed <- c(smoothed,mean(data$axis1[(i-n):(i+n)]))
}
data$smooth <- smoothed
dd<- data
plot(dd$datetime,dd$smooth, type="l", lwd=1,col=rgb(166/250,34/255,30/255),
xlab="Date/Time", ylab="Counts per second (cps)")
|
#' Add a new default par list to the gsplot object
#'
#' @param object a gsplot object
#' @param field the name of the field to add the par to
#' (e.g., 'global','view.1.2','side.1', etc)
#' @return a modified gsplot object
add_new_par <- function(object, field){
defaults <- list(c())
if (field == 'global'){
defaults <- config('par', custom.config = object[["global"]][["config"]][["config.file"]])
}
if ('par' %in% names(object[[field]]))
stop('par in ', field, ' already exists, cannot add it.', call. = FALSE)
object <- modify_par(object, arguments=defaults, field)
return(object)
} | /R/add_new_par.R | permissive | USGS-R/gsplot | R | false | false | 605 | r | #' add a new defaul par list to the gsplot object
#'
#' @param object a gsplot object
#' @param field the name of the field to add the par to
#' (e.g., 'global','view.1.2','side.1', etc)
#' @return a modified gsplot object
add_new_par <- function(object, field) {
  # Attaches a default `par` list to the given field of a gsplot object.
  # The global field pulls its defaults from the configured config file;
  # every other field starts from an empty par list. Fails if the field
  # already carries a par entry.
  par_defaults <- list(c())
  if (field == "global") {
    cfg_file <- object[["global"]][["config"]][["config.file"]]
    par_defaults <- config("par", custom.config = cfg_file)
  }
  if ("par" %in% names(object[[field]])) {
    stop("par in ", field, " already exists, cannot add it.", call. = FALSE)
  }
  modify_par(object, arguments = par_defaults, field)
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_sites_EG.R
\name{plot_sites_EG}
\alias{plot_sites_EG}
\title{Representation of sites selected to be surveyed}
\usage{
plot_sites_EG(master_selection, selection_type, variable_1 = NULL,
variable_2 = NULL, selection_number = 1, col_all = NULL,
col_sites = NULL, col_pre = NULL, cex_all = 0.7,
cex_sites = 1, cex_pre = 1, pch_all = 16, pch_sites = 16,
pch_pre = 16, add_main = TRUE)
}
\arguments{
\item{master_selection}{a master_selection object derived from functions
\code{\link{random_selection}}, \code{\link{uniformG_selection}},
\code{\link{uniformE_selection}}, or \code{\link{EG_selection}}.}
\item{selection_type}{(character) Type of selection depending on the function
used to select sites. The options available are "random"
(\code{\link{random_selection}}), "G" (\code{\link{uniformG_selection}}),
"E" (\code{\link{uniformE_selection}}), and "EG"
(\code{\link{EG_selection}}).}
\item{variable_1}{(character or numeric) name or position of the first
variable (X axis) to be plotted in environmental space. Default = NULL,
required when \code{selection_type} = "random" or "G".}
\item{variable_2}{(character or numeric) name or position of the second
variable (Y axis) to be plotted in environmental space. It must be different
from the first one. Default = NULL, required when \code{selection_type} =
"random" or "G".}
\item{selection_number}{(numeric) number of selection to be plotted.
Default = 1.}
\item{col_all}{colors for all points in the region of interest.
The default, NULL, uses a light gray color.}
\item{col_sites}{color for selected sites. The default, NULL, uses
a blue color to represent selected sites.}
\item{col_pre}{color for preselected sites. The default, NULL, uses
a red color to represent preselected sites. Ignored if preselected sites are
not present in \code{master_selection}.}
\item{cex_all}{(numeric) value defining magnification of all points
relative to the default. Default = 0.7.}
\item{cex_sites}{(numeric) value defining magnification of selected sites
relative to the default. Default = 1.}
\item{cex_pre}{(numeric) value defining magnification of preselected sites
relative to the default. Default = 1. Ignored if preselected sites are
not present in \code{master_selection}.}
\item{pch_all}{(numeric) integer specifying a symbol when plotting all
points. Default = 16.}
\item{pch_sites}{(numeric) integer specifying a symbol when plotting points
of selected sites. Default = 16.}
\item{pch_pre}{(numeric) integer specifying a symbol when plotting points
of preselected sites. Default = 16. Ignored if preselected sites are
not present in \code{master_selection}.}
\item{add_main}{(logical) whether or not to add fixed titles to the plot.
Default = TRUE. Titles added are "Environmental space" and "Geographic
space".}
}
\value{
A two-panel plot showing the selected sites. They are shown in both spaces,
geographic and environmental.
}
\description{
Creates a two-panel plot representing sites (all and selected
for survey) in both spaces, environmental and geographic.
}
\examples{
# Data
data("m_matrix", package = "biosurvey")
# Making blocks for analysis
m_blocks <- make_blocks(m_matrix, variable_1 = "PC1",
variable_2 = "PC2", n_cols = 10, n_rows = 10,
block_type = "equal_area")
# Checking column names
colnames(m_blocks$data_matrix)
# Selecting sites uniformly in E space
selectionE <- uniformE_selection(m_blocks, variable_1 = "PC1",
variable_2 = "PC2",
selection_from = "block_centroids",
expected_points = 25, max_n_samplings = 1,
initial_distance = 1, increase = 0.1,
replicates = 5, set_seed = 1)
# Plotting
plot_sites_EG(selectionE, selection_type = "E")
}
| /man/plot_sites_EG.Rd | no_license | tom-gu/biosurvey | R | false | true | 3,999 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_sites_EG.R
\name{plot_sites_EG}
\alias{plot_sites_EG}
\title{Representation of sites selected to be surveyed}
\usage{
plot_sites_EG(master_selection, selection_type, variable_1 = NULL,
variable_2 = NULL, selection_number = 1, col_all = NULL,
col_sites = NULL, col_pre = NULL, cex_all = 0.7,
cex_sites = 1, cex_pre = 1, pch_all = 16, pch_sites = 16,
pch_pre = 16, add_main = TRUE)
}
\arguments{
\item{master_selection}{a master_selection object derived from functions
\code{\link{random_selection}}, \code{\link{uniformG_selection}},
\code{\link{uniformE_selection}}, or \code{\link{EG_selection}}.}
\item{selection_type}{(character) Type of selection depending on the function
used to select sites. The options available are "random"
(\code{\link{random_selection}}), "G" (\code{\link{uniformG_selection}}),
"E" (\code{\link{uniformE_selection}}), and "EG"
(\code{\link{EG_selection}}).}
\item{variable_1}{(character or numeric) name or position of the first
variable (X axis) to be plotted in environmental space. Default = NULL,
required when \code{selection_type} = "random" or "G".}
\item{variable_2}{(character or numeric) name or position of the second
variable (Y axis) to be plotted in environmental space. It must be different
from the first one. Default = NULL, required when \code{selection_type} =
"random" or "G".}
\item{selection_number}{(numeric) number of selection to be plotted.
Default = 1.}
\item{col_all}{colors for points in all points in the region of interest.
The default, NULL, uses a light gray color.}
\item{col_sites}{color for selected sites. The default, NULL, uses
a blue color to represent selected sites.}
\item{col_pre}{color for preselected sites. The default, NULL, uses
a red color to represent preselected sites. Ignored if preselected sites are
not present in \code{master_selection}.}
\item{cex_all}{(numeric) value defining magnification of all points
relative to the default. Default = 0.7.}
\item{cex_sites}{(numeric) value defining magnification of selected sites
relative to the default. Default = 1.}
\item{cex_pre}{(numeric) value defining magnification of preselected sites
relative to the default. Default = 1. Ignored if preselected sites are
not present in \code{master_selection}.}
\item{pch_all}{(numeric) integer specifying a symbol when plotting all
points. Default = 16.}
\item{pch_sites}{(numeric) integer specifying a symbol when plotting points
of selected sites. Default = 16.}
\item{pch_pre}{(numeric) integer specifying a symbol when plotting points
of preselected sites. Default = 16. Ignored if preselected sites are
not present in \code{master_selection}.}
\item{add_main}{(logical) whether or not to add fixed titles to the plot.
Default = TRUE. Titles added are "Environmental space" and "Geographic
space".}
}
\value{
A two-panel plot showing the selected sites. They are shown in both spaces,
geographic and environmental.
}
\description{
Creates a two-panel plot representing sites (all and selected
for survey) in both spaces, environmental and geographic.
}
\examples{
# Data
data("m_matrix", package = "biosurvey")
# Making blocks for analysis
m_blocks <- make_blocks(m_matrix, variable_1 = "PC1",
variable_2 = "PC2", n_cols = 10, n_rows = 10,
block_type = "equal_area")
# Checking column names
colnames(m_blocks$data_matrix)
# Selecting sites uniformly in E space
selectionE <- uniformE_selection(m_blocks, variable_1 = "PC1",
variable_2 = "PC2",
selection_from = "block_centroids",
expected_points = 25, max_n_samplings = 1,
initial_distance = 1, increase = 0.1,
replicates = 5, set_seed = 1)
# Plotting
plot_sites_EG(selectionE, selection_type = "E")
}
|
# https://fivethirtyeight.com/features/can-you-solve-these-colorful-puzzles/
# You play a game with four balls: One ball is red, one is blue,
# one is green and one is yellow. They are placed in a box. You
# draw a ball out of the box at random and note its color. Without
# replacing the first ball, you draw a second ball and then paint
# it to match the color of the first. Replace both balls, and repeat
# the process. The game ends when all four balls have become the
# same color. What is the expected number of turns to finish the game?
# Libraries
library(ggplot2)
# Input
num_colors <- 4
num_balls <- 4
num_trials <- 25000
# Code
# Colors can only repeat in the starting state when there are more balls
# than colors.
rpl <- num_balls > num_colors
num_turns <- numeric(num_trials)  # preallocated: one turn count per trial
for (i in seq_len(num_trials)) {
  a <- sample(1:num_colors, num_balls, replace = rpl)
  count <- 0
  # The game ends when every ball carries the same color.  The values are
  # exact small integers, so a direct uniqueness check replaces the old
  # float-tolerance test via all.equal(min(a), max(a)).
  while (length(unique(a)) > 1L) {
    # Draw two balls without replacement; paint the second to match the
    # color of the first (b[1]).  Balls of equal color are interchangeable,
    # so locating them by color is equivalent to tracking individual balls.
    b <- sample(a, 2, replace = FALSE)
    c1 <- min(which(a == b[1]))
    c2 <- min(which(a == b[2]))
    a[c(c1, c2)] <- b[1]
    count <- count + 1
  }
  num_turns[i] <- count
}
# Output
ggplot(mapping = aes(num_turns)) +
  geom_histogram(binwidth = 1) +
  theme_bw() +
  labs(title = "Riddler 4/28", x = "Num. of Turns", y = "Count")
median(num_turns)
mean(num_turns)
| /colorBalls.r | permissive | sgranitz/riddler | R | false | false | 1,227 | r | # https://fivethirtyeight.com/features/can-you-solve-these-colorful-puzzles/
# You play a game with four balls: One ball is red, one is blue,
# one is green and one is yellow. They are placed in a box. You
# draw a ball out of the box at random and note its color. Without
# replacing the first ball, you draw a second ball and then paint
# it to match the color of the first. Replace both balls, and repeat
# the process. The game ends when all four balls have become the
# same color. What is the expected number of turns to finish the game?
# Libraries
library(ggplot2)
# Input
num_colors <- 4
num_balls <- 4
num_trials <- 25000
# Code
rpl <- num_balls > num_colors
num_turns <- numeric(num_trials)
for (i in 1:num_trials) {
a <- sample(1:num_colors, num_balls, replace = rpl)
count <- 0
while(!isTRUE(all.equal(min(a), max(a)))) {
b <- sample(a, 2, replace = FALSE)
c1 <- min(which(a == b[1]))
c2 <- min(which(a == b[2]))
a[c(c1, c2)] <- b[1]
count <- count + 1
}
num_turns[i] <- count
}
# Output
ggplot(mapping = aes(num_turns)) +
geom_histogram(binwidth = 1) +
theme_bw() +
labs(title = "Riddler 4/28", x = "Num. of Turns", y = "Count")
median(num_turns)
mean(num_turns)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/s3control_operations.R
\name{s3control_delete_access_point_policy_for_object_lambda}
\alias{s3control_delete_access_point_policy_for_object_lambda}
\title{Removes the resource policy for an Object Lambda Access Point}
\usage{
s3control_delete_access_point_policy_for_object_lambda(AccountId, Name)
}
\arguments{
\item{AccountId}{[required] The account ID for the account that owns the specified Object Lambda
Access Point.}
\item{Name}{[required] The name of the Object Lambda Access Point you want to delete the policy
for.}
}
\description{
Removes the resource policy for an Object Lambda Access Point.
See \url{https://www.paws-r-sdk.com/docs/s3control_delete_access_point_policy_for_object_lambda/} for full documentation.
}
\keyword{internal}
| /cran/paws.storage/man/s3control_delete_access_point_policy_for_object_lambda.Rd | permissive | paws-r/paws | R | false | true | 828 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/s3control_operations.R
\name{s3control_delete_access_point_policy_for_object_lambda}
\alias{s3control_delete_access_point_policy_for_object_lambda}
\title{Removes the resource policy for an Object Lambda Access Point}
\usage{
s3control_delete_access_point_policy_for_object_lambda(AccountId, Name)
}
\arguments{
\item{AccountId}{[required] The account ID for the account that owns the specified Object Lambda
Access Point.}
\item{Name}{[required] The name of the Object Lambda Access Point you want to delete the policy
for.}
}
\description{
Removes the resource policy for an Object Lambda Access Point.
See \url{https://www.paws-r-sdk.com/docs/s3control_delete_access_point_policy_for_object_lambda/} for full documentation.
}
\keyword{internal}
|
#' @title city_synonymes
#' @description Use Levenshtein distance iteratively to find plausible misspellings for specific cities.
#' @param x List of city names in the data
#' @param synonymes Table of known city name variants with the columns "name" and "synonyme"
#' @param verbose verbose
#' @param output Output format of the synonyme table. A "list" or "data.frame"
#' @return List of city name variants
#' @return A list with two elements: synonymes (all unique mappings) and ambiguous (non-ambiguous mappings)
#' @export
#' @details This generates an augmented synonyme table including all variants indicated by the distance function. Final names with ambiguous mappings are removed and listed separately.
#' @author Leo Lahti and Niko Ilomaki \email{niko.ilomaki@@helsinki.fi}
#' @references See citation("fennica")
#' @examples \dontrun{all <- city_synonymes(c("Turku","Turkue", "Tampere", "Helsinki"), synonymes)}
#' @keywords utilities
city_synonymes <- function (x, synonymes, verbose = TRUE, output = "data.frame") {
  # TODO make more generic version
  synonyme <- NULL  # silences the R CMD check NOTE for the NSE column below
  # Use lowercase for all variant searches; keep the original input for the
  # final filtering step.
  xorig <- x
  unique_cities <- tolower(unique(x))
  # Unique final (canonical) names in the synonyme list; case is preserved.
  basecase <- unique(as.character(synonymes$name))
  city.variants <- list()
  # For each canonical name, augment its known variants with plausible
  # misspellings detected among the observed city names.
  for(s in basecase){
    if (verbose) { message(s) }
    # Retrieve the known variants (lowercased for matching).
    known.variants <- c(s, synonymes[synonymes$name == s, "synonyme"])
    known.variants <- tolower(known.variants)
    # Augment the list of known variants by variants found from the
    # given unique_cities list
    recognized.variants <- collect_misspellings(s, known.variants, unique_cities)
    city.variants[[s]] <- recognized.variants
    # NOTE: detected variants are deliberately NOT removed from
    # unique_cities, so overlapping matches across names stay visible and
    # are resolved as ambiguous below.  This function is not run often,
    # therefore speed is not critical.
  }
  # Convert into a synonyme lookup table with columns (name, synonyme).
  sn2 <- do.call("rbind", sapply(names(city.variants), function (city) {cbind(city, city.variants[[city]])}))
  colnames(sn2) <- c("name", "synonyme")
  # Remove duplicates
  sn2 <- sn2[!duplicated(sn2),]
  # Remove ambiguous matchings (each synonyme should map to exactly one name).
  amb <- names(which(sapply(split(sn2[, "name"], sn2[, "synonyme"]), length) > 1))
  if (length(amb) > 0) {
    # Only warn when something is actually dropped; the previous version
    # warned unconditionally, even for zero ambiguous synonymes.
    warning(paste("Removing ", length(amb), "ambiguous synonymes: ", paste(amb, collapse = ",")))
  }
  # Remove the ambiguous synonymes with many different names
  sn2 <- sn2[!sn2[, "synonyme"] %in% amb,]
  sn2 <- as.data.frame(sn2)
  # Include only the initial places of interest in the final mapping table
  sn2 <- filter(sn2, synonyme %in% tolower(xorig))
  sn2$name <- droplevels(sn2$name)
  sn2$synonyme <- droplevels(sn2$synonyme)
  if (output == "list") {
    sn2 <- split(as.character(sn2$synonyme), as.character(sn2$name))
  }
  list(synonymes = sn2, ambiguous = amb)
}
| /R/city_synonymes.R | permissive | COMHIS/fennica | R | false | false | 3,189 | r | #' @title city_synonymes
#' @description Use Levenshtein distance iteratively to find plausible misspellings for specific cities.
#' @param x List of city names in the data
#' @param synonymes Table of known city name variants with the columns "name" and "synonyme"
#' @param verbose verbose
#' @param output Output format of the synonyme table. A "list" or "data.frame"
#' @return List of city name variants
#' @return A list with two elements: synonymes (all unique mappings) and ambiguous (non-ambiguous mappings)
#' @export
#' @details This generates an augmented synonyme table including all variants indicated by the distance function. Final names with ambiguous mappings are removed and listed separately.
#' @author Leo Lahti and Niko Ilomaki \email{niko.ilomaki@@helsinki.fi}
#' @references See citation("fennica")
#' @examples \dontrun{all <- city_synonymes(c("Turku","Turkue", "Tampere", "Helsinki"), synonymes)}
#' @keywords utilities
city_synonymes <- function (x, synonymes, verbose = TRUE, output = "data.frame") {
# TODO make more generic version
synonyme <- NULL
# User lowercase for all variant searches
xorig <- x
unique_cities <- tolower(unique(x))
# No lowercase for the final names
# Unique final names in the synonyme list
basecase <- unique(as.character(synonymes$name))
city.variants <- list()
# Standard cases
for(s in basecase){
if (verbose) { message(s) }
# Retrieve the variants
# User lowercase for all variant searches
known.variants <- c(s, synonymes[synonymes$name == s, "synonyme"])
known.variants <- tolower(known.variants)
# Augment the list of known variants by variants found from the
# given unique_cities list
recognized.variants <- collect_misspellings(s, known.variants, unique_cities)
# Add detected variants to the list
city.variants[[s]] <- recognized.variants
# Remove detected variants from the list to speed up
# NOTE: this will potentially hide some overlapping matchings
# This function is not run often, therefore speed is not critical
# Hence let us skip this line for now
# unique_cities <- setdiff(unique_cities, recognized.variants)
}
# Convert into a synonyme table
sn2 <- do.call("rbind", sapply(names(city.variants), function (city) {cbind(city, city.variants[[city]])}))
colnames(sn2) <- c("name", "synonyme")
# Remove duplicates
sn2 <- sn2[!duplicated(sn2),]
# Remove ambiguous matchings (each synonyme should have exactly one name)
amb <- names(which(sapply(split(sn2[, "name"], sn2[, "synonyme"]), length) > 1))
warning(paste("Removing ", length(amb), "ambiguous synonymes: ", paste(amb, collapse = ",")))
# Remove the ambiguous synonymes with many different names
sn2 <- sn2[!sn2[, "synonyme"] %in% amb,]
sn2 <- as.data.frame(sn2)
# Include only the initial places of interest in the final mapping table
sn2 <- filter(sn2, synonyme %in% tolower(xorig))
sn2$name <- droplevels(sn2$name)
sn2$synonyme <- droplevels(sn2$synonyme)
if (output == "list") {
sn2 <- split(as.character(sn2$synonyme), as.character(sn2$name))
}
list(synonymes = sn2, ambiguous = amb)
}
|
library(corrplot)
library(ggplot2)
library(reshape2)
library(Matrix)
library(dplyr)
library(scatterplot3d)
library(graphics)
library(spatstat)
library(gridExtra)
#Set up working directory
#setwd("E:/courses/Stat215A/lab_final/")
#getwd()
grid_arrange_shared_legend <- function(...,
                                       ncol = length(list(...)),
                                       nrow = 1,
                                       position = c("bottom", "right")) {
  # Arrange several ggplots in a grid while drawing a single shared legend
  # (extracted from the first plot).  Taken from:
  # https://github.com/tidyverse/ggplot2/wiki/
  # share-a-legend-between-two-ggplot2-graphs
  library(grid)
  plots <- list(...)
  position <- match.arg(position)
  # Render the first plot's grob tree and pull out its legend grob,
  # identified by the name "guide-box".
  g <- ggplotGrob(plots[[1]] +
                    theme(legend.position = position))$grobs
  legend <- g[[which(sapply(g, function(x) x$name) == "guide-box")]]
  lheight <- sum(legend$height)
  lwidth <- sum(legend$width)
  # Strip the legend from every plot before arranging them in the grid.
  gl <- lapply(plots, function(x) x +
                 theme(legend.position = "none"))
  gl <- c(gl, ncol = ncol, nrow = nrow)
  # Reserve exactly the legend's height (bottom) or width (right) and give
  # the remaining space to the plot grid.
  combined <- switch(position,
                     "bottom" = arrangeGrob(do.call(arrangeGrob, gl),
                                            legend, ncol = 1,
                                            heights = unit.c(unit(1, "npc")
                                                             - lheight,
                                                             lheight)),
                     "right" = arrangeGrob(do.call(arrangeGrob, gl),
                                           legend, ncol = 2,
                                           widths = unit.c(unit(1, "npc") -
                                                             lwidth, lwidth)))
  grid.newpage()
  grid.draw(combined)
  # return gtable invisibly
  invisible(combined)
}
| /lab_final/code/setup.R | no_license | ssaxena00/stat215a | R | false | false | 1,904 | r | library(corrplot)
library(ggplot2)
library(reshape2)
library(Matrix)
library(dplyr)
library(scatterplot3d)
library(graphics)
library(spatstat)
library(gridExtra)
#Set up working directory
#setwd("E:/courses/Stat215A/lab_final/")
#getwd()
grid_arrange_shared_legend <- function(...,
ncol = length(list(...)),
nrow = 1,
position = c("bottom", "right")) {
# Function to share a legend between multiple plots
# using grid.arrange. Taken from:
# https://github.com/tidyverse/ggplot2/wiki/
# share-a-legend-between-two-ggplot2-graphs
library(grid)
plots <- list(...)
position <- match.arg(position)
g <- ggplotGrob(plots[[1]] +
theme(legend.position = position))$grobs
legend <- g[[which(sapply(g, function(x) x$name) == "guide-box")]]
lheight <- sum(legend$height)
lwidth <- sum(legend$width)
gl <- lapply(plots, function(x) x +
theme(legend.position = "none"))
gl <- c(gl, ncol = ncol, nrow = nrow)
combined <- switch(position,
"bottom" = arrangeGrob(do.call(arrangeGrob, gl),
legend,ncol = 1,
heights = unit.c(unit(1, "npc")
- lheight,
lheight)),
"right" = arrangeGrob(do.call(arrangeGrob, gl),
legend, ncol = 2,
widths = unit.c(unit(1, "npc") -
lwidth, lwidth)))
grid.newpage()
grid.draw(combined)
# return gtable invisibly
invisible(combined)
}
|
# TODO:
# 1) Write lag analysis for estimators.
# Simple one for growth rate
# N.B. for Rt this will need wallinga et al Rt calculation of Rt using infectivity profile.
# 2) default estimator functions combining best practice.
# classes for estimates e.g. growthratetimeseries, rttimeseries, poissonratetimeserris, proportions
# simple timeseries as S3 class
# Multiple time series as S3
# 3) plotting functions
# consider a new library to hold class definitions and plotting functions
# or a new library to hold basic data manipulation and validation functions and a new library for plotting
# 4) restructure appendix vignettes to use latex versions from PhD and save figures somewhere.
# rename outputs to remove dates
# check time-series plots still working / fix timeseries plots
# 5) re-architect validation functions
# into new package?
# camelCase wrapper around ensure_exists(); by default a missing column
# aborts with an error naming it.
ensureExists = function(df, column, orElse = function(df) {stop("Missing column: ",column)}, ...) {
  ensure_exists(df, column, orElse, ...)
}
# Make sure all the required columns exist in `df`.
# Emits one message per missing column (so every problem is reported),
# then raises a single error if any column is absent.
#
#   df      data frame to validate
#   columns character vector of required column names
#
# Returns NULL invisibly when the frame is valid; stops otherwise.
checkValid = function(df, columns) {
  missing_cols <- setdiff(columns, colnames(df))
  for (colname in missing_cols) {
    message("Missing column: ", colname)
  }
  if (length(missing_cols) > 0) stop("Invalid dataframe")
}
# create a weekday and is.weekend column
# Derive weekday / is.weekend helper columns from the `date` column.
# `weekday` is an ordered factor (sun..sat, lubridate numbering) and
# `is.weekend` flags Saturdays and Sundays.
weekdayFromDates = function(df) {
  checkValid(df, "date")
  day_labels <- c("sun", "mon", "tue", "wed", "thur", "fri", "sat")
  df %>%
    mutate(
      weekday = ordered(lubridate::wday(date), levels = 1:7, labels = day_labels),
      is.weekend = weekday %in% c("sat", "sun")
    )
}
#' Calculates a weighting to apply to each day of week
#'
#' @param simpleTimeseries a covid timeseries data frame
#' @param ...
#' @param valueVar the variable with the weekly periodicity
#'
#' @return the dataframe with a weekday.wt column which says how much that value is over expressed in the data
weekendEffect = function(simpleTimeseries, valueVar="value", ...) {
  valueVar = ensym(valueVar)
  if (simpleTimeseries %>% is.grouped_df()) stop("this does not work on grouped data. use a group_modify.")
  # Attach weekday / is.weekend columns derived from `date`.
  simpleTimeseries = simpleTimeseries %>% weekdayFromDates()
  # set the default uniform weighting (weight 1 for every day of the week)
  defaultWt = tibble(
    weekday = ordered(1:7,labels=c("sun","mon","tue","wed","thur","fri","sat")),
    weekday.wt = rep(1,7)
  )
  if(nrow(simpleTimeseries)>=21) {
    # With at least three weeks of data, estimate how strongly each weekday
    # deviates from its local trend.
    weight = simpleTimeseries %>%
      # .percentBias: relative deviation of log(value+1) from its centred
      # 7-day rolling mean (3 days either side, complete windows only).
      mutate(.percentBias =
        log(!!valueVar+1) /
        slider::slide_dbl(log(!!valueVar+1), .before=3, .after=3,.f = mean, na.rm=TRUE,.complete = TRUE)-1
      ) %>%
      group_by(weekday,.add=TRUE) %>%
      summarise(
        # exp(abs(mean bias)) >= 1: weekdays that consistently deviate (in
        # either direction) get a larger raw weight.
        weekday.wt = exp(abs(mean(.percentBias, na.rm=TRUE))),
        .groups="drop"
      ) %>%
      # Normalise so the weights average to 1 across the week.
      mutate(weekday.wt=weekday.wt/mean(weekday.wt, na.rm=TRUE))
    # Fall back to the uniform weighting when any weekday is missing or an
    # estimate is undefined.  NOTE(review): this scalar condition uses
    # elementwise `|`; harmless here since both operands are length-1, but
    # `||` would be the idiomatic choice.
    if(nrow(weight) !=7 | any(is.na(weight$weekday.wt))) {
      weight = defaultWt
    }
  } else {
    # Too little data (< 21 rows) to estimate a weekday effect.
    weight = defaultWt
  }
  # Join the per-weekday weight back onto the timeseries as `weekday.wt`.
  simpleTimeseries %>% inner_join(weight, by="weekday") %>% return()
}
#
# #' @description Calculates a weighting to apply to each day of week
# #' @param simpleTimeseries a covid timeseries data frame
# #' @param window the window over which we are to normalise the sample size
# #' @param sampleSizeVar the variable with the sample size in it
# #' @return the dataframe with a sample.wt column which says how much that sample is relevant to the data
# sampleSizeEffect = function(simpleTimeseries, window, sampleSizeVar="total") {
# if (simpleTimeseries %>% is.grouped_df()) stop("this does not work on grouped data. use a group_modify.")
# sampleSizeVar = ensym(sampleSizeVar)
# simpleTimeseries = simpleTimeseries %>% arrange(date) %>% mutate(
# #sample.wt = ifelse(!!sampleSizeVar==0,0,!!sampleSizeVar / slider::slide_dbl(!!sampleSizeVar, .before = floor(window/2), .after = floor(window/2), mean, na.rm=TRUE,.complete = FALSE))
# sample.wt = ifelse(!!sampleSizeVar==0,0,!!sampleSizeVar/mean(!!sampleSizeVar,na.rm = TRUE))
# )
# return(simpleTimeseries)
# }
#
## Locfit estimate outputs ----
# This is just to format locfit results given a locfit model.
# extract the locfit result from the locfit model and format it
locfitExtractResult = function(df, model, estimate, modelName, link = "value") {
  # Extract fitted values and standard errors from a locfit model at the
  # fit points and append formatted quantile columns to `df` via
  # formatResult().  On any failure the null result (carrying the error
  # message) is returned instead, so callers always get a well-formed frame.
  tryCatch({
    points = preplot(model,where = "fitp",se.fit = TRUE,band="local")
    t = points$tr        # back-transform from the link scale
    fit = points$fit     # fitted values on the link scale
    se.fit = tryCatch({
      # Interpolate any missing standard errors; if even that fails,
      # fall back to all-NA standard errors of matching length.
      forecast::na.interp(points$se.fit)
    }, error = function(e) {
      rep(NA,length(fit))
    })
    df %>% formatResult(fit,se.fit,t,estimate,modelName,link)
  }, error = function(e) {
    df %>% nullResult(estimate,modelName,link,error = e$message)
  })
}
# or else NA
# Evaluate `expr`, returning NA_real_ instead of raising if it errors.
opt = function(expr) {
  tryCatch(expr, error = function(e) NA_real_)
}
# Format a (link-scale) normally distributed estimate into quantile columns.
# `fit` / `se.fit` are the mean and standard error on the link scale and
# `t` is the back-transform applied to produce values on the natural scale
# (callers pass the locfit inverse-link, or the identity for null results).
# Column names are built as "<estimate>.<suffix>".
formatResult = function(df, fit, se.fit, t, estimate, modelName,link) {
  df %>% mutate(
    # Raw link-scale estimate and its standard error.
    !!(paste0(estimate,".",link)) := fit,
    !!(paste0(estimate,".SE.",link)) := se.fit,
    # Back-transformed normal quantiles; opt() yields NA when the
    # transform or qnorm fails (e.g. missing standard errors).
    !!(paste0(estimate,".Quantile.0.025")) := opt(t(qnorm(0.025,fit,se.fit))),
    !!(paste0(estimate,".Quantile.0.05")) := opt(t(qnorm(0.05,fit,se.fit))),
    !!(paste0(estimate,".Quantile.0.25")) := opt(t(qnorm(0.25,fit,se.fit))),
    !!(paste0(estimate,".Quantile.0.5")) := t(fit),
    !!(paste0(estimate,".Quantile.0.75")) := opt(t(qnorm(0.75,fit,se.fit))),
    !!(paste0(estimate,".Quantile.0.95")) := opt(t(qnorm(0.95,fit,se.fit))),
    !!(paste0(estimate,".Quantile.0.975")) := opt(t(qnorm(0.975,fit,se.fit))),
    !!(paste0(estimate,".model")) := modelName)
}
# Build a degenerate result frame: a constant fit (`centralValue`), NA
# standard error and the identity transform, plus an "<estimate>.error"
# column recording why the real estimate was unavailable.
nullResult = function(df, estimate, modelName, link = "value", error = "unknown error", centralValue = 0) {
  df %>% formatResult(fit = centralValue, se.fit=NA_real_, t=function(x) x, estimate, modelName, link) %>%
    mutate(
      !!(paste0(estimate,".error")) := error
    )
}
#' Rename a growth rate estimate by placing a prefix in front o
#'
#' @param df the datafram with the Growth rate, possion rate, R_t or proportion estimates
#' @param prefix the prefix to add
#' @param estimates which estimates to rename (defaults to all of "Growth","Est","Proportion" and "Rt")
#'
#' @return the dataframe with the columns renamed
#' @export
renameResult = function(df, prefix, estimates = c("Growth","Est","Proportion","Rt","doublingTime")) {
  # For each estimate family, prefix every matching column name, e.g.
  # "Growth.value" -> "<prefix>.Growth.value".
  # Fixes two defects in the previous version: the loop always matched the
  # literal "Growth" (ignoring `estimate`), and the function never returned
  # the renamed data frame (contradicting its documented @return).
  for (estimate in estimates) {
    df = df %>% rename_with(.cols = starts_with(estimate), .fn = ~ paste0(prefix, ".", .x))
  }
  df
}
## Locfit estimators ----
# Build a locfit model formula of the form
#   <valueVar> ~ locfit::lp(time, nn=..., h=..., deg=...)
# from a window size (in units of `time`), a polynomial degree and a
# bandwidth strategy (nearest-neighbour fraction vs fixed width).
locfitFormula = function(valueVar, nrowDf, window, polynomialDegree, nearestNeighbours = TRUE, ...) {
  valueVar=ensym(valueVar)
  # Candidate nearest-neighbour fractions, capped at 1 (all observations).
  tmp_alpha = min(window/nrowDf,1)
  tmp_alpha_2 = min((window*2+1)/nrowDf,1)
  lpParams = list(
    nn = if( nearestNeighbours ) tmp_alpha_2 else tmp_alpha, # this is given in fraction of total observations
    h = if( !nearestNeighbours ) window else 0, # this is given in units of X
    deg = polynomialDegree
  )
  # Render the parameters as "name=value" text and paste the formula string.
  lpParamsText = paste(names(lpParams),lpParams,sep="=",collapse=", ")
  lpFormula = as.formula(paste0(as_label(valueVar), " ~ locfit::lp(time, ",lpParamsText,")"))
  return(lpFormula)
}
#' Generate a smoothed estimate of the proportion of cases compared to some total.
#'
#' Fits a local quasi-binomial regression (locfit) on the logit scale to the
#' daily proportion `value/total` and extracts estimates, standard errors and
#' quantiles at every observed time point.
#'
#' @param simpleTimeseries a minimal time-series including date, value, and if available total. If total is present the proportion is value/total, otherwise it is value.
#' @param degree the polynomial degree
#' @param window the data window in days
#' @param estimateMean there is no closed form estimate of the mean of a logit transformed normal. It can be calculated by integration but this is relatively expensive and not done unless explicitly needed.
#' @param ... may include "nearestNeighbours=FALSE" to disable the tail behaviour of locfit
#'
#' @return a timeseries with binomial proportion estimates (columns starting with "Proportion")
#' @export
locfitProportionEstimate = function(simpleTimeseries, degree = 2, window = 14, estimateMean = FALSE, ...) {
  if (simpleTimeseries %>% is.grouped_df()) stop("this does not work on grouped data. use a group_modify.")
  simpleTimeseries %>% checkValid(c("date","value"))
  # Ensure the derived columns exist: a total (defaults to 1 so the "proportion"
  # is just the value), a numeric time axis, and the raw proportion itself.
  simpleTimeseries = simpleTimeseries %>%
    arrange(date) %>%
    ensureExists("total", orElse = function(ts,...) ts %>% mutate(total=1)) %>%
    ensureExists("time", orElse = function(ts,...) ts %>% mutate(time = as.integer(date-max(date)))) %>%
    mutate(.prop = ifelse(total==0,NA,value/total))
  if(any(simpleTimeseries$.prop > 1,na.rm = TRUE)) stop("Proportions model has values greater than 1. Did you specify total column correctly?")
  # Degenerate inputs (essentially all zero or all one) cannot support a
  # degree-d logistic fit; return an uninformative result rather than erroring.
  if(sum(na.omit(simpleTimeseries$.prop) != 0) < degree) {
    return(simpleTimeseries %>% nullResult(estimate = "Proportion", modelName = glue::glue("binomial:{degree}:{window}"), link = "logit", error = "not enough non zero values", centralValue = 0))
  }
  if(sum(na.omit(simpleTimeseries$.prop) != 1) < degree) {
    return(simpleTimeseries %>% nullResult(estimate = "Proportion", modelName = glue::glue("binomial:{degree}:{window}"), link = "logit", error = "not enough non unitary values", centralValue = 1))
  }
  # Fit the local logistic model, evaluated at every observed time point.
  # BUGFIX: the previous error handler called browser() (debug leftover);
  # rethrow with context instead.
  model = tryCatch(
    locfit::locfit(
      locfitFormula(.prop, nrowDf = nrow(simpleTimeseries), window, degree, ...),
      data = simpleTimeseries,
      family = "qbinomial",
      link = "logit",
      ev = simpleTimeseries$time
    ),
    error = function(e) stop("locfit proportion model failed: ", conditionMessage(e), call. = FALSE)
  )
  weightLbl = "none"
  simpleTimeseries = simpleTimeseries %>%
    locfitExtractResult(model, estimate = "Proportion", modelName = glue::glue("binomial:{degree}:{window}:{weightLbl}"), link = "logit") %>%
    select(-.prop)
  if (estimateMean) {
    # Mean of a logit-normal has no closed form; integrate numerically per row.
    simpleTimeseries = simpleTimeseries %>%
      mutate(
        Proportion.value = map2_dbl(Proportion.logit, Proportion.SE.logit, .f = ~ ifelse(is.na(.y),.x,logitnorm::momentsLogitnorm(.x,.y)[["mean"]]))
      )
  }
  return(simpleTimeseries)
}
#' Generate a smoothed estimate of the relative growth rate of cases compared to some baseline using proportions.
#'
#' Fits a local quasi-binomial model to value/total on the logit scale and
#' extracts the first derivative of the fit, i.e. the growth rate of the
#' log-odds of the proportion.
#'
#' @param simpleTimeseries a minimal time-series including date, value, and if available total. If total is present the proportion is value/total, otherwise it is value, and total is assumed to be 1.
#' @param degree the polynomial degree
#' @param window the data window in days
#' @param ... may include "nearestNeighbours=FALSE" to disable the tail behaviour of locfit
#'
#' @return a timeseries with growth rate estimates (columns starting with "Growth")
#' @export
locfitProportionalGrowthEstimate = function(simpleTimeseries, degree = 2, window = 14, ...) {
  if (simpleTimeseries %>% is.grouped_df()) stop("this does not work on grouped data. use a group_modify.")
  simpleTimeseries %>% checkValid(c("date","value"))
  simpleTimeseries = simpleTimeseries %>%
    arrange(date) %>%
    ensureExists("total", orElse = function(ts,...) {
      message("No total column in proportional timeseries - assuming value is a fraction, and total is 1.")
      ts %>% mutate(total=1)
    }) %>%
    ensureExists("time", orElse = function(ts,...) ts %>% mutate(time = as.integer(date-max(date)))) %>%
    mutate(.prop = ifelse(total==0,NA,value/total))
  if(any(simpleTimeseries$.prop > 1,na.rm = TRUE)) stop("Proportions model contains fractions greater than 1. Did you specify total column correctly?")
  # Degenerate inputs cannot support a fit; the growth of a flat (all zero or
  # all one) proportion is zero.
  if(sum(na.omit(simpleTimeseries$.prop) != 0) < degree) {
    return(simpleTimeseries %>% nullResult(estimate = "Growth", modelName = glue::glue("binomial:{degree}:{window}"), link = "value",error = "not enough non zero values", centralValue = 0))
  }
  if(sum(na.omit(simpleTimeseries$.prop) != 1) < degree) {
    # BUGFIX: this null result was previously labelled estimate = "Proportion",
    # inconsistent with the "Growth" estimate this function produces.
    return(simpleTimeseries %>% nullResult(estimate = "Growth", modelName = glue::glue("binomial:{degree}:{window}"), link = "value", error = "not enough non unitary values", centralValue = 0))
  }
  # deriv=1 makes locfit return the derivative of the smooth on the link scale.
  model = locfit::locfit(
    locfitFormula(.prop, nrowDf = nrow(simpleTimeseries), window, degree, ...),
    data=simpleTimeseries,
    family="qbinomial",
    link="logit",
    deriv=1,
    ev=simpleTimeseries$time
  )
  weightLbl = "none"
  # no link function for the growth rate as it is already the derivative
  simpleTimeseries %>%
    locfitExtractResult(model = model, estimate = "Growth", modelName = glue::glue("binomial:{degree}:{window}:{weightLbl}"), link = "value") %>%
    select(-.prop)
}
#' Generate a smoothed estimate of the absolute incidence rate of cases using a poisson model.
#'
#' @param simpleTimeseries a minimal time-series including date and value columns.
#' @param degree the polynomial degree
#' @param window the data window in days
#' @param weightByWeekday down-weight observations according to a day-of-week effect
#' @param ... may include "nearestNeighbours=FALSE" to disable the tail behaviour of locfit
#'
#' @return a timeseries with poisson rate estimates (columns starting with "Est")
#' @export
locfitPoissonRateEstimate = function(simpleTimeseries, degree = 2, window = 14, weightByWeekday = FALSE, ...) {
  if (is.grouped_df(simpleTimeseries)) stop("this does not work on grouped data. use a group_modify.")
  checkValid(simpleTimeseries, c("date","value"))
  # Derive the weekday weighting and numeric time axis where not supplied.
  simpleTimeseries = simpleTimeseries %>%
    arrange(date) %>%
    ensureExists("weekday.wt", orElse = function(ts,...) weekendEffect(ts, valueVar = value)) %>%
    ensureExists("time", orElse = function(ts,...) mutate(ts, time = as.integer(date - max(date)))) %>%
    mutate(.prop = value)
  # A rate cannot be fitted when nearly everything is zero.
  if (sum(na.omit(simpleTimeseries$.prop) != 0) < degree) {
    return(simpleTimeseries %>% nullResult(estimate = "Est", modelName = glue::glue("poisson:{degree}:{window}"), link = "log",error = "not enough non zero values", centralValue = 0))
  }
  # Observation weights: either the day-of-week correction or uniform.
  simpleTimeseries = simpleTimeseries %>% mutate(fit.wt = if (weightByWeekday) weekday.wt else 1)
  model = locfit::locfit(
    locfitFormula(.prop, nrowDf = nrow(simpleTimeseries), window, degree, ...),
    weights = fit.wt,
    data = simpleTimeseries,
    family = "qpoisson",
    link = "log",
    ev = simpleTimeseries$time
  )
  weightLbl = if (weightByWeekday) "weekday" else "none"
  simpleTimeseries %>%
    locfitExtractResult(model, estimate = "Est", modelName = glue::glue("poisson:{degree}:{window}:{weightLbl}"), link="log") %>%
    select(-.prop)
}
#' Generate a smoothed estimate of the absolute growth rate of cases using a poisson model.
#'
#' @param simpleTimeseries a minimal time-series including date and value columns.
#' @param degree the polynomial degree
#' @param window the data window in days
#' @param weightByWeekday down-weight observations according to a day-of-week effect
#' @param ... may include "nearestNeighbours=FALSE" to disable the tail behaviour of locfit
#'
#' @return a timeseries with growth rate estimates (columns starting with "Growth")
#' @export
locfitGrowthEstimate = function(simpleTimeseries, degree = 2, window = 14, weightByWeekday = FALSE, ...) {
  if (is.grouped_df(simpleTimeseries)) stop("this does not work on grouped data. use a group_modify.")
  checkValid(simpleTimeseries, c("date","value"))
  # Derive the weekday weighting and numeric time axis where not supplied.
  simpleTimeseries = simpleTimeseries %>%
    arrange(date) %>%
    ensureExists("weekday.wt", orElse = function(ts,...) weekendEffect(ts, valueVar = value)) %>%
    ensureExists("time", orElse = function(ts,...) mutate(ts, time = as.integer(date - max(date)))) %>%
    mutate(.prop = value)
  # A growth rate cannot be fitted when nearly everything is zero.
  if (sum(na.omit(simpleTimeseries$.prop) != 0) < degree) {
    return(simpleTimeseries %>% nullResult(estimate = "Growth", modelName = glue::glue("poisson:{degree}:{window}"), link = "value",error = "not enough non zero values", centralValue = 0))
  }
  # Observation weights: either the day-of-week correction or uniform.
  simpleTimeseries = simpleTimeseries %>% mutate(fit.wt = if (weightByWeekday) weekday.wt else 1)
  # deriv=1 returns the derivative of the log-rate, i.e. the growth rate.
  model = locfit::locfit(
    locfitFormula(.prop, nrowDf = nrow(simpleTimeseries), window, degree, ...),
    weights = fit.wt,
    data = simpleTimeseries,
    family = "qpoisson",
    link = "log",
    deriv = 1,
    ev = simpleTimeseries$time
  )
  weightLbl = if (weightByWeekday) "weekday" else "none"
  # no link function for the growth rate as it is already the derivative
  simpleTimeseries %>%
    locfitExtractResult(model = model, estimate = "Growth", modelName = glue::glue("poisson:{degree}:{window}:{weightLbl}"), link = "value") %>%
    select(-.prop)
}
#' Calculate a doubling time with quantiles for any timeseries with Growth rate estimates
#'
#' @param simpleTimeseries a timeseries containing `Growth.Quantile.*` columns
#'
#' @return a timeseries with doubling time estimates (columns starting with "doublingTime")
#' @export
doublingTimeFromGrowthRate = function(simpleTimeseries) {
  # Doubling time = log(2)/r. Because 1/x reverses ordering, the quantile
  # labels must be flipped (e.g. the 0.025 growth quantile becomes the 0.975
  # doubling time quantile); `reorder` rewrites the column name accordingly
  # by extracting the numeric quantile from the source column name.
  reorder = function(x) (1-(stringr::str_extract(x,"[0-9]\\.[0-9]+") %>% as.numeric())) %>% sprintf(fmt="doublingTime.Quantile.%1.3g")
  simpleTimeseries %>% mutate(across(.cols = starts_with("Growth.Quantile"), .fns = ~ log(2)/.x, .names = "{reorder(.col)}"))
}
#' Calculate a reproduction number estimate using the Wallinga 2007 estimation using empirical generation time distribution. This uses resampling to transmit uncertainty in growth rate estimates
#'
#' @param simpleTimeseries - With a "Growth" estimate as a normally distributed quantity (Growth.value / Growth.SE.value columns)
#' @param infectivityProfile a list holding yMatrix and aVector (used only to default the next two parameters)
#' @param yMatrix - the matrix of possible infectivity profiles as discrete distributions
#' @param aVector - the upper boundaries of the time cut-offs for the infectivity profiles
#' @param bootstraps - the number of bootstraps to take to calculate for each point.
#' @param quantiles - quantiles to calculate.
#' @import logitnorm
#'
#' @return a timeseries with "Rt" estimates
#' @export
rtFromGrowthRate = function(simpleTimeseries, infectivityProfile, yMatrix = infectivityProfile$yMatrix, aVector = infectivityProfile$aVector, bootstraps = 20*dim(yMatrix)[2], quantiles = c(0.025,0.05,0.25,0.5,0.75,0.95,0.975)) {
  if (simpleTimeseries %>% is.grouped_df()) stop("this does not work on grouped data. use a group_modify.")
  simpleTimeseries %>% checkValid(c("date","value","Growth.value","Growth.SE.value"))
  # grab the y matrix from the list column
  # NOTE(review): matrix(yMatrix) on an ordinary matrix collapses it to a
  # single column, which would merge all infectivity profiles into one;
  # this only preserves per-profile columns if yMatrix arrives in a form
  # matrix() reshapes appropriately - confirm the intended input type.
  y_cols = matrix(yMatrix)
  a = aVector
  # figure out how many bootstraps we need per infectivity profile (at least 1)
  bootsPerInf = max(c(bootstraps %/% dim(yMatrix)[2],1))
  # lose the zero values in y and a, if present (which they will be):
  if (a[1]==0) {
    y_cols = matrix(y_cols[-1,])
    a = a[-1]
  }
  # get the infectivity profiles as a list of vectors, each bootstrap profile will be a vector.
  ys = asplit(y_cols, MARGIN=2)
  d3 = simpleTimeseries %>% mutate(R = map2(Growth.value, Growth.SE.value, function(mean_r,sd_r) {
    # deterministic "sampling": evenly spaced quantiles of the growth rate
    # distribution rather than random draws.
    # NOTE(review): assumes bootsPerInf >= 3; for smaller values the
    # 2:(bootsPerInf-1) index is invalid - confirm the bootstraps default
    # always guarantees this.
    qnts = seq(0,1,length.out = bootsPerInf)[2:(bootsPerInf-1)]
    r_samples = qnorm(p=qnts,mean_r,sd_r)
    rs = asplit(matrix(r_samples,nrow=length(ys)), MARGIN=1)
    out = map2(rs,ys,function(r10,y) {
      # Wallinga & Lipsitch (2007) eqn: R = r / sum over intervals of
      # y * (exp(-r*a_{i-1}) - exp(-r*a_i)) / (a_i - a_{i-1})
      R10 = sapply(r10, function(r) {
        R = r/sum(y*(exp(-r*lag(a,default=0))-exp(-r*a))/(a - lag(a,default=0)))
      })
    })
    # flatten all profile/sample combinations and summarise
    R_out = as.vector(sapply(out,c))
    R_q = quantile(R_out, quantiles)
    names(R_q) = paste0("Rt.Quantile.",quantiles)
    R_summ = enframe(R_q) %>% pivot_wider() %>% mutate(Rt.value = mean(R_out), Rt.SE.value = sd(R_out))
    return(R_summ)
  }))
  return(d3 %>% unnest(R) %>% mutate(Rt.model = "wallinga:growth-rate"))
}
## Manchester growth rate ----
# ## Adapted from code
# Copyright (c) 2020 Ian Hall
# See LICENCE for licensing information
# Growth rate estimates for confirmed cases in Europe and for different metrics in Italy using GAM
# Figure 1 (main text) and figures S1 and S2 (electronic supplementary material) of:
#
# Pellis L, Scarabel F, Stage HB, Overton CE, Chappell LHK, Fearon E, Bennett E,
# University of Manchester COVID-19 Modelling Group, Lythgoe KA, House TA and Hall I,
# "Challenges in control of COVID-19: short doubling time and long delay to effect of interventions",
# Philosophical Transactions of the Royal Society B (2021)
#
# gamGrowthEstimate = function(simpleTimeseries, meth="GCV.Cp", FE='WD'){
# if (simpleTimeseries %>% is.grouped_df()) stop("this does not work on grouped data. use a group_modify.")
#
# simpleTimeseries %>% checkValid(c("date","value"))
# simpleTimeseries = simpleTimeseries %>%
# arrange(date) %>%
# ensureExists("time", orElse = function(ts,...) ts %>% mutate(time = as.integer(date-max(date)))) %>%
# mutate(.incidence = value)
#
# #res <- data.frame(sdt=rep(0,npts),sdtup=rep(0,npts),sdtlow=rep(0,npts),doub=rep(0,npts),doubup=rep(0,npts),doublow=rep(0,npts))
# #Tv <- timev
#
# if(FE=='None') {
# MGAM <- mcgv::gam(.incidence ~ mcgv::s(time), data = simpleTimeseries, family=quasipoisson, method=meth)
# } else {
# simpleTimeseries = simpleTimeseries %>%
# ensureExists("weekday", orElse = function(ts,...) ts %>% weekendEffect(valueVar=value)) %>%
# ensureExists("is.weekend", orElse = function(ts,...) ts %>% weekendEffect(valueVar=value))
# if(FE=='WE'){
# MGAM <- mcgv::gam(.incidence ~ mcgv::s(time)+is.weekend, data = simpleTimeseries, family=quasipoisson, method=meth)
# } else {
# MGAM <- mcgv::gam(.incidence ~ mcgv::s(time)+weekday, data = simpleTimeseries, family=quasipoisson, method=meth)
# }
# }
#
# X0 <- predict(MGAM, simpleTimeseries %>% mutate(time=time-eps), type="lpmatrix")
# eps <- 1e-7 ## finite difference interval
# X1 <- predict(MGAM, simpleTimeseries %>% mutate(time=time+eps),type="lpmatrix")
# Xp <- (X1-X0)/(2*eps) ## maps coefficients to (fd approx.) derivatives
# # something to do with extracting the coefficients
# off <- ifelse(FE=='None',1,ifelse(FE=='WE',2,7))
# Xi <- Xp*0
# Xi[,1:9+off] <- Xp[,1:9+off] ## weekend Xi%*%coef(MGAM) = smooth deriv i
# df <- Xi%*%coef(MGAM) ## ith smooth derivative
# df.sd <- rowSums(Xi%*%MGAM$Vp*Xi)^.5 ## cheap diag(Xi%*%b$Vp%*%t(Xi))^.5
# ## derivative calculation, pers comm S. N. Wood, found in mgcv: Mixed GAM Computation Vehicle with automatic smoothness estimation. R packageversion 1.8-31 (2019) https://CRAN.R-project.org/package=mgcv.
#
# simpleTimeseries %>% formatResult(fit = df, se.fit = df.sd,t = function(x) x, estimate = "Growth", modelName = glue::glue("poisson:gam-{meth}:{FE}"), link = "value")
#
# }
## Point estimators ----
#' Calculate a slightly more robust estimate of growth rate and proportion based on a single binomial glm evaluated at a range of dates
#'
#' @param simpleTimeseries - the timeseries containing date, value and total
#' @param dates - dates at which to evaluate the model
#' @param window - the window of data in days, centred on each evaluation date
#' @param weekly - either "weekday","weekend" or "none" to define whether to fit a fixed effect model to the weekday, or the is.weekend
#' @param includeModel - keep the fitted model as a list column for fit analysis
#' @param ... - not currently used
#'
#' @return a dataframe of evaluation dates, growth rates, proportions and model fit
#' @export
pointProportionEstimate = function(simpleTimeseries, dates = max(simpleTimeseries$date)-3, window = 14, weekly = "weekday", includeModel = TRUE,...) {
  if (simpleTimeseries %>% is.grouped_df()) stop("this does not work on grouped data. use a group_modify.")
  # logitnorm is an optional dependency needed for the logit-normal mean below.
  if (!requireNamespace("logitnorm", quietly = TRUE)) {
    message("Installing logitnorm needed for analyses")
    install.packages("logitnorm", repos = "https://cloud.r-project.org")
  }
  predictDates = as.Date(dates)
  simpleTimeseries %>% checkValid(c("date","value"))
  simpleTimeseries = simpleTimeseries %>%
    arrange(date) %>%
    ensureExists("total", orElse = function(ts,...) ts %>% mutate(total=1)) %>%
    ensureExists("time", orElse = function(ts,...) ts %>% mutate(time = as.integer(date-max(date)))) %>%
    ensureExists(c("weekday","is.weekend"), orElse = function(ts,...) ts %>% weekdayFromDates()) %>%
    mutate(.prop = ifelse(total==0,NA,value/total))
  if(any(simpleTimeseries$.prop > 1,na.rm = TRUE)) stop("Proportions model has values greater than 1. Did you specify total column correctly?")
  # Optional day-of-week fixed effect.
  if (weekly=="weekday") {
    modelFormula = .prop ~ time + weekday
  } else if (weekly=="weekend") {
    modelFormula = .prop ~ time + is.weekend
  } else {
    modelFormula = .prop ~ time
  }
  bind_rows(lapply(predictDates, function(predictDate) {
    # Fit on a window of data centred on the evaluation date, weighting
    # observations by their relative sample size.
    dateMin = as.Date(predictDate)-floor(window/2)
    dateMax = as.Date(predictDate)+floor(window/2)
    suppressWarnings({
      model = glm(
        modelFormula,
        data=simpleTimeseries %>% filter(date >= dateMin & date <= dateMax) %>% mutate(sample.wt = total/mean(total,na.rm=TRUE)),
        family="binomial",
        weights=sample.wt
      )
    })
    predictAt = tibble(
      date = predictDate,
      time = as.integer(date-max(simpleTimeseries$date)),
    ) %>% weekdayFromDates()
    predicted = predict(model,newdata = predictAt,se.fit = TRUE, type="link")
    linkFn = family(model)$linkinv
    predictAt = formatResult(predictAt, unname(predicted$fit), unname(predicted$se.fit), linkFn, "Proportion", "glm", "logit")
    # Mean of the logit-normal distribution has no closed form; integrate.
    predictAt = predictAt %>% mutate(
      Proportion.value = map2_dbl(Proportion.logit, Proportion.SE.logit, .f = ~ logitnorm::momentsLogitnorm(.x, .y)[["mean"]])
    )
    # The time coefficient of the logistic fit is the relative growth rate.
    gr = summary(model)$coefficients["time",]
    predictAt = formatResult(predictAt, gr[[1]], gr[[2]], function(x) x, "Growth", "glm", "value")
    # BUGFIX: previously `if(includeModel) predictAt %>% mutate(...)` was the
    # last expression, so the estimates were silently dropped (NULL returned)
    # whenever includeModel was FALSE.
    if(includeModel) predictAt = predictAt %>% mutate(fit = list(model))
    predictAt
  }))
}
#' Calculate a slightly more robust estimate of growth rate and poisson rate based on a single model
#'
#' @param simpleTimeseries - the timeseries containing date and value
#' @param dates - dates at which to evaluate the model
#' @param window - the window of data in days, centred on each evaluation date
#' @param weekly - either "weekday","weekend" or "none" to define whether to fit a fixed effect model to the weekday, or the is.weekend
#' @param includeModel - keep the fitted model as a list column for fit analysis
#' @param ... - not currently used
#'
#' @return a dataframe of evaluation dates, growth rates, poisson rates and model fit
#' @export
pointPoissonEstimate = function(simpleTimeseries, dates, window, weekly = "weekday", includeModel = TRUE,...) {
  if (simpleTimeseries %>% is.grouped_df()) stop("this does not work on grouped data. use a group_modify.")
  predictDates = as.Date(dates)
  simpleTimeseries %>% checkValid(c("date","value"))
  simpleTimeseries = simpleTimeseries %>%
    arrange(date) %>%
    ensureExists("time", orElse = function(ts,...) ts %>% mutate(time = as.integer(date-max(date)))) %>%
    ensureExists(c("weekday","is.weekend"), orElse = function(ts,...) ts %>% weekdayFromDates()) %>%
    mutate(.prop = value)
  # Optional day-of-week fixed effect.
  if (weekly=="weekday") {
    modelFormula = .prop ~ time + weekday
  } else if (weekly=="weekend") {
    modelFormula = .prop ~ time + is.weekend
  } else {
    modelFormula = .prop ~ time
  }
  bind_rows(lapply(predictDates, function(predictDate) {
    # Fit on a window of data centred on the evaluation date.
    dateMin = as.Date(predictDate)-floor(window/2)
    dateMax = as.Date(predictDate)+floor(window/2)
    model = glm(
      modelFormula,
      data=simpleTimeseries %>% filter(date >= dateMin & date <= dateMax),
      family="poisson"
    )
    predictAt = tibble(
      date = predictDate,
      time = as.integer(date-max(simpleTimeseries$date)),
    ) %>% weekdayFromDates()
    predicted = predict(model,newdata = predictAt,se.fit = TRUE, type="link")
    linkFn = family(model)$linkinv
    predictAt = formatResult(predictAt, unname(predicted$fit), unname(predicted$se.fit), linkFn, "Est", "glm", "log")
    # The time coefficient of the log-linear fit is the exponential growth rate.
    gr = summary(model)$coefficients["time",]
    predictAt = formatResult(predictAt, gr[[1]], gr[[2]], function(x) x, "Growth", "glm", "value")
    # BUGFIX: previously `if(includeModel) predictAt %>% mutate(...)` was the
    # last expression, so the estimates were silently dropped (NULL returned)
    # whenever includeModel was FALSE.
    if(includeModel) predictAt = predictAt %>% mutate(fit = list(model))
    predictAt
  }))
}
## EpiEstim wrapper ----
#' Minimal epiestim wrapper to execute a time series R_t using a discrete infectivity profile matrix, and format the result to be consistent with the rest of this package.
#'
#' @param simpleTimeseries an ungrouped timeseries with date and value (incidence) columns
#' @param yMatrix a matrix of discrete infectivity profiles, one profile per column
#' @param bootstraps total number of posterior samples, spread across the profile columns
#' @param window the sliding estimation window in days
#'
#' @return the input timeseries with "Rt.*" estimate columns joined on
epiestimRtEstimate = function(simpleTimeseries, yMatrix, bootstraps = 10*dim(yMatrix)[2], window = 14) {
  if (simpleTimeseries %>% is.grouped_df()) stop("this does not work on grouped data. use a group_modify.")
  siConfig = EpiEstim::make_config(method = "si_from_sample")
  # EpiEstim expects incidence data with columns named dates/I.
  tmp = simpleTimeseries %>% dplyr::select(dates=date,I=value)
  # seq_id lets us join EpiEstim's positional output back onto the input rows.
  simpleTimeseries = simpleTimeseries %>% dplyr::mutate(seq_id=row_number())
  # number of posterior samples per infectivity profile column (at least 1)
  bootsPerInf = max(c(bootstraps %/% dim(yMatrix)[2],1))
  siConfig$t_start = c(2:(nrow(tmp)-window))
  siConfig$t_end = siConfig$t_start+window
  siConfig$n2 = bootsPerInf
  warn = NA
  # Run EpiEstim, muffling warnings (the last warning message is captured in
  # `warn`). NOTE(review): `warn` is collected but never written to the output
  # (see the commented `#warn` below) - confirm whether it should populate the
  # `errors` column.
  tmp4 =
    withCallingHandlers(
      tryCatch(EpiEstim::estimate_R(tmp, method = "si_from_sample",config=siConfig,si_sample = yMatrix), error = stop), warning= function(w) {
        warn <<- w$message
        invokeRestart("muffleWarning")
      })
  # Align each estimate to the end of its window and normalise column names
  # (e.g. "Mean(R)" -> "Rt.Mean", "Median(R)" -> "Rt.Quantile.0.5").
  tmp5 = tmp4$R %>% mutate(seq_id=t_end, errors=NA, `Rt.window`=window) #warn)
  tmp5 = tmp5 %>% rename_with(.cols = contains("(R)"),.fn=function(x) paste0("Rt.",stringr::str_remove(x,fixed("(R)")))) %>%
    rename(`Rt.Quantile.0.5` = Rt.Median)
  tmp6 = simpleTimeseries %>% dplyr::left_join(tmp5, by="seq_id")
  return(tmp6 %>% select(-seq_id))
}
# plotProportionEstimate = function(simpleTimeseries, mapping = aes(), ...) {
#
# simpleTimeseries = simpleTimeseries %>% ensureExists("Proportion.Quantile.0.5", orElse = estimateProportions(simpleTimeseries,...))
# # We are going to pretend there is just one
# simpleTimeseries
# tmp2 = tmp %>% filter(date <= max(date)-1) %>% mutate(
# binom::binom.confint(Negative,n,method="wilson")
# )
#
# ggplot(estimate,aes(x=date,y=fit,ymin=lo,ymax=hi))+geom_ribbon(alpha=0.3)+geom_line(colour="blue")+
# geom_point(data=tmp2,mapping=aes(x=date,y=mean),inherit.aes = FALSE)+
# geom_errorbar(data=tmp2,mapping=aes(x=date,ymin=lower,ymax=upper),inherit.aes = FALSE)+
# scale_y_continuous(trans = "logit")
# }
## Weekly wrappers - doubled up with jepidemic
# takes a line list of patient admissions and estimates weekly rates based on
# a quasi-poisson model fitted to count data using local regression.
# expects admissions to contain an admission_week column defining the (numeric)
# week of admission; returns a daily-resolution tibble of rate quantiles.
estimateWeeklyRate = function(admissions, ... ,nn=0.2,deg=2) {
  admissionCounts = admissions %>% group_by(admission_week) %>% count()
  fit = locfit::locfit(n~locfit::lp(admission_week,nn=nn,deg=deg),data = admissionCounts,family="qpoisson")
  # evaluate daily (1/7 of a week) across the observed range
  weeks = seq(min(admissionCounts$admission_week),max(admissionCounts$admission_week),by = 1/7)
  tmp = preplot(fit,newdata=weeks,se.fit = TRUE,band="local")
  # NOTE: `t` here is the locfit back-transform (tmp$tr) and shadows base::t;
  # the t(qnorm(...)) calls below apply the inverse link, not a transpose.
  t = tmp$tr
  # NOTE(review): .weeks_to_date and .opt are helpers defined elsewhere in the
  # package - .opt presumably guards the optional confidence bounds; confirm.
  tibble(
    admission_week = weeks,
    admission_date = .weeks_to_date(weeks),
    lower = .opt(t(qnorm(0.05,tmp$fit,tmp$se.fit))),
    median = t(qnorm(0.5,tmp$fit,tmp$se.fit)),
    upper = .opt(t(qnorm(0.95,tmp$fit,tmp$se.fit)))
  )
}
## Date utility ----
# Guess the sampling interval (in days) of a set of dates as the greatest
# common divisor of the gaps between successive distinct dates.
.day_interval = function(dates) {
  dates = sort(unique(dates))
  # Too few observations to infer an interval reliably; assume daily.
  if (length(dates) < 4) return(1)
  .gcd(diff(dates))
}
# Greatest common divisor of two non-negative numbers (iterative Euclid).
.gcd2 = function(a, b) {
  while (b != 0) {
    remainder = a %% b
    a = b
    b = remainder
  }
  a
}
# Greatest common divisor of all values passed (vectors and/or scalars).
.gcd <- function(...) {
  Reduce(.gcd2, c(...))
}
# convert a date column to a numeric count of intervals from day_zero
# the default value for day_zero is a Sunday at the start of the covid outbreak.
.date_to_time = function(dates, interval=1) {
  day_zero = as.Date(getOption("day_zero","2019-12-29"))
  elapsed_days = as.numeric(dates - day_zero)
  # The +interval/100 nudge guards against floating point values landing just
  # below an integer boundary before flooring.
  out = floor(elapsed_days + interval/100)/interval
  # Record the interval so downstream conversions can recover it.
  attr(out,"interval") = interval
  out
}
# convert a time column of intervals from day_zero back to a date
.time_to_date = function(times, interval=NA) {
  day_zero = as.Date(getOption("day_zero","2019-12-29"))
  # Fall back to the interval recorded on the vector by .date_to_time().
  if (is.na(interval)) interval = attr(times,"interval")
  # The +interval/100 nudge mirrors .date_to_time's floating point guard.
  day_zero + floor(times*interval + interval/100)
}
# Generate the complete regular sequence of time points spanning `times`,
# optionally resampled at a different interval, preserving the original
# interval metadata.
.full_seq_times = function(times, interval=NA) {
  source_interval = attr(times,"interval")
  if (is.null(source_interval)) stop("the original timeseries has lost its metadata")
  if (is.na(interval)) interval = source_interval
  # Step size is expressed in units of the source interval.
  out = seq(min(times), max(times), by = interval/source_interval)
  attr(out,"interval") = source_interval
  out
}
# turn a random set of dates into an evenly spaced set separated by an interval that makes sense given the data
# intervals will be inside the data
# turn a random set of dates into an evenly spaced sequence of period-start
# dates separated by an interval that makes sense given the data.
# intervals will be inside the data.
.full_seq_dates = function(dates, interval=.day_interval(dates), truncate_partials = FALSE) {
  times = .date_to_time(dates,interval)
  # the +1/interval - 1 here ensures we have a full final interval as defined by the
  # dates. if interval is one this resolves to the max period
  # otherwise it depends on the day of the week of the largest date - a largest date on a Saturday completes a week beginning on a Sunday.
  if (truncate_partials) time_seq = seq(ceiling(min(times)),floor(max(times)+1/interval)-1,1)
  # the +/-0.01 nudges guard against floating point rounding at period boundaries
  else time_seq = seq(ceiling(min(times-0.01)),floor(max(times+0.01)),1)
  date_seq = .time_to_date(time_seq, interval)
  return(date_seq)
}
# TODO: test this a bit.
# Checks whether each of `dates` falls within the range covered by
# `full_seq_dates`, where each entry of full_seq_dates marks the START of a
# period of length `interval` days (so the range extends one full period past
# the last start date).
.within_sequence = function(dates, full_seq_dates, interval = .day_interval(full_seq_dates)) {
  query_times = .date_to_time(dates, interval)
  range_times = .date_to_time(full_seq_dates, interval)
  query_times >= min(range_times) & query_times < max(range_times) + 1
}
# floor to intervals with day_zero as the reference point:
# makes a set of dates line up to the lower end of their containing period
.floor_sequence = function(dates, interval) {
  times = .date_to_time(dates,interval)
  # NOTE(review): no interval is passed to .time_to_date here, so this relies
  # on the "interval" attribute set by .date_to_time surviving the `+` and
  # floor() arithmetic - confirm this holds for all inputs.
  return(.time_to_date(floor(times+1/100)))
}
#.full_seq_dates(c(Sys.Date(),Sys.Date()-7,Sys.Date()-21,Sys.Date()-28))
#.day_interval(Sys.Date()+c(4,8,24))
#.full_seq_dates(Sys.Date()+c(4,8,24))
## .specification_from_formula( formula = ~ date + age + gender + region )
## .specification_from_formula( formula = ~ date + reported(report_date) + age + gender + region )
## .specification_from_formula( formula = class(variant) ~ date + reported(report_date) + age + gender + region )
## .specification_from_formula( formula = n ~ specimen_date + age + gender + region )
## .specification_from_formula( formula = n ~ non_date(something) + specimen_date + age + gender + region )
## .specification_from_formula( formula = count() ~ date() + age + gender + region )
## .specification_from_formula( formula = class(variant) + count(n) ~ date() + age + gender + region )
## .specification_from_formula( formula = class(variant) + count(n) ~ date(specimen) + age + gender + region )
## .specification_from_formula( formula = growth.rate() + Rt() + class(variant) + count(n) ~ date(specimen) + age + gender + region )
## parse$observations = named_list_lhs
## parse$predictors = named_list_rhs
## parse$groups = unnamed_list_rhs
## Formula utility ----
# rhs of a formula, returned as a string or (with as_formula=TRUE) a
# one sided formula.
.rhs = function(formula, as_formula=FALSE) {
  parts = as.character(formula)
  # A one-sided formula deparses to c("~", rhs); two-sided to c("~", lhs, rhs).
  rhs_chr = if (length(parts) == 2) parts[[2]] else parts[[3]]
  if (as_formula) as.formula(paste0("~", rhs_chr)) else rhs_chr
}
# lhs of a formula, returned as a string or (with as_formula=TRUE) a
# one sided formula; NULL when the formula has no lhs.
.lhs = function(formula, as_formula=FALSE) {
  parts = as.character(formula)
  # A one-sided formula (length 2 when deparsed) has no lhs at all.
  if (length(parts) == 2) return(NULL)
  lhs_chr = parts[[2]]
  if (as_formula) as.formula(paste0("~", lhs_chr)) else lhs_chr
}
# combine a lhs and a rhs into a single formula; either side may be given as
# a string or as a (one sided) formula, and lhs may be NULL.
.join_sides = function(lhs, rhs) {
  # Normalise formula inputs to their rhs string representation.
  if (is_formula(rhs)) rhs = .rhs(rhs)
  if (is_formula(lhs)) lhs = .rhs(lhs)
  if (is.null(lhs)) {
    as.formula(paste0("~", rhs))
  } else {
    as.formula(sprintf("%s ~ %s", lhs, rhs))
  }
}
# Update a formula using a new one, treating rhs and lhs in the same way.
# S3 generic; dispatches on formula / epimetadata / epi_ts / epi_ll below.
.update = function(x, ...) {
  UseMethod(".update",x)
}
# Update both sides of formula `x` using `new`, applying stats::update() to
# each side independently (update() normally only treats the rhs sensibly).
# BUGFIX: removed a leftover browser() call that dropped into the debugger on
# every invocation in an interactive session.
.update.formula = function(x, new) {
  # switch rhs to lhs
  x_rhs = .rhs(x,as_formula=TRUE)
  new_rhs = .rhs(new,as_formula=TRUE)
  update_rhs = update(x_rhs,new_rhs)
  if(is.null(.lhs(x))) {
    # If the original lhs is empty
    if(is.null(.lhs(new))) {
      # the new lhs is also empty
      update_lhs = NULL
    } else {
      # the updated lhs will be the new lhs, except that in formulae like
      # new = x + . ~ y
      # the bare `.` placeholder is problematic as it does not get removed
      # by update(), so strip it out of the deparsed lhs.
      new_lhs = .lhs(new,as_formula=TRUE)
      if(any(all.vars(new_lhs)==".")) {
        tmp = .lhs(new,as_formula=FALSE) # this gets the LHS as a character.
        tmp = stringr::str_remove(tmp[[1]],"(\\s\\+\\s)?\\.(\\s\\+\\s)?")
        new_lhs = as.formula(paste0("~",tmp))
      }
      update_lhs = new_lhs
    }
  } else {
    # the original lhs is not empty: delegate to update() for the lhs too
    x_lhs = .lhs(x,as_formula=TRUE)
    new_lhs = .lhs(new,as_formula=TRUE)
    if (is.null(new_lhs)) {
      update_lhs = NULL
    } else {
      update_lhs = update(x_lhs,new_lhs)
    }
  }
  # Recombine; both updates were computed as one-sided (rhs) formulae.
  return(.join_sides(update_lhs,update_rhs))
}
# update the formula stored in an epimetadata object, preserving type/interval
.update.epimetadata = function(x, new) {
  formula = x$formula
  as.epimetadata(.update.formula(formula, new), type=x$type, interval = x$interval)
}
# update the metadata formula attached to an epi time series
.update.epi_ts = function(x, new) {
  epi = x %>% get_meta()
  return(x %>% set_meta(.update.epimetadata(epi,new)))
}
# update the metadata formula attached to an epi line list
.update.epi_ll = function(x, new) {
  epi = x %>% get_meta()
  return(x %>% set_meta(.update.epimetadata(epi,new)))
}
## Metadata utility ----
# Parse a formula into a specification data frame with columns:
#   side    - "lhs" or "rhs"
#   value   - the column as a symbol
#   mapping - the declared role (e.g. "date", "count", "class"), or NA for
#             plain grouping / incidental columns.
# Terms of the form mapping(column) declare a role; bare terms have no mapping.
.specification_from_formula = function(formula) {
  if (is.null(formula)) return(NULL)
  form_chr = as.character(formula)[-1]
  # a one sided formula: treat the single part as the rhs
  if (length(form_chr) == 1) form_chr = c("",form_chr)
  names(form_chr) = c("lhs","rhs")
  # split each side into its `+` separated terms
  form_chr = form_chr %>%
    purrr::map(stringr::str_split,fixed("+"),n=Inf) %>%
    purrr::flatten() %>%
    purrr::map(stringr::str_trim)
  # extract mapping name and mapped column from each term; a bare `mapping()`
  # uses the mapping name itself as the column name
  form_df = form_chr %>% enframe() %>% unnest(c(value)) %>% mutate(
    mapping = value %>% stringr::str_extract("(.*)\\(.*\\)") %>% stringr::str_remove("\\(.*\\)"),
    mapped = value %>% stringr::str_remove("(.*)\\(") %>% stringr::str_remove("\\)") %>% stringr::str_remove_all("`"),
    value = ifelse(mapped == "", mapping, mapped)
  ) %>%
    select(-mapped) %>%
    rename(side = name) %>%
    mutate(value = lapply(value,as.symbol))
  if (!any(form_df$mapping=="date",na.rm = TRUE)) {
    # no explicit date(...): the first rhs term, if unmapped, is assumed to be
    # the date column
    form_df = form_df %>% group_by(side) %>% mutate(mapping = replace(mapping,is.na(mapping) & side == "rhs" & !is.na(lag(mapping,default = "")),"date")) %>% ungroup()
    if (!any(form_df$mapping=="date",na.rm = TRUE)) stop("No date column identified. Either date must be first term on the rhs or specifically named date(...)")
  }
  # This will pick up a value only if there is at least one term on the lhs (and it is not already named)
  if (!any(form_df$mapping=="count",na.rm = TRUE)) {
    form_df = form_df %>% group_by(side) %>% mutate(mapping = replace(mapping,is.na(mapping) & side == "lhs" & !is.na(lag(mapping,default = "")),"count")) %>% ungroup()
  }
  # BUGFIX: the original used `"..." + formula` which is not valid R (no string
  # concatenation with `+`) and would itself error; pass parts to stop() instead.
  if (any(duplicated(na.omit(form_df$mapping)))) stop("duplicate mappings specified in formula: ", format(formula))
  class(form_df) = c("specification",class(form_df))
  return(form_df)
}
# convert a specification back into a formula
.formula_from_specification = function(specification) {
  specification %>% mutate(
    label = sapply(value,as_label),
    term = case_when(
      # plain column: use its name verbatim
      is.na(mapping) ~ label,
      # mapping named after the column itself: shorthand form e.g. date()
      mapping == label ~ paste0(mapping,"()"),
      # otherwise the explicit mapping(column) form
      TRUE ~ paste0(sprintf("%s(%s)",mapping,label)))
  ) %>% group_by(side) %>%
    summarise(term = paste0(term,collapse=" + ")) %>%
    # NOTE(review): relies on summarise emitting groups in sorted order
    # ("lhs" before "rhs") to produce `lhs ~ rhs` - confirm both sides are
    # always present and ordering holds.
    summarise(formula = paste0(term,collapse=" ~ ")) %>%
    pull(formula) %>% as.formula()
}
# construct a list of utility accessor functions from a specification object.
.mapper = function(x,...) {
  v = x
  return(list(
    # unmapped rhs terms: plain stratification/grouping columns
    grps = v %>% filter(side=="rhs" & is.na(mapping)) %>% pull(value),
    # the single column mapped as date() on the rhs
    date = v %>% filter(side=="rhs" & mapping=="date") %>% pull(value) %>% `[[`(1),
    # unmapped lhs terms carried through without a declared role
    incidentals = v %>% filter(side=="lhs" & is.na(mapping)) %>% pull(value),
    # column symbol for a mapping on either side, or NULL if absent
    get = function(type) {
      tmp = v %>% filter(mapping==type) %>% pull(value)
      if (length(tmp) == 0) return(NULL)
      tmp[[1]]
    },
    # column symbol for a mapping on the rhs only, or NULL if absent
    predictor = function(type) {
      tmp = v %>% filter(side=="rhs" & mapping==type) %>% pull(value)
      if (length(tmp) == 0) return(NULL)
      tmp[[1]]
    },
    # column symbol for a mapping on the lhs only, or NULL if absent
    observation = function(type="count") {
      tmp = v %>% filter(side=="lhs" & mapping==type) %>% pull(value)
      # can subst NULL using !! and it behaves as expected in ggplot and tidyselect
      if (length(tmp) == 0) return(NULL)
      tmp[[1]]
    },
    # predicates for the presence of a mapping on the lhs / rhs respectively
    has_observation = function(type="count") {
      nrow(v %>% filter(side=="lhs" & mapping==type)) > 0
    },
    has_predictor = function(type) {
      nrow(v %>% filter(side=="rhs" & mapping==type)) > 0
    }
  ))
}
# .var_from_rhs = function(formula, match="date") {
# if (is.null(formula)) return(NULL)
# v = .specification_from_formula(formula)
# sym = v %>% filter(side=="rhs" & mapping == match) %>% pull(value)
# if (length(sym)==0) return(NULL)
# return(sym)
# }
#
# .vars_from_rhs = function(formula) {
# if (is.null(formula)) return(NULL)
# v = .specification_from_formula(formula)
# sym = v %>% filter(side=="rhs" & !is.na(mapping)) %>% select(mapping,value) %>% deframe()
# return(sym)
# }
#
# .grps_from_rhs = function(formula) {
# if (is.null(formula)) return(NULL)
# v = .specification_from_formula(formula)
# sym = v %>% filter(side=="rhs" & is.na(mapping)) %>% pull(value)
# return(sym)
# }
#
# .value_from_lhs = function(formula) {
# if (is.null(formula)) return(NULL)
# # formula = n ~ date + type(cls) + report(spec) + age+gender+region+code
# v = all.vars(rlang::f_lhs(formula))
# if (length(v) > 1) stop("Only zero or one variable on lhs allowed, defining the value (e.g. case count)")
# if (length(v) == 0) return(NULL)
# return(as.symbol(v))
# }
# .rdeframe = function(form_df, ...) {
# vars = ensyms(...)
# if (length(vars) == 1) {
# return(form_df %>% pull(!!vars[[1]]) %>% unlist())
# }
# form_df %>%
# rename(.name = !!vars[[1]]) %>%
# mutate(.name = ifelse(is.na(.name),"na",as.character(.name))) %>%
# group_by(.name) %>% group_modify(function(d,g,...) {
#
# tibble(.value = list(.rdeframe(d,!!!vars[-1])))
# }) %>% deframe()
# }
#
# .map
#
# tmp = .rdeframe(form_df, side, mapping, value)
## metadata object ----
# generic constructor for epimetadata objects
as.epimetadata = function(x, ...) {
  UseMethod("as.epimetadata", x)
}
# build metadata from a formula specification (e.g. count(n) ~ date + age)
as.epimetadata.formula = function(x, type, interval = 1, ...) {
  specification = .specification_from_formula(x)
  .make_metadata(x, specification, type, interval)
}
# build metadata from an already parsed specification data frame
as.epimetadata.specification = function(x, type, interval = 1, ...) {
  formula = .formula_from_specification(x)
  .make_metadata(formula, x, type, interval)
}
# Bundle everything needed to interpret an epi data frame into an
# "epimetadata" object: the parsed specification, its formula form, the
# sampling interval, the declared type and a set of accessor helpers.
.make_metadata = function(formula, specification, type, interval) {
  contents = list(
    specification = specification,
    formula = formula,
    interval = interval,
    type = type,
    # pre-built accessor closures over the specification
    m = .mapper(specification)
  )
  structure(contents, class = "epimetadata")
}
# Return a copy of the metadata with its `interval` (days) field replaced.
.set_interval = function(meta, interval) {
  updated = meta
  updated$interval = interval
  updated
}
## generic epi data functions ----
# Shared constructor for typed epi dataframes: validate `x` against the
# metadata, assign a stable per-row .id, attach the metadata attribute, and
# prepend the epi class(es) given in `cls`.
.make_epidata = function(x, meta, cls) {
  .check_conformant(x, meta)
  # row ids are (re)assigned on an ungrouped frame so they are globally unique
  x = x %>% ungroup() %>% mutate(.id = row_number())
  x = x %>% set_meta(meta)
  class(x) = c(cls, class(x))
  return(x)
}
# Infer whether `x` should become a time series (epi_ts) or a line list
# (epi_ll) and construct the corresponding typed object.
.guess_type = function(x, meta) {
  # already typed: return unchanged
  if (any(c("epi_ts", "epi_ll") %in% class(x))) return(x)
  date = meta$m$date
  grps = meta$m$grps
  cls = meta$m$observation("class")
  if (meta$m$has_observation("count")) {
    # a count observation implies aggregated data, i.e. a time series
    return(as.epi_ts.data.frame(x, meta$formula, interval = meta$interval))
  }
  if (meta$type == "ts") {
    return(as.epi_ts.data.frame(x, meta$formula, interval = meta$interval))
  }
  # Heuristic: in a time series each (group, class, date) combination appears
  # exactly once, so the mean squared excess row count is ~0 ...
  grpwise_count_R2 = x %>% group_by(!!!grps, !!cls, !!date) %>% count() %>% pull(n) %>% magrittr::subtract(1) %>% magrittr::raise_to_power(2) %>% mean()
  # ... and the observed dates nearly cover the full regular date sequence.
  # (was `.full_seq_dates(..., interval)`: `interval` is not defined in this
  # scope - the metadata interval is what was meant)
  full = .full_seq_dates(x %>% pull(!!date), meta$interval)
  incomplete_ts = x %>% group_by(!!!grps, !!cls) %>% summarise(matched = sum(!!date %in% full)) %>% mutate(missing = length(full) - matched, total = length(full)) %>% ungroup() %>% summarise(prop = sum(missing) / sum(total)) %>% pull(prop)
  if (incomplete_ts < 0.05 && grpwise_count_R2 < 0.01) {
    # leftover `browser()` debug statement removed here
    as.epi_ts.data.frame(x, meta$formula, interval = meta$interval)
  } else {
    as.epi_ll.data.frame(x, meta$formula)
  }
}
# Verify that `x` is a data frame and contains every column named in the
# metadata specification; reports each missing column then stops.
.check_conformant = function(x, meta) {
  if (!is.data.frame(x)) stop("epi data must be a data frame")
  missing_any = FALSE
  for (col in meta$specification$value) {
    if (!(as_label(col) %in% colnames(x))) {
      message("No column found: ", as_label(col))
      missing_any = TRUE
    }
  }
  if (missing_any) {
    stop("Input is not conformant to specification provided: ", meta$formula)
  }
}
# Retrieve the epimetadata attached to an epi dataframe (NULL if absent).
get_meta = function(x) {
  attr(x, "epi")
}
# Attach epimetadata to a dataframe as the "epi" attribute, after checking
# the frame conforms to the metadata's column specification.
set_meta = function(x, metadata) {
  .check_conformant(x, metadata)
  attr(x, "epi") = metadata
  return(x)
}
# TRUE if `x` carries a line-list class marker ("epi_ll" or "std_ll").
is.epi_ll = function(x, ...) {
  inherits(x, "epi_ll") || inherits(x, "std_ll")
}
# TRUE if `x` carries a time-series class marker ("epi_ts" or "std_ts").
is.epi_ts = function(x, ...) {
  inherits(x, "epi_ts") || inherits(x, "std_ts")
}
## Line list object ----
# Generic: convert `x` into an epi_ll line list (one row per event).
as.epi_ll = function(x, ...) {
  UseMethod("as.epi_ll", x)
}
# Fallback method: no conversion to a line list is defined for this class.
as.epi_ll.default = function(x, ...) {
  cls_desc = paste0(class(x), collapse = ", ")
  stop("Can't make a epi line list out of a ", cls_desc)
}
# Convert a dataframe into an epi_ll line list; `formula` declares the column
# mappings (date, class, groupings). Data with a "count" mapping is rejected -
# counted data should become a time series via as.epi_ts() instead.
# (`...` added for S3 method/generic signature consistency.)
as.epi_ll.data.frame = function(x, formula, ...) {
  # interval in a line list defaults to 1 (daily resolution)
  meta = as.epimetadata(formula, type = "ll")
  m = meta$m
  cls = m$observation("class")
  multinom = !is_null(cls)
  out_class = "epi_ll"
  # a class mapping means multinomial outcomes
  if (multinom) out_class = c("epi_multi", out_class)
  if (m$has_observation("count")) stop("Attempting to make a line list out of a object with a 'count' column. Did you mean to use as.epi_ts()?")
  return(.make_epidata(x, meta, out_class))
}
# Expand an aggregated time series back into a line list: each row is repeated
# `count` times (one row per event). When `jitter` is TRUE and the interval is
# longer than a day, event dates are spread uniformly across the interval.
# (`...` added for S3 method/generic signature consistency.)
as.epi_ll.epi_ts = function(x, jitter = FALSE, ...) {
  meta = x %>% get_meta()
  m = meta$m
  interval = meta$interval
  cls = m$observation("class")
  grps = m$grps
  date = m$date
  count = m$observation("count")
  multinom = !is_null(cls)
  out_class = "epi_ll"
  if (multinom) out_class = c("epi_multi", out_class)
  if (is.null(count)) stop("count column must be present")
  # replicate each row's .id `count` times, then drop the count column
  y = x %>% group_by(!!!grps, !!date, !!cls) %>% group_modify(function(d, g, ..) {
    join = unlist(map2(d %>% pull(.id), d %>% pull(!!count), ~ rep(.x, .y)))
    return(d %>% select(-count) %>% inner_join(tibble(.id = join), by = ".id") %>% select(-.id))
  })
  y = y %>% ungroup() %>% mutate(.id = row_number())
  if (jitter & interval > 1) {
    # spread events uniformly over the (multi-day) interval
    y = y %>% mutate(!!date := !!date + floor(runif(nrow(y), max = interval)))
  }
  # the count mapping no longer applies once rows are per-event
  specification = meta$specification %>% filter(!(side == "lhs" & mapping %in% c("count")))
  meta = as.epimetadata(specification, type = "ll")
  return(.make_epidata(y, meta, out_class))
}
# One-line textual summary of a line list plus its defining formula.
summary.epi_ll = function(x, ...) {
  epi = x %>% get_meta()
  m = epi$m
  # NOTE(review): reports length(m$grps)+1 as the number of line lists rather
  # than the number of distinct group combinations - confirm the intent.
  cat(sprintf("linelist: %1.0f line list(s), %1.0f entries", length(m$grps) + 1, nrow(x)), "\n")
  print(epi$formula, showEnv = FALSE)
}
# Print method: show the summary header, then fall through to the underlying
# (tibble/data.frame) print method.
print.epi_ll = function(x, ...) {
  summary(x, ...)
  NextMethod(x, ...)
}
# Glimpse method: show the summary header, then the underlying glimpse output.
glimpse.epi_ll = function(x, ...) {
  summary(x, ...)
  NextMethod(x, ...)
}
## Incidence ts object ----
# Generic: convert `x` into an epi_ts incidence time series.
as.epi_ts = function(x, ...) {
  UseMethod("as.epi_ts", x)
}
# Fallback method: no conversion to a time series is defined for this class.
as.epi_ts.default = function(x, ...) {
  cls_desc = paste0(class(x), collapse = ", ")
  stop("Can't make a epi time series out of a ", cls_desc)
}
# Convenience method: build an epi_ts directly from a Date vector plus counts
# (and an optional class vector; tibble() silently drops a NULL class column).
as.epi_ts.Date = function(x, count, class = NULL, ...) {
  x = tibble(date = x, count = count, class = class)
  formula = count() ~ date()
  if (!is.null(class)) formula = .update(formula, class() + . ~ .)
  # NOTE(review): was `as.epidata.data.frame`, which is not defined anywhere in
  # this file; `as.epi_ts.data.frame` matches the naming used everywhere else.
  as.epi_ts.data.frame(x, formula, ...)
}
# Map a scalar NA to NULL, passing any other value through unchanged.
# (Was `ifelse(is.na(x), NULL, x)`, which can never yield NULL and errors on
# NA input because NULL cannot be assigned into a vector element.)
.null_na = function(x) {
  if (length(x) == 1 && suppressWarnings(is.na(x))) NULL else x
}
# Construct an epi_ts from a dataframe. The `interval` (in days) is inferred
# from the observed dates when not asserted; `rectangular` and `dates` are
# forwarded to the conversion workhorse. (`...` not taken: signature preserved.)
as.epi_ts.data.frame = function(x, formula, interval = NA, rectangular = FALSE, dates = NULL) {
  meta = as.epimetadata(formula, type = "ts", interval = interval)
  date = meta$m$date
  # Determine the frequency of the time series:
  # either asserted, or by reduction from the data.
  if (is.na(meta$interval)) meta$interval = .day_interval(x %>% pull(!!date))
  # was hard-coded `rectangular = FALSE`, silently ignoring the argument
  .convert_dataframe(x, meta, rectangular = rectangular, verbose = TRUE, dates = dates)
}
# Aggregate a line list into a time series by counting events per date (and
# class/group) combination. `formula` names the new count column via its
# "count" mapping; `dates` may assert the output date grid.
as.epi_ts.epi_ll = function(x, formula = count() + . ~ ., interval = 1, dates = NULL) {
  meta = x %>% get_meta()
  new_meta = .update(meta, formula)
  m = new_meta$m
  new_count = m$observation("count")
  # what dates are we looking at?
  orig_dates = x %>% pull(!!m$date)
  if (is.null(dates)) dates = orig_dates
  dates = .full_seq_dates(dates, interval, truncate_partials = TRUE)
  # was `mutate(!!new_count == 1)` - a comparison producing an unnamed logical
  # column; the intent is to assign a unit count to every line-list row so the
  # conversion can sum them.
  y = .convert_dataframe(x %>% mutate(!!new_count := 1), new_meta, rectangular = TRUE, verbose = FALSE, dates = dates)
  return(y %>% set_meta(new_meta))
}
# Workhorse converting an arbitrary dataframe into a typed epi time series:
# floors dates onto the interval grid, collapses duplicate rows by summing
# counts, completes missing date (and class) combinations with zero counts on
# a per-group basis, and wraps the result via .make_epidata().
.convert_dataframe = function(x, meta, rectangular = FALSE, verbose = FALSE, dates = NULL) {
  if (nrow(x) < 2) stop("need multiple time points for a timeseries")
  interval = meta$interval
  m = meta$m
  date = m$date
  cls = m$observation("class")
  value = m$observation("count")
  grps = m$grps
  out_class = c("epi_ts")
  multinom = !is_null(cls)
  if (multinom) out_class = c("epi_multi", out_class)
  dates_given = !is.null(dates)
  orig_dates = x %>% pull(!!date)
  if (!dates_given) dates = .full_seq_dates(orig_dates, interval)
  # make sure data dates are within the range of the desired interval dates
  if (interval > 1) {
    # this is good for linelist type data where we want to make sure we have whole intervals,
    # not so good for data already in time series which may "finish" on the first date of an interval.
    x = x %>% filter(.within_sequence(!!date, dates, interval))
    x = x %>% mutate(!!date := .floor_sequence(!!date, interval))
  }
  # check count values are unique for each combination of date, grouping, and multinom class
  tmp = x %>% group_by(!!!grps, !!cls, !!date) %>% count()
  if (any(tmp$n > 1)) {
    # leftover `browser()` debug statement removed here
    # TODO have to reconsider this as count is a very optional column of time series but others must be
    if (verbose) message("Input dataframe has more than one row per date (and class combination), which may be intentional. Combining (class) counts in multiple rows by summation, any other observations will be lost.")
    if (!is.null(value)) {
      # was `tmp %>% pull(!!value)`: `tmp` only contains the grouping columns
      # and `n`, so the count column has to be read from `x` itself.
      if (any(is.na(x %>% pull(!!value)))) warning("Count column contains some NA values. The combined count will be NA for these rows.")
      x = x %>% group_by(!!!grps, !!cls, !!date) %>% summarise(!!value := sum(!!value))
    }
    # since group by summarise steps will remove all other observations we need to make sure
    # that the metadata is updated with the lhs including only class and count.
    # NOTE(review): the filter below *removes* the class/count lhs mappings
    # rather than keeping only them, which seems to contradict the comment
    # above - confirm the intended behaviour.
    specification = meta$specification %>% filter(!(side == "lhs" & mapping %in% c("class", "count")))
    meta = as.epimetadata(specification, type = meta$type, interval = meta$interval)
  }
  # ensure completeness of dates and (optionally) class on a per group basis.
  # step 1: set up the complete combination of dates and classes (if present)
  if (multinom) {
    # ensure counts are complete for each of the outcome classes as well as for each date.
    clsses = tibble(x) %>% pull(!!cls) %>% unique() %>% sort()
    join_cols = c(as_label(date), as_label(cls))
  } else {
    clsses = NULL
    join_cols = as_label(date)
  }
  # step 2: generate a crossing dataframe of all combinations of dates and optionally classes.
  # This is the version for rectangular time series, where a single source of data contains
  # the full range of time points for all sources - i.e. there is known to be no differential
  # reporting delay.
  lhs = .dates_and_classes(date, dates, cls, clsses)
  # step 3: left join the crossing dataframe with the data and fill missing counts with zero,
  # ensuring that the result is complete.
  x = tibble(x) %>%
    group_by(!!!grps) %>%
    group_modify(function(d, g, ...) {
      # do a groupwise join. The lhs is either all dates (or all dates and class
      # levels), or - when not rectangular and no dates were asserted - a
      # group-wise lhs spanning only the date range present in this group.
      if (!rectangular & !dates_given) {
        tmp = d %>% pull(!!date)
        dates = .full_seq_dates(tmp, interval)
        lhs = .dates_and_classes(date, dates, cls, clsses)
      }
      # do the fill for missing counts.
      d = lhs %>%
        left_join(d, by = join_cols)
      if (!is.null(value)) {
        # TODO: what about other observations?
        d = d %>% mutate(!!value := ifelse(is.na(!!value), 0, !!value))
      }
      return(d)
    }) %>% ungroup()
  if (!".id" %in% colnames(x)) {
    # add an .id column only if there is not one already.
    x = x %>% mutate(.id = row_number())
  }
  return(.make_epidata(
    as_tibble(x),
    meta,
    out_class))
}
# Build the "left hand side" grid used for completion joins: every date crossed
# with every class level (when class levels are present), otherwise just dates.
.dates_and_classes = function(date, dates, cls, clsses) {
  # return the grid directly; previously it was only returned as the
  # (invisible) value of an assignment to a local variable
  if (!is.null(clsses)) {
    crossing(!!date := dates, !!cls := clsses)
  } else {
    tibble(!!date := dates)
  }
}
# One-line textual summary of a time series: number of distinct group
# combinations, interval, covered date range, and total rows.
summary.epi_ts = function(x, ...) {
  epi = x %>% get_meta()
  m = epi$m
  dates = x %>% pull(!!(m$date)) %>% range()
  grpCount = x %>% select(!!!m$grps) %>% distinct() %>% nrow()
  # NOTE(review): the exclusive end is reported as max(date)+1+interval, which
  # looks one day too far for interval >= 1 - confirm the intended convention.
  cat(sprintf("%1.0f timeseries, with interval %s day(s), from %s up to (but not including) %s, %1.0f total records", grpCount, epi$interval, dates[[1]], dates[[2]] + 1 + epi$interval, nrow(x)), "\n")
  print(epi$formula, showEnv = FALSE)
}
# Print method: show the summary header, then fall through to the underlying
# (tibble/data.frame) print method.
print.epi_ts = function(x, ...) {
  summary(x, ...)
  NextMethod(x, ...)
}
# Glimpse method: show the summary header, then the underlying glimpse output.
glimpse.epi_ts = function(x, ...) {
  summary(x, ...)
  NextMethod(x, ...)
}
## standardised formats ----
# force the rename of all observations and predictors, add in .time and .grpId (and optionally .subGrpId if multinomial) columns
# Force-rename all mapped observations/predictors to their canonical names and
# add .time and .grpId (and .subGrpId when multinomial) columns, producing a
# "standardised" grouped frame (classed std-like epi_multi / epi_simple).
.normalise = function(x, interval = NA, ...) {
  meta = x %>% get_meta()
  m = meta$m
  date = m$date
  cls = m$observation("class")
  multinom = !is.null(cls)
  grps = m$grps
  y = x
  for (map in meta$specification$mapping) {
    # copy each mapped column under its canonical name from the specification
    if (!is.na(map)) {
      value = m$get(map)
      y = y %>% mutate(!!map := !!value)
    }
  }
  # NOTE(review): `all` is assigned but never used below - candidate for removal.
  all = meta$specification$value
  # `y$date` exists because the rename loop above created the canonical column
  if (is.na(interval)) interval = .day_interval(y$date)
  y = y %>%
    select(all_of(na.omit(meta$specification$mapping)), !!!grps, .id)
  y = y %>% group_by(!!!grps) %>% mutate(
    .grpId = cur_group_id(),
    .time = .date_to_time(date, interval),
  ) %>% group_by(!!!grps, .grpId)
  if (multinom) {
    # a sub-group id per (group, class) combination
    y = y %>% group_by(!!!grps, class) %>% mutate(.subGrpId = cur_group_id()) %>% group_by(!!!grps, .grpId)
    class(y) = c("epi_multi", class(y))
  } else {
    class(y) = c("epi_simple", class(y))
  }
  return(y)
}
# Reverse .normalise(): rebuild the date column from .time when needed, rename
# canonical columns back to the user-specified names recorded in `meta`, fold
# any newly created columns into the specification as lhs observations, and
# re-type the result via .guess_type().
.denormalise = function(x, meta) {
  y = x
  # was missing: `m` is referenced below but was never bound in this function
  m = meta$m
  if (".time" %in% colnames(y)) {
    # rebuild `date` from the numeric time index when absent or partially NA
    if (!("date" %in% colnames(y))) {
      y = y %>% mutate(date = .time_to_date(.time))
    } else if (any(is.na(y$date))) {
      y = y %>% mutate(date = .time_to_date(.time))
    }
  }
  # NOTE(review): this select assumes .time is always present - confirm.
  y = y %>% select(-.time)
  for (map in meta$specification$mapping) {
    # rename canonical columns back to their denormalised names; group columns
    # will typically not have changed name
    if (!is.na(map)) {
      value = m$get(map)
      if (map %in% colnames(y)) {
        y = y %>% rename(!!value := !!map)
      }
    }
  }
  old_cols = sapply(meta$specification$value, as_label, USE.NAMES = FALSE)
  new_cols = colnames(y)[!colnames(y) %in% old_cols]
  # ignore internal dot-prefixed columns
  new_obs = new_cols %>% magrittr::extract(!stringr::str_starts(., stringr::fixed(".")))
  # all new cols are added as new observations onto the lhs
  new_cols_df = tibble(
    side = "lhs",
    value = sapply(new_obs, as.symbol, USE.NAMES = FALSE),
    mapping = new_obs
  )
  new_spec = bind_rows(
    meta$specification %>% filter(sapply(value, as_label, USE.NAMES = FALSE) %in% colnames(y)),
    new_cols_df)
  new_meta = as.epimetadata.specification(new_spec, type = meta$type, interval = meta$interval)
  y = y %>% ungroup() %>% select(any_of(old_cols), all_of(new_obs), any_of(".id"))
  return(y %>% .guess_type(new_meta))
}
## Execute a simple timeseries processing step on a standard data format ---
# Run an "epifunction" `.f` over each group of an epi dataframe.
# The frame is normalised to canonical column names first; the function's
# declared requirements (its `.require` formal, if any) are checked against
# the metadata, and the result is denormalised back afterwards.
execute_epifunction = function(x, .f, ...) {
  meta = x %>% get_meta()
  # epifunctions may declare required column mappings via a `.require` formal
  require_str = formals(.f)[[".require"]]
  input = .normalise(x)
  # leftover `browser()` debug statement removed here
  if (!is.null(require_str)) {
    requires = eval(require_str)
    if (!all(requires %in% meta$specification$mapping)) {
      warning("Input data frame does not have all the required columns:")
      warning(meta$formula)
      stop("must contain column mappings for: ", paste0(requires, collapse = ","))
    }
  }
  # execute the epifunction call group-wise, tagging the standard input class
  output = input %>% group_modify(function(d, g, ...) {
    if ("epi_ll" %in% class(x)) {
      class(d) = c("std_ll", class(d))
      return(.f(d, g = g, ...))
    } else {
      class(d) = c("std_ts", class(d))
      # TODO: informative error messages
      return(.f(d, g = g, ..., interval = meta$interval))
    }
  })
  if (!"date" %in% colnames(output)) {
    # TODO: consider autoconverting .time to date
    stop("the result of an epifunction must include a date column")
  }
  return(.denormalise(output, meta))
}
## Simple timeseries functions ----
# a set of estimators for the simple single time series situations
# the estimates target a range of outputs such as poisson rate, proportion model, growth rate, etc.
# these are aimed to be tidy but assume (and enforce) column naming conventions are adhered to
# these do not work on grouped data. they assume the input has been sanitised before hand, although should tolerate NA values.
# Guarantee that `column`(s) exist in `df`; when any are missing, delegate to
# `or_else` (a function or purrr-style lambda taking the frame) to construct
# or repair it. The default `or_else` aborts with an informative error.
ensure_exists = function(df, column, or_else = function(df) {stop("Missing column: ", column)}, ...) {
  or_else = purrr::as_mapper(or_else)
  if (all(column %in% colnames(df))) {
    df
  } else {
    or_else(df, ...)
  }
}
# Evaluate `expr`, returning NA_real_ instead of propagating any error.
.opt = function(expr) {
  tryCatch(expr, error = function(e) NA_real_)
}
# Format a (link-scale) normally distributed fit into a nested tibble column
# named `estimate` on `df`, holding the link value, its SE, back-transformed
# quantiles (via the inverse-link `t`), the model name and any error message.
.format_result = function(df, fit, se.fit, t, estimate, modelName, link, error=NA_character_) {
  # (a per-point purrr::map2 variant was tried and abandoned here)
  est =
    tibble(
      !!(paste0(link, "(x)")) := fit,
      !!(paste0("SE.", link, "(x)")) := se.fit,
      # quantiles of the normal on the link scale, back-transformed by t();
      # .opt() converts any transform failure into NA
      Quantile.0.025 = .opt(t(qnorm(0.025, fit, se.fit))),
      Quantile.0.05 = .opt(t(qnorm(0.05, fit, se.fit))),
      Quantile.0.25 = .opt(t(qnorm(0.25, fit, se.fit))),
      Quantile.0.5 = t(fit),
      Quantile.0.75 = .opt(t(qnorm(0.75, fit, se.fit))),
      Quantile.0.95 = .opt(t(qnorm(0.95, fit, se.fit))),
      Quantile.0.975 = .opt(t(qnorm(0.975, fit, se.fit))),
      model = modelName,
      error = error)
  df %>% mutate(!!estimate := est)
}
# Produce a degenerate result with a fixed mean/SE (defaulting to NA) and an
# error message, formatted identically to a successful fit so downstream code
# does not need to special-case failures.
.fixed_result = function(df, estimate, modelName, link, mean = NA_real_, se = NA_real_, error = "unknown error") {
  df %>% .format_result(fit = mean, se.fit = se, t = .inv[[link]], estimate, modelName, link, error)
}
# Attach an empty nested tibble under `estimate` (no rows, no columns).
.empty_result = function(df, estimate) {
  df %>% mutate(!!estimate := tibble())
}
# Inverse link functions used to back-transform fitted values, keyed by link
# name ("value" = identity); -Inf/Inf map to the boundary of the range.
.inv = list(
  value = function(x) x,
  log = function(x) {ifelse(x == -Inf, 0, exp(x))},
  logit = function(x) {case_when(x == -Inf ~ 0, x == Inf ~ 1, TRUE ~ 1 / (1 + exp(-x)))}
)
# This is just to format locfit results given a locfit model:
# extract predictions (fit, SE, inverse-link transform) at the rows of `df`
# and format them; on any failure fall back to a fixed NA result carrying the
# error message.
# ... could be where="fitp", or newdata=.time points....
.locfit_extract_result = function(df, model, estimate, modelName, link = "value") {
  tryCatch({
    points = preplot(model, se.fit = TRUE, band = "local", newdata = df)
    t = points$trans
    fit = points$fit
    se.fit = tryCatch({
      # interpolate over any missing standard errors
      as.vector(forecast::na.interp(points$se.fit))
    }, error = function(e) {
      rep(NA, length(fit))
    })
    df %>% .format_result(fit, se.fit, t, estimate, modelName, link)
  }, error = function(e) {
    df %>% .fixed_result(estimate, modelName, link, error = e$message)
  })
}
## Proportion estimation ----
# Generate the formula for a locfit model of the form
# `value ~ locfit::lp(.time, ...)`. `window` is interpreted either as a
# bandwidth in units of .time (h) or, when `nearestNeighbours` is TRUE,
# converted into a fraction of the observations (nn).
.locfit_formula = function(valueVar, nrowDf, window, polynomialDegree, nearestNeighbours = TRUE, ...) {
  valueVar = ensym(valueVar)
  tmp_alpha = min(window / nrowDf, 1)
  tmp_alpha_2 = min((window * 2 + 1) / nrowDf, 1)
  lpParams = list(
    nn = if (nearestNeighbours) tmp_alpha_2 else tmp_alpha, # this is given in fraction of total observations
    h = if (!nearestNeighbours) window else 0, # this is given in units of X
    deg = polynomialDegree
  )
  lpParamsText = paste(names(lpParams), lpParams, sep = "=", collapse = ", ")
  lpFormula = as.formula(paste0(as_label(valueVar), " ~ locfit::lp(.time, ", lpParamsText, ")"))
  return(lpFormula)
}
# Does `x` carry a "count" column (i.e. aggregated counts rather than a line list)?
.has_count = function(x, ...) {
  return("count" %in% colnames(x))
}
# Takes a line list or incidence count of patient admissions with a multinomial
# class label, and fits a quasi-binomial model with logit link using local
# regression. Expects a dataframe with `date` and `class` columns (an `.time`
# column is derived if absent). A multinomial class is treated either as a set
# of one-vs-others binomials (unordered class) or as a set of cumulative
# less-than-or-equal vs greater-than binomials (ordered class).
estimate_proportion = function(d, ..., interval = .day_interval(d$date), window = 28, degree = 2, quick = NA) {
  d = d %>% ensure_exists("date")
  d = d %>% ensure_exists("class")
  # convert dates to times
  d = d %>% ensure_exists(".time", or_else = ~ mutate(., .time = .date_to_time(date, interval)))
  if (is.na(quick)) {
    # use the faster aggregated-count fit for large inputs
    quick = !((.has_count(d) & sum(d$count) < 10000) | (!.has_count(d) & nrow(d) < 10000))
  }
  is_ts = .has_count(d)
  time_span = (max(d$.time) - min(d$.time)) * interval
  # prediction grid in fractional time units (converted back to days at the end)
  data_times = seq(min(d$.time), max(d$.time), 1)
  predict_times = seq(min(d$.time), max(d$.time), 1 / interval)
  cumulative = is.ordered(d$class)
  model_name = sprintf("locfit:probability:%s:%s:%1.0f*%1.0f:%1.0f", if (cumulative) "cumulative" else "binomial", if (quick) "counts" else "linelist", window, interval, degree)
  out = tibble()
  # repeat once for each class level. This is a binomial comparison (x vs not(x)) or cumulative (<=x) vs (>x)
  for (level in sort(unique(d$class))) {
    if (cumulative) {
      tmpdf = d %>% mutate(class_bool = class <= level)
      est_name = "probability.cumulative"
    } else {
      tmpdf = d %>% mutate(class_bool = class == level)
      est_name = "probability"
    }
    if (is_ts) {
      # summarise the counts
      tmpdf_quick = tmpdf %>% group_by(.time, class_bool) %>% summarise(count = sum(count), .groups = "drop")
      if (!quick) tmpdf_slow = tmpdf_quick %>% group_by(.time, class_bool) %>% group_modify(function(d, g, ..) {return(tibble(count = rep(1, d$count)))})
    } else {
      # BUGFIX: this was `} %>% {`, which spliced this block into the `if`
      # consequent - the line-list branch never ran when !is_ts, and its
      # assignments never escaped the magrittr scope. A plain `else` is meant.
      tmpdf_slow = tmpdf
      if (quick) tmpdf_quick = tmpdf %>% group_by(.time, class_bool) %>% summarise(count = n(), .groups = "drop") %>% tidyr::complete(.time = data_times, class_bool, fill = list(count = 0))
    }
    result = tibble(.time = predict_times, class = level)
    if (nrow(tmpdf) == 0) {
      # empty estimate
      out = out %>% bind_rows(result %>% .fixed_result(est_name, model_name, link = "logit", mean = NA, se = NA, error = "no data"))
    } else if (sum(tmpdf$class_bool) < degree) {
      # zero estimate
      out = out %>% bind_rows(result %>% .fixed_result(est_name, model_name, link = "logit", mean = -Inf, se = 10000, error = "all zeros"))
    } else if (sum(!tmpdf$class_bool) < degree) {
      # one estimate
      out = out %>% bind_rows(result %>% .fixed_result(est_name, model_name, link = "logit", mean = Inf, se = 10000, error = "all ones"))
    } else {
      # BUGFIX: previously the error handler assigned to `out` inside its own
      # scope (which never propagated) and contained a leftover `browser()`;
      # tryCatch now *returns* the accumulated result in both arms.
      out = tryCatch({
        if (quick) {
          # Counts-based fit. NOTE(review): this fits the observed proportion p
          # directly with no sample-size weighting; earlier weighting attempts
          # were abandoned (see history).
          tmpdf_quick = tmpdf_quick %>% group_by(.time) %>% mutate(total = sum(count), p = count / total) %>%
            filter(class_bool)
          lf_form = .locfit_formula(p, time_span, window = window, polynomialDegree = degree, nearestNeighbours = FALSE)
          # timeseries model when there are counts
          fit = locfit::locfit(lf_form,
            data = tmpdf_quick,
            family = "qbinomial", link = "logit", maxit = 5000, maxk = 5000)
        } else {
          # this is the line list version.
          lf_form = .locfit_formula(class_bool, time_span, window = window, polynomialDegree = degree, nearestNeighbours = TRUE)
          # line list model when there are no counts
          fit = locfit::locfit(lf_form,
            data = tmpdf_slow,
            family = "qbinomial", link = "logit", maxit = 5000, maxk = 5000)
        }
        tmp = result %>% .locfit_extract_result(model = fit, estimate = est_name, modelName = model_name, link = "logit")
        out %>% bind_rows(tmp)
      }, error = function(e) {
        out %>% bind_rows(result %>% .fixed_result(est_name, model_name, link = "logit", error = e$message))
      })
    }
  }
  # convert times back to dates
  out = out %>% mutate(date = .time_to_date(.time))
  # swap factor levels back in
  if (is.factor(d$class)) out = out %>% mutate(class = factor(class, levels(d$class), ordered = is.ordered(d$class)))
  return(out)
}
# takes a line list of patient admissions with a multinomial class label, and fits
# a quasi-binomial model with logit link using local regression. This expects a dataframe
# with an admission_week column and a class column. Multinomial class is either treated as a
# set of 1 versus others binomials (cumulative = FALSE) or as a set of less than or equal versus more
# than binomials (cumulative = TRUE, which assumes multinomial class is ordered)
estimateProportion = function(admissions, ..., nn = 0.2, deg = 2, cumulative = is.ordered(admissions$class)) {
  # get the output as fractional weeks - we will convert this to days later.
  weeks = seq(min(admissions$admission_week), max(admissions$admission_week), by = 1/7)
  out = tibble()
  # one binomial fit per factor level: either level-vs-others, or cumulative
  # (<= level vs > level); results are stacked into one long dataframe. (Whether
  # multinomial proportions can be treated as a set of 1-vs-others binomials is
  # debatable, but that is the approach taken here.)
  for (level in levels(admissions$class)) {
    if (cumulative) {
      tmpdf = admissions %>% mutate(class_bool = class <= level)
    } else {
      tmpdf = admissions %>% mutate(class_bool = class == level)
    }
    # was a leftover `browser()` debug statement; surface the condition instead
    if (any(is.na(tmpdf$class_bool))) warning("NA values produced when comparing class to level: ", level)
    # detect some edge cases
    if (nrow(tmpdf) == 0) {
      # data set is empty
      out = out %>% bind_rows(
        tibble(
          class = level,
          admission_week = weeks,
          admission_date = .weeks_to_date(weeks),
          lower = 0,
          median = 0,
          upper = 1
        )
      )
    } else if (!any(tmpdf$class_bool)) {
      # for a given class there is no data or all observations are negative
      out = out %>% bind_rows(
        tibble(
          class = level,
          admission_week = weeks,
          admission_date = .weeks_to_date(weeks),
          lower = 0,
          median = 0,
          upper = 0
        )
      )
    } else if (all(tmpdf$class_bool)) {
      # for a given class all the observations are positive
      out = out %>% bind_rows(
        tibble(
          class = level,
          admission_week = weeks,
          admission_date = .weeks_to_date(weeks),
          lower = 1,
          median = 1,
          upper = 1
        )
      )
    } else {
      fit = locfit::locfit(class_bool ~ locfit::lp(admission_week, nn = nn, deg = deg),
        data = tmpdf, family = "qbinomial", link = "logit")
      tmp = preplot(fit, newdata = weeks, se.fit = TRUE, band = "local")
      t = tmp$tr
      out = out %>% bind_rows(
        tibble(
          class = level,
          admission_week = weeks,
          admission_date = .weeks_to_date(weeks),
          lower = .opt(t(qnorm(0.05, tmp$fit, tmp$se.fit))),
          median = t(tmp$fit), # only because fit is normally distributed so mean=median
          upper = .opt(t(qnorm(0.95, tmp$fit, tmp$se.fit)))
        )
      )
    }
  }
  out = out %>% mutate(class = factor(class, levels(admissions$class)))
  return(out)
}
| /R/todo/estimators_old.R | permissive | bristol-vaccine-centre/growthrates | R | false | false | 71,807 | r | # TODO:
# 1) Write lag analysis for estimators.
# Simple one for growth rate
# N.B. for Rt this will need wallinga et al Rt calculation of Rt using infectivity profile.
# 2) default estimator functions combining best practice.
# classes for estimates e.g. growthratetimeseries, rttimeseries, poissonratetimeserris, proportions
# simple timeseries as S3 class
# Multiple time series as S3
# 3) plotting functions
# consider a new library to hold class definitions and plotting functions
# or a new library to hold basic data manipulation and validation functions and a new library for plotting
# 4) restructure appendix vignettes to use latex versions from PhD and save figures somewhere.
# rename outputs to remove dates
# check time-series plots still working / fix timeseries plots
# 5) re-architect validation functions
# into new package?
# Backwards-compatible camelCase alias for ensure_exists().
ensureExists = function(df, column, orElse = function(df) {stop("Missing column: ", column)}, ...) {ensure_exists(df, column, orElse, ...)}
# Make sure all the named `columns` exist in `df`, or report every missing
# column (via message) before stopping. Returns invisibly when valid.
checkValid = function(df, columns) {
  # vapply instead of sapply so the result type is guaranteed logical
  success = all(vapply(columns, function(colname) {
    if (!(colname %in% colnames(df))) {
      message("Missing column: ", colname)
      return(FALSE)
    }
    return(TRUE)
  }, logical(1)))
  if (!success) stop("Invalid dataframe")
}
# Create a weekday (ordered factor sun..sat) and is.weekend column from the
# `date` column, which must exist.
weekdayFromDates = function(df) {
  checkValid(df, "date")
  # NOTE(review): the sun..sat labels assume lubridate's default week start
  # (Sunday = 1); a non-default lubridate.week.start option would mislabel -
  # confirm.
  df %>% mutate(
    weekday = ordered(lubridate::wday(date), levels = 1:7, labels = c("sun", "mon", "tue", "wed", "thur", "fri", "sat")),
    is.weekend = weekday %in% c("sat", "sun")
  )
}
#' Calculates a weighting to apply to each day of week
#'
#' @param simpleTimeseries a covid timeseries data frame (must not be grouped;
#'   use a group_modify for grouped data)
#' @param ... not used (reserved)
#' @param valueVar the variable with the weekly periodicity
#'
#' @return the dataframe with a weekday.wt column which says how much that value is over expressed in the data
weekendEffect = function(simpleTimeseries, valueVar="value", ...) {
  valueVar = ensym(valueVar)
  if (simpleTimeseries %>% is.grouped_df()) stop("this does not work on grouped data. use a group_modify.")
  simpleTimeseries = simpleTimeseries %>% weekdayFromDates()
  # set the default uniform weighting
  defaultWt = tibble(
    weekday = ordered(1:7, labels=c("sun","mon","tue","wed","thur","fri","sat")),
    weekday.wt = rep(1,7)
  )
  if(nrow(simpleTimeseries)>=21) {
    # if there is enough data (>= 3 weeks) estimate how much weight each day
    # should have: log-scale deviation of each day from its centred 7-day
    # rolling mean, averaged per weekday
    weight = simpleTimeseries %>%
      mutate(.percentBias =
        log(!!valueVar+1) /
        slider::slide_dbl(log(!!valueVar+1), .before=3, .after=3,.f = mean, na.rm=TRUE,.complete = TRUE)-1
      ) %>%
      group_by(weekday,.add=TRUE) %>%
      summarise(
        weekday.wt = exp(abs(mean(.percentBias, na.rm=TRUE))),
        .groups="drop"
      ) %>%
      mutate(weekday.wt=weekday.wt/mean(weekday.wt, na.rm=TRUE))
    # fall back to uniform weights if any weekday is missing or produced NA
    if(nrow(weight) !=7 | any(is.na(weight$weekday.wt))) {
      weight = defaultWt
    }
  } else {
    weight = defaultWt
  }
  simpleTimeseries %>% inner_join(weight, by="weekday") %>% return()
}
#
# #' @description Calculates a weighting to apply to each day of week
# #' @param simpleTimeseries a covid timeseries data frame
# #' @param window the window over which we are to normalise the sample size
# #' @param sampleSizeVar the variable with the sample size in it
# #' @return the dataframe with a sample.wt column which says how much that sample is relevant to the data
# sampleSizeEffect = function(simpleTimeseries, window, sampleSizeVar="total") {
# if (simpleTimeseries %>% is.grouped_df()) stop("this does not work on grouped data. use a group_modify.")
# sampleSizeVar = ensym(sampleSizeVar)
# simpleTimeseries = simpleTimeseries %>% arrange(date) %>% mutate(
# #sample.wt = ifelse(!!sampleSizeVar==0,0,!!sampleSizeVar / slider::slide_dbl(!!sampleSizeVar, .before = floor(window/2), .after = floor(window/2), mean, na.rm=TRUE,.complete = FALSE))
# sample.wt = ifelse(!!sampleSizeVar==0,0,!!sampleSizeVar/mean(!!sampleSizeVar,na.rm = TRUE))
# )
# return(simpleTimeseries)
# }
#
## Locfit estimate outputs ----
# This is just to format locfit results given a locfit model:
# extract fitted points at the fit points ("fitp") and format them into wide
# quantile columns; on any failure fall back to a null result carrying the
# error message.
locfitExtractResult = function(df, model, estimate, modelName, link = "value") {
  tryCatch({
    points = preplot(model, where = "fitp", se.fit = TRUE, band = "local")
    t = points$tr
    fit = points$fit
    se.fit = tryCatch({
      # interpolate over any missing standard errors
      forecast::na.interp(points$se.fit)
    }, error = function(e) {
      rep(NA, length(fit))
    })
    df %>% formatResult(fit, se.fit, t, estimate, modelName, link)
  }, error = function(e) {
    df %>% nullResult(estimate, modelName, link, error = e$message)
  })
}
# Evaluate `expr`, returning NA_real_ instead of propagating any error.
opt = function(expr) {
  tryCatch(expr, error = function(e) NA_real_)
}
# Format a transformed (link-scale) normally distributed variable into wide
# quantile columns, all prefixed by `estimate`; `t` is the inverse-link
# transform applied to quantiles of the normal on the link scale.
formatResult = function(df, fit, se.fit, t, estimate, modelName, link) {
  df %>% mutate(
    !!(paste0(estimate, ".", link)) := fit,
    !!(paste0(estimate, ".SE.", link)) := se.fit,
    # opt() converts any back-transform failure into NA
    !!(paste0(estimate, ".Quantile.0.025")) := opt(t(qnorm(0.025, fit, se.fit))),
    !!(paste0(estimate, ".Quantile.0.05")) := opt(t(qnorm(0.05, fit, se.fit))),
    !!(paste0(estimate, ".Quantile.0.25")) := opt(t(qnorm(0.25, fit, se.fit))),
    !!(paste0(estimate, ".Quantile.0.5")) := t(fit),
    !!(paste0(estimate, ".Quantile.0.75")) := opt(t(qnorm(0.75, fit, se.fit))),
    !!(paste0(estimate, ".Quantile.0.95")) := opt(t(qnorm(0.95, fit, se.fit))),
    !!(paste0(estimate, ".Quantile.0.975")) := opt(t(qnorm(0.975, fit, se.fit))),
    !!(paste0(estimate, ".model")) := modelName)
}
# Produce a degenerate result: a fixed central value with NA SE, identity
# transform, plus an `<estimate>.error` column recording why there is no fit.
nullResult = function(df, estimate, modelName, link = "value", error = "unknown error", centralValue = 0) {
  df %>% formatResult(fit = centralValue, se.fit = NA_real_, t = function(x) x, estimate, modelName, link) %>%
    mutate(
      !!(paste0(estimate, ".error")) := error
    )
}
#' Rename a growth rate estimate by placing a prefix in front of it
#'
#' @param df the dataframe with the Growth rate, poisson rate, R_t or proportion estimates
#' @param prefix the prefix to add
#' @param estimates which estimates to rename (defaults to all of "Growth","Est","Proportion","Rt" and "doublingTime")
#'
#' @return the dataframe with the columns renamed
#' @export
renameResult = function(df, prefix, estimates = c("Growth","Est","Proportion","Rt","doublingTime")) {
  for (estimate in estimates) {
    # was hard-coded starts_with("Growth"), which re-prefixed the Growth
    # columns once per iteration and ignored the other estimates
    df = df %>% rename_with(.cols = starts_with(estimate), .fn = ~ paste0(prefix, ".", .x))
  }
  # was missing: the for-loop's NULL was returned, discarding all the renames
  return(df)
}
## Locfit estimators ----
# Generate the formula for a locfit model of the form
# `value ~ locfit::lp(time, ...)` (legacy variant of .locfit_formula that
# uses a `time` column rather than `.time`). `window` is either a bandwidth
# in units of time (h), or converted to a fraction of observations (nn) when
# `nearestNeighbours` is TRUE.
locfitFormula = function(valueVar, nrowDf, window, polynomialDegree, nearestNeighbours = TRUE, ...) {
  valueVar = ensym(valueVar)
  tmp_alpha = min(window / nrowDf, 1)
  tmp_alpha_2 = min((window * 2 + 1) / nrowDf, 1)
  lpParams = list(
    nn = if (nearestNeighbours) tmp_alpha_2 else tmp_alpha, # this is given in fraction of total observations
    h = if (!nearestNeighbours) window else 0, # this is given in units of X
    deg = polynomialDegree
  )
  lpParamsText = paste(names(lpParams), lpParams, sep = "=", collapse = ", ")
  lpFormula = as.formula(paste0(as_label(valueVar), " ~ locfit::lp(time, ", lpParamsText, ")"))
  return(lpFormula)
}
#' Generate a smoothed estimate of the proportion of cases compared to some total.
#'
#' @param simpleTimeseries - a minimal time-series including date, value, and if available total. If total is present the proportion is value/total. otherwise it is value.
#' @param degree the polynomial degree
#' @param window the data window in days
#' @param estimateMean there is no closed form estimate of the mean of a logit transformed normal. it can be calculated by integration by this is relatively expensive and not done unless explicitly needed,
#' @param ... may include "nearestNeighbours=FALSE" to disable the tail behaviour of locfit
#'
#' @return a timeseries with binomial proportion estimates (columns starting with "Proportion")
#' @export
locfitProportionEstimate = function(simpleTimeseries, degree = 2, window = 14, estimateMean = FALSE,... ) { #, weightBySampleSize = FALSE, weightByWeekday = FALSE, ...) {
if (simpleTimeseries %>% is.grouped_df()) stop("this does not work on grouped data. use a group_modify.")
simpleTimeseries %>% checkValid(c("date","value"))
simpleTimeseries = simpleTimeseries %>%
arrange(date) %>%
ensureExists("total", orElse = function(ts,...) ts %>% mutate(total=1)) %>%
#ensureExists("weekday.wt", orElse = function(ts,...) ts %>% weekendEffect(valueVar=total)) %>%
#ensureExists("sample.wt", orElse = function(ts,...) ts %>% sampleSizeEffect(window=window, sampleSizeVar=total)) %>%
ensureExists("time", orElse = function(ts,...) ts %>% mutate(time = as.integer(date-max(date)))) %>%
mutate(.prop = ifelse(total==0,NA,value/total))
if(any(simpleTimeseries$.prop > 1,na.rm = TRUE)) stop("Proportions model has values greater than 1. Did you specify total column correctly?")
if(sum(na.omit(simpleTimeseries$.prop) != 0) < degree) {
return(simpleTimeseries %>% nullResult(estimate = "Proportion", modelName = glue::glue("binomial:{degree}:{window}"), link = "logit", error = "not enough non zero values", centralValue = 0))
}
if(sum(na.omit(simpleTimeseries$.prop) != 1) < degree) {
return(simpleTimeseries %>% nullResult(estimate = "Proportion", modelName = glue::glue("binomial:{degree}:{window}"), link = "logit", error = "not enough non unitary values", centralValue = 1))
}
# simpleTimeseries = simpleTimeseries %>% mutate(fit.wt = 1)
# if(weightBySampleSize) simpleTimeseries = simpleTimeseries %>% mutate(fit.wt = fit.wt*sample.wt)
# if(weightByWeekday) simpleTimeseries = simpleTimeseries %>% mutate(fit.wt = fit.wt*weekday.wt)
#
# if(weightBySampleSize) {
# simpleTimeseries = simpleTimeseries %>% select(-.prop) %>% group_by_all() %>% summarise(
# .prop = c(rep(1,value),rep(1,total-value))
# )
# }
tryCatch({
model = locfit::locfit(
locfitFormula(.prop, nrowDf = nrow(simpleTimeseries), window, degree, ...),
# weights = fit.wt,
data=simpleTimeseries,
family="qbinomial",
link="logit",
ev=simpleTimeseries$time
)}, error=function(e) browser()
)
# weightLbl = case_when(
# weightBySampleSize & weightByWeekday ~ "both",
# weightBySampleSize ~ "sample",
# weightByWeekday ~ "weekday",
# TRUE ~ "none"
# )
weightLbl = "none"
simpleTimeseries = simpleTimeseries %>%
locfitExtractResult(model, estimate = "Proportion", modelName = glue::glue("binomial:{degree}:{window}:{weightLbl}"), link = "logit") %>%
select(-.prop)
if (estimateMean) {
simpleTimeseries = simpleTimeseries %>%
mutate(
Proportion.value = map2_dbl(Proportion.logit, Proportion.SE.logit, .f = ~ ifelse(is.na(.y),.x,logitnorm::momentsLogitnorm(.x,.y)[["mean"]])) #();NA_real_))
)
}
return(simpleTimeseries)
}
#' Generate a smoothed estimate of the relative growth rate of cases compared to some baseline using proportions.
#'
#' @param simpleTimeseries a minimal time-series including date, value, and if available total. If total is present the proportion is value/total, otherwise value is assumed to already be a fraction and total is assumed to be 1.
#' @param degree the polynomial degree
#' @param window the data window in days
#' @param ... may include "nearestNeighbours=FALSE" to disable the tail behaviour of locfit
#'
#' @return a timeseries with growth rate estimates (columns starting with "Growth")
#' @export
locfitProportionalGrowthEstimate = function(simpleTimeseries, degree = 2, window = 14, ...) {
  if (simpleTimeseries %>% is.grouped_df()) stop("this does not work on grouped data. use a group_modify.")
  simpleTimeseries %>% checkValid(c("date","value"))
  simpleTimeseries = simpleTimeseries %>%
    arrange(date) %>%
    ensureExists("total", orElse = function(ts,...) {
      message("No total column in proportional timeseries - assuming value is a fraction, and total is 1.")
      ts %>% mutate(total=1)
    }) %>%
    ensureExists("time", orElse = function(ts,...) ts %>% mutate(time = as.integer(date-max(date)))) %>%
    mutate(.prop = ifelse(total==0,NA,value/total))
  if(any(simpleTimeseries$.prop > 1,na.rm = TRUE)) stop("Proportions model contains fractions greater than 1. Did you specify total column correctly?")
  # a degree-d polynomial needs at least d informative (non-degenerate) points
  if(sum(na.omit(simpleTimeseries$.prop) != 0) < degree) {
    return(simpleTimeseries %>% nullResult(estimate = "Growth", modelName = glue::glue("binomial:{degree}:{window}"), link = "value", error = "not enough non zero values", centralValue = 0))
  }
  if(sum(na.omit(simpleTimeseries$.prop) != 1) < degree) {
    # BUGFIX: this null result was mislabelled estimate = "Proportion"; this
    # function produces "Growth" columns (compare the non-zero branch above)
    return(simpleTimeseries %>% nullResult(estimate = "Growth", modelName = glue::glue("binomial:{degree}:{window}"), link = "value", error = "not enough non unitary values", centralValue = 0))
  }
  # deriv=1 requests the derivative of the smooth, i.e. the growth rate
  model = locfit::locfit(
    locfitFormula(.prop, nrowDf = nrow(simpleTimeseries), window, degree, ...),
    data = simpleTimeseries,
    family = "qbinomial",
    link = "logit",
    deriv = 1,
    ev = simpleTimeseries$time
  )
  weightLbl = "none"
  # no link function on the growth rate as it is the derivative on the logit scale
  simpleTimeseries %>%
    locfitExtractResult(model = model, estimate = "Growth", modelName = glue::glue("binomial:{degree}:{window}:{weightLbl}"), link = "value") %>%
    select(-.prop)
}
#' Generate a smoothed estimate of the incidence rate of cases using a quasi-poisson model.
#'
#' Unlike \code{locfitGrowthEstimate} this returns the smoothed rate itself
#' (not its derivative).
#'
#' @param simpleTimeseries - a minimal time-series including date and value.
#' @param degree the polynomial degree
#' @param window the data window in days
#' @param weightByWeekday down-weight observations by the estimated weekday effect?
#' @param ... may include "nearestNeigbour=FALSE" to disable the tail behaviour of locfit
#'
#' @return a timeseries with poisson rate estimates (columns starting with "Est")
#' @export
locfitPoissonRateEstimate = function(simpleTimeseries, degree = 2, window = 14, weightByWeekday = FALSE, ...) {
if (simpleTimeseries %>% is.grouped_df()) stop("this does not work on grouped data. use a group_modify.")
simpleTimeseries %>% checkValid(c("date","value"))
# ensure helper columns (weekday weights, integer time index) exist
simpleTimeseries = simpleTimeseries %>%
arrange(date) %>%
ensureExists("weekday.wt", orElse = function(ts,...) ts %>% weekendEffect(valueVar=value)) %>%
ensureExists("time", orElse = function(ts,...) ts %>% mutate(time = as.integer(date-max(date)))) %>%
mutate(.prop = value)
# a degree-d polynomial needs at least d non zero observations to fit
if(sum(na.omit(simpleTimeseries$.prop) != 0) < degree) {
return(simpleTimeseries %>% nullResult(estimate = "Est", modelName = glue::glue("poisson:{degree}:{window}"), link = "log",error = "not enough non zero values", centralValue = 0))
}
simpleTimeseries = simpleTimeseries %>% mutate(fit.wt = 1)
if(weightByWeekday) simpleTimeseries = simpleTimeseries %>% mutate(fit.wt = fit.wt*weekday.wt)
model = locfit::locfit(
locfitFormula(.prop, nrowDf = nrow(simpleTimeseries), window, degree, ...),
weights = fit.wt,
data=simpleTimeseries,
family="qpoisson",
link="log",
ev=simpleTimeseries$time
)
weightLbl = case_when(
weightByWeekday ~ "weekday",
TRUE ~ "none"
)
# the fit is on the log scale; extraction back-transforms via the "log" link
simpleTimeseries %>%
locfitExtractResult(model, estimate = "Est", modelName = glue::glue("poisson:{degree}:{window}:{weightLbl}"), link="log") %>%
select(-.prop)
}
#' Generate a smoothed estimate of the absolute growth rate of cases using a poisson model.
#'
#' @param simpleTimeseries - a minimal time-series including date and value.
#' @param degree the polynomial degree
#' @param window the data window in days
#' @param weightByWeekday down-weight observations by the estimated weekday effect?
#' @param ... may include "nearestNeigbour=FALSE" to disable the tail behaviour of locfit
#'
#' @return a timeseries with growth rate estimates (columns starting with "Growth")
#' @export
locfitGrowthEstimate = function(simpleTimeseries, degree = 2, window = 14, weightByWeekday = FALSE, ...) {
  if (simpleTimeseries %>% is.grouped_df()) stop("this does not work on grouped data. use a group_modify.")
  simpleTimeseries %>% checkValid(c("date","value"))
  simpleTimeseries = simpleTimeseries %>%
    arrange(date) %>%
    ensureExists("weekday.wt", orElse = function(ts,...) weekendEffect(ts, valueVar = value)) %>%
    ensureExists("time", orElse = function(ts,...) mutate(ts, time = as.integer(date - max(date)))) %>%
    mutate(.prop = value)
  # a degree-d polynomial needs at least d non zero observations to fit
  if (sum(na.omit(simpleTimeseries$.prop) != 0) < degree) {
    return(simpleTimeseries %>% nullResult(estimate = "Growth", modelName = glue::glue("poisson:{degree}:{window}"), link = "value",error = "not enough non zero values", centralValue = 0))
  }
  simpleTimeseries = simpleTimeseries %>% mutate(fit.wt = 1)
  if (weightByWeekday) {
    simpleTimeseries = simpleTimeseries %>% mutate(fit.wt = fit.wt * weekday.wt)
  }
  # deriv=1 requests the derivative of the smooth, i.e. the growth rate
  model = locfit::locfit(
    locfitFormula(.prop, nrowDf = nrow(simpleTimeseries), window, degree, ...),
    weights = fit.wt,
    data = simpleTimeseries,
    family = "qpoisson",
    link = "log",
    deriv = 1,
    ev = simpleTimeseries$time
  )
  weightLbl = if (weightByWeekday) "weekday" else "none"
  # no link function on the growth rate as it is the derivative on the log scale
  simpleTimeseries %>%
    locfitExtractResult(model = model, estimate = "Growth", modelName = glue::glue("poisson:{degree}:{window}:{weightLbl}"), link = "value") %>%
    select(-.prop)
}
#' Calculate a doubling time with quantiles for any timeseries with Growth rate estimates
#'
#' @param simpleTimeseries a timeseries containing `Growth.Quantile.*` columns
#'
#' @return a timeseries with doubling time estimates (columns starting with "doublingTime")
#' @export
doublingTimeFromGrowthRate = function(simpleTimeseries) {
  # A growth-rate quantile q maps to the (1-q) doubling-time quantile because
  # doubling time (log(2)/r) is a decreasing function of the growth rate r.
  flippedQuantileName <- function(x) {
    q <- as.numeric(stringr::str_extract(x, "[0-9]\\.[0-9]+"))
    sprintf("doublingTime.Quantile.%1.3g", 1 - q)
  }
  simpleTimeseries %>% mutate(across(
    .cols = starts_with("Growth.Quantile"),
    .fns = ~ log(2) / .x,
    .names = "{flippedQuantileName(.col)}"
  ))
}
#' Calculate a reproduction number estimate using the Wallinga 2007 estimation using empirical generation time distribution. This uses resampling to transmit uncertainty in growth rate estimates
#'
#' @param simpleTimeseries - With a "Growth" estimate as a normally distributed quantility
#' @param infectivityProfile - a list with components yMatrix and aVector (used as defaults for the next two parameters)
#' @param yMatrix - the matrix of possible infectivity profiles as discrete distributions
#' @param aVector - the upper boundaries of the time cut-offs for the infectivity profiles
#' @param bootstraps - the number of bootstraps to take to calculate for each point.
#' @param quantiles - quantiles to calculate.
#' @import logitnorm
#'
#' @return a timeseries with "Rt" estimates
#' @export
rtFromGrowthRate = function(simpleTimeseries, infectivityProfile, yMatrix = infectivityProfile$yMatrix, aVector = infectivityProfile$aVector, bootstraps = 20*dim(yMatrix)[2], quantiles = c(0.025,0.05,0.25,0.5,0.75,0.95,0.975)) {
if (simpleTimeseries %>% is.grouped_df()) stop("this does not work on grouped data. use a group_modify.")
simpleTimeseries %>% checkValid(c("date","value","Growth.value","Growth.SE.value"))
# grab the y matrix from the list column
# NOTE(review): matrix(yMatrix) collapses an already-matrix input into a
# single column; this appears intended for a list-column input — confirm the
# expected shape of yMatrix against callers
y_cols = matrix(yMatrix)
a = aVector
# figure out how many bootstraps we need per infectivity profile:
bootsPerInf = max(c(bootstraps %/% dim(yMatrix)[2],1))
# lose the zero values in y and a, if present (which they will be):
if (a[1]==0) {
y_cols = matrix(y_cols[-1,])
a = a[-1]
}
# get the infectivity profiles as a list of vectors, each bootstrap profile will be a vector.
ys = asplit(y_cols, MARGIN=2)
# for each time point, sample growth rates from its normal estimate and map
# each (r, profile) pair to an R value
d3 = simpleTimeseries %>% mutate(R = map2(Growth.value, Growth.SE.value, function(mean_r,sd_r) {
# deterministic "sampling": evenly spaced quantiles of the growth-rate
# distribution, dropping both endpoints (0 and 1 would map to +/- Inf)
qnts = seq(0,1,length.out = bootsPerInf)[2:(bootsPerInf-1)]
r_samples = qnorm(p=qnts,mean_r,sd_r)
rs = asplit(matrix(r_samples,nrow=length(ys)), MARGIN=1)
out = map2(rs,ys,function(r10,y) {
# Wallinga & Lipsitch (2007): R = r / sum over bins of
# w(a) * (exp(-r*a_lower) - exp(-r*a_upper)) / (a_upper - a_lower)
R10 = sapply(r10, function(r) {
R = r/sum(y*(exp(-r*lag(a,default=0))-exp(-r*a))/(a - lag(a,default=0)))
})
})
# flatten bootstrap results and summarise to quantiles / mean / SE
R_out = as.vector(sapply(out,c))
R_q = quantile(R_out, quantiles)
names(R_q) = paste0("Rt.Quantile.",quantiles)
R_summ = enframe(R_q) %>% pivot_wider() %>% mutate(Rt.value = mean(R_out), Rt.SE.value = sd(R_out))
return(R_summ)
}))
return(d3 %>% unnest(R) %>% mutate(Rt.model = "wallinga:growth-rate"))
}
## Manchester growth rate ----
# ## Adapted from code
# Copyright (c) 2020 Ian Hall
# See LICENCE for licensing information
# Growth rate estimates for confirmed cases in Europe and for different metrics in Italy using GAM
# Figure 1 (main text) and figures S1 and S2 (electronic supplementary material) of:
#
# Pellis L, Scarabel F, Stage HB, Overton CE, Chappell LHK, Fearon E, Bennett E,
# University of Manchester COVID-19 Modelling Group, Lythgoe KA, House TA and Hall I,
# "Challenges in control of COVID-19: short doubling time and long delay to effect of interventions",
# Philosophical Transactions of the Royal Society B (2021)
#
# gamGrowthEstimate = function(simpleTimeseries, meth="GCV.Cp", FE='WD'){
# if (simpleTimeseries %>% is.grouped_df()) stop("this does not work on grouped data. use a group_modify.")
#
# simpleTimeseries %>% checkValid(c("date","value"))
# simpleTimeseries = simpleTimeseries %>%
# arrange(date) %>%
# ensureExists("time", orElse = function(ts,...) ts %>% mutate(time = as.integer(date-max(date)))) %>%
# mutate(.incidence = value)
#
# #res <- data.frame(sdt=rep(0,npts),sdtup=rep(0,npts),sdtlow=rep(0,npts),doub=rep(0,npts),doubup=rep(0,npts),doublow=rep(0,npts))
# #Tv <- timev
#
# if(FE=='None') {
# MGAM <- mcgv::gam(.incidence ~ mcgv::s(time), data = simpleTimeseries, family=quasipoisson, method=meth)
# } else {
# simpleTimeseries = simpleTimeseries %>%
# ensureExists("weekday", orElse = function(ts,...) ts %>% weekendEffect(valueVar=value)) %>%
# ensureExists("is.weekend", orElse = function(ts,...) ts %>% weekendEffect(valueVar=value))
# if(FE=='WE'){
# MGAM <- mcgv::gam(.incidence ~ mcgv::s(time)+is.weekend, data = simpleTimeseries, family=quasipoisson, method=meth)
# } else {
# MGAM <- mcgv::gam(.incidence ~ mcgv::s(time)+weekday, data = simpleTimeseries, family=quasipoisson, method=meth)
# }
# }
#
# X0 <- predict(MGAM, simpleTimeseries %>% mutate(time=time-eps), type="lpmatrix")
# eps <- 1e-7 ## finite difference interval
# X1 <- predict(MGAM, simpleTimeseries %>% mutate(time=time+eps),type="lpmatrix")
# Xp <- (X1-X0)/(2*eps) ## maps coefficients to (fd approx.) derivatives
# # something to do with extracting the coefficients
# off <- ifelse(FE=='None',1,ifelse(FE=='WE',2,7))
# Xi <- Xp*0
# Xi[,1:9+off] <- Xp[,1:9+off] ## weekend Xi%*%coef(MGAM) = smooth deriv i
# df <- Xi%*%coef(MGAM) ## ith smooth derivative
# df.sd <- rowSums(Xi%*%MGAM$Vp*Xi)^.5 ## cheap diag(Xi%*%b$Vp%*%t(Xi))^.5
# ## derivative calculation, pers comm S. N. Wood, found in mgcv: Mixed GAM Computation Vehicle with automatic smoothness estimation. R packageversion 1.8-31 (2019) https://CRAN.R-project.org/package=mgcv.
#
# simpleTimeseries %>% formatResult(fit = df, se.fit = df.sd,t = function(x) x, estimate = "Growth", modelName = glue::glue("poisson:gam-{meth}:{FE}"), link = "value")
#
# }
## Point estimators ----
#' Calculate a slightly more robust estimate of growth rate and proportion based on a single binomial model evaluated at a range of dates
#'
#' @param simpleTimeseries - the timeseries containing date, value and total
#' @param dates - dates at which to evaluate the model
#' @param window - the window of data (in days, centred on each evaluation date)
#' @param weekly - either "weekday","weekend" or "none" to define whether to fit a fixed effect model to the weekday, or the is.weekend
#' @param includeModel - keep the fitted model as a list column for fit analysis
#' @param ... ignored
#'
#' @return a dataframe of evaluation dates, growth rates, proportions and (optionally) model fit
#' @export
pointProportionEstimate = function(simpleTimeseries, dates = max(simpleTimeseries$date)-3, window = 14, weekly = "weekday", includeModel = TRUE,...) {
  if (simpleTimeseries %>% is.grouped_df()) stop("this does not work on grouped data. use a group_modify.")
  # logitnorm is needed to recover the mean of the logit-normal estimate
  if (!requireNamespace("logitnorm", quietly = TRUE)) {
    message("Installing logitnorm needed for analyses")
    install.packages("logitnorm", repos = "https://cloud.r-project.org")
  }
  predictDates = as.Date(dates)
  simpleTimeseries %>% checkValid(c("date","value"))
  simpleTimeseries = simpleTimeseries %>%
    arrange(date) %>%
    ensureExists("total", orElse = function(ts,...) ts %>% mutate(total=1)) %>%
    ensureExists("time", orElse = function(ts,...) ts %>% mutate(time = as.integer(date-max(date)))) %>%
    ensureExists(c("weekday","is.weekend"), orElse = function(ts,...) ts %>% weekdayFromDates()) %>%
    mutate(.prop = ifelse(total==0,NA,value/total))
  if(any(simpleTimeseries$.prop > 1,na.rm = TRUE)) stop("Proportions model has values greater than 1. Did you specify total column correctly?")
  # optional weekday / weekend fixed effects
  if (weekly=="weekday") {
    modelFormula = .prop ~ time + weekday
  } else if (weekly=="weekend") {
    modelFormula = .prop ~ time + is.weekend
  } else {
    modelFormula = .prop ~ time
  }
  bind_rows(lapply(predictDates, function(predictDate) {
    # fit a weighted binomial glm over a window centred on the evaluation date
    dateMin = as.Date(predictDate)-floor(window/2)
    dateMax = as.Date(predictDate)+floor(window/2)
    suppressWarnings({
      model = glm(
        modelFormula,
        data = simpleTimeseries %>% filter(date >= dateMin & date <= dateMax) %>% mutate(sample.wt = total/mean(total,na.rm=TRUE)),
        family = "binomial",
        weights = sample.wt
      )
    })
    predictAt = tibble(
      date = predictDate,
      time = as.integer(date-max(simpleTimeseries$date)),
    ) %>% weekdayFromDates()
    predicted = predict(model, newdata = predictAt, se.fit = TRUE, type = "link")
    linkFn = family(model)$linkinv
    predictAt = formatResult(predictAt, unname(predicted$fit), unname(predicted$se.fit), linkFn, "Proportion", "glm", "logit")
    # the logit-normal mean has no closed form; integrate via logitnorm
    predictAt = predictAt %>% mutate(
      Proportion.value = map2_dbl(Proportion.logit, Proportion.SE.logit, .f = ~ logitnorm::momentsLogitnorm(.x, .y)[["mean"]])
    )
    # the "time" coefficient is the growth rate on the logit scale
    gr = summary(model)$coefficients["time",]
    predictAt = formatResult(predictAt, gr[[1]], gr[[2]], function(x) x, "Growth", "glm", "value")
    # BUGFIX: previously `if(includeModel) ...` was the final expression, so
    # includeModel=FALSE returned NULL and the row was silently dropped
    if (includeModel) predictAt = predictAt %>% mutate(fit = list(model))
    predictAt
  }))
}
#' Calculate a slightly more robust estimate of growth rate and poisson rate based on a single model
#'
#' @param simpleTimeseries - the timeseries containing date and value
#' @param dates - dates at which to evaluate the model
#' @param window - the window of data (in days, centred on each evaluation date)
#' @param weekly - either "weekday","weekend" or "none" to define whether to fit a fixed effect model to the weekday, or the is.weekend
#' @param includeModel - keep the fitted model as a list column for fit analysis
#' @param ... ignored
#'
#' @return a dataframe of evaluation dates, growth rates, poisson rates and (optionally) model fit
#' @export
pointPoissonEstimate = function(simpleTimeseries, dates, window, weekly = "weekday", includeModel = TRUE,...) {
  if (simpleTimeseries %>% is.grouped_df()) stop("this does not work on grouped data. use a group_modify.")
  predictDates = as.Date(dates)
  simpleTimeseries %>% checkValid(c("date","value"))
  simpleTimeseries = simpleTimeseries %>%
    arrange(date) %>%
    ensureExists("time", orElse = function(ts,...) ts %>% mutate(time = as.integer(date-max(date)))) %>%
    ensureExists(c("weekday","is.weekend"), orElse = function(ts,...) ts %>% weekdayFromDates()) %>%
    mutate(.prop = value)
  # optional weekday / weekend fixed effects
  if (weekly=="weekday") {
    modelFormula = .prop ~ time + weekday
  } else if (weekly=="weekend") {
    modelFormula = .prop ~ time + is.weekend
  } else {
    modelFormula = .prop ~ time
  }
  bind_rows(lapply(predictDates, function(predictDate) {
    # fit a poisson glm over a window centred on the evaluation date
    dateMin = as.Date(predictDate)-floor(window/2)
    dateMax = as.Date(predictDate)+floor(window/2)
    model = glm(
      modelFormula,
      data = simpleTimeseries %>% filter(date >= dateMin & date <= dateMax),
      family = "poisson"
    )
    predictAt = tibble(
      date = predictDate,
      time = as.integer(date-max(simpleTimeseries$date)),
    ) %>% weekdayFromDates()
    predicted = predict(model, newdata = predictAt, se.fit = TRUE, type = "link")
    linkFn = family(model)$linkinv
    predictAt = formatResult(predictAt, unname(predicted$fit), unname(predicted$se.fit), linkFn, "Est", "glm", "log")
    # the "time" coefficient is the exponential growth rate
    gr = summary(model)$coefficients["time",]
    predictAt = formatResult(predictAt, gr[[1]], gr[[2]], function(x) x, "Growth", "glm", "value")
    # BUGFIX: previously `if(includeModel) ...` was the final expression, so
    # includeModel=FALSE returned NULL and the row was silently dropped
    if (includeModel) predictAt = predictAt %>% mutate(fit = list(model))
    predictAt
  }))
}
## EpiEstim wrapper ----
#' Minimal epiestim wrapper to execute a time series R_t using a discrete infectivity profile matrix, and format the result to be consistent with the rest of this file.
#'
#' @param simpleTimeseries a timeseries with date and value columns
#' @param yMatrix matrix of serial interval sample distributions (one per column)
#' @param bootstraps total posterior samples requested across all profiles
#' @param window sliding window length (days) for each R_t estimate
epiestimRtEstimate = function(simpleTimeseries, yMatrix, bootstraps = 10*dim(yMatrix)[2], window = 14) {
if (simpleTimeseries %>% is.grouped_df()) stop("this does not work on grouped data. use a group_modify.")
siConfig = EpiEstim::make_config(method = "si_from_sample")
# EpiEstim wants columns named dates and I (incidence)
tmp = simpleTimeseries %>% dplyr::select(dates=date,I=value)
# seq_id lets us join the windowed results back onto the original rows
simpleTimeseries = simpleTimeseries %>% dplyr::mutate(seq_id=row_number())
bootsPerInf = max(c(bootstraps %/% dim(yMatrix)[2],1))
# sliding windows: estimates start at t=2 (EpiEstim requirement) and each
# covers `window` days
siConfig$t_start = c(2:(nrow(tmp)-window))
siConfig$t_end = siConfig$t_start+window
siConfig$n2 = bootsPerInf
warn = NA
# capture (and muffle) warnings; rethrow errors unchanged via error = stop
# NOTE(review): the captured `warn` is collected but never written into the
# output (errors=NA below) — confirm whether it should be
tmp4 =
withCallingHandlers(
tryCatch(EpiEstim::estimate_R(tmp, method = "si_from_sample",config=siConfig,si_sample = yMatrix), error = stop), warning= function(w) {
warn <<- w$message
invokeRestart("muffleWarning")
})
# results are indexed by window end; rename "(R)" columns to the Rt.* convention
tmp5 = tmp4$R %>% mutate(seq_id=t_end, errors=NA, `Rt.window`=window) #warn)
tmp5 = tmp5 %>% rename_with(.cols = contains("(R)"),.fn=function(x) paste0("Rt.",stringr::str_remove(x,fixed("(R)")))) %>%
rename(`Rt.Quantile.0.5` = Rt.Median)
tmp6 = simpleTimeseries %>% dplyr::left_join(tmp5, by="seq_id")
return(tmp6 %>% select(-seq_id))
}
# plotProportionEstimate = function(simpleTimeseries, mapping = aes(), ...) {
#
# simpleTimeseries = simpleTimeseries %>% ensureExists("Proportion.Quantile.0.5", orElse = estimateProportions(simpleTimeseries,...))
# # We are going to pretend there is just one
# simpleTimeseries
# tmp2 = tmp %>% filter(date <= max(date)-1) %>% mutate(
# binom::binom.confint(Negative,n,method="wilson")
# )
#
# ggplot(estimate,aes(x=date,y=fit,ymin=lo,ymax=hi))+geom_ribbon(alpha=0.3)+geom_line(colour="blue")+
# geom_point(data=tmp2,mapping=aes(x=date,y=mean),inherit.aes = FALSE)+
# geom_errorbar(data=tmp2,mapping=aes(x=date,ymin=lower,ymax=upper),inherit.aes = FALSE)+
# scale_y_continuous(trans = "logit")
# }
## Weekly wrappers - doubled up with jepidemic
# Takes a line list of patient admissions and estimates weekly rates based on
# a quasi-poisson model fitted to count data using local regression (locfit).
# Expects `admissions` to contain an admission_week column defining the
# (fractional) week of admission.
estimateWeeklyRate = function(admissions, ... ,nn=0.2,deg=2) {
  # weekly counts of admissions
  weeklyCounts <- admissions %>% group_by(admission_week) %>% count()
  fit <- locfit::locfit(
    n ~ locfit::lp(admission_week, nn = nn, deg = deg),
    data = weeklyCounts, family = "qpoisson"
  )
  # evaluate the smoothed rate on a daily (1/7 week) grid
  evalWeeks <- seq(min(weeklyCounts$admission_week), max(weeklyCounts$admission_week), by = 1/7)
  pred <- preplot(fit, newdata = evalWeeks, se.fit = TRUE, band = "local")
  backTransform <- pred$tr # inverse link returned by preplot
  tibble(
    admission_week = evalWeeks,
    admission_date = .weeks_to_date(evalWeeks),
    lower = .opt(backTransform(qnorm(0.05, pred$fit, pred$se.fit))),
    median = backTransform(qnorm(0.5, pred$fit, pred$se.fit)),
    upper = .opt(backTransform(qnorm(0.95, pred$fit, pred$se.fit)))
  )
}
## Date utility ----
# Guess the regular spacing (in days) between a set of dates.
# Falls back to 1 when there are too few distinct dates to infer an interval.
.day_interval = function(dates) {
  dates = sort(unique(dates))
  if (length(dates) < 4) return(1)
  # the greatest common divisor of successive gaps is the implied interval;
  # base diff() replaces the previous dplyr::lag arithmetic and as.numeric
  # avoids pushing difftime objects through the %% in .gcd2
  interval = .gcd(as.numeric(diff(dates)))
  return(interval)
}
# greatest common divisor of two non-negative numbers (iterative Euclid)
.gcd2 = function(a, b) {
  while (b != 0) {
    remainder <- a %% b
    a <- b
    b <- remainder
  }
  a
}
# greatest common divisor of any number of values, by pairwise reduction
.gcd <- function(...) {
  values <- c(...)
  Reduce(.gcd2, values)
}
# Convert dates to a (possibly fractional) count of intervals since a fixed
# epoch ("day_zero"; default 2019-12-29, a Sunday at the start of the covid
# outbreak, configurable via options(day_zero=...)). The interval used is
# recorded as an attribute so it can be recovered by .time_to_date.
.date_to_time = function(dates, interval=1) {
  epoch <- as.Date(getOption("day_zero", "2019-12-29"))
  days_since_epoch <- as.numeric(dates - epoch)
  # the small interval/100 offset guards against floating point fuzz at
  # interval boundaries before flooring
  out <- floor(days_since_epoch + interval/100) / interval
  attr(out, "interval") <- interval
  out
}
# Convert a count of intervals since day_zero back to dates. When interval is
# not supplied it is recovered from the "interval" attribute that
# .date_to_time attaches to its result.
.time_to_date = function(times, interval=NA) {
  epoch <- as.Date(getOption("day_zero", "2019-12-29"))
  if (is.na(interval)) interval <- attr(times, "interval")
  # small offset guards against floating point fuzz before flooring
  epoch + floor(times * interval + interval/100)
}
# Generate the complete regular sequence of time points spanning the range of
# `times`, stepping by `interval` days (defaults to the series' own interval).
# The input must carry the "interval" attribute set by .date_to_time.
.full_seq_times = function(times, interval=NA) {
  step_base <- attr(times, "interval")
  if (is.null(step_base)) stop("the original timeseries has lost its metadata")
  if (is.na(interval)) interval <- step_base
  # step is expressed in units of the original interval
  full <- seq(min(times), max(times), by = interval/step_base)
  attr(full, "interval") <- step_base
  full
}
# turn a random set of dates into an evenly spaced set separated by an interval that makes sense given the data
# intervals will be inside the data; with truncate_partials=TRUE incomplete
# periods at either end are dropped
.full_seq_dates = function(dates, interval=.day_interval(dates), truncate_partials = FALSE) {
times = .date_to_time(dates,interval)
# the +1/interval - 1 here ensures we have a full final interval as defined by the
# dates. if interval is one this resolves to the max period
# otherwise it depends on the day of the week of the largest date - a largest date on a Saturday completes a week beginning on a Sunday.
if (truncate_partials) time_seq = seq(ceiling(min(times)),floor(max(times)+1/interval)-1,1)
# the +/- 0.01 guards against floating point fuzz at exact period boundaries
else time_seq = seq(ceiling(min(times-0.01)),floor(max(times+0.01)),1)
date_seq = .time_to_date(time_seq, interval)
return(date_seq)
}
# TODO: test this a bit.
# Tests whether each of `dates` falls inside the overall span covered by
# `full_seq_dates`, where each element of `full_seq_dates` marks the START of
# a period of `interval` days — so the span extends one full interval beyond
# the last start date.
.within_sequence = function(dates, full_seq_dates, interval = .day_interval(full_seq_dates)) {
  candidate <- .date_to_time(dates, interval)
  boundaries <- .date_to_time(full_seq_dates, interval)
  candidate >= min(boundaries) & candidate < max(boundaries) + 1
}
# Floor each date to the start of its containing period, where periods are
# `interval` days long and aligned to day_zero (see .date_to_time).
.floor_sequence = function(dates, interval) {
  times = .date_to_time(dates,interval)
  # pass interval explicitly rather than relying on the "interval" attribute
  # surviving the floor()/arithmetic round trip inside .time_to_date
  return(.time_to_date(floor(times+1/100), interval))
}
#.full_seq_dates(c(Sys.Date(),Sys.Date()-7,Sys.Date()-21,Sys.Date()-28))
#.day_interval(Sys.Date()+c(4,8,24))
#.full_seq_dates(Sys.Date()+c(4,8,24))
## .specification_from_formula( formula = ~ date + age + gender + region )
## .specification_from_formula( formula = ~ date + reported(report_date) + age + gender + region )
## .specification_from_formula( formula = class(variant) ~ date + reported(report_date) + age + gender + region )
## .specification_from_formula( formula = n ~ specimen_date + age + gender + region )
## .specification_from_formula( formula = n ~ non_date(something) + specimen_date + age + gender + region )
## .specification_from_formula( formula = count() ~ date() + age + gender + region )
## .specification_from_formula( formula = class(variant) + count(n) ~ date() + age + gender + region )
## .specification_from_formula( formula = class(variant) + count(n) ~ date(specimen) + age + gender + region )
## .specification_from_formula( formula = growth.rate() + Rt() + class(variant) + count(n) ~ date(specimen) + age + gender + region )
## parse$observations = named_list_lhs
## parse$predictors = named_list_rhs
## parse$groups = unnamed_list_rhs
## Formula utility ----
# rhs of a formula, returned as a string or (as_formula=TRUE) a one sided formula
.rhs = function(formula, as_formula=FALSE) {
  parts <- as.character(formula)
  # a two-sided formula deparses to c("~", lhs, rhs); one-sided to c("~", rhs)
  rhs_chr <- if (length(parts) == 2) parts[[2]] else parts[[3]]
  if (as_formula) as.formula(paste0("~", rhs_chr)) else rhs_chr
}
# lhs of a formula, returned as a string or (as_formula=TRUE) a one sided
# formula; NULL if the formula is one-sided
.lhs = function(formula, as_formula=FALSE) {
  parts <- as.character(formula)
  # one-sided formulae deparse to c("~", rhs) and have no lhs
  if (length(parts) == 2) return(NULL)
  lhs_chr <- parts[[2]]
  if (as_formula) as.formula(paste0("~", lhs_chr)) else lhs_chr
}
# Combine a lhs and a rhs into a single formula. lhs may be NULL (giving a
# one-sided result), a string, or a one-sided formula whose rhs supplies the
# lhs terms; rhs may be a string or a formula.
.join_sides = function(lhs, rhs) {
  if (is_formula(rhs)) rhs <- .rhs(rhs)
  if (is.null(lhs)) return(as.formula(paste0("~", rhs)))
  # a formula lhs is a one-sided formula holding the lhs terms on its rhs
  if (is_formula(lhs)) lhs <- .rhs(lhs)
  as.formula(sprintf("%s ~ %s", lhs, rhs))
}
# Update a formula using a new one treating rhs and lhs in the same way.
# S3 generic dispatching on the class of x (formula, epimetadata, epi_ts, epi_ll).
.update = function(x, ...) {
UseMethod(".update",x)
}
# Update both sides of a formula against a template, applying stats::update to
# the rhs and lhs independently so `.` placeholders behave the same on each side.
.update.formula = function(x, new) {
  # update the rhs: treat both right hand sides as one sided formulae
  x_rhs = .rhs(x, as_formula = TRUE)
  new_rhs = .rhs(new, as_formula = TRUE)
  update_rhs = update(x_rhs, new_rhs)
  if (is.null(.lhs(x))) {
    # the original lhs is empty
    if (is.null(.lhs(new))) {
      # the new lhs is also empty
      update_lhs = NULL
    } else {
      # the updated lhs will be the new lhs, but in formulae like
      #   new = x + . ~ y
      # the "+ ." term is problematic as there is nothing to substitute it
      # with, so it is stripped out textually
      new_lhs = .lhs(new, as_formula = TRUE)
      if (any(all.vars(new_lhs) == ".")) {
        tmp = .lhs(new, as_formula = FALSE) # this gets the LHS as a character.
        tmp = stringr::str_remove(tmp[[1]], "(\\s\\+\\s)?\\.(\\s\\+\\s)?")
        new_lhs = as.formula(paste0("~", tmp))
      }
      update_lhs = new_lhs
    }
  } else {
    # the original lhs is not empty: update it as a one sided formula
    x_lhs = .lhs(x, as_formula = TRUE)
    new_lhs = .lhs(new, as_formula = TRUE)
    if (is.null(new_lhs)) {
      update_lhs = NULL
    } else {
      update_lhs = update(x_lhs, new_lhs)
    }
  }
  # BUGFIX: removed a stray browser() debugging call that ran on every
  # invocation just before the return
  return(.join_sides(update_lhs, update_rhs))
}
# Apply a formula update to the formula held in an epimetadata object,
# preserving its type and interval.
.update.epimetadata = function(x, new) {
  updated <- .update.formula(x$formula, new)
  as.epimetadata(updated, type = x$type, interval = x$interval)
}
# Apply a formula update to the metadata attached to an epi_ts object.
.update.epi_ts = function(x, new) {
  meta <- .update.epimetadata(get_meta(x), new)
  set_meta(x, meta)
}
# Apply a formula update to the metadata attached to an epi_ll object.
.update.epi_ll = function(x, new) {
  meta <- .update.epimetadata(get_meta(x), new)
  set_meta(x, meta)
}
## Metadata utility ----
# Parse a formula into a specification: a dataframe with one row per term,
# recording which side it came from (lhs/rhs), the mapping wrapped around it
# (e.g. date(...), count(...), class(...); NA for bare terms) and the mapped
# column as a symbol.
.specification_from_formula = function(formula) {
  if (is.null(formula)) return(NULL)
  form_chr = as.character(formula)[-1]
  # one-sided formulae have no lhs component
  if (length(form_chr) == 1) form_chr = c("",form_chr)
  names(form_chr) = c("lhs","rhs")
  # split each side into its "+"-separated terms
  form_chr = form_chr %>%
    purrr::map(stringr::str_split,fixed("+"),n=Inf) %>%
    purrr::flatten() %>%
    purrr::map(stringr::str_trim)
  # separate "mapping(mapped)" terms into the mapping name and the column
  form_df = form_chr %>% enframe() %>% unnest(c(value)) %>% mutate(
    mapping = value %>% stringr::str_extract("(.*)\\(.*\\)") %>% stringr::str_remove("\\(.*\\)"),
    mapped = value %>% stringr::str_remove("(.*)\\(") %>% stringr::str_remove("\\)") %>% stringr::str_remove_all("`"),
    value = ifelse(mapped == "", mapping, mapped)
  ) %>%
    select(-mapped) %>%
    rename(side = name) %>%
    mutate(value = lapply(value,as.symbol))
  # a bare first term on the rhs is assumed to be the date column
  if (!any(form_df$mapping=="date",na.rm = TRUE)) {
    form_df = form_df %>% group_by(side) %>% mutate(mapping = replace(mapping,is.na(mapping) & side == "rhs" & !is.na(lag(mapping,default = "")),"date")) %>% ungroup()
    if (!any(form_df$mapping=="date",na.rm = TRUE)) stop("No date column identified. Either date must be first term on the rhs or specifically named date(...)")
  }
  # This will pick up a value only if there is at least one term on the lhs (and it is not already named)
  if (!any(form_df$mapping=="count",na.rm = TRUE)) {
    form_df = form_df %>% group_by(side) %>% mutate(mapping = replace(mapping,is.na(mapping) & side == "lhs" & !is.na(lag(mapping,default = "")),"count")) %>% ungroup()
  }
  # BUGFIX: the error message previously used "..." + formula, but R has no
  # `+` operator for strings so the intended message was never produced
  if (any(duplicated(na.omit(form_df$mapping)))) stop("duplicate mappings specified in formula: ", format(formula))
  class(form_df) = c("specification",class(form_df))
  return(form_df)
}
# Convert a specification back into a formula, reconstructing mapping
# wrappers (e.g. date(col)) around mapped terms.
.formula_from_specification = function(specification) {
  specification %>% mutate(
    # vapply pins the return type where sapply could silently vary it
    label = vapply(value, as_label, character(1)),
    term = case_when(
      is.na(mapping) ~ label,                            # bare term
      mapping == label ~ paste0(mapping,"()"),           # e.g. date() with implied column
      TRUE ~ paste0(sprintf("%s(%s)",mapping,label)))    # e.g. date(specimen)
  ) %>% group_by(side) %>%
    summarise(term = paste0(term,collapse=" + ")) %>%
    summarise(formula = paste0(term,collapse=" ~ ")) %>%
    pull(formula) %>% as.formula()
}
# Construct a list of utility accessor functions from a specification object.
# The returned closures capture the specification (v) and let callers look up
# columns by role:
#  - grps: bare (unmapped) rhs terms, i.e. grouping columns
#  - date: the single column mapped as date(...) on the rhs
#  - incidentals: bare (unmapped) lhs terms
#  - get / predictor / observation: first column mapped to a role
#    (anywhere / rhs only / lhs only), or NULL if absent
#  - has_observation / has_predictor: existence tests for a role
.mapper = function(x,...) {
v = x
return(list(
grps = v %>% filter(side=="rhs" & is.na(mapping)) %>% pull(value),
date = v %>% filter(side=="rhs" & mapping=="date") %>% pull(value) %>% `[[`(1),
incidentals = v %>% filter(side=="lhs" & is.na(mapping)) %>% pull(value),
get = function(type) {
tmp = v %>% filter(mapping==type) %>% pull(value)
if (length(tmp) == 0) return(NULL)
tmp[[1]]
},
predictor = function(type) {
tmp = v %>% filter(side=="rhs" & mapping==type) %>% pull(value)
if (length(tmp) == 0) return(NULL)
tmp[[1]]
},
observation = function(type="count") {
tmp = v %>% filter(side=="lhs" & mapping==type) %>% pull(value)
# can subst NULL using !! and it behaves as expected in ggplot and tidyselect
if (length(tmp) == 0) return(NULL)
tmp[[1]]
},
has_observation = function(type="count") {
nrow(v %>% filter(side=="lhs" & mapping==type)) > 0
},
has_predictor = function(type) {
nrow(v %>% filter(side=="rhs" & mapping==type)) > 0
}
))
}
# .var_from_rhs = function(formula, match="date") {
# if (is.null(formula)) return(NULL)
# v = .specification_from_formula(formula)
# sym = v %>% filter(side=="rhs" & mapping == match) %>% pull(value)
# if (length(sym)==0) return(NULL)
# return(sym)
# }
#
# .vars_from_rhs = function(formula) {
# if (is.null(formula)) return(NULL)
# v = .specification_from_formula(formula)
# sym = v %>% filter(side=="rhs" & !is.na(mapping)) %>% select(mapping,value) %>% deframe()
# return(sym)
# }
#
# .grps_from_rhs = function(formula) {
# if (is.null(formula)) return(NULL)
# v = .specification_from_formula(formula)
# sym = v %>% filter(side=="rhs" & is.na(mapping)) %>% pull(value)
# return(sym)
# }
#
# .value_from_lhs = function(formula) {
# if (is.null(formula)) return(NULL)
# # formula = n ~ date + type(cls) + report(spec) + age+gender+region+code
# v = all.vars(rlang::f_lhs(formula))
# if (length(v) > 1) stop("Only zero or one variable on lhs allowed, defining the value (e.g. case count)")
# if (length(v) == 0) return(NULL)
# return(as.symbol(v))
# }
# .rdeframe = function(form_df, ...) {
# vars = ensyms(...)
# if (length(vars) == 1) {
# return(form_df %>% pull(!!vars[[1]]) %>% unlist())
# }
# form_df %>%
# rename(.name = !!vars[[1]]) %>%
# mutate(.name = ifelse(is.na(.name),"na",as.character(.name))) %>%
# group_by(.name) %>% group_modify(function(d,g,...) {
#
# tibble(.value = list(.rdeframe(d,!!!vars[-1])))
# }) %>% deframe()
# }
#
# .map
#
# tmp = .rdeframe(form_df, side, mapping, value)
## metadata object ----
# S3 generic: build an epimetadata object from a formula or a specification
# (see as.epimetadata.formula and as.epimetadata.specification)
as.epimetadata = function(x, ...) {
UseMethod("as.epimetadata", x)
}
# Build epimetadata directly from a formula by deriving its specification.
as.epimetadata.formula = function(x, type, interval = 1, ...) {
  .make_metadata(x, .specification_from_formula(x), type, interval)
}
# Build epimetadata from a specification by reconstructing its formula.
as.epimetadata.specification = function(x, type, interval = 1, ...) {
  .make_metadata(.formula_from_specification(x), x, type, interval)
}
# Assemble the epimetadata structure from its parts; `m` caches the accessor
# closures derived from the specification (see .mapper).
.make_metadata = function(formula, specification, type, interval) {
  meta <- list(
    specification = specification,
    formula = formula,
    interval = interval,
    type = type,
    m = .mapper(specification)
  )
  structure(meta, class = "epimetadata")
}
# Return a copy of `meta` with its time-step interval replaced.
.set_interval = function(meta, interval) {
  updated <- meta
  updated$interval <- interval
  updated
}
## generic epi data functions ----
# Internal constructor shared by all epi data classes: validates `x`
# against `meta`, adds a sequential row identifier, attaches the metadata
# attribute and prepends the requested class vector `cls`.
.make_epidata = function(x, meta, cls) {
  .check_conformant(x, meta)
  out <- x %>%
    ungroup() %>%
    mutate(.id = row_number()) %>%
    set_meta(meta)
  class(out) <- c(cls, class(out))
  out
}
# Decide whether `x` should become a time series or a line list under
# `meta`. Already-classed inputs pass through; a `count` mapping or an
# asserted type of "ts" forces a time series. Otherwise a heuristic is
# used: near-complete date coverage with ~one row per group/class/date
# combination looks like a time series, anything else is a line list.
.guess_type = function(x,meta) {
  if (any(c("epi_ts","epi_ll") %in% class(x))) return(x)
  date = meta$m$date
  grps = meta$m$grps
  cls = meta$m$observation("class")
  # BUG FIX: `interval` was referenced below but never bound here
  interval = meta$interval
  if (meta$m$has_observation("count")) {
    # can infer type from metadata
    return(as.epi_ts.data.frame(x, meta$formula, interval = meta$interval))
  }
  if (meta$type=="ts") {
    return(as.epi_ts.data.frame(x, meta$formula, interval = meta$interval))
  }
  # mean squared excess of rows per group/class/date over 1
  # (0 when every combination appears exactly once)
  grpwise_count_R2 = x %>% group_by(!!!grps,!!cls,!!date) %>% count() %>% pull(n) %>% magrittr::subtract(1) %>% magrittr::raise_to_power(2) %>% mean()
  full = .full_seq_dates(x %>% pull(!!date),interval)
  # proportion of the full date grid that is missing, averaged over groups
  incomplete_ts = x %>% group_by(!!!grps,!!cls) %>% summarise(matched = sum(!!date %in% full)) %>% mutate(missing = length(full)-matched, total=length(full)) %>% ungroup() %>% summarise(prop = sum(missing)/sum(total)) %>% pull(prop)
  if (incomplete_ts < 0.05 && grpwise_count_R2 < 0.01) {
    # (a leftover browser() debugging call was removed here)
    as.epi_ts.data.frame(x, meta$formula, interval = meta$interval)
  } else {
    as.epi_ll.data.frame(x, meta$formula)
  }
}
# Validate that `x` is a data frame containing every column named in the
# metadata specification. Emits one message per missing column, then
# stops if any were absent.
.check_conformant = function(x, meta) {
  if (!is.data.frame(x)) stop("epi data must be a data frame")
  wanted <- vapply(meta$specification$value, as_label, character(1))
  absent <- wanted[!wanted %in% colnames(x)]
  for (nm in absent) {
    message("No column found: ", nm)
  }
  if (length(absent) > 0) {
    stop("Input is not conformant to specification provided: ", meta$formula)
  }
}
# Retrieve the `epimetadata` object stored in the "epi" attribute
# (NULL when none has been attached).
get_meta = function(x) {
  attr(x, "epi")
}
# Attach `metadata` to `x` as the "epi" attribute, after checking that
# `x` conforms to the metadata's column specification.
set_meta = function(x, metadata) {
  .check_conformant(x, metadata)
  attr(x, "epi") <- metadata
  x
}
# TRUE if `x` carries one of the line-list classes (raw or standardised).
is.epi_ll = function(x,...) {
  inherits(x, c("epi_ll", "std_ll"))
}
# TRUE if `x` carries one of the time-series classes (raw or standardised).
is.epi_ts = function(x,...) {
  inherits(x, c("epi_ts", "std_ts"))
}
## Line list object ----
# S3 generic: coerce an object to an epi line list (`epi_ll`).
as.epi_ll = function(x, ...) {
UseMethod("as.epi_ll",x)
}
# Fallback method: any class without a specific conversion is an error.
as.epi_ll.default = function(x, ...) {
  classes <- paste0(class(x), collapse = ", ")
  stop("Can't make a epi line list out of a ", classes)
}
# Convert a plain data frame of individual-level records into an `epi_ll`
# line list described by `formula`. A multinomial class mapping adds the
# `epi_multi` class; a `count` mapping is rejected because counted data
# should become a time series instead.
as.epi_ll.data.frame = function(x, formula) {
  # interval in a line list defaults to 1 (set by as.epimetadata)
  meta <- as.epimetadata(formula, type = "ll")
  m <- meta$m
  if (m$has_observation("count")) {
    stop("Attempting to make a line list out of a object with a 'count' column. Did you mean to use as.epi_ts()?")
  }
  out_class <- if (is_null(m$observation("class"))) {
    "epi_ll"
  } else {
    c("epi_multi", "epi_ll")
  }
  .make_epidata(x, meta, out_class)
}
# Expand an aggregated time series back into a line list: each row is
# repeated `count` times. With `jitter = TRUE` and a multi-day interval,
# dates are spread uniformly across the interval rather than all falling
# on its first day. The consumed count mapping is removed from the
# resulting metadata.
as.epi_ll.epi_ts = function(x, jitter=FALSE) {
  meta = x %>% get_meta()
  m = meta$m
  interval = meta$interval
  cls = m$observation("class")
  grps = m$grps
  date = m$date
  count = m$observation("count")
  multinom = !is_null(cls)
  out_class = "epi_ll"
  if (multinom) out_class = c("epi_multi",out_class)
  if(is.null(count)) stop("count column must be present")
  y = x %>% group_by(!!!grps,!!date,!!cls) %>% group_modify(function(d,g,..) {
    # repeat each row id once per counted event
    join = unlist(map2(d %>% pull(.id), d %>% pull(!!count), ~ rep(.x,.y)))
    # BUG FIX: drop the count column through its mapped symbol - the
    # source column is not necessarily named "count" literally
    return(d %>% select(-!!count) %>% inner_join(tibble(.id=join), by=".id") %>% select(-.id))
  })
  y = y %>% ungroup() %>% mutate(.id=row_number())
  if (jitter & interval > 1) {
    y = y %>% mutate(!!date := !!date+floor(runif(nrow(y),max=interval)))
  }
  # the count mapping has been consumed, so remove it from the spec
  specification = meta$specification %>% filter(!(side == "lhs" & mapping %in% c("count")))
  meta = as.epimetadata(specification, type="ll")
  return(.make_epidata(y,meta,out_class))
}
# Print a one-line overview of a line list followed by its formula.
summary.epi_ll = function(x,...) {
epi = x %>% get_meta()
m = epi$m
# NOTE(review): length(m$grps)+1 reports the number of grouping variables
# plus one, not the number of distinct group combinations - confirm intent.
cat(sprintf("linelist: %1.0f line list(s), %1.0f entries", length(m$grps)+1, nrow(x)),"\n")
print(epi$formula, showEnv = FALSE)
}
# Print method: show the summary header, then fall through to the
# underlying (tibble/data.frame) print method.
print.epi_ll = function(x,...) {
summary(x,...)
NextMethod(x,...)
}
# Glimpse method: show the summary header, then fall through to the
# underlying glimpse implementation.
glimpse.epi_ll = function(x,...) {
summary(x,...)
NextMethod(x,...)
}
## Incidence ts object ----
# S3 generic: coerce an object to an epi incidence time series (`epi_ts`).
as.epi_ts = function(x, ...) {
UseMethod("as.epi_ts", x)
}
# Fallback method: any class without a specific conversion is an error.
as.epi_ts.default = function(x, ...) {
  classes <- paste0(class(x), collapse = ", ")
  stop("Can't make a epi time series out of a ", classes)
}
# Build an epi time series directly from a vector of dates plus parallel
# `count` (and optional `class`) vectors.
as.epi_ts.Date = function(x, count, class = NULL, ...) {
  # tibble() silently drops the class column when `class` is NULL
  x = tibble(date = x, count = count, class = class)
  formula = count() ~ date()
  if(!is.null(class)) formula = .update(formula, class() + . ~ .)
  # BUG FIX: previously called as.epidata.data.frame(), which is not
  # defined anywhere; dispatch to this generic's data.frame method.
  as.epi_ts.data.frame(x, formula, ...)
}
.null_na = function(x) {ifelse(suppressWarnings(is.na(x)),NULL,x)}
# Convert a data frame of (possibly grouped) counts into an `epi_ts`.
# `interval` (days) is inferred from the date column when not asserted;
# `rectangular = TRUE` completes one shared date grid for all groups.
as.epi_ts.data.frame = function(x, formula, interval = NA, rectangular = FALSE, dates = NULL) {
  meta = as.epimetadata(formula, type="ts", interval=interval)
  date = meta$m$date
  # Determine the frequency of the time series:
  # either asserted, or by reduction from the data
  if (is.na(meta$interval)) meta$interval = .day_interval(x %>% pull(!!date))
  # BUG FIX: forward the caller's `rectangular` flag (it was accepted as
  # an argument but hard-coded to FALSE in this call)
  .convert_dataframe(x, meta, rectangular = rectangular, verbose = TRUE, dates = dates)
}
# Aggregate a line list into an epi time series: every record contributes
# a count of one, then counts are completed and summed over whole
# `interval`-day bins by .convert_dataframe().
as.epi_ts.epi_ll = function(x, formula = count() + . ~ ., interval = 1, dates = NULL) {
  meta = x %>% get_meta()
  new_meta = .update(meta, formula)
  m = new_meta$m
  new_count = m$observation("count")
  # what dates are we looking at?
  orig_dates = x %>% pull(!!m$date)
  if (is.null(dates)) dates = orig_dates
  dates = .full_seq_dates(dates,interval, truncate_partials = TRUE)
  # BUG FIX: each record counts as one event; this was written as
  # `!!new_count == 1` (a comparison) so the count column was never created
  y = .convert_dataframe(x %>% mutate(!!new_count := 1), new_meta, rectangular = TRUE, verbose = FALSE, dates = dates)
  return(y %>% set_meta(new_meta))
}
# Core conversion of a data frame into an `epi_ts`:
# 1. (interval > 1) keep only dates within whole interval bins and snap
#    them to the bin start,
# 2. collapse duplicate group/class/date rows by summing counts,
# 3. complete the date (x class) grid per group, filling missing counts
#    with zero,
# 4. attach row ids, metadata and the epi_ts / epi_multi classes.
# `rectangular = TRUE` (or an explicit `dates` vector) uses one shared
# date grid for all groups; otherwise each group is completed over its
# own observed date range.
.convert_dataframe = function(x, meta, rectangular = FALSE, verbose=FALSE, dates = NULL) {
  if (nrow(x)<2) stop("need multiple time points for a timeseries")
  interval = meta$interval
  m = meta$m
  date = m$date
  cls = m$observation("class")
  value = m$observation("count")
  grps = m$grps
  out_class = c("epi_ts")
  multinom = !is_null(cls)
  if (multinom) out_class = c("epi_multi",out_class)
  dates_given = !is.null(dates)
  orig_dates = x %>% pull(!!date)
  if(!dates_given) dates = .full_seq_dates(orig_dates,interval)
  # make sure data dates are within the range of the desired interval dates
  if (interval > 1) {
    # this is good for linelist type data where we want to make sure we have whole intervals
    # not so good for data already in time series which may "finish" on the first date of an interval.
    x = x %>% filter(.within_sequence(!!date, dates, interval))
    x = x %>% mutate(!!date := .floor_sequence(!!date, interval))
  }
  # check count values are unique for each combination of date, grouping, and multinom class
  tmp = x %>% group_by(!!!grps, !!cls, !!date) %>% count()
  if (any(tmp$n > 1)) {
    # (a leftover browser() debugging call was removed here)
    # TODO have to reconsider this as count is a very optional column of time series but others must be
    if (verbose) message("Input dataframe has more than one row per date (and class combination), which may be intentional. Combining (class) counts in multiple rows by summation, any other observations will be lost.")
    if (!is.null(value)) {
      # BUG FIX: the NA check previously pulled the count column from
      # `tmp` (the per-combination tally, which only has `n`); inspect
      # the source data instead.
      if(any(is.na(x %>% pull(!!value)))) warning("Count column contains some NA values. The combined count will be NA for these rows.")
      x = x %>% group_by(!!!grps, !!cls, !!date) %>% summarise(!!value := sum(!!value))
    }
    # since group by summarise steps will remove all other observations we need to make sure that the metadata is updated with the lhs including only class and count.
    # BUG FIX: the filter previously *dropped* the class/count mappings
    # and kept every other lhs observation - the inverse of the stated
    # intent (and of what the summarise above retains).
    specification = meta$specification %>% filter(!(side == "lhs" & !(mapping %in% c("class","count"))))
    meta = as.epimetadata(specification, type=meta$type, interval=meta$interval)
  }
  # ensure completeness of dates and (optionally) class on a per group basis
  # step 1 setup the complete combination of dates and classes (if present)
  if (multinom) {
    # ensure counts are complete for each of the outcome classes also as well as for each date.
    clsses = tibble(x) %>% pull(!!cls) %>% unique() %>% sort()
    join_cols = c(as_label(date),as_label(cls))
  } else {
    clsses = NULL
    join_cols = as_label(date)
  }
  # step 2 generate a crossing dataframe of all combination of dates and optionally classes
  # this is the version for rectangular time series, where a single source of data contains
  # the full range of time points for all sources - i.e. there is known to be no differential
  # reporting delay.
  lhs = .dates_and_classes(date,dates,cls,clsses)
  # step 3 left join crossing dataframe with data and fill missing counts with zero.
  # ensuring that the result is valid
  x = tibble(x) %>%
    group_by(!!!grps) %>%
    group_modify(function(d,g,...) {
      # do a groupwise join. the lhs is either all dates or all dates and class levels
      # or if we are not using rectangular time series then calculate a group-wise lhs
      # including the range present in the data.
      if (!rectangular & !dates_given) {
        tmp = d %>% pull(!!date)
        dates = .full_seq_dates(tmp,interval)
        lhs = .dates_and_classes(date,dates,cls,clsses)
      }
      # do the fill for missing counts.
      d = lhs %>%
        left_join(d, by = join_cols)
      if (!is.null(value)) {
        # TODO: what about other observations?
        d = d %>% mutate(!!value := ifelse(is.na(!!value),0,!!value))
      }
      return(d)
    }) %>% ungroup()
  if (!".id" %in% colnames(x)) {
    # add an .id column only if there is not one already.
    x = x %>% mutate(.id=row_number())
  }
  return(.make_epidata(
    as_tibble(x),
    meta,
    out_class))
}
# Build the "complete" left-hand frame of every date (and, when classes
# are supplied, every date x class combination) that data will be
# left-joined onto.
.dates_and_classes = function(date, dates, cls, clsses) {
  # return the frame directly (previously assigned to an unused local
  # `lhs` and returned invisibly)
  if (!is.null(clsses)) {
    crossing(!!date := dates, !!cls := clsses)
  } else {
    tibble(!!date := dates)
  }
}
# Print a one-line overview of a time series collection (group count,
# interval, date range, record count), followed by its formula.
summary.epi_ts = function(x, ...) {
epi = x %>% get_meta()
m = epi$m
dates = x %>% pull(!!(m$date)) %>% range()
grpCount = x %>% select(!!!m$grps) %>% distinct() %>% nrow()
# NOTE(review): the exclusive end is reported as last date + 1 + interval;
# last date + interval looks more plausible - confirm the intended bound.
cat(sprintf("%1.0f timeseries, with interval %s day(s), from %s up to (but not including) %s, %1.0f total records", grpCount, epi$interval, dates[[1]], dates[[2]]+1+epi$interval, nrow(x)),"\n")
print(epi$formula, showEnv = FALSE)
}
# Print method: show the summary header, then fall through to the
# underlying (tibble/data.frame) print method.
print.epi_ts = function(x,...) {
summary(x,...)
NextMethod(x,...)
}
# Glimpse method: show the summary header, then fall through to the
# underlying glimpse implementation.
glimpse.epi_ts = function(x,...) {
summary(x,...)
NextMethod(x,...)
}
## standardised formats ----
# force the rename of all observations and predictors, add in .time and .grpId (and optionally .subGrpId if multinomial) columns
# Standardise an epi data frame for processing: copy every mapped column
# to its canonical name (date, class, count, ...), keep only mapped and
# grouping columns plus .id, and add .time / .grpId (and .subGrpId for
# multinomial data). Tags the result epi_multi or epi_simple.
.normalise = function(x, interval = NA, ...) {
meta = x %>% get_meta()
m = meta$m
date = m$date
cls = m$observation("class")
multinom = !is.null(cls)
grps = m$grps
y = x
for (map in meta$specification$mapping) {
# rename the columns to their canonical names defined in the specification.
if (!is.na(map)) {
value = m$get(map)
y = y %>% mutate(!!map := !!value)
}
}
# NOTE(review): `all` is assigned but never used below - confirm whether
# it was meant to feed the select().
all = meta$specification$value
if (is.na(interval)) interval = .day_interval(y$date)
y = y %>%
select(all_of(na.omit(meta$specification$mapping)), !!!grps, .id)
# NOTE(review): inside mutate, `date` resolves to the canonical "date"
# column created by the rename loop above (not the mapper symbol) -
# this relies on the rename having happened first.
y = y %>% group_by(!!!grps) %>% mutate(
.grpId = cur_group_id(),
.time = .date_to_time(date, interval),
) %>% group_by(!!!grps, .grpId)
if (multinom) {
y = y %>% group_by(!!!grps, class) %>% mutate(.subGrpId = cur_group_id()) %>% group_by(!!!grps, .grpId)
class(y) = c("epi_multi",class(y))
} else {
class(y) = c("epi_simple",class(y))
}
return(y)
}
# Reverse .normalise: convert .time back to date, restore the original
# column names from their canonical mapping names, fold any newly created
# columns into the specification as extra lhs observations, and re-derive
# the epi data type of the result.
.denormalise = function(x, meta) {
  y=x
  # BUG FIX: `m` was referenced below (m$get) but never bound here, so
  # the rename loop only worked if a stale global `m` happened to exist
  m = meta$m
  if (".time" %in% colnames(y)) {
    if (!("date" %in% colnames(y))) {
      y = y %>% mutate(date = .time_to_date(.time))
    } else if (any(is.na(y$date))) {
      y = y %>% mutate(date = .time_to_date(.time))
    }
  }
  y = y %>% select(-.time)
  for (map in meta$specification$mapping) {
    # rename the columns to their denormalised names defined in the specification.
    # group columns will probably not have changed name
    if (!is.na(map)) {
      value = m$get(map)
      if (map %in% colnames(y)) {
        y = y %>% rename(!!value := !!map)
      }
    }
  }
  old_cols = sapply(meta$specification$value,as_label, USE.NAMES = FALSE)
  new_cols = colnames(y)[!colnames(y) %in% old_cols]
  # columns beginning with "." are bookkeeping, not observations
  new_obs = new_cols %>% magrittr::extract(!stringr::str_starts(.,stringr::fixed(".")))
  # all new cols are added as new observations onto the lhs
  new_cols_df = tibble(
    side = "lhs",
    value = sapply(new_obs, as.symbol,USE.NAMES = FALSE),
    mapping = new_obs
  )
  new_spec = bind_rows(
    meta$specification %>% filter(sapply(value, as_label, USE.NAMES = FALSE) %in% colnames(y)),
    new_cols_df)
  new_meta = as.epimetadata.specification(new_spec, type=meta$type, interval = meta$interval)
  y = y %>% ungroup() %>% select(any_of(old_cols),all_of(new_obs),any_of(".id"))
  return(y %>% .guess_type(new_meta))
}
## Execute a simple timeseries processing step on a standard data format ---
# Run an epi processing function `.f` group-wise over `x`:
# 1. normalise `x` to canonical column names,
# 2. validate against the column mappings `.f` declares via an optional
#    `.require` formal,
# 3. apply `.f` per group, tagging each group std_ll or std_ts so `.f`
#    can dispatch on input shape,
# 4. denormalise the combined result (which must contain a date column).
execute_epifunction = function(x, .f, ...) {
  meta = x %>% get_meta()
  # `.require` is read off the function's formals; absent formals yield
  # NULL, which skips validation
  require_str = formals(.f)[[".require"]]
  input = .normalise(x)
  # (a leftover browser() debugging call was removed here)
  if (!is.null(require_str)) {
    requires = eval(require_str)
    if (!all(requires %in% meta$specification$mapping)) {
      warning("Input data frame does not have all the required columns:")
      warning(meta$formula)
      stop("must contain column mappings for: ",paste0(requires,collapse = ","))
    }
  }
  output = input %>% group_modify(function(d,g,...) {
    if ("epi_ll" %in% class(x)) {
      class(d) = c("std_ll",class(d))
      return(.f(d, g=g, ...))
    } else {
      class(d) = c("std_ts",class(d))
      # TODO: informative error messages
      return(.f(d, g=g, ..., interval = meta$interval))
    }
  })
  if (!"date" %in% colnames(output)) {
    #TODO: consider autoconverting .time to date
    stop("the result of an epifunction must include a date column")
  }
  return(.denormalise(output, meta))
}
## Simple timeseries functions ----
# a set of estimators for the simple single time series situations
# the estimates target a range of outputs such as poisson rate, proportion model, growth rate, etc.
# these are aimed to be tidy but assume (and enforce) column naming conventions are adhered to
# these do not work on grouped data. they assume the input has been sanitised before hand, although should tolerate NA values.
# make sure a given column exists and create it with the orElse function if not.
# Guarantee that `df` has every column named in `column`; when any is
# missing, delegate to `or_else` (default: raise an error) to repair or
# replace the frame. `or_else` accepts purrr-style lambda notation.
ensure_exists = function(df, column, or_else = function(df) {stop("Missing column: ",column)},...) {
  fix <- purrr::as_mapper(or_else)
  if (all(column %in% colnames(df))) {
    df
  } else {
    fix(df, ...)
  }
}
# or else NA
.opt = function(expr) tryCatch(expr,error=function(e) NA_real_)
# format a transformed normally distributed variable into quantiles
# Assemble a nested tibble of quantile estimates from a fitted value and
# its standard error on the link scale, and store it in `df` under the
# `estimate` column.
# - fit/se.fit: mean and SE on the (normal) link scale, e.g. logit
# - t: inverse-link transform applied to each quantile
# - link: link name, used to label the raw fit/SE columns
# - error: optional error message carried alongside the estimates
# (the commented purrr::map2 scaffolding is retained from the original)
.format_result = function(df, fit, se.fit, t, estimate, modelName,link, error=NA_character_) {
est = #purrr::map2(fit,se.fit,.f = function(fit,se.fit) {
tibble(
!!(paste0(link,"(x)")) := fit,
!!(paste0("SE.",link,"(x)")) := se.fit,
# quantiles are computed on the link scale then back-transformed; .opt
# converts any transform failure into NA rather than an error
Quantile.0.025 = .opt(t(qnorm(0.025,fit,se.fit))),
Quantile.0.05 = .opt(t(qnorm(0.05,fit,se.fit))),
Quantile.0.25 = .opt(t(qnorm(0.25,fit,se.fit))),
Quantile.0.5 = t(fit),
Quantile.0.75 = .opt(t(qnorm(0.75,fit,se.fit))),
Quantile.0.95 = .opt(t(qnorm(0.95,fit,se.fit))),
Quantile.0.975 = .opt(t(qnorm(0.975,fit,se.fit))),
model = modelName,
error = error)
#})
df %>% mutate(!!estimate := est)
}
# extract the locfit result from the locfit model and format it
# Produce a constant (typically NA or boundary) estimate row set by
# pushing a fixed mean/SE pair through .format_result with the
# inverse-link function for `link`.
.fixed_result = function(df, estimate, modelName, link, mean = NA_real_, se = NA_real_, error = "unknown error") {
  inv_link <- .inv[[link]]
  .format_result(df, fit = mean, se.fit = se, t = inv_link, estimate, modelName, link, error)
}
# Attach an empty estimate tibble to `df` under the `estimate` column.
.empty_result = function(df, estimate) {
  mutate(df, !!estimate := tibble())
}
# Inverse-link functions keyed by link name. Infinite link-scale values
# map to the boundary of the response scale (0 for log; 0/1 for logit).
.inv = list(
  value = function(x) {
    x
  },
  log = function(x) {
    ifelse(x == -Inf, 0, exp(x))
  },
  logit = function(x) {
    case_when(x == -Inf ~ 0, x == Inf ~ 1, TRUE ~ 1 / (1 + exp(-x)))
  }
)
# This is just to format locfit results given a locfit model.
# extract the locfit result from the locfit model and format it
# ... could be where="fitp", or newdata=.time points....
# Extract fitted values and local confidence bands from a locfit model
# (optionally at new data points via `newdata`/`where="fitp"`) and format
# them through .format_result. Missing SEs are interpolated; any failure
# falls back to an all-NA .fixed_result carrying the error message.
.locfit_extract_result = function(df, model, estimate, modelName, link = "value") {
tryCatch({
points = preplot(model,se.fit = TRUE,band="local", newdata = df)
# trans is the inverse-link transform returned by preplot
t = points$trans
fit = points$fit
# SEs can contain NAs; interpolate them, or give up and use all-NA
se.fit = tryCatch({
as.vector(forecast::na.interp(points$se.fit))
}, error = function(e) {
rep(NA,length(fit))
})
df %>% .format_result(fit,se.fit,t,estimate,modelName,link)
}, error = function(e) {
df %>% .fixed_result(estimate,modelName,link,error = e$message)
})
}
## Proportion estimation ----
# Generate the formula for a locfit model based on things I understand
# Build a locfit model formula `<value> ~ locfit::lp(.time, ...)`.
# With nearestNeighbours = TRUE the smoothing is the `nn` fraction of
# observations covered by a symmetric window of `window` points;
# otherwise a fixed bandwidth `h` of `window` x-units is used.
# `polynomialDegree` sets the local polynomial degree.
.locfit_formula = function(valueVar, nrowDf, window, polynomialDegree, nearestNeighbours = TRUE, ...) {
  valueVar = ensym(valueVar)
  frac_one_sided = min(window / nrowDf, 1)
  frac_symmetric = min((window * 2 + 1) / nrowDf, 1)
  lp_args = list(
    # nn is a fraction of the total observations
    nn = if (nearestNeighbours) frac_symmetric else frac_one_sided,
    # h is a fixed bandwidth in units of the x variable (0 disables it)
    h = if (nearestNeighbours) 0 else window,
    deg = polynomialDegree
  )
  arg_text = paste(names(lp_args), lp_args, sep = "=", collapse = ", ")
  as.formula(paste0(as_label(valueVar), " ~ locfit::lp(.time, ", arg_text, ")"))
}
# TRUE when `x` carries a literal `count` column.
.has_count = function(x, ...) {
  any(colnames(x) == "count")
}
# takes a line list or incidence count of patient admissions with a multinomial class label, and fits
# a quasi-binomial model with logit link using local regression. This expects a dataframe
# with an .time column and a class column. Multinomial class is either treated as a
# set of 1 versus others binomials, if the class is unordered (or not a factor) or as a set of less than or equal versus more
# than binomials (if the multinomial class is ordered)
# Estimate the (possibly cumulative) class probability over time from a
# line list or an incidence time series with a multinomial `class` label,
# using local quasi-binomial regression (locfit) with a logit link.
# - unordered class: fitted as one-vs-others binomials per level
# - ordered class: fitted cumulatively, P(class <= level)
# - quick=TRUE aggregates to per-time counts before fitting (default for
#   large inputs); quick=FALSE fits the expanded line list directly.
# Returns one row per predicted time point and class level with nested
# quantile estimates under `probability` / `probability.cumulative`.
estimate_proportion = function(d, ..., interval = .day_interval(d$date), window = 28, degree=2, quick=NA) {
  d = d %>% ensure_exists("date")
  d = d %>% ensure_exists("class")
  # convert dates to times
  d = d %>% ensure_exists(".time", or_else = ~ mutate(., .time = .date_to_time(date, interval)))
  if (is.na(quick)) {
    quick = !((.has_count(d) & sum(d$count) < 10000) | (!.has_count(d) & nrow(d) < 10000))
  }
  is_ts = .has_count(d)
  time_span = (max(d$.time)-min(d$.time))*interval
  # data live on the .time grid; predictions are made on a daily grid
  data_times = seq(min(d$.time),max(d$.time), 1)
  predict_times = seq(min(d$.time),max(d$.time), 1/interval)
  cumulative = is.ordered(d$class)
  model_name = sprintf("locfit:probability:%s:%s:%1.0f*%1.0f:%1.0f",if(cumulative) "cumulative" else "binomial",if(quick) "counts" else "linelist", window, interval,degree)
  out = tibble()
  # repeat once for each class level. This is a binomial comparison (x vs not(x)) or cumulative (<=x) vs (>x)
  for (level in sort(unique(d$class))) {
    if (cumulative) {
      tmpdf = d %>% mutate(class_bool = class <= level)
      est_name = "probability.cumulative"
    } else {
      tmpdf = d %>% mutate(class_bool = class == level)
      est_name = "probability"
    }
    # BUG FIX: these two branches were joined by `%>%` instead of `else`,
    # so for line-list input the branch never ran (leaving stale or
    # undefined tmpdf_quick/tmpdf_slow) and for time-series input the
    # piped block's assignments were discarded in the pipe's temporary
    # environment.
    if (is_ts) {
      # summarise the counts
      tmpdf_quick = tmpdf %>% group_by(.time,class_bool) %>% summarise(count = sum(count),.groups="drop")
      if(!quick) tmpdf_slow = tmpdf_quick %>% group_by(.time,class_bool) %>% group_modify(function(d,g,..) {return(tibble(count = rep(1,d$count)))})
    } else {
      tmpdf_slow = tmpdf
      if(quick) tmpdf_quick = tmpdf %>% group_by(.time,class_bool) %>% summarise(count = n(),.groups="drop") %>% tidyr::complete(.time = data_times, class_bool, fill=list(count=0) )
    }
    result = tibble(.time=predict_times, class=level)
    if (nrow(tmpdf) == 0) {
      # empty estimate
      out = out %>% bind_rows(result %>% .fixed_result(est_name,model_name,link = "logit",mean = NA,se = NA, error = "no data"))
    } else if (sum(tmpdf$class_bool) < degree) {
      # zero estimate
      out = out %>% bind_rows(result %>% .fixed_result(est_name,model_name,link = "logit",mean = -Inf,se = 10000, error = "all zeros"))
    } else if (sum(!tmpdf$class_bool) < degree) {
      # one estimate
      out = out %>% bind_rows(result %>% .fixed_result(est_name,model_name,link = "logit",mean = Inf,se = 10000, error = "all ones"))
    } else {
      # BUG FIX: the previous error handler called browser() and assigned
      # to `out` inside the handler function, so the fallback rows were
      # silently discarded. tryCatch now *returns* the rows and the bind
      # happens outside it.
      rows = tryCatch({
        if (quick) {
          # This is how I expect it to work:
          # lf_form = .locfit_formula(class_bool, time_span, window = window, polynomialDegree = degree, nearestNeighbours = !is_ts)
          # fit = locfit::locfit(lf_form, data = tmpdf, weights = count,
          #                      family="qbinomial", link="logit")
          # This is what seems to work but does not include any sample size in weighting.
          tmpdf_quick = tmpdf_quick %>% group_by(.time) %>% mutate(total = sum(count), p=count/total) %>%
            filter(class_bool)
          lf_form = .locfit_formula(p, time_span, window = window, polynomialDegree = degree, nearestNeighbours = FALSE)
          # timeseries model when there are counts
          fit = locfit::locfit(lf_form,
                               data = tmpdf_quick,
                               family="qbinomial", link="logit", maxit = 5000, maxk=5000)
        } else {
          # this is the line list version.
          lf_form = .locfit_formula(class_bool, time_span, window = window, polynomialDegree = degree, nearestNeighbours = TRUE)
          # line list model when there are no counts
          fit = locfit::locfit(lf_form,
                               data = tmpdf_slow,
                               family="qbinomial", link="logit", maxit = 5000, maxk=5000)
        }
        result %>% .locfit_extract_result(model = fit, estimate = est_name, modelName = model_name, link = "logit")
      }, error = function(e) {
        result %>% .fixed_result(est_name,model_name,link="logit",error = e$message)
      })
      out = out %>% bind_rows(rows)
    }
  }
  # convert times back to dates
  out = out %>% mutate(date = .time_to_date(.time))
  # swap factor levels back in
  if (is.factor(d$class)) out = out %>% mutate(class = factor(class, levels(d$class), ordered = is.ordered(d$class)))
  return(out)
}
# takes a line list of patient admissions with a multinomial class label, and fits
# a quasi-binomial model with logit link using local regression. This expects a dataframe
# with an admission_week column and a class column. Multinomial class is either treated as a
# set of 1 versus others binomials (cumulative = FALSE) or as a set of less than or equal versus more
# than binomials (cumulative = TRUE, which assumes multinomial class is ordered)
# Legacy proportion estimator for weekly admissions data: fits a local
# quasi-binomial (logit) model per class level, either one-vs-others
# (cumulative = FALSE) or cumulative P(class <= level) for an ordered
# class. Returns lower/median/upper (5%/50%/95%) estimates on a daily
# grid of fractional weeks. Superseded by estimate_proportion().
estimateProportion = function(admissions, ... ,nn=0.2, deg=2, cumulative = is.ordered(admissions$class)) {
  # get the output as fractional weeks - we wil convert this to days later.
  weeks = seq(min(admissions$admission_week),max(admissions$admission_week),by = 1/7)
  out = tibble()
  # we are doing a binomial this for each level in the factor versus all other levels.
  # this lets us create an estimate for multinomial data that I'm going to use later.
  # I've never been sure about whether multinomial proportions can be treated as the sum of
  # binomial 1 vs others, my suspicion is they can't, but I'm going to do it anyway
  for (level in levels(admissions$class)) {
    if (cumulative) {
      tmpdf = admissions %>% mutate(class_bool = class <= level)
    } else {
      tmpdf = admissions %>% mutate(class_bool = class == level)
    }
    # BUG FIX: this was a leftover interactive browser() call; surface
    # the data problem as a warning instead of halting a session
    if (any(is.na(tmpdf$class_bool))) warning("NA values in class comparison for level: ", level)
    # detect some edge cases
    if (nrow(tmpdf) == 0) {
      # data set is empty
      out = out %>% bind_rows(
        tibble(
          class = level,
          admission_week = weeks,
          admission_date = .weeks_to_date(weeks),
          lower = 0,
          median = 0,
          upper = 1
        )
      )
    } else if (!any(tmpdf$class_bool)) {
      # for a given class there is no data or all observations are negative
      out = out %>% bind_rows(
        tibble(
          class = level,
          admission_week = weeks,
          admission_date = .weeks_to_date(weeks),
          lower = 0,
          median = 0,
          upper = 0
        )
      )
    } else if (all(tmpdf$class_bool)) {
      # for a given class all the observations are positive
      out = out %>% bind_rows(
        tibble(
          class = level,
          admission_week = weeks,
          admission_date = .weeks_to_date(weeks),
          lower = 1,
          median = 1,
          upper = 1
        )
      )
    } else {
      fit = locfit::locfit(class_bool ~ locfit::lp(admission_week,nn=nn,deg=deg),
                           data = tmpdf,family="qbinomial", link="logit")
      tmp = preplot(fit,newdata=weeks,se.fit = TRUE,band="local")
      # NOTE(review): `tmp$tr` relies on partial matching of `tmp$trans`
      # (the inverse-link transform returned by preplot) - confirm.
      t = tmp$tr
      out = out %>% bind_rows(
        tibble(
          class = level,
          admission_week = weeks,
          admission_date = .weeks_to_date(weeks),
          lower = .opt(t(qnorm(0.05,tmp$fit,tmp$se.fit))),
          median = t(tmp$fit), #only because fit is normally distributed so mean=median
          upper = .opt(t(qnorm(0.95,tmp$fit,tmp$se.fit)))
        )
      )
    }
  }
  out = out %>% mutate(class = factor(class, levels(admissions$class)))
  return(out)
}
|
#' A calista Function
#'
#' Compute the statistical mode of a vector: the value that occurs most
#' often. Ties are broken in favour of the value that appears first in
#' `x`; an empty input returns a zero-length result.
#'
#' @param x A vector (numeric, character, factor, ...) whose most
#'   frequent value is wanted.
#' @return The single most frequent element of `x`.
#' @keywords calista
#' @export
#' @examples
#' Mode(c(1, 2, 2, 3))
Mode <- function(x) {
  ux <- unique(x)
  # tabulate(match(x, ux)) counts occurrences of each unique value;
  # which.max picks the first maximum, i.e. the earliest-seen mode.
  ux[which.max(tabulate(match(x, ux)))]
}
| /CALISTA-R/calista/R/Mode.R | permissive | CABSEL/CALISTA | R | false | false | 176 | r | #' A calista Function
#'
#' Compute the statistical mode of a vector: the value that occurs most
#' often. Ties are broken in favour of the value that appears first in
#' `x`; an empty input returns a zero-length result.
#'
#' @param x A vector (numeric, character, factor, ...) whose most
#'   frequent value is wanted.
#' @return The single most frequent element of `x`.
#' @keywords calista
#' @export
#' @examples
#' Mode(c(1, 2, 2, 3))
Mode <- function(x) {
  ux <- unique(x)
  # tabulate(match(x, ux)) counts occurrences of each unique value;
  # which.max picks the first maximum, i.e. the earliest-seen mode.
  ux[which.max(tabulate(match(x, ux)))]
}
|
# Association-rule mining over job-feed categories:
# load the feed into Spark, pull the columns of interest back, one-hot
# encode the semicolon-separated category field, and run apriori.
library(sparklyr)
library(dplyr)
library(stringr)
library(arules)
setwd("/Users/michaelstedler/PycharmProjects/BigDataProject")
# Spark Configurations (note: `conf` is never passed to spark_connect)
conf <- spark_config()
# build SPARK Connection
sc <- spark_connect(master = "local")
# load dataframe
jobsFeed <- read.csv("data/jobsFeed.csv")
tbl_jobsFeed <- copy_to(sc, jobsFeed, name = spark_table_name(substitute(jobsFeed)))
# pull the columns of interest back into R
x <- tbl_jobsFeed %>%
  #filter(city=="BERLIN") %>%
  select(author, category, city, country) %>%
  collect()
# build list with all categories in filtered jobsFeed
# (names are sanitised to alphanumerics so they are valid column names)
categories <- c()
for (i in seq_along(x$category)) {
  ls = strsplit(x$category[i], split = ";")
  for (cat in ls[[1]]) {
    clean_cat <- str_replace_all(cat, "[^[:alnum:]]", "")
    if (!clean_cat %in% categories) {
      categories <- c(categories, clean_cat)
    }
  }
}
# create a logical column for every possible category
# ([[<- replaces the previous eval(parse(...)) code generation)
mydf <- data.frame(x$author,
                   x$city,
                   x$country,
                   x$category)
for (i in categories) {
  print(i)
  mydf[[i]] <- rep(FALSE, length(x$category))
}
# set TRUE for every category present in each posting
# BUG FIX: the raw category name was previously compared against the
# *sanitised* names in `categories`, so any category containing
# punctuation was silently skipped (and would have produced invalid
# generated code in the eval(parse(...)) assignment).
for (i in seq_along(x$category)) {
  ls = strsplit(x$category[i], split = ";")
  for (cat in ls[[1]]) {
    clean_cat <- str_replace_all(cat, "[^[:alnum:]]", "")
    if (clean_cat %in% categories) {
      mydf[[clean_cat]][i] <- TRUE
    }
  }
}
# perform Association Rules mining
rules_all <- apriori(mydf,
                     parameter = list(supp = 0.05,
                                      conf = 0.05,
                                      target = "rules",
                                      minlen = 2),
                     # lhs -> target input classes, rhs -> target output classes
                     #appearance = list(rhs=c("python"))
)
inspect(rules_all)
| /spark_ar.R | no_license | moikipoiki/BigDataProject | R | false | false | 1,792 | r | library(sparklyr)
library(dplyr)
library(stringr)
library(arules)
setwd("/Users/michaelstedler/PycharmProjects/BigDataProject")
# Spark Configurations (note: `conf` is never passed to spark_connect)
conf <- spark_config()
# build SPARK Connection
sc <- spark_connect(master = "local")
# load dataframe
jobsFeed <- read.csv("data/jobsFeed.csv")
tbl_jobsFeed <- copy_to(sc, jobsFeed, name = spark_table_name(substitute(jobsFeed)))
# pull the columns of interest back into R
x <- tbl_jobsFeed %>%
  #filter(city=="BERLIN") %>%
  select(author, category, city, country) %>%
  collect()
# build list with all categories in filtered jobsFeed
# (names are sanitised to alphanumerics so they are valid column names)
categories <- c()
for (i in seq_along(x$category)) {
  ls = strsplit(x$category[i], split = ";")
  for (cat in ls[[1]]) {
    clean_cat <- str_replace_all(cat, "[^[:alnum:]]", "")
    if (!clean_cat %in% categories) {
      categories <- c(categories, clean_cat)
    }
  }
}
# create a logical column for every possible category
# ([[<- replaces the previous eval(parse(...)) code generation)
mydf <- data.frame(x$author,
                   x$city,
                   x$country,
                   x$category)
for (i in categories) {
  print(i)
  mydf[[i]] <- rep(FALSE, length(x$category))
}
# set TRUE for every category present in each posting
# BUG FIX: the raw category name was previously compared against the
# *sanitised* names in `categories`, so any category containing
# punctuation was silently skipped (and would have produced invalid
# generated code in the eval(parse(...)) assignment).
for (i in seq_along(x$category)) {
  ls = strsplit(x$category[i], split = ";")
  for (cat in ls[[1]]) {
    clean_cat <- str_replace_all(cat, "[^[:alnum:]]", "")
    if (clean_cat %in% categories) {
      mydf[[clean_cat]][i] <- TRUE
    }
  }
}
# perform Association Rules mining
rules_all <- apriori(mydf,
                     parameter = list(supp = 0.05,
                                      conf = 0.05,
                                      target = "rules",
                                      minlen = 2),
                     # lhs -> target input classes, rhs -> target output classes
                     #appearance = list(rhs=c("python"))
)
inspect(rules_all)
|
# -------------------------------------------------------------------------
setwd("~/data_analysis/")
# -------------------------------------------------------------------------
library(xlsx)
# -------------------------------------------------------------------------
# read in fi rst worksheet using a sheet index or name
my_xlsx <- read.xlsx("./data/my_data.xlsx", sheetName = "Sheet2")
head(my_xlsx)
# -------------------------------------------------------------------------
# read in data and change class type
mydata_sheet1.2 <- read.xlsx ("mydata.xlsx", sheetName = "Sheet1",
stringsAsFactors = FALSE,
colClasses = c ("double", "character",
"logical"))
# -------------------------------------------------------------------------
# by default keepFormula is set to FALSE so only
# the formula output will be read in
read.xlsx ("mydata.xlsx", sheetName = "Sheet4")
# -------------------------------------------------------------------------
# changing the keepFormula to TRUE will display the equations
read.xlsx ("mydata.xlsx", sheetName = "Sheet4", keepFormulas = TRUE)
# -------------------------------------------------------------------------
# readxl Package #
# -------------------------------------------------------------------------
library(readxl)
# -------------------------------------------------------------------------
my_data <- read_excel("./data/my_data.xlsx", sheet = "Sheet2")
my_data
# -------------------------------------------------------------------------
mydata_ex <- read_excel ("mydata.xlsx", sheet = "Sheet5",
col_types = c ("numeric", "blank", "numeric",
"date", "blank"))
# -------------------------------------------------------------------------
# change variable 3 to a logical variable
mydata_ex$`variable 3` <- as.logical (mydata_ex$`variable 3`)
# -------------------------------------------------------------------------
#Load Data f rom Saved R Object File
# -------------------------------------------------------------------------
# There are three primary ways that people tend
# to save R data/objects: as .RData, .rda, or as .rds fi les.
# -------------------------------------------------------------------------
load ("mydata.RData")
load (fi le = "mydata.rda")
name <- readRDS ("mydata.rds")
| /import_scrap_export_data/read_excel_rds.R | no_license | ttedla/data_analysis | R | false | false | 2,464 | r |
# -------------------------------------------------------------------------
# Demo script: reading Excel workbooks (xlsx / readxl) and saved R objects.
# NOTE(review): setwd() in a script is non-portable; prefer running from the
# project root or building paths with file.path()/here::here().
setwd("~/data_analysis/")
# -------------------------------------------------------------------------
library(xlsx)
# -------------------------------------------------------------------------
# read in first worksheet using a sheet index or name
my_xlsx <- read.xlsx("./data/my_data.xlsx", sheetName = "Sheet2")
head(my_xlsx)
# -------------------------------------------------------------------------
# read in data and change class type
mydata_sheet1.2 <- read.xlsx("mydata.xlsx", sheetName = "Sheet1",
                             stringsAsFactors = FALSE,
                             colClasses = c("double", "character",
                                            "logical"))
# -------------------------------------------------------------------------
# by default keepFormulas is set to FALSE so only
# the formula output will be read in
read.xlsx("mydata.xlsx", sheetName = "Sheet4")
# -------------------------------------------------------------------------
# changing keepFormulas to TRUE will display the equations
read.xlsx("mydata.xlsx", sheetName = "Sheet4", keepFormulas = TRUE)
# -------------------------------------------------------------------------
# readxl Package #
# -------------------------------------------------------------------------
library(readxl)
# -------------------------------------------------------------------------
my_data <- read_excel("./data/my_data.xlsx", sheet = "Sheet2")
my_data
# -------------------------------------------------------------------------
mydata_ex <- read_excel("mydata.xlsx", sheet = "Sheet5",
                        col_types = c("numeric", "blank", "numeric",
                                      "date", "blank"))
# -------------------------------------------------------------------------
# change variable 3 to a logical variable
mydata_ex$`variable 3` <- as.logical(mydata_ex$`variable 3`)
# -------------------------------------------------------------------------
# Load Data from Saved R Object File
# -------------------------------------------------------------------------
# There are three primary ways that people tend
# to save R data/objects: as .RData, .rda, or as .rds files.
# -------------------------------------------------------------------------
load("mydata.RData")
# BUG FIX: the original read 'load (fi le = ...)' -- a PDF-extraction typo
# that is a syntax error in R; the argument is spelled "file".
load(file = "mydata.rda")
name <- readRDS("mydata.rds")
name <- readRDS ("mydata.rds")
|
/Credit_Logistic.r | no_license | mekriti/Credit-Risk-Modelling-using-R | R | false | false | 5,978 | r | ||
# Tests for metrics(): point lookups, eye spacing, and formula metrics.
stimuli <- demo_stim()
test_that("error", {
  # malformed point reference errors; an out-of-range index yields NA per stimulus
  expect_error(metrics(stimuli, "x[s]"))
  expect_equal(metrics(stimuli, "x[200]"), c(f_multi = NA_real_, m_multi = NA_real_))
})
test_that("single point", {
  x0 <- c(f_multi = stimuli[[1]]$points[["x", 1]],
          m_multi = stimuli[[2]]$points[["x", 1]])
  y0 <- c(f_multi = stimuli[[1]]$points[["y", 1]],
          m_multi = stimuli[[2]]$points[["y", 1]])
  expect_equal(metrics(stimuli, "x[0]"), x0)
  expect_equal(metrics(stimuli, "y[0]"), y0)
})
test_that("eye spacing", {
  es <- metrics(stimuli, c(0, 1))
  comp <- c(f_multi = 77.40749, m_multi = 75.37856)
  # BUG FIX: "tol" is not an argument of expect_equal(); under testthat
  # 3rd edition it is forwarded to waldo::compare() and errors.
  expect_equal(es, comp, tolerance = 1e-4)
})
test_that("formula", {
  eye_spacing <- "sqrt(pow(x[0]-x[1], 2) + pow(y[0]-y[1],2))"
  es <- metrics(stimuli, eye_spacing)
  comp <- c(f_multi = 77.40749, m_multi = 75.37856)
  expect_equal(es, comp, tolerance = 1e-4)
})
| /tests/testthat/test-metrics.R | permissive | debruine/webmorphR | R | false | false | 878 | r | stimuli <- demo_stim()
test_that("error", {
  # malformed point reference errors; an out-of-range index yields NA per stimulus
  expect_error(metrics(stimuli, "x[s]"))
  expect_equal(metrics(stimuli, "x[200]"), c(f_multi = NA_real_, m_multi = NA_real_))
})
test_that("single point", {
  x0 <- c(f_multi = stimuli[[1]]$points[["x", 1]],
          m_multi = stimuli[[2]]$points[["x", 1]])
  y0 <- c(f_multi = stimuli[[1]]$points[["y", 1]],
          m_multi = stimuli[[2]]$points[["y", 1]])
  expect_equal(metrics(stimuli, "x[0]"), x0)
  expect_equal(metrics(stimuli, "y[0]"), y0)
})
test_that("eye spacing", {
  es <- metrics(stimuli, c(0, 1))
  comp <- c(f_multi = 77.40749, m_multi = 75.37856)
  # BUG FIX: "tol" is not an argument of expect_equal(); under testthat
  # 3rd edition it is forwarded to waldo::compare() and errors.
  expect_equal(es, comp, tolerance = 1e-4)
})
test_that("formula", {
  eye_spacing <- "sqrt(pow(x[0]-x[1], 2) + pow(y[0]-y[1],2))"
  es <- metrics(stimuli, eye_spacing)
  comp <- c(f_multi = 77.40749, m_multi = 75.37856)
  expect_equal(es, comp, tolerance = 1e-4)
})
|
# Stat 243 Fall 2013 Group Project
#Jeff Darling, Russell Chen, Xuecong Jia, Frank Cleary
#This is the file for the main function of the project
#Placeholder values for testing - will need user to input
n <- 20
f <- function(x) {
  dnorm(x)
}
upperB <- 1000
lowerB <- -1000

main <- function(f, n) {
  # Draw n values from density f via adaptive rejection sampling.
  # Uses the global bounds upperB/lowerB as the support of f.
  # Initialize the envelope g based on the given f and bounds.
  g <- initG(f, upperB, lowerB)
  # Accumulate accepted draws until we have n of them.  Starting from an
  # empty vector removes the original `if (is.na(acceptedX))` branch,
  # which warns once acceptedX holds more than one value.
  acceptedX <- numeric(0)
  while (length(acceptedX) < n) {
    # BUG FIX: generatePoints() takes (N, glist); the original first
    # iteration passed the undefined names upperG and lowerG.
    acceptedX <- c(acceptedX, generatePoints(n - length(acceptedX), g))
  }
  return(acceptedX)
}
sample <- main(f, n)  # NOTE(review): "sample" shadows base::sample()
######################initg.R######################
initG <- function(f, upperB = Inf, lowerB = -Inf) {
  # Build the initial upper envelope for adaptive rejection sampling.
  # Chooses two abscissae u < v with h'(u) > 0 and h'(v) < 0, where
  # h(x) = log(f(x)), and returns the two tangent lines to h as a matrix
  # wrapped in list(Upper = ...).
  if (upperB == Inf || lowerB == -Inf) {
    # If either bound is infinite, start the search near +/- 5.
    u <- (max(lowerB, -6) + 1)
    v <- (min(upperB, 6) - 1)
  } else {
    # If neither bound is infinite, place u and v at the interval thirds.
    u <- (2 * lowerB + upperB) / 3
    v <- (2 * upperB + lowerB) / 3
  }
  library(numDeriv)
  # Slopes of h(x) = log(f(x)) at u and v.
  logf <- function(x) { log(f(x)) }
  uSlope <- grad(logf, x = u)
  vSlope <- grad(logf, x = v)
  # If h'(u) <= 0, slide u towards lowerB until h'(u) > 0.
  while (uSlope <= 0) {
    u <- (2 * u + max(3 * u, lowerB)) / 3
    # BUG FIX: the original recomputed the slope with grad(f, ...) here,
    # but the loop condition tests the slope of log(f); use logf so the
    # test inside the loop matches the initial computation above.
    uSlope <- grad(logf, x = u)
  }
  # If h'(v) >= 0, slide v towards upperB until h'(v) < 0.
  while (vSlope >= 0) {
    v <- (2 * v + min(3 * v, upperB)) / 3
    vSlope <- grad(logf, x = v)
  }
  uVal <- log(f(u))
  vVal <- log(f(v))
  # Intersection point of the two tangent lines.
  w <- (vVal - uVal - v * vSlope + u * uSlope) / (uSlope - vSlope)
  # Each row of g: range [start, end), tangency point, slope m, value b.
  g <- rbind(c(lowerB, w, u, uSlope, uVal), c(w, upperB, v, vSlope, vVal))
  colnames(g) <- c('start', 'end', 'intersect', 'm', 'b')
  return(list(Upper = g))
}
############################################
######################checkconcav.R######################
# Sanity check used by the adaptive rejection sampler: the density value
# at a point must never exceed the upper envelope evaluated there.
# Stops with an error when that log-concavity assumption is violated;
# otherwise returns NULL invisibly, as the original did.
checkConcav <- function(fVal, g_upperVal) {
  envelope_violated <- fVal > g_upperVal
  if (envelope_violated) {
    stop("f is not log-concave; this method will not work.")
  }
}
############################################
######################updateg.R######################
library(numDeriv)
updateGUpper <- function(x, g, f, fval) {
  ### Return g with an added row whose tangency point ('intersect') is x.
  ### Calculates which neighbouring rows of g need updating and updates them.
  ###
  ### x: the point at which to add a new tangent to log(f)
  ### f: the distribution being drawn from
  ### fval: f(x), already evaluated by the caller (avoids a second call)
  ### g: a matrix where each row's values are start, end, intersect, m and b;
  ###    start/end define the range the tangent applies to, intersect is the
  ###    point at which the line is tangent to log(f(x)), and b is log(f(x))
  ###    at that point.
  # find index of the row whose range includes x:
  toUpdate <- which(g[ ,'start'] <= x & g[ ,'end'] > x)
  logfval <- log(fval)
  # slope of log(f) at x gives the new tangent line (numDeriv::grad)
  logf <- function(x) { log(f(x)) }
  fprime <- grad(logf, x=x)
  # Check whether x lies left or right of that row's tangency point and
  # update the neighbouring row on that side.
  # NOTE(review): this assumes a neighbour exists on that side; if x falls
  # left of the first row's tangency point, g[0, ] would be invalid --
  # confirm callers cannot produce that case.
  if (x < g[toUpdate, 'intersect']) {
    left <- toUpdate - 1
    gl <- g[left, ]
    # intersections of the new tangent with the left/current tangents give
    # the new range boundaries for the inserted row
    newRangeLeft <- (logfval - gl['b'] +
      gl['m']*gl['intersect'] - fprime*x)/(gl['m'] - fprime)
    newRangeRight <- (g[toUpdate, 'b'] - logfval +
      fprime*x - g[toUpdate, 'm']*g[toUpdate, 'intersect'])/(fprime - g[toUpdate, 'm'])
    g[left, 'end'] <- newRangeLeft
    g[toUpdate, 'start'] <- newRangeRight
    g <- rbind(g, c(newRangeLeft, newRangeRight, x, fprime, logfval))
  }
  if (x > g[toUpdate, 'intersect']) {
    right <- toUpdate + 1
    newRangeRight <- (logfval - g[right, 'b'] +
      g[right, 'm']*g[right, 'intersect'] - fprime*x)/(g[right, 'm'] - fprime)
    newRangeLeft <- (g[toUpdate, 'b'] - logfval +
      fprime*x - g[toUpdate, 'm']*g[toUpdate, 'intersect'])/(fprime - g[toUpdate, 'm'])
    g[right, 'start'] <- newRangeLeft
    g[toUpdate, 'end'] <- newRangeRight
    g <- rbind(g, c(newRangeLeft, newRangeRight, x, fprime, logfval))
  }
  # keep rows ordered by range start so lookups by range stay valid
  g <- g[sort.list(g[ ,1], ), ]
  return(g)
}
updateGLower <- function(x, gu, f) {
  ### Return the lower bound glower, derived from gu, the upper bound.
  ### Returns a matrix where each row contains start, end, intersect, m, b:
  ###   start/end define the range that the lower-hull chord is valid on,
  ###   intersect is unused,
  ###   m is the slope of the chord,
  ###   b is the value of log(f(x)) at x = 'start', from which the line's
  ###   equation can be reconstructed.
  # NOTE(review): the arguments x and f are unused here; the lower hull is
  # built entirely from chords between tangency points of gu.
  glower <- gu
  # chord endpoints are consecutive tangency points of the upper hull
  glower[ , 'start'] <- gu[ , 'intersect']
  glower[ , 'end'] <- c(gu[-1, 'intersect'], Inf)
  # calculate slope of each chord (last entry is a placeholder, removed below):
  glower[ , 'm'] <- c(diff(glower[ , 'b']), 0)/(glower[ , 'end'] - glower[ , 'start'])
  glower <- glower[-nrow(glower), ] # remove last row - it's meaningless
  glower # note the b column is the value of log(f) at 'start'
}
# Duplicate of checkConcav() defined earlier in this script (redefining it
# here is harmless but redundant).  Aborts when the density value exceeds
# the upper envelope, i.e. when f cannot be log-concave.
checkConcav <- function(fval, g_upperVal){
  if(fval > g_upperVal) {
    stop("f is not log-concave; this method will not work.")
  }
}
updateG <- function(x, glist, f){
  # Refine the envelope at a new abscissa x.
  # Returns a list with elements Upper (the upper hull in matrix form),
  # Lower (the lower hull in matrix form) and fx = f(x), returned so the
  # caller does not have to evaluate f a second time.
  # Locate the upper-hull piece whose range covers x and evaluate that
  # (log-scale) tangent line at x.
  index_Upper <- which(glist$Upper[ ,'start'] <= x & glist$Upper[ ,'end'] > x)
  upperX <- glist$Upper[index_Upper, 'm'] * (x - glist$Upper[index_Upper, 'intersect']) + glist$Upper[index_Upper, 'b']
  fval <- f(x)
  # Abort if the density pokes above its envelope (f not log-concave).
  # NOTE(review): fval is on the density scale while upperX is the hull of
  # log(f); comparing them directly looks inconsistent -- confirm whether
  # log(fval) was intended here.
  checkConcav(fval, upperX)
  # Add a tangent at x to the upper hull, then rebuild the chord lower hull.
  gu <- updateGUpper(x, glist$Upper, f, fval)
  gLower <- updateGLower(x, gu, f)
  return(list(Upper=gu, Lower=gLower, fx=fval))
}
############################################
######################generatePoints.R######################
generatePoints <- function(N, glist){
  # Squeeze-test N candidate draws against the envelope and return the
  # accepted values.  NOTE(review): relies on the globals f and sampleg();
  # the refined glist produced by updateG() is NOT propagated back to the
  # caller, so the envelope is rebuilt from scratch on each call.
  X <- sampleg(N)
  # sample N points from the upper bound of the g function
  U <- runif(N)
  # N points from the uniform (0,1) distribution for the rejection tests
  sampleX <- NULL
  # a vector to store X's that can be accepted
  for (i in seq_len(N)){
    index_Upper <- which(glist$Upper[ ,'start'] <= X[i] & glist$Upper[ ,'end'] > X[i])
    index_Lower <- which(glist$Lower[ ,'start'] <= X[i] & glist$Lower[ ,'end'] > X[i])
    # value of the (log-scale) upper hull at X[i]
    upperX <- glist$Upper[index_Upper, 'm'] * (X[i] - glist$Upper[index_Upper, 'intersect']) + glist$Upper[index_Upper, 'b']
    # value of the (log-scale) lower hull at X[i].
    # BUG FIX: the intercept must come from glist$Lower; the original read
    # glist$Upper[index_Lower, 'b'], mixing the two hulls.
    lowerX <- glist$Lower[index_Lower, 'm'] * (X[i] - glist$Lower[index_Lower, 'start']) + glist$Lower[index_Lower, 'b']
    if (U[i] < exp(lowerX - upperX)){
      # squeeze step: accept without evaluating f
      sampleX <- c(sampleX, X[i])
    }
    else{
      # Squeeze failed: evaluate f, refine the envelope, then run the full
      # rejection test.  After refining g the remaining candidates in X no
      # longer come from the current envelope, so stop this batch and let
      # the caller draw a fresh one.
      glist <- updateG(X[i], glist, f)
      if (U[i] < glist$fx / exp(upperX)){
        sampleX <- c(sampleX, X[i])
        break
      }
      else{
        break
      }
    }
  }
  return(sampleX)
}
############################################
######################sampleg.R######################
sampleSX <- function(g, n) {
  # Draw n random samples from the exponentiated piecewise-linear upper
  # envelope by inverse-CDF sampling, one piece at a time.
  # Expected fields of g (NOTE(review): confirm against callers --
  # generatePoints() calls sampleg(), not this function):
  #   g$x             the k abscissae x_i
  #   g$slope, g$intercept   tangent-line coefficients at the abscissae
  #   g$start, g$end  the piece boundaries ((k+1) z values in total)
  xk <- g$x
  k <- length(xk)
  # BUG FIX: the original contained the dangling expression "hk <- g$",
  # which made the next assignment parse as "hk <- g$hpk <- ...".  hk was
  # never used afterwards, so the line is dropped entirely.
  hpk <- (g$slope) * (g$x) + g$intercept
  zk <- c(g$start, g$end[k])  # note that there are (k+1) z values
  # Vectorized chunk areas of the exponentiated hull:
  # exp(zk[i+1]) - exp(zk[i]) over the coefficient of piece i.
  # BUG FIX: the original divided by the undefined name "hpx"; the only
  # vector computed above is hpk, used consistently below.
  t <- exp(c(zk, 1)) - exp(c(1, zk))
  areas <- t[-c(1, k + 2)] / hpk
  # normalizing factor of the exponentiated upper hull:
  normFactor <- sum(areas)
  scum <- c(0, cumsum(areas) / normFactor)
  # Locate each uniform draw's chunk, then invert the exponential piece
  # analytically.
  u <- runif(n)
  whichChunk <- rep(1, n)
  # loop index renamed from "k", which clobbered the chunk count above
  for (i in seq_len(n)) {
    while (u[i] > scum[whichChunk[i] + 1]) {
      whichChunk[i] <- whichChunk[i] + 1
    }
  }
  samp <- zk[whichChunk] +
    log(1 + (hpk[whichChunk + 1]) * normFactor * (u - scum[whichChunk]) /
          (exp(zk[whichChunk]))) / hpk[whichChunk + 1]
  return(samp)
}
############################################
| /mainFun.R | no_license | rrruss/243group | R | false | false | 8,749 | r | # Stat 243 Fall 2013 Group Project
#Jeff Darling, Russell Chen, Xuecong Jia, Frank Cleary
#This is the file for the main function of the project
#Placeholder values for testing - will need user to input
n <- 20
f <- function(x) {
  dnorm(x)
}
upperB <- 1000
lowerB <- -1000

main <- function(f, n) {
  # Draw n values from density f via adaptive rejection sampling.
  # Uses the global bounds upperB/lowerB as the support of f.
  # Initialize the envelope g based on the given f and bounds.
  g <- initG(f, upperB, lowerB)
  # Accumulate accepted draws until we have n of them.  Starting from an
  # empty vector removes the original `if (is.na(acceptedX))` branch,
  # which warns once acceptedX holds more than one value.
  acceptedX <- numeric(0)
  while (length(acceptedX) < n) {
    # BUG FIX: generatePoints() takes (N, glist); the original first
    # iteration passed the undefined names upperG and lowerG.
    acceptedX <- c(acceptedX, generatePoints(n - length(acceptedX), g))
  }
  return(acceptedX)
}
sample <- main(f, n)  # NOTE(review): "sample" shadows base::sample()
######################initg.R######################
initG <- function(f, upperB = Inf, lowerB = -Inf) {
  # Build the initial upper envelope for adaptive rejection sampling.
  # Chooses two abscissae u < v with h'(u) > 0 and h'(v) < 0, where
  # h(x) = log(f(x)), and returns the two tangent lines to h as a matrix
  # wrapped in list(Upper = ...).
  if (upperB == Inf || lowerB == -Inf) {
    # If either bound is infinite, start the search near +/- 5.
    u <- (max(lowerB, -6) + 1)
    v <- (min(upperB, 6) - 1)
  } else {
    # If neither bound is infinite, place u and v at the interval thirds.
    u <- (2 * lowerB + upperB) / 3
    v <- (2 * upperB + lowerB) / 3
  }
  library(numDeriv)
  # Slopes of h(x) = log(f(x)) at u and v.
  logf <- function(x) { log(f(x)) }
  uSlope <- grad(logf, x = u)
  vSlope <- grad(logf, x = v)
  # If h'(u) <= 0, slide u towards lowerB until h'(u) > 0.
  while (uSlope <= 0) {
    u <- (2 * u + max(3 * u, lowerB)) / 3
    # BUG FIX: the original recomputed the slope with grad(f, ...) here,
    # but the loop condition tests the slope of log(f); use logf so the
    # test inside the loop matches the initial computation above.
    uSlope <- grad(logf, x = u)
  }
  # If h'(v) >= 0, slide v towards upperB until h'(v) < 0.
  while (vSlope >= 0) {
    v <- (2 * v + min(3 * v, upperB)) / 3
    vSlope <- grad(logf, x = v)
  }
  uVal <- log(f(u))
  vVal <- log(f(v))
  # Intersection point of the two tangent lines.
  w <- (vVal - uVal - v * vSlope + u * uSlope) / (uSlope - vSlope)
  # Each row of g: range [start, end), tangency point, slope m, value b.
  g <- rbind(c(lowerB, w, u, uSlope, uVal), c(w, upperB, v, vSlope, vVal))
  colnames(g) <- c('start', 'end', 'intersect', 'm', 'b')
  return(list(Upper = g))
}
############################################
######################checkconcav.R######################
# Sanity check used by the adaptive rejection sampler: the density value
# at a point must never exceed the upper envelope evaluated there.
# Stops with an error when that log-concavity assumption is violated;
# otherwise returns NULL invisibly, as the original did.
checkConcav <- function(fVal, g_upperVal) {
  envelope_violated <- fVal > g_upperVal
  if (envelope_violated) {
    stop("f is not log-concave; this method will not work.")
  }
}
############################################
######################updateg.R######################
library(numDeriv)
updateGUpper <- function(x, g, f, fval) {
  ### Return g with an added row whose tangency point ('intersect') is x.
  ### Calculates which neighbouring rows of g need updating and updates them.
  ###
  ### x: the point at which to add a new tangent to log(f)
  ### f: the distribution being drawn from
  ### fval: f(x), already evaluated by the caller (avoids a second call)
  ### g: a matrix where each row's values are start, end, intersect, m and b;
  ###    start/end define the range the tangent applies to, intersect is the
  ###    point at which the line is tangent to log(f(x)), and b is log(f(x))
  ###    at that point.
  # find index of the row whose range includes x:
  toUpdate <- which(g[ ,'start'] <= x & g[ ,'end'] > x)
  logfval <- log(fval)
  # slope of log(f) at x gives the new tangent line (numDeriv::grad)
  logf <- function(x) { log(f(x)) }
  fprime <- grad(logf, x=x)
  # Check whether x lies left or right of that row's tangency point and
  # update the neighbouring row on that side.
  # NOTE(review): this assumes a neighbour exists on that side; if x falls
  # left of the first row's tangency point, g[0, ] would be invalid --
  # confirm callers cannot produce that case.
  if (x < g[toUpdate, 'intersect']) {
    left <- toUpdate - 1
    gl <- g[left, ]
    # intersections of the new tangent with the left/current tangents give
    # the new range boundaries for the inserted row
    newRangeLeft <- (logfval - gl['b'] +
      gl['m']*gl['intersect'] - fprime*x)/(gl['m'] - fprime)
    newRangeRight <- (g[toUpdate, 'b'] - logfval +
      fprime*x - g[toUpdate, 'm']*g[toUpdate, 'intersect'])/(fprime - g[toUpdate, 'm'])
    g[left, 'end'] <- newRangeLeft
    g[toUpdate, 'start'] <- newRangeRight
    g <- rbind(g, c(newRangeLeft, newRangeRight, x, fprime, logfval))
  }
  if (x > g[toUpdate, 'intersect']) {
    right <- toUpdate + 1
    newRangeRight <- (logfval - g[right, 'b'] +
      g[right, 'm']*g[right, 'intersect'] - fprime*x)/(g[right, 'm'] - fprime)
    newRangeLeft <- (g[toUpdate, 'b'] - logfval +
      fprime*x - g[toUpdate, 'm']*g[toUpdate, 'intersect'])/(fprime - g[toUpdate, 'm'])
    g[right, 'start'] <- newRangeLeft
    g[toUpdate, 'end'] <- newRangeRight
    g <- rbind(g, c(newRangeLeft, newRangeRight, x, fprime, logfval))
  }
  # keep rows ordered by range start so lookups by range stay valid
  g <- g[sort.list(g[ ,1], ), ]
  return(g)
}
updateGLower <- function(x, gu, f) {
  ### Return the lower bound glower, derived from gu, the upper bound.
  ### Returns a matrix where each row contains start, end, intersect, m, b:
  ###   start/end define the range that the lower-hull chord is valid on,
  ###   intersect is unused,
  ###   m is the slope of the chord,
  ###   b is the value of log(f(x)) at x = 'start', from which the line's
  ###   equation can be reconstructed.
  # NOTE(review): the arguments x and f are unused here; the lower hull is
  # built entirely from chords between tangency points of gu.
  glower <- gu
  # chord endpoints are consecutive tangency points of the upper hull
  glower[ , 'start'] <- gu[ , 'intersect']
  glower[ , 'end'] <- c(gu[-1, 'intersect'], Inf)
  # calculate slope of each chord (last entry is a placeholder, removed below):
  glower[ , 'm'] <- c(diff(glower[ , 'b']), 0)/(glower[ , 'end'] - glower[ , 'start'])
  glower <- glower[-nrow(glower), ] # remove last row - it's meaningless
  glower # note the b column is the value of log(f) at 'start'
}
# Duplicate of checkConcav() defined earlier in this script (redefining it
# here is harmless but redundant).  Aborts when the density value exceeds
# the upper envelope, i.e. when f cannot be log-concave.
checkConcav <- function(fval, g_upperVal){
  if(fval > g_upperVal) {
    stop("f is not log-concave; this method will not work.")
  }
}
updateG <- function(x, glist, f){
  # Refine the envelope at a new abscissa x.
  # Returns a list with elements Upper (the upper hull in matrix form),
  # Lower (the lower hull in matrix form) and fx = f(x), returned so the
  # caller does not have to evaluate f a second time.
  # Locate the upper-hull piece whose range covers x and evaluate that
  # (log-scale) tangent line at x.
  index_Upper <- which(glist$Upper[ ,'start'] <= x & glist$Upper[ ,'end'] > x)
  upperX <- glist$Upper[index_Upper, 'm'] * (x - glist$Upper[index_Upper, 'intersect']) + glist$Upper[index_Upper, 'b']
  fval <- f(x)
  # Abort if the density pokes above its envelope (f not log-concave).
  # NOTE(review): fval is on the density scale while upperX is the hull of
  # log(f); comparing them directly looks inconsistent -- confirm whether
  # log(fval) was intended here.
  checkConcav(fval, upperX)
  # Add a tangent at x to the upper hull, then rebuild the chord lower hull.
  gu <- updateGUpper(x, glist$Upper, f, fval)
  gLower <- updateGLower(x, gu, f)
  return(list(Upper=gu, Lower=gLower, fx=fval))
}
############################################
######################generatePoints.R######################
generatePoints <- function(N, glist){
  # Squeeze-test N candidate draws against the envelope and return the
  # accepted values.  NOTE(review): relies on the globals f and sampleg();
  # the refined glist produced by updateG() is NOT propagated back to the
  # caller, so the envelope is rebuilt from scratch on each call.
  X <- sampleg(N)
  # sample N points from the upper bound of the g function
  U <- runif(N)
  # N points from the uniform (0,1) distribution for the rejection tests
  sampleX <- NULL
  # a vector to store X's that can be accepted
  for (i in seq_len(N)){
    index_Upper <- which(glist$Upper[ ,'start'] <= X[i] & glist$Upper[ ,'end'] > X[i])
    index_Lower <- which(glist$Lower[ ,'start'] <= X[i] & glist$Lower[ ,'end'] > X[i])
    # value of the (log-scale) upper hull at X[i]
    upperX <- glist$Upper[index_Upper, 'm'] * (X[i] - glist$Upper[index_Upper, 'intersect']) + glist$Upper[index_Upper, 'b']
    # value of the (log-scale) lower hull at X[i].
    # BUG FIX: the intercept must come from glist$Lower; the original read
    # glist$Upper[index_Lower, 'b'], mixing the two hulls.
    lowerX <- glist$Lower[index_Lower, 'm'] * (X[i] - glist$Lower[index_Lower, 'start']) + glist$Lower[index_Lower, 'b']
    if (U[i] < exp(lowerX - upperX)){
      # squeeze step: accept without evaluating f
      sampleX <- c(sampleX, X[i])
    }
    else{
      # Squeeze failed: evaluate f, refine the envelope, then run the full
      # rejection test.  After refining g the remaining candidates in X no
      # longer come from the current envelope, so stop this batch and let
      # the caller draw a fresh one.
      glist <- updateG(X[i], glist, f)
      if (U[i] < glist$fx / exp(upperX)){
        sampleX <- c(sampleX, X[i])
        break
      }
      else{
        break
      }
    }
  }
  return(sampleX)
}
############################################
######################sampleg.R######################
sampleSX <- function(g, n) {
  # Draw n random samples from the exponentiated piecewise-linear upper
  # envelope by inverse-CDF sampling, one piece at a time.
  # Expected fields of g (NOTE(review): confirm against callers --
  # generatePoints() calls sampleg(), not this function):
  #   g$x             the k abscissae x_i
  #   g$slope, g$intercept   tangent-line coefficients at the abscissae
  #   g$start, g$end  the piece boundaries ((k+1) z values in total)
  xk <- g$x
  k <- length(xk)
  # BUG FIX: the original contained the dangling expression "hk <- g$",
  # which made the next assignment parse as "hk <- g$hpk <- ...".  hk was
  # never used afterwards, so the line is dropped entirely.
  hpk <- (g$slope) * (g$x) + g$intercept
  zk <- c(g$start, g$end[k])  # note that there are (k+1) z values
  # Vectorized chunk areas of the exponentiated hull:
  # exp(zk[i+1]) - exp(zk[i]) over the coefficient of piece i.
  # BUG FIX: the original divided by the undefined name "hpx"; the only
  # vector computed above is hpk, used consistently below.
  t <- exp(c(zk, 1)) - exp(c(1, zk))
  areas <- t[-c(1, k + 2)] / hpk
  # normalizing factor of the exponentiated upper hull:
  normFactor <- sum(areas)
  scum <- c(0, cumsum(areas) / normFactor)
  # Locate each uniform draw's chunk, then invert the exponential piece
  # analytically.
  u <- runif(n)
  whichChunk <- rep(1, n)
  # loop index renamed from "k", which clobbered the chunk count above
  for (i in seq_len(n)) {
    while (u[i] > scum[whichChunk[i] + 1]) {
      whichChunk[i] <- whichChunk[i] + 1
    }
  }
  samp <- zk[whichChunk] +
    log(1 + (hpk[whichChunk + 1]) * normFactor * (u - scum[whichChunk]) /
          (exp(zk[whichChunk]))) / hpk[whichChunk + 1]
  return(samp)
}
############################################
|
# Write majors that look like Computer Science to a file and print the count.
# NOTE(review): relies on a global character vector `major`.
# grepl() is vectorized, so a single alternation pattern replaces the
# original element-by-element loop over three separate grepl() calls.
is_cs <- grepl("omp sci|CS |omputer", major)
count <- sum(is_cs)
sink("computer_science_major.csv")
# one matching major per line, exactly as the original cat() loop produced
for (m in major[is_cs]) {
  cat(m)
  cat("\n")
}
sink()
cat("Computer Science: ")
cat(count)
cat("\n") | /Scripts/R/Major/parse_computer_science.R | no_license | rzs7w6/Twitter-Tweepy-Data-Scraper | R | false | false | 302 | r | sink("computer_science_major.csv")
count = 0
for(i in 1:length(major))
{
if(grepl("omp sci", major[i], perl=TRUE) || grepl("CS ", major[i], perl=TRUE) || grepl("omputer", major[i], perl=TRUE)){
count = count + 1
cat(major[i])
cat("\n")
}
}
sink()
cat("Computer Science: ")
cat(count)
cat("\n") |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/internaldbs.R
\name{MassCer}
\alias{MassCer}
\title{Calculate formula and mass of ceramides}
\usage{
MassCer(cer)
}
\arguments{
\item{cer}{character value indicating total number of carbons and double
bonds}
}
\value{
vector containing formula and mass
}
\description{
Calculate formula and mass of ceramides
}
\author{
M Isabel Alcoriza-Balaguer <maialba@alumni.uv.es>
}
\keyword{internal}
| /man/MassCer.Rd | no_license | cran/LipidMS | R | false | true | 492 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/internaldbs.R
\name{MassCer}
\alias{MassCer}
\title{Calculate formula and mass of ceramides}
\usage{
MassCer(cer)
}
\arguments{
\item{cer}{character value indicating total number of carbons and double
bonds}
}
\value{
vector containing formula and mass
}
\description{
Calculate formula and mass of ceramides
}
\author{
M Isabel Alcoriza-Balaguer <maialba@alumni.uv.es>
}
\keyword{internal}
|
\alias{gtkTestSliderGetValue}
\name{gtkTestSliderGetValue}
\title{gtkTestSliderGetValue}
\description{Retrieve the literal adjustment value for GtkRange based
widgets and spin buttons. Note that the value returned by
this function is anything between the lower and upper bounds
of the adjustment belonging to \code{widget}, and is not a percentage
as passed in to \code{\link{gtkTestSliderSetPerc}}.}
\usage{gtkTestSliderGetValue(widget)}
\arguments{\item{\verb{widget}}{valid widget pointer.}}
\details{Since 2.14}
\value{[numeric] adjustment->value for an adjustment belonging to \code{widget}.}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
| /RGtk2/man/gtkTestSliderGetValue.Rd | no_license | lawremi/RGtk2 | R | false | false | 668 | rd | \alias{gtkTestSliderGetValue}
\name{gtkTestSliderGetValue}
\title{gtkTestSliderGetValue}
\description{Retrieve the literal adjustment value for GtkRange based
widgets and spin buttons. Note that the value returned by
this function is anything between the lower and upper bounds
of the adjustment belonging to \code{widget}, and is not a percentage
as passed in to \code{\link{gtkTestSliderSetPerc}}.}
\usage{gtkTestSliderGetValue(widget)}
\arguments{\item{\verb{widget}}{valid widget pointer.}}
\details{Since 2.14}
\value{[numeric] adjustment->value for an adjustment belonging to \code{widget}.}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
# Logical values and comparison operators in R.
# TRUE / FALSE are the logical constants (T / F are reassignable aliases --
# prefer the full names).
4 < 5      # TRUE
10 > 100   # FALSE
4 == 5     # FALSE (equality test, not assignment)
# Comparison operators: ==  equal
#                       !=  not equal
#                       <   less than
#                       >   greater than
#                       <=  less than or equal
#                       >=  greater than or equal
# Logical operators:    !   negation
#                       |   elementwise OR
#                       ||  scalar, short-circuiting OR
#                       &   elementwise AND
#                       isTRUE(x) tests that x is exactly a single TRUE
var <- 4 < 5        # store the result of a comparison
var                 # prints TRUE
isTRUE(var)         # TRUE
var1 <- 4 > 5
isTRUE(var1)        # FALSE
var2 <- !TRUE       # negation gives FALSE
var2
var2
| /R Programming/3_Intro/LogicalExpressions.R | no_license | jacobrozell/DataScience | R | false | false | 187 | r | #Logical
# Logical values: TRUE / FALSE are the constants (T / F are reassignable
# aliases -- prefer the full names).
4 < 5      # TRUE
10 > 100   # FALSE
4 == 5     # FALSE (equality test, not assignment)
# Comparison operators: ==  equal
#                       !=  not equal
#                       <   less than
#                       >   greater than
#                       <=  less than or equal
#                       >=  greater than or equal
# Logical operators:    !   negation
#                       |   elementwise OR
#                       ||  scalar, short-circuiting OR
#                       &   elementwise AND
#                       isTRUE(x) tests that x is exactly a single TRUE
var <- 4 < 5        # store the result of a comparison
var                 # prints TRUE
isTRUE(var)         # TRUE
var1 <- 4 > 5
isTRUE(var1)        # FALSE
var2 <- !TRUE       # negation gives FALSE
var2
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/AmStockChart.R
\docType{methods}
\name{setChartCursorSettings}
\alias{setChartCursorSettings}
\title{Setter}
\usage{
\S4method{setChartCursorSettings}{AmStockChart}(.Object,
chartCursorSettings = NULL, ...)
}
\arguments{
\item{\code{.Object}:}{Object of class \code{\linkS4class{AmStockChart}}.}
\item{\code{chartCursorSettings}:}{Object of class \code{\linkS4class{ChartCursor}}.}
}
\value{
The updated object of class \code{\linkS4class{AmChart}}.
}
\description{
Setter
}
\examples{
library(pipeR)
amStockChart() \%>>\% setChartCursorSettings( oneBallOnly = TRUE )
}
\seealso{
\code{\linkS4class{AmChart}} S4 class
\code{\linkS4class{ChartCursorSettings}} S4 class
Other AmStockChart methods: \code{\link{addComparedDataSet}};
\code{\link{addDataSet}}; \code{\link{addPanel}};
\code{\link{setCategoryAxesSettings}};
\code{\link{setChartScrollbarSettings}};
\code{\link{setComparedDataSets}};
\code{\link{setDataSetSelector}};
\code{\link{setDataSets}};
\code{\link{setLegendSettings}};
\code{\link{setMainDataSet}};
\code{\link{setPanelsSettings}}; \code{\link{setPanels}};
\code{\link{setPeriodSelector}};
\code{\link{setStockEventsSettings}};
\code{\link{setValueAxesSettings}}
Other AmStockChart setters: \code{\link{addComparedDataSet}};
\code{\link{addDataSet}}; \code{\link{addPanel}};
\code{\link{setCategoryAxesSettings}};
\code{\link{setChartScrollbarSettings}};
\code{\link{setComparedDataSets}};
\code{\link{setDataSetSelector}};
\code{\link{setDataSets}};
\code{\link{setLegendSettings}};
\code{\link{setMainDataSet}};
\code{\link{setPanelsSettings}}; \code{\link{setPanels}};
\code{\link{setPeriodSelector}};
\code{\link{setStockEventsSettings}};
\code{\link{setValueAxesSettings}}
}
| /man/setChartCursorSettings.Rd | no_license | myndworkz/rAmCharts | R | false | false | 1,836 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/AmStockChart.R
\docType{methods}
\name{setChartCursorSettings}
\alias{setChartCursorSettings}
\title{Setter}
\usage{
\S4method{setChartCursorSettings}{AmStockChart}(.Object,
chartCursorSettings = NULL, ...)
}
\arguments{
\item{\code{.Object}:}{Object of class \code{\linkS4class{AmStockChart}}.}
\item{\code{chartCursorSettings}:}{Object of class \code{\linkS4class{ChartCursor}}.}
}
\value{
The updated object of class \code{\linkS4class{AmChart}}.
}
\description{
Setter
}
\examples{
library(pipeR)
amStockChart() \%>>\% setChartCursorSettings( oneBallOnly = TRUE )
}
\seealso{
\code{\linkS4class{AmChart}} S4 class
\code{\linkS4class{ChartCursorSettings}} S4 class
Other AmStockChart methods: \code{\link{addComparedDataSet}};
\code{\link{addDataSet}}; \code{\link{addPanel}};
\code{\link{setCategoryAxesSettings}};
\code{\link{setChartScrollbarSettings}};
\code{\link{setComparedDataSets}};
\code{\link{setDataSetSelector}};
\code{\link{setDataSets}};
\code{\link{setLegendSettings}};
\code{\link{setMainDataSet}};
\code{\link{setPanelsSettings}}; \code{\link{setPanels}};
\code{\link{setPeriodSelector}};
\code{\link{setStockEventsSettings}};
\code{\link{setValueAxesSettings}}
Other AmStockChart setters: \code{\link{addComparedDataSet}};
\code{\link{addDataSet}}; \code{\link{addPanel}};
\code{\link{setCategoryAxesSettings}};
\code{\link{setChartScrollbarSettings}};
\code{\link{setComparedDataSets}};
\code{\link{setDataSetSelector}};
\code{\link{setDataSets}};
\code{\link{setLegendSettings}};
\code{\link{setMainDataSet}};
\code{\link{setPanelsSettings}}; \code{\link{setPanels}};
\code{\link{setPeriodSelector}};
\code{\link{setStockEventsSettings}};
\code{\link{setValueAxesSettings}}
}
|
\alias{gtkSelectionDataGetText}
\name{gtkSelectionDataGetText}
\title{gtkSelectionDataGetText}
\description{Gets the contents of the selection data as a UTF-8 string.}
\usage{gtkSelectionDataGetText(object)}
\arguments{\item{\code{object}}{[\code{\link{GtkSelectionData}}] a \code{\link{GtkSelectionData}}}}
\value{[raw] if the selection data contained a recognized
text type and it could be converted to UTF-8, a newly allocated
string containing the converted text, otherwise \code{NULL}.}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
| /man/gtkSelectionDataGetText.Rd | no_license | cran/RGtk2.10 | R | false | false | 569 | rd | \alias{gtkSelectionDataGetText}
\name{gtkSelectionDataGetText}
\title{gtkSelectionDataGetText}
\description{Gets the contents of the selection data as a UTF-8 string.}
\usage{gtkSelectionDataGetText(object)}
\arguments{\item{\code{object}}{[\code{\link{GtkSelectionData}}] a \code{\link{GtkSelectionData}}}}
\value{[raw] if the selection data contained a recognized
text type and it could be converted to UTF-8, a newly allocated
string containing the converted text, otherwise \code{NULL}.}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils_table.R
\name{summarize_short.default}
\alias{summarize_short.default}
\title{Create variable summary for all other variable types}
\usage{
\method{summarize_short}{default}(x)
}
\arguments{
\item{x}{an object of any other class}
}
\value{
List of counts for unique and missing values in \code{x}.
}
\description{
Create variable summary for all other variable types
}
| /man/summarize_short.default.Rd | permissive | LenaNoel/visR | R | false | true | 453 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils_table.R
\name{summarize_short.default}
\alias{summarize_short.default}
\title{Create variable summary for all other variable types}
\usage{
\method{summarize_short}{default}(x)
}
\arguments{
\item{x}{an object of any other class}
}
\value{
List of counts for unique and missing values in \code{x}.
}
\description{
Create variable summary for all other variable types
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/load_naei_layers.R
\name{load_naei_layers}
\alias{load_naei_layers}
\title{Load NAEI Layers}
\usage{
load_naei_layers(
dir = "C:/Users/Will/Google Drive/PhD/site_BT Tower/NAEI Data/NAEI NO2 2014/"
)
}
\arguments{
\item{dir}{directory containing ".asc" files}
}
\description{
Read the unzipped NAEI sector layers into a raster stack
}
\author{
W. S. Drysdale
}
| /man/load_naei_layers.Rd | no_license | willdrysdale/wsdmiscr | R | false | true | 439 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/load_naei_layers.R
\name{load_naei_layers}
\alias{load_naei_layers}
\title{Load NAEI Layers}
\usage{
load_naei_layers(
dir = "C:/Users/Will/Google Drive/PhD/site_BT Tower/NAEI Data/NAEI NO2 2014/"
)
}
\arguments{
\item{dir}{directory containing ".asc" files}
}
\description{
Read the unzipped NAEI sector layers into a raster stack
}
\author{
W. S. Drysdale
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/style.R
\name{style}
\alias{style}
\title{Create a string-representation of CSS style}
\usage{
style(...)
}
\arguments{
\item{...}{style attributes in form of \code{name = value}. Many CSS properties
contain \code{'-'} in the middle of their names. In this case, use
\code{"the-name" = value} instead. \code{NA} will cancel the attribute.}
}
\value{
a string-representation of css styles
}
\description{
Most HTML elements can be stylized by a set of CSS style
properties. This function helps build CSS strings using
conventional argument-passing in R.
}
\details{
The general usage of CSS styling is
\code{<span style = "color: red; border: 1px">Text</span>}
The text color can be specified by `color`, the border of
element by `border`, and etc.
Basic styles like \code{color}, \code{border}, \code{background}
work properly and mostly consistently in modern web browsers.
However, some style properties may not work consistently in
different browsers.
}
\examples{
style(color = "red")
style(color = "red", "font-weight" = "bold")
style("background-color" = "gray", "border-radius" = "4px")
style("padding-right" = "2px")
formattable(mtcars, list(
mpg = formatter("span",
style = x ~ style(color = ifelse(x > median(x), "red", NA)))))
}
\seealso{
\href{http://www.w3.org/Style/CSS/all-properties}{List of CSS properties},
\href{http://www.w3schools.com/cssref/}{CSS Reference}
}
| /man/style.Rd | permissive | githubfun/formattable | R | false | false | 1,482 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/style.R
\name{style}
\alias{style}
\title{Create a string-representation of CSS style}
\usage{
style(...)
}
\arguments{
\item{...}{style attributes in form of \code{name = value}. Many CSS properties
contain \code{'-'} in the middle of their names. In this case, use
\code{"the-name" = value} instead. \code{NA} will cancel the attribute.}
}
\value{
a string-representation of css styles
}
\description{
Most HTML elements can be stylized by a set of CSS style
properties. This function helps build CSS strings using
conventional argument-passing in R.
}
\details{
The general usage of CSS styling is
\code{<span style = "color: red; border: 1px">Text</span>}
The text color can be specified by `color`, the border of
element by `border`, and etc.
Basic styles like \code{color}, \code{border}, \code{background}
work properly and mostly consistently in modern web browsers.
However, some style properties may not work consistently in
different browsers.
}
\examples{
style(color = "red")
style(color = "red", "font-weight" = "bold")
style("background-color" = "gray", "border-radius" = "4px")
style("padding-right" = "2px")
formattable(mtcars, list(
mpg = formatter("span",
style = x ~ style(color = ifelse(x > median(x), "red", NA)))))
}
\seealso{
\href{http://www.w3.org/Style/CSS/all-properties}{List of CSS properties},
\href{http://www.w3schools.com/cssref/}{CSS Reference}
}
|
library(raster)
### Name: shapefile
### Title: Read or write a shapefile
### Aliases: shapefile shapefile,character-method shapefile,Spatial-method
### Keywords: spatial
### ** Examples
# Reading shapefiles requires rgdal; skip the example when it is unavailable.
if (require(rgdal)) {
# path to the Luxembourg shapefile bundled with the raster package
filename <- system.file("external/lux.shp", package="raster")
filename
# read the shapefile into a Spatial* object
p <- shapefile(filename)
## Not run:
##D shapefile(p, 'copy.shp')
## End(Not run)
}
| /data/genthat_extracted_code/raster/examples/shapefile.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 373 | r | library(raster)
### Name: shapefile
### Title: Read or write a shapefile
### Aliases: shapefile shapefile,character-method shapefile,Spatial-method
### Keywords: spatial
### ** Examples
if (require(rgdal)) {
filename <- system.file("external/lux.shp", package="raster")
filename
p <- shapefile(filename)
## Not run:
##D shapefile(p, 'copy.shp')
## End(Not run)
}
|
#--------------------------------- Class Definitions ----------------------------------#
# WARNING: Do NOT touch the env slot! It is used to link garbage collection between R and H2O
# Connection handle to a running H2O server (defaults to localhost:54321).
setClass("H2OClient", representation(ip="character", port="numeric"), prototype(ip="127.0.0.1", port=54321))
# Handle to data uploaded to H2O but not yet parsed into a frame.
setClass("H2ORawData", representation(h2o="H2OClient", key="character"))
# setClass("H2ORawData", representation(h2o="H2OClient", key="character", env="environment"))
# Handle to a parsed H2O frame. Column names and dimensions are cached
# client-side; `logic` marks frames holding the result of a logical
# expression; `any_enum` records whether any column is a factor.
setClass("H2OParsedData", representation(h2o="H2OClient", key="character", logic="logical", col_names="vector", nrows="numeric", ncols="numeric", any_enum="logical"),
prototype(logic=FALSE, col_names="", ncols=-1, nrows=-1, any_enum = FALSE))
# setClass("H2OParsedData", representation(h2o="H2OClient", key="character", env="environment", logic="logical"), prototype(logic=FALSE))
# Abstract base class for fitted models: server key + training data + results list.
setClass("H2OModel", representation(key="character", data="H2OParsedData", model="list", "VIRTUAL"))
# setClass("H2OModel", representation(key="character", data="H2OParsedData", model="list", env="environment", "VIRTUAL"))
# Abstract base class for grid-search results (models plus a summary table).
setClass("H2OGrid", representation(key="character", data="H2OParsedData", model="list", sumtable="list", "VIRTUAL"))
# Performance/ROC summary for a binary classifier.
setClass("H2OPerfModel", representation(cutoffs="numeric", measure="numeric", perf="character", model="list", roc="data.frame"))
# Concrete model classes. Where present, `valid` holds the validation frame
# and `xval` the per-fold cross-validation models.
setClass("H2OGLMModel", contains="H2OModel", representation(xval="list"))
setClass("H2OKMeansModel", contains="H2OModel")
setClass("H2ODeepLearningModel", contains="H2OModel", representation(valid="H2OParsedData", xval="list"))
setClass("H2ODRFModel", contains="H2OModel", representation(valid="H2OParsedData", xval="list"))
setClass("H2ONBModel", contains="H2OModel")
setClass("H2OPCAModel", contains="H2OModel")
setClass("H2OGBMModel", contains="H2OModel", representation(valid="H2OParsedData", xval="list"))
setClass("H2OSpeeDRFModel", contains="H2OModel", representation(valid="H2OParsedData", xval="list"))
# Grid-search result classes, one per algorithm.
setClass("H2OGLMGrid", contains="H2OGrid")
setClass("H2OGBMGrid", contains="H2OGrid")
setClass("H2OKMeansGrid", contains="H2OGrid")
setClass("H2ODRFGrid", contains="H2OGrid")
setClass("H2ODeepLearningGrid", contains="H2OGrid")
setClass("H2OSpeeDRFGrid", contains="H2OGrid")
# Result of a regularization-path GLM fit: one model per lambda value.
setClass("H2OGLMModelList", representation(models="list", best_model="numeric", lambdas="numeric"))
# Register finalizers for H2O data and model objects
# setMethod("initialize", "H2ORawData", function(.Object, h2o = new("H2OClient"), key = "") {
# .Object@h2o = h2o
# .Object@key = key
# .Object@env = new.env()
#
# assign("h2o", .Object@h2o, envir = .Object@env)
# assign("key", .Object@key, envir = .Object@env)
#
# # Empty keys don't refer to any object in H2O
# if(key != "") reg.finalizer(.Object@env, .h2o.__finalizer)
# return(.Object)
# })
#
# setMethod("initialize", "H2OParsedData", function(.Object, h2o = new("H2OClient"), key = "") {
# .Object@h2o = h2o
# .Object@key = key
# .Object@env = new.env()
#
# assign("h2o", .Object@h2o, envir = .Object@env)
# assign("key", .Object@key, envir = .Object@env)
#
# # Empty keys don't refer to any object in H2O
# if(key != "") reg.finalizer(.Object@env, .h2o.__finalizer)
# return(.Object)
# })
#
# setMethod("initialize", "H2OModel", function(.Object, key = "", data = new("H2OParsedData"), model = list()) {
# .Object@key = key
# .Object@data = data
# .Object@model = model
# .Object@env = new.env()
#
# assign("h2o", .Object@data@h2o, envir = .Object@env)
# assign("key", .Object@key, envir = .Object@env)
#
# # Empty keys don't refer to any object in H2O
# if(key != "") reg.finalizer(.Object@env, .h2o.__finalizer)
# return(.Object)
# })
#--------------------------------- Class Display Functions ----------------------------------#
# Display the IP address and port of an H2O server connection.
setMethod("show", "H2OClient", function(object) {
  cat(paste("IP Address:", object@ip, "\n"))
  cat(paste("Port :", object@port, "\n"))
})
# Display an unparsed data handle: the server it lives on and its store key.
setMethod("show", "H2ORawData", function(object) {
  print(object@h2o)
  cat(paste("Raw Data Key:", object@key, "\n"))
})
# Display a parsed frame: connection info, key, and a head() preview.
setMethod("show", "H2OParsedData", function(object) {
  print(object@h2o)
  cat(paste("Parsed Data Key:", object@key, "\n\n"))
  print(head(object))
})
# Display a grid-search result: keys plus a one-row-per-model summary table.
setMethod("show", "H2OGrid", function(object) {
  print(object@data@h2o)
  cat("Parsed Data Key:", object@data@key, "\n\n")
  cat("Grid Search Model Key:", object@key, "\n")
  # Flatten the per-model summary list into a data frame, one row per model.
  summary_df <- data.frame(t(sapply(object@sumtable, c)))
  cat("\nSummary\n")
  print(summary_df)
})
# Display a k-means fit in the style of base R's print.kmeans: cluster
# sizes, centers, assignment summary, and within/between sum-of-squares.
setMethod("show", "H2OKMeansModel", function(object) {
  print(object@data@h2o)
  cat("Parsed Data Key:", object@data@key, "\n\n")
  cat("K-Means Model Key:", object@key)
  model = object@model
  cat("\n\nK-means clustering with", length(model$size), "clusters of sizes "); cat(model$size, sep=", ")
  cat("\n\nCluster means:\n"); print(model$centers)
  # model$cluster is the per-row assignment vector; only a summary is shown.
  cat("\nClustering vector:\n"); print(summary(model$cluster))
  cat("\nWithin cluster sum of squares by cluster:\n"); print(model$withinss)
  cat("(between_SS / total_SS = ", round(100*sum(model$betweenss)/model$totss, 1), "%)\n")
  cat("\nAvailable components:\n\n"); print(names(model))
})
# Display a GLM fit: coefficients, deviance statistics, and, for binomial
# models, AUC / best threshold / confusion matrix. If cross-validation
# models exist (xval slot), a compact per-fold comparison table is printed.
setMethod("show", "H2OGLMModel", function(object) {
  print(object@data@h2o)
  cat("Parsed Data Key:", object@data@key, "\n\n")
  cat("GLM2 Model Key:", object@key)
  model <- object@model
  cat("\n\nCoefficients:\n"); print(round(model$coefficients,5))
  # Normalized coefficients are only present when standardization was enabled.
  if(!is.null(model$normalized_coefficients)) {
    cat("\nNormalized Coefficients:\n"); print(round(model$normalized_coefficients,5))
  }
  cat("\nDegrees of Freedom:", model$df.null, "Total (i.e. Null); ", model$df.residual, "Residual")
  cat("\nNull Deviance: ", round(model$null.deviance,1))
  cat("\nResidual Deviance:", round(model$deviance,1), " AIC:", round(model$aic,1))
  cat("\nDeviance Explained:", round(1-model$deviance/model$null.deviance,5), "\n")
  # cat("\nAvg Training Error Rate:", round(model$train.err,5), "\n")
  family <- model$params$family$family
  if(family == "binomial") {
    cat("AUC:", round(model$auc,5), " Best Threshold:", round(model$best_threshold,5))
    cat("\n\nConfusion Matrix:\n"); print(model$confusion)
  }
  if(length(object@xval) > 0) {
    cat("\nCross-Validation Models:\n")
    # Per-fold table: nonzero-coefficient count (rank - intercept) plus
    # AUC (binomial) or AIC (other families) and deviance explained.
    if(family == "binomial") {
      modelXval <- t(sapply(object@xval, function(x) { c(x@model$rank-1, x@model$auc, 1-x@model$deviance/x@model$null.deviance) }))
      colnames(modelXval) = c("Nonzeros", "AUC", "Deviance Explained")
    } else {
      modelXval <- t(sapply(object@xval, function(x) { c(x@model$rank-1, x@model$aic, 1-x@model$deviance/x@model$null.deviance) }))
      colnames(modelXval) = c("Nonzeros", "AIC", "Deviance Explained")
    }
    rownames(modelXval) <- paste("Model", 1:nrow(modelXval))
    print(modelXval)
  }
})
# Build a summary matrix over a GLM regularization path: one row per lambda
# with the number of selected predictors and deviance ratio. Binomial fits
# additionally report AUC. Returns the matrix (used by the show method).
setMethod("summary","H2OGLMModelList", function(object) {
  summary <- NULL
  # The family is assumed constant across the path; check the first model.
  if(object@models[[1]]@model$params$family$family == 'binomial'){
    for(m in object@models) {
      model = m@model
      # First row seeds the matrix; subsequent rows are appended via rbind.
      if(is.null(summary)) {
        summary = t(as.matrix(c(model$lambda, model$df.null-model$df.residual,round((1-model$deviance/model$null.deviance),2),round(model$auc,2))))
      } else {
        summary = rbind(summary,c(model$lambda,model$df.null-model$df.residual,round((1-model$deviance/model$null.deviance),2),round(model$auc,2)))
      }
    }
    summary = cbind(1:nrow(summary),summary)
    colnames(summary) <- c("id","lambda","predictors","dev.ratio"," AUC ")
  } else {
    for(m in object@models) {
      model = m@model
      if(is.null(summary)) {
        summary = t(as.matrix(c(model$lambda, model$df.null-model$df.residual,round((1-model$deviance/model$null.deviance),2))))
      } else {
        summary = rbind(summary,c(model$lambda,model$df.null-model$df.residual,round((1-model$deviance/model$null.deviance),2)))
      }
    }
    summary = cbind(1:nrow(summary),summary)
    colnames(summary) <- c("id","lambda","predictors","explained dev")
  }
  summary
})
# Display a GLM regularization path: the per-lambda summary table followed
# by the index of the best model.
setMethod("show", "H2OGLMModelList", function(object) {
  path_table <- summary(object)
  print(path_table)
  cat("best model:", object@best_model, "\n")
})
# Display a deep learning fit: training/validation errors, confusion matrix
# (labelled with the data set it was computed on), hit ratios for
# multi-class problems, and any cross-validation model keys.
setMethod("show", "H2ODeepLearningModel", function(object) {
  print(object@data@h2o)
  cat("Parsed Data Key:", object@data@key, "\n\n")
  cat("Deep Learning Model Key:", object@key)
  model = object@model
  cat("\n\nTraining classification error:", model$train_class_error)
  cat("\nTraining mean square error:", model$train_sqr_error)
  cat("\n\nValidation classification error:", model$valid_class_error)
  cat("\nValidation square error:", model$valid_sqr_error)
  if(!is.null(model$confusion)) {
    cat("\n\nConfusion matrix:\n")
    # No validation frame: the matrix came from either the training data
    # (nfolds == 0) or n-fold cross validation.
    if(is.na(object@valid@key)) {
      if(model$params$nfolds == 0)
        cat("Reported on", object@data@key, "\n")
      else
        cat("Reported on", paste(model$params$nfolds, "-fold cross-validated data", sep = ""), "\n")
    } else
      cat("Reported on", object@valid@key, "\n")
    print(model$confusion)
  }
  if(!is.null(model$hit_ratios)) {
    cat("\nHit Ratios for Multi-class Classification:\n")
    print(model$hit_ratios)
  }
  if(!is.null(object@xval) && length(object@xval) > 0) {
    cat("\nCross-Validation Models:\n")
    # lapply used purely for its cat() side effect; the result is discarded.
    temp = lapply(object@xval, function(x) { cat(" ", x@key, "\n") })
  }
})
# Display a Distributed Random Forest fit: forest statistics, confusion
# matrix / AUC / Gini for classifiers, variable importance (if requested at
# training time), per-tree MSE, and any cross-validation model keys.
setMethod("show", "H2ODRFModel", function(object) {
  print(object@data@h2o)
  cat("Parsed Data Key:", object@data@key, "\n\n")
  cat("Distributed Random Forest Model Key:", object@key)
  model <- object@model
  # Fixed user-facing typo: "Clasification" -> "Classification"
  # (matches the SpeeDRF show method's label).
  cat("\n\nClassification:", model$params$classification)
  cat("\nNumber of trees:", model$params$ntree)
  cat("\nTree statistics:\n"); print(model$forest)
  if(model$params$classification) {
    cat("\nConfusion matrix:\n")
    # Without a validation frame the matrix comes from cross validation.
    if(is.na(object@valid@key))
      cat("Reported on", paste(object@model$params$nfolds, "-fold cross-validated data", sep = ""), "\n")
    else
      cat("Reported on", object@valid@key, "\n")
    print(model$confusion)
    if(!is.null(model$auc) && !is.null(model$gini))
      cat("\nAUC:", model$auc, "\nGini:", model$gini, "\n")
  }
  if(!is.null(model$varimp)) {
    cat("\nVariable importance:\n"); print(model$varimp)
  }
  cat("\nMean-squared Error by tree:\n"); print(model$mse)
  if(length(object@xval) > 0) {
    cat("\nCross-Validation Models:\n")
    print(sapply(object@xval, function(x) x@key))
  }
})
# Display a SpeeDRF fit: confusion matrix (labelled with its source data),
# variable importance, MSE, and cross-validation model keys.
setMethod("show", "H2OSpeeDRFModel", function(object) {
  print(object@data@h2o)
  cat("Parsed Data Key:", object@data@key, "\n\n")
  cat("SpeeDRF Model Key:", object@key)
  model = object@model
  cat("\n\nClassification:", model$params$classification)
  cat("\nNumber of trees:", model$params$ntree)
  # NOTE(review): the first branch is dead code (condition hard-coded to
  # FALSE); it appears to be a disabled out-of-bag-error label variant.
  if(FALSE){ #model$params$oobee) {
    cat("\nConfusion matrix:\n"); cat("Reported on oobee from", object@valid@key, "\n")
    if(is.na(object@valid@key))
      cat("Reported on oobee from", paste(object@model$params$nfolds, "-fold cross-validated data", sep = ""), "\n")
    else
      cat("Reported on oobee from", object@valid@key, "\n")
  } else {
    cat("\nConfusion matrix:\n");
    if(is.na(object@valid@key))
      cat("Reported on", paste(object@model$params$nfolds, "-fold cross-validated data", sep = ""), "\n")
    else
      cat("Reported on", object@valid@key, "\n")
  }
  print(model$confusion)
  if(!is.null(model$varimp)) {
    cat("\nVariable importance:\n"); print(model$varimp)
  }
  #mse <-model$mse[length(model$mse)] # (model$mse[is.na(model$mse) | model$mse <= 0] <- "")
  # -1 is the sentinel for "no MSE available".
  if (model$mse != -1) {
    cat("\nMean-squared Error from the",model$params$ntree, "trees: "); cat(model$mse, "\n")
  }
  if(length(object@xval) > 0) {
    cat("\nCross-Validation Models:\n")
    print(sapply(object@xval, function(x) x@key))
  }
})
# Display a PCA fit: standard deviations and the rotation (loadings) matrix.
setMethod("show", "H2OPCAModel", function(object) {
  print(object@data@h2o)
  cat("Parsed Data Key:", object@data@key, "\n\n")
  cat("PCA Model Key:", object@key)
  pca <- object@model
  cat("\n\nStandard deviations:\n", pca$sdev)
  cat("\n\nRotation:\n")
  print(pca$rotation)
})
# Display a naive Bayes fit: class priors and conditional probability tables.
setMethod("show", "H2ONBModel", function(object) {
  print(object@data@h2o)
  cat("Parsed Data Key:", object@data@key, "\n\n")
  cat("Naive Bayes Model Key:", object@key)
  nb <- object@model
  cat("\n\nA-priori probabilities:\n")
  print(nb$apriori_prob)
  cat("\n\nConditional probabilities:\n")
  print(nb$tables)
})
# Display a GBM fit: confusion matrix / AUC / Gini for classification
# distributions, variable importance, per-tree error, and any
# cross-validation model keys.
setMethod("show", "H2OGBMModel", function(object) {
  print(object@data@h2o)
  cat("Parsed Data Key:", object@data@key, "\n\n")
  cat("GBM Model Key:", object@key, "\n")
  model = object@model
  # Only classification distributions carry a confusion matrix.
  if(model$params$distribution %in% c("multinomial", "bernoulli")) {
    cat("\nConfusion matrix:\n")
    # Without a validation frame the matrix comes from cross validation.
    if(is.na(object@valid@key))
      cat("Reported on", paste(object@model$params$nfolds, "-fold cross-validated data", sep = ""), "\n")
    else
      cat("Reported on", object@valid@key, "\n")
    print(model$confusion)
    if(!is.null(model$auc) && !is.null(model$gini))
      cat("\nAUC:", model$auc, "\nGini:", model$gini, "\n")
  }
  if(!is.null(model$varimp)) {
    cat("\nVariable importance:\n"); print(model$varimp)
  }
  cat("\nMean-squared Error by tree:\n"); print(model$err)
  if(length(object@xval) > 0) {
    cat("\nCross-Validation Models:\n")
    print(sapply(object@xval, function(x) x@key))
  }
})
# Display classifier performance metrics at the cutoff optimizing the chosen
# criterion (object@perf), followed by the confusion matrix at that cutoff.
setMethod("show", "H2OPerfModel", function(object) {
  model = object@model
  # All scalar metrics except the final element (the confusion matrix),
  # transposed into a one-column display table.
  tmp = t(data.frame(model[-length(model)]))
  # Capitalize the criterion name for display; "mcc" is a special case.
  if(object@perf == "mcc")
    criterion = "MCC"
  else
    criterion = paste(toupper(substring(object@perf, 1, 1)), substring(object@perf, 2), sep = "")
  rownames(tmp) = c("AUC", "Gini", paste("Best Cutoff for", criterion), "F1", "Accuracy", "Error", "Precision", "Recall", "Specificity", "MCC", "Max per Class Error")
  colnames(tmp) = "Value"; print(tmp)
  cat("\n\nConfusion matrix:\n"); print(model$confusion)
})
#--------------------------------- Unique H2O Methods ----------------------------------#
# TODO: s4 year, month impls as well?
# Extract the year from each entry of an H2O date column, offset by 1900
# (matching POSIXlt's $year convention: the server returns the full year,
# from which 1900 is subtracted).
#
# x: an H2OParsedData object.
# Returns an H2OParsedData handle to the resulting column.
h2o.year <- function(x) {
  if(missing(x)) stop('must specify x')
  # inherits() is the robust class test; class(x) may have length > 1.
  if(!inherits(x, 'H2OParsedData')) stop('x must be an H2OParsedData object')
  res1 <- .h2o.__unop2('year', x)
  .h2o.__binop2("-", res1, 1900)
}
# Extract the month from each entry of an H2O date column.
#
# x: an H2OParsedData object.
# Returns an H2OParsedData handle to the resulting column.
h2o.month <- function(x){
  if(missing(x)) stop('must specify x')
  # inherits() is the robust class test; class(x) may have length > 1.
  if(!inherits(x, 'H2OParsedData')) stop('x must be an H2OParsedData object')
  .h2o.__unop2('month', x)
}
# S3 generics so plain year()/month() calls dispatch to the H2O methods.
year <- function(x) UseMethod("year", x)
year.H2OParsedData <- h2o.year
month <- function(x) UseMethod("month", x)
month.H2OParsedData <- h2o.month
# S3 method for diff() on H2O frames: lagged differences computed server-side.
#
# x:           an H2OParsedData object.
# lag:         lag at which differences are taken.
# differences: order of differencing.
# Returns an H2OParsedData handle to the differenced frame.
diff.H2OParsedData <- function(x, lag = 1, differences = 1, ...) {
  if(!is.numeric(lag)) stop("lag must be numeric")
  if(!is.numeric(differences)) stop("differences must be numeric")
  expr <- paste("diff(", paste(x@key, lag, differences, sep = ","), ")", sep = "")
  res <- .h2o.__exec2(x@h2o, expr)
  res <- .h2o.exec2(res$dest_key, h2o = x@h2o, res$dest_key)
  res@logic <- FALSE
  # BUG FIX: return the frame handle explicitly. Previously the slot
  # assignment was the last expression, so the function evaluated to FALSE
  # (the value of the assignment) instead of the H2OParsedData object.
  res
}
# Transfer a local R object (numeric vector or data frame) to H2O.
#
# client: H2OClient connection.
# object: numeric vector or data.frame to upload.
# key:    destination key; an auto-generated temp key is used when empty.
# header/sep: passed through to h2o.uploadFile for the data.frame path.
# Returns an H2OParsedData handle to the uploaded data.
as.h2o <- function(client, object, key = "", header, sep = "") {
  if(missing(client) || class(client) != "H2OClient") stop("client must be a H2OClient object")
  if(missing(object) || !is.numeric(object) && !is.data.frame(object)) stop("object must be numeric or a data frame")
  if(!is.character(key)) stop("key must be of class character")
  # No key supplied: allocate the next rotating temporary key.
  if(missing(key) || nchar(key) == 0) {
    key = paste(.TEMP_KEY, ".", .pkg.env$temp_count, sep="")
    .pkg.env$temp_count = (.pkg.env$temp_count + 1) %% .RESULT_MAX
  }
  # TODO: Be careful, there might be a limit on how long a vector you can define in console
  if(is.numeric(object) && is.vector(object)) {
    # Small numeric vectors are defined directly via an exec expression.
    res <- .h2o.__exec2_dest_key(client, paste("c(", paste(object, sep=',', collapse=","), ")", collapse=""), key)
    return(.h2o.exec2(res$dest_key, h2o = client, res$dest_key))
  } else {
    # Everything else round-trips through a temporary CSV file upload.
    tmpf <- tempfile(fileext=".csv")
    write.csv(object, file=tmpf, quote = TRUE, row.names = FALSE)
    h2f <- h2o.uploadFile(client, tmpf, key=key, header=header, sep=sep)
    unlink(tmpf)
    return(h2f)
  }
}
# Execute an R-like expression remotely on the H2O server.
#
# expr_to_execute: unevaluated expression; local H2OParsedData references
#                  are substituted with their server-side keys.
# h2o:      optional H2OClient; used directly when the expression contains
#           no H2O objects.
# dest_key: optional destination key for the result.
#
# Relies heavily on package-level mutable state (.pkg.env$DESTKEY, SERVER,
# NEWCOL, FRAMEKEY, NUMCOLS, CURS4) populated by .replace_with_keys.
# Returns a scalar, or an H2OParsedData handle to the result frame.
h2o.exec <- function(expr_to_execute, h2o = NULL, dest_key = "") {
  # Fast path: no H2O objects in the expression, ship the deparsed text as-is.
  # NOTE(review): this call's argument order differs from other .h2o.exec2
  # call sites ((key, h2o, key) elsewhere) -- confirm against its definition.
  if (!is.null(h2o) && !.anyH2O(substitute(expr_to_execute), envir = parent.frame())) {
    return(.h2o.exec2(h2o, deparse(substitute(expr_to_execute)), dest_key))
  }
  # Rewrite the expression, replacing H2O objects with their keys; this also
  # populates the .pkg.env bookkeeping globals consumed below.
  expr <- .replace_with_keys(substitute( expr_to_execute ), envir = parent.frame())
  res <- NULL
  if (dest_key != "") .pkg.env$DESTKEY <- dest_key
  if (.pkg.env$DESTKEY == "") {
    res <- .h2o.__exec2(.pkg.env$SERVER, deparse(expr))
  } else {
    res <- .h2o.__exec2_dest_key(.pkg.env$SERVER, deparse(expr), .pkg.env$DESTKEY)
  }
  # A new column was created by the expression: push its name to the server.
  if (.pkg.env$NEWCOL != "") {
    .h2o.__remoteSend(.pkg.env$SERVER, .h2o.__HACK_SETCOLNAMES2, source=.pkg.env$FRAMEKEY,
    cols=.pkg.env$NUMCOLS, comma_separated_list=.pkg.env$NEWCOL)
  }
  # 0x0 result means the expression collapsed to a scalar.
  if(res$num_rows == 0 && res$num_cols == 0)
    return(res$scalar)
  key <- res$dest_key
  # The expression mutated an existing frame: rebuild its handle and rebind
  # it to the caller's variable so the local object reflects the update.
  if (.pkg.env$FRAMEKEY != "") {
    key <- as.character(.pkg.env$FRAMEKEY)
    newFrame <- .h2o.exec2(key, h2o = .pkg.env$SERVER, key)
    # Recover the caller-side variable name from the top-level call stack.
    topCall <- sys.calls()[[1]]
    idxs <- which( "H2OParsedData" == unlist(lapply(as.list(topCall), .eval_class, envir=parent.frame())))
    obj_name <- as.character(.pkg.env$CURS4)
    if (length(idxs) != 0) obj_name <- as.character(topCall[[idxs]])[1]
    env <- .lookUp(obj_name)
    if (is.null(env)) {
      env <- parent.frame()
    }
    assign(obj_name, newFrame, env)
    return(newFrame)
  }
  .h2o.exec2(key, h2o = .pkg.env$SERVER, key)
}
# Bin a numeric H2O column into intervals, mirroring base::cut.
#
# x:      an H2OParsedData object.
# breaks: either a single bin count or a numeric vector of cut points.
# Returns an H2OParsedData handle (or a scalar if the result collapses).
h2o.cut <- function(x, breaks) {
  if(missing(x)) stop("Must specify data set")
  if(!inherits(x, "H2OParsedData")) stop(cat("\nData must be an H2O data set. Got ", class(x), "\n"))
  if(missing(breaks) || !is.numeric(breaks)) stop("breaks must be a numeric vector")
  # Scalar if/else instead of the original ifelse(): ifelse() on a length-1
  # condition is the vectorized form misused on scalars, and would silently
  # truncate a multi-element yes/no branch to its first element.
  if(length(breaks) == 1)
    nums <- breaks
  else
    nums <- paste("c(", paste(breaks, collapse=","), ")", sep="")
  expr <- paste("cut(", x@key, ",", nums, ")", sep="")
  res <- .h2o.__exec2(x@h2o, expr)
  if(res$num_rows == 0 && res$num_cols == 0) # TODO: If logical operator, need to indicate
    return(res$scalar)
  .h2o.exec2(res$dest_key, h2o = x@h2o, res$dest_key)
}
# TODO: H2O doesn't support any arguments beyond the single H2OParsedData object (with <= 2 cols)
# Cross-tabulate the (at most two) columns of an H2O frame server-side.
#
# x:           an H2OParsedData object with <= 2 columns.
# return.in.R: if TRUE, pull the result back as a local R table object;
#              otherwise return the H2O handle.
h2o.table <- function(x, return.in.R = FALSE) {
  if(missing(x)) stop("Must specify data set")
  if(!inherits(x, "H2OParsedData")) stop(cat("\nData must be an H2O data set. Got ", class(x), "\n"))
  if(ncol(x) > 2) stop("Unimplemented")
  tb <- .h2o.__unop2("table", x)
  if(return.in.R) {
    df <- as.data.frame(tb)
    # Long format (with a Count column): rebuild via xtabs.
    if(!is.null(df$Count))
      return(xtabs(Count ~ ., data = df))
    # Wide format: first column holds the row labels.
    rownames(df) <- df$'row.names'
    df$'row.names' <- NULL
    tb <- as.table(as.matrix(df))
    # TODO: Dimension names should be the names of the columns containing the cross-classifying factors
    dimnames(tb) <- list("row.levels" = rownames(tb), "col.levels" = colnames(tb))
  }
  return(tb)
}
# Split-apply-combine on an H2O frame, mirroring plyr::ddply. The grouping
# columns and function name are shipped to the server, which runs the
# (previously registered) function per group.
#
# .data:      an H2OParsedData object.
# .variables: grouping columns as .(col1, col2), c('col1','col2'), 1:2, or c(1,2).
# .fun:       a server-registered function name, or an anonymous function
#             (registered under the name "anonymous" on the fly).
# .progress:  accepted for plyr compatibility; unused.
h2o.ddply <- function (.data, .variables, .fun = NULL, ..., .progress = 'none') {
  if(missing(.data)) stop('must specify .data')
  if(class(.data) != "H2OParsedData") stop('.data must be an H2OParsedData object')
  if( missing(.variables) ) stop('must specify .variables')
  if( missing(.fun) ) stop('must specify .fun')
  mm <- match.call()
  # Normalize the column spec to integer indices (idx) plus a display form
  # (vars) for error messages.
  if( class(.variables) == 'character'){
    vars <- .variables
    idx <- match(vars, colnames(.data))
  } else if( class(.variables) == 'H2Oquoted' ){
    vars <- as.character(.variables)
    idx <- match(vars, colnames(.data))
  } else if( class(.variables) == 'quoted' ){ # plyr overwrote our . fn
    vars <- names(.variables)
    idx <- match(vars, colnames(.data))
  } else if( class(.variables) == 'integer' ){
    vars <- .variables
    idx <- .variables
  } else if( class(.variables) == 'numeric' ){ # this will happen eg c(1,2,3)
    vars <- .variables
    idx <- as.integer(.variables)
  } else {
    # BUG FIX: previously an unsupported class fell through with `idx`
    # undefined, producing an opaque "object 'idx' not found" error.
    stop(sprintf('.variables of class %s is not supported', class(.variables)[1]))
  }
  bad <- is.na(idx) | idx < 1 | idx > ncol(.data)
  if( any(bad) ) stop( sprintf('can\'t recognize .variables %s', paste(vars[bad], sep=',')) )
  fun_name <- mm[[ '.fun' ]]
  # Anonymous functions are registered server-side under "anonymous".
  if(identical(as.list(substitute(.fun))[[1]], quote(`function`))) {
    h2o.addFunction(.data@h2o, .fun, "anonymous")
    fun_name <- "anonymous"
  }
  exec_cmd <- sprintf('ddply(%s,c(%s),%s)', .data@key, paste(idx, collapse=','), as.character(fun_name))
  res <- .h2o.__exec2(.data@h2o, exec_cmd)
  .h2o.exec2(res$dest_key, h2o = .data@h2o, res$dest_key)
}
ddply <- h2o.ddply
# TODO: how to avoid masking plyr?
# Quoting helper mirroring plyr's .(col1, col2): captures its arguments
# unevaluated and tags them with class "H2Oquoted" so h2o.ddply can
# recognize the column spec.
# TODO: how to avoid masking plyr?
`h2o..` <- function(...) {
  mm <- match.call()
  # Drop the function name, keeping only the quoted arguments.
  mm <- mm[-1]
  structure( as.list(mm), class='H2Oquoted')
}
`.` <- `h2o..`
# Register an R function on the H2O server so it can be invoked remotely
# (e.g. from h2o.ddply). `name` optionally overrides the registered name;
# otherwise the caller's argument expression is used as the name.
h2o.addFunction <- function(object, fun, name){
  if( missing(object) || class(object) != 'H2OClient' ) stop('must specify h2o connection in object')
  if( missing(fun) ) stop('must specify fun')
  if( missing(name) ){
    # Fall back to the unevaluated argument text as the server-side name.
    fun_name <- match.call()[['fun']]
  } else {
    if( class(name) != 'character' ) stop('name must be a name')
    fun_name <- name
  }
  # Serialize the function's source and define it server-side via exec.
  fun_src <- paste(deparse(fun), collapse='\n')
  res <- .h2o.__exec2(object, sprintf('%s <- %s', as.character(fun_name), fun_src))
}
# Row-wise unique() for H2O frames, computed server-side.
#
# x:             an H2OParsedData object.
# incomparables: accepted for signature compatibility with base::unique;
#                currently ignored.
# ...:           only MARGIN = 2 (the default, column-wise) is supported.
# Returns an H2OParsedData handle, x itself for single-row input, or NULL
# for an empty frame.
h2o.unique <- function(x, incomparables = FALSE, ...){
  if(!inherits(x, "H2OParsedData"))
    stop('h2o.unique: x must be an H2OParsedData object')
  # Scalar short-circuit || (the original used the vectorized | here).
  if( nrow(x) == 0 || ncol(x) == 0) return(NULL)
  if( nrow(x) == 1) return(x)
  args <- list(...)
  if( 'MARGIN' %in% names(args) && args[['MARGIN']] != 2 ) stop('h2o.unique: only MARGIN 2 supported')
  .h2o.__unop2("unique", x)
}
unique.H2OParsedData <- h2o.unique
# Generate a uniform random column with one entry per row of x, scaled to
# [min, max], computed server-side (useful for train/test splits).
#
# x:    an H2OParsedData object supplying the row count.
# min/max: scalar bounds of the uniform distribution.
# seed: RNG seed; -1 (the default) lets the server pick one.
#       NOTE(review): the error message says "integer >= 0" although the
#       default is -1 -- confirm the server's accepted range.
h2o.runif <- function(x, min = 0, max = 1, seed = -1) {
  if(missing(x)) stop("Must specify data set")
  if(class(x) != "H2OParsedData") stop(cat("\nData must be an H2O data set. Got ", class(x), "\n"))
  if(!is.numeric(min)) stop("min must be a single number")
  if(!is.numeric(max)) stop("max must be a single number")
  if(length(min) > 1 || length(max) > 1) stop("Unimplemented")
  if(min > max) stop("min must be a number less than or equal to max")
  if(!is.numeric(seed)) stop("seed must be an integer >= 0")
  # Server generates U(0,1); rescale to [min, max] in the same expression.
  expr = paste("runif(", x@key, ",", seed, ")*(", max - min, ")+", min, sep = "")
  res = .h2o.__exec2(x@h2o, expr)
  if(res$num_rows == 0 && res$num_cols == 0)
    return(res$scalar)
  else {
    res <- .h2o.exec2(res$dest_key, h2o = x@h2o, res$dest_key)
    # The result is numeric data, not a logical mask.
    res@logic <- FALSE
    return(res)
  }
}
# Does any column of x have factor (enum) type? Answered from the any_enum
# flag cached on the handle at parse time, so no server round-trip is made.
h2o.anyFactor <- function(x) {
  if(class(x) != "H2OParsedData") stop("x must be an H2OParsedData object")
  x@any_enum
}
# Column names are cached client-side in the col_names slot at parse time,
# so this does not contact the server. do.NULL and prefix are accepted for
# generic compatibility but unused.
setMethod("colnames", "H2OParsedData", function(x, do.NULL = TRUE, prefix = "col") {
  x@col_names
})
#--------------------------------- Overloaded R Methods ----------------------------------#
#--------------------------------- Slicing ----------------------------------#
# i are the rows, j are the columns. These can be vectors of integers or character strings, or a single logical data object
# Slice an H2O frame: i selects rows, j selects columns. Indices may be
# integer/numeric vectors, column-name strings, logical vectors, or a
# logical H2OParsedData mask (logic slot TRUE). The slice expression is
# assembled as text and executed server-side.
setMethod("[", "H2OParsedData", function(x, i, j, ..., drop = TRUE) {
  numRows <- nrow(x); numCols <- ncol(x)
  if (!missing(j) && is.numeric(j) && any(abs(j) < 1 || abs(j) > numCols))
    stop("Array index out of bounds")
  # x[] returns the frame unchanged.
  if(missing(i) && missing(j)) return(x)
  if(missing(i) && !missing(j)) {
    # Column-only selection: x[, j].
    if(is.character(j)) {
      # return(do.call("$", c(x, j)))
      myCol <- colnames(x)
      if(any(!(j %in% myCol))) stop("Undefined columns selected")
      j <- match(j, myCol)
    }
    # if(is.logical(j)) j = -which(!j)
    if(is.logical(j)) j <- which(j)
    # if(class(j) == "H2OLogicalData")
    # A logical H2O frame selects rows by mask even in column position.
    if(class(j) == "H2OParsedData" && j@logic)
      expr <- paste(x@key, "[", j@key, ",]", sep="")
    else if(is.numeric(j) || is.integer(j))
      expr <- paste(x@key, "[,c(", paste(j, collapse=","), ")]", sep="")
    else stop(paste("Column index of type", class(j), "unsupported!"))
  } else if(!missing(i) && missing(j)) {
    # treat `i` as a column selector in this case...
    if (is.character(i)) {
      myCol <- colnames(x)
      if (any(!(i %in% myCol))) stop ("Undefined columns selected")
      i <- match(i, myCol)
      if(is.logical(i)) i <- which(i)
      if(class(i) == "H2OParsedData" && i@logic)
        expr <- paste(x@key, "[", i@key, ",]", sep="")
      else if(is.numeric(i) || is.integer(i))
        expr <- paste(x@key, "[,c(", paste(i, collapse=","), ")]", sep="")
      else stop(paste("Column index of type", class(i), "unsupported!"))
    } else {
      # Numeric/logical/mask i selects rows: x[i, ].
      # if(is.logical(i)) i = -which(!i)
      if(is.logical(i)) i = which(i)
      # if(class(i) == "H2OLogicalData")
      if(class(i) == "H2OParsedData" && i@logic)
        expr <- paste(x@key, "[", i@key, ",]", sep="")
      else if(is.numeric(i) || is.integer(i))
        expr <- paste(x@key, "[c(", paste(i, collapse=","), "),]", sep="")
      else stop(paste("Row index of type", class(i), "unsupported!"))
    }
  } else {
    # Both row and column selectors: build rind and cind separately.
    # if(is.logical(i)) i = -which(!i)
    if(is.logical(i)) i <- which(i)
    # if(class(i) == "H2OLogicalData") rind = i@key
    if(class(i) == "H2OParsedData" && i@logic) rind = i@key
    else if(is.numeric(i) || is.integer(i))
      rind <- paste("c(", paste(i, collapse=","), ")", sep="")
    else stop(paste("Row index of type", class(i), "unsupported!"))
    if(is.character(j)) {
      # return(do.call("$", c(x, j)))
      myCol <- colnames(x)
      if(any(!(j %in% myCol))) stop("Undefined columns selected")
      j <- match(j, myCol)
    }
    # if(is.logical(j)) j = -which(!j)
    if(is.logical(j)) j <- which(j)
    # if(class(j) == "H2OLogicalData") cind = j@key
    if(class(j) == "H2OParsedData" && j@logic) cind <- j@key
    else if(is.numeric(j) || is.integer(j))
      cind <- paste("c(", paste(j, collapse=","), ")", sep="")
    else stop(paste("Column index of type", class(j), "unsupported!"))
    expr <- paste(x@key, "[", rind, ",", cind, "]", sep="")
  }
  res <- .h2o.__exec2(x@h2o, expr)
  # A 0x0 result means the slice collapsed to a scalar.
  if(res$num_rows == 0 && res$num_cols == 0)
    res$scalar
  else
    .h2o.exec2(res$dest_key, h2o = x@h2o, res$dest_key)
})
# Extract a single column by name, mirroring data.frame's `$` operator.
# Unknown names are an error (no partial matching, unlike base lists).
setMethod("$", "H2OParsedData", function(x, name) {
  all_names <- colnames(x)
  if(!(name %in% all_names)) stop(paste("Column", name, "does not exist!"))
  col_idx <- match(name, all_names)
  res <- .h2o.__exec2(x@h2o, paste(x@key, "[,", col_idx, "]", sep=""))
  # A 0x0 result means the selection collapsed to a scalar.
  if(res$num_rows == 0 && res$num_cols == 0) {
    res$scalar
  } else {
    .h2o.exec2(res$dest_key, h2o = x@h2o, res$dest_key)
  }
})
# Assign into a slice of an H2O frame: x[i, j] <- value. The left-hand
# slice expression and right-hand value are assembled as text and executed
# server-side; the updated frame handle is returned.
setMethod("[<-", "H2OParsedData", function(x, i, j, ..., value) {
  numRows = nrow(x); numCols = ncol(x)
  # if((!missing(i) && is.numeric(i) && any(abs(i) < 1 || abs(i) > numRows)) ||
  # (!missing(j) && is.numeric(j) && any(abs(j) < 1 || abs(j) > numCols)))
  # stop("Array index out of bounds!")
  if(!(missing(i) || is.numeric(i) || is.character(i)) || !(missing(j) || is.numeric(j) || is.character(j)))
    stop("Row/column types not supported!")
  if(class(value) != "H2OParsedData" && !is.numeric(value))
    stop("value can only be numeric or an H2OParsedData object")
  if(is.numeric(value) && length(value) != 1 && length(value) != numRows)
    stop("value must be either a single number or a vector of length ", numRows)
  # Bounds checks: appending is only allowed directly after the last row/column.
  if(!missing(i) && is.numeric(i)) {
    if(any(i == 0)) stop("Array index out of bounds")
    if(any(i < 0 && abs(i) > numRows)) stop("Unimplemented: can't extend rows")
    if(min(i) > numRows+1) stop("new rows would leave holes after existing rows")
  }
  if(!missing(j) && is.numeric(j)) {
    if(any(j == 0)) stop("Array index out of bounds")
    if(any(j < 0 && abs(j) > numCols)) stop("Unimplemented: can't extend columns")
    if(min(j) > numCols+1) stop("new columns would leaves holes after existing columns")
  }
  # Build the left-hand side (the slice being assigned to).
  if(missing(i) && missing(j))
    lhs <- x@key
  else if(missing(i) && !missing(j)) {
    # Column-only target: x[, j] <- value.
    if(is.character(j)) {
      myNames <- colnames(x)
      if(any(!(j %in% myNames))) {
        # A single new column name is delegated to the $<- method.
        if(length(j) == 1)
          return(do.call("$<-", list(x, j, value)))
        else stop("Unimplemented: undefined column names specified")
      }
      cind <- match(j, myNames)
    } else cind <- j
    cind <- paste("c(", paste(cind, collapse = ","), ")", sep = "")
    lhs <- paste(x@key, "[,", cind, "]", sep = "")
  } else if(!missing(i) && missing(j)) {
    # treat `i` as a column selector in this case...
    if (is.character(i)) {
      myNames <- colnames(x)
      if (any(!(i %in% myNames))) {
        if (length(i) == 1) return(do.call("$<-", list(x, i, value)))
        else stop("Unimplemented: undefined column names specified")
      }
      cind <- match(i, myNames)
      cind <- paste("c(", paste(cind, collapse = ","), ")", sep = "")
      lhs <- paste(x@key, "[,", cind, "]", sep = "")
    } else {
      # Numeric i targets rows: x[i, ] <- value.
      rind <- paste("c(", paste(i, collapse = ","), ")", sep = "")
      lhs <- paste(x@key, "[", rind, ",]", sep = "")
    }
  } else {
    # Both row and column targets.
    if(is.character(j)) {
      myNames <- colnames(x)
      if(any(!(j %in% myNames))) stop("Unimplemented: undefined column names specified")
      cind <- match(j, myNames)
      # cind = match(j[j %in% myNames], myNames)
    } else cind <- j
    cind <- paste("c(", paste(cind, collapse = ","), ")", sep = "")
    rind <- paste("c(", paste(i, collapse = ","), ")", sep = "")
    lhs <- paste(x@key, "[", rind, ",", cind, "]", sep = "")
  }
  # Build the right-hand side: frame key, single number, or c(...) literal.
  # rhs = ifelse(class(value) == "H2OParsedData", value@key, paste("c(", paste(value, collapse = ","), ")", sep=""))
  if(class(value) == "H2OParsedData")
    rhs <- value@key
  else
    rhs <- ifelse(length(value) == 1, value, paste("c(", paste(value, collapse = ","), ")", sep=""))
  res <- .h2o.__exec2(x@h2o, paste(lhs, "=", rhs))
  # Re-fetch the (mutated) frame so the returned handle has fresh metadata.
  .h2o.exec2(x@key, h2o = x@h2o, x@key)
})
# Assign a column by name: x$name <- value. An existing name is replaced in
# place; a new name is appended as column numCols+1 and registered on the
# server via the set-column-names endpoint.
setMethod("$<-", "H2OParsedData", function(x, name, value) {
  if(missing(name) || !is.character(name) || nchar(name) == 0)
    stop("name must be a non-empty string")
  if(class(value) != "H2OParsedData" && !is.numeric(value))
    stop("value can only be numeric or an H2OParsedData object")
  numCols <- ncol(x); numRows <- nrow(x)
  if(is.numeric(value) && length(value) != 1 && length(value) != numRows)
    stop("value must be either a single number or a vector of length ", numRows)
  # idx is NA when the name is new; then target the next column slot.
  myNames <- colnames(x); idx <- match(name, myNames)
  lhs <- paste(x@key, "[,", ifelse(is.na(idx), numCols+1, idx), "]", sep = "")
  # rhs = ifelse(class(value) == "H2OParsedData", value@key, paste("c(", paste(value, collapse = ","), ")", sep=""))
  if(class(value) == "H2OParsedData")
    rhs <- value@key
  else
    rhs <- ifelse(length(value) == 1, value, paste("c(", paste(value, collapse = ","), ")", sep=""))
  .h2o.__exec2(x@h2o, paste(lhs, "=", rhs))
  # New column: push its name to the server (cols is the 0-based position).
  if(is.na(idx))
    .h2o.__remoteSend(x@h2o, .h2o.__HACK_SETCOLNAMES2, source=x@key, cols=numCols, comma_separated_list=name)
  # Re-fetch the frame so the returned handle carries updated metadata.
  .h2o.exec2(x@key, h2o = x@h2o, x@key)
})
# Single-column extraction via [[. `exact` is accepted for compatibility
# with the base generic but is not used. An unknown name yields NULL with
# a warning (unlike `$`, which errors).
setMethod("[[", "H2OParsedData", function(x, i, exact = TRUE) {
  if(missing(i)) return(x)
  if(length(i) > 1) stop("[[]] may only select one column")
  if(!i %in% colnames(x)) {
    warning(paste("Column", i, "does not exist!"))
    return(NULL)
  }
  x[, i]
})
# Replace or append a single column by name via [[<-. The replacement must
# be a one-column H2O frame with the same number of rows as x.
setMethod("[[<-", "H2OParsedData", function(x, i, value) {
  if(class(value) != "H2OParsedData") stop('Can only append H2O data to H2O data')
  if( ncol(value) > 1 ) stop('May only set a single column')
  if( nrow(value) != nrow(x) ) stop(sprintf('Replacement has %d row, data has %d', nrow(value), nrow(x)))
  # (Removed an unused `mm <- match.call()` left over from earlier code.)
  col_name <- as.list(i)[[1]]
  cc <- colnames(x)
  if( col_name %in% cc ){
    # Existing column: delegate to [<- on the matched index.
    x[, match( col_name, cc ) ] <- value
  } else {
    # New column: append via cbind, then extend the cached column names.
    x <- cbind(x, value)
    cc <- c( cc, col_name )
    colnames(x) <- cc
  }
  x
})
# Note: right now, all things must be H2OParsedData
# Column-bind H2O frames server-side. All arguments must be H2OParsedData
# objects on the same H2O instance with the same number of rows. Named
# arguments become column names via "name=key" in the exec expression.
cbind.H2OParsedData <- function(..., deparse.level = 1) {
  if(deparse.level != 1) stop("Unimplemented")
  l <- list(...)
  # l_dep <- sapply(substitute(placeholderFunction(...))[-1], deparse)
  if(length(l) == 0) stop('cbind requires an H2O parsed dataset')
  klass <- 'H2OParsedData'
  h2o <- l[[1]]@h2o
  nrows <- nrow(l[[1]])
  # Every element must share class, server, and row count with the first.
  m <- Map(function(elem){ inherits(elem, klass) & elem@h2o@ip == h2o@ip & elem@h2o@port == h2o@port & nrows == nrow(elem) }, l)
  # Fold with TRUE (not the reassignable shorthand T) as the seed.
  compatible <- Reduce(function(l,r) l & r, x=m, init=TRUE)
  if(!compatible){ stop(paste('cbind: all elements must be of type', klass, 'and in the same H2O instance'))}
  # If cbind(x,x), dupe colnames will automatically be renamed by H2O
  # TODO: cbind(df[,1], df[,2]) should retain colnames of original data frame (not temp keys from slice)
  if(is.null(names(l)))
    tmp <- Map(function(x) x@key, l)
  else
    tmp <- mapply(function(x,n) { if(is.null(n) || is.na(n) || nchar(n) == 0) x@key else paste(n, x@key, sep = "=") }, l, names(l))
  exec_cmd <- sprintf("cbind(%s)", paste(as.vector(tmp), collapse = ","))
  res <- .h2o.__exec2(h2o, exec_cmd)
  .h2o.exec2(res$dest_key, h2o = h2o, res$dest_key)
}
#--------------------------------- Arithmetic ----------------------------------#
# All binary operators are forwarded to the H2O backend via .h2o.__binop2,
# which builds and executes a server-side expression and returns a new
# H2OParsedData (or scalar) result.
# Unary +/- : implemented as (0 + x) and (0 - x) server-side.
setMethod("+", c("H2OParsedData", "missing"), function(e1, e2) { .h2o.__binop2("+", 0, e1) })
setMethod("-", c("H2OParsedData", "missing"), function(e1, e2) { .h2o.__binop2("-", 0, e1) })
# Frame (op) frame. Note R's %% is spelled "%" in the H2O expression language.
setMethod("+", c("H2OParsedData", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("+", e1, e2) })
setMethod("-", c("H2OParsedData", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("-", e1, e2) })
setMethod("*", c("H2OParsedData", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("*", e1, e2) })
setMethod("/", c("H2OParsedData", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("/", e1, e2) })
setMethod("%%", c("H2OParsedData", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("%", e1, e2) })
setMethod("==", c("H2OParsedData", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("==", e1, e2) })
setMethod(">", c("H2OParsedData", "H2OParsedData"), function(e1, e2) { .h2o.__binop2(">", e1, e2) })
setMethod("<", c("H2OParsedData", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("<", e1, e2) })
setMethod("!=", c("H2OParsedData", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("!=", e1, e2) })
setMethod(">=", c("H2OParsedData", "H2OParsedData"), function(e1, e2) { .h2o.__binop2(">=", e1, e2) })
setMethod("<=", c("H2OParsedData", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("<=", e1, e2) })
setMethod("&", c("H2OParsedData", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("&", e1, e2) })
setMethod("|", c("H2OParsedData", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("|", e1, e2) })
# Matrix multiply is also delegated to the server.
setMethod("%*%", c("H2OParsedData", "H2OParsedData"), function(x, y) { .h2o.__binop2("%*%", x, y) })
# numeric (op) frame.
setMethod("+", c("numeric", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("+", e1, e2) })
setMethod("-", c("numeric", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("-", e1, e2) })
setMethod("*", c("numeric", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("*", e1, e2) })
setMethod("/", c("numeric", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("/", e1, e2) })
setMethod("%%", c("numeric", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("%", e1, e2) })
setMethod("==", c("numeric", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("==", e1, e2) })
setMethod(">", c("numeric", "H2OParsedData"), function(e1, e2) { .h2o.__binop2(">", e1, e2) })
setMethod("<", c("numeric", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("<", e1, e2) })
setMethod("!=", c("numeric", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("!=", e1, e2) })
setMethod(">=", c("numeric", "H2OParsedData"), function(e1, e2) { .h2o.__binop2(">=", e1, e2) })
setMethod("<=", c("numeric", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("<=", e1, e2) })
setMethod("&", c("numeric", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("&", e1, e2) })
setMethod("|", c("numeric", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("|", e1, e2) })
# frame (op) numeric.
setMethod("+", c("H2OParsedData", "numeric"), function(e1, e2) { .h2o.__binop2("+", e1, e2) })
setMethod("-", c("H2OParsedData", "numeric"), function(e1, e2) { .h2o.__binop2("-", e1, e2) })
setMethod("*", c("H2OParsedData", "numeric"), function(e1, e2) { .h2o.__binop2("*", e1, e2) })
setMethod("/", c("H2OParsedData", "numeric"), function(e1, e2) { .h2o.__binop2("/", e1, e2) })
setMethod("%%", c("H2OParsedData", "numeric"), function(e1, e2) { .h2o.__binop2("%", e1, e2) })
setMethod("==", c("H2OParsedData", "numeric"), function(e1, e2) { .h2o.__binop2("==", e1, e2) })
setMethod(">", c("H2OParsedData", "numeric"), function(e1, e2) { .h2o.__binop2(">", e1, e2) })
setMethod("<", c("H2OParsedData", "numeric"), function(e1, e2) { .h2o.__binop2("<", e1, e2) })
setMethod("!=", c("H2OParsedData", "numeric"), function(e1, e2) { .h2o.__binop2("!=", e1, e2) })
setMethod(">=", c("H2OParsedData", "numeric"), function(e1, e2) { .h2o.__binop2(">=", e1, e2) })
setMethod("<=", c("H2OParsedData", "numeric"), function(e1, e2) { .h2o.__binop2("<=", e1, e2) })
setMethod("&", c("H2OParsedData", "numeric"), function(e1, e2) { .h2o.__binop2("&", e1, e2) })
setMethod("|", c("H2OParsedData", "numeric"), function(e1, e2) { .h2o.__binop2("|", e1, e2) })
# Logical scalars are coerced to numeric (0/1) before being sent over the wire.
setMethod("&", c("logical", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("&", as.numeric(e1), e2) })
setMethod("|", c("logical", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("|", as.numeric(e1), e2) })
setMethod("&", c("H2OParsedData", "logical"), function(e1, e2) { .h2o.__binop2("&", e1, as.numeric(e2)) })
setMethod("|", c("H2OParsedData", "logical"), function(e1, e2) { .h2o.__binop2("|", e1, as.numeric(e2)) })
# Integer division and exponentiation.
setMethod("%/%", c("numeric", "H2OParsedData"), function(e1, e2) {.h2o.__binop2("%/%", as.numeric(e1), e2) })
setMethod("%/%", c("H2OParsedData", "numeric"), function(e1, e2){ .h2o.__binop2("%/%", e1, as.numeric(e2)) })
setMethod("^", c("numeric", "H2OParsedData"), function(e1, e2) {.h2o.__binop2("^", as.numeric(e1), e2) })
setMethod("^", c("H2OParsedData", "numeric"), function(e1, e2){ .h2o.__binop2("^", e1, as.numeric(e2)) })
#'
#' Get the domain mapping of an int and a String
#'
# Asks the H2O backend for the integer level code that the string `s` maps to
# in the factor column `vec`. Returns the raw response list; callers read $map.
.getDomainMapping <- function(vec, s="") {
  # inherits() is the robust class test; class() can return a vector, making
  # `class(vec) != "..."` unreliable for S4 objects with multiple classes.
  if (!inherits(vec, "H2OParsedData")) stop("Object must be a H2OParsedData object. Input was: ", vec)
  .h2o.__remoteSend(vec@h2o, .h2o.__DOMAIN_MAPPING, src_key = vec@key, str = s)
}
# Equality between a factor frame and a string: translate the string to its
# integer level code via the backend's domain mapping, then compare codes
# numerically server-side.
setMethod("==", c("H2OParsedData", "character"), function(e1, e2) {
m <- .getDomainMapping(e1,e2)$map
.h2o.__binop2("==", e1, m)
})
# Symmetric case: string on the left-hand side.
setMethod("==", c("character", "H2OParsedData"), function(e1, e2) {
m <- .getDomainMapping(e2,e1)$map
.h2o.__binop2("==", m, e2)
})
# Unary math/inspection operators: each forwards to the backend through
# .h2o.__unop2, which evaluates the named op on the frame server-side.
# Note the server-side spellings "sgn" (sign) and "ceil" (ceiling).
setMethod("!", "H2OParsedData", function(x) { .h2o.__unop2("!", x) })
setMethod("abs", "H2OParsedData", function(x) { .h2o.__unop2("abs", x) })
setMethod("sign", "H2OParsedData", function(x) { .h2o.__unop2("sgn", x) })
setMethod("sqrt", "H2OParsedData", function(x) { .h2o.__unop2("sqrt", x) })
setMethod("ceiling", "H2OParsedData", function(x) { .h2o.__unop2("ceil", x) })
setMethod("floor", "H2OParsedData", function(x) { .h2o.__unop2("floor", x) })
setMethod("trunc", "H2OParsedData", function(x) { .h2o.__unop2("trunc", x) })
setMethod("log", "H2OParsedData", function(x) { .h2o.__unop2("log", x) })
setMethod("exp", "H2OParsedData", function(x) { .h2o.__unop2("exp", x) })
setMethod("is.na", "H2OParsedData", function(x) { .h2o.__unop2("is.na", x) })
setMethod("t", "H2OParsedData", function(x) { .h2o.__unop2("t", x) })
# round() for H2O frames: delegates the rounding to the H2O backend.
# A negative `digits` is translated into the power-of-ten form the backend
# expects. A scalar result is returned directly; otherwise a new frame
# handle is constructed from the destination key.
round.H2OParsedData <- function(x, digits = 0) {
  if (length(digits) > 1 || !is.numeric(digits)) stop("digits must be a single number")
  h2o_digits <- if (digits < 0) 10^(-digits) else digits
  round_expr <- paste0("round(", x@key, ",", h2o_digits, ")")
  res <- .h2o.__exec2(x@h2o, round_expr)
  if (res$num_rows == 0 && res$num_cols == 0) return(res$scalar)
  .h2o.exec2(expr = res$dest_key, h2o = x@h2o, dest_key = res$dest_key)
}
# colnames<- with an H2OParsedData RHS: copy the column names from `value`
# (a frame with the same column count) onto `x`, both on the server and in
# the local col_names slot.
setMethod("colnames<-", signature(x="H2OParsedData", value="H2OParsedData"),
function(x, value) {
if(ncol(value) != ncol(x)) stop("Mismatched number of columns")
res <- .h2o.__remoteSend(x@h2o, .h2o.__HACK_SETCOLNAMES2, source=x@key, copy_from=value@key)
x@col_names <- value@col_names
return(x)
})
# colnames<- with a character RHS: validate (non-empty, unique, exactly
# ncol(x) names), push the names to the server, and update the local slot.
setMethod("colnames<-", signature(x="H2OParsedData", value="character"),
function(x, value) {
if(any(nchar(value) == 0)) stop("Column names must be of non-zero length")
else if(any(duplicated(value))) stop("Column names must be unique")
else if(length(value) != (num = ncol(x))) stop(paste("Must specify a vector of exactly", num, "column names"))
res <- .h2o.__remoteSend(x@h2o, .h2o.__HACK_SETCOLNAMES2, source=x@key, comma_separated_list=value)
x@col_names <- value
return(x)
})
# names()/names<- are aliases for colnames()/colnames<- on H2O frames.
setMethod("names", "H2OParsedData", function(x) { colnames(x) })
setMethod("names<-", "H2OParsedData", function(x, value) { colnames(x) <- value; return(x) })
# setMethod("nrow", "H2OParsedData", function(x) { .h2o.__unop2("nrow", x) })
# setMethod("ncol", "H2OParsedData", function(x) { .h2o.__unop2("ncol", x) })
# nrow/ncol read the cached slot values instead of making a REST round trip
# (the old remote-call versions are kept below as comments).
setMethod("nrow", "H2OParsedData", function(x) {
x@nrows
})
# res = .h2o.__remoteSend(x@h2o, .h2o.__PAGE_INSPECT2, src_key=x@key); as.numeric(res$numRows) })
setMethod("ncol", "H2OParsedData", function(x) {
x@ncols
# res = .h2o.__remoteSend(x@h2o, .h2o.__PAGE_INSPECT2, src_key=x@key); as.numeric(res$numCols)
})
# length() follows R container semantics: for a single-column frame it is the
# number of rows (like a vector); for a multi-column frame it is the number
# of columns (like a data.frame).
setMethod("length", "H2OParsedData", function(x) {
  if (ncol(x) == 1) nrow(x) else ncol(x)
})
# dim() reads the cached slot values (no REST round trip; the old remote
# call is kept below as a comment). dim<- is not supported.
setMethod("dim", "H2OParsedData", function(x) {
c(x@nrows, x@ncols)
# res = .h2o.__remoteSend(x@h2o, .h2o.__PAGE_INSPECT2, src_key=x@key)
# as.numeric(c(res$numRows, res$numCols))
})
setMethod("dim<-", "H2OParsedData", function(x, value) { stop("Unimplemented") })
# setMethod("min", "H2OParsedData", function(x, ..., na.rm = FALSE) {
# if(na.rm) stop("Unimplemented")
# # res = .h2o.__remoteSend(x@h2o, .h2o.__PAGE_INSPECT2, src_key=x@key)
# # min(..., sapply(res$cols, function(x) { x$min }), na.rm)
# min(..., .h2o.__unop2("min", x), na.rm)
# })
#
# setMethod("max", "H2OParsedData", function(x, ..., na.rm = FALSE) {
# if(na.rm) stop("Unimplemented")
# # res = .h2o.__remoteSend(x@h2o, .h2o.__PAGE_INSPECT2, src_key=x@key)
# # max(..., sapply(res$cols, function(x) { x$max }), na.rm)
# max(..., .h2o.__unop2("max", x), na.rm)
# })
.min_internal <- min  # keep a handle on the builtin before masking it
# Masked base min(): when any argument is an H2OParsedData, the minimum over
# those frames is computed server-side and combined with the remaining local
# arguments; otherwise this falls through to the builtin.
#
# Fixes vs original: inherits() instead of class()=="..." comparison,
# vapply() instead of type-unstable sapply(), and scalar if/else instead of
# ifelse() (which is meant for vectors).
min <- function(..., na.rm = FALSE) {
  args <- c(...)
  idx <- vapply(args, function(y) inherits(y, "H2OParsedData"), logical(1))
  if (any(idx)) {
    hex.op <- if (na.rm) "min.na.rm" else "min"
    myData <- args[idx]
    myKeys <- vapply(myData, function(y) y@key, character(1))
    expr <- paste0(hex.op, "(", paste(myKeys, collapse = ","), ")")
    res <- .h2o.__exec2(myData[[1]]@h2o, expr)
    # Combine the server-side scalar with the local (non-H2O) arguments.
    .Primitive("min")(unlist(args[!idx]), res$scalar, na.rm = na.rm)
  } else
    .Primitive("min")(..., na.rm = na.rm)
}
.max_internal <- max  # keep a handle on the builtin before masking it
# Masked base max(): mirror of the masked min() above — server-side maximum
# over any H2OParsedData arguments, combined with the local arguments.
#
# Fixes vs original: inherits() instead of class()=="..." comparison,
# vapply() instead of type-unstable sapply(), and scalar if/else instead of
# ifelse().
max <- function(..., na.rm = FALSE) {
  args <- c(...)
  idx <- vapply(args, function(y) inherits(y, "H2OParsedData"), logical(1))
  if (any(idx)) {
    hex.op <- if (na.rm) "max.na.rm" else "max"
    myData <- args[idx]
    myKeys <- vapply(myData, function(y) y@key, character(1))
    expr <- paste0(hex.op, "(", paste(myKeys, collapse = ","), ")")
    res <- .h2o.__exec2(myData[[1]]@h2o, expr)
    .Primitive("max")(unlist(args[!idx]), res$scalar, na.rm = na.rm)
  } else
    .Primitive("max")(..., na.rm = na.rm)
}
.sum_internal <- sum  # keep a handle on the builtin before masking it
# Masked base sum(): server-side sum over any H2OParsedData arguments,
# added to the sum of the remaining local arguments.
#
# Fixes vs original: inherits() instead of class()=="..." comparison,
# vapply() instead of type-unstable sapply(), and scalar if/else instead of
# ifelse().
sum <- function(..., na.rm = FALSE) {
  args <- c(...)
  idx <- vapply(args, function(y) inherits(y, "H2OParsedData"), logical(1))
  if (any(idx)) {
    hex.op <- if (na.rm) "sum.na.rm" else "sum"
    myData <- args[idx]
    myKeys <- vapply(myData, function(y) y@key, character(1))
    expr <- paste0(hex.op, "(", paste(myKeys, collapse = ","), ")", sep = "")
    res <- .h2o.__exec2(myData[[1]]@h2o, expr)
    .Primitive("sum")(unlist(args[!idx]), res$scalar, na.rm = na.rm)
  } else
    .Primitive("sum")(..., na.rm = na.rm)
}
# range() over all columns of a frame: inspects the per-column min/max the
# server reports and returns c(overall min, overall max).
setMethod("range", "H2OParsedData", function(x) {
res = .h2o.__remoteSend(x@h2o, .h2o.__PAGE_INSPECT2, src_key=x@key)
temp = sapply(res$cols, function(x) { c(x$min, x$max) })
c(min(temp[1,]), max(temp[2,]))
})
# mean() of a single numeric H2O column, computed server-side.
# Trimmed means and multi-column frames are unsupported; a factor column
# yields NA with a warning (matching base mean on non-numeric input), and
# NAs with na.rm = FALSE also yield NA.
mean.H2OParsedData <- function(x, trim = 0, na.rm = FALSE, ...) {
  if (ncol(x) != 1 || trim != 0) stop("Unimplemented")
  if (h2o.anyFactor(x) || dim(x)[2] != 1) {
    warning("argument is not numeric or logical: returning NA")
    return(NA_real_)
  }
  na_blocks_result <- !na.rm && .h2o.__unop2("any.na", x)
  if (na_blocks_result) return(NA)
  .h2o.__unop2("mean", x)
}
# Standard deviation of a single numeric H2O column, computed server-side.
# Returns NA when the column contains NAs and na.rm is FALSE.
setMethod("sd", "H2OParsedData", function(x, na.rm = FALSE) {
  if (ncol(x) != 1) stop("Unimplemented")
  if (dim(x)[2] != 1 || h2o.anyFactor(x))
    stop("Could not coerce argument to double. H2O sd requires a single numeric column.")
  na_blocks_result <- !na.rm && .h2o.__unop2("any.na", x)
  if (na_blocks_result) return(NA)
  .h2o.__unop2("sd", x)
})
# Variance of an H2O frame, computed server-side. Only the single-argument
# form is implemented (`y` and `use` are rejected); categorical columns are
# not allowed, and NAs with na.rm = FALSE yield NA.
setMethod("var", "H2OParsedData", function(x, y = NULL, na.rm = FALSE, use) {
  if (!is.null(y) || !missing(use)) stop("Unimplemented")
  if (h2o.anyFactor(x)) stop("x cannot contain any categorical columns")
  na_blocks_result <- !na.rm && .h2o.__unop2("any.na", x)
  if (na_blocks_result) return(NA)
  .h2o.__unop2("var", x)
})
# Materializes an H2O frame locally: downloads it as CSV over HTTP and
# parses it with read.csv.
#
# Args:
#   x  : H2OParsedData to download.
#   ...: passed through to read.csv().
# Returns: a local data.frame with the frame's contents.
as.data.frame.H2OParsedData <- function(x, ...) {
  if (!inherits(x, "H2OParsedData")) stop("x must be of class H2OParsedData")
  # R >= 3.1 can parse hex-string floats, which round-trip exactly.
  # BUG FIX: the old check tested major >= 3 and minor >= 1 separately, so
  # e.g. R 4.0 (minor "0.x") was wrongly treated as too old. Compare the
  # full version instead.
  use_hex_string <- getRversion() >= "3.1.0"
  url <- paste('http://', x@h2o@ip, ':', x@h2o@port,
               '/2/DownloadDataset',
               '?src_key=', URLencode(x@key),
               '&hex_string=', as.numeric(use_hex_string),
               sep='')
  ttt <- getURL(url)
  # Delete the trailing newline if present -- "\n", or "\r\n" on Windows --
  # so read.csv does not see a spurious empty final record.
  n <- nchar(ttt)
  chars_to_trim <- 0
  if (n >= 2) {
    if (substr(ttt, n, n) == "\n") {
      chars_to_trim <- chars_to_trim + 1
      if (substr(ttt, n - 1, n - 1) == "\r") {
        chars_to_trim <- chars_to_trim + 1
      }
    }
  }
  if (chars_to_trim > 0) {
    ttt <- substr(ttt, 1, n - chars_to_trim)
  }
  # Substitute NAs for blank cells rather than skipping them, so row counts
  # match the server-side frame.
  df <- read.csv((tcon <- textConnection(ttt)), blank.lines.skip = FALSE, ...)
  close(tcon)
  return(df)
}
# Matrix/table coercions go through the data.frame download above.
as.matrix.H2OParsedData <- function(x, ...) { as.matrix(as.data.frame(x, ...)) }
as.table.H2OParsedData <- function(x, ...) { as.table(as.matrix(x, ...))}
# head() for H2O frames: slices the first n rows server-side, downloads the
# slice as a data.frame, and frees the temporary server key.
#
# Args:
#   n: number of rows; a negative n means "all but the last |n|" as in
#      utils::head.
# Returns: a local data.frame with at most n rows.
head.H2OParsedData <- function(x, n = 6L, ...) {
  stopifnot(length(n) == 1L)
  numRows <- nrow(x)
  # scalar if/else (ifelse() is the vectorized form and strips attributes)
  n <- if (n < 0L) max(numRows + n, 0L) else min(n, numRows)
  if (n == 0) return(data.frame())
  tmp_head <- x[seq_len(n), ]
  x.slice <- as.data.frame(tmp_head)
  h2o.rm(tmp_head@h2o, tmp_head@key)  # drop the temporary slice key server-side
  return(x.slice)
}
# tail() for H2O frames: slices the last n rows server-side, downloads the
# slice, frees the temporary key, and labels rows with their original
# positions (matching utils::tail behavior).
tail.H2OParsedData <- function(x, n = 6L, ...) {
  stopifnot(length(n) == 1L)
  nrx <- nrow(x)
  # scalar if/else (ifelse() is the vectorized form and strips attributes)
  n <- if (n < 0L) max(nrx + n, 0L) else min(n, nrx)
  if (n == 0) return(data.frame())
  idx <- seq.int(to = nrx, length.out = n)
  tmp_tail <- x[idx, ]
  x.slice <- as.data.frame(tmp_tail)
  h2o.rm(tmp_tail@h2o, tmp_tail@key)  # drop the temporary slice key server-side
  rownames(x.slice) <- idx  # preserve original row positions in the labels
  return(x.slice)
}
# Factor conversion/inspection are delegated to the backend; is.factor
# coerces the server's answer to a local logical.
setMethod("as.factor", "H2OParsedData", function(x) { .h2o.__unop2("factor", x) })
setMethod("is.factor", "H2OParsedData", function(x) { as.logical(.h2o.__unop2("is.factor", x)) })
# quantile() of a single numeric H2O column, computed server-side.
# Only type 7 (linear interpolation, R's default) is supported; factors and
# (with na.rm = FALSE) NAs are rejected. Returns a named numeric vector like
# stats::quantile.
quantile.H2OParsedData <- function(x, probs = seq(0, 1, 0.25), na.rm = FALSE, names = TRUE, type = 7, ...) {
if((numCols = ncol(x)) != 1) stop("quantile only operates on a single column")
if(is.factor(x)) stop("factors are not allowed")
if(!na.rm && .h2o.__unop2("any.na", x)) stop("missing values and NaN's not allowed if 'na.rm' is FALSE")
if(!is.numeric(probs)) stop("probs must be a numeric vector")
if(any(probs < 0 | probs > 1)) stop("probs must fall in the range of [0,1]")
if(type != 2 && type != 7) stop("type must be either 2 (mean interpolation) or 7 (linear interpolation)")
if(type != 7) stop("Unimplemented: Only type 7 (linear interpolation) is supported from the console")
# Build and run the server-side expression quantile(<key>, c(p1,...,pk)).
myProbs <- paste("c(", paste(probs, collapse = ","), ")", sep = "")
expr <- paste("quantile(", x@key, ",", myProbs, ")", sep = "")
res <- .h2o.__exec2(x@h2o, expr)
# res = .h2o.__remoteSend(x@h2o, .h2o.__PAGE_QUANTILES, source_key = x@key, column = 0, quantile = paste(probs, collapse = ","), interpolation_type = type, ...)
# col <- as.numeric(strsplit(res$result, "\n")[[1]][-1])
# if(numCols > .MAX_INSPECT_COL_VIEW)
#   warning(x@key, " has greater than ", .MAX_INSPECT_COL_VIEW, " columns. This may take awhile...")
# res2 = .h2o.__remoteSend(x@h2o, .h2o.__PAGE_INSPECT, key=res$dest_key, view=res$num_rows, max_column_display=.Machine$integer.max)
# col <- sapply(res2$rows, function(x) { x[[2]] })
# Download the one-column result frame and extract the values.
col <- as.data.frame(new("H2OParsedData", h2o=x@h2o, key=res$dest_key))[[1]]
if(names) names(col) <- paste(100*probs, "%", sep="")
return(col)
}
# setMethod("summary", "H2OParsedData", function(object) {
# summary() for H2O frames: fetches per-column summary statistics from the
# backend and formats them into a table shaped like base summary.data.frame.
# Numeric columns get Min/1st Qu./Median/Mean/3rd Qu./Max rows; enum columns
# get their six most frequent levels (plus an NA count when present).
summary.H2OParsedData <- function(object, ...) {
digits <- 12L
if(ncol(object) > .MAX_INSPECT_COL_VIEW)
warning(object@key, " has greater than ", .MAX_INSPECT_COL_VIEW, " columns. This may take awhile...")
res <- .h2o.__remoteSend(object@h2o, .h2o.__PAGE_SUMMARY2, source=object@key, max_ncols=.Machine$integer.max)
cols <- sapply(res$summaries, function(col) {
if(col$stats$type != 'Enum') { # numeric column
# Guard against missing min/max in the response (e.g. all-NA columns).
if(is.null(col$stats$mins) || length(col$stats$mins) == 0) col$stats$mins = NaN
if(is.null(col$stats$maxs) || length(col$stats$maxs) == 0) col$stats$maxs = NaN
# Without percentiles (constant column), repeat the mean for all six slots.
if(is.null(col$stats$pctile))
params <- format(rep(signif(as.numeric(col$stats$mean), digits), 6), digits = 4)
else
params <- format(signif(as.numeric(c(
col$stats$mins[1],
col$stats$pctile[4],
col$stats$pctile[6],
col$stats$mean,
col$stats$pctile[8],
col$stats$maxs[1])), digits), digits = 4)
result = c(paste("Min. :", params[1], " ", sep=""), paste("1st Qu.:", params[2], " ", sep=""),
paste("Median :", params[3], " ", sep=""), paste("Mean :", params[4], " ", sep=""),
paste("3rd Qu.:", params[5], " ", sep=""), paste("Max. :", params[6], " ", sep=""))
}
else {
# Enum column: take the six most frequent levels by histogram count.
top.ix <- sort.int(col$hcnt, decreasing=TRUE, index.return=TRUE)$ix[1:6]
if(is.null(col$hbrk)) domains <- top.ix[1:6] else domains <- col$hbrk[top.ix]
counts <- col$hcnt[top.ix]
# TODO: Make sure "NA's" isn't a legal domain level
if(!is.null(col$nacnt) && col$nacnt > 0) {
idx <- ifelse(any(is.na(top.ix)), which(is.na(top.ix))[1], 6)
domains[idx] <- "NA's"
counts[idx] <- col$nacnt
}
# width <- max(cbind(nchar(domains), nchar(counts)))
# Pad level names and counts so "level:count" entries align in the table.
width <- c(max(nchar(domains)), max(nchar(counts)))
result <- paste(domains,
sapply(domains, function(x) { ifelse(width[1] == nchar(x), "", paste(rep(' ', width[1] - nchar(x)), collapse='')) }),
":",
sapply(counts, function(y) { ifelse(width[2] == nchar(y), "", paste(rep(' ', width[2] - nchar(y)), collapse='')) }),
counts,
" ",
sep='')
# result[is.na(top.ix)] <- NA
result[is.na(domains)] <- NA
result
}
})
# Filter out rows with nothing in them
cidx <- apply(cols, 1, function(x) { any(!is.na(x)) })
if(ncol(cols) == 1) { cols <- as.matrix(cols[cidx,]) } else { cols <- cols[cidx,] }
# cols <- as.matrix(cols[cidx,])
result <- as.table(cols)
rownames(result) <- rep("", nrow(result))
colnames(result) <- sapply(res$summaries, function(col) col$colname)
result
}
# Prints the importance-of-components table (standard deviation, proportion
# of variance, cumulative proportion) for a fitted H2O PCA model, mirroring
# summary.prcomp.
summary.H2OPCAModel <- function(object, ...) {
  # TODO: Save propVar and cumVar from the Java output instead of computing here
  variances <- object@model$sdev^2
  prop_var <- variances / sum(variances)
  result <- rbind(object@model$sdev, prop_var, cumsum(prop_var)) # Need to limit decimal places to 4
  colnames(result) <- paste("PC", seq(1, length(variances)), sep = "")
  rownames(result) <- c("Standard deviation", "Proportion of Variance", "Cumulative Proportion")
  cat("Importance of components:\n")
  print(result)
}
# Scree plot for a fitted H2O PCA model: variances of the first npcs
# principal components, as a bar chart or a line plot.
screeplot.H2OPCAModel <- function(x, npcs = min(10, length(x@model$sdev)), type = "barplot", main = paste("h2o.prcomp(", x@data@key, ")", sep=""), ...) {
  if(type == "barplot")
    barplot(x@model$sdev[1:npcs]^2, main = main, ylab = "Variances", ...)
  else if(type == "lines")
    # BUG FIX: lines() only adds to an existing plot and errors when no plot
    # is open; start a new line plot (as stats::screeplot.default does).
    plot(x@model$sdev[1:npcs]^2, type = "l", main = main, ylab = "Variances", ...)
  else
    stop("type must be either 'barplot' or 'lines'")
}
# Asks the backend whether the (single-column) frame holds only 0/1-style
# values, i.e. whether it can act as a logical vector. Returns a local logical.
.canBeCoercedToLogical <- function(vec) {
  # inherits() is the robust class test (class() may return a vector)
  if (!inherits(vec, "H2OParsedData")) stop("Object must be a H2OParsedData object. Input was: ", vec)
  # expects fr to be a vec.
  as.logical(.h2o.__unop2("canBeCoercedToLogical", vec))
}
# Internal guard for the masked ifelse() below. With type == "test" it
# returns TRUE iff `test` is an H2O frame that is (or can be coerced to)
# logical and `yes`/`no` are numeric, logical, or H2O frames. Any other
# `type` returns NULL invisibly (callers only pass "test").
.check.ifelse.conditions <-
  function(test, yes, no, type) {
    if (type == "test") {
      # inherits() replaces the fragile class(x) == "..." comparisons.
      return(inherits(test, "H2OParsedData")
             && (is.numeric(yes) || inherits(yes, "H2OParsedData") || is.logical(yes))
             && (is.numeric(no) || inherits(no, "H2OParsedData") || is.logical(no))
             && (test@logic || .canBeCoercedToLogical(test)))
    }
  }
# Masked base ifelse(): dispatches to the H2O backend when the condition or
# either branch is an H2O frame, and otherwise falls back to a verbatim copy
# of base::ifelse so plain R inputs behave exactly as usual.
ifelse<-
function (test, yes, no)
{
# Case 1: `test` is an H2O logical frame (validated by the guard above).
if (.check.ifelse.conditions(test, yes, no, "test")) {
if (is.logical(yes)) yes <- as.numeric(yes)
if (is.logical(no)) no <- as.numeric(no)
return(.h2o.__multop2("ifelse", test, yes, no))
} else if ( class(yes) == "H2OParsedData" && class(test) == "logical") {
# Case 2/3: a local logical condition selecting between H2O frame branches;
# coerce the scalar condition to numeric for the backend.
if (is.logical(yes)) yes <- as.numeric(yes)
if (is.logical(no)) no <- as.numeric(no)
return(.h2o.__multop2("ifelse", as.numeric(test), yes, no))
} else if (class(no) == "H2OParsedData" && class(test) == "logical") {
if (is.logical(yes)) yes <- as.numeric(yes)
if (is.logical(no)) no <- as.numeric(no)
return(.h2o.__multop2("ifelse", as.numeric(test), yes, no))
}
# Fallback: verbatim base::ifelse semantics for ordinary R vectors.
if (is.atomic(test))
storage.mode(test) <- "logical"
else test <- if (isS4(test))
as(test, "logical")
else as.logical(test)
ans <- test
ok <- !(nas <- is.na(test))
if (any(test[ok]))
ans[test & ok] <- rep(yes, length.out = length(ans))[test &
ok]
if (any(!test[ok]))
ans[!test & ok] <- rep(no, length.out = length(ans))[!test &
ok]
ans[nas] <- NA
ans
}
#setMethod("ifelse", signature(test="H2OParsedData", yes="ANY", no="ANY"), function(test, yes, no) {
# if(!(is.numeric(yes) || class(yes) == "H2OParsedData") || !(is.numeric(no) || class(no) == "H2OParsedData"))
# stop("Unimplemented")
# if(!test@logic && !.canBeCoercedToLogical(test)) stop(test@key, " is not a H2O logical data type")
# h2o.exec(ifelse(test, yes, no))
## .h2o.__multop2("ifelse", eval(test), yes, no)
#})
##
#setMethod("ifelse", signature(test="logical", yes="H2OParsedData", no="ANY"), function(test, yes, no) {
# if(length(test) > 1) stop("test must be a single logical value")
# h2o.exec(ifelse(test, yes, no))
## .h2o.__multop2("ifelse", as.numeric(test), eval(yes), no)
#})
#
#setMethod("ifelse", signature(test="logical", yes="ANY", no="H2OParsedData"), function(test, yes, no) {
# if(length(test) > 1) stop("test must be a single logical value")
# h2o.exec(ifelse(test, yes, no))
## .h2o.__multop2("ifelse", as.numeric(test), yes, eval(no))
#})
#
#setMethod("ifelse", signature(test="logical", yes="H2OParsedData", no="H2OParsedData"), function(test, yes, no) {
# if(length(test) > 1) stop("test must be a single logical value")
# h2o.exec(ifelse(test, yes, no))
## .h2o.__multop2("ifelse", as.numeric(test), eval(yes), eval(no))
#})
#
# levels() of a single-column H2O frame: fetches the factor level strings
# from the backend. Multi-column frames are rejected.
setMethod("levels", "H2OParsedData", function(x) {
  if (ncol(x) != 1) stop("Can only retrieve levels of one column.")
  res <- .h2o.__remoteSend(x@h2o, .h2o.__HACK_LEVELS2, source = x@key, max_ncols = .Machine$integer.max)
  res$levels[[1]]
})
#----------------------------- Work in Progress -------------------------------#
# TODO: Need to change ... to environment variables and pass to substitute method,
# Can't figure out how to access outside environment from within lapply
# apply() for H2O frames: deparses FUN locally and ships it to the backend
# to be evaluated over rows (MARGIN 1), columns (MARGIN 2), or both.
# Extra arguments in ... are not yet supported (note: the code after the
# stop("Unimplemented") below is unreachable work-in-progress).
setMethod("apply", "H2OParsedData", function(X, MARGIN, FUN, ...) {
if(missing(X) || class(X) != "H2OParsedData")
stop("X must be a H2OParsedData object")
if(missing(MARGIN) || !(length(MARGIN) <= 2 && all(MARGIN %in% c(1,2))))
stop("MARGIN must be either 1 (rows), 2 (cols), or a vector containing both")
if(missing(FUN) || !is.function(FUN))
stop("FUN must be an R function")
myList <- list(...)
if(length(myList) > 0) {
stop("Unimplemented")
# Dead code below: retained as WIP for forwarding ... arguments by key.
tmp = sapply(myList, function(x) { !class(x) %in% c("H2OParsedData", "numeric") } )
if(any(tmp)) stop("H2O only recognizes H2OParsedData and numeric objects")
idx = which(sapply(myList, function(x) { class(x) == "H2OParsedData" }))
# myList <- lapply(myList, function(x) { if(class(x) == "H2OParsedData") x@key else x })
myList[idx] <- lapply(myList[idx], function(x) { x@key })
# TODO: Substitute in key name for H2OParsedData objects and push over wire to console
if(any(names(myList) == ""))
stop("Must specify corresponding variable names of ", myList[names(myList) == ""])
}
# Substitute in function name: FUN <- match.fun(FUN)
# Deparse FUN to text; a braced multi-line body is collapsed to a single
# line with ';' separators so the backend receives one expression.
myfun = deparse(substitute(FUN))
len = length(myfun)
if(len > 3 && substr(myfun[1], nchar(myfun[1]), nchar(myfun[1])) == "{" && myfun[len] == "}")
myfun = paste(myfun[1], paste(myfun[2:(len-1)], collapse = ";"), "}")
else
myfun = paste(myfun, collapse = "")
if(length(MARGIN) > 1)
params = c(X@key, paste("c(", paste(MARGIN, collapse = ","), ")", sep = ""), myfun)
else
params = c(X@key, MARGIN, myfun)
expr = paste("apply(", paste(params, collapse = ","), ")", sep="")
res = .h2o.__exec2(X@h2o, expr)
.h2o.exec2(res$dest_key, h2o = X@h2o, res$dest_key)
})
# str() for H2O frames: first runs the default str() on the S4 object
# itself, then prints a compact data.frame-style structure report (column
# names, types, first 10 values; factor columns show their levels) built
# from server-side inspection.
str.H2OParsedData <- function(object, ...) {
if (length(l <- list(...)) && any("give.length" == names(l)))
invisible(NextMethod("str", ...))
else invisible(NextMethod("str", give.length = FALSE, ...))
if(ncol(object) > .MAX_INSPECT_COL_VIEW)
warning(object@key, " has greater than ", .MAX_INSPECT_COL_VIEW, " columns. This may take awhile...")
res = .h2o.__remoteSend(object@h2o, .h2o.__PAGE_INSPECT2, src_key=object@key)
cat("\nH2O dataset '", object@key, "':\t", res$numRows, " obs. of ", (p <- res$numCols),
" variable", if(p != 1) "s", if(p > 0) ":", "\n", sep = "")
cc <- unlist(lapply(res$cols, function(y) y$name))
width <- max(nchar(cc))  # pad column names so the ':' separators align
rows <- res$rows[1:min(res$numRows, 10)] # TODO: Might need to check rows > 0
res2 = .h2o.__remoteSend(object@h2o, .h2o.__HACK_LEVELS2, source=object@key, max_ncols=.Machine$integer.max)
for(i in 1:p) {
cat("$ ", cc[i], rep(' ', width - nchar(cc[i])), ": ", sep = "")
rhead <- sapply(rows, function(x) { x[i+1] })
if(is.null(res2$levels[[i]]))
cat("num ", paste(rhead, collapse = " "), if(res$numRows > 10) " ...", "\n", sep = "")
else {
# Factor column: print levels, then the integer level codes of the head.
rlevels = res2$levels[[i]]
cat("Factor w/ ", (count <- length(rlevels)), " level", if(count != 1) "s", ' "', paste(rlevels[1:min(count, 2)], collapse = '","'), '"', if(count > 2) ",..", ": ", sep = "")
cat(paste(match(rhead, rlevels), collapse = " "), if(res$numRows > 10) " ...", "\n", sep = "")
}
}
}
# findInterval() for H2O frames: validates the breakpoint vector locally,
# then asks the backend to bin the frame's values. all.inside is not
# supported.
setMethod("findInterval", "H2OParsedData", function(x, vec, rightmost.closed = FALSE, all.inside = FALSE) {
  if (any(is.na(vec))) stop("'vec' contains NAs")
  if (is.unsorted(vec)) stop("'vec' must be sorted non-decreasingly")
  if (all.inside) stop("Unimplemented")
  vec_expr <- paste0("c(", .seq_to_string(vec), ")")
  find_expr <- paste0("findInterval(", x@key, ",", vec_expr, ",", as.numeric(rightmost.closed), ")")
  res <- .h2o.__exec2(x@h2o, find_expr)
  new('H2OParsedData', h2o = x@h2o, key = res$dest_key)
})
# setGeneric("histograms", function(object) { standardGeneric("histograms") })
# setMethod("histograms", "H2OParsedData", function(object) {
# if(ncol(object) > .MAX_INSPECT_COL_VIEW)
# warning(object@key, " has greater than ", .MAX_INSPECT_COL_VIEW, " columns. This may take awhile...")
# res = .h2o.__remoteSend(object@h2o, .h2o.__PAGE_SUMMARY2, source=object@key, max_ncols=.Machine$integer.max)
# list.of.bins <- lapply(res$summaries, function(x) {
# if (x$stats$type == 'Enum') {
# bins <- NULL
# } else {
# counts <- x$hcnt
# breaks <- seq(x$hstart, by=x$hstep, length.out=length(x$hcnt) + 1)
# bins <- list(counts,breaks)
# names(bins) <- cbind('counts', 'breaks')
# }
# bins
# })
# return(list.of.bins)
# })
#--------------------------------- Class Definitions ----------------------------------#
# WARNING: Do NOT touch the env slot! It is used to link garbage collection between R and H2O
# Core S4 classes. H2OClient identifies a running H2O cloud; H2ORawData is an
# unparsed key on that cloud; H2OParsedData is a parsed frame handle with
# cached shape/column metadata. Model classes wrap a key, the training data
# handle, and the JSON-derived model list.
setClass("H2OClient", representation(ip="character", port="numeric"), prototype(ip="127.0.0.1", port=54321))
setClass("H2ORawData", representation(h2o="H2OClient", key="character"))
# setClass("H2ORawData", representation(h2o="H2OClient", key="character", env="environment"))
# ncols/nrows default to -1 meaning "not yet fetched"; logic marks frames
# that hold logical (0/1) results.
setClass("H2OParsedData", representation(h2o="H2OClient", key="character", logic="logical", col_names="vector", nrows="numeric", ncols="numeric", any_enum="logical"),
prototype(logic=FALSE, col_names="", ncols=-1, nrows=-1, any_enum = FALSE))
# setClass("H2OParsedData", representation(h2o="H2OClient", key="character", env="environment", logic="logical"), prototype(logic=FALSE))
setClass("H2OModel", representation(key="character", data="H2OParsedData", model="list", "VIRTUAL"))
# setClass("H2OModel", representation(key="character", data="H2OParsedData", model="list", env="environment", "VIRTUAL"))
setClass("H2OGrid", representation(key="character", data="H2OParsedData", model="list", sumtable="list", "VIRTUAL"))
setClass("H2OPerfModel", representation(cutoffs="numeric", measure="numeric", perf="character", model="list", roc="data.frame"))
# Concrete model classes, one per algorithm; xval holds cross-validation
# sub-models and valid the validation frame where applicable.
setClass("H2OGLMModel", contains="H2OModel", representation(xval="list"))
setClass("H2OKMeansModel", contains="H2OModel")
setClass("H2ODeepLearningModel", contains="H2OModel", representation(valid="H2OParsedData", xval="list"))
setClass("H2ODRFModel", contains="H2OModel", representation(valid="H2OParsedData", xval="list"))
setClass("H2ONBModel", contains="H2OModel")
setClass("H2OPCAModel", contains="H2OModel")
setClass("H2OGBMModel", contains="H2OModel", representation(valid="H2OParsedData", xval="list"))
setClass("H2OSpeeDRFModel", contains="H2OModel", representation(valid="H2OParsedData", xval="list"))
# Grid-search result classes, one per algorithm.
setClass("H2OGLMGrid", contains="H2OGrid")
setClass("H2OGBMGrid", contains="H2OGrid")
setClass("H2OKMeansGrid", contains="H2OGrid")
setClass("H2ODRFGrid", contains="H2OGrid")
setClass("H2ODeepLearningGrid", contains="H2OGrid")
setClass("H2OSpeeDRFGrid", contains="H2OGrid")
setClass("H2OGLMModelList", representation(models="list", best_model="numeric", lambdas="numeric"))
# Register finalizers for H2O data and model objects
# setMethod("initialize", "H2ORawData", function(.Object, h2o = new("H2OClient"), key = "") {
# .Object@h2o = h2o
# .Object@key = key
# .Object@env = new.env()
#
# assign("h2o", .Object@h2o, envir = .Object@env)
# assign("key", .Object@key, envir = .Object@env)
#
# # Empty keys don't refer to any object in H2O
# if(key != "") reg.finalizer(.Object@env, .h2o.__finalizer)
# return(.Object)
# })
#
# setMethod("initialize", "H2OParsedData", function(.Object, h2o = new("H2OClient"), key = "") {
# .Object@h2o = h2o
# .Object@key = key
# .Object@env = new.env()
#
# assign("h2o", .Object@h2o, envir = .Object@env)
# assign("key", .Object@key, envir = .Object@env)
#
# # Empty keys don't refer to any object in H2O
# if(key != "") reg.finalizer(.Object@env, .h2o.__finalizer)
# return(.Object)
# })
#
# setMethod("initialize", "H2OModel", function(.Object, key = "", data = new("H2OParsedData"), model = list()) {
# .Object@key = key
# .Object@data = data
# .Object@model = model
# .Object@env = new.env()
#
# assign("h2o", .Object@data@h2o, envir = .Object@env)
# assign("key", .Object@key, envir = .Object@env)
#
# # Empty keys don't refer to any object in H2O
# if(key != "") reg.finalizer(.Object@env, .h2o.__finalizer)
# return(.Object)
# })
#--------------------------------- Class Display Functions ----------------------------------#
#--------------------------------- Class Display Functions ----------------------------------#
# show() methods: console pretty-printers for the core handle classes.
setMethod("show", "H2OClient", function(object) {
cat("IP Address:", object@ip, "\n")
cat("Port :", object@port, "\n")
})
setMethod("show", "H2ORawData", function(object) {
print(object@h2o)
cat("Raw Data Key:", object@key, "\n")
})
# Frames print their cloud, key, and the first few rows (via head()).
setMethod("show", "H2OParsedData", function(object) {
print(object@h2o)
cat("Parsed Data Key:", object@key, "\n\n")
print(head(object))
})
# show() for grid searches: prints the cloud, data key, grid key, and a
# summary data.frame assembled from the per-model summary table.
setMethod("show", "H2OGrid", function(object) {
print(object@data@h2o)
cat("Parsed Data Key:", object@data@key, "\n\n")
cat("Grid Search Model Key:", object@key, "\n")
temp = data.frame(t(sapply(object@sumtable, c)))
cat("\nSummary\n"); print(temp)
})
# show() for k-means models: mirrors the printout of stats::kmeans
# (cluster sizes, centers, assignment summary, within/between SS).
setMethod("show", "H2OKMeansModel", function(object) {
print(object@data@h2o)
cat("Parsed Data Key:", object@data@key, "\n\n")
cat("K-Means Model Key:", object@key)
model = object@model
cat("\n\nK-means clustering with", length(model$size), "clusters of sizes "); cat(model$size, sep=", ")
cat("\n\nCluster means:\n"); print(model$centers)
cat("\nClustering vector:\n"); print(summary(model$cluster))
cat("\nWithin cluster sum of squares by cluster:\n"); print(model$withinss)
cat("(between_SS / total_SS = ", round(100*sum(model$betweenss)/model$totss, 1), "%)\n")
cat("\nAvailable components:\n\n"); print(names(model))
})
# show() for GLM models: coefficients (raw and, when present, normalized),
# deviance statistics in the style of stats::glm, binomial-only AUC and
# confusion matrix, and a per-fold table when cross-validation was run.
setMethod("show", "H2OGLMModel", function(object) {
print(object@data@h2o)
cat("Parsed Data Key:", object@data@key, "\n\n")
cat("GLM2 Model Key:", object@key)
model <- object@model
cat("\n\nCoefficients:\n"); print(round(model$coefficients,5))
if(!is.null(model$normalized_coefficients)) {
cat("\nNormalized Coefficients:\n"); print(round(model$normalized_coefficients,5))
}
cat("\nDegrees of Freedom:", model$df.null, "Total (i.e. Null); ", model$df.residual, "Residual")
cat("\nNull Deviance: ", round(model$null.deviance,1))
cat("\nResidual Deviance:", round(model$deviance,1), " AIC:", round(model$aic,1))
cat("\nDeviance Explained:", round(1-model$deviance/model$null.deviance,5), "\n")
# cat("\nAvg Training Error Rate:", round(model$train.err,5), "\n")
family <- model$params$family$family
if(family == "binomial") {
cat("AUC:", round(model$auc,5), " Best Threshold:", round(model$best_threshold,5))
cat("\n\nConfusion Matrix:\n"); print(model$confusion)
}
if(length(object@xval) > 0) {
cat("\nCross-Validation Models:\n")
# One row per fold: nonzero coefficients plus AUC (binomial) or AIC.
if(family == "binomial") {
modelXval <- t(sapply(object@xval, function(x) { c(x@model$rank-1, x@model$auc, 1-x@model$deviance/x@model$null.deviance) }))
colnames(modelXval) = c("Nonzeros", "AUC", "Deviance Explained")
} else {
modelXval <- t(sapply(object@xval, function(x) { c(x@model$rank-1, x@model$aic, 1-x@model$deviance/x@model$null.deviance) }))
colnames(modelXval) = c("Nonzeros", "AIC", "Deviance Explained")
}
rownames(modelXval) <- paste("Model", 1:nrow(modelXval))
print(modelXval)
}
})
setMethod("summary","H2OGLMModelList", function(object) {
summary <- NULL
if(object@models[[1]]@model$params$family$family == 'binomial'){
for(m in object@models) {
model = m@model
if(is.null(summary)) {
summary = t(as.matrix(c(model$lambda, model$df.null-model$df.residual,round((1-model$deviance/model$null.deviance),2),round(model$auc,2))))
} else {
summary = rbind(summary,c(model$lambda,model$df.null-model$df.residual,round((1-model$deviance/model$null.deviance),2),round(model$auc,2)))
}
}
summary = cbind(1:nrow(summary),summary)
colnames(summary) <- c("id","lambda","predictors","dev.ratio"," AUC ")
} else {
for(m in object@models) {
model = m@model
if(is.null(summary)) {
summary = t(as.matrix(c(model$lambda, model$df.null-model$df.residual,round((1-model$deviance/model$null.deviance),2))))
} else {
summary = rbind(summary,c(model$lambda,model$df.null-model$df.residual,round((1-model$deviance/model$null.deviance),2)))
}
}
summary = cbind(1:nrow(summary),summary)
colnames(summary) <- c("id","lambda","predictors","explained dev")
}
summary
})
setMethod("show", "H2OGLMModelList", function(object) {
print(summary(object))
cat("best model:",object@best_model, "\n")
})
setMethod("show", "H2ODeepLearningModel", function(object) {
print(object@data@h2o)
cat("Parsed Data Key:", object@data@key, "\n\n")
cat("Deep Learning Model Key:", object@key)
model = object@model
cat("\n\nTraining classification error:", model$train_class_error)
cat("\nTraining mean square error:", model$train_sqr_error)
cat("\n\nValidation classification error:", model$valid_class_error)
cat("\nValidation square error:", model$valid_sqr_error)
if(!is.null(model$confusion)) {
cat("\n\nConfusion matrix:\n")
if(is.na(object@valid@key)) {
if(model$params$nfolds == 0)
cat("Reported on", object@data@key, "\n")
else
cat("Reported on", paste(model$params$nfolds, "-fold cross-validated data", sep = ""), "\n")
} else
cat("Reported on", object@valid@key, "\n")
print(model$confusion)
}
if(!is.null(model$hit_ratios)) {
cat("\nHit Ratios for Multi-class Classification:\n")
print(model$hit_ratios)
}
if(!is.null(object@xval) && length(object@xval) > 0) {
cat("\nCross-Validation Models:\n")
temp = lapply(object@xval, function(x) { cat(" ", x@key, "\n") })
}
})
setMethod("show", "H2ODRFModel", function(object) {
print(object@data@h2o)
cat("Parsed Data Key:", object@data@key, "\n\n")
cat("Distributed Random Forest Model Key:", object@key)
model = object@model
cat("\n\nClasification:", model$params$classification)
cat("\nNumber of trees:", model$params$ntree)
cat("\nTree statistics:\n"); print(model$forest)
if(model$params$classification) {
cat("\nConfusion matrix:\n")
if(is.na(object@valid@key))
cat("Reported on", paste(object@model$params$nfolds, "-fold cross-validated data", sep = ""), "\n")
else
cat("Reported on", object@valid@key, "\n")
print(model$confusion)
if(!is.null(model$auc) && !is.null(model$gini))
cat("\nAUC:", model$auc, "\nGini:", model$gini, "\n")
}
if(!is.null(model$varimp)) {
cat("\nVariable importance:\n"); print(model$varimp)
}
cat("\nMean-squared Error by tree:\n"); print(model$mse)
if(length(object@xval) > 0) {
cat("\nCross-Validation Models:\n")
print(sapply(object@xval, function(x) x@key))
}
})
setMethod("show", "H2OSpeeDRFModel", function(object) {
print(object@data@h2o)
cat("Parsed Data Key:", object@data@key, "\n\n")
cat("SpeeDRF Model Key:", object@key)
model = object@model
cat("\n\nClassification:", model$params$classification)
cat("\nNumber of trees:", model$params$ntree)
if(FALSE){ #model$params$oobee) {
cat("\nConfusion matrix:\n"); cat("Reported on oobee from", object@valid@key, "\n")
if(is.na(object@valid@key))
cat("Reported on oobee from", paste(object@model$params$nfolds, "-fold cross-validated data", sep = ""), "\n")
else
cat("Reported on oobee from", object@valid@key, "\n")
} else {
cat("\nConfusion matrix:\n");
if(is.na(object@valid@key))
cat("Reported on", paste(object@model$params$nfolds, "-fold cross-validated data", sep = ""), "\n")
else
cat("Reported on", object@valid@key, "\n")
}
print(model$confusion)
if(!is.null(model$varimp)) {
cat("\nVariable importance:\n"); print(model$varimp)
}
#mse <-model$mse[length(model$mse)] # (model$mse[is.na(model$mse) | model$mse <= 0] <- "")
if (model$mse != -1) {
cat("\nMean-squared Error from the",model$params$ntree, "trees: "); cat(model$mse, "\n")
}
if(length(object@xval) > 0) {
cat("\nCross-Validation Models:\n")
print(sapply(object@xval, function(x) x@key))
}
})
setMethod("show", "H2OPCAModel", function(object) {
print(object@data@h2o)
cat("Parsed Data Key:", object@data@key, "\n\n")
cat("PCA Model Key:", object@key)
model = object@model
cat("\n\nStandard deviations:\n", model$sdev)
cat("\n\nRotation:\n"); print(model$rotation)
})
setMethod("show", "H2ONBModel", function(object) {
print(object@data@h2o)
cat("Parsed Data Key:", object@data@key, "\n\n")
cat("Naive Bayes Model Key:", object@key)
model = object@model
cat("\n\nA-priori probabilities:\n"); print(model$apriori_prob)
cat("\n\nConditional probabilities:\n"); print(model$tables)
})
setMethod("show", "H2OGBMModel", function(object) {
print(object@data@h2o)
cat("Parsed Data Key:", object@data@key, "\n\n")
cat("GBM Model Key:", object@key, "\n")
model = object@model
if(model$params$distribution %in% c("multinomial", "bernoulli")) {
cat("\nConfusion matrix:\n")
if(is.na(object@valid@key))
cat("Reported on", paste(object@model$params$nfolds, "-fold cross-validated data", sep = ""), "\n")
else
cat("Reported on", object@valid@key, "\n")
print(model$confusion)
if(!is.null(model$auc) && !is.null(model$gini))
cat("\nAUC:", model$auc, "\nGini:", model$gini, "\n")
}
if(!is.null(model$varimp)) {
cat("\nVariable importance:\n"); print(model$varimp)
}
cat("\nMean-squared Error by tree:\n"); print(model$err)
if(length(object@xval) > 0) {
cat("\nCross-Validation Models:\n")
print(sapply(object@xval, function(x) x@key))
}
})
setMethod("show", "H2OPerfModel", function(object) {
model = object@model
tmp = t(data.frame(model[-length(model)]))
if(object@perf == "mcc")
criterion = "MCC"
else
criterion = paste(toupper(substring(object@perf, 1, 1)), substring(object@perf, 2), sep = "")
rownames(tmp) = c("AUC", "Gini", paste("Best Cutoff for", criterion), "F1", "Accuracy", "Error", "Precision", "Recall", "Specificity", "MCC", "Max per Class Error")
colnames(tmp) = "Value"; print(tmp)
cat("\n\nConfusion matrix:\n"); print(model$confusion)
})
#--------------------------------- Unique H2O Methods ----------------------------------#
# TODO: s4 year, month impls as well?
h2o.year <- function(x) {
  # Extract the year from a date column as a new H2O frame. The server
  # returns years since 1900 (POSIXlt convention); the trailing subtraction
  # keeps that offset form for callers -- TODO confirm against the backend.
  #
  # @param x an H2OParsedData date column.
  # @return an H2OParsedData of year offsets.
  if(missing(x)) stop('must specify x')
  # inherits() replaces class(x) != ..., so subclasses of H2OParsedData pass.
  if(!inherits(x, 'H2OParsedData')) stop('x must be an H2OParsedData object')
  res1 <- .h2o.__unop2('year', x)
  .h2o.__binop2("-", res1, 1900)
}
h2o.month <- function(x){
  # Extract the month from a date column as a new H2O frame (server-side
  # numbering; presumably 0-11 per POSIXlt -- TODO confirm).
  #
  # @param x an H2OParsedData date column.
  # @return an H2OParsedData of month values.
  if(missing(x)) stop('must specify x')
  # inherits() replaces class(x) != ..., so subclasses of H2OParsedData pass.
  if(!inherits(x, 'H2OParsedData')) stop('x must be an H2OParsedData object')
  .h2o.__unop2('month', x)
}
# S3 generics so that year(x) / month(x) dispatch to the H2O implementations
# above when called on an H2OParsedData object.
year <- function(x) UseMethod('year', x)
year.H2OParsedData <- h2o.year
month <- function(x) UseMethod('month', x)
month.H2OParsedData <- h2o.month
diff.H2OParsedData <- function(x, lag = 1, differences = 1, ...) {
  # S3 method for diff() on H2O frames: lagged differences computed
  # server-side via the Exec2 API.
  #
  # @param x           an H2OParsedData column/frame.
  # @param lag         lag between elements that are differenced.
  # @param differences order of differencing.
  # @return an H2OParsedData handle for the differenced frame.
  if(!is.numeric(lag)) stop("lag must be numeric")
  if(!is.numeric(differences)) stop("differences must be numeric")
  expr <- paste("diff(", paste(x@key, lag, differences, sep = ","), ")", sep = "")
  res <- .h2o.__exec2(x@h2o, expr)
  res <- .h2o.exec2(res$dest_key, h2o = x@h2o, res$dest_key)
  res@logic <- FALSE
  # Bug fix: the function previously ended on the slot assignment above, so it
  # (invisibly) returned FALSE instead of the result frame.
  res
}
as.h2o <- function(client, object, key = "", header, sep = "") {
  # Push a local R object (numeric vector or data frame) into H2O.
  #
  # @param client H2OClient connection to push to.
  # @param object numeric vector or data.frame to transfer.
  # @param key    destination key; a temporary key is generated when empty.
  # @param header,sep forwarded to h2o.uploadFile for the CSV round-trip.
  # @return an H2OParsedData handle for the uploaded data.
  if(missing(client) || !inherits(client, "H2OClient")) stop("client must be a H2OClient object")
  if(missing(object) || !is.numeric(object) && !is.data.frame(object)) stop("object must be numeric or a data frame")
  if(!is.character(key)) stop("key must be of class character")
  if(missing(key) || nchar(key) == 0) {
    key <- paste(.TEMP_KEY, ".", .pkg.env$temp_count, sep="")
    .pkg.env$temp_count <- (.pkg.env$temp_count + 1) %% .RESULT_MAX
  }
  # TODO: Be careful, there might be a limit on how long a vector you can define in console
  if(is.numeric(object) && is.vector(object)) {
    # Small numeric vectors are pushed inline through the Exec2 expression API.
    res <- .h2o.__exec2_dest_key(client, paste("c(", paste(object, sep=',', collapse=","), ")", collapse=""), key)
    return(.h2o.exec2(res$dest_key, h2o = client, res$dest_key))
  } else {
    # Everything else round-trips through a temporary CSV upload.
    tmpf <- tempfile(fileext=".csv")
    # Fix: clean the temp file up even if the upload errors (it used to leak).
    on.exit(unlink(tmpf), add = TRUE)
    write.csv(object, file=tmpf, quote = TRUE, row.names = FALSE)
    h2f <- h2o.uploadFile(client, tmpf, key=key, header=header, sep=sep)
    return(h2f)
  }
}
h2o.exec <- function(expr_to_execute, h2o = NULL, dest_key = "") {
  # Evaluate an R expression remotely on the H2O cluster via the Exec2 API.
  # The unevaluated expression is captured, H2OParsedData references in it are
  # substituted with their server-side keys, and the result is materialized as
  # a new H2O frame (or returned as a plain scalar for 0x0 results).
  #
  # NOTE(review): relies heavily on package-global state in .pkg.env (SERVER,
  # DESTKEY, NEWCOL, FRAMEKEY, NUMCOLS, CURS4) that is presumably populated by
  # .replace_with_keys; not reentrancy-safe -- confirm before concurrent use.
  if (!is.null(h2o) && !.anyH2O(substitute(expr_to_execute), envir = parent.frame())) {
    # No H2O objects appear in the expression: ship its deparsed text directly.
    return(.h2o.exec2(h2o, deparse(substitute(expr_to_execute)), dest_key))
  }
  expr <- .replace_with_keys(substitute( expr_to_execute ), envir = parent.frame())
  res <- NULL
  if (dest_key != "") .pkg.env$DESTKEY <- dest_key
  if (.pkg.env$DESTKEY == "") {
    res <- .h2o.__exec2(.pkg.env$SERVER, deparse(expr))
  } else {
    res <- .h2o.__exec2_dest_key(.pkg.env$SERVER, deparse(expr), .pkg.env$DESTKEY)
  }
  if (.pkg.env$NEWCOL != "") {
    # The expression created a new column: push its name to the server.
    .h2o.__remoteSend(.pkg.env$SERVER, .h2o.__HACK_SETCOLNAMES2, source=.pkg.env$FRAMEKEY,
                      cols=.pkg.env$NUMCOLS, comma_separated_list=.pkg.env$NEWCOL)
  }
  # A 0x0 response means the server evaluated to a scalar, not a frame.
  if(res$num_rows == 0 && res$num_cols == 0)
    return(res$scalar)
  key <- res$dest_key
  if (.pkg.env$FRAMEKEY != "") {
    # The expression mutated an existing frame in place: rebind the caller's
    # variable to a fresh handle so their local object reflects the change.
    key <- as.character(.pkg.env$FRAMEKEY)
    newFrame <- .h2o.exec2(key, h2o = .pkg.env$SERVER, key)
    topCall <- sys.calls()[[1]]
    idxs <- which( "H2OParsedData" == unlist(lapply(as.list(topCall), .eval_class, envir=parent.frame())))
    obj_name <- as.character(.pkg.env$CURS4)
    if (length(idxs) != 0) obj_name <- as.character(topCall[[idxs]])[1]
    env <- .lookUp(obj_name)
    if (is.null(env)) {
      env <- parent.frame()
    }
    assign(obj_name, newFrame, env)
    return(newFrame)
  }
  .h2o.exec2(key, h2o = .pkg.env$SERVER, key)
}
h2o.cut <- function(x, breaks) {
  # Bin a numeric H2O column into intervals, mirroring base::cut, with the
  # work done server-side.
  #
  # @param x      an H2OParsedData column.
  # @param breaks a single bin count or a numeric vector of cut points.
  # @return an H2OParsedData of interval labels (or a scalar for 0x0 results).
  if(missing(x)) stop("Must specify data set")
  # NOTE(review): stop(cat(...)) prints the message itself and then raises an
  # empty error (cat returns NULL) -- kept as-is for behavior compatibility.
  if(!inherits(x, "H2OParsedData")) stop(cat("\nData must be an H2O data set. Got ", class(x), "\n"))
  if(missing(breaks) || !is.numeric(breaks)) stop("breaks must be a numeric vector")
  # Fix: ifelse() is vectorized and wrong for a scalar choice; plain if/else
  # selects between a bin count and an explicit c(...) vector of cut points.
  nums <- if(length(breaks) == 1) breaks
          else paste("c(", paste(breaks, collapse=","), ")", sep="")
  expr <- paste("cut(", x@key, ",", nums, ")", sep="")
  res <- .h2o.__exec2(x@h2o, expr)
  if(res$num_rows == 0 && res$num_cols == 0) # TODO: If logical operator, need to indicate
    return(res$scalar)
  .h2o.exec2(res$dest_key, h2o = x@h2o, res$dest_key)
}
# TODO: H2O doesn't support any arguments beyond the single H2OParsedData object (with <= 2 cols)
h2o.table <- function(x, return.in.R = FALSE) {
  # Cross-tabulate one or two H2O columns on the server, mirroring base::table.
  #
  # @param x           an H2OParsedData with at most two columns.
  # @param return.in.R when TRUE, pull the result locally and convert it to a
  #                    base R table object.
  # @return an H2OParsedData handle, or a base R table when return.in.R=TRUE.
  if(missing(x)) stop("Must specify data set")
  if(!inherits(x, "H2OParsedData")) stop(cat("\nData must be an H2O data set. Got ", class(x), "\n"))
  if(ncol(x) > 2) stop("Unimplemented")
  tb <- .h2o.__unop2("table", x)
  if(return.in.R) {
    df <- as.data.frame(tb)
    # A "Count" column indicates long format: rebuild the contingency table
    # via xtabs. Otherwise the frame is already a wide matrix of counts.
    if(!is.null(df$Count))
      return(xtabs(Count ~ ., data = df))
    rownames(df) <- df$'row.names'
    df$'row.names' <- NULL
    tb <- as.table(as.matrix(df))
    # TODO: Dimension names should be the names of the columns containing the cross-classifying factors
    dimnames(tb) <- list("row.levels" = rownames(tb), "col.levels" = colnames(tb))
  }
  return(tb)
}
h2o.ddply <- function (.data, .variables, .fun = NULL, ..., .progress = 'none') {
  # Split-apply-combine on an H2O frame, modeled on plyr::ddply: group rows of
  # .data by the columns in .variables and apply .fun to each group on the
  # server.
  #
  # @param .data      an H2OParsedData frame.
  # @param .variables group columns, given as .(col1, col2), c("col1","col2"),
  #                   a plyr "quoted" object, or numeric column indices.
  # @param .fun       function to apply per group; anonymous functions are
  #                   shipped to the cluster under the name "anonymous".
  # @param .progress  accepted for plyr compatibility; unused here.
  # @return an H2OParsedData handle for the grouped result.
  if(missing(.data)) stop('must specify .data')
  if(class(.data) != "H2OParsedData") stop('.data must be an H2OParsedData object')
  if( missing(.variables) ) stop('must specify .variables')
  if( missing(.fun) ) stop('must specify .fun')
  mm <- match.call()
  # we accept eg .(col1, col2), c('col1', 'col2'), 1:2, c(1,2)
  # as column names. This is a bit complicated
  # NOTE(review): if .variables is none of the classes below, vars/idx are
  # never defined and the is.na(idx) check errors with a confusing message.
  if( class(.variables) == 'character'){
    vars <- .variables
    idx <- match(vars, colnames(.data))
  } else if( class(.variables) == 'H2Oquoted' ){
    vars <- as.character(.variables)
    idx <- match(vars, colnames(.data))
  } else if( class(.variables) == 'quoted' ){ # plyr overwrote our . fn
    vars <- names(.variables)
    idx <- match(vars, colnames(.data))
  } else if( class(.variables) == 'integer' ){
    vars <- .variables
    idx <- .variables
  } else if( class(.variables) == 'numeric' ){ # this will happen eg c(1,2,3)
    vars <- .variables
    idx <- as.integer(.variables)
  }
  # Validate that every resolved column index is in range.
  bad <- is.na(idx) | idx < 1 | idx > ncol(.data)
  if( any(bad) ) stop( sprintf('can\'t recognize .variables %s', paste(vars[bad], sep=',')) )
  fun_name <- mm[[ '.fun' ]]
  # An inline anonymous function has no server-side name; register it first.
  if(identical(as.list(substitute(.fun))[[1]], quote(`function`))) {
    h2o.addFunction(.data@h2o, .fun, "anonymous")
    fun_name <- "anonymous"
  }
  exec_cmd <- sprintf('ddply(%s,c(%s),%s)', .data@key, paste(idx, collapse=','), as.character(fun_name))
  res <- .h2o.__exec2(.data@h2o, exec_cmd)
  .h2o.exec2(res$dest_key, h2o = .data@h2o, res$dest_key)
}
# Expose ddply() at top level; this masks plyr::ddply when both are attached.
ddply <- h2o.ddply
# TODO: how to avoid masking plyr?
# Quoting helper mirroring plyr's `.` function: captures its arguments
# unevaluated and tags them with class 'H2Oquoted' so h2o.ddply can resolve
# them as column names (e.g. .(col1, col2)).
`h2o..` <- function(...) {
  mm <- match.call()
  mm <- mm[-1]
  structure( as.list(mm), class='H2Oquoted')
}
`.` <- `h2o..`
h2o.addFunction <- function(object, fun, name){
  # Ship an R function definition to the H2O cluster so it can be referenced
  # by name in server-side expressions (e.g. from h2o.ddply).
  #
  # @param object H2OClient connection.
  # @param fun    R function to deparse and send.
  # @param name   optional name to register the function under; defaults to
  #               the expression used for `fun` at the call site.
  if( missing(object) || !inherits(object, 'H2OClient') ) stop('must specify h2o connection in object')
  if( missing(fun) ) stop('must specify fun')
  if( !missing(name) ){
    if( !is.character(name) ) stop('name must be a name')
    fun_name <- name
  } else {
    fun_name <- match.call()[['fun']]
  }
  src <- paste(deparse(fun), collapse='\n')
  exec_cmd <- sprintf('%s <- %s', as.character(fun_name), src)
  # Return the server response invisibly (matches the old `res <- ...` tail).
  invisible(.h2o.__exec2(object, exec_cmd))
}
h2o.unique <- function(x, incomparables = FALSE, ...){
  # Return the unique rows of an H2O frame, computed server-side.
  #
  # @param x             an H2OParsedData frame.
  # @param incomparables accepted for base::unique compatibility; ignored.
  # @param ...           only MARGIN is inspected; values other than 2 error.
  # @return an H2OParsedData of unique rows, or NULL for an empty frame.
  # NB: we do nothing with incomparables right now
  # NB: we only support MARGIN = 2 (which is the default)
  if(class(x) != "H2OParsedData")
    stop('h2o.unique: x must be an H2OParsedData object')
  if( nrow(x) == 0 | ncol(x) == 0) return(NULL)
  # A single row is trivially unique; skip the server round-trip.
  if( nrow(x) == 1) return(x)
  args <- list(...)
  if( 'MARGIN' %in% names(args) && args[['MARGIN']] != 2 ) stop('h2o.unique: only MARGIN 2 supported')
  .h2o.__unop2("unique", x)
  # uniq <- function(df){1}
  # h2o.addFunction(l, uniq)
  # res <- h2o.ddply(x, 1:ncol(x), uniq)
  #
  # res[,1:(ncol(res)-1)]
}
# S3 method so base::unique dispatches to the H2O implementation.
unique.H2OParsedData <- h2o.unique
h2o.runif <- function(x, min = 0, max = 1, seed = -1) {
  # Create a column of uniform random numbers in [min, max], one per row of x,
  # generated server-side (commonly used for train/test splits).
  #
  # @param x    an H2OParsedData frame supplying the row count.
  # @param min,max scalar bounds of the uniform distribution.
  # @param seed RNG seed; -1 asks the server to pick one.
  # @return an H2OParsedData column of random values.
  if(missing(x)) stop("Must specify data set")
  if(class(x) != "H2OParsedData") stop(cat("\nData must be an H2O data set. Got ", class(x), "\n"))
  if(!is.numeric(min)) stop("min must be a single number")
  if(!is.numeric(max)) stop("max must be a single number")
  if(length(min) > 1 || length(max) > 1) stop("Unimplemented")
  if(min > max) stop("min must be a number less than or equal to max")
  # NOTE(review): the message promises an integer >= 0, but only numeric-ness
  # is checked and the default is -1 (server-chosen seed) -- confirm intent.
  if(!is.numeric(seed)) stop("seed must be an integer >= 0")
  # Server generates U(0,1); scale and shift locally into [min, max].
  expr = paste("runif(", x@key, ",", seed, ")*(", max - min, ")+", min, sep = "")
  res = .h2o.__exec2(x@h2o, expr)
  if(res$num_rows == 0 && res$num_cols == 0)
    return(res$scalar)
  else {
    res <- .h2o.exec2(res$dest_key, h2o = x@h2o, res$dest_key)
    # The result is numeric data, not a logical mask.
    res@logic <- FALSE
    return(res)
  }
}
h2o.anyFactor <- function(x) {
  # Report whether any column of the frame is a factor (enum), using the
  # flag cached on the client-side object rather than a server call.
  if(class(x) != "H2OParsedData") stop("x must be an H2OParsedData object")
  x@any_enum
  # as.logical(.h2o.__unop2("any.factor", x))
}
setMethod("colnames", "H2OParsedData", function(x, do.NULL = TRUE, prefix = "col") {
x@col_names
})
#--------------------------------- Overloaded R Methods ----------------------------------#
#--------------------------------- Slicing ----------------------------------#
# i are the rows, j are the columns. These can be vectors of integers or character strings, or a single logical data object
setMethod("[", "H2OParsedData", function(x, i, j, ..., drop = TRUE) {
numRows <- nrow(x); numCols <- ncol(x)
if (!missing(j) && is.numeric(j) && any(abs(j) < 1 || abs(j) > numCols))
stop("Array index out of bounds")
if(missing(i) && missing(j)) return(x)
if(missing(i) && !missing(j)) {
if(is.character(j)) {
# return(do.call("$", c(x, j)))
myCol <- colnames(x)
if(any(!(j %in% myCol))) stop("Undefined columns selected")
j <- match(j, myCol)
}
# if(is.logical(j)) j = -which(!j)
if(is.logical(j)) j <- which(j)
# if(class(j) == "H2OLogicalData")
if(class(j) == "H2OParsedData" && j@logic)
expr <- paste(x@key, "[", j@key, ",]", sep="")
else if(is.numeric(j) || is.integer(j))
expr <- paste(x@key, "[,c(", paste(j, collapse=","), ")]", sep="")
else stop(paste("Column index of type", class(j), "unsupported!"))
} else if(!missing(i) && missing(j)) {
# treat `i` as a column selector in this case...
if (is.character(i)) {
myCol <- colnames(x)
if (any(!(i %in% myCol))) stop ("Undefined columns selected")
i <- match(i, myCol)
if(is.logical(i)) i <- which(i)
if(class(i) == "H2OParsedData" && i@logic)
expr <- paste(x@key, "[", i@key, ",]", sep="")
else if(is.numeric(i) || is.integer(i))
expr <- paste(x@key, "[,c(", paste(i, collapse=","), ")]", sep="")
else stop(paste("Column index of type", class(i), "unsupported!"))
} else {
# if(is.logical(i)) i = -which(!i)
if(is.logical(i)) i = which(i)
# if(class(i) == "H2OLogicalData")
if(class(i) == "H2OParsedData" && i@logic)
expr <- paste(x@key, "[", i@key, ",]", sep="")
else if(is.numeric(i) || is.integer(i))
expr <- paste(x@key, "[c(", paste(i, collapse=","), "),]", sep="")
else stop(paste("Row index of type", class(i), "unsupported!"))
}
} else {
# if(is.logical(i)) i = -which(!i)
if(is.logical(i)) i <- which(i)
# if(class(i) == "H2OLogicalData") rind = i@key
if(class(i) == "H2OParsedData" && i@logic) rind = i@key
else if(is.numeric(i) || is.integer(i))
rind <- paste("c(", paste(i, collapse=","), ")", sep="")
else stop(paste("Row index of type", class(i), "unsupported!"))
if(is.character(j)) {
# return(do.call("$", c(x, j)))
myCol <- colnames(x)
if(any(!(j %in% myCol))) stop("Undefined columns selected")
j <- match(j, myCol)
}
# if(is.logical(j)) j = -which(!j)
if(is.logical(j)) j <- which(j)
# if(class(j) == "H2OLogicalData") cind = j@key
if(class(j) == "H2OParsedData" && j@logic) cind <- j@key
else if(is.numeric(j) || is.integer(j))
cind <- paste("c(", paste(j, collapse=","), ")", sep="")
else stop(paste("Column index of type", class(j), "unsupported!"))
expr <- paste(x@key, "[", rind, ",", cind, "]", sep="")
}
res <- .h2o.__exec2(x@h2o, expr)
if(res$num_rows == 0 && res$num_cols == 0)
res$scalar
else
.h2o.exec2(res$dest_key, h2o = x@h2o, res$dest_key)
})
setMethod("$", "H2OParsedData", function(x, name) {
myNames <- colnames(x)
# if(!(name %in% myNames)) return(NULL)
if(!(name %in% myNames)) stop(paste("Column", name, "does not exist!"))
cind <- match(name, myNames)
expr <- paste(x@key, "[,", cind, "]", sep="")
res <- .h2o.__exec2(x@h2o, expr)
if(res$num_rows == 0 && res$num_cols == 0)
res$scalar
else
.h2o.exec2(res$dest_key, h2o = x@h2o, res$dest_key)
})
setMethod("[<-", "H2OParsedData", function(x, i, j, ..., value) {
numRows = nrow(x); numCols = ncol(x)
# if((!missing(i) && is.numeric(i) && any(abs(i) < 1 || abs(i) > numRows)) ||
# (!missing(j) && is.numeric(j) && any(abs(j) < 1 || abs(j) > numCols)))
# stop("Array index out of bounds!")
if(!(missing(i) || is.numeric(i) || is.character(i)) || !(missing(j) || is.numeric(j) || is.character(j)))
stop("Row/column types not supported!")
if(class(value) != "H2OParsedData" && !is.numeric(value))
stop("value can only be numeric or an H2OParsedData object")
if(is.numeric(value) && length(value) != 1 && length(value) != numRows)
stop("value must be either a single number or a vector of length ", numRows)
if(!missing(i) && is.numeric(i)) {
if(any(i == 0)) stop("Array index out of bounds")
if(any(i < 0 && abs(i) > numRows)) stop("Unimplemented: can't extend rows")
if(min(i) > numRows+1) stop("new rows would leave holes after existing rows")
}
if(!missing(j) && is.numeric(j)) {
if(any(j == 0)) stop("Array index out of bounds")
if(any(j < 0 && abs(j) > numCols)) stop("Unimplemented: can't extend columns")
if(min(j) > numCols+1) stop("new columns would leaves holes after existing columns")
}
if(missing(i) && missing(j))
lhs <- x@key
else if(missing(i) && !missing(j)) {
if(is.character(j)) {
myNames <- colnames(x)
if(any(!(j %in% myNames))) {
if(length(j) == 1)
return(do.call("$<-", list(x, j, value)))
else stop("Unimplemented: undefined column names specified")
}
cind <- match(j, myNames)
} else cind <- j
cind <- paste("c(", paste(cind, collapse = ","), ")", sep = "")
lhs <- paste(x@key, "[,", cind, "]", sep = "")
} else if(!missing(i) && missing(j)) {
# treat `i` as a column selector in this case...
if (is.character(i)) {
myNames <- colnames(x)
if (any(!(i %in% myNames))) {
if (length(i) == 1) return(do.call("$<-", list(x, i, value)))
else stop("Unimplemented: undefined column names specified")
}
cind <- match(i, myNames)
cind <- paste("c(", paste(cind, collapse = ","), ")", sep = "")
lhs <- paste(x@key, "[,", cind, "]", sep = "")
} else {
rind <- paste("c(", paste(i, collapse = ","), ")", sep = "")
lhs <- paste(x@key, "[", rind, ",]", sep = "")
}
} else {
if(is.character(j)) {
myNames <- colnames(x)
if(any(!(j %in% myNames))) stop("Unimplemented: undefined column names specified")
cind <- match(j, myNames)
# cind = match(j[j %in% myNames], myNames)
} else cind <- j
cind <- paste("c(", paste(cind, collapse = ","), ")", sep = "")
rind <- paste("c(", paste(i, collapse = ","), ")", sep = "")
lhs <- paste(x@key, "[", rind, ",", cind, "]", sep = "")
}
# rhs = ifelse(class(value) == "H2OParsedData", value@key, paste("c(", paste(value, collapse = ","), ")", sep=""))
if(class(value) == "H2OParsedData")
rhs <- value@key
else
rhs <- ifelse(length(value) == 1, value, paste("c(", paste(value, collapse = ","), ")", sep=""))
res <- .h2o.__exec2(x@h2o, paste(lhs, "=", rhs))
.h2o.exec2(x@key, h2o = x@h2o, x@key)
})
setMethod("$<-", "H2OParsedData", function(x, name, value) {
if(missing(name) || !is.character(name) || nchar(name) == 0)
stop("name must be a non-empty string")
if(class(value) != "H2OParsedData" && !is.numeric(value))
stop("value can only be numeric or an H2OParsedData object")
numCols <- ncol(x); numRows <- nrow(x)
if(is.numeric(value) && length(value) != 1 && length(value) != numRows)
stop("value must be either a single number or a vector of length ", numRows)
myNames <- colnames(x); idx <- match(name, myNames)
lhs <- paste(x@key, "[,", ifelse(is.na(idx), numCols+1, idx), "]", sep = "")
# rhs = ifelse(class(value) == "H2OParsedData", value@key, paste("c(", paste(value, collapse = ","), ")", sep=""))
if(class(value) == "H2OParsedData")
rhs <- value@key
else
rhs <- ifelse(length(value) == 1, value, paste("c(", paste(value, collapse = ","), ")", sep=""))
.h2o.__exec2(x@h2o, paste(lhs, "=", rhs))
if(is.na(idx))
.h2o.__remoteSend(x@h2o, .h2o.__HACK_SETCOLNAMES2, source=x@key, cols=numCols, comma_separated_list=name)
.h2o.exec2(x@key, h2o = x@h2o, x@key)
})
setMethod("[[", "H2OParsedData", function(x, i, exact = TRUE) {
if(missing(i)) return(x)
if(length(i) > 1) stop("[[]] may only select one column")
if(!i %in% colnames(x) ) { warning(paste("Column", i, "does not exist!")); return(NULL) }
x[, i]
})
setMethod("[[<-", "H2OParsedData", function(x, i, value) {
if(class(value) != "H2OParsedData") stop('Can only append H2O data to H2O data')
if( ncol(value) > 1 ) stop('May only set a single column')
if( nrow(value) != nrow(x) ) stop(sprintf('Replacement has %d row, data has %d', nrow(value), nrow(x)))
mm <- match.call()
col_name <- as.list(i)[[1]]
cc <- colnames(x)
if( col_name %in% cc ){
x[, match( col_name, cc ) ] <- value
} else {
x <- cbind(x, value)
cc <- c( cc, col_name )
colnames(x) <- cc
}
x
})
# Note: right now, all things must be H2OParsedData
cbind.H2OParsedData <- function(..., deparse.level = 1) {
  # S3 method for cbind() on H2O frames: columns are bound server-side.
  # All arguments must be H2OParsedData objects on the same H2O instance
  # with identical row counts.
  if(deparse.level != 1) stop("Unimplemented")
  l <- list(...)
  # l_dep <- sapply(substitute(placeholderFunction(...))[-1], deparse)
  if(length(l) == 0) stop('cbind requires an H2O parsed dataset')
  klass <- 'H2OParsedData'
  h2o <- l[[1]]@h2o
  nrows <- nrow(l[[1]])
  # Fixes: TRUE spelled out instead of `T`, and && short-circuits so a
  # non-H2O element fails the inherits() test before its @h2o slot is
  # touched (the old elementwise `&` version raised a cryptic slot error
  # instead of the friendly message below).
  compatible <- all(vapply(l, function(elem) {
    inherits(elem, klass) && elem@h2o@ip == h2o@ip &&
      elem@h2o@port == h2o@port && nrows == nrow(elem)
  }, logical(1)))
  if(!compatible){ stop(paste('cbind: all elements must be of type', klass, 'and in the same H2O instance'))}
  # If cbind(x,x), dupe colnames will automatically be renamed by H2O
  # TODO: cbind(df[,1], df[,2]) should retain colnames of original data frame (not temp keys from slice)
  # Named arguments become "name=key" pairs so the server assigns the name.
  if(is.null(names(l)))
    tmp <- Map(function(x) x@key, l)
  else
    tmp <- mapply(function(x,n) { if(is.null(n) || is.na(n) || nchar(n) == 0) x@key else paste(n, x@key, sep = "=") }, l, names(l))
  exec_cmd <- sprintf("cbind(%s)", paste(as.vector(tmp), collapse = ","))
  res <- .h2o.__exec2(h2o, exec_cmd)
  .h2o.exec2(res$dest_key, h2o = h2o, res$dest_key)
}
#--------------------------------- Arithmetic ----------------------------------#
setMethod("+", c("H2OParsedData", "missing"), function(e1, e2) { .h2o.__binop2("+", 0, e1) })
setMethod("-", c("H2OParsedData", "missing"), function(e1, e2) { .h2o.__binop2("-", 0, e1) })
setMethod("+", c("H2OParsedData", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("+", e1, e2) })
setMethod("-", c("H2OParsedData", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("-", e1, e2) })
setMethod("*", c("H2OParsedData", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("*", e1, e2) })
setMethod("/", c("H2OParsedData", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("/", e1, e2) })
setMethod("%%", c("H2OParsedData", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("%", e1, e2) })
setMethod("==", c("H2OParsedData", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("==", e1, e2) })
setMethod(">", c("H2OParsedData", "H2OParsedData"), function(e1, e2) { .h2o.__binop2(">", e1, e2) })
setMethod("<", c("H2OParsedData", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("<", e1, e2) })
setMethod("!=", c("H2OParsedData", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("!=", e1, e2) })
setMethod(">=", c("H2OParsedData", "H2OParsedData"), function(e1, e2) { .h2o.__binop2(">=", e1, e2) })
setMethod("<=", c("H2OParsedData", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("<=", e1, e2) })
setMethod("&", c("H2OParsedData", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("&", e1, e2) })
setMethod("|", c("H2OParsedData", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("|", e1, e2) })
setMethod("%*%", c("H2OParsedData", "H2OParsedData"), function(x, y) { .h2o.__binop2("%*%", x, y) })
setMethod("+", c("numeric", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("+", e1, e2) })
setMethod("-", c("numeric", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("-", e1, e2) })
setMethod("*", c("numeric", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("*", e1, e2) })
setMethod("/", c("numeric", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("/", e1, e2) })
setMethod("%%", c("numeric", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("%", e1, e2) })
setMethod("==", c("numeric", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("==", e1, e2) })
setMethod(">", c("numeric", "H2OParsedData"), function(e1, e2) { .h2o.__binop2(">", e1, e2) })
setMethod("<", c("numeric", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("<", e1, e2) })
setMethod("!=", c("numeric", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("!=", e1, e2) })
setMethod(">=", c("numeric", "H2OParsedData"), function(e1, e2) { .h2o.__binop2(">=", e1, e2) })
setMethod("<=", c("numeric", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("<=", e1, e2) })
setMethod("&", c("numeric", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("&", e1, e2) })
setMethod("|", c("numeric", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("|", e1, e2) })
setMethod("+", c("H2OParsedData", "numeric"), function(e1, e2) { .h2o.__binop2("+", e1, e2) })
setMethod("-", c("H2OParsedData", "numeric"), function(e1, e2) { .h2o.__binop2("-", e1, e2) })
setMethod("*", c("H2OParsedData", "numeric"), function(e1, e2) { .h2o.__binop2("*", e1, e2) })
setMethod("/", c("H2OParsedData", "numeric"), function(e1, e2) { .h2o.__binop2("/", e1, e2) })
setMethod("%%", c("H2OParsedData", "numeric"), function(e1, e2) { .h2o.__binop2("%", e1, e2) })
setMethod("==", c("H2OParsedData", "numeric"), function(e1, e2) { .h2o.__binop2("==", e1, e2) })
setMethod(">", c("H2OParsedData", "numeric"), function(e1, e2) { .h2o.__binop2(">", e1, e2) })
setMethod("<", c("H2OParsedData", "numeric"), function(e1, e2) { .h2o.__binop2("<", e1, e2) })
setMethod("!=", c("H2OParsedData", "numeric"), function(e1, e2) { .h2o.__binop2("!=", e1, e2) })
setMethod(">=", c("H2OParsedData", "numeric"), function(e1, e2) { .h2o.__binop2(">=", e1, e2) })
setMethod("<=", c("H2OParsedData", "numeric"), function(e1, e2) { .h2o.__binop2("<=", e1, e2) })
setMethod("&", c("H2OParsedData", "numeric"), function(e1, e2) { .h2o.__binop2("&", e1, e2) })
setMethod("|", c("H2OParsedData", "numeric"), function(e1, e2) { .h2o.__binop2("|", e1, e2) })
setMethod("&", c("logical", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("&", as.numeric(e1), e2) })
setMethod("|", c("logical", "H2OParsedData"), function(e1, e2) { .h2o.__binop2("|", as.numeric(e1), e2) })
setMethod("&", c("H2OParsedData", "logical"), function(e1, e2) { .h2o.__binop2("&", e1, as.numeric(e2)) })
setMethod("|", c("H2OParsedData", "logical"), function(e1, e2) { .h2o.__binop2("|", e1, as.numeric(e2)) })
setMethod("%/%", c("numeric", "H2OParsedData"), function(e1, e2) {.h2o.__binop2("%/%", as.numeric(e1), e2) })
setMethod("%/%", c("H2OParsedData", "numeric"), function(e1, e2){ .h2o.__binop2("%/%", e1, as.numeric(e2)) })
setMethod("^", c("numeric", "H2OParsedData"), function(e1, e2) {.h2o.__binop2("^", as.numeric(e1), e2) })
setMethod("^", c("H2OParsedData", "numeric"), function(e1, e2){ .h2o.__binop2("^", e1, as.numeric(e2)) })
#'
#' Get the domain mapping of an int and a String
#'
.getDomainMapping <- function(vec, s="") {
  # Look up the integer level code that string `s` maps to within the factor
  # column `vec` (used to translate factor == "level" comparisons).
  #
  # @param vec an H2OParsedData factor column.
  # @param s   the level string to resolve.
  # @return the server response; $map holds the integer code.
  # Fix: the error used to pass the S4 object itself to stop(), which cannot
  # be coerced to character and masked the message; report its class instead.
  if(!inherits(vec, "H2OParsedData"))
    stop("Object must be a H2OParsedData object. Input was: ", class(vec))
  .h2o.__remoteSend(vec@h2o, .h2o.__DOMAIN_MAPPING, src_key = vec@key, str = s)
}
setMethod("==", c("H2OParsedData", "character"), function(e1, e2) {
m <- .getDomainMapping(e1,e2)$map
.h2o.__binop2("==", e1, m)
})
setMethod("==", c("character", "H2OParsedData"), function(e1, e2) {
m <- .getDomainMapping(e2,e1)$map
.h2o.__binop2("==", m, e2)
})
setMethod("!", "H2OParsedData", function(x) { .h2o.__unop2("!", x) })
setMethod("abs", "H2OParsedData", function(x) { .h2o.__unop2("abs", x) })
setMethod("sign", "H2OParsedData", function(x) { .h2o.__unop2("sgn", x) })
setMethod("sqrt", "H2OParsedData", function(x) { .h2o.__unop2("sqrt", x) })
setMethod("ceiling", "H2OParsedData", function(x) { .h2o.__unop2("ceil", x) })
setMethod("floor", "H2OParsedData", function(x) { .h2o.__unop2("floor", x) })
setMethod("trunc", "H2OParsedData", function(x) { .h2o.__unop2("trunc", x) })
setMethod("log", "H2OParsedData", function(x) { .h2o.__unop2("log", x) })
setMethod("exp", "H2OParsedData", function(x) { .h2o.__unop2("exp", x) })
setMethod("is.na", "H2OParsedData", function(x) { .h2o.__unop2("is.na", x) })
setMethod("t", "H2OParsedData", function(x) { .h2o.__unop2("t", x) })
round.H2OParsedData <- function(x, digits = 0) {
  # S3 method for round() on an H2O frame, evaluated server-side.
  if(length(digits) > 1 || !is.numeric(digits)) stop("digits must be a single number")
  # NOTE(review): negative digits are re-encoded as 10^(-digits) before being
  # sent; presumably the server protocol expresses "round to tens/hundreds"
  # this way -- confirm against the Exec2 round implementation.
  if(digits < 0) digits = 10^(-digits)
  expr <- paste("round(", paste(x@key, digits, sep = ","), ")", sep = "")
  res <- .h2o.__exec2(x@h2o, expr)
  # A 0x0 response means the result reduced to a scalar.
  if(res$num_rows == 0 && res$num_cols == 0)
    return(res$scalar)
  .h2o.exec2(expr = res$dest_key, h2o = x@h2o, dest_key = res$dest_key)
}
# Assign column names by copying them from another H2O frame of the same width.
setMethod("colnames<-", signature(x="H2OParsedData", value="H2OParsedData"),
  function(x, value) {
    if(ncol(value) != ncol(x)) stop("Mismatched number of columns")
    # Server-side rename; the local slot is then updated to mirror it.
    res <- .h2o.__remoteSend(x@h2o, .h2o.__HACK_SETCOLNAMES2, source=x@key, copy_from=value@key)
    x@col_names <- value@col_names
    return(x)
  })
# Assign column names from a character vector; names must be non-empty,
# unique, and match the frame's column count.
setMethod("colnames<-", signature(x="H2OParsedData", value="character"),
  function(x, value) {
    if(any(nchar(value) == 0)) stop("Column names must be of non-zero length")
    else if(any(duplicated(value))) stop("Column names must be unique")
    else if(length(value) != (num = ncol(x))) stop(paste("Must specify a vector of exactly", num, "column names"))
    res <- .h2o.__remoteSend(x@h2o, .h2o.__HACK_SETCOLNAMES2, source=x@key, comma_separated_list=value)
    x@col_names <- value
    return(x)
  })
# names()/names<- are aliases for colnames()/colnames<- on H2O frames.
setMethod("names", "H2OParsedData", function(x) { colnames(x) })
setMethod("names<-", "H2OParsedData", function(x, value) { colnames(x) <- value; return(x) })
# setMethod("nrow", "H2OParsedData", function(x) { .h2o.__unop2("nrow", x) })
# setMethod("ncol", "H2OParsedData", function(x) { .h2o.__unop2("ncol", x) })
# Row/column counts come from slots cached at parse time, avoiding a
# round-trip to the server (the commented-out versions queried H2O).
setMethod("nrow", "H2OParsedData", function(x) {
  x@nrows
})
# res = .h2o.__remoteSend(x@h2o, .h2o.__PAGE_INSPECT2, src_key=x@key); as.numeric(res$numRows) })
setMethod("ncol", "H2OParsedData", function(x) {
  x@ncols
  # res = .h2o.__remoteSend(x@h2o, .h2o.__PAGE_INSPECT2, src_key=x@key); as.numeric(res$numCols)
})
# length() on an H2O frame mimics base R semantics: the row count for a
# single-column (vector-like) frame, otherwise the column count.
setMethod("length", "H2OParsedData", function(x) {
  n_cols <- ncol(x)
  if (n_cols == 1) nrow(x) else n_cols
})
# dim() returns the cached (rows, cols) pair; reshaping is not supported.
setMethod("dim", "H2OParsedData", function(x) {
  c(x@nrows, x@ncols)
  # res = .h2o.__remoteSend(x@h2o, .h2o.__PAGE_INSPECT2, src_key=x@key)
  # as.numeric(c(res$numRows, res$numCols))
})
setMethod("dim<-", "H2OParsedData", function(x, value) { stop("Unimplemented") })
# setMethod("min", "H2OParsedData", function(x, ..., na.rm = FALSE) {
# if(na.rm) stop("Unimplemented")
# # res = .h2o.__remoteSend(x@h2o, .h2o.__PAGE_INSPECT2, src_key=x@key)
# # min(..., sapply(res$cols, function(x) { x$min }), na.rm)
# min(..., .h2o.__unop2("min", x), na.rm)
# })
#
# setMethod("max", "H2OParsedData", function(x, ..., na.rm = FALSE) {
# if(na.rm) stop("Unimplemented")
# # res = .h2o.__remoteSend(x@h2o, .h2o.__PAGE_INSPECT2, src_key=x@key)
# # max(..., sapply(res$cols, function(x) { x$max }), na.rm)
# max(..., .h2o.__unop2("max", x), na.rm)
# })
# Preserve a handle to the base implementation before masking it.
.min_internal <- min

# min() that also accepts H2OParsedData arguments. H2O frames are reduced
# server-side in a single exec expression; any remaining plain R values are
# then combined with the H2O scalar via the base primitive.
min <- function(..., na.rm = FALSE) {
  args <- c(...)
  is.h2o <- vapply(args, function(y) inherits(y, "H2OParsedData"), logical(1))
  if (any(is.h2o)) {
    # H2O spells NA-tolerant reduction as a distinct opcode.
    hex.op <- if (na.rm) "min.na.rm" else "min"
    h2o.args <- args[is.h2o]
    keys <- vapply(h2o.args, function(y) y@key, character(1))
    expr <- paste0(hex.op, "(", paste(keys, collapse = ","), ")")
    res <- .h2o.__exec2(h2o.args[[1]]@h2o, expr)
    .Primitive("min")(unlist(args[!is.h2o]), res$scalar, na.rm = na.rm)
  } else
    .Primitive("min")(..., na.rm = na.rm)
}
# Preserve a handle to the base implementation before masking it.
.max_internal <- max

# max() that also accepts H2OParsedData arguments; mirrors the masked min()
# above (H2O frames reduced server-side, R values folded in afterwards).
max <- function(..., na.rm = FALSE) {
  args <- c(...)
  is.h2o <- vapply(args, function(y) inherits(y, "H2OParsedData"), logical(1))
  if (any(is.h2o)) {
    hex.op <- if (na.rm) "max.na.rm" else "max"
    h2o.args <- args[is.h2o]
    keys <- vapply(h2o.args, function(y) y@key, character(1))
    expr <- paste0(hex.op, "(", paste(keys, collapse = ","), ")")
    res <- .h2o.__exec2(h2o.args[[1]]@h2o, expr)
    .Primitive("max")(unlist(args[!is.h2o]), res$scalar, na.rm = na.rm)
  } else
    .Primitive("max")(..., na.rm = na.rm)
}
# Preserve a handle to the base implementation before masking it.
.sum_internal <- sum

# sum() that also accepts H2OParsedData arguments; mirrors the masked min()
# and max() above.
sum <- function(..., na.rm = FALSE) {
  args <- c(...)
  is.h2o <- vapply(args, function(y) inherits(y, "H2OParsedData"), logical(1))
  if (any(is.h2o)) {
    hex.op <- if (na.rm) "sum.na.rm" else "sum"
    h2o.args <- args[is.h2o]
    keys <- vapply(h2o.args, function(y) y@key, character(1))
    expr <- paste0(hex.op, "(", paste(keys, collapse = ","), ")")
    res <- .h2o.__exec2(h2o.args[[1]]@h2o, expr)
    .Primitive("sum")(unlist(args[!is.h2o]), res$scalar, na.rm = na.rm)
  } else
    .Primitive("sum")(..., na.rm = na.rm)
}
# range() inspects per-column min/max from the server and returns the
# overall c(min, max) across all columns of the frame.
setMethod("range", "H2OParsedData", function(x) {
  res = .h2o.__remoteSend(x@h2o, .h2o.__PAGE_INSPECT2, src_key=x@key)
  # Row 1 holds each column's min, row 2 its max.
  temp = sapply(res$cols, function(x) { c(x$min, x$max) })
  c(min(temp[1,]), max(temp[2,]))
})
# Mean of a single-column H2O frame, computed server-side. Multi-column
# input and trimmed means are not supported.
mean.H2OParsedData <- function(x, trim = 0, na.rm = FALSE, ...) {
  if(ncol(x) != 1 || trim != 0) stop("Unimplemented")
  # A factor column has no numeric mean; mirror base mean()'s NA + warning.
  # (The original also re-tested dim(x)[2] != 1 here, which is unreachable
  # after the ncol() guard above, so that dead check is dropped.)
  if(h2o.anyFactor(x)) {
    warning("argument is not numeric or logical: returning NA")
    return(NA_real_)
  }
  # Match base semantics: any NA with na.rm = FALSE yields NA.
  if(!na.rm && .h2o.__unop2("any.na", x)) return(NA)
  .h2o.__unop2("mean", x)
}
# Standard deviation of a single numeric H2O column, computed server-side.
setMethod("sd", "H2OParsedData", function(x, na.rm = FALSE) {
  if(ncol(x) != 1) stop("Unimplemented")
  # NOTE(review): dim(x)[2] != 1 is unreachable after the ncol() guard
  # above; only the factor check is live in this condition.
  if(dim(x)[2] != 1 || h2o.anyFactor(x)) stop("Could not coerce argument to double. H2O sd requires a single numeric column.")
  # Mirror base sd(): any NA with na.rm = FALSE yields NA.
  if(!na.rm && .h2o.__unop2("any.na", x)) return(NA)
  .h2o.__unop2("sd", x)
})
# Variance of a numeric H2O frame; covariance (y) and the 'use' argument
# are not supported.
setMethod("var", "H2OParsedData", function(x, y = NULL, na.rm = FALSE, use) {
  if(!is.null(y) || !missing(use)) stop("Unimplemented")
  if(h2o.anyFactor(x)) stop("x cannot contain any categorical columns")
  if(!na.rm && .h2o.__unop2("any.na", x)) return(NA)
  .h2o.__unop2("var", x)
})
# Download an H2O frame as CSV over HTTP and parse it into a local R
# data.frame. Extra arguments are forwarded to read.csv().
as.data.frame.H2OParsedData <- function(x, ...) {
  if(!inherits(x, "H2OParsedData")) stop("x must be of class H2OParsedData")
  # R >= 3.1 can read C99 hex float strings, which preserves full numeric
  # precision in the downloaded CSV.
  # BUG FIX: the original tested major >= 3 && minor >= 1 separately, which
  # silently disabled hex strings for R 4.0+ (minor "0.x" fails minor >= 1).
  # Compare the full version instead.
  use_hex_string <- getRversion() >= "3.1.0"
  url <- paste0('http://', x@h2o@ip, ':', x@h2o@port,
                '/2/DownloadDataset',
                '?src_key=', URLencode(x@key),
                '&hex_string=', as.numeric(use_hex_string))
  ttt <- getURL(url)
  # Strip a single trailing newline ("\n", or "\r\n" on Windows) so
  # read.csv does not see a spurious empty final record.
  n = nchar(ttt)
  chars_to_trim = 0
  if (n >= 2) {
    if (substr(ttt, n, n) == "\n") {
      chars_to_trim = chars_to_trim + 1
      if (substr(ttt, n - 1, n - 1) == "\r") {
        chars_to_trim = chars_to_trim + 1
      }
    }
  }
  if (chars_to_trim > 0) {
    ttt = substr(ttt, 1, n - chars_to_trim)
  }
  # blank.lines.skip = FALSE keeps empty rows aligned with the H2O frame
  # (blanks become NA instead of being dropped).
  df <- read.csv((tcon <- textConnection(ttt)), blank.lines.skip = FALSE, ...)
  close(tcon)
  return(df)
}
# Local conversions: download the frame (via as.data.frame) and coerce.
as.matrix.H2OParsedData <- function(x, ...) { as.matrix(as.data.frame(x, ...)) }
as.table.H2OParsedData <- function(x, ...) { as.table(as.matrix(x, ...))}
# head() for H2O frames: slice the first n rows server-side, download them
# as a local data.frame, then discard the temporary H2O key.
# Negative n counts back from the end, matching utils::head.
head.H2OParsedData <- function(x, n = 6L, ...) {
  stopifnot(length(n) == 1L)
  total <- nrow(x)
  n <- if (n < 0L) max(total + n, 0L) else min(n, total)
  if (n == 0) return(data.frame())
  slice_key <- x[seq_len(n), ]
  result <- as.data.frame(slice_key)
  # Free the server-side temporary created by the slice above.
  h2o.rm(slice_key@h2o, slice_key@key)
  result
}
# tail() for H2O frames: slice the last n rows server-side, download them
# locally, and label the result rows with their original indices before
# freeing the temporary H2O key.
tail.H2OParsedData <- function(x, n = 6L, ...) {
  stopifnot(length(n) == 1L)
  total <- nrow(x)
  n <- if (n < 0L) max(total + n, 0L) else min(n, total)
  if (n == 0) return(data.frame())
  row_idx <- seq.int(to = total, length.out = n)
  slice_key <- x[row_idx, ]
  result <- as.data.frame(slice_key)
  h2o.rm(slice_key@h2o, slice_key@key)
  # Keep the original row numbers, like utils::tail does.
  rownames(result) <- row_idx
  result
}
# Factor conversion/inspection, evaluated in H2O; is.factor() downloads the
# scalar result and coerces it to an R logical.
setMethod("as.factor", "H2OParsedData", function(x) { .h2o.__unop2("factor", x) })
setMethod("is.factor", "H2OParsedData", function(x) { as.logical(.h2o.__unop2("is.factor", x)) })
# Quantiles of a single numeric H2O column, computed server-side.
# Only type 7 (linear interpolation) is actually implemented.
quantile.H2OParsedData <- function(x, probs = seq(0, 1, 0.25), na.rm = FALSE, names = TRUE, type = 7, ...) {
  if((numCols = ncol(x)) != 1) stop("quantile only operates on a single column")
  if(is.factor(x)) stop("factors are not allowed")
  if(!na.rm && .h2o.__unop2("any.na", x)) stop("missing values and NaN's not allowed if 'na.rm' is FALSE")
  if(!is.numeric(probs)) stop("probs must be a numeric vector")
  if(any(probs < 0 | probs > 1)) stop("probs must fall in the range of [0,1]")
  # type 2 passes this first validation but is then rejected below, so in
  # practice only type 7 ever reaches the server.
  if(type != 2 && type != 7) stop("type must be either 2 (mean interpolation) or 7 (linear interpolation)")
  if(type != 7) stop("Unimplemented: Only type 7 (linear interpolation) is supported from the console")
  # Build the console expression quantile(<key>, c(p1,p2,...)).
  myProbs <- paste("c(", paste(probs, collapse = ","), ")", sep = "")
  expr <- paste("quantile(", x@key, ",", myProbs, ")", sep = "")
  res <- .h2o.__exec2(x@h2o, expr)
  # res = .h2o.__remoteSend(x@h2o, .h2o.__PAGE_QUANTILES, source_key = x@key, column = 0, quantile = paste(probs, collapse = ","), interpolation_type = type, ...)
  # col <- as.numeric(strsplit(res$result, "\n")[[1]][-1])
  # if(numCols > .MAX_INSPECT_COL_VIEW)
  # warning(x@key, " has greater than ", .MAX_INSPECT_COL_VIEW, " columns. This may take awhile...")
  # res2 = .h2o.__remoteSend(x@h2o, .h2o.__PAGE_INSPECT, key=res$dest_key, view=res$num_rows, max_column_display=.Machine$integer.max)
  # col <- sapply(res2$rows, function(x) { x[[2]] })
  # Download the result column and optionally name entries "25%", "50%", ...
  col <- as.data.frame(new("H2OParsedData", h2o=x@h2o, key=res$dest_key))[[1]]
  if(names) names(col) <- paste(100*probs, "%", sep="")
  return(col)
}
# setMethod("summary", "H2OParsedData", function(object) {
# summary() for H2O frames: a character table shaped like
# summary.data.frame, with six formatted rows per column (Min/1stQu/Median/
# Mean/3rdQu/Max for numerics, top level counts for factors).
summary.H2OParsedData <- function(object, ...) {
  digits <- 12L
  if(ncol(object) > .MAX_INSPECT_COL_VIEW)
    warning(object@key, " has greater than ", .MAX_INSPECT_COL_VIEW, " columns. This may take awhile...")
  res <- .h2o.__remoteSend(object@h2o, .h2o.__PAGE_SUMMARY2, source=object@key, max_ncols=.Machine$integer.max)
  cols <- sapply(res$summaries, function(col) {
    if(col$stats$type != 'Enum') { # numeric column
      # All-NA columns can come back without min/max arrays; show NaN.
      if(is.null(col$stats$mins) || length(col$stats$mins) == 0) col$stats$mins = NaN
      if(is.null(col$stats$maxs) || length(col$stats$maxs) == 0) col$stats$maxs = NaN
      # Without percentiles (e.g. constant column) every row shows the mean.
      if(is.null(col$stats$pctile))
        params <- format(rep(signif(as.numeric(col$stats$mean), digits), 6), digits = 4)
      else
        params <- format(signif(as.numeric(c(
          col$stats$mins[1],
          col$stats$pctile[4],
          col$stats$pctile[6],
          col$stats$mean,
          col$stats$pctile[8],
          col$stats$maxs[1])), digits), digits = 4)
      result = c(paste("Min. :", params[1], " ", sep=""), paste("1st Qu.:", params[2], " ", sep=""),
                 paste("Median :", params[3], " ", sep=""), paste("Mean :", params[4], " ", sep=""),
                 paste("3rd Qu.:", params[5], " ", sep=""), paste("Max. :", params[6], " ", sep=""))
    }
    else {
      # Factor column: report the six most frequent levels and their counts.
      top.ix <- sort.int(col$hcnt, decreasing=TRUE, index.return=TRUE)$ix[1:6]
      if(is.null(col$hbrk)) domains <- top.ix[1:6] else domains <- col$hbrk[top.ix]
      counts <- col$hcnt[top.ix]
      # TODO: Make sure "NA's" isn't a legal domain level
      if(!is.null(col$nacnt) && col$nacnt > 0) {
        # NA count replaces either an empty slot or the sixth entry.
        idx <- ifelse(any(is.na(top.ix)), which(is.na(top.ix))[1], 6)
        domains[idx] <- "NA's"
        counts[idx] <- col$nacnt
      }
      # width <- max(cbind(nchar(domains), nchar(counts)))
      # Pad level names and counts so the columns line up when printed.
      width <- c(max(nchar(domains)), max(nchar(counts)))
      result <- paste(domains,
                      sapply(domains, function(x) { ifelse(width[1] == nchar(x), "", paste(rep(' ', width[1] - nchar(x)), collapse='')) }),
                      ":",
                      sapply(counts, function(y) { ifelse(width[2] == nchar(y), "", paste(rep(' ', width[2] - nchar(y)), collapse='')) }),
                      counts,
                      " ",
                      sep='')
      # result[is.na(top.ix)] <- NA
      result[is.na(domains)] <- NA
      result
    }
  })
  # Filter out rows with nothing in them
  cidx <- apply(cols, 1, function(x) { any(!is.na(x)) })
  if(ncol(cols) == 1) { cols <- as.matrix(cols[cidx,]) } else { cols <- cols[cidx,] }
  # cols <- as.matrix(cols[cidx,])
  result <- as.table(cols)
  rownames(result) <- rep("", nrow(result))
  colnames(result) <- sapply(res$summaries, function(col) col$colname)
  result
}
# Print an importance-of-components table (standard deviation, proportion
# of variance, cumulative proportion), mirroring summary.prcomp.
summary.H2OPCAModel <- function(object, ...) {
  # TODO: Save propVar and cumVar from the Java output instead of computing here
  variances <- object@model$sdev^2
  prop_var <- variances / sum(variances)
  result <- rbind(object@model$sdev, prop_var, cumsum(prop_var)) # Need to limit decimal places to 4
  colnames(result) <- paste0("PC", seq_along(variances))
  rownames(result) <- c("Standard deviation", "Proportion of Variance", "Cumulative Proportion")
  cat("Importance of components:\n")
  print(result)
}
# Scree plot of the first `npcs` principal-component variances, as either
# a bar plot or a line plot.
screeplot.H2OPCAModel <- function(x, npcs = min(10, length(x@model$sdev)), type = "barplot", main = paste("h2o.prcomp(", x@data@key, ")", sep=""), ...) {
  variances <- x@model$sdev[1:npcs]^2
  if(type == "barplot")
    barplot(variances, main = main, ylab = "Variances", ...)
  else if(type == "lines")
    # BUG FIX: lines() only adds to an existing plot and fails with
    # "plot.new has not been called yet" on a fresh device. Draw a line
    # plot directly instead (as stats::screeplot.default does).
    plot(variances, type = "l", main = main, ylab = "Variances", ...)
  else
    stop("type must be either 'barplot' or 'lines'")
}
# TRUE when the H2O vector only holds values that R could coerce to
# logical; the check itself runs server-side.
.canBeCoercedToLogical <- function(vec) {
  # inherits() also accepts subclasses, unlike the original class() == test.
  if(!inherits(vec, "H2OParsedData")) stop("Object must be a H2OParsedData object. Input was: ", vec)
  # expects fr to be a vec.
  as.logical(.h2o.__unop2("canBeCoercedToLogical", vec))
}
# Validate the argument combination for the masked ifelse() below.
# Currently only type == "test" (H2O frame condition, numeric/logical/H2O
# branches) is implemented. Unknown types now return FALSE instead of the
# original's implicit NULL, so the result is always safe inside an if().
.check.ifelse.conditions <- function(test, yes, no, type) {
  if (type == "test") {
    return(inherits(test, "H2OParsedData")
           && (is.numeric(yes) || inherits(yes, "H2OParsedData") || is.logical(yes))
           && (is.numeric(no) || inherits(no, "H2OParsedData") || is.logical(no))
           && (test@logic || .canBeCoercedToLogical(test)))
  }
  FALSE
}
# Masked ifelse(): routes to H2O when the condition (or a branch) is an
# H2OParsedData, otherwise falls through to a verbatim copy of
# base::ifelse's logic for plain R inputs.
ifelse<-
function (test, yes, no)
{
  # Case 1: H2O condition with numeric/logical/H2O branches.
  if (.check.ifelse.conditions(test, yes, no, "test")) {
    if (is.logical(yes)) yes <- as.numeric(yes)
    if (is.logical(no)) no <- as.numeric(no)
    return(.h2o.__multop2("ifelse", test, yes, no))
  } else if ( class(yes) == "H2OParsedData" && class(test) == "logical") {
    # Case 2/3: plain logical condition but an H2O branch; H2O evaluates
    # the select with the condition coerced to numeric.
    if (is.logical(yes)) yes <- as.numeric(yes)
    if (is.logical(no)) no <- as.numeric(no)
    return(.h2o.__multop2("ifelse", as.numeric(test), yes, no))
  } else if (class(no) == "H2OParsedData" && class(test) == "logical") {
    if (is.logical(yes)) yes <- as.numeric(yes)
    if (is.logical(no)) no <- as.numeric(no)
    return(.h2o.__multop2("ifelse", as.numeric(test), yes, no))
  }
  # Fallback: base::ifelse semantics for plain R inputs (kept verbatim so
  # masking the generic does not change behaviour for ordinary vectors).
  if (is.atomic(test))
    storage.mode(test) <- "logical"
  else test <- if (isS4(test))
    as(test, "logical")
  else as.logical(test)
  ans <- test
  ok <- !(nas <- is.na(test))
  if (any(test[ok]))
    ans[test & ok] <- rep(yes, length.out = length(ans))[test &
      ok]
  if (any(!test[ok]))
    ans[!test & ok] <- rep(no, length.out = length(ans))[!test &
      ok]
  ans[nas] <- NA
  ans
}
#setMethod("ifelse", signature(test="H2OParsedData", yes="ANY", no="ANY"), function(test, yes, no) {
# if(!(is.numeric(yes) || class(yes) == "H2OParsedData") || !(is.numeric(no) || class(no) == "H2OParsedData"))
# stop("Unimplemented")
# if(!test@logic && !.canBeCoercedToLogical(test)) stop(test@key, " is not a H2O logical data type")
# h2o.exec(ifelse(test, yes, no))
## .h2o.__multop2("ifelse", eval(test), yes, no)
#})
##
#setMethod("ifelse", signature(test="logical", yes="H2OParsedData", no="ANY"), function(test, yes, no) {
# if(length(test) > 1) stop("test must be a single logical value")
# h2o.exec(ifelse(test, yes, no))
## .h2o.__multop2("ifelse", as.numeric(test), eval(yes), no)
#})
#
#setMethod("ifelse", signature(test="logical", yes="ANY", no="H2OParsedData"), function(test, yes, no) {
# if(length(test) > 1) stop("test must be a single logical value")
# h2o.exec(ifelse(test, yes, no))
## .h2o.__multop2("ifelse", as.numeric(test), yes, eval(no))
#})
#
#setMethod("ifelse", signature(test="logical", yes="H2OParsedData", no="H2OParsedData"), function(test, yes, no) {
# if(length(test) > 1) stop("test must be a single logical value")
# h2o.exec(ifelse(test, yes, no))
## .h2o.__multop2("ifelse", as.numeric(test), eval(yes), eval(no))
#})
#
# Factor levels of a single H2O column, fetched from the server.
setMethod("levels", "H2OParsedData", function(x) {
  # if(ncol(x) != 1) return(NULL)
  if(ncol(x) != 1) stop("Can only retrieve levels of one column.")
  res = .h2o.__remoteSend(x@h2o, .h2o.__HACK_LEVELS2, source = x@key, max_ncols = .Machine$integer.max)
  # A non-factor column comes back with NULL levels, matching base levels().
  res$levels[[1]]
})
#----------------------------- Work in Progress -------------------------------#
# TODO: Need to change ... to environment variables and pass to substitute method,
# Can't figure out how to access outside environment from within lapply
# apply() over an H2O frame: the R function FUN is deparsed to text and
# shipped to the H2O console for server-side row/column evaluation.
setMethod("apply", "H2OParsedData", function(X, MARGIN, FUN, ...) {
  if(missing(X) || class(X) != "H2OParsedData")
    stop("X must be a H2OParsedData object")
  if(missing(MARGIN) || !(length(MARGIN) <= 2 && all(MARGIN %in% c(1,2))))
    stop("MARGIN must be either 1 (rows), 2 (cols), or a vector containing both")
  if(missing(FUN) || !is.function(FUN))
    stop("FUN must be an R function")
  myList <- list(...)
  if(length(myList) > 0) {
    # Extra arguments to FUN are not supported yet; everything after this
    # stop() is unreachable scaffolding for a future implementation.
    stop("Unimplemented")
    tmp = sapply(myList, function(x) { !class(x) %in% c("H2OParsedData", "numeric") } )
    if(any(tmp)) stop("H2O only recognizes H2OParsedData and numeric objects")
    idx = which(sapply(myList, function(x) { class(x) == "H2OParsedData" }))
    # myList <- lapply(myList, function(x) { if(class(x) == "H2OParsedData") x@key else x })
    myList[idx] <- lapply(myList[idx], function(x) { x@key })
    # TODO: Substitute in key name for H2OParsedData objects and push over wire to console
    if(any(names(myList) == ""))
      stop("Must specify corresponding variable names of ", myList[names(myList) == ""])
  }
  # Substitute in function name: FUN <- match.fun(FUN)
  # Deparse FUN; a multi-line braced body is collapsed onto a single line
  # with ';' separators so it survives as one console expression.
  myfun = deparse(substitute(FUN))
  len = length(myfun)
  if(len > 3 && substr(myfun[1], nchar(myfun[1]), nchar(myfun[1])) == "{" && myfun[len] == "}")
    myfun = paste(myfun[1], paste(myfun[2:(len-1)], collapse = ";"), "}")
  else
    myfun = paste(myfun, collapse = "")
  # Build apply(<key>, <margin>, <fun-text>) for the H2O console.
  if(length(MARGIN) > 1)
    params = c(X@key, paste("c(", paste(MARGIN, collapse = ","), ")", sep = ""), myfun)
  else
    params = c(X@key, MARGIN, myfun)
  expr = paste("apply(", paste(params, collapse = ","), ")", sep="")
  res = .h2o.__exec2(X@h2o, expr)
  .h2o.exec2(res$dest_key, h2o = X@h2o, res$dest_key)
})
# Compact structure display for an H2O frame, modeled on utils::str for
# data frames: one line per column with type, the first ten values, and
# factor levels pulled from the server.
str.H2OParsedData <- function(object, ...) {
  # Delegate slot-level display to the default str() method first.
  if (length(l <- list(...)) && any("give.length" == names(l)))
    invisible(NextMethod("str", ...))
  else invisible(NextMethod("str", give.length = FALSE, ...))
  if(ncol(object) > .MAX_INSPECT_COL_VIEW)
    warning(object@key, " has greater than ", .MAX_INSPECT_COL_VIEW, " columns. This may take awhile...")
  res = .h2o.__remoteSend(object@h2o, .h2o.__PAGE_INSPECT2, src_key=object@key)
  cat("\nH2O dataset '", object@key, "':\t", res$numRows, " obs. of ", (p <- res$numCols),
      " variable", if(p != 1) "s", if(p > 0) ":", "\n", sep = "")
  cc <- unlist(lapply(res$cols, function(y) y$name))
  # Pad every column name to the widest one so the ':' markers align.
  width <- max(nchar(cc))
  rows <- res$rows[1:min(res$numRows, 10)] # TODO: Might need to check rows > 0
  res2 = .h2o.__remoteSend(object@h2o, .h2o.__HACK_LEVELS2, source=object@key, max_ncols=.Machine$integer.max)
  for(i in 1:p) {
    cat("$ ", cc[i], rep(' ', width - nchar(cc[i])), ": ", sep = "")
    # Column i's values sit at offset i+1 in each row record (slot 1 holds
    # the row label).
    rhead <- sapply(rows, function(x) { x[i+1] })
    if(is.null(res2$levels[[i]]))
      cat("num ", paste(rhead, collapse = " "), if(res$numRows > 10) " ...", "\n", sep = "")
    else {
      # Factor column: show level count, the first two labels, and the
      # integer codes of the sampled values, like base str().
      rlevels = res2$levels[[i]]
      cat("Factor w/ ", (count <- length(rlevels)), " level", if(count != 1) "s", ' "', paste(rlevels[1:min(count, 2)], collapse = '","'), '"', if(count > 2) ",..", ": ", sep = "")
      cat(paste(match(rhead, rlevels), collapse = " "), if(res$numRows > 10) " ...", "\n", sep = "")
    }
  }
}
# Server-side findInterval(): for each element of x, the index of the
# interval in `vec` containing it. `vec` must be sorted and NA-free.
setMethod("findInterval", "H2OParsedData", function(x, vec, rightmost.closed = FALSE, all.inside = FALSE) {
  if(any(is.na(vec)))
    stop("'vec' contains NAs")
  if(is.unsorted(vec))
    stop("'vec' must be sorted non-decreasingly")
  if(all.inside) stop("Unimplemented")
  # Build findInterval(<key>, c(...), <0|1>) for the H2O console.
  myVec = paste("c(", .seq_to_string(vec), ")", sep = "")
  expr = paste("findInterval(", x@key, ",", myVec, ",", as.numeric(rightmost.closed), ")", sep = "")
  res = .h2o.__exec2(x@h2o, expr)
  new('H2OParsedData', h2o=x@h2o, key=res$dest_key)
})
# setGeneric("histograms", function(object) { standardGeneric("histograms") })
# setMethod("histograms", "H2OParsedData", function(object) {
# if(ncol(object) > .MAX_INSPECT_COL_VIEW)
# warning(object@key, " has greater than ", .MAX_INSPECT_COL_VIEW, " columns. This may take awhile...")
# res = .h2o.__remoteSend(object@h2o, .h2o.__PAGE_SUMMARY2, source=object@key, max_ncols=.Machine$integer.max)
# list.of.bins <- lapply(res$summaries, function(x) {
# if (x$stats$type == 'Enum') {
# bins <- NULL
# } else {
# counts <- x$hcnt
# breaks <- seq(x$hstart, by=x$hstep, length.out=length(x$hcnt) + 1)
# bins <- list(counts,breaks)
# names(bins) <- cbind('counts', 'breaks')
# }
# bins
# })
# return(list.of.bins)
# })
|
##This function Function makeCacheMatrix gets a matrix as an input, set the value of the matrix,
#get the value of the matrix, set the inverse Matrix and get the inverse Matrix. The matrix object
#can cache its own object.
#<<- operator is used to assign a value to an object in an environment that is different
#from the current environment
#take the matrix as an input
# Build a matrix wrapper that can memoise its own inverse.
# Returns a list of accessors; the matrix and its cached inverse live in
# the closure environment of this call (hence the <<- superassignments).
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    # Replace the stored matrix and drop any stale cached inverse.
    setMatrix = function(y) {
      x <<- y
      cached_inverse <<- NULL
    },
    # Return the stored matrix.
    getMatrix = function() x,
    # Store a freshly computed inverse.
    setInverse = function(inverse) cached_inverse <<- inverse,
    # Return the cached inverse (NULL until one has been stored).
    getInverse = function() cached_inverse
  )
}
## The function cacheSolve takes the output of the previous matrix makeCacheMatrix(matrix) as an
# input and checks inverse matrix from makeCacheMatrix(matrix) has any value in it or not.
# In case inverse matrix from makeCacheMatrix((matrix) is empty, it gets the original matrix data from
# and set the invertible matrix by using the solve function.
# In case inverse matrix from makeCacheMatrix((matrix) has some value in it (always works
#after running the code 1st time), it returns a message "Getting Cached Invertible Matrix"
#and the cached object
# Return the inverse of the matrix held by a makeCacheMatrix() object,
# computing it at most once; later calls announce and reuse the cache.
# Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (is.null(cached)) {
    # Cache miss: invert the stored matrix and remember the result.
    cached <- solve(x$getMatrix(), ...)
    x$setInverse(cached)
  } else {
    message("Getting Cached Invertible Matrix")
  }
  cached
}
| /cachematrix.R | no_license | shalu143/cachematrix.R | R | false | false | 2,367 | r |
##This function Function makeCacheMatrix gets a matrix as an input, set the value of the matrix,
#get the value of the matrix, set the inverse Matrix and get the inverse Matrix. The matrix object
#can cache its own object.
#<<- operator is used to assign a value to an object in an environment that is different
#from the current environment
#take the matrix as an input
# Build a matrix wrapper that can memoise its own inverse.
# Returns a list of accessors; the matrix and its cached inverse live in
# the closure environment of this call (hence the <<- superassignments).
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    # Replace the stored matrix and drop any stale cached inverse.
    setMatrix = function(y) {
      x <<- y
      cached_inverse <<- NULL
    },
    # Return the stored matrix.
    getMatrix = function() x,
    # Store a freshly computed inverse.
    setInverse = function(inverse) cached_inverse <<- inverse,
    # Return the cached inverse (NULL until one has been stored).
    getInverse = function() cached_inverse
  )
}
## The function cacheSolve takes the output of the previous matrix makeCacheMatrix(matrix) as an
# input and checks inverse matrix from makeCacheMatrix(matrix) has any value in it or not.
# In case inverse matrix from makeCacheMatrix((matrix) is empty, it gets the original matrix data from
# and set the invertible matrix by using the solve function.
# In case inverse matrix from makeCacheMatrix((matrix) has some value in it (always works
#after running the code 1st time), it returns a message "Getting Cached Invertible Matrix"
#and the cached object
# Return the inverse of the matrix held by a makeCacheMatrix() object,
# computing it at most once; later calls announce and reuse the cache.
# Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (is.null(cached)) {
    # Cache miss: invert the stored matrix and remember the result.
    cached <- solve(x$getMatrix(), ...)
    x$setInverse(cached)
  } else {
    message("Getting Cached Invertible Matrix")
  }
  cached
}
|
## The functions will return the inverse of a matrix. Use the
## first function to input the matrix and use the second to
## caculate. And if the matrix remains the same, the second
## function will cache the result from memory instead of
## caculating again.
## The first function creates a R object which is a list of
## functions that stores a matrix and its inverse. When a new
## matrix is put in, the inverse will be assigned as NULL.
## Double arrow indicates that the assignment should be made
## to the parent environment, so the value assigned to within
## the function is accessible after the function ends.
# Wrap a matrix together with a cache slot for its inverse. The accessors
# close over this function's environment, so the matrix `x` and the cached
# inverse `m` persist between calls.
makeCacheMatrix <- function(x = matrix()) {
  m <- NULL
  # Replace the stored matrix and invalidate the cached inverse.
  # FIX: the original named this parameter `x`, shadowing the stored
  # matrix; `x <<- x` then only worked because superassignment skips the
  # local binding. Renaming the parameter removes that trap.
  set <- function(y) {
    x <<- y
    m <<- NULL
  }
  # Return the stored matrix.
  get <- function() {
    x
  }
  # Store a freshly computed inverse.
  setsolve <- function(solve) {
    m <<- solve
  }
  # Return the cached inverse (NULL until one has been stored).
  getsolve <- function() {
    m
  }
  list(set = set, get = get,
       setsolve = setsolve,
       getsolve = getsolve)
}
## The second function first check the inverse. If it's NULL then
## caculate and return the inverse. If it's not NULL then skip
## caculation and retrun the inverse.
# Return the inverse of the matrix stored in `x` (a makeCacheMatrix
# object), computing it only on the first call and serving the cached
# copy afterwards. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getsolve()
  if (is.null(cached)) {
    # Cache miss: invert and remember the result for next time.
    cached <- solve(x$get(), ...)
    x$setsolve(cached)
    cached
  } else {
    message("getting cached data")
    cached
  }
}
| /cachematrix.R | no_license | lujay1115/ProgrammingAssignment2 | R | false | false | 1,607 | r | ## The functions will return the inverse of a matrix. Use the
## first function to input the matrix and use the second to
## caculate. And if the matrix remains the same, the second
## function will cache the result from memory instead of
## caculating again.
## The first function creates a R object which is a list of
## functions that stores a matrix and its inverse. When a new
## matrix is put in, the inverse will be assigned as NULL.
## Double arrow indicates that the assignment should be made
## to the parent environment, so the value assigned to within
## the function is accessible after the function ends.
# Wrap a matrix together with a cache slot for its inverse. The accessors
# close over this function's environment, so the matrix `x` and the cached
# inverse `m` persist between calls.
makeCacheMatrix <- function(x = matrix()) {
  m <- NULL
  # Replace the stored matrix and invalidate the cached inverse.
  # FIX: the original named this parameter `x`, shadowing the stored
  # matrix; `x <<- x` then only worked because superassignment skips the
  # local binding. Renaming the parameter removes that trap.
  set <- function(y) {
    x <<- y
    m <<- NULL
  }
  # Return the stored matrix.
  get <- function() {
    x
  }
  # Store a freshly computed inverse.
  setsolve <- function(solve) {
    m <<- solve
  }
  # Return the cached inverse (NULL until one has been stored).
  getsolve <- function() {
    m
  }
  list(set = set, get = get,
       setsolve = setsolve,
       getsolve = getsolve)
}
## The second function first check the inverse. If it's NULL then
## caculate and return the inverse. If it's not NULL then skip
## caculation and retrun the inverse.
cacheSolve <- function(x, ...) {
m <- x$getsolve()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setsolve(m)
m
## Return a matrix that is the inverse of 'x'
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/train.spLearner.R
\name{train.spLearner.matrix}
\alias{train.spLearner.matrix}
\title{Train a spatial prediction and/or interpolation model using Ensemble Machine Learning
from a regression/classification matrix}
\usage{
train.spLearner.matrix(
observations,
formulaString,
covariates,
SL.library,
family = stats::gaussian(),
method = "stack.cv",
predict.type,
super.learner,
subsets = 5,
lambda = 0.5,
cov.model = "exponential",
subsample = 10000,
parallel = "multicore",
cell.size,
id = NULL,
weights = NULL,
quantreg = TRUE,
...
)
}
\arguments{
\item{observations}{Data frame regression matrix,}
\item{formulaString}{Model formula,}
\item{covariates}{SpatialPixelsDataFrame object,}
\item{SL.library}{List of learners,}
\item{family}{Family e.g. gaussian(),}
\item{method}{Ensemble stacking method (see makeStackedLearner),}
\item{predict.type}{Prediction type 'prob' or 'response',}
\item{super.learner}{Ensemble stacking model usually \code{regr.lm},}
\item{subsets}{Number of subsets for repeated CV,}
\item{lambda}{Target variable transformation (0.5 or 1),}
\item{cov.model}{Covariance model for variogram fitting,}
\item{subsample}{For large datasets consider random subsetting training data,}
\item{parallel}{Initiate parallel processing,}
\item{cell.size}{Block size for spatial Cross-validation,}
\item{id}{Id column name to control clusters of data,}
\item{weights}{Optional weights (per row) that learners will use to account for variable data quality,}
\item{quantreg}{Fit additional ranger model as meta-learner to allow for derivation of prediction intervals,}
\item{...}{other arguments that can be passed on to \code{mlr::makeStackedLearner},}
}
\value{
Object of class \code{spLearner}
}
\description{
Train a spatial prediction and/or interpolation model using Ensemble Machine Learning
from a regression/classification matrix
}
\author{
\href{https://opengeohub.org/people/tom-hengl}{Tom Hengl}
}
| /man/train.spLearner.matrix.Rd | no_license | morandiaye/landmap | R | false | true | 2,050 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/train.spLearner.R
\name{train.spLearner.matrix}
\alias{train.spLearner.matrix}
\title{Train a spatial prediction and/or interpolation model using Ensemble Machine Learning
from a regression/classification matrix}
\usage{
train.spLearner.matrix(
observations,
formulaString,
covariates,
SL.library,
family = stats::gaussian(),
method = "stack.cv",
predict.type,
super.learner,
subsets = 5,
lambda = 0.5,
cov.model = "exponential",
subsample = 10000,
parallel = "multicore",
cell.size,
id = NULL,
weights = NULL,
quantreg = TRUE,
...
)
}
\arguments{
\item{observations}{Data frame regression matrix,}
\item{formulaString}{Model formula,}
\item{covariates}{SpatialPixelsDataFrame object,}
\item{SL.library}{List of learners,}
\item{family}{Family e.g. gaussian(),}
\item{method}{Ensemble stacking method (see makeStackedLearner),}
\item{predict.type}{Prediction type 'prob' or 'response',}
\item{super.learner}{Ensemble stacking model usually \code{regr.lm},}
\item{subsets}{Number of subsets for repeated CV,}
\item{lambda}{Target variable transformation (0.5 or 1),}
\item{cov.model}{Covariance model for variogram fitting,}
\item{subsample}{For large datasets consider random subsetting training data,}
\item{parallel}{Initiate parallel processing,}
\item{cell.size}{Block size for spatial Cross-validation,}
\item{id}{Id column name to control clusters of data,}
\item{weights}{Optional weights (per row) that learners will use to account for variable data quality,}
\item{quantreg}{Fit additional ranger model as meta-learner to allow for derivation of prediction intervals,}
\item{...}{other arguments that can be passed on to \code{mlr::makeStackedLearner},}
}
\value{
Object of class \code{spLearner}
}
\description{
Train a spatial prediction and/or interpolation model using Ensemble Machine Learning
from a regression/classification matrix
}
\author{
\href{https://opengeohub.org/people/tom-hengl}{Tom Hengl}
}
|
library(tidyverse)
library(foreign)
library(nlme)
# Read the Stata data set and reshape it to long format: one row per
# subject x measurement day (the wide columns y0..y12 hold strength).
exercise <- read.dta("../datasets/exercise.dta")
exercise <- exercise %>%
as_tibble() %>%
gather(day, strength, y0:y12) %>%
mutate(
group_label = ifelse(group == 1, "Program1", "Program2"),
# recover the numeric measurement day from the y<k> column name
day_num = case_when(
day == "y0" ~ 0,
day == "y2" ~ 2,
day == "y4" ~ 4,
day == "y6" ~ 6,
day == "y8" ~ 8,
day == "y10" ~ 10,
day == "y12" ~ 12
)
) %>%
# alternative codings of day and group used by the model fits below
mutate(
day_num2 = as.integer(day_num),
group_label2 = factor(group_label),
group_label3 = group - 1  # 0/1 indicator: 0 = Program1, 1 = Program2
)
# Program1 = number of repetitions increased, but same weight
# Program2 = repetitions fixed, but weight increased
# 8.1.1
# On a single graph, construct a time plot that displays the mean strength versus
# time (in days) for the two treatment groups. Describe the general characteristics
# of the time trends for the two exercise programs.
mean_pop <- exercise %>%
group_by(group, day_num) %>%
summarize(mean = mean(strength, na.rm = TRUE))
p1 <- mean_pop %>%
ggplot(aes(x = day_num, y = mean, color = factor(group))) +
geom_line() +
labs(x = "day", y = "population mean strength", color = "Program")
# People enrolled in Program 2 appear to be inherently stronger.
# Strength in both programs increase until day 6 and then it plateaus.
# 8.1.2
# Read the data from the external file and put the data in a "univariate" or "long"
# format, with 7 "records" per patient.
# 8.1.3
# Fit a model with randomly varying intercepts and slopes, and allow the mean
# values of the intercept and slope to depend on treatment group (i.e., include
# main effect of treatment, a linear time trend, and a treatment by linear time
# trend ineteraction as fixed effects).
#(a) What is the estimated variance of the random intercepts?
#(b) What is the estimated variance of the random slopes?
#(c) What is the estimated correlation between the random intercepts and slopes?
#(d) Give an interpretation to the magnitude of the estimate variance of the
# random intercepts. For example, "approximately 95% of subjects have baseline
# measures of strength between a and b" (calculate the limits of the interval
# between a and b).
#(e) Give an interpretation to the magnitude of the estimate variance of the random
# slopes.
exercise <- exercise %>%
filter(!is.na(strength))
model1 <- lme(
strength ~ group_label * day_num,
data = exercise,
random = ~ group_label * day_num | id
)
# create individual models for my purpose
exercise_grouped <- groupedData(
strength ~ day_num | id,
data = as_data_frame(exercise),
labels = list(x = "day", y = "strength"),
outer = ~ group
)
model2 <- lme(
strength ~ group_label + day_num,
data = exercise,
random = ~ group_label + day_num | id
)
# (a) What is the estimated variance of the random intercepts?
# Intercept = 9.69
# group (Program2) = 1.70
# (b) What is the estimated variance of the random slopes?
# day (time) = 0.047
# intereaction = 0.029
# (c) What is the estimated correlation between the random intercepts and slopes?
# use getVarCov to get these values
# intercept - group = -0.59
# intercept - day (time) = 0.11
# intercept - interaction = -0.16
# group - day (time) = -0.07
# group - interaction = 0.0024
# day (time) - interaction = -0.027
#(d) Give an interpretation to the magnitude of the estimate variance of the
# random intercepts. For example, "approximately 95% of subjects have baseline
# measures of strength between a and b" (calculate the limits of the interval
# between a and b).
# Approximately 95% of subjects in program1 have baseline measures of strength between
# (80.1 - 1.96 * 3.11, 80.1 + 1.96 * 3.11) = (73.9, 86.1)
#
# Variance of program2 population = (9.69 + 1.70 + 2 * (-0.59)) = 10.21
# Approximately 95% of subjects in program2 have baseline measures of strength between
# (81.2 - 1.96 * 3.20, 81.2 + 1.96 * 3.20) = (74.9, 87.5)
# Since there is much overlap between program1 and program2 initial strength, there is
# no significant difference between the two. This is confirmed by looking at the
# p-value (0.2901).
#(e) Give an interpretation to the magnitude of the estimate variance of the random
# slopes.
# The estimate of the program1 mean slope is 0.11715 which is significant at the 0.05 level.
# Approximately 95% of people in program1 have strength changes between
# (0.12 - 1.96 * 0.22, 0.12 + 1.96 * 0.22) = (-0.31, 0.5512) which is not a lot.
# 36% of interval is strength loss.
# The estimate of the program2-time interaction slope is 0.049 which is not significant
# at the 0.05 level. This means under program2 strength changes at the rate
# 0.049 + 0.12 = 0.17. The variance for this estimate is 0.047 + 0.029 + 2 * (-0.027) = 0.022
# Approximately 95% of people in program2 have strength changes between
# (0.17 - 1.96 * 0.15, 0.17 + 1.96 * 0.15) = (-0.124, 0.464). This interval is entirely
# within group1 rate of increase, hence it is not significant.
# 8.1.4
# Is a model with only randomly varying intercepts defensible? Explain.
model3 <- lme(
strength ~ group_label * day_num,
data = exercise,
random = ~ group_label | id
)
# From the plot of the intervals in lmList, there is some participant variation across
# the slope day_num, but not nearly as much as the intercept. When performing an anova
# between model1 and model3, model1 is better in terms of AIC, BIC, and logLik.
# 8.1.5
# What are the mean intercept and slope in the two exercise programs?
# Program1 intercept = 80.1
# Program1 slope = 0.12
# Program2 intercept = 80.1 + 1.3 = 81.4
# Program2 slope = 0.12 + 0.049 = 0.169
# 8.1.6
# Based on the previous analysis, interpret the effect of treatment on changes in
# strength. Does your analysis suggest a difference between the two groups?
# There is not a difference of baseline strength between the two groups (p = 0.2901).
# There is not a difference of strength change between the two groups (p = 0.4815).
# 8.1.7
# What is the estimate of Var(Yi1|bi)? What is the estimate of Var(Yi1)? Explain
# the difference.
# Cov(Yi|bi) = Ri = sigma^2 * I_ni
# Cov(Yi) = ZiGZi' + sigma^2 * I_ni
# so that Var(Yi1|bi) = sigma^2 and Var(Yi1) = a long equation dependent on time, grouping,
# and G Var(Yi1|bi) is the deviation of the first measurement from the specific participant's
# mean response profile. Var(Yi1) is the deviation of the first measurement from the
# population mean response profile. It accounts for the correlation for a participant's
# measurements.
# 8.1.8
# Obtain the predicted (empirical BLUP) intercept and slope for each subject.
bs <- coefficients(model1)
# 8.1.9
# Using any standard linear regression procedure, obtain the ordinary least squares
# (OLS) estimates of the intercept and slope from the regression of strength on
# time (in days) for subject 24 (ID = 24). That is, restrict the analysis to data on
# subject 24 only and estimate that subject's intercept and slope.
data_24 <- exercise %>%
filter(id == 24)
ols_24 <- lm(strength ~ day_num, data = data_24)
# 8.1.10
# For subject 24 (ID = 24), compare the predicted intercepts and slopes obtained
# in Problems 8.1.8 and 8.1.9. How and why might these differ?
# ols intercept = 87.8
# ols slope = 0.45
# lme intercept = 86.94 + 1.27 = 88.21
# lme slope = 0.372 + (-0.05195) = 0.32
# For lme model, intercept and group share variance, so some of it might be lost hence the
# lower final intercept value compared with ols model. This is the same with the slopes.
| /Chapter8/Problem1.R | no_license | heffjos/applied_longitudinal_analysis | R | false | false | 7,616 | r | library(tidyverse)
library(foreign)
library(nlme)
exercise <- read.dta("../datasets/exercise.dta")
exercise <- exercise %>%
as_tibble() %>%
gather(day, strength, y0:y12) %>%
mutate(
group_label = ifelse(group == 1, "Program1", "Program2"),
day_num = case_when(
day == "y0" ~ 0,
day == "y2" ~ 2,
day == "y4" ~ 4,
day == "y6" ~ 6,
day == "y8" ~ 8,
day == "y10" ~ 10,
day == "y12" ~ 12
)
) %>%
mutate(
day_num2 = as.integer(day_num),
group_label2 = factor(group_label),
group_label3 = group - 1
)
# Program1 = number of repetitions increased, but same weight
# Program2 = repetitions fixed, but weight increased
# 8.1.1
# On a single graph, construct a time plot that displays the mean strength versus
# time (in days) for the two treatment groups. Describe the general characteristics
# of the time trends for the two exercise programs.
mean_pop <- exercise %>%
group_by(group, day_num) %>%
summarize(mean = mean(strength, na.rm = TRUE))
p1 <- mean_pop %>%
ggplot(aes(x = day_num, y = mean, color = factor(group))) +
geom_line() +
labs(x = "day", y = "population mean strength", color = "Program")
# People enrolled in Program 2 appear to be inherently stronger.
# Strength in both programs increase until day 6 and then it plateaus.
# 8.1.2
# Read the data from the external file and put the data in a "univariate" or "long"
# format, with 7 "records" per patient.
# 8.1.3
# Fit a model with randomly varying intercepts and slopes, and allow the mean
# values of the intercept and slope to depend on treatment group (i.e., include
# main effect of treatment, a linear time trend, and a treatment by linear time
# trend ineteraction as fixed effects).
#(a) What is the estimated variance of the random intercepts?
#(b) What is the estimated variance of the random slopes?
#(c) What is the estimated correlation between the random intercepts and slopes?
#(d) Give an interpretation to the magnitude of the estimate variance of the
# random intercepts. For example, "approximately 95% of subjects have baseline
# measures of strength between a and b" (calculate the limits of the interval
# between a and b).
#(e) Give an interpretation to the magnitude of the estimate variance of the random
# slopes.
exercise <- exercise %>%
filter(!is.na(strength))
model1 <- lme(
strength ~ group_label * day_num,
data = exercise,
random = ~ group_label * day_num | id
)
# create individual models for my purpose
exercise_grouped <- groupedData(
strength ~ day_num | id,
data = as_data_frame(exercise),
labels = list(x = "day", y = "strength"),
outer = ~ group
)
model2 <- lme(
strength ~ group_label + day_num,
data = exercise,
random = ~ group_label + day_num | id
)
# (a) What is the estimated variance of the random intercepts?
# Intercept = 9.69
# group (Program2) = 1.70
# (b) What is the estiamted variance of the random slopes?
# day (time) = 0.047
# intereaction = 0.029
# (c) What is the estimated correlation between the random intercepts and slopes?
# use getVarCov to get these values
# intercept - group = -0.59
# intercept - day (time) = 0.11
# intercept - interaction = -0.16
# group - day (time) = -0.07
# group - interaction = 0.0024
# day (time) - interaction = -0.027
#(d) Give an interpretation to the magnitude of the estimate variance of the
# random intercepts. For example, "approximately 95% of subjects have baseline
# measures of strength between a and b" (calculate the limits of the interval
# between a and b).
# Approximately 95% of subjects in program1 have baseline measures of strength between
# (80.1 - 1.96 * 3.11, 80.1 + 1.96 * 3.11) = (73.9, 86.1)
#
# Variance of program2 population = (9.69 + 1.70 + 2 * (-0.59) = 10.21
# Apprixmately 95% of subjects in program2 have baseline measuers of strength between
# (81.2 - 1.96 * 3.20, 81.2 + 1.96 * 3.20) = (74.9, 87.5)
# Since there is much overlap between program1 and program2 initial strength, there is
# no significant difference between the two. This is confirmed by looking at the
# p-value (0.2901).
#(e) Give an interpretation to the magnitude of the estimate variance of the random
# slopes.
# The estimate of the program1 mean slope is 0.11715 which is significant at the 0.05 level.
# Approximately 95% of people in program1 have strength changes between
# (0.12 - 1.96 * 0.22, 0.12 + 1.96 * 0.22) = (-0.31, 0.5512) which is not a lot.
# 36% of interval is strength loss.
# The estimate of the program2-time interaction slope is 0.049 which is not significant
# at the 0.05 level. This means under program2 strength changes at the rate
# 0.049 + 0.12 = 0.17. The variance for this estimate is 0.047 + 0.029 + 2 * (-0.027) = 0.022
# Approximately 95% of people in program2 have strength changes between
# (0.17 - 1.96 * 0.15, 0.17 + 1.96 * 0.15) = (-0.124, 0.464). This interval is entirely
# within group1 rate of increase, hence it is not significant.
# 8.1.4
# Is a model with only randomly varying intercepts defensible? Explain.
model3 <- lme(
strength ~ group_label * day_num,
data = exercise,
random = ~ group_label | id
)
# From the plot of the intervals in lmList, there is some participant variation accross
# the slope day_num, but not nearly as much as the intercept. When performing an anova
# between model1 and model3, model1 is better in terms of AIC, BIC, and logLik.
# 8.1.5
# What are teh mean intercept and slope in the two exercise programs?
# Program1 intercept = 80.1
# Program1 slope = 0.12
# Program2 intercept = 80.1 + 1.3 = 81.4
# Program2 slope = 0.12 + 0.049 = 0.169
# 8.16
# Based on teh previous analysis, interpret the effect of treatment on chagnes in
# strength. Does your analysis suggest a difference between the two groups?
# There is not a difference of baseline strength between the two groups (p = 0.2901).
# Tehre is not a difference of strength change between tht two groups (p = 0.4815)
# 8.1.7
# What is the estimate of Var(Yi1|bi)? What is the estimate of Var(Yi1)? Explain
# the difference.
# Cov(Yi|bi) = Ri = sigma^2 * I_ni
# Cov(Yi) = ZiGZi' + sigma^2 * I_ni
# so that Var(Yi1|bi) = sigma^2 and Var(Yi1) = a long equation dependent on time, grouping,
# and G Var(Yi1|bi) is the deviation of the first measurement from the specific participant's
# mean response profile. Var(Yi1) is the deviation of the first measurement from the
# population mean response profile. It accounts for the correlation for a participant's
# measurements.
# 8.1.8
# Obtain the predicted (emrirical BLUP) intercept and slope for each subject.
bs <- coefficients(model1)
# 8.1.9
# Using any standard linear regression procedure, obtain the ordinary least squares
# (OLS) estiamtes of the intercept and slpe from the regression of strength on
# time (in days) for subject 24 (ID = 24)l. That is rescrite the analysis to data on
# subject 24 only and estimate that subject's intercept and slope.
data_24 <- exercise %>%
filter(id == 24)
ols_24 <- lm(strength ~ day_num, data = data_24)
# 8.1.10
# For subject 24 (ID = 24), compare the predicted intercepts and slopes obtained
# in Problems 8.1.8 and 8.1.9. How and why might these differ?
# ols intercept = 87.8
# ols slope = 0.45
# lme intercept = 86.94 + 1.27 = 88.21
# lme slope = 0.372 + (-0.05195) = 0.32
# For lme model, intercept and group share variance, so some of it might be lost hence the
# lower final intercept value compared with ols model. This is the same with the slopes.
|
library("ggplot2")
# Greet a person by name, appending a birthday message when `birthday` is TRUE.
# `birthday` defaults to NULL; isTRUE() safely treats NULL, FALSE, and
# non-logical values as "no birthday". Returns the greeting as a string.
greet <- function(name, birthday = NULL) {
  # The `if` without `else` yields NULL when the condition is FALSE, and
  # paste0() drops zero-length (NULL) arguments, so the suffix just vanishes.
  # Fix: the suffix needs a leading space -- the original produced
  # "Hi Sophiaand HAPPY BIRTHDAY", contradicting the expected output below.
  paste0("Hi ", name, if (isTRUE(birthday)) " and HAPPY BIRTHDAY")
}
greet("Sophia")
# "Hi Sophia"
greet("Sophia", FALSE)
# "Hi Sophia"
greet("Sophia", TRUE)
# "Hi Sophia and HAPPY BIRTHDAY"
# Greet a person and ask how they are doing, inserting "Happy Birthday"
# when `birthday` is TRUE. Returns the message as a string.
trial <- function(name, birthday = NULL) {
  # Fix: the birthday insert needs a leading space, otherwise it is glued
  # to the name ("Hi SophiaHappy Birthday, how are you doing?").
  paste0("Hi ", name, if (isTRUE(birthday)) " Happy Birthday", ", how are you doing?")
}
trial("Sophia")
# Print (and invisibly return) a context-aware greeting:
# - birthday TRUE,  age NULL    -> plain birthday wish
# - birthday TRUE,  numeric age -> birthday wish including the age
# - birthday FALSE, numeric age -> ask whether it is their birthday
# - anything else               -> generic "How are you doing?"
newtrial <- function(name, birthday = NULL, age = NULL) {
  has_age <- is.numeric(age)
  msg <- if (isTRUE(birthday)) {
    if (is.null(age)) {
      paste0("Hi", " ", name, ", Happy Birthday!!")
    } else if (has_age) {
      paste0("Hi", " ", name, ", Happy", " ", age, " ", "Birthday")
    } else {
      paste0(name, ", How are you doing?")
    }
  } else if (isFALSE(birthday) && has_age) {
    paste0("Hi", " ", name, ", is it your", " ", age, " ", "birthday?")
  } else {
    paste0(name, ", How are you doing?")
  }
  print(msg)
}
newtrial("Sophia", TRUE)
# "Hi Sophia, Happy Birthday!!"
newtrial("Sophia", TRUE, 24)
# "Hi Sophia, Happy 24 Birthday"
newtrial("Sophia", FALSE, 40)
# "Hi Sophia, is it your 40 birthday?"
newtrial("Sophia")
# "Sophia, How are you doing?"
# Demonstrates R's lazy evaluation of default arguments: the default for `x`
# is the expression `z`, which is looked up in the function's own frame only
# when (and if) `x` is first used.
f2 <- function(x = z) {
z <- 100  # defines the `z` that the default would resolve to
x  # f2() returns 100 (default evaluated lazily, after z exists); f2(1) returns 1
}
f2(1)
# returns 1: an argument was supplied, so the default `z` is never evaluated
| /RSandbox/ifelsestatements.R | no_license | sophiacarryl/R-functions | R | false | false | 1,164 | r | library("ggplot2")
greet <- function(name, birthday = NULL) {
paste0("Hi ", name, if (isTRUE(birthday)) "and HAPPY BIRTHDAY"
)
}
greet("Sophia")
# "Hi Sophia"
greet("Sophia", FALSE)
# "Hi Sophia"
greet("Sophia", TRUE)
# "Hi Sophia and HAPPY BIRTHDAY"
trial <- function (name, birthday = NULL){
paste0("Hi ", name, if(isTRUE(birthday)) "Happy Birthday", ", how are you doing?"
)
}
trial("Sophia")
newtrial <- function (name, birthday = NULL, age = NULL){
if(isTRUE(birthday) && is.null(age)){
print(paste0("Hi", " ", name, ", Happy Birthday!!"))
}
else if(isTRUE(birthday) && is.numeric(age)){
print(paste0("Hi"," ", name, ", Happy", " ", age, " ", "Birthday"))
}
else if(isFALSE(birthday) && is.numeric(age)){
print(paste0("Hi"," ", name, ", is it your", " ", age, " " , "birthday?"))
}
else {
print(paste0(name, ", How are you doing?"))
}
}
newtrial("Sophia",TRUE)
# "Hi Sophia, Happy Birthday!!"
newtrial("Sophia",T, 24)
#"Hi Sophia, Happy Birthday!!"
newtrial("Sophia",F, 40)
# "Hi Sophia, is it your 40 birthday?"
newtrial("Sophia")
#"Sophia, How are you doing?"
f2 <- function(x = z) {
z <- 100
x
}
f2(1)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{save_plots}
\alias{save_plots}
\alias{save_plots,CEMiTool-method}
\title{Save CEMiTool object plots}
\usage{
save_plots(cem, ...)
\S4method{save_plots}{CEMiTool}(
cem,
value = c("all", "profile", "gsea", "ora", "interaction", "beta_r2", "mean_k",
"sample_tree", "mean_var", "hist", "qq"),
force = FALSE,
directory = "./Plots"
)
}
\arguments{
\item{cem}{Object of class \code{CEMiTool}.}
\item{...}{Optional parameters
One of "all", "profile", "gsea", "ora", "interaction", "beta_r2", "mean_k",
"sample_tree", "mean_var", "hist", "qq".}
\item{value}{A character string containing the name of the plot to be saved.}
\item{force}{If the directory exists, execution will not stop.}
\item{directory}{Directory into which the files will be saved.}
}
\value{
A pdf file or files with the desired plot(s)
}
\description{
Save plots into the directory specified by the \code{directory} argument.
}
\examples{
# Get example CEMiTool object
data(cem)
# Plot beta x R squared graph
cem <- plot_beta_r2(cem)
# Save plot
\dontrun{save_plots(cem, value="beta_r2", directory="./Plots")}
}
| /man/save_plots.Rd | no_license | csbl-usp/CEMiTool | R | false | true | 1,190 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{save_plots}
\alias{save_plots}
\alias{save_plots,CEMiTool-method}
\title{Save CEMiTool object plots}
\usage{
save_plots(cem, ...)
\S4method{save_plots}{CEMiTool}(
cem,
value = c("all", "profile", "gsea", "ora", "interaction", "beta_r2", "mean_k",
"sample_tree", "mean_var", "hist", "qq"),
force = FALSE,
directory = "./Plots"
)
}
\arguments{
\item{cem}{Object of class \code{CEMiTool}.}
\item{...}{Optional parameters
One of "all", "profile", "gsea", "ora", "interaction", "beta_r2", "mean_k",
"sample_tree", "mean_var", "hist", "qq".}
\item{value}{A character string containing the name of the plot to be saved.}
\item{force}{If the directory exists, execution will not stop.}
\item{directory}{Directory into which the files will be saved.}
}
\value{
A pdf file or files with the desired plot(s)
}
\description{
Save plots into the directory specified by the \code{directory} argument.
}
\examples{
# Get example CEMiTool object
data(cem)
# Plot beta x R squared graph
cem <- plot_beta_r2(cem)
# Save plot
\dontrun{save_plots(cem, value="beta_r2", directory="./Plots")}
}
|
library(proportion)
### Name: PloterrBA
### Title: Plots error, long term power and pass/fail criteria using
### Bayesian method
### Aliases: PloterrBA
### ** Examples
# Example inputs for PloterrBA (presumably: n = sample size, alp = alpha level,
# a/b = Beta prior parameters -- see ?PloterrBA for the exact meanings).
n=20; alp=0.05; phi=0.05; f=-2;a=0.5;b=0.5
PloterrBA(n,alp,phi,f,a,b)
| /data/genthat_extracted_code/proportion/examples/PloterrBA.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 247 | r | library(proportion)
### Name: PloterrBA
### Title: Plots error, long term power and pass/fail criteria using
### Bayesian method
### Aliases: PloterrBA
### ** Examples
n=20; alp=0.05; phi=0.05; f=-2;a=0.5;b=0.5
PloterrBA(n,alp,phi,f,a,b)
|
#'Generalized FRAMA
#'@description the generalized FRAMA, a fractal adaptive moving average
#'(see: FRAMA). The smoothing constant adapts to the estimated fractal
#'dimension of the series over the lookback window.
#'@param x a time series (xts)
#'@param n a lookback period; forced to be even (odd n is decremented by 1)
#'@param FC fast constant for an EMA (fast end of the adaptive period range)
#'@param SC slow constant for an EMA (slow end of the adaptive period range)
#'@param ... currently unused by the body -- TODO confirm it can be dropped
#'@return the FRAMA for the time series, as a one-column xts named "GFRAMA"
#'@export
"GFRAMA" <- function(x, n=20, FC=1, SC=200, ...) {
runSum(x) #result discarded: serves only to error out on non-leading NAs in x
index <- index(x)
if (n%%2==1) n=n-1 #n must be even: the window is split into two halves of n/2
# Fractal dimension estimate from the price range over the full window (N3)
# and over its two halves (N1 = recent half; N2 = older half, via lag).
N3 <- (runMax(x, n)-runMin(x, n))/n
N1 <- (runMax(x, n/2)-runMin(x, n/2))/(n/2)
lagSeries <- lag(x, n/2)
N2 <- (runMax(lagSeries, n/2)-runMin(lagSeries, n/2))/(n/2)
dimen <- (log(N1+N2)-log(N3))/log(2)
w <- log(2/(SC+1))
# Map the fractal dimension to an EMA period, rescale that period from the
# (1, SC) range onto the (FC, SC) range, and convert back to an alpha.
oldAlpha <- exp(w*(dimen-1))
oldN <- (2-oldAlpha)/oldAlpha
newN <- ((SC-FC)*(oldN-1)/(SC-1))+FC
alpha <- 2/(newN+1)
alpha[which(alpha > 1)] <- 1
# NOTE(review): w = log(2/(SC+1)) is negative for SC > 1, so this lower clamp
# admits negative smoothing constants. Ehlers' FRAMA clamps alpha at 2/(SC+1)
# itself, not its log -- confirm this is intended before relying on it.
alpha[which(alpha < w)] <- w
alphaComplement <- 1-alpha
# Remember where alpha is NA (warm-up region): those outputs are blanked below.
initializationIndex <- index(alpha[is.na(alpha)])
alpha[is.na(alpha)] <- 1; alphaComplement[is.na(alphaComplement)] <- 0
# Assumes any NAs in x are leading (guarded by runSum above): strip them here
# and re-attach the same number of NAs at the front of the result.
initialNAs <- rep(NA, sum(is.na(x)))
x <- x[!is.na(x)]
FRAMA <- rep(0, length(x))
FRAMA[1] <- x[1]
# computeFRAMA is a helper defined elsewhere in the package; presumably it runs
# the recursive filter FRAMA[t] = alpha[t]*x[t] + (1-alpha[t])*FRAMA[t-1] --
# confirm against its definition.
FRAMA <- computeFRAMA(alpha, alphaComplement, FRAMA, x)
FRAMA <- c(initialNAs, FRAMA)
FRAMA <- xts(FRAMA, order.by=index)
FRAMA[initializationIndex] <- alpha[initializationIndex] <- NA
out <- FRAMA
colnames(out) <- "GFRAMA"
return(out)
} | /Alpha Modelling/QuantStrat/Packages/DSTrading/R/GFRAMA.R | no_license | Bakeforfun/Quant | R | false | false | 1,326 | r | #'Generalized FRAMA
#'@description the generalized FRAMA (see: FRAMA)
#'@param x a time series
#'@param n a lookback period
#'@param FC fast constant for an EMA
#'@param SC slow constant for an EMA
#'@return the FRAMA for the time series
#'@export
"GFRAMA" <- function(x, n=20, FC=1, SC=200, ...) {
runSum(x) #test for non-leading NAs
index <- index(x)
if (n%%2==1) n=n-1 #n must be even
N3 <- (runMax(x, n)-runMin(x, n))/n
N1 <- (runMax(x, n/2)-runMin(x, n/2))/(n/2)
lagSeries <- lag(x, n/2)
N2 <- (runMax(lagSeries, n/2)-runMin(lagSeries, n/2))/(n/2)
dimen <- (log(N1+N2)-log(N3))/log(2)
w <- log(2/(SC+1))
oldAlpha <- exp(w*(dimen-1))
oldN <- (2-oldAlpha)/oldAlpha
newN <- ((SC-FC)*(oldN-1)/(SC-1))+FC
alpha <- 2/(newN+1)
alpha[which(alpha > 1)] <- 1
alpha[which(alpha < w)] <- w
alphaComplement <- 1-alpha
initializationIndex <- index(alpha[is.na(alpha)])
alpha[is.na(alpha)] <- 1; alphaComplement[is.na(alphaComplement)] <- 0
initialNAs <- rep(NA, sum(is.na(x)))
x <- x[!is.na(x)]
FRAMA <- rep(0, length(x))
FRAMA[1] <- x[1]
FRAMA <- computeFRAMA(alpha, alphaComplement, FRAMA, x)
FRAMA <- c(initialNAs, FRAMA)
FRAMA <- xts(FRAMA, order.by=index)
FRAMA[initializationIndex] <- alpha[initializationIndex] <- NA
out <- FRAMA
colnames(out) <- "GFRAMA"
return(out)
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_state.R
\name{read_state}
\alias{read_state}
\title{Download shape files of Brazilian states as sf objects. Data at scale 1:250,000, using Geodetic reference system "SIRGAS2000" and CRS(4674)}
\usage{
read_state(code_state, year = NULL)
}
\arguments{
\item{code_state}{The two-digit code of a state or a two-letter uppercase abbreviation (e.g. 33 or "RJ"). If code_state="all", all states will be loaded.}
\item{year}{Year of the data (defaults to 2010)}
}
\description{
Download shape files of Brazilian states as sf objects. Data at scale 1:250,000, using Geodetic reference system "SIRGAS2000" and CRS(4674)
}
\examples{
\donttest{
library(geobr)
# Read specific state at a given year
uf <- read_state(code_state=12, year=2017)
# Read specific state at a given year
uf <- read_state(code_state="SC", year=2000)
# Read all states at a given year
ufs <- read_state(code_state="all", year=2010)
}
}
\seealso{
Other general area functions: \code{\link{read_census_tract}},
\code{\link{read_country}},
\code{\link{read_meso_region2}},
\code{\link{read_micro_region}},
\code{\link{read_municipality}},
\code{\link{read_region}},
\code{\link{read_statistical_grid}},
\code{\link{read_weighting_area}}
}
\concept{general area functions}
| /man/read_state.Rd | no_license | cavedo95/geobr | R | false | true | 1,341 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_state.R
\name{read_state}
\alias{read_state}
\title{Download shape files of Brazilian states as sf objects. Data at scale 1:250,000, using Geodetic reference system "SIRGAS2000" and CRS(4674)}
\usage{
read_state(code_state, year = NULL)
}
\arguments{
\item{code_state}{The two-digit code of a state or a two-letter uppercase abbreviation (e.g. 33 or "RJ"). If code_state="all", all states will be loaded.}
\item{year}{Year of the data (defaults to 2010)}
}
\description{
Download shape files of Brazilian states as sf objects. Data at scale 1:250,000, using Geodetic reference system "SIRGAS2000" and CRS(4674)
}
\examples{
\donttest{
library(geobr)
# Read specific state at a given year
uf <- read_state(code_state=12, year=2017)
# Read specific state at a given year
uf <- read_state(code_state="SC", year=2000)
# Read all states at a given year
ufs <- read_state(code_state="all", year=2010)
}
}
\seealso{
Other general area functions: \code{\link{read_census_tract}},
\code{\link{read_country}},
\code{\link{read_meso_region2}},
\code{\link{read_micro_region}},
\code{\link{read_municipality}},
\code{\link{read_region}},
\code{\link{read_statistical_grid}},
\code{\link{read_weighting_area}}
}
\concept{general area functions}
|
# If Russian letters have turned into mojibake: File - Reopen with
# encoding... - UTF-8 - Set as default - OK
# lab 09
library("dplyr") # data manipulation
library("caret") # standardized interface to regression and classification models
library("AER") # instrumental variables
library("ggplot2") # plots
library("sandwich") # robust standard errors
library("ivpack") # extra goodies for instrumental variables
library("memisc") # mtable model-comparison table
######################### prediction task
# read the data from a .txt file: there is a header, header = TRUE; fields are
# tab-separated, sep='\t'; the decimal separator is a dot, dec='.'
h <- read.csv("flats_moscow.txt", header = TRUE, sep = "\t", dec = ".")
glimpse(h) # take a quick look at the data
# add logs of the price and the areas
h2 <- mutate(h, logprice = log(price), logtotsp = log(totsp), logkitsp = log(kitsp),
loglivesp = log(livesp))
# create a data split: draw 75% of the row indices at random
in_train <- createDataPartition(y = h2$logprice, p = 0.75, list = FALSE)
h2_train <- h2[in_train, ] # training part of the sample
h2_test <- h2[-in_train, ] # the rest goes into the test part of the sample
# estimate two models by OLS
model_1 <- lm(data = h2_train, logprice ~ logkitsp + logtotsp + loglivesp)
model_2 <- lm(data = h2_train, logprice ~ logtotsp)
# build predictions from both models on the test sample
pred_1 <- predict(model_1, h2_test)
pred_2 <- predict(model_2, h2_test)
# compute the residual sums of squares on the test sample by hand
sum((pred_1 - h2_test$logprice)^2)
sum((pred_2 - h2_test$logprice)^2)
############################################### estimating a model of given form with endogeneity
## data
data("CigarettesSW", package = "AER") # load the data set
help("CigarettesSW") # read the help page
# use a shorter name for convenience
h <- CigarettesSW
glimpse(h) # inspect the data structure
# draw a scatter plot
qplot(data = h, price, packs)
# keep only the observations from 1995
h2 <- filter(h, year == "1995")
# create new variables
h2 <- mutate(h2, rprice = price/cpi, rincome = income/cpi/population, tdiff = (taxs -
tax)/cpi)
# look at the scatter plot again
qplot(data = h2, price, packs)
# and glance at the data set
glimpse(h2)
# estimate the demand function by OLS, forgetting that endogeneity is present
model_0 <- lm(data = h2, log(packs) ~ log(rprice))
summary(model_0)
# two-stage least squares by hand. Step 1: regress the endogenous regressor
# on the instrumental variable
st_1 <- lm(data = h2, log(rprice) ~ tdiff)
# save the fitted values from the first-stage regression
h2$logprice_hat <- fitted(st_1)
# Step 2: regress the dependent variable on the first-stage fitted values
st_2 <- lm(data = h2, log(packs) ~ logprice_hat)
coeftest(st_2)
# here coeftest uses incorrect standard errors (even under
# homoskedasticity)
help(ivreg) # documentation for the ivreg command
# two-stage least squares in one line
model_iv <- ivreg(data = h2, log(packs) ~ log(rprice) | tdiff)
coeftest(model_iv) # here the standard errors are correct
# compare the three models in one table
mtable(model_0, model_iv, st_2)
# use robust standard errors for hypothesis testing
coeftest(model_iv, vcov = vcovHC)
# model with one exogenous variable, log(rincome), and one endogenous
# variable, log(rprice)
iv_model_2 <- ivreg(data = h2, log(packs) ~ log(rprice) + log(rincome) | log(rincome) +
tdiff)
# test hypotheses using robust standard errors
coeftest(iv_model_2, vcov = vcovHC)
# model with one exogenous, one endogenous, and two instrumental
# variables for the endogenous one
iv_model_3 <- ivreg(data = h2, log(packs) ~ log(rprice) + log(rincome) | log(rincome) +
tdiff + I(tax/cpi))
# test hypotheses using robust standard errors
coeftest(iv_model_3, vcov = vcovHC)
| /week_14/lab_09/lab_09_after.R | no_license | bdemeshev/openedu_metrics | R | false | false | 5,519 | r | # Esli russkie bukvi prevratilitis v krakozyabry, to File - Reopen with
# encoding... - UTF-8 - Set as default - OK
# lab 09
library("dplyr") # манипуляции с данными
library("caret") # стандартизованный подход к регрессионным и классификационным моделям
library("AER") # инструментальные переменные
library("ggplot2") # графики
library("sandwich") # робастные стандартные ошибки
# ---------------------------------------------------------------------------
# Instrumental-variables regression and out-of-sample forecasting demo.
# NOTE(review): this chunk also uses dplyr (glimpse, mutate, filter), caret
# (createDataPartition) and ggplot2 (qplot), which are not attached here --
# presumably they are loaded earlier in the session; confirm.
# ---------------------------------------------------------------------------
library("ivpack")  # extra helpers for instrumental-variable regression
library("memisc")  # mtable() model-comparison tables
######################### forecasting task
# read the data from a .txt file: there is a header row (header = TRUE), the
# field separator is a tab (sep = '\t'), the decimal separator is a dot (dec = '.')
h <- read.csv("flats_moscow.txt", header = TRUE, sep = "\t", dec = ".")
glimpse(h)  # take a quick look at the data
# add logs of the price and of the area variables
h2 <- mutate(h, logprice = log(price), logtotsp = log(totsp), logkitsp = log(kitsp),
    loglivesp = log(livesp))
# create a data split: draw 75% of the row indices at random for training
in_train <- createDataPartition(y = h2$logprice, p = 0.75, list = FALSE)
h2_train <- h2[in_train, ]  # training part of the sample
h2_test <- h2[-in_train, ]  # the remaining rows form the test part
# estimate two models by OLS on the training sample
model_1 <- lm(data = h2_train, logprice ~ logkitsp + logtotsp + loglivesp)
model_2 <- lm(data = h2_train, logprice ~ logtotsp)
# build forecasts from both models on the test sample
pred_1 <- predict(model_1, h2_test)
pred_2 <- predict(model_2, h2_test)
# compute the out-of-sample sums of squared residuals by hand
sum((pred_1 - h2_test$logprice)^2)
sum((pred_2 - h2_test$logprice)^2)
############################################### estimating a model with endogeneity
## data
data("CigarettesSW", package = "AER")  # load the dataset
help("CigarettesSW")  # read the help page
# rename it for convenience
h <- CigarettesSW
glimpse(h)  # look at the structure of the data
# draw a scatterplot
qplot(data = h, price, packs)
# keep only the observations for 1995
h2 <- filter(h, year == "1995")
# create new variables: real price, real per-capita income, real tax difference
h2 <- mutate(h2, rprice = price/cpi, rincome = income/cpi/population, tdiff = (taxs -
    tax)/cpi)
# look at the scatterplot again
qplot(data = h2, price, packs)
# and glance at the dataset
glimpse(h2)
# estimate the demand function by OLS, "forgetting" that endogeneity is present
model_0 <- lm(data = h2, log(packs) ~ log(rprice))
summary(model_0)
# two-stage least squares by hand. Step 1: regress the endogenous regressor
# on the instrumental variable
st_1 <- lm(data = h2, log(rprice) ~ tdiff)
# save the fitted values from the first-stage regression
h2$logprice_hat <- fitted(st_1)
# Step 2: regress the dependent variable on the first-stage fitted values
st_2 <- lm(data = h2, log(packs) ~ logprice_hat)
coeftest(st_2)
# here coeftest uses INCORRECT standard errors (even under
# homoskedasticity), because the second stage treats the fitted values as
# fixed regressors
help(ivreg)  # documentation for the ivreg command
# two-stage least squares in one line
model_iv <- ivreg(data = h2, log(packs) ~ log(rprice) | tdiff)
coeftest(model_iv)  # here the standard errors ARE correct
# compare the three models in a single table
mtable(model_0, model_iv, st_2)
# use robust standard errors for hypothesis testing
coeftest(model_iv, vcov = vcovHC)
# model with one exogenous regressor, log(rincome), and one endogenous
# regressor, log(rprice)
iv_model_2 <- ivreg(data = h2, log(packs) ~ log(rprice) + log(rincome) | log(rincome) +
    tdiff)
# test hypotheses using robust standard errors
coeftest(iv_model_2, vcov = vcovHC)
# model with one exogenous regressor, one endogenous regressor and two
# instrumental variables for the endogenous one
iv_model_3 <- ivreg(data = h2, log(packs) ~ log(rprice) + log(rincome) | log(rincome) +
    tdiff + I(tax/cpi))
# test hypotheses using robust standard errors
coeftest(iv_model_3, vcov = vcovHC)
|
# Example usage of the ENMeval data-partitioning methods (block, checkerboard,
# random k-fold, jackknife, user-defined) on simulated occurrence/background
# points. Extracted from the get.evaluation.bins help page; each partitioning
# call is followed by plots colouring the points by their assigned group.
library(ENMeval)
### Name: get.evaluation.bins
### Title: Methods to partition data for evaluation
### Aliases: get.evaluation.bins get.block get.checkerboard1
### get.checkerboard2 get.randomkfold get.user get.jackknife
### ** Examples
require(raster)
set.seed(1)
### Create environmental extent (raster)
env <- raster(matrix(nrow=25, ncol=25))
### Create presence localities
set.seed(1)  # re-seed so the occurrence draw is reproducible on its own
nocc <- 25
xocc <- rnorm(nocc, sd=0.25) + 0.5
yocc <- runif(nocc, 0, 1)
occ.pts <- as.data.frame(cbind(xocc, yocc))
### Create background points
nbg <- 500
xbg <- runif(nbg, 0, 1)
ybg <- runif(nbg, 0, 1)
bg.pts <- as.data.frame(cbind(xbg, ybg))
### Show points
plot(env)
points(bg.pts)
points(occ.pts, pch=21, bg=2)
### Block partitioning method (4 spatial quadrants)
blk.pts <- get.block(occ.pts, bg.pts)
plot(env)
points(occ.pts, pch=23, bg=blk.pts$occ.grp)
plot(env)
points(bg.pts, pch=21, bg=blk.pts$bg.grp)
### Checkerboard1 partitioning method (2 groups, aggregation factor 4)
chk1.pts <- get.checkerboard1(occ.pts, env, bg.pts, 4)
plot(env)
points(occ.pts, pch=23, bg=chk1.pts$occ.grp)
plot(env)
points(bg.pts, pch=21, bg=chk1.pts$bg.grp)
### Checkerboard2 partitioning method (4 groups, aggregation factors c(2,2))
chk2.pts <- get.checkerboard2(occ.pts, env, bg.pts, c(2,2))
plot(env)
points(occ.pts, pch=23, bg=chk2.pts$occ.grp)
plot(env)
points(bg.pts, pch=21, bg=chk2.pts$bg.grp)
### Random k-fold partitions (k = 4)
# Note that k random does not partition the background
krandom.pts <- get.randomkfold(occ.pts, bg.pts, 4)
plot(env)
points(occ.pts, pch=23, bg=krandom.pts$occ.grp)
plot(env)
points(bg.pts, pch=21, bg=krandom.pts$bg.grp)
### k-1 jackknife partitions (leave-one-out: one group per occurrence point)
# Note background is not partitioned
jack.pts <- get.jackknife(occ.pts, bg.pts)
plot(env)
points(occ.pts, pch=23, bg=rainbow(length(jack.pts$occ.grp)))
plot(env)
points(bg.pts, pch=21, bg=jack.pts$bg.grp)
### User-defined partitions: group labels supplied manually by the analyst
# Note background is not partitioned
occ.grp <- c(rep(1, 10), rep(2, 5), rep(3, 10))
bg.grp <- c(rep(1, 200), rep(2, 100), rep(3, 200))
user.pts <- get.user(occ.grp, bg.grp)
plot(env)
points(occ.pts, pch=23, bg=user.pts$occ.grp)
plot(env)
points(bg.pts, pch=21, bg=user.pts$bg.grp)
| /data/genthat_extracted_code/ENMeval/examples/get.evaluation.bins.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 2,102 | r | library(ENMeval)
### Name: get.evaluation.bins
### Title: Methods to partition data for evaluation
### Aliases: get.evaluation.bins get.block get.checkerboard1
### get.checkerboard2 get.randomkfold get.user get.jackknife
### ** Examples
require(raster)
set.seed(1)
### Create environmental extent (raster)
env <- raster(matrix(nrow=25, ncol=25))
### Create presence localities
set.seed(1)
nocc <- 25
xocc <- rnorm(nocc, sd=0.25) + 0.5
yocc <- runif(nocc, 0, 1)
occ.pts <- as.data.frame(cbind(xocc, yocc))
### Create background points
nbg <- 500
xbg <- runif(nbg, 0, 1)
ybg <- runif(nbg, 0, 1)
bg.pts <- as.data.frame(cbind(xbg, ybg))
### Show points
plot(env)
points(bg.pts)
points(occ.pts, pch=21, bg=2)
### Block partitioning method
blk.pts <- get.block(occ.pts, bg.pts)
plot(env)
points(occ.pts, pch=23, bg=blk.pts$occ.grp)
plot(env)
points(bg.pts, pch=21, bg=blk.pts$bg.grp)
### Checkerboard1 partitioning method
chk1.pts <- get.checkerboard1(occ.pts, env, bg.pts, 4)
plot(env)
points(occ.pts, pch=23, bg=chk1.pts$occ.grp)
plot(env)
points(bg.pts, pch=21, bg=chk1.pts$bg.grp)
### Checkerboard2 partitioning method
chk2.pts <- get.checkerboard2(occ.pts, env, bg.pts, c(2,2))
plot(env)
points(occ.pts, pch=23, bg=chk2.pts$occ.grp)
plot(env)
points(bg.pts, pch=21, bg=chk2.pts$bg.grp)
### Random k-fold partitions
# Note that k random does not partition the background
krandom.pts <- get.randomkfold(occ.pts, bg.pts, 4)
plot(env)
points(occ.pts, pch=23, bg=krandom.pts$occ.grp)
plot(env)
points(bg.pts, pch=21, bg=krandom.pts$bg.grp)
### k-1 jackknife partitions
# Note background is not partitioned
jack.pts <- get.jackknife(occ.pts, bg.pts)
plot(env)
points(occ.pts, pch=23, bg=rainbow(length(jack.pts$occ.grp)))
plot(env)
points(bg.pts, pch=21, bg=jack.pts$bg.grp)
### User-defined partitions
# Note background is not partitioned
occ.grp <- c(rep(1, 10), rep(2, 5), rep(3, 10))
bg.grp <- c(rep(1, 200), rep(2, 100), rep(3, 200))
user.pts <- get.user(occ.grp, bg.grp)
plot(env)
points(occ.pts, pch=23, bg=user.pts$occ.grp)
plot(env)
points(bg.pts, pch=21, bg=user.pts$bg.grp)
|
# This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common get_config new_operation new_request send_request
#' @include locationservice_service.R
NULL
#' Creates an association between a geofence collection and a tracker
#' resource
#'
#' @description
#' Creates an association between a geofence collection and a tracker resource. This allows the tracker resource to communicate location data to the linked geofence collection.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_associate_tracker_consumer/](https://www.paws-r-sdk.com/docs/locationservice_associate_tracker_consumer/) for full documentation.
#'
#' @param ConsumerArn [required] The Amazon Resource Name (ARN) for the geofence collection to be
#' associated to tracker resource. Used when you need to specify a resource
#' across all Amazon Web Services.
#'
#' - Format example:
#' `arn:aws:geo:region:account-id:geofence-collection/ExampleGeofenceCollectionConsumer`
#' @param TrackerName [required] The name of the tracker resource to be associated with a geofence
#' collection.
#'
#' @keywords internal
#'
#' @rdname locationservice_associate_tracker_consumer
locationservice_associate_tracker_consumer <- function(ConsumerArn, TrackerName) {
  # Operation descriptor: endpoint path, HTTP verb, and (empty) pagination.
  operation <- new_operation(
    name = "AssociateTrackerConsumer",
    http_method = "POST",
    http_path = "/tracking/v0/trackers/{TrackerName}/consumers",
    paginator = list()
  )
  # Marshal the caller's arguments into the generated request/response shapes.
  op_input <- .locationservice$associate_tracker_consumer_input(
    ConsumerArn = ConsumerArn,
    TrackerName = TrackerName
  )
  op_output <- .locationservice$associate_tracker_consumer_output()
  # Build a service client from the ambient configuration and send the call.
  client <- .locationservice$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.locationservice$operations$associate_tracker_consumer <- locationservice_associate_tracker_consumer
#' Deletes the position history of one or more devices from a tracker
#' resource
#'
#' @description
#' Deletes the position history of one or more devices from a tracker resource.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_batch_delete_device_position_history/](https://www.paws-r-sdk.com/docs/locationservice_batch_delete_device_position_history/) for full documentation.
#'
#' @param DeviceIds [required] Devices whose position history you want to delete.
#'
#' - For example, for two devices: `“DeviceIds” : [DeviceId1,DeviceId2]`
#' @param TrackerName [required] The name of the tracker resource to delete the device position history
#' from.
#'
#' @keywords internal
#'
#' @rdname locationservice_batch_delete_device_position_history
locationservice_batch_delete_device_position_history <- function(DeviceIds, TrackerName) {
  # Operation descriptor: endpoint path, HTTP verb, and (empty) pagination.
  operation <- new_operation(
    name = "BatchDeleteDevicePositionHistory",
    http_method = "POST",
    http_path = "/tracking/v0/trackers/{TrackerName}/delete-positions",
    paginator = list()
  )
  # Marshal the caller's arguments into the generated request/response shapes.
  op_input <- .locationservice$batch_delete_device_position_history_input(
    DeviceIds = DeviceIds,
    TrackerName = TrackerName
  )
  op_output <- .locationservice$batch_delete_device_position_history_output()
  # Build a service client from the ambient configuration and send the call.
  client <- .locationservice$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.locationservice$operations$batch_delete_device_position_history <- locationservice_batch_delete_device_position_history
#' Deletes a batch of geofences from a geofence collection
#'
#' @description
#' Deletes a batch of geofences from a geofence collection.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_batch_delete_geofence/](https://www.paws-r-sdk.com/docs/locationservice_batch_delete_geofence/) for full documentation.
#'
#' @param CollectionName [required] The geofence collection storing the geofences to be deleted.
#' @param GeofenceIds [required] The batch of geofences to be deleted.
#'
#' @keywords internal
#'
#' @rdname locationservice_batch_delete_geofence
locationservice_batch_delete_geofence <- function(CollectionName, GeofenceIds) {
  # Operation descriptor: endpoint path, HTTP verb, and (empty) pagination.
  operation <- new_operation(
    name = "BatchDeleteGeofence",
    http_method = "POST",
    http_path = "/geofencing/v0/collections/{CollectionName}/delete-geofences",
    paginator = list()
  )
  # Marshal the caller's arguments into the generated request/response shapes.
  op_input <- .locationservice$batch_delete_geofence_input(
    CollectionName = CollectionName,
    GeofenceIds = GeofenceIds
  )
  op_output <- .locationservice$batch_delete_geofence_output()
  # Build a service client from the ambient configuration and send the call.
  client <- .locationservice$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.locationservice$operations$batch_delete_geofence <- locationservice_batch_delete_geofence
#' Evaluates device positions against the geofence geometries from a given
#' geofence collection
#'
#' @description
#' Evaluates device positions against the geofence geometries from a given geofence collection.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_batch_evaluate_geofences/](https://www.paws-r-sdk.com/docs/locationservice_batch_evaluate_geofences/) for full documentation.
#'
#' @param CollectionName [required] The geofence collection used in evaluating the position of devices
#' against its geofences.
#' @param DevicePositionUpdates [required] Contains device details for each device to be evaluated against the
#' given geofence collection.
#'
#' @keywords internal
#'
#' @rdname locationservice_batch_evaluate_geofences
locationservice_batch_evaluate_geofences <- function(CollectionName, DevicePositionUpdates) {
  # Operation descriptor: endpoint path, HTTP verb, and (empty) pagination.
  operation <- new_operation(
    name = "BatchEvaluateGeofences",
    http_method = "POST",
    http_path = "/geofencing/v0/collections/{CollectionName}/positions",
    paginator = list()
  )
  # Marshal the caller's arguments into the generated request/response shapes.
  op_input <- .locationservice$batch_evaluate_geofences_input(
    CollectionName = CollectionName,
    DevicePositionUpdates = DevicePositionUpdates
  )
  op_output <- .locationservice$batch_evaluate_geofences_output()
  # Build a service client from the ambient configuration and send the call.
  client <- .locationservice$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.locationservice$operations$batch_evaluate_geofences <- locationservice_batch_evaluate_geofences
#' Lists the latest device positions for requested devices
#'
#' @description
#' Lists the latest device positions for requested devices.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_batch_get_device_position/](https://www.paws-r-sdk.com/docs/locationservice_batch_get_device_position/) for full documentation.
#'
#' @param DeviceIds [required] Devices whose position you want to retrieve.
#'
#' - For example, for two devices:
#' `device-ids=DeviceId1&device-ids=DeviceId2`
#' @param TrackerName [required] The tracker resource retrieving the device position.
#'
#' @keywords internal
#'
#' @rdname locationservice_batch_get_device_position
locationservice_batch_get_device_position <- function(DeviceIds, TrackerName) {
  # Operation descriptor: endpoint path, HTTP verb, and (empty) pagination.
  operation <- new_operation(
    name = "BatchGetDevicePosition",
    http_method = "POST",
    http_path = "/tracking/v0/trackers/{TrackerName}/get-positions",
    paginator = list()
  )
  # Marshal the caller's arguments into the generated request/response shapes.
  op_input <- .locationservice$batch_get_device_position_input(
    DeviceIds = DeviceIds,
    TrackerName = TrackerName
  )
  op_output <- .locationservice$batch_get_device_position_output()
  # Build a service client from the ambient configuration and send the call.
  client <- .locationservice$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.locationservice$operations$batch_get_device_position <- locationservice_batch_get_device_position
#' A batch request for storing geofence geometries into a given geofence
#' collection, or updates the geometry of an existing geofence if a
#' geofence ID is included in the request
#'
#' @description
#' A batch request for storing geofence geometries into a given geofence collection, or updates the geometry of an existing geofence if a geofence ID is included in the request.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_batch_put_geofence/](https://www.paws-r-sdk.com/docs/locationservice_batch_put_geofence/) for full documentation.
#'
#' @param CollectionName [required] The geofence collection storing the geofences.
#' @param Entries [required] The batch of geofences to be stored in a geofence collection.
#'
#' @keywords internal
#'
#' @rdname locationservice_batch_put_geofence
locationservice_batch_put_geofence <- function(CollectionName, Entries) {
  # Operation descriptor: endpoint path, HTTP verb, and (empty) pagination.
  operation <- new_operation(
    name = "BatchPutGeofence",
    http_method = "POST",
    http_path = "/geofencing/v0/collections/{CollectionName}/put-geofences",
    paginator = list()
  )
  # Marshal the caller's arguments into the generated request/response shapes.
  op_input <- .locationservice$batch_put_geofence_input(
    CollectionName = CollectionName,
    Entries = Entries
  )
  op_output <- .locationservice$batch_put_geofence_output()
  # Build a service client from the ambient configuration and send the call.
  client <- .locationservice$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.locationservice$operations$batch_put_geofence <- locationservice_batch_put_geofence
#' Uploads position update data for one or more devices to a tracker
#' resource
#'
#' @description
#' Uploads position update data for one or more devices to a tracker resource. Amazon Location uses the data when it reports the last known device position and position history. Amazon Location retains location data for 30 days.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_batch_update_device_position/](https://www.paws-r-sdk.com/docs/locationservice_batch_update_device_position/) for full documentation.
#'
#' @param TrackerName [required] The name of the tracker resource to update.
#' @param Updates [required] Contains the position update details for each device.
#'
#' @keywords internal
#'
#' @rdname locationservice_batch_update_device_position
locationservice_batch_update_device_position <- function(TrackerName, Updates) {
  # Operation descriptor: endpoint path, HTTP verb, and (empty) pagination.
  operation <- new_operation(
    name = "BatchUpdateDevicePosition",
    http_method = "POST",
    http_path = "/tracking/v0/trackers/{TrackerName}/positions",
    paginator = list()
  )
  # Marshal the caller's arguments into the generated request/response shapes.
  op_input <- .locationservice$batch_update_device_position_input(
    TrackerName = TrackerName,
    Updates = Updates
  )
  op_output <- .locationservice$batch_update_device_position_output()
  # Build a service client from the ambient configuration and send the call.
  client <- .locationservice$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.locationservice$operations$batch_update_device_position <- locationservice_batch_update_device_position
#' Calculates a route given the following required parameters:
#' DeparturePosition and DestinationPosition
#'
#' @description
#' [Calculates a route](https://docs.aws.amazon.com/location/latest/developerguide/calculate-route.html) given the following required parameters: `DeparturePosition` and `DestinationPosition`. Requires that you first [create a route calculator resource](https://docs.aws.amazon.com/location/latest/APIReference/API_CreateRouteCalculator.html).
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_calculate_route/](https://www.paws-r-sdk.com/docs/locationservice_calculate_route/) for full documentation.
#'
#' @param CalculatorName [required] The name of the route calculator resource that you want to use to
#' calculate the route.
#' @param CarModeOptions Specifies route preferences when traveling by `Car`, such as avoiding
#' routes that use ferries or tolls.
#'
#' Requirements: `TravelMode` must be specified as `Car`.
#' @param DepartNow Sets the time of departure as the current time. Uses the current time to
#' calculate a route. Otherwise, the best time of day to travel with the
#' best traffic conditions is used to calculate the route.
#'
#' Default Value: `false`
#'
#' Valid Values: `false` | `true`
#' @param DeparturePosition [required] The start position for the route. Defined in [World Geodetic System (WGS
#' 84)](https://earth-info.nga.mil/index.php?dir=wgs84&action=wgs84)
#' format: `[longitude, latitude]`.
#'
#' - For example, `[-123.115, 49.285]`
#'
#' If you specify a departure that's not located on a road, Amazon Location
#' [moves the position to the nearest
#' road](https://docs.aws.amazon.com/location/latest/developerguide/snap-to-nearby-road.html).
#' If Esri is the provider for your route calculator, specifying a route
#' that is longer than 400 km returns a `400 RoutesValidationException`
#' error.
#'
#' Valid Values: `[-180 to 180,-90 to 90]`
#' @param DepartureTime Specifies the desired time of departure. Uses the given time to
#' calculate the route. Otherwise, the best time of day to travel with the
#' best traffic conditions is used to calculate the route.
#'
#' Setting a departure time in the past returns a `400 ValidationException`
#' error.
#'
#' - In [ISO
#' 8601](https://www.iso.org/iso-8601-date-and-time-format.html)
#' format: `YYYY-MM-DDThh:mm:ss.sssZ`. For example,
#' `2020–07-2T12:15:20.000Z+01:00`
#' @param DestinationPosition [required] The finish position for the route. Defined in [World Geodetic System
#' (WGS 84)](https://earth-info.nga.mil/index.php?dir=wgs84&action=wgs84)
#' format: `[longitude, latitude]`.
#'
#' - For example, `[-122.339, 47.615]`
#'
#' If you specify a destination that's not located on a road, Amazon
#' Location [moves the position to the nearest
#' road](https://docs.aws.amazon.com/location/latest/developerguide/snap-to-nearby-road.html).
#'
#' Valid Values: `[-180 to 180,-90 to 90]`
#' @param DistanceUnit Set the unit system to specify the distance.
#'
#' Default Value: `Kilometers`
#' @param IncludeLegGeometry Set to include the geometry details in the result for each path between
#' a pair of positions.
#'
#' Default Value: `false`
#'
#' Valid Values: `false` | `true`
#' @param TravelMode Specifies the mode of transport when calculating a route. Used in
#' estimating the speed of travel and road compatibility. You can choose
#' `Car`, `Truck`, `Walking`, `Bicycle` or `Motorcycle` as options for the
#' `TravelMode`.
#'
#' `Bicycle` and `Motorcycle` are only valid when using Grab as a data
#' provider, and only within Southeast Asia.
#'
#' `Truck` is not available for Grab.
#'
#' For more details on the using Grab for routing, including areas of
#' coverage, see
#' [GrabMaps](https://docs.aws.amazon.com/location/latest/developerguide/grab.html)
#' in the *Amazon Location Service Developer Guide*.
#'
#' The `TravelMode` you specify also determines how you specify route
#' preferences:
#'
#' - If traveling by `Car` use the `CarModeOptions` parameter.
#'
#' - If traveling by `Truck` use the `TruckModeOptions` parameter.
#'
#' Default Value: `Car`
#' @param TruckModeOptions Specifies route preferences when traveling by `Truck`, such as avoiding
#' routes that use ferries or tolls, and truck specifications to consider
#' when choosing an optimal road.
#'
#' Requirements: `TravelMode` must be specified as `Truck`.
#' @param WaypointPositions Specifies an ordered list of up to 23 intermediate positions to include
#' along a route between the departure position and destination position.
#'
#' - For example, from the `DeparturePosition` `[-123.115, 49.285]`, the
#' route follows the order that the waypoint positions are given
#' `[[-122.757, 49.0021],[-122.349, 47.620]]`
#'
#' If you specify a waypoint position that's not located on a road, Amazon
#' Location [moves the position to the nearest
#' road](https://docs.aws.amazon.com/location/latest/developerguide/snap-to-nearby-road.html).
#'
#' Specifying more than 23 waypoints returns a `400 ValidationException`
#' error.
#'
#' If Esri is the provider for your route calculator, specifying a route
#' that is longer than 400 km returns a `400 RoutesValidationException`
#' error.
#'
#' Valid Values: `[-180 to 180,-90 to 90]`
#'
#' @keywords internal
#'
#' @rdname locationservice_calculate_route
locationservice_calculate_route <- function(CalculatorName, CarModeOptions = NULL, DepartNow = NULL, DeparturePosition, DepartureTime = NULL, DestinationPosition, DistanceUnit = NULL, IncludeLegGeometry = NULL, TravelMode = NULL, TruckModeOptions = NULL, WaypointPositions = NULL) {
  # Operation descriptor: endpoint path, HTTP verb, and (empty) pagination.
  operation <- new_operation(
    name = "CalculateRoute",
    http_method = "POST",
    http_path = "/routes/v0/calculators/{CalculatorName}/calculate/route",
    paginator = list()
  )
  # Marshal the caller's arguments into the generated request/response shapes.
  op_input <- .locationservice$calculate_route_input(
    CalculatorName = CalculatorName,
    CarModeOptions = CarModeOptions,
    DepartNow = DepartNow,
    DeparturePosition = DeparturePosition,
    DepartureTime = DepartureTime,
    DestinationPosition = DestinationPosition,
    DistanceUnit = DistanceUnit,
    IncludeLegGeometry = IncludeLegGeometry,
    TravelMode = TravelMode,
    TruckModeOptions = TruckModeOptions,
    WaypointPositions = WaypointPositions
  )
  op_output <- .locationservice$calculate_route_output()
  # Build a service client from the ambient configuration and send the call.
  client <- .locationservice$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.locationservice$operations$calculate_route <- locationservice_calculate_route
#' Calculates a route matrix given the following required parameters:
#' DeparturePositions and DestinationPositions
#'
#' @description
#' [Calculates a route matrix](https://docs.aws.amazon.com/location/latest/developerguide/calculate-route-matrix.html) given the following required parameters: `DeparturePositions` and `DestinationPositions`. [`calculate_route_matrix`][locationservice_calculate_route_matrix] calculates routes and returns the travel time and travel distance from each departure position to each destination position in the request. For example, given departure positions A and B, and destination positions X and Y, [`calculate_route_matrix`][locationservice_calculate_route_matrix] will return time and distance for routes from A to X, A to Y, B to X, and B to Y (in that order). The number of results returned (and routes calculated) will be the number of `DeparturePositions` times the number of `DestinationPositions`.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_calculate_route_matrix/](https://www.paws-r-sdk.com/docs/locationservice_calculate_route_matrix/) for full documentation.
#'
#' @param CalculatorName [required] The name of the route calculator resource that you want to use to
#' calculate the route matrix.
#' @param CarModeOptions Specifies route preferences when traveling by `Car`, such as avoiding
#' routes that use ferries or tolls.
#'
#' Requirements: `TravelMode` must be specified as `Car`.
#' @param DepartNow Sets the time of departure as the current time. Uses the current time to
#' calculate the route matrix. You can't set both `DepartureTime` and
#' `DepartNow`. If neither is set, the best time of day to travel with the
#' best traffic conditions is used to calculate the route matrix.
#'
#' Default Value: `false`
#'
#' Valid Values: `false` | `true`
#' @param DeparturePositions [required] The list of departure (origin) positions for the route matrix. An array
#' of points, each of which is itself a 2-value array defined in [WGS
#' 84](https://earth-info.nga.mil/index.php?dir=wgs84&action=wgs84) format:
#' `[longitude, latitude]`. For example, `[-123.115, 49.285]`.
#'
#' Depending on the data provider selected in the route calculator resource
#' there may be additional restrictions on the inputs you can choose. See
#' [Position
#' restrictions](https://docs.aws.amazon.com/location/latest/developerguide/calculate-route-matrix.html#matrix-routing-position-limits)
#' in the *Amazon Location Service Developer Guide*.
#'
#' For route calculators that use Esri as the data provider, if you specify
#' a departure that's not located on a road, Amazon Location [moves the
#' position to the nearest
#' road](https://docs.aws.amazon.com/location/latest/developerguide/snap-to-nearby-road.html).
#' The snapped value is available in the result in
#' `SnappedDeparturePositions`.
#'
#' Valid Values: `[-180 to 180,-90 to 90]`
#' @param DepartureTime Specifies the desired time of departure. Uses the given time to
#' calculate the route matrix. You can't set both `DepartureTime` and
#' `DepartNow`. If neither is set, the best time of day to travel with the
#' best traffic conditions is used to calculate the route matrix.
#'
#' Setting a departure time in the past returns a `400 ValidationException`
#' error.
#'
#' - In [ISO
#' 8601](https://www.iso.org/iso-8601-date-and-time-format.html)
#' format: `YYYY-MM-DDThh:mm:ss.sssZ`. For example,
#' `2020–07-2T12:15:20.000Z+01:00`
#' @param DestinationPositions [required] The list of destination positions for the route matrix. An array of
#' points, each of which is itself a 2-value array defined in [WGS
#' 84](https://earth-info.nga.mil/index.php?dir=wgs84&action=wgs84) format:
#' `[longitude, latitude]`. For example, `[-122.339, 47.615]`
#'
#' Depending on the data provider selected in the route calculator resource
#' there may be additional restrictions on the inputs you can choose. See
#' [Position
#' restrictions](https://docs.aws.amazon.com/location/latest/developerguide/calculate-route-matrix.html#matrix-routing-position-limits)
#' in the *Amazon Location Service Developer Guide*.
#'
#' For route calculators that use Esri as the data provider, if you specify
#' a destination that's not located on a road, Amazon Location [moves the
#' position to the nearest
#' road](https://docs.aws.amazon.com/location/latest/developerguide/snap-to-nearby-road.html).
#' The snapped value is available in the result in
#' `SnappedDestinationPositions`.
#'
#' Valid Values: `[-180 to 180,-90 to 90]`
#' @param DistanceUnit Set the unit system to specify the distance.
#'
#' Default Value: `Kilometers`
#' @param TravelMode Specifies the mode of transport when calculating a route. Used in
#' estimating the speed of travel and road compatibility.
#'
#' The `TravelMode` you specify also determines how you specify route
#' preferences:
#'
#' - If traveling by `Car` use the `CarModeOptions` parameter.
#'
#' - If traveling by `Truck` use the `TruckModeOptions` parameter.
#'
#' `Bicycle` or `Motorcycle` are only valid when using `Grab` as a data
#' provider, and only within Southeast Asia.
#'
#' `Truck` is not available for Grab.
#'
#' For more information about using Grab as a data provider, see
#' [GrabMaps](https://docs.aws.amazon.com/location/latest/developerguide/grab.html)
#' in the *Amazon Location Service Developer Guide*.
#'
#' Default Value: `Car`
#' @param TruckModeOptions Specifies route preferences when traveling by `Truck`, such as avoiding
#' routes that use ferries or tolls, and truck specifications to consider
#' when choosing an optimal road.
#'
#' Requirements: `TravelMode` must be specified as `Truck`.
#'
#' @keywords internal
#'
#' @rdname locationservice_calculate_route_matrix
locationservice_calculate_route_matrix <- function(CalculatorName, CarModeOptions = NULL, DepartNow = NULL, DeparturePositions, DepartureTime = NULL, DestinationPositions, DistanceUnit = NULL, TravelMode = NULL, TruckModeOptions = NULL) {
  # Operation descriptor: endpoint path, HTTP verb, and (empty) pagination.
  operation <- new_operation(
    name = "CalculateRouteMatrix",
    http_method = "POST",
    http_path = "/routes/v0/calculators/{CalculatorName}/calculate/route-matrix",
    paginator = list()
  )
  # Marshal the caller's arguments into the generated request/response shapes.
  op_input <- .locationservice$calculate_route_matrix_input(
    CalculatorName = CalculatorName,
    CarModeOptions = CarModeOptions,
    DepartNow = DepartNow,
    DeparturePositions = DeparturePositions,
    DepartureTime = DepartureTime,
    DestinationPositions = DestinationPositions,
    DistanceUnit = DistanceUnit,
    TravelMode = TravelMode,
    TruckModeOptions = TruckModeOptions
  )
  op_output <- .locationservice$calculate_route_matrix_output()
  # Build a service client from the ambient configuration and send the call.
  client <- .locationservice$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.locationservice$operations$calculate_route_matrix <- locationservice_calculate_route_matrix
#' Creates a geofence collection, which manages and stores geofences
#'
#' @description
#' Creates a geofence collection, which manages and stores geofences.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_create_geofence_collection/](https://www.paws-r-sdk.com/docs/locationservice_create_geofence_collection/) for full documentation.
#'
#' @param CollectionName [required] A custom name for the geofence collection.
#'
#' Requirements:
#'
#' - Contain only alphanumeric characters (A–Z, a–z, 0–9), hyphens (-),
#' periods (.), and underscores (_).
#'
#' - Must be a unique geofence collection name.
#'
#' - No spaces allowed. For example, `ExampleGeofenceCollection`.
#' @param Description An optional description for the geofence collection.
#' @param KmsKeyId A key identifier for an [Amazon Web Services KMS customer managed
#' key](https://docs.aws.amazon.com/kms/latest/developerguide/create-keys.html).
#' Enter a key ID, key ARN, alias name, or alias ARN.
#' @param PricingPlan No longer used. If included, the only allowed value is
#' `RequestBasedUsage`.
#' @param PricingPlanDataSource This parameter is no longer used.
#' @param Tags Applies one or more tags to the geofence collection. A tag is a
#' key-value pair helps manage, identify, search, and filter your resources
#' by labelling them.
#'
#' Format: `"key" : "value"`
#'
#' Restrictions:
#'
#' - Maximum 50 tags per resource
#'
#' - Each resource tag must be unique with a maximum of one value.
#'
#' - Maximum key length: 128 Unicode characters in UTF-8
#'
#' - Maximum value length: 256 Unicode characters in UTF-8
#'
#' - Can use alphanumeric characters (A–Z, a–z, 0–9), and the following
#' characters: + - = . _ : / @@.
#'
#' - Cannot use "aws:" as a prefix for a key.
#'
#' @keywords internal
#'
#' @rdname locationservice_create_geofence_collection
locationservice_create_geofence_collection <- function(CollectionName, Description = NULL, KmsKeyId = NULL, PricingPlan = NULL, PricingPlanDataSource = NULL, Tags = NULL) {
  # Operation descriptor: endpoint path, HTTP verb, and (empty) pagination.
  operation <- new_operation(
    name = "CreateGeofenceCollection",
    http_method = "POST",
    http_path = "/geofencing/v0/collections",
    paginator = list()
  )
  # Marshal the caller's arguments into the generated request/response shapes.
  op_input <- .locationservice$create_geofence_collection_input(
    CollectionName = CollectionName,
    Description = Description,
    KmsKeyId = KmsKeyId,
    PricingPlan = PricingPlan,
    PricingPlanDataSource = PricingPlanDataSource,
    Tags = Tags
  )
  op_output <- .locationservice$create_geofence_collection_output()
  # Build a service client from the ambient configuration and send the call.
  client <- .locationservice$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.locationservice$operations$create_geofence_collection <- locationservice_create_geofence_collection
#' Creates an API key resource in your Amazon Web Services account, which
#' lets you grant geo:GetMap* actions for Amazon Location Map resources to
#' the API key bearer
#'
#' @description
#' Creates an API key resource in your Amazon Web Services account, which lets you grant `geo:GetMap*` actions for Amazon Location Map resources to the API key bearer.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_create_key/](https://www.paws-r-sdk.com/docs/locationservice_create_key/) for full documentation.
#'
#' @param Description An optional description for the API key resource.
#' @param ExpireTime The optional timestamp for when the API key resource will expire in [ISO
#' 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format:
#' `YYYY-MM-DDThh:mm:ss.sssZ`. One of `NoExpiry` or `ExpireTime` must be
#' set.
#' @param KeyName [required] A custom name for the API key resource.
#'
#' Requirements:
#'
#' - Contain only alphanumeric characters (A–Z, a–z, 0–9), hyphens (-),
#' periods (.), and underscores (_).
#'
#' - Must be a unique API key name.
#'
#' - No spaces allowed. For example, `ExampleAPIKey`.
#' @param NoExpiry Optionally set to `true` to set no expiration time for the API key. One
#' of `NoExpiry` or `ExpireTime` must be set.
#' @param Restrictions [required] The API key restrictions for the API key resource.
#' @param Tags Applies one or more tags to the map resource. A tag is a key-value pair
#' that helps manage, identify, search, and filter your resources by
#' labelling them.
#'
#' Format: `"key" : "value"`
#'
#' Restrictions:
#'
#' - Maximum 50 tags per resource
#'
#' - Each resource tag must be unique with a maximum of one value.
#'
#' - Maximum key length: 128 Unicode characters in UTF-8
#'
#' - Maximum value length: 256 Unicode characters in UTF-8
#'
#' - Can use alphanumeric characters (A–Z, a–z, 0–9), and the following
#' characters: + - = . _ : / @@.
#'
#' - Cannot use "aws:" as a prefix for a key.
#'
#' @keywords internal
#'
#' @rdname locationservice_create_key
locationservice_create_key <- function(Description = NULL, ExpireTime = NULL, KeyName, NoExpiry = NULL, Restrictions, Tags = NULL) {
  # Build the CreateKey operation descriptor (POST endpoint).
  operation <- new_operation(
    name = "CreateKey",
    http_method = "POST",
    http_path = "/metadata/v0/keys",
    paginator = list()
  )
  # Marshal caller arguments into the request shape, fetch the response shape.
  req_params <- .locationservice$create_key_input(Description = Description, ExpireTime = ExpireTime, KeyName = KeyName, NoExpiry = NoExpiry, Restrictions = Restrictions, Tags = Tags)
  resp_shape <- .locationservice$create_key_output()
  # Construct a service client from the current config and dispatch the call.
  svc_client <- .locationservice$service(get_config())
  send_request(new_request(svc_client, operation, req_params, resp_shape))
}
.locationservice$operations$create_key <- locationservice_create_key
#' Creates a map resource in your Amazon Web Services account, which
#' provides map tiles of different styles sourced from global location data
#' providers
#'
#' @description
#' Creates a map resource in your Amazon Web Services account, which provides map tiles of different styles sourced from global location data providers.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_create_map/](https://www.paws-r-sdk.com/docs/locationservice_create_map/) for full documentation.
#'
#' @param Configuration [required] Specifies the `MapConfiguration`, including the map style, for the map
#' resource that you create. The map style defines the look of maps and the
#' data provider for your map resource.
#' @param Description An optional description for the map resource.
#' @param MapName [required] The name for the map resource.
#'
#' Requirements:
#'
#' - Must contain only alphanumeric characters (A–Z, a–z, 0–9), hyphens
#' (-), periods (.), and underscores (_).
#'
#' - Must be a unique map resource name.
#'
#' - No spaces allowed. For example, `ExampleMap`.
#' @param PricingPlan No longer used. If included, the only allowed value is
#' `RequestBasedUsage`.
#' @param Tags Applies one or more tags to the map resource. A tag is a
#' key-value pair that helps manage, identify, search, and filter your
#' resources by labelling them.
#'
#' Format: `"key" : "value"`
#'
#' Restrictions:
#'
#' - Maximum 50 tags per resource
#'
#' - Each resource tag must be unique with a maximum of one value.
#'
#' - Maximum key length: 128 Unicode characters in UTF-8
#'
#' - Maximum value length: 256 Unicode characters in UTF-8
#'
#' - Can use alphanumeric characters (A–Z, a–z, 0–9), and the following
#' characters: + - = . _ : / @@.
#'
#' - Cannot use "aws:" as a prefix for a key.
#'
#' @keywords internal
#'
#' @rdname locationservice_create_map
locationservice_create_map <- function(Configuration, Description = NULL, MapName, PricingPlan = NULL, Tags = NULL) {
  # Build the CreateMap operation descriptor (POST endpoint).
  operation <- new_operation(
    name = "CreateMap",
    http_method = "POST",
    http_path = "/maps/v0/maps",
    paginator = list()
  )
  # Marshal caller arguments into the request shape, fetch the response shape.
  req_params <- .locationservice$create_map_input(Configuration = Configuration, Description = Description, MapName = MapName, PricingPlan = PricingPlan, Tags = Tags)
  resp_shape <- .locationservice$create_map_output()
  # Construct a service client from the current config and dispatch the call.
  svc_client <- .locationservice$service(get_config())
  send_request(new_request(svc_client, operation, req_params, resp_shape))
}
.locationservice$operations$create_map <- locationservice_create_map
#' Creates a place index resource in your Amazon Web Services account
#'
#' @description
#' Creates a place index resource in your Amazon Web Services account. Use a place index resource to geocode addresses and other text queries by using the [`search_place_index_for_text`][locationservice_search_place_index_for_text] operation, and reverse geocode coordinates by using the [`search_place_index_for_position`][locationservice_search_place_index_for_position] operation, and enable autosuggestions by using the [`search_place_index_for_suggestions`][locationservice_search_place_index_for_suggestions] operation.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_create_place_index/](https://www.paws-r-sdk.com/docs/locationservice_create_place_index/) for full documentation.
#'
#' @param DataSource [required] Specifies the geospatial data provider for the new place index.
#'
#' This field is case-sensitive. Enter the valid values as shown. For
#' example, entering `HERE` returns an error.
#'
#' Valid values include:
#'
#' - `Esri` – For additional information about
#' [Esri](https://docs.aws.amazon.com/location/latest/developerguide/esri.html)'s
#' coverage in your region of interest, see [Esri details on geocoding
#' coverage](https://developers.arcgis.com/rest/geocode/api-reference/geocode-coverage.htm).
#'
#' - `Grab` – Grab provides place index functionality for Southeast Asia.
#' For additional information about
#' [GrabMaps](https://docs.aws.amazon.com/location/latest/developerguide/grab.html)'
#' coverage, see [GrabMaps countries and areas
#' covered](https://docs.aws.amazon.com/location/latest/developerguide/grab.html#grab-coverage-area).
#'
#' - `Here` – For additional information about [HERE
#' Technologies](https://docs.aws.amazon.com/location/latest/developerguide/HERE.html)'
#' coverage in your region of interest, see HERE details on geocoding
#' coverage.
#'
#' If you specify HERE Technologies (`Here`) as the data provider, you
#' may not [store
#' results](https://docs.aws.amazon.com/location/latest/APIReference/API_DataSourceConfiguration.html)
#' for locations in Japan. For more information, see the [Amazon Web
#' Services Service Terms](https://aws.amazon.com/service-terms/) for
#' Amazon Location Service.
#'
#' For additional information, see [Data
#' providers](https://docs.aws.amazon.com/location/latest/developerguide/what-is-data-provider.html)
#' in the *Amazon Location Service Developer Guide*.
#' @param DataSourceConfiguration Specifies the data storage option requesting Places.
#' @param Description The optional description for the place index resource.
#' @param IndexName [required] The name of the place index resource.
#'
#' Requirements:
#'
#' - Contain only alphanumeric characters (A–Z, a–z, 0–9), hyphens (-),
#' periods (.), and underscores (_).
#'
#' - Must be a unique place index resource name.
#'
#' - No spaces allowed. For example, `ExamplePlaceIndex`.
#' @param PricingPlan No longer used. If included, the only allowed value is
#' `RequestBasedUsage`.
#' @param Tags Applies one or more tags to the place index resource. A tag is a
#' key-value pair that helps you manage, identify, search, and filter your
#' resources.
#'
#' Format: `"key" : "value"`
#'
#' Restrictions:
#'
#' - Maximum 50 tags per resource.
#'
#' - Each tag key must be unique and must have exactly one associated
#' value.
#'
#' - Maximum key length: 128 Unicode characters in UTF-8.
#'
#' - Maximum value length: 256 Unicode characters in UTF-8.
#'
#' - Can use alphanumeric characters (A–Z, a–z, 0–9), and the following
#' characters: + - = . _ : / @@
#'
#' - Cannot use "aws:" as a prefix for a key.
#'
#' @keywords internal
#'
#' @rdname locationservice_create_place_index
locationservice_create_place_index <- function(DataSource, DataSourceConfiguration = NULL, Description = NULL, IndexName, PricingPlan = NULL, Tags = NULL) {
  # Build the CreatePlaceIndex operation descriptor (POST endpoint).
  operation <- new_operation(
    name = "CreatePlaceIndex",
    http_method = "POST",
    http_path = "/places/v0/indexes",
    paginator = list()
  )
  # Marshal caller arguments into the request shape, fetch the response shape.
  req_params <- .locationservice$create_place_index_input(DataSource = DataSource, DataSourceConfiguration = DataSourceConfiguration, Description = Description, IndexName = IndexName, PricingPlan = PricingPlan, Tags = Tags)
  resp_shape <- .locationservice$create_place_index_output()
  # Construct a service client from the current config and dispatch the call.
  svc_client <- .locationservice$service(get_config())
  send_request(new_request(svc_client, operation, req_params, resp_shape))
}
.locationservice$operations$create_place_index <- locationservice_create_place_index
#' Creates a route calculator resource in your Amazon Web Services account
#'
#' @description
#' Creates a route calculator resource in your Amazon Web Services account.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_create_route_calculator/](https://www.paws-r-sdk.com/docs/locationservice_create_route_calculator/) for full documentation.
#'
#' @param CalculatorName [required] The name of the route calculator resource.
#'
#' Requirements:
#'
#' - Can use alphanumeric characters (A–Z, a–z, 0–9), hyphens (-),
#' periods (.), and underscores (_).
#'
#' - Must be a unique Route calculator resource name.
#'
#' - No spaces allowed. For example, `ExampleRouteCalculator`.
#' @param DataSource [required] Specifies the data provider of traffic and road network data.
#'
#' This field is case-sensitive. Enter the valid values as shown. For
#' example, entering `HERE` returns an error.
#'
#' Valid values include:
#'
#' - `Esri` – For additional information about
#' [Esri](https://docs.aws.amazon.com/location/latest/developerguide/esri.html)'s
#' coverage in your region of interest, see [Esri details on street
#' networks and traffic
#' coverage](https://doc.arcgis.com/en/arcgis-online/reference/network-coverage.htm).
#'
#' Route calculators that use Esri as a data source only calculate
#' routes that are shorter than 400 km.
#'
#' - `Grab` – Grab provides routing functionality for Southeast Asia. For
#' additional information about
#' [GrabMaps](https://docs.aws.amazon.com/location/latest/developerguide/grab.html)'
#' coverage, see [GrabMaps countries and areas
#' covered](https://docs.aws.amazon.com/location/latest/developerguide/grab.html#grab-coverage-area).
#'
#' - `Here` – For additional information about [HERE
#' Technologies](https://docs.aws.amazon.com/location/latest/developerguide/HERE.html)'
#' coverage in your region of interest, see HERE car routing coverage
#' and HERE truck routing coverage.
#'
#' For additional information, see [Data
#' providers](https://docs.aws.amazon.com/location/latest/developerguide/what-is-data-provider.html)
#' in the *Amazon Location Service Developer Guide*.
#' @param Description The optional description for the route calculator resource.
#' @param PricingPlan No longer used. If included, the only allowed value is
#' `RequestBasedUsage`.
#' @param Tags Applies one or more tags to the route calculator resource. A tag
#' is a key-value pair that helps manage, identify, search, and filter your
#' resources by labelling them.
#'
#' - For example: \{ `"tag1" : "value1"`, `"tag2" : "value2"`\}
#'
#' Format: `"key" : "value"`
#'
#' Restrictions:
#'
#' - Maximum 50 tags per resource
#'
#' - Each resource tag must be unique with a maximum of one value.
#'
#' - Maximum key length: 128 Unicode characters in UTF-8
#'
#' - Maximum value length: 256 Unicode characters in UTF-8
#'
#' - Can use alphanumeric characters (A–Z, a–z, 0–9), and the following
#' characters: + - = . _ : / @@.
#'
#' - Cannot use "aws:" as a prefix for a key.
#'
#' @keywords internal
#'
#' @rdname locationservice_create_route_calculator
locationservice_create_route_calculator <- function(CalculatorName, DataSource, Description = NULL, PricingPlan = NULL, Tags = NULL) {
  # Build the CreateRouteCalculator operation descriptor (POST endpoint).
  operation <- new_operation(
    name = "CreateRouteCalculator",
    http_method = "POST",
    http_path = "/routes/v0/calculators",
    paginator = list()
  )
  # Marshal caller arguments into the request shape, fetch the response shape.
  req_params <- .locationservice$create_route_calculator_input(CalculatorName = CalculatorName, DataSource = DataSource, Description = Description, PricingPlan = PricingPlan, Tags = Tags)
  resp_shape <- .locationservice$create_route_calculator_output()
  # Construct a service client from the current config and dispatch the call.
  svc_client <- .locationservice$service(get_config())
  send_request(new_request(svc_client, operation, req_params, resp_shape))
}
.locationservice$operations$create_route_calculator <- locationservice_create_route_calculator
#' Creates a tracker resource in your Amazon Web Services account, which
#' lets you retrieve current and historical location of devices
#'
#' @description
#' Creates a tracker resource in your Amazon Web Services account, which lets you retrieve current and historical location of devices.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_create_tracker/](https://www.paws-r-sdk.com/docs/locationservice_create_tracker/) for full documentation.
#'
#' @param Description An optional description for the tracker resource.
#' @param KmsKeyId A key identifier for an [Amazon Web Services KMS customer managed
#' key](https://docs.aws.amazon.com/kms/latest/developerguide/create-keys.html).
#' Enter a key ID, key ARN, alias name, or alias ARN.
#' @param PositionFiltering Specifies the position filtering for the tracker resource.
#'
#' Valid values:
#'
#' - `TimeBased` - Location updates are evaluated against linked geofence
#' collections, but not every location update is stored. If your update
#' frequency is more often than 30 seconds, only one update per 30
#' seconds is stored for each unique device ID.
#'
#' - `DistanceBased` - If the device has moved less than 30 m (98.4 ft),
#' location updates are ignored. Location updates within this area are
#' neither evaluated against linked geofence collections, nor stored.
#' This helps control costs by reducing the number of geofence
#' evaluations and historical device positions to paginate through.
#' Distance-based filtering can also reduce the effects of GPS noise
#' when displaying device trajectories on a map.
#'
#' - `AccuracyBased` - If the device has moved less than the measured
#' accuracy, location updates are ignored. For example, if two
#' consecutive updates from a device have a horizontal accuracy of 5 m
#' and 10 m, the second update is ignored if the device has moved less
#' than 15 m. Ignored location updates are neither evaluated against
#' linked geofence collections, nor stored. This can reduce the effects
#' of GPS noise when displaying device trajectories on a map, and can
#' help control your costs by reducing the number of geofence
#' evaluations.
#'
#' This field is optional. If not specified, the default value is
#' `TimeBased`.
#' @param PricingPlan No longer used. If included, the only allowed value is
#' `RequestBasedUsage`.
#' @param PricingPlanDataSource This parameter is no longer used.
#' @param Tags Applies one or more tags to the tracker resource. A tag is a
#' key-value pair that helps manage, identify, search, and filter your
#' resources by labelling them.
#'
#' Format: `"key" : "value"`
#'
#' Restrictions:
#'
#' - Maximum 50 tags per resource
#'
#' - Each resource tag must be unique with a maximum of one value.
#'
#' - Maximum key length: 128 Unicode characters in UTF-8
#'
#' - Maximum value length: 256 Unicode characters in UTF-8
#'
#' - Can use alphanumeric characters (A–Z, a–z, 0–9), and the following
#' characters: + - = . _ : / @@.
#'
#' - Cannot use "aws:" as a prefix for a key.
#' @param TrackerName [required] The name for the tracker resource.
#'
#' Requirements:
#'
#' - Contain only alphanumeric characters (A-Z, a-z, 0-9) , hyphens (-),
#' periods (.), and underscores (_).
#'
#' - Must be a unique tracker resource name.
#'
#' - No spaces allowed. For example, `ExampleTracker`.
#'
#' @keywords internal
#'
#' @rdname locationservice_create_tracker
locationservice_create_tracker <- function(Description = NULL, KmsKeyId = NULL, PositionFiltering = NULL, PricingPlan = NULL, PricingPlanDataSource = NULL, Tags = NULL, TrackerName) {
  # Build the CreateTracker operation descriptor (POST endpoint).
  operation <- new_operation(
    name = "CreateTracker",
    http_method = "POST",
    http_path = "/tracking/v0/trackers",
    paginator = list()
  )
  # Marshal caller arguments into the request shape, fetch the response shape.
  req_params <- .locationservice$create_tracker_input(Description = Description, KmsKeyId = KmsKeyId, PositionFiltering = PositionFiltering, PricingPlan = PricingPlan, PricingPlanDataSource = PricingPlanDataSource, Tags = Tags, TrackerName = TrackerName)
  resp_shape <- .locationservice$create_tracker_output()
  # Construct a service client from the current config and dispatch the call.
  svc_client <- .locationservice$service(get_config())
  send_request(new_request(svc_client, operation, req_params, resp_shape))
}
.locationservice$operations$create_tracker <- locationservice_create_tracker
#' Deletes a geofence collection from your Amazon Web Services account
#'
#' @description
#' Deletes a geofence collection from your Amazon Web Services account.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_delete_geofence_collection/](https://www.paws-r-sdk.com/docs/locationservice_delete_geofence_collection/) for full documentation.
#'
#' @param CollectionName [required] The name of the geofence collection to be deleted.
#'
#' @keywords internal
#'
#' @rdname locationservice_delete_geofence_collection
locationservice_delete_geofence_collection <- function(CollectionName) {
  # Build the DeleteGeofenceCollection operation descriptor (DELETE endpoint).
  operation <- new_operation(
    name = "DeleteGeofenceCollection",
    http_method = "DELETE",
    http_path = "/geofencing/v0/collections/{CollectionName}",
    paginator = list()
  )
  # Marshal caller arguments into the request shape, fetch the response shape.
  req_params <- .locationservice$delete_geofence_collection_input(CollectionName = CollectionName)
  resp_shape <- .locationservice$delete_geofence_collection_output()
  # Construct a service client from the current config and dispatch the call.
  svc_client <- .locationservice$service(get_config())
  send_request(new_request(svc_client, operation, req_params, resp_shape))
}
.locationservice$operations$delete_geofence_collection <- locationservice_delete_geofence_collection
#' Deletes the specified API key
#'
#' @description
#' Deletes the specified API key. The API key must have been deactivated more than 90 days previously.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_delete_key/](https://www.paws-r-sdk.com/docs/locationservice_delete_key/) for full documentation.
#'
#' @param KeyName [required] The name of the API key to delete.
#'
#' @keywords internal
#'
#' @rdname locationservice_delete_key
locationservice_delete_key <- function(KeyName) {
  # Build the DeleteKey operation descriptor (DELETE endpoint).
  operation <- new_operation(
    name = "DeleteKey",
    http_method = "DELETE",
    http_path = "/metadata/v0/keys/{KeyName}",
    paginator = list()
  )
  # Marshal caller arguments into the request shape, fetch the response shape.
  req_params <- .locationservice$delete_key_input(KeyName = KeyName)
  resp_shape <- .locationservice$delete_key_output()
  # Construct a service client from the current config and dispatch the call.
  svc_client <- .locationservice$service(get_config())
  send_request(new_request(svc_client, operation, req_params, resp_shape))
}
.locationservice$operations$delete_key <- locationservice_delete_key
#' Deletes a map resource from your Amazon Web Services account
#'
#' @description
#' Deletes a map resource from your Amazon Web Services account.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_delete_map/](https://www.paws-r-sdk.com/docs/locationservice_delete_map/) for full documentation.
#'
#' @param MapName [required] The name of the map resource to be deleted.
#'
#' @keywords internal
#'
#' @rdname locationservice_delete_map
locationservice_delete_map <- function(MapName) {
  # Build the DeleteMap operation descriptor (DELETE endpoint).
  operation <- new_operation(
    name = "DeleteMap",
    http_method = "DELETE",
    http_path = "/maps/v0/maps/{MapName}",
    paginator = list()
  )
  # Marshal caller arguments into the request shape, fetch the response shape.
  req_params <- .locationservice$delete_map_input(MapName = MapName)
  resp_shape <- .locationservice$delete_map_output()
  # Construct a service client from the current config and dispatch the call.
  svc_client <- .locationservice$service(get_config())
  send_request(new_request(svc_client, operation, req_params, resp_shape))
}
.locationservice$operations$delete_map <- locationservice_delete_map
#' Deletes a place index resource from your Amazon Web Services account
#'
#' @description
#' Deletes a place index resource from your Amazon Web Services account.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_delete_place_index/](https://www.paws-r-sdk.com/docs/locationservice_delete_place_index/) for full documentation.
#'
#' @param IndexName [required] The name of the place index resource to be deleted.
#'
#' @keywords internal
#'
#' @rdname locationservice_delete_place_index
locationservice_delete_place_index <- function(IndexName) {
  # Build the DeletePlaceIndex operation descriptor (DELETE endpoint).
  operation <- new_operation(
    name = "DeletePlaceIndex",
    http_method = "DELETE",
    http_path = "/places/v0/indexes/{IndexName}",
    paginator = list()
  )
  # Marshal caller arguments into the request shape, fetch the response shape.
  req_params <- .locationservice$delete_place_index_input(IndexName = IndexName)
  resp_shape <- .locationservice$delete_place_index_output()
  # Construct a service client from the current config and dispatch the call.
  svc_client <- .locationservice$service(get_config())
  send_request(new_request(svc_client, operation, req_params, resp_shape))
}
.locationservice$operations$delete_place_index <- locationservice_delete_place_index
#' Deletes a route calculator resource from your Amazon Web Services
#' account
#'
#' @description
#' Deletes a route calculator resource from your Amazon Web Services account.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_delete_route_calculator/](https://www.paws-r-sdk.com/docs/locationservice_delete_route_calculator/) for full documentation.
#'
#' @param CalculatorName [required] The name of the route calculator resource to be deleted.
#'
#' @keywords internal
#'
#' @rdname locationservice_delete_route_calculator
locationservice_delete_route_calculator <- function(CalculatorName) {
  # Build the DeleteRouteCalculator operation descriptor (DELETE endpoint).
  operation <- new_operation(
    name = "DeleteRouteCalculator",
    http_method = "DELETE",
    http_path = "/routes/v0/calculators/{CalculatorName}",
    paginator = list()
  )
  # Marshal caller arguments into the request shape, fetch the response shape.
  req_params <- .locationservice$delete_route_calculator_input(CalculatorName = CalculatorName)
  resp_shape <- .locationservice$delete_route_calculator_output()
  # Construct a service client from the current config and dispatch the call.
  svc_client <- .locationservice$service(get_config())
  send_request(new_request(svc_client, operation, req_params, resp_shape))
}
.locationservice$operations$delete_route_calculator <- locationservice_delete_route_calculator
#' Deletes a tracker resource from your Amazon Web Services account
#'
#' @description
#' Deletes a tracker resource from your Amazon Web Services account.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_delete_tracker/](https://www.paws-r-sdk.com/docs/locationservice_delete_tracker/) for full documentation.
#'
#' @param TrackerName [required] The name of the tracker resource to be deleted.
#'
#' @keywords internal
#'
#' @rdname locationservice_delete_tracker
locationservice_delete_tracker <- function(TrackerName) {
  # Build the DeleteTracker operation descriptor (DELETE endpoint).
  operation <- new_operation(
    name = "DeleteTracker",
    http_method = "DELETE",
    http_path = "/tracking/v0/trackers/{TrackerName}",
    paginator = list()
  )
  # Marshal caller arguments into the request shape, fetch the response shape.
  req_params <- .locationservice$delete_tracker_input(TrackerName = TrackerName)
  resp_shape <- .locationservice$delete_tracker_output()
  # Construct a service client from the current config and dispatch the call.
  svc_client <- .locationservice$service(get_config())
  send_request(new_request(svc_client, operation, req_params, resp_shape))
}
.locationservice$operations$delete_tracker <- locationservice_delete_tracker
#' Retrieves the geofence collection details
#'
#' @description
#' Retrieves the geofence collection details.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_describe_geofence_collection/](https://www.paws-r-sdk.com/docs/locationservice_describe_geofence_collection/) for full documentation.
#'
#' @param CollectionName [required] The name of the geofence collection.
#'
#' @keywords internal
#'
#' @rdname locationservice_describe_geofence_collection
locationservice_describe_geofence_collection <- function(CollectionName) {
  # Build the DescribeGeofenceCollection operation descriptor (GET endpoint).
  operation <- new_operation(
    name = "DescribeGeofenceCollection",
    http_method = "GET",
    http_path = "/geofencing/v0/collections/{CollectionName}",
    paginator = list()
  )
  # Marshal caller arguments into the request shape, fetch the response shape.
  req_params <- .locationservice$describe_geofence_collection_input(CollectionName = CollectionName)
  resp_shape <- .locationservice$describe_geofence_collection_output()
  # Construct a service client from the current config and dispatch the call.
  svc_client <- .locationservice$service(get_config())
  send_request(new_request(svc_client, operation, req_params, resp_shape))
}
.locationservice$operations$describe_geofence_collection <- locationservice_describe_geofence_collection
#' Retrieves the API key resource details
#'
#' @description
#' Retrieves the API key resource details.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_describe_key/](https://www.paws-r-sdk.com/docs/locationservice_describe_key/) for full documentation.
#'
#' @param KeyName [required] The name of the API key resource.
#'
#' @keywords internal
#'
#' @rdname locationservice_describe_key
locationservice_describe_key <- function(KeyName) {
  # Build the DescribeKey operation descriptor (GET endpoint).
  operation <- new_operation(
    name = "DescribeKey",
    http_method = "GET",
    http_path = "/metadata/v0/keys/{KeyName}",
    paginator = list()
  )
  # Marshal caller arguments into the request shape, fetch the response shape.
  req_params <- .locationservice$describe_key_input(KeyName = KeyName)
  resp_shape <- .locationservice$describe_key_output()
  # Construct a service client from the current config and dispatch the call.
  svc_client <- .locationservice$service(get_config())
  send_request(new_request(svc_client, operation, req_params, resp_shape))
}
.locationservice$operations$describe_key <- locationservice_describe_key
#' Retrieves the map resource details
#'
#' @description
#' Retrieves the map resource details.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_describe_map/](https://www.paws-r-sdk.com/docs/locationservice_describe_map/) for full documentation.
#'
#' @param MapName [required] The name of the map resource.
#'
#' @keywords internal
#'
#' @rdname locationservice_describe_map
locationservice_describe_map <- function(MapName) {
  # Build the DescribeMap operation descriptor (GET endpoint).
  operation <- new_operation(
    name = "DescribeMap",
    http_method = "GET",
    http_path = "/maps/v0/maps/{MapName}",
    paginator = list()
  )
  # Marshal caller arguments into the request shape, fetch the response shape.
  req_params <- .locationservice$describe_map_input(MapName = MapName)
  resp_shape <- .locationservice$describe_map_output()
  # Construct a service client from the current config and dispatch the call.
  svc_client <- .locationservice$service(get_config())
  send_request(new_request(svc_client, operation, req_params, resp_shape))
}
.locationservice$operations$describe_map <- locationservice_describe_map
#' Retrieves the place index resource details
#'
#' @description
#' Retrieves the place index resource details.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_describe_place_index/](https://www.paws-r-sdk.com/docs/locationservice_describe_place_index/) for full documentation.
#'
#' @param IndexName [required] The name of the place index resource.
#'
#' @keywords internal
#'
#' @rdname locationservice_describe_place_index
locationservice_describe_place_index <- function(IndexName) {
  # Build the DescribePlaceIndex operation descriptor (GET endpoint).
  operation <- new_operation(
    name = "DescribePlaceIndex",
    http_method = "GET",
    http_path = "/places/v0/indexes/{IndexName}",
    paginator = list()
  )
  # Marshal caller arguments into the request shape, fetch the response shape.
  req_params <- .locationservice$describe_place_index_input(IndexName = IndexName)
  resp_shape <- .locationservice$describe_place_index_output()
  # Construct a service client from the current config and dispatch the call.
  svc_client <- .locationservice$service(get_config())
  send_request(new_request(svc_client, operation, req_params, resp_shape))
}
.locationservice$operations$describe_place_index <- locationservice_describe_place_index
#' Retrieves the route calculator resource details
#'
#' @description
#' Retrieves the route calculator resource details.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_describe_route_calculator/](https://www.paws-r-sdk.com/docs/locationservice_describe_route_calculator/) for full documentation.
#'
#' @param CalculatorName [required] The name of the route calculator resource.
#'
#' @keywords internal
#'
#' @rdname locationservice_describe_route_calculator
locationservice_describe_route_calculator <- function(CalculatorName) {
  # Build the DescribeRouteCalculator operation descriptor (GET endpoint).
  operation <- new_operation(
    name = "DescribeRouteCalculator",
    http_method = "GET",
    http_path = "/routes/v0/calculators/{CalculatorName}",
    paginator = list()
  )
  # Marshal caller arguments into the request shape, fetch the response shape.
  req_params <- .locationservice$describe_route_calculator_input(CalculatorName = CalculatorName)
  resp_shape <- .locationservice$describe_route_calculator_output()
  # Construct a service client from the current config and dispatch the call.
  svc_client <- .locationservice$service(get_config())
  send_request(new_request(svc_client, operation, req_params, resp_shape))
}
.locationservice$operations$describe_route_calculator <- locationservice_describe_route_calculator
#' Retrieves the tracker resource details
#'
#' @description
#' Retrieves the tracker resource details.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_describe_tracker/](https://www.paws-r-sdk.com/docs/locationservice_describe_tracker/) for full documentation.
#'
#' @param TrackerName [required] The name of the tracker resource.
#'
#' @keywords internal
#'
#' @rdname locationservice_describe_tracker
locationservice_describe_tracker <- function(TrackerName) {
  # Build the DescribeTracker operation descriptor (GET endpoint).
  operation <- new_operation(
    name = "DescribeTracker",
    http_method = "GET",
    http_path = "/tracking/v0/trackers/{TrackerName}",
    paginator = list()
  )
  # Marshal caller arguments into the request shape, fetch the response shape.
  req_params <- .locationservice$describe_tracker_input(TrackerName = TrackerName)
  resp_shape <- .locationservice$describe_tracker_output()
  # Construct a service client from the current config and dispatch the call.
  svc_client <- .locationservice$service(get_config())
  send_request(new_request(svc_client, operation, req_params, resp_shape))
}
.locationservice$operations$describe_tracker <- locationservice_describe_tracker
#' Removes the association between a tracker resource and a geofence
#' collection
#'
#' @description
#' Removes the association between a tracker resource and a geofence collection.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_disassociate_tracker_consumer/](https://www.paws-r-sdk.com/docs/locationservice_disassociate_tracker_consumer/) for full documentation.
#'
#' @param ConsumerArn [required] The Amazon Resource Name (ARN) for the geofence collection to be
#' disassociated from the tracker resource. Used when you need to specify a
#' resource across all Amazon Web Services.
#'
#' - Format example:
#' `arn:aws:geo:region:account-id:geofence-collection/ExampleGeofenceCollectionConsumer`
#' @param TrackerName [required] The name of the tracker resource to be disassociated from the consumer.
#'
#' @keywords internal
#'
#' @rdname locationservice_disassociate_tracker_consumer
locationservice_disassociate_tracker_consumer <- function(ConsumerArn, TrackerName) {
  # Build the DisassociateTrackerConsumer operation descriptor (DELETE endpoint).
  operation <- new_operation(
    name = "DisassociateTrackerConsumer",
    http_method = "DELETE",
    http_path = "/tracking/v0/trackers/{TrackerName}/consumers/{ConsumerArn}",
    paginator = list()
  )
  # Marshal caller arguments into the request shape, fetch the response shape.
  req_params <- .locationservice$disassociate_tracker_consumer_input(ConsumerArn = ConsumerArn, TrackerName = TrackerName)
  resp_shape <- .locationservice$disassociate_tracker_consumer_output()
  # Construct a service client from the current config and dispatch the call.
  svc_client <- .locationservice$service(get_config())
  send_request(new_request(svc_client, operation, req_params, resp_shape))
}
.locationservice$operations$disassociate_tracker_consumer <- locationservice_disassociate_tracker_consumer
#' Retrieves a device's most recent position according to its sample time
#'
#' @description
#' Retrieves a device's most recent position according to its sample time.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_get_device_position/](https://www.paws-r-sdk.com/docs/locationservice_get_device_position/) for full documentation.
#'
#' @param DeviceId [required] The device whose position you want to retrieve.
#' @param TrackerName [required] The tracker resource receiving the position update.
#'
#' @keywords internal
#'
#' @rdname locationservice_get_device_position
locationservice_get_device_position <- function(DeviceId, TrackerName) {
  # Operation descriptor for the GetDevicePosition REST call.
  operation <- new_operation(
    name = "GetDevicePosition",
    http_method = "GET",
    http_path = "/tracking/v0/trackers/{TrackerName}/devices/{DeviceId}/positions/latest",
    paginator = list()
  )
  request_input <- .locationservice$get_device_position_input(DeviceId = DeviceId, TrackerName = TrackerName)
  request_output <- .locationservice$get_device_position_output()
  svc <- .locationservice$service(get_config())
  # Build the signed request and return the parsed service response.
  send_request(new_request(svc, operation, request_input, request_output))
}
.locationservice$operations$get_device_position <- locationservice_get_device_position
#' Retrieves the device position history from a tracker resource within a
#' specified range of time
#'
#' @description
#' Retrieves the device position history from a tracker resource within a specified range of time.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_get_device_position_history/](https://www.paws-r-sdk.com/docs/locationservice_get_device_position_history/) for full documentation.
#'
#' @param DeviceId [required] The device whose position history you want to retrieve.
#' @param EndTimeExclusive Specify the end time for the position history in [ISO
#' 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format:
#' `YYYY-MM-DDThh:mm:ss.sssZ`. By default, the value will be the time that
#' the request is made.
#'
#' Requirement:
#'
#' - The time specified for `EndTimeExclusive` must be after the time for
#' `StartTimeInclusive`.
#' @param MaxResults An optional limit for the number of device positions returned in a
#' single call.
#'
#' Default value: `100`
#' @param NextToken The pagination token specifying which page of results to return in the
#' response. If no token is provided, the default page is the first page.
#'
#' Default value: `null`
#' @param StartTimeInclusive Specify the start time for the position history in [ISO
#' 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format:
#' `YYYY-MM-DDThh:mm:ss.sssZ`. By default, the value will be 24 hours prior
#' to the time that the request is made.
#'
#' Requirement:
#'
#' - The time specified for `StartTimeInclusive` must be before
#' `EndTimeExclusive`.
#' @param TrackerName [required] The tracker resource receiving the request for the device position
#' history.
#'
#' @keywords internal
#'
#' @rdname locationservice_get_device_position_history
locationservice_get_device_position_history <- function(DeviceId, EndTimeExclusive = NULL, MaxResults = NULL, NextToken = NULL, StartTimeInclusive = NULL, TrackerName) {
  # Operation descriptor for the GetDevicePositionHistory REST call.
  operation <- new_operation(
    name = "GetDevicePositionHistory",
    http_method = "POST",
    http_path = "/tracking/v0/trackers/{TrackerName}/devices/{DeviceId}/list-positions",
    paginator = list()
  )
  request_input <- .locationservice$get_device_position_history_input(DeviceId = DeviceId, EndTimeExclusive = EndTimeExclusive, MaxResults = MaxResults, NextToken = NextToken, StartTimeInclusive = StartTimeInclusive, TrackerName = TrackerName)
  request_output <- .locationservice$get_device_position_history_output()
  svc <- .locationservice$service(get_config())
  # Build the signed request and return the parsed service response.
  send_request(new_request(svc, operation, request_input, request_output))
}
.locationservice$operations$get_device_position_history <- locationservice_get_device_position_history
#' Retrieves the geofence details from a geofence collection
#'
#' @description
#' Retrieves the geofence details from a geofence collection.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_get_geofence/](https://www.paws-r-sdk.com/docs/locationservice_get_geofence/) for full documentation.
#'
#' @param CollectionName [required] The geofence collection storing the target geofence.
#' @param GeofenceId [required] The geofence you're retrieving details for.
#'
#' @keywords internal
#'
#' @rdname locationservice_get_geofence
locationservice_get_geofence <- function(CollectionName, GeofenceId) {
  # Operation descriptor for the GetGeofence REST call.
  operation <- new_operation(
    name = "GetGeofence",
    http_method = "GET",
    http_path = "/geofencing/v0/collections/{CollectionName}/geofences/{GeofenceId}",
    paginator = list()
  )
  request_input <- .locationservice$get_geofence_input(CollectionName = CollectionName, GeofenceId = GeofenceId)
  request_output <- .locationservice$get_geofence_output()
  svc <- .locationservice$service(get_config())
  # Build the signed request and return the parsed service response.
  send_request(new_request(svc, operation, request_input, request_output))
}
.locationservice$operations$get_geofence <- locationservice_get_geofence
#' Retrieves glyphs used to display labels on a map
#'
#' @description
#' Retrieves glyphs used to display labels on a map.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_get_map_glyphs/](https://www.paws-r-sdk.com/docs/locationservice_get_map_glyphs/) for full documentation.
#'
#' @param FontStack [required] A comma-separated list of fonts to load glyphs from in order of
#' preference. For example, `Noto Sans Regular, Arial Unicode`.
#'
#' Valid fonts stacks for
#' [Esri](https://docs.aws.amazon.com/location/latest/developerguide/esri.html)
#' styles:
#'
#' - VectorEsriDarkGrayCanvas – `Ubuntu Medium Italic` | `Ubuntu Medium`
#' | `Ubuntu Italic` | `Ubuntu Regular` | `Ubuntu Bold`
#'
#' - VectorEsriLightGrayCanvas – `Ubuntu Italic` | `Ubuntu Regular` |
#' `Ubuntu Light` | `Ubuntu Bold`
#'
#' - VectorEsriTopographic – `Noto Sans Italic` | `Noto Sans Regular` |
#' `Noto Sans Bold` | `Noto Serif Regular` |
#' `Roboto Condensed Light Italic`
#'
#' - VectorEsriStreets – `Arial Regular` | `Arial Italic` | `Arial Bold`
#'
#' - VectorEsriNavigation – `Arial Regular` | `Arial Italic` |
#' `Arial Bold`
#'
#' Valid font stacks for [HERE
#' Technologies](https://docs.aws.amazon.com/location/latest/developerguide/HERE.html)
#' styles:
#'
#' - VectorHereContrast – `Fira GO Regular` | `Fira GO Bold`
#'
#' - VectorHereExplore, VectorHereExploreTruck,
#' HybridHereExploreSatellite – `Fira GO Italic` | `Fira GO Map` |
#' `Fira GO Map Bold` | `Noto Sans CJK JP Bold` |
#' `Noto Sans CJK JP Light` | `Noto Sans CJK JP Regular`
#'
#' Valid font stacks for
#' [GrabMaps](https://docs.aws.amazon.com/location/latest/developerguide/grab.html)
#' styles:
#'
#' - VectorGrabStandardLight, VectorGrabStandardDark –
#' `Noto Sans Regular` | `Noto Sans Medium` | `Noto Sans Bold`
#'
#' Valid font stacks for [Open
#' Data](https://docs.aws.amazon.com/location/latest/developerguide/open-data.html)
#' styles:
#'
#' - VectorOpenDataStandardLight, VectorOpenDataStandardDark,
#' VectorOpenDataVisualizationLight, VectorOpenDataVisualizationDark –
#' `Amazon Ember Regular,Noto Sans Regular` |
#' `Amazon Ember Bold,Noto Sans Bold` |
#' `Amazon Ember Medium,Noto Sans Medium` |
#' `Amazon Ember Regular Italic,Noto Sans Italic` |
#' `Amazon Ember Condensed RC Regular,Noto Sans Regular` |
#' `Amazon Ember Condensed RC Bold,Noto Sans Bold` |
#' `Amazon Ember Regular,Noto Sans Regular,Noto Sans Arabic Regular` |
#' `Amazon Ember Condensed RC Bold,Noto Sans Bold,Noto Sans Arabic Condensed Bold`
#' | `Amazon Ember Bold,Noto Sans Bold,Noto Sans Arabic Bold` |
#' `Amazon Ember Regular Italic,Noto Sans Italic,Noto Sans Arabic Regular`
#' |
#' `Amazon Ember Condensed RC Regular,Noto Sans Regular,Noto Sans Arabic Condensed Regular`
#' | `Amazon Ember Medium,Noto Sans Medium,Noto Sans Arabic Medium`
#'
#' The fonts used by the Open Data map styles are combined fonts that use
#' `Amazon Ember` for most glyphs but `Noto Sans` for glyphs unsupported by
#' `Amazon Ember`.
#' @param FontUnicodeRange [required] A Unicode range of characters to download glyphs for. Each response will
#' contain 256 characters. For example, 0–255 includes all characters from
#' range `U+0000` to `00FF`. Must be aligned to multiples of 256.
#' @param Key The optional [API
#' key](https://docs.aws.amazon.com/location/latest/developerguide/using-apikeys.html)
#' to authorize the request.
#' @param MapName [required] The map resource associated with the glyph file.
#'
#' @keywords internal
#'
#' @rdname locationservice_get_map_glyphs
locationservice_get_map_glyphs <- function(FontStack, FontUnicodeRange, Key = NULL, MapName) {
  # Operation descriptor for the GetMapGlyphs REST call.
  operation <- new_operation(
    name = "GetMapGlyphs",
    http_method = "GET",
    http_path = "/maps/v0/maps/{MapName}/glyphs/{FontStack}/{FontUnicodeRange}",
    paginator = list()
  )
  request_input <- .locationservice$get_map_glyphs_input(FontStack = FontStack, FontUnicodeRange = FontUnicodeRange, Key = Key, MapName = MapName)
  request_output <- .locationservice$get_map_glyphs_output()
  svc <- .locationservice$service(get_config())
  # Build the signed request and return the parsed service response.
  send_request(new_request(svc, operation, request_input, request_output))
}
.locationservice$operations$get_map_glyphs <- locationservice_get_map_glyphs
#' Retrieves the sprite sheet corresponding to a map resource
#'
#' @description
#' Retrieves the sprite sheet corresponding to a map resource. The sprite sheet is a PNG image paired with a JSON document describing the offsets of individual icons that will be displayed on a rendered map.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_get_map_sprites/](https://www.paws-r-sdk.com/docs/locationservice_get_map_sprites/) for full documentation.
#'
#' @param FileName [required] The name of the sprite file. Use the following file names for the sprite
#' sheet:
#'
#' - `sprites.png`
#'
#' - `sprites@@2x.png` for high pixel density displays
#'
#' For the JSON document containing image offsets. Use the following file
#' names:
#'
#' - `sprites.json`
#'
#' - `sprites@@2x.json` for high pixel density displays
#' @param Key The optional [API
#' key](https://docs.aws.amazon.com/location/latest/developerguide/using-apikeys.html)
#' to authorize the request.
#' @param MapName [required] The map resource associated with the sprite file.
#'
#' @keywords internal
#'
#' @rdname locationservice_get_map_sprites
locationservice_get_map_sprites <- function(FileName, Key = NULL, MapName) {
  # Operation descriptor for the GetMapSprites REST call.
  operation <- new_operation(
    name = "GetMapSprites",
    http_method = "GET",
    http_path = "/maps/v0/maps/{MapName}/sprites/{FileName}",
    paginator = list()
  )
  request_input <- .locationservice$get_map_sprites_input(FileName = FileName, Key = Key, MapName = MapName)
  request_output <- .locationservice$get_map_sprites_output()
  svc <- .locationservice$service(get_config())
  # Build the signed request and return the parsed service response.
  send_request(new_request(svc, operation, request_input, request_output))
}
.locationservice$operations$get_map_sprites <- locationservice_get_map_sprites
#' Retrieves the map style descriptor from a map resource
#'
#' @description
#' Retrieves the map style descriptor from a map resource.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_get_map_style_descriptor/](https://www.paws-r-sdk.com/docs/locationservice_get_map_style_descriptor/) for full documentation.
#'
#' @param Key The optional [API
#' key](https://docs.aws.amazon.com/location/latest/developerguide/using-apikeys.html)
#' to authorize the request.
#' @param MapName [required] The map resource to retrieve the style descriptor from.
#'
#' @keywords internal
#'
#' @rdname locationservice_get_map_style_descriptor
locationservice_get_map_style_descriptor <- function(Key = NULL, MapName) {
  # Operation descriptor for the GetMapStyleDescriptor REST call.
  operation <- new_operation(
    name = "GetMapStyleDescriptor",
    http_method = "GET",
    http_path = "/maps/v0/maps/{MapName}/style-descriptor",
    paginator = list()
  )
  request_input <- .locationservice$get_map_style_descriptor_input(Key = Key, MapName = MapName)
  request_output <- .locationservice$get_map_style_descriptor_output()
  svc <- .locationservice$service(get_config())
  # Build the signed request and return the parsed service response.
  send_request(new_request(svc, operation, request_input, request_output))
}
.locationservice$operations$get_map_style_descriptor <- locationservice_get_map_style_descriptor
#' Retrieves a vector data tile from the map resource
#'
#' @description
#' Retrieves a vector data tile from the map resource. Map tiles are used by clients to render a map. They're addressed using a grid arrangement with an X coordinate, Y coordinate, and Z (zoom) level.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_get_map_tile/](https://www.paws-r-sdk.com/docs/locationservice_get_map_tile/) for full documentation.
#'
#' @param Key The optional [API
#' key](https://docs.aws.amazon.com/location/latest/developerguide/using-apikeys.html)
#' to authorize the request.
#' @param MapName [required] The map resource to retrieve the map tiles from.
#' @param X [required] The X axis value for the map tile.
#' @param Y [required] The Y axis value for the map tile.
#' @param Z [required] The zoom value for the map tile.
#'
#' @keywords internal
#'
#' @rdname locationservice_get_map_tile
locationservice_get_map_tile <- function(Key = NULL, MapName, X, Y, Z) {
  # Operation descriptor for the GetMapTile REST call.
  operation <- new_operation(
    name = "GetMapTile",
    http_method = "GET",
    http_path = "/maps/v0/maps/{MapName}/tiles/{Z}/{X}/{Y}",
    paginator = list()
  )
  request_input <- .locationservice$get_map_tile_input(Key = Key, MapName = MapName, X = X, Y = Y, Z = Z)
  request_output <- .locationservice$get_map_tile_output()
  svc <- .locationservice$service(get_config())
  # Build the signed request and return the parsed service response.
  send_request(new_request(svc, operation, request_input, request_output))
}
.locationservice$operations$get_map_tile <- locationservice_get_map_tile
#' Finds a place by its unique ID
#'
#' @description
#' Finds a place by its unique ID. A `PlaceId` is returned by other search operations.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_get_place/](https://www.paws-r-sdk.com/docs/locationservice_get_place/) for full documentation.
#'
#' @param IndexName [required] The name of the place index resource that you want to use for the
#' search.
#' @param Language The preferred language used to return results. The value must be a valid
#' BCP 47 language tag, for example, `en` for English.
#'
#' This setting affects the languages used in the results, but not the
#' results themselves. If no language is specified, or not supported for a
#' particular result, the partner automatically chooses a language for the
#' result.
#'
#' For an example, we'll use the Greek language. You search for a location
#' around Athens, Greece, with the `language` parameter set to `en`. The
#' `city` in the results will most likely be returned as `Athens`.
#'
#' If you set the `language` parameter to `el`, for Greek, then the `city`
#' in the results will more likely be returned as \eqn{A\Theta\eta\nu\alpha}.
#'
#' If the data provider does not have a value for Greek, the result will be
#' in a language that the provider does support.
#' @param PlaceId [required] The identifier of the place to find.
#'
#' @keywords internal
#'
#' @rdname locationservice_get_place
locationservice_get_place <- function(IndexName, Language = NULL, PlaceId) {
  # Operation descriptor for the GetPlace REST call.
  operation <- new_operation(
    name = "GetPlace",
    http_method = "GET",
    http_path = "/places/v0/indexes/{IndexName}/places/{PlaceId}",
    paginator = list()
  )
  request_input <- .locationservice$get_place_input(IndexName = IndexName, Language = Language, PlaceId = PlaceId)
  request_output <- .locationservice$get_place_output()
  svc <- .locationservice$service(get_config())
  # Build the signed request and return the parsed service response.
  send_request(new_request(svc, operation, request_input, request_output))
}
.locationservice$operations$get_place <- locationservice_get_place
#' A batch request to retrieve all device positions
#'
#' @description
#' A batch request to retrieve all device positions.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_list_device_positions/](https://www.paws-r-sdk.com/docs/locationservice_list_device_positions/) for full documentation.
#'
#' @param MaxResults An optional limit for the number of entries returned in a single call.
#'
#' Default value: `100`
#' @param NextToken The pagination token specifying which page of results to return in the
#' response. If no token is provided, the default page is the first page.
#'
#' Default value: `null`
#' @param TrackerName [required] The tracker resource containing the requested devices.
#'
#' @keywords internal
#'
#' @rdname locationservice_list_device_positions
locationservice_list_device_positions <- function(MaxResults = NULL, NextToken = NULL, TrackerName) {
  # Operation descriptor for the ListDevicePositions REST call.
  operation <- new_operation(
    name = "ListDevicePositions",
    http_method = "POST",
    http_path = "/tracking/v0/trackers/{TrackerName}/list-positions",
    paginator = list()
  )
  request_input <- .locationservice$list_device_positions_input(MaxResults = MaxResults, NextToken = NextToken, TrackerName = TrackerName)
  request_output <- .locationservice$list_device_positions_output()
  svc <- .locationservice$service(get_config())
  # Build the signed request and return the parsed service response.
  send_request(new_request(svc, operation, request_input, request_output))
}
.locationservice$operations$list_device_positions <- locationservice_list_device_positions
#' Lists geofence collections in your Amazon Web Services account
#'
#' @description
#' Lists geofence collections in your Amazon Web Services account.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_list_geofence_collections/](https://www.paws-r-sdk.com/docs/locationservice_list_geofence_collections/) for full documentation.
#'
#' @param MaxResults An optional limit for the number of resources returned in a single call.
#'
#' Default value: `100`
#' @param NextToken The pagination token specifying which page of results to return in the
#' response. If no token is provided, the default page is the first page.
#'
#' Default value: `null`
#'
#' @keywords internal
#'
#' @rdname locationservice_list_geofence_collections
locationservice_list_geofence_collections <- function(MaxResults = NULL, NextToken = NULL) {
  # Operation descriptor for the ListGeofenceCollections REST call.
  operation <- new_operation(
    name = "ListGeofenceCollections",
    http_method = "POST",
    http_path = "/geofencing/v0/list-collections",
    paginator = list()
  )
  request_input <- .locationservice$list_geofence_collections_input(MaxResults = MaxResults, NextToken = NextToken)
  request_output <- .locationservice$list_geofence_collections_output()
  svc <- .locationservice$service(get_config())
  # Build the signed request and return the parsed service response.
  send_request(new_request(svc, operation, request_input, request_output))
}
.locationservice$operations$list_geofence_collections <- locationservice_list_geofence_collections
#' Lists geofences stored in a given geofence collection
#'
#' @description
#' Lists geofences stored in a given geofence collection.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_list_geofences/](https://www.paws-r-sdk.com/docs/locationservice_list_geofences/) for full documentation.
#'
#' @param CollectionName [required] The name of the geofence collection storing the list of geofences.
#' @param MaxResults An optional limit for the number of geofences returned in a single call.
#'
#' Default value: `100`
#' @param NextToken The pagination token specifying which page of results to return in the
#' response. If no token is provided, the default page is the first page.
#'
#' Default value: `null`
#'
#' @keywords internal
#'
#' @rdname locationservice_list_geofences
locationservice_list_geofences <- function(CollectionName, MaxResults = NULL, NextToken = NULL) {
  # Operation descriptor for the ListGeofences REST call.
  operation <- new_operation(
    name = "ListGeofences",
    http_method = "POST",
    http_path = "/geofencing/v0/collections/{CollectionName}/list-geofences",
    paginator = list()
  )
  request_input <- .locationservice$list_geofences_input(CollectionName = CollectionName, MaxResults = MaxResults, NextToken = NextToken)
  request_output <- .locationservice$list_geofences_output()
  svc <- .locationservice$service(get_config())
  # Build the signed request and return the parsed service response.
  send_request(new_request(svc, operation, request_input, request_output))
}
.locationservice$operations$list_geofences <- locationservice_list_geofences
#' Lists API key resources in your Amazon Web Services account
#'
#' @description
#' Lists API key resources in your Amazon Web Services account.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_list_keys/](https://www.paws-r-sdk.com/docs/locationservice_list_keys/) for full documentation.
#'
#' @param Filter Optionally filter the list to only `Active` or `Expired` API keys.
#' @param MaxResults An optional limit for the number of resources returned in a single call.
#'
#' Default value: `100`
#' @param NextToken The pagination token specifying which page of results to return in the
#' response. If no token is provided, the default page is the first page.
#'
#' Default value: `null`
#'
#' @keywords internal
#'
#' @rdname locationservice_list_keys
locationservice_list_keys <- function(Filter = NULL, MaxResults = NULL, NextToken = NULL) {
  # Operation descriptor for the ListKeys REST call.
  operation <- new_operation(
    name = "ListKeys",
    http_method = "POST",
    http_path = "/metadata/v0/list-keys",
    paginator = list()
  )
  request_input <- .locationservice$list_keys_input(Filter = Filter, MaxResults = MaxResults, NextToken = NextToken)
  request_output <- .locationservice$list_keys_output()
  svc <- .locationservice$service(get_config())
  # Build the signed request and return the parsed service response.
  send_request(new_request(svc, operation, request_input, request_output))
}
.locationservice$operations$list_keys <- locationservice_list_keys
#' Lists map resources in your Amazon Web Services account
#'
#' @description
#' Lists map resources in your Amazon Web Services account.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_list_maps/](https://www.paws-r-sdk.com/docs/locationservice_list_maps/) for full documentation.
#'
#' @param MaxResults An optional limit for the number of resources returned in a single call.
#'
#' Default value: `100`
#' @param NextToken The pagination token specifying which page of results to return in the
#' response. If no token is provided, the default page is the first page.
#'
#' Default value: `null`
#'
#' @keywords internal
#'
#' @rdname locationservice_list_maps
locationservice_list_maps <- function(MaxResults = NULL, NextToken = NULL) {
  # Operation descriptor for the ListMaps REST call.
  operation <- new_operation(
    name = "ListMaps",
    http_method = "POST",
    http_path = "/maps/v0/list-maps",
    paginator = list()
  )
  request_input <- .locationservice$list_maps_input(MaxResults = MaxResults, NextToken = NextToken)
  request_output <- .locationservice$list_maps_output()
  svc <- .locationservice$service(get_config())
  # Build the signed request and return the parsed service response.
  send_request(new_request(svc, operation, request_input, request_output))
}
.locationservice$operations$list_maps <- locationservice_list_maps
#' Lists place index resources in your Amazon Web Services account
#'
#' @description
#' Lists place index resources in your Amazon Web Services account.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_list_place_indexes/](https://www.paws-r-sdk.com/docs/locationservice_list_place_indexes/) for full documentation.
#'
#' @param MaxResults An optional limit for the maximum number of results returned in a single
#' call.
#'
#' Default value: `100`
#' @param NextToken The pagination token specifying which page of results to return in the
#' response. If no token is provided, the default page is the first page.
#'
#' Default value: `null`
#'
#' @keywords internal
#'
#' @rdname locationservice_list_place_indexes
locationservice_list_place_indexes <- function(MaxResults = NULL, NextToken = NULL) {
  # Operation descriptor for the ListPlaceIndexes REST call.
  operation <- new_operation(
    name = "ListPlaceIndexes",
    http_method = "POST",
    http_path = "/places/v0/list-indexes",
    paginator = list()
  )
  request_input <- .locationservice$list_place_indexes_input(MaxResults = MaxResults, NextToken = NextToken)
  request_output <- .locationservice$list_place_indexes_output()
  svc <- .locationservice$service(get_config())
  # Build the signed request and return the parsed service response.
  send_request(new_request(svc, operation, request_input, request_output))
}
.locationservice$operations$list_place_indexes <- locationservice_list_place_indexes
#' Lists route calculator resources in your Amazon Web Services account
#'
#' @description
#' Lists route calculator resources in your Amazon Web Services account.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_list_route_calculators/](https://www.paws-r-sdk.com/docs/locationservice_list_route_calculators/) for full documentation.
#'
#' @param MaxResults An optional maximum number of results returned in a single call.
#'
#' Default Value: `100`
#' @param NextToken The pagination token specifying which page of results to return in the
#' response. If no token is provided, the default page is the first page.
#'
#' Default Value: `null`
#'
#' @keywords internal
#'
#' @rdname locationservice_list_route_calculators
locationservice_list_route_calculators <- function(MaxResults = NULL, NextToken = NULL) {
  # Operation descriptor for the ListRouteCalculators REST call.
  operation <- new_operation(
    name = "ListRouteCalculators",
    http_method = "POST",
    http_path = "/routes/v0/list-calculators",
    paginator = list()
  )
  request_input <- .locationservice$list_route_calculators_input(MaxResults = MaxResults, NextToken = NextToken)
  request_output <- .locationservice$list_route_calculators_output()
  svc <- .locationservice$service(get_config())
  # Build the signed request and return the parsed service response.
  send_request(new_request(svc, operation, request_input, request_output))
}
.locationservice$operations$list_route_calculators <- locationservice_list_route_calculators
#' Returns a list of tags that are applied to the specified Amazon Location
#' resource
#'
#' @description
#' Returns a list of tags that are applied to the specified Amazon Location resource.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_list_tags_for_resource/](https://www.paws-r-sdk.com/docs/locationservice_list_tags_for_resource/) for full documentation.
#'
#' @param ResourceArn [required] The Amazon Resource Name (ARN) of the resource whose tags you want to
#' retrieve.
#'
#' - Format example:
#' `arn:aws:geo:region:account-id:resourcetype/ExampleResource`
#'
#' @keywords internal
#'
#' @rdname locationservice_list_tags_for_resource
locationservice_list_tags_for_resource <- function(ResourceArn) {
  # Operation descriptor for the ListTagsForResource REST call.
  operation <- new_operation(
    name = "ListTagsForResource",
    http_method = "GET",
    http_path = "/tags/{ResourceArn}",
    paginator = list()
  )
  request_input <- .locationservice$list_tags_for_resource_input(ResourceArn = ResourceArn)
  request_output <- .locationservice$list_tags_for_resource_output()
  svc <- .locationservice$service(get_config())
  # Build the signed request and return the parsed service response.
  send_request(new_request(svc, operation, request_input, request_output))
}
.locationservice$operations$list_tags_for_resource <- locationservice_list_tags_for_resource
#' Lists geofence collections currently associated to the given tracker
#' resource
#'
#' @description
#' Lists geofence collections currently associated to the given tracker resource.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_list_tracker_consumers/](https://www.paws-r-sdk.com/docs/locationservice_list_tracker_consumers/) for full documentation.
#'
#' @param MaxResults An optional limit for the number of resources returned in a single call.
#'
#' Default value: `100`
#' @param NextToken The pagination token specifying which page of results to return in the
#' response. If no token is provided, the default page is the first page.
#'
#' Default value: `null`
#' @param TrackerName [required] The tracker resource whose associated geofence collections you want to
#' list.
#'
#' @keywords internal
#'
#' @rdname locationservice_list_tracker_consumers
locationservice_list_tracker_consumers <- function(MaxResults = NULL, NextToken = NULL, TrackerName) {
  # Operation descriptor for the ListTrackerConsumers REST call.
  operation <- new_operation(
    name = "ListTrackerConsumers",
    http_method = "POST",
    http_path = "/tracking/v0/trackers/{TrackerName}/list-consumers",
    paginator = list()
  )
  request_input <- .locationservice$list_tracker_consumers_input(MaxResults = MaxResults, NextToken = NextToken, TrackerName = TrackerName)
  request_output <- .locationservice$list_tracker_consumers_output()
  svc <- .locationservice$service(get_config())
  # Build the signed request and return the parsed service response.
  send_request(new_request(svc, operation, request_input, request_output))
}
.locationservice$operations$list_tracker_consumers <- locationservice_list_tracker_consumers
#' Lists tracker resources in your Amazon Web Services account
#'
#' @description
#' Lists tracker resources in your Amazon Web Services account.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_list_trackers/](https://www.paws-r-sdk.com/docs/locationservice_list_trackers/) for full documentation.
#'
#' @param MaxResults An optional limit for the number of resources returned in a single call.
#'
#' Default value: `100`
#' @param NextToken The pagination token specifying which page of results to return in the
#' response. If no token is provided, the default page is the first page.
#'
#' Default value: `null`
#'
#' @keywords internal
#'
#' @rdname locationservice_list_trackers
locationservice_list_trackers <- function(MaxResults = NULL, NextToken = NULL) {
  # Operation descriptor for the ListTrackers REST call.
  operation <- new_operation(
    name = "ListTrackers",
    http_method = "POST",
    http_path = "/tracking/v0/list-trackers",
    paginator = list()
  )
  request_input <- .locationservice$list_trackers_input(MaxResults = MaxResults, NextToken = NextToken)
  request_output <- .locationservice$list_trackers_output()
  svc <- .locationservice$service(get_config())
  # Build the signed request and return the parsed service response.
  send_request(new_request(svc, operation, request_input, request_output))
}
.locationservice$operations$list_trackers <- locationservice_list_trackers
#' Stores a geofence geometry in a given geofence collection, or updates
#' the geometry of an existing geofence if a geofence ID is included in the
#' request
#'
#' @description
#' Stores a geofence geometry in a given geofence collection, or updates the geometry of an existing geofence if a geofence ID is included in the request.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_put_geofence/](https://www.paws-r-sdk.com/docs/locationservice_put_geofence/) for full documentation.
#'
#' @param CollectionName [required] The geofence collection to store the geofence in.
#' @param GeofenceId [required] An identifier for the geofence. For example, `ExampleGeofence-1`.
#' @param Geometry [required] Contains the details to specify the position of the geofence. Can be
#' either a polygon or a circle. Including both will return a validation
#' error.
#'
#' Each [geofence
#' polygon](https://docs.aws.amazon.com/location/latest/APIReference/API_GeofenceGeometry.html)
#' can have a maximum of 1,000 vertices.
#'
#' @keywords internal
#'
#' @rdname locationservice_put_geofence
locationservice_put_geofence <- function(CollectionName, GeofenceId, Geometry) {
  # Operation descriptor for the PutGeofence REST call.
  operation <- new_operation(
    name = "PutGeofence",
    http_method = "PUT",
    http_path = "/geofencing/v0/collections/{CollectionName}/geofences/{GeofenceId}",
    paginator = list()
  )
  request_input <- .locationservice$put_geofence_input(CollectionName = CollectionName, GeofenceId = GeofenceId, Geometry = Geometry)
  request_output <- .locationservice$put_geofence_output()
  svc <- .locationservice$service(get_config())
  # Build the signed request and return the parsed service response.
  send_request(new_request(svc, operation, request_input, request_output))
}
.locationservice$operations$put_geofence <- locationservice_put_geofence
#' Reverse geocodes a given coordinate and returns a legible address
#'
#' @description
#' Reverse geocodes a given coordinate and returns a legible address. Allows you to search for Places or points of interest near a given position.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_search_place_index_for_position/](https://www.paws-r-sdk.com/docs/locationservice_search_place_index_for_position/) for full documentation.
#'
#' @param IndexName [required] The name of the place index resource you want to use for the search.
#' @param Language The preferred language used to return results. The value must be a valid
#' BCP 47 language tag, for example, `en` for English.
#'
#' This setting affects the languages used in the results, but not the
#' results themselves. If no language is specified, or not supported for a
#' particular result, the partner automatically chooses a language for the
#' result.
#'
#' For an example, we'll use the Greek language. You search for a location
#' around Athens, Greece, with the `language` parameter set to `en`. The
#' `city` in the results will most likely be returned as `Athens`.
#'
#' If you set the `language` parameter to `el`, for Greek, then the `city`
#' in the results will more likely be returned as \eqn{A\Theta\eta\nu\alpha}.
#'
#' If the data provider does not have a value for Greek, the result will be
#' in a language that the provider does support.
#' @param MaxResults An optional parameter. The maximum number of results returned per
#' request.
#'
#' Default value: `50`
#' @param Position [required] Specifies the longitude and latitude of the position to query.
#'
#' This parameter must contain a pair of numbers. The first number
#' represents the X coordinate, or longitude; the second number represents
#' the Y coordinate, or latitude.
#'
#' For example, `[-123.1174, 49.2847]` represents a position with longitude
#' `-123.1174` and latitude `49.2847`.
#'
#' @keywords internal
#'
#' @rdname locationservice_search_place_index_for_position
locationservice_search_place_index_for_position <- function(IndexName, Language = NULL, MaxResults = NULL, Position) {
  # Describe the SearchPlaceIndexForPosition REST operation.
  operation <- new_operation(
    name = "SearchPlaceIndexForPosition",
    http_method = "POST",
    http_path = "/places/v0/indexes/{IndexName}/search/position",
    paginator = list()
  )
  # Serialize caller arguments into the request shape; fetch the response shape.
  payload <- .locationservice$search_place_index_for_position_input(IndexName = IndexName, Language = Language, MaxResults = MaxResults, Position = Position)
  shape <- .locationservice$search_place_index_for_position_output()
  # Resolve a service client from the current paws configuration and send.
  cfg <- get_config()
  client <- .locationservice$service(cfg)
  send_request(new_request(client, operation, payload, shape))
}
.locationservice$operations$search_place_index_for_position <- locationservice_search_place_index_for_position
#' Generates suggestions for addresses and points of interest based on
#' partial or misspelled free-form text
#'
#' @description
#' Generates suggestions for addresses and points of interest based on partial or misspelled free-form text. This operation is also known as autocomplete, autosuggest, or fuzzy matching.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_search_place_index_for_suggestions/](https://www.paws-r-sdk.com/docs/locationservice_search_place_index_for_suggestions/) for full documentation.
#'
#' @param BiasPosition An optional parameter that indicates a preference for place suggestions
#' that are closer to a specified position.
#'
#' If provided, this parameter must contain a pair of numbers. The first
#' number represents the X coordinate, or longitude; the second number
#' represents the Y coordinate, or latitude.
#'
#' For example, `[-123.1174, 49.2847]` represents the position with
#' longitude `-123.1174` and latitude `49.2847`.
#'
#' `BiasPosition` and `FilterBBox` are mutually exclusive. Specifying both
#' options results in an error.
#' @param FilterBBox An optional parameter that limits the search results by returning only
#' suggestions within a specified bounding box.
#'
#' If provided, this parameter must contain a total of four consecutive
#' numbers in two pairs. The first pair of numbers represents the X and Y
#' coordinates (longitude and latitude, respectively) of the southwest
#' corner of the bounding box; the second pair of numbers represents the X
#' and Y coordinates (longitude and latitude, respectively) of the
#' northeast corner of the bounding box.
#'
#' For example, `[-12.7935, -37.4835, -12.0684, -36.9542]` represents a
#' bounding box where the southwest corner has longitude `-12.7935` and
#' latitude `-37.4835`, and the northeast corner has longitude `-12.0684`
#' and latitude `-36.9542`.
#'
#' `FilterBBox` and `BiasPosition` are mutually exclusive. Specifying both
#' options results in an error.
#' @param FilterCountries An optional parameter that limits the search results by returning only
#' suggestions within the provided list of countries.
#'
#' - Use the [ISO 3166](https://www.iso.org/iso-3166-country-codes.html)
#' 3-digit country code. For example, Australia uses three upper-case
#' characters: `AUS`.
#' @param IndexName [required] The name of the place index resource you want to use for the search.
#' @param Language The preferred language used to return results. The value must be a valid
#' BCP 47 language tag, for example, `en` for English.
#'
#' This setting affects the languages used in the results. If no language
#' is specified, or not supported for a particular result, the partner
#' automatically chooses a language for the result.
#'
#' For an example, we'll use the Greek language. You search for
#' `Athens, Gr` to get suggestions with the `language` parameter set to
#' `en`. The results found will most likely be returned as
#' `Athens, Greece`.
#'
#' If you set the `language` parameter to `el`, for Greek, then the result
#' found will more likely be returned as \eqn{A\Theta\eta\nu\alpha, E\lambda\lambda\alpha\delta\alpha}.
#'
#' If the data provider does not have a value for Greek, the result will be
#' in a language that the provider does support.
#' @param MaxResults An optional parameter. The maximum number of results returned per
#' request.
#'
#' The default: `5`
#' @param Text [required] The free-form partial text to use to generate place suggestions. For
#' example, `eiffel tow`.
#'
#' @keywords internal
#'
#' @rdname locationservice_search_place_index_for_suggestions
locationservice_search_place_index_for_suggestions <- function(BiasPosition = NULL, FilterBBox = NULL, FilterCountries = NULL, IndexName, Language = NULL, MaxResults = NULL, Text) {
  # Describe the SearchPlaceIndexForSuggestions REST operation.
  operation <- new_operation(
    name = "SearchPlaceIndexForSuggestions",
    http_method = "POST",
    http_path = "/places/v0/indexes/{IndexName}/search/suggestions",
    paginator = list()
  )
  # Serialize caller arguments into the request shape; fetch the response shape.
  payload <- .locationservice$search_place_index_for_suggestions_input(BiasPosition = BiasPosition, FilterBBox = FilterBBox, FilterCountries = FilterCountries, IndexName = IndexName, Language = Language, MaxResults = MaxResults, Text = Text)
  shape <- .locationservice$search_place_index_for_suggestions_output()
  # Resolve a service client from the current paws configuration and send.
  cfg <- get_config()
  client <- .locationservice$service(cfg)
  send_request(new_request(client, operation, payload, shape))
}
.locationservice$operations$search_place_index_for_suggestions <- locationservice_search_place_index_for_suggestions
#' Geocodes free-form text, such as an address, name, city, or region to
#' allow you to search for Places or points of interest
#'
#' @description
#' Geocodes free-form text, such as an address, name, city, or region to allow you to search for Places or points of interest.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_search_place_index_for_text/](https://www.paws-r-sdk.com/docs/locationservice_search_place_index_for_text/) for full documentation.
#'
#' @param BiasPosition An optional parameter that indicates a preference for places that are
#' closer to a specified position.
#'
#' If provided, this parameter must contain a pair of numbers. The first
#' number represents the X coordinate, or longitude; the second number
#' represents the Y coordinate, or latitude.
#'
#' For example, `[-123.1174, 49.2847]` represents the position with
#' longitude `-123.1174` and latitude `49.2847`.
#'
#' `BiasPosition` and `FilterBBox` are mutually exclusive. Specifying both
#' options results in an error.
#' @param FilterBBox An optional parameter that limits the search results by returning only
#' places that are within the provided bounding box.
#'
#' If provided, this parameter must contain a total of four consecutive
#' numbers in two pairs. The first pair of numbers represents the X and Y
#' coordinates (longitude and latitude, respectively) of the southwest
#' corner of the bounding box; the second pair of numbers represents the X
#' and Y coordinates (longitude and latitude, respectively) of the
#' northeast corner of the bounding box.
#'
#' For example, `[-12.7935, -37.4835, -12.0684, -36.9542]` represents a
#' bounding box where the southwest corner has longitude `-12.7935` and
#' latitude `-37.4835`, and the northeast corner has longitude `-12.0684`
#' and latitude `-36.9542`.
#'
#' `FilterBBox` and `BiasPosition` are mutually exclusive. Specifying both
#' options results in an error.
#' @param FilterCountries An optional parameter that limits the search results by returning only
#' places that are in a specified list of countries.
#'
#' - Valid values include [ISO
#' 3166](https://www.iso.org/iso-3166-country-codes.html) 3-digit
#' country codes. For example, Australia uses three upper-case
#' characters: `AUS`.
#' @param IndexName [required] The name of the place index resource you want to use for the search.
#' @param Language The preferred language used to return results. The value must be a valid
#' BCP 47 language tag, for example, `en` for English.
#'
#' This setting affects the languages used in the results, but not the
#' results themselves. If no language is specified, or not supported for a
#' particular result, the partner automatically chooses a language for the
#' result.
#'
#' For an example, we'll use the Greek language. You search for
#' `Athens, Greece`, with the `language` parameter set to `en`. The result
#' found will most likely be returned as `Athens`.
#'
#' If you set the `language` parameter to `el`, for Greek, then the result
#' found will more likely be returned as \eqn{A\Theta\eta\nu\alpha}.
#'
#' If the data provider does not have a value for Greek, the result will be
#' in a language that the provider does support.
#' @param MaxResults An optional parameter. The maximum number of results returned per
#' request.
#'
#' The default: `50`
#' @param Text [required] The address, name, city, or region to be used in the search in free-form
#' text format. For example, `123 Any Street`.
#'
#' @keywords internal
#'
#' @rdname locationservice_search_place_index_for_text
locationservice_search_place_index_for_text <- function(BiasPosition = NULL, FilterBBox = NULL, FilterCountries = NULL, IndexName, Language = NULL, MaxResults = NULL, Text) {
  # Describe the SearchPlaceIndexForText REST operation.
  operation <- new_operation(
    name = "SearchPlaceIndexForText",
    http_method = "POST",
    http_path = "/places/v0/indexes/{IndexName}/search/text",
    paginator = list()
  )
  # Serialize caller arguments into the request shape; fetch the response shape.
  payload <- .locationservice$search_place_index_for_text_input(BiasPosition = BiasPosition, FilterBBox = FilterBBox, FilterCountries = FilterCountries, IndexName = IndexName, Language = Language, MaxResults = MaxResults, Text = Text)
  shape <- .locationservice$search_place_index_for_text_output()
  # Resolve a service client from the current paws configuration and send.
  cfg <- get_config()
  client <- .locationservice$service(cfg)
  send_request(new_request(client, operation, payload, shape))
}
.locationservice$operations$search_place_index_for_text <- locationservice_search_place_index_for_text
#' Assigns one or more tags (key-value pairs) to the specified Amazon
#' Location Service resource
#'
#' @description
#' Assigns one or more tags (key-value pairs) to the specified Amazon Location Service resource.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_tag_resource/](https://www.paws-r-sdk.com/docs/locationservice_tag_resource/) for full documentation.
#'
#' @param ResourceArn [required] The Amazon Resource Name (ARN) of the resource whose tags you want to
#' update.
#'
#' - Format example:
#' `arn:aws:geo:region:account-id:resourcetype/ExampleResource`
#' @param Tags [required] Applies one or more tags to specific resource. A tag is a key-value pair
#' that helps you manage, identify, search, and filter your resources.
#'
#' Format: `"key" : "value"`
#'
#' Restrictions:
#'
#' - Maximum 50 tags per resource.
#'
#' - Each tag key must be unique and must have exactly one associated
#' value.
#'
#' - Maximum key length: 128 Unicode characters in UTF-8.
#'
#' - Maximum value length: 256 Unicode characters in UTF-8.
#'
#' - Can use alphanumeric characters (A–Z, a–z, 0–9), and the following
#' characters: + - = . _ : / @@
#'
#' - Cannot use "aws:" as a prefix for a key.
#'
#' @keywords internal
#'
#' @rdname locationservice_tag_resource
locationservice_tag_resource <- function(ResourceArn, Tags) {
  # Describe the TagResource REST operation.
  operation <- new_operation(
    name = "TagResource",
    http_method = "POST",
    http_path = "/tags/{ResourceArn}",
    paginator = list()
  )
  # Serialize caller arguments into the request shape; fetch the response shape.
  payload <- .locationservice$tag_resource_input(ResourceArn = ResourceArn, Tags = Tags)
  shape <- .locationservice$tag_resource_output()
  # Resolve a service client from the current paws configuration and send.
  cfg <- get_config()
  client <- .locationservice$service(cfg)
  send_request(new_request(client, operation, payload, shape))
}
.locationservice$operations$tag_resource <- locationservice_tag_resource
#' Removes one or more tags from the specified Amazon Location resource
#'
#' @description
#' Removes one or more tags from the specified Amazon Location resource.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_untag_resource/](https://www.paws-r-sdk.com/docs/locationservice_untag_resource/) for full documentation.
#'
#' @param ResourceArn [required] The Amazon Resource Name (ARN) of the resource from which you want to
#' remove tags.
#'
#' - Format example:
#' `arn:aws:geo:region:account-id:resourcetype/ExampleResource`
#' @param TagKeys [required] The list of tag keys to remove from the specified resource.
#'
#' @keywords internal
#'
#' @rdname locationservice_untag_resource
locationservice_untag_resource <- function(ResourceArn, TagKeys) {
  # Describe the UntagResource REST operation.
  operation <- new_operation(
    name = "UntagResource",
    http_method = "DELETE",
    http_path = "/tags/{ResourceArn}",
    paginator = list()
  )
  # Serialize caller arguments into the request shape; fetch the response shape.
  payload <- .locationservice$untag_resource_input(ResourceArn = ResourceArn, TagKeys = TagKeys)
  shape <- .locationservice$untag_resource_output()
  # Resolve a service client from the current paws configuration and send.
  cfg <- get_config()
  client <- .locationservice$service(cfg)
  send_request(new_request(client, operation, payload, shape))
}
.locationservice$operations$untag_resource <- locationservice_untag_resource
#' Updates the specified properties of a given geofence collection
#'
#' @description
#' Updates the specified properties of a given geofence collection.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_update_geofence_collection/](https://www.paws-r-sdk.com/docs/locationservice_update_geofence_collection/) for full documentation.
#'
#' @param CollectionName [required] The name of the geofence collection to update.
#' @param Description Updates the description for the geofence collection.
#' @param PricingPlan No longer used. If included, the only allowed value is
#' `RequestBasedUsage`.
#' @param PricingPlanDataSource This parameter is no longer used.
#'
#' @keywords internal
#'
#' @rdname locationservice_update_geofence_collection
locationservice_update_geofence_collection <- function(CollectionName, Description = NULL, PricingPlan = NULL, PricingPlanDataSource = NULL) {
  # Describe the UpdateGeofenceCollection REST operation.
  operation <- new_operation(
    name = "UpdateGeofenceCollection",
    http_method = "PATCH",
    http_path = "/geofencing/v0/collections/{CollectionName}",
    paginator = list()
  )
  # Serialize caller arguments into the request shape; fetch the response shape.
  payload <- .locationservice$update_geofence_collection_input(CollectionName = CollectionName, Description = Description, PricingPlan = PricingPlan, PricingPlanDataSource = PricingPlanDataSource)
  shape <- .locationservice$update_geofence_collection_output()
  # Resolve a service client from the current paws configuration and send.
  cfg <- get_config()
  client <- .locationservice$service(cfg)
  send_request(new_request(client, operation, payload, shape))
}
.locationservice$operations$update_geofence_collection <- locationservice_update_geofence_collection
#' Updates the specified properties of a given API key resource
#'
#' @description
#' Updates the specified properties of a given API key resource.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_update_key/](https://www.paws-r-sdk.com/docs/locationservice_update_key/) for full documentation.
#'
#' @param Description Updates the description for the API key resource.
#' @param ExpireTime Updates the timestamp for when the API key resource will expire in [ISO
#' 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format:
#' `YYYY-MM-DDThh:mm:ss.sssZ`.
#' @param ForceUpdate The boolean flag to be included for updating `ExpireTime` or
#' `Restrictions` details.
#'
#' Must be set to `true` to update an API key resource that has been used
#' in the past 7 days.
#'
#' `False` if force update is not preferred
#'
#' Default value: `False`
#' @param KeyName [required] The name of the API key resource to update.
#' @param NoExpiry Whether the API key should expire. Set to `true` to set the API key to
#' have no expiration time.
#' @param Restrictions Updates the API key restrictions for the API key resource.
#'
#' @keywords internal
#'
#' @rdname locationservice_update_key
locationservice_update_key <- function(Description = NULL, ExpireTime = NULL, ForceUpdate = NULL, KeyName, NoExpiry = NULL, Restrictions = NULL) {
  # Describe the UpdateKey REST operation.
  operation <- new_operation(
    name = "UpdateKey",
    http_method = "PATCH",
    http_path = "/metadata/v0/keys/{KeyName}",
    paginator = list()
  )
  # Serialize caller arguments into the request shape; fetch the response shape.
  payload <- .locationservice$update_key_input(Description = Description, ExpireTime = ExpireTime, ForceUpdate = ForceUpdate, KeyName = KeyName, NoExpiry = NoExpiry, Restrictions = Restrictions)
  shape <- .locationservice$update_key_output()
  # Resolve a service client from the current paws configuration and send.
  cfg <- get_config()
  client <- .locationservice$service(cfg)
  send_request(new_request(client, operation, payload, shape))
}
.locationservice$operations$update_key <- locationservice_update_key
#' Updates the specified properties of a given map resource
#'
#' @description
#' Updates the specified properties of a given map resource.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_update_map/](https://www.paws-r-sdk.com/docs/locationservice_update_map/) for full documentation.
#'
#' @param ConfigurationUpdate Updates the parts of the map configuration that can be updated,
#' including the political view.
#' @param Description Updates the description for the map resource.
#' @param MapName [required] The name of the map resource to update.
#' @param PricingPlan No longer used. If included, the only allowed value is
#' `RequestBasedUsage`.
#'
#' @keywords internal
#'
#' @rdname locationservice_update_map
locationservice_update_map <- function(ConfigurationUpdate = NULL, Description = NULL, MapName, PricingPlan = NULL) {
  # Describe the UpdateMap REST operation.
  operation <- new_operation(
    name = "UpdateMap",
    http_method = "PATCH",
    http_path = "/maps/v0/maps/{MapName}",
    paginator = list()
  )
  # Serialize caller arguments into the request shape; fetch the response shape.
  payload <- .locationservice$update_map_input(ConfigurationUpdate = ConfigurationUpdate, Description = Description, MapName = MapName, PricingPlan = PricingPlan)
  shape <- .locationservice$update_map_output()
  # Resolve a service client from the current paws configuration and send.
  cfg <- get_config()
  client <- .locationservice$service(cfg)
  send_request(new_request(client, operation, payload, shape))
}
.locationservice$operations$update_map <- locationservice_update_map
#' Updates the specified properties of a given place index resource
#'
#' @description
#' Updates the specified properties of a given place index resource.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_update_place_index/](https://www.paws-r-sdk.com/docs/locationservice_update_place_index/) for full documentation.
#'
#' @param DataSourceConfiguration Updates the data storage option for the place index resource.
#' @param Description Updates the description for the place index resource.
#' @param IndexName [required] The name of the place index resource to update.
#' @param PricingPlan No longer used. If included, the only allowed value is
#' `RequestBasedUsage`.
#'
#' @keywords internal
#'
#' @rdname locationservice_update_place_index
locationservice_update_place_index <- function(DataSourceConfiguration = NULL, Description = NULL, IndexName, PricingPlan = NULL) {
  # Describe the UpdatePlaceIndex REST operation.
  operation <- new_operation(
    name = "UpdatePlaceIndex",
    http_method = "PATCH",
    http_path = "/places/v0/indexes/{IndexName}",
    paginator = list()
  )
  # Serialize caller arguments into the request shape; fetch the response shape.
  payload <- .locationservice$update_place_index_input(DataSourceConfiguration = DataSourceConfiguration, Description = Description, IndexName = IndexName, PricingPlan = PricingPlan)
  shape <- .locationservice$update_place_index_output()
  # Resolve a service client from the current paws configuration and send.
  cfg <- get_config()
  client <- .locationservice$service(cfg)
  send_request(new_request(client, operation, payload, shape))
}
.locationservice$operations$update_place_index <- locationservice_update_place_index
#' Updates the specified properties for a given route calculator resource
#'
#' @description
#' Updates the specified properties for a given route calculator resource.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_update_route_calculator/](https://www.paws-r-sdk.com/docs/locationservice_update_route_calculator/) for full documentation.
#'
#' @param CalculatorName [required] The name of the route calculator resource to update.
#' @param Description Updates the description for the route calculator resource.
#' @param PricingPlan No longer used. If included, the only allowed value is
#' `RequestBasedUsage`.
#'
#' @keywords internal
#'
#' @rdname locationservice_update_route_calculator
locationservice_update_route_calculator <- function(CalculatorName, Description = NULL, PricingPlan = NULL) {
  # Describe the UpdateRouteCalculator REST operation.
  operation <- new_operation(
    name = "UpdateRouteCalculator",
    http_method = "PATCH",
    http_path = "/routes/v0/calculators/{CalculatorName}",
    paginator = list()
  )
  # Serialize caller arguments into the request shape; fetch the response shape.
  payload <- .locationservice$update_route_calculator_input(CalculatorName = CalculatorName, Description = Description, PricingPlan = PricingPlan)
  shape <- .locationservice$update_route_calculator_output()
  # Resolve a service client from the current paws configuration and send.
  cfg <- get_config()
  client <- .locationservice$service(cfg)
  send_request(new_request(client, operation, payload, shape))
}
.locationservice$operations$update_route_calculator <- locationservice_update_route_calculator
#' Updates the specified properties of a given tracker resource
#'
#' @description
#' Updates the specified properties of a given tracker resource.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_update_tracker/](https://www.paws-r-sdk.com/docs/locationservice_update_tracker/) for full documentation.
#'
#' @param Description Updates the description for the tracker resource.
#' @param PositionFiltering Updates the position filtering for the tracker resource.
#'
#' Valid values:
#'
#' - `TimeBased` - Location updates are evaluated against linked geofence
#' collections, but not every location update is stored. If your update
#' frequency is more often than 30 seconds, only one update per 30
#' seconds is stored for each unique device ID.
#'
#' - `DistanceBased` - If the device has moved less than 30 m (98.4 ft),
#' location updates are ignored. Location updates within this distance
#' are neither evaluated against linked geofence collections, nor
#' stored. This helps control costs by reducing the number of geofence
#' evaluations and historical device positions to paginate through.
#' Distance-based filtering can also reduce the effects of GPS noise
#' when displaying device trajectories on a map.
#'
#' - `AccuracyBased` - If the device has moved less than the measured
#' accuracy, location updates are ignored. For example, if two
#' consecutive updates from a device have a horizontal accuracy of 5 m
#' and 10 m, the second update is ignored if the device has moved less
#' than 15 m. Ignored location updates are neither evaluated against
#' linked geofence collections, nor stored. This helps reduce the
#' effects of GPS noise when displaying device trajectories on a map,
#' and can help control costs by reducing the number of geofence
#' evaluations.
#' @param PricingPlan No longer used. If included, the only allowed value is
#' `RequestBasedUsage`.
#' @param PricingPlanDataSource This parameter is no longer used.
#' @param TrackerName [required] The name of the tracker resource to update.
#'
#' @keywords internal
#'
#' @rdname locationservice_update_tracker
locationservice_update_tracker <- function(Description = NULL, PositionFiltering = NULL, PricingPlan = NULL, PricingPlanDataSource = NULL, TrackerName) {
  # Describe the UpdateTracker REST operation.
  operation <- new_operation(
    name = "UpdateTracker",
    http_method = "PATCH",
    http_path = "/tracking/v0/trackers/{TrackerName}",
    paginator = list()
  )
  # Serialize caller arguments into the request shape; fetch the response shape.
  payload <- .locationservice$update_tracker_input(Description = Description, PositionFiltering = PositionFiltering, PricingPlan = PricingPlan, PricingPlanDataSource = PricingPlanDataSource, TrackerName = TrackerName)
  shape <- .locationservice$update_tracker_output()
  # Resolve a service client from the current paws configuration and send.
  cfg <- get_config()
  client <- .locationservice$service(cfg)
  send_request(new_request(client, operation, payload, shape))
}
.locationservice$operations$update_tracker <- locationservice_update_tracker
| /R/locationservice_operations.R | no_license | cran/paws.application.integration | R | false | false | 119,124 | r | # This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common get_config new_operation new_request send_request
#' @include locationservice_service.R
NULL
#' Creates an association between a geofence collection and a tracker
#' resource
#'
#' @description
#' Creates an association between a geofence collection and a tracker resource. This allows the tracker resource to communicate location data to the linked geofence collection.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_associate_tracker_consumer/](https://www.paws-r-sdk.com/docs/locationservice_associate_tracker_consumer/) for full documentation.
#'
#' @param ConsumerArn [required] The Amazon Resource Name (ARN) for the geofence collection to be
#' associated to tracker resource. Used when you need to specify a resource
#' across all Amazon Web Services.
#'
#' - Format example:
#' `arn:aws:geo:region:account-id:geofence-collection/ExampleGeofenceCollectionConsumer`
#' @param TrackerName [required] The name of the tracker resource to be associated with a geofence
#' collection.
#'
#' @keywords internal
#'
#' @rdname locationservice_associate_tracker_consumer
locationservice_associate_tracker_consumer <- function(ConsumerArn, TrackerName) {
  # Describe the AssociateTrackerConsumer REST operation.
  operation <- new_operation(
    name = "AssociateTrackerConsumer",
    http_method = "POST",
    http_path = "/tracking/v0/trackers/{TrackerName}/consumers",
    paginator = list()
  )
  # Serialize caller arguments into the request shape; fetch the response shape.
  payload <- .locationservice$associate_tracker_consumer_input(ConsumerArn = ConsumerArn, TrackerName = TrackerName)
  shape <- .locationservice$associate_tracker_consumer_output()
  # Resolve a service client from the current paws configuration and send.
  cfg <- get_config()
  client <- .locationservice$service(cfg)
  send_request(new_request(client, operation, payload, shape))
}
.locationservice$operations$associate_tracker_consumer <- locationservice_associate_tracker_consumer
#' Deletes the position history of one or more devices from a tracker
#' resource
#'
#' @description
#' Deletes the position history of one or more devices from a tracker resource.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_batch_delete_device_position_history/](https://www.paws-r-sdk.com/docs/locationservice_batch_delete_device_position_history/) for full documentation.
#'
#' @param DeviceIds [required] Devices whose position history you want to delete.
#'
#' - For example, for two devices: `“DeviceIds” : [DeviceId1,DeviceId2]`
#' @param TrackerName [required] The name of the tracker resource to delete the device position history
#' from.
#'
#' @keywords internal
#'
#' @rdname locationservice_batch_delete_device_position_history
locationservice_batch_delete_device_position_history <- function(DeviceIds, TrackerName) {
  # Describe the BatchDeleteDevicePositionHistory REST operation.
  operation <- new_operation(
    name = "BatchDeleteDevicePositionHistory",
    http_method = "POST",
    http_path = "/tracking/v0/trackers/{TrackerName}/delete-positions",
    paginator = list()
  )
  # Serialize caller arguments into the request shape; fetch the response shape.
  payload <- .locationservice$batch_delete_device_position_history_input(DeviceIds = DeviceIds, TrackerName = TrackerName)
  shape <- .locationservice$batch_delete_device_position_history_output()
  # Resolve a service client from the current paws configuration and send.
  cfg <- get_config()
  client <- .locationservice$service(cfg)
  send_request(new_request(client, operation, payload, shape))
}
.locationservice$operations$batch_delete_device_position_history <- locationservice_batch_delete_device_position_history
#' Deletes a batch of geofences from a geofence collection
#'
#' @description
#' Deletes a batch of geofences from a geofence collection.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_batch_delete_geofence/](https://www.paws-r-sdk.com/docs/locationservice_batch_delete_geofence/) for full documentation.
#'
#' @param CollectionName [required] The geofence collection storing the geofences to be deleted.
#' @param GeofenceIds [required] The batch of geofences to be deleted.
#'
#' @keywords internal
#'
#' @rdname locationservice_batch_delete_geofence
locationservice_batch_delete_geofence <- function(CollectionName, GeofenceIds) {
  # Describe the BatchDeleteGeofence REST operation.
  operation <- new_operation(
    name = "BatchDeleteGeofence",
    http_method = "POST",
    http_path = "/geofencing/v0/collections/{CollectionName}/delete-geofences",
    paginator = list()
  )
  # Serialize caller arguments into the request shape; fetch the response shape.
  payload <- .locationservice$batch_delete_geofence_input(CollectionName = CollectionName, GeofenceIds = GeofenceIds)
  shape <- .locationservice$batch_delete_geofence_output()
  # Resolve a service client from the current paws configuration and send.
  cfg <- get_config()
  client <- .locationservice$service(cfg)
  send_request(new_request(client, operation, payload, shape))
}
.locationservice$operations$batch_delete_geofence <- locationservice_batch_delete_geofence
#' Evaluates device positions against the geofence geometries from a given
#' geofence collection
#'
#' @description
#' Evaluates device positions against the geofence geometries from a given geofence collection.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_batch_evaluate_geofences/](https://www.paws-r-sdk.com/docs/locationservice_batch_evaluate_geofences/) for full documentation.
#'
#' @param CollectionName [required] The geofence collection used in evaluating the position of devices
#' against its geofences.
#' @param DevicePositionUpdates [required] Contains device details for each device to be evaluated against the
#' given geofence collection.
#'
#' @keywords internal
#'
#' @rdname locationservice_batch_evaluate_geofences
locationservice_batch_evaluate_geofences <- function(CollectionName, DevicePositionUpdates) {
  # Describe the BatchEvaluateGeofences REST operation.
  operation <- new_operation(
    name = "BatchEvaluateGeofences",
    http_method = "POST",
    http_path = "/geofencing/v0/collections/{CollectionName}/positions",
    paginator = list()
  )
  # Serialize caller arguments into the request shape; fetch the response shape.
  payload <- .locationservice$batch_evaluate_geofences_input(CollectionName = CollectionName, DevicePositionUpdates = DevicePositionUpdates)
  shape <- .locationservice$batch_evaluate_geofences_output()
  # Resolve a service client from the current paws configuration and send.
  cfg <- get_config()
  client <- .locationservice$service(cfg)
  send_request(new_request(client, operation, payload, shape))
}
.locationservice$operations$batch_evaluate_geofences <- locationservice_batch_evaluate_geofences
#' Lists the latest device positions for requested devices
#'
#' @description
#' Lists the latest device positions for requested devices.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_batch_get_device_position/](https://www.paws-r-sdk.com/docs/locationservice_batch_get_device_position/) for full documentation.
#'
#' @param DeviceIds [required] Devices whose position you want to retrieve.
#'
#' - For example, for two devices:
#' `device-ids=DeviceId1&device-ids=DeviceId2`
#' @param TrackerName [required] The tracker resource retrieving the device position.
#'
#' @keywords internal
#'
#' @rdname locationservice_batch_get_device_position
locationservice_batch_get_device_position <- function(DeviceIds, TrackerName) {
  # Describe the BatchGetDevicePosition REST operation.
  operation <- new_operation(
    name = "BatchGetDevicePosition",
    http_method = "POST",
    http_path = "/tracking/v0/trackers/{TrackerName}/get-positions",
    paginator = list()
  )
  # Serialize caller arguments into the request shape; fetch the response shape.
  payload <- .locationservice$batch_get_device_position_input(DeviceIds = DeviceIds, TrackerName = TrackerName)
  shape <- .locationservice$batch_get_device_position_output()
  # Resolve a service client from the current paws configuration and send.
  cfg <- get_config()
  client <- .locationservice$service(cfg)
  send_request(new_request(client, operation, payload, shape))
}
.locationservice$operations$batch_get_device_position <- locationservice_batch_get_device_position
#' A batch request for storing geofence geometries into a given geofence
#' collection, or updates the geometry of an existing geofence if a
#' geofence ID is included in the request
#'
#' @description
#' A batch request for storing geofence geometries into a given geofence collection, or updates the geometry of an existing geofence if a geofence ID is included in the request.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_batch_put_geofence/](https://www.paws-r-sdk.com/docs/locationservice_batch_put_geofence/) for full documentation.
#'
#' @param CollectionName [required] The geofence collection storing the geofences.
#' @param Entries [required] The batch of geofences to be stored in a geofence collection.
#'
#' @keywords internal
#'
#' @rdname locationservice_batch_put_geofence
locationservice_batch_put_geofence <- function(CollectionName, Entries) {
  # Describe the BatchPutGeofence HTTP operation.
  operation <- new_operation(
    name = "BatchPutGeofence",
    http_method = "POST",
    http_path = "/geofencing/v0/collections/{CollectionName}/put-geofences",
    paginator = list()
  )
  # Serialize caller-supplied arguments into the request shape.
  op_input <- .locationservice$batch_put_geofence_input(
    CollectionName = CollectionName,
    Entries = Entries
  )
  op_output <- .locationservice$batch_put_geofence_output()
  # Build a service client from the active configuration and dispatch.
  svc <- .locationservice$service(get_config())
  send_request(new_request(svc, operation, op_input, op_output))
}
.locationservice$operations$batch_put_geofence <- locationservice_batch_put_geofence
#' Uploads position update data for one or more devices to a tracker
#' resource
#'
#' @description
#' Uploads position update data for one or more devices to a tracker resource. Amazon Location uses the data when it reports the last known device position and position history. Amazon Location retains location data for 30 days.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_batch_update_device_position/](https://www.paws-r-sdk.com/docs/locationservice_batch_update_device_position/) for full documentation.
#'
#' @param TrackerName [required] The name of the tracker resource to update.
#' @param Updates [required] Contains the position update details for each device.
#'
#' @keywords internal
#'
#' @rdname locationservice_batch_update_device_position
locationservice_batch_update_device_position <- function(TrackerName, Updates) {
  # Describe the BatchUpdateDevicePosition HTTP operation.
  operation <- new_operation(
    name = "BatchUpdateDevicePosition",
    http_method = "POST",
    http_path = "/tracking/v0/trackers/{TrackerName}/positions",
    paginator = list()
  )
  # Serialize caller-supplied arguments into the request shape.
  op_input <- .locationservice$batch_update_device_position_input(
    TrackerName = TrackerName,
    Updates = Updates
  )
  op_output <- .locationservice$batch_update_device_position_output()
  # Build a service client from the active configuration and dispatch.
  svc <- .locationservice$service(get_config())
  send_request(new_request(svc, operation, op_input, op_output))
}
.locationservice$operations$batch_update_device_position <- locationservice_batch_update_device_position
#' Calculates a route given the following required parameters:
#' DeparturePosition and DestinationPosition
#'
#' @description
#' [Calculates a route](https://docs.aws.amazon.com/location/latest/developerguide/calculate-route.html) given the following required parameters: `DeparturePosition` and `DestinationPosition`. Requires that you first [create a route calculator resource](https://docs.aws.amazon.com/location/latest/APIReference/API_CreateRouteCalculator.html).
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_calculate_route/](https://www.paws-r-sdk.com/docs/locationservice_calculate_route/) for full documentation.
#'
#' @param CalculatorName [required] The name of the route calculator resource that you want to use to
#' calculate the route.
#' @param CarModeOptions Specifies route preferences when traveling by `Car`, such as avoiding
#' routes that use ferries or tolls.
#'
#' Requirements: `TravelMode` must be specified as `Car`.
#' @param DepartNow Sets the time of departure as the current time. Uses the current time to
#' calculate a route. Otherwise, the best time of day to travel with the
#' best traffic conditions is used to calculate the route.
#'
#' Default Value: `false`
#'
#' Valid Values: `false` | `true`
#' @param DeparturePosition [required] The start position for the route. Defined in [World Geodetic System (WGS
#' 84)](https://earth-info.nga.mil/index.php?dir=wgs84&action=wgs84)
#' format: `[longitude, latitude]`.
#'
#' - For example, `[-123.115, 49.285]`
#'
#' If you specify a departure that's not located on a road, Amazon Location
#' [moves the position to the nearest
#' road](https://docs.aws.amazon.com/location/latest/developerguide/snap-to-nearby-road.html).
#' If Esri is the provider for your route calculator, specifying a route
#' that is longer than 400 km returns a `400 RoutesValidationException`
#' error.
#'
#' Valid Values: `[-180 to 180,-90 to 90]`
#' @param DepartureTime Specifies the desired time of departure. Uses the given time to
#' calculate the route. Otherwise, the best time of day to travel with the
#' best traffic conditions is used to calculate the route.
#'
#' Setting a departure time in the past returns a `400 ValidationException`
#' error.
#'
#' - In [ISO
#' 8601](https://www.iso.org/iso-8601-date-and-time-format.html)
#' format: `YYYY-MM-DDThh:mm:ss.sssZ`. For example,
#'     `2020-07-02T12:15:20.000+01:00`
#' @param DestinationPosition [required] The finish position for the route. Defined in [World Geodetic System
#' (WGS 84)](https://earth-info.nga.mil/index.php?dir=wgs84&action=wgs84)
#' format: `[longitude, latitude]`.
#'
#' - For example, `[-122.339, 47.615]`
#'
#' If you specify a destination that's not located on a road, Amazon
#' Location [moves the position to the nearest
#' road](https://docs.aws.amazon.com/location/latest/developerguide/snap-to-nearby-road.html).
#'
#' Valid Values: `[-180 to 180,-90 to 90]`
#' @param DistanceUnit Set the unit system to specify the distance.
#'
#' Default Value: `Kilometers`
#' @param IncludeLegGeometry Set to include the geometry details in the result for each path between
#' a pair of positions.
#'
#' Default Value: `false`
#'
#' Valid Values: `false` | `true`
#' @param TravelMode Specifies the mode of transport when calculating a route. Used in
#' estimating the speed of travel and road compatibility. You can choose
#' `Car`, `Truck`, `Walking`, `Bicycle` or `Motorcycle` as options for the
#' `TravelMode`.
#'
#' `Bicycle` and `Motorcycle` are only valid when using Grab as a data
#' provider, and only within Southeast Asia.
#'
#' `Truck` is not available for Grab.
#'
#' For more details on the using Grab for routing, including areas of
#' coverage, see
#' [GrabMaps](https://docs.aws.amazon.com/location/latest/developerguide/grab.html)
#' in the *Amazon Location Service Developer Guide*.
#'
#' The `TravelMode` you specify also determines how you specify route
#' preferences:
#'
#' - If traveling by `Car` use the `CarModeOptions` parameter.
#'
#' - If traveling by `Truck` use the `TruckModeOptions` parameter.
#'
#' Default Value: `Car`
#' @param TruckModeOptions Specifies route preferences when traveling by `Truck`, such as avoiding
#' routes that use ferries or tolls, and truck specifications to consider
#' when choosing an optimal road.
#'
#' Requirements: `TravelMode` must be specified as `Truck`.
#' @param WaypointPositions Specifies an ordered list of up to 23 intermediate positions to include
#' along a route between the departure position and destination position.
#'
#' - For example, from the `DeparturePosition` `[-123.115, 49.285]`, the
#' route follows the order that the waypoint positions are given
#' `[[-122.757, 49.0021],[-122.349, 47.620]]`
#'
#' If you specify a waypoint position that's not located on a road, Amazon
#' Location [moves the position to the nearest
#' road](https://docs.aws.amazon.com/location/latest/developerguide/snap-to-nearby-road.html).
#'
#' Specifying more than 23 waypoints returns a `400 ValidationException`
#' error.
#'
#' If Esri is the provider for your route calculator, specifying a route
#' that is longer than 400 km returns a `400 RoutesValidationException`
#' error.
#'
#' Valid Values: `[-180 to 180,-90 to 90]`
#'
#' @keywords internal
#'
#' @rdname locationservice_calculate_route
locationservice_calculate_route <- function(CalculatorName, CarModeOptions = NULL, DepartNow = NULL, DeparturePosition, DepartureTime = NULL, DestinationPosition, DistanceUnit = NULL, IncludeLegGeometry = NULL, TravelMode = NULL, TruckModeOptions = NULL, WaypointPositions = NULL) {
  # Describe the CalculateRoute HTTP operation.
  operation <- new_operation(
    name = "CalculateRoute",
    http_method = "POST",
    http_path = "/routes/v0/calculators/{CalculatorName}/calculate/route",
    paginator = list()
  )
  # Serialize caller-supplied arguments into the request shape.
  op_input <- .locationservice$calculate_route_input(
    CalculatorName = CalculatorName,
    CarModeOptions = CarModeOptions,
    DepartNow = DepartNow,
    DeparturePosition = DeparturePosition,
    DepartureTime = DepartureTime,
    DestinationPosition = DestinationPosition,
    DistanceUnit = DistanceUnit,
    IncludeLegGeometry = IncludeLegGeometry,
    TravelMode = TravelMode,
    TruckModeOptions = TruckModeOptions,
    WaypointPositions = WaypointPositions
  )
  op_output <- .locationservice$calculate_route_output()
  # Build a service client from the active configuration and dispatch.
  svc <- .locationservice$service(get_config())
  send_request(new_request(svc, operation, op_input, op_output))
}
.locationservice$operations$calculate_route <- locationservice_calculate_route
#' Calculates a route matrix given the following required parameters:
#' DeparturePositions and DestinationPositions
#'
#' @description
#' [Calculates a route matrix](https://docs.aws.amazon.com/location/latest/developerguide/calculate-route-matrix.html) given the following required parameters: `DeparturePositions` and `DestinationPositions`. [`calculate_route_matrix`][locationservice_calculate_route_matrix] calculates routes and returns the travel time and travel distance from each departure position to each destination position in the request. For example, given departure positions A and B, and destination positions X and Y, [`calculate_route_matrix`][locationservice_calculate_route_matrix] will return time and distance for routes from A to X, A to Y, B to X, and B to Y (in that order). The number of results returned (and routes calculated) will be the number of `DeparturePositions` times the number of `DestinationPositions`.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_calculate_route_matrix/](https://www.paws-r-sdk.com/docs/locationservice_calculate_route_matrix/) for full documentation.
#'
#' @param CalculatorName [required] The name of the route calculator resource that you want to use to
#' calculate the route matrix.
#' @param CarModeOptions Specifies route preferences when traveling by `Car`, such as avoiding
#' routes that use ferries or tolls.
#'
#' Requirements: `TravelMode` must be specified as `Car`.
#' @param DepartNow Sets the time of departure as the current time. Uses the current time to
#' calculate the route matrix. You can't set both `DepartureTime` and
#' `DepartNow`. If neither is set, the best time of day to travel with the
#' best traffic conditions is used to calculate the route matrix.
#'
#' Default Value: `false`
#'
#' Valid Values: `false` | `true`
#' @param DeparturePositions [required] The list of departure (origin) positions for the route matrix. An array
#' of points, each of which is itself a 2-value array defined in [WGS
#' 84](https://earth-info.nga.mil/index.php?dir=wgs84&action=wgs84) format:
#' `[longitude, latitude]`. For example, `[-123.115, 49.285]`.
#'
#' Depending on the data provider selected in the route calculator resource
#' there may be additional restrictions on the inputs you can choose. See
#' [Position
#' restrictions](https://docs.aws.amazon.com/location/latest/developerguide/calculate-route-matrix.html#matrix-routing-position-limits)
#' in the *Amazon Location Service Developer Guide*.
#'
#' For route calculators that use Esri as the data provider, if you specify
#' a departure that's not located on a road, Amazon Location [moves the
#' position to the nearest
#' road](https://docs.aws.amazon.com/location/latest/developerguide/snap-to-nearby-road.html).
#' The snapped value is available in the result in
#' `SnappedDeparturePositions`.
#'
#' Valid Values: `[-180 to 180,-90 to 90]`
#' @param DepartureTime Specifies the desired time of departure. Uses the given time to
#' calculate the route matrix. You can't set both `DepartureTime` and
#' `DepartNow`. If neither is set, the best time of day to travel with the
#' best traffic conditions is used to calculate the route matrix.
#'
#' Setting a departure time in the past returns a `400 ValidationException`
#' error.
#'
#' - In [ISO
#' 8601](https://www.iso.org/iso-8601-date-and-time-format.html)
#' format: `YYYY-MM-DDThh:mm:ss.sssZ`. For example,
#'     `2020-07-02T12:15:20.000+01:00`
#' @param DestinationPositions [required] The list of destination positions for the route matrix. An array of
#' points, each of which is itself a 2-value array defined in [WGS
#' 84](https://earth-info.nga.mil/index.php?dir=wgs84&action=wgs84) format:
#' `[longitude, latitude]`. For example, `[-122.339, 47.615]`
#'
#' Depending on the data provider selected in the route calculator resource
#' there may be additional restrictions on the inputs you can choose. See
#' [Position
#' restrictions](https://docs.aws.amazon.com/location/latest/developerguide/calculate-route-matrix.html#matrix-routing-position-limits)
#' in the *Amazon Location Service Developer Guide*.
#'
#' For route calculators that use Esri as the data provider, if you specify
#' a destination that's not located on a road, Amazon Location [moves the
#' position to the nearest
#' road](https://docs.aws.amazon.com/location/latest/developerguide/snap-to-nearby-road.html).
#' The snapped value is available in the result in
#' `SnappedDestinationPositions`.
#'
#' Valid Values: `[-180 to 180,-90 to 90]`
#' @param DistanceUnit Set the unit system to specify the distance.
#'
#' Default Value: `Kilometers`
#' @param TravelMode Specifies the mode of transport when calculating a route. Used in
#' estimating the speed of travel and road compatibility.
#'
#' The `TravelMode` you specify also determines how you specify route
#' preferences:
#'
#' - If traveling by `Car` use the `CarModeOptions` parameter.
#'
#' - If traveling by `Truck` use the `TruckModeOptions` parameter.
#'
#' `Bicycle` or `Motorcycle` are only valid when using `Grab` as a data
#' provider, and only within Southeast Asia.
#'
#' `Truck` is not available for Grab.
#'
#' For more information about using Grab as a data provider, see
#' [GrabMaps](https://docs.aws.amazon.com/location/latest/developerguide/grab.html)
#' in the *Amazon Location Service Developer Guide*.
#'
#' Default Value: `Car`
#' @param TruckModeOptions Specifies route preferences when traveling by `Truck`, such as avoiding
#' routes that use ferries or tolls, and truck specifications to consider
#' when choosing an optimal road.
#'
#' Requirements: `TravelMode` must be specified as `Truck`.
#'
#' @keywords internal
#'
#' @rdname locationservice_calculate_route_matrix
locationservice_calculate_route_matrix <- function(CalculatorName, CarModeOptions = NULL, DepartNow = NULL, DeparturePositions, DepartureTime = NULL, DestinationPositions, DistanceUnit = NULL, TravelMode = NULL, TruckModeOptions = NULL) {
  # Describe the CalculateRouteMatrix HTTP operation.
  operation <- new_operation(
    name = "CalculateRouteMatrix",
    http_method = "POST",
    http_path = "/routes/v0/calculators/{CalculatorName}/calculate/route-matrix",
    paginator = list()
  )
  # Serialize caller-supplied arguments into the request shape.
  op_input <- .locationservice$calculate_route_matrix_input(
    CalculatorName = CalculatorName,
    CarModeOptions = CarModeOptions,
    DepartNow = DepartNow,
    DeparturePositions = DeparturePositions,
    DepartureTime = DepartureTime,
    DestinationPositions = DestinationPositions,
    DistanceUnit = DistanceUnit,
    TravelMode = TravelMode,
    TruckModeOptions = TruckModeOptions
  )
  op_output <- .locationservice$calculate_route_matrix_output()
  # Build a service client from the active configuration and dispatch.
  svc <- .locationservice$service(get_config())
  send_request(new_request(svc, operation, op_input, op_output))
}
.locationservice$operations$calculate_route_matrix <- locationservice_calculate_route_matrix
#' Creates a geofence collection, which manages and stores geofences
#'
#' @description
#' Creates a geofence collection, which manages and stores geofences.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_create_geofence_collection/](https://www.paws-r-sdk.com/docs/locationservice_create_geofence_collection/) for full documentation.
#'
#' @param CollectionName [required] A custom name for the geofence collection.
#'
#' Requirements:
#'
#' - Contain only alphanumeric characters (A–Z, a–z, 0–9), hyphens (-),
#' periods (.), and underscores (_).
#'
#' - Must be a unique geofence collection name.
#'
#' - No spaces allowed. For example, `ExampleGeofenceCollection`.
#' @param Description An optional description for the geofence collection.
#' @param KmsKeyId A key identifier for an [Amazon Web Services KMS customer managed
#' key](https://docs.aws.amazon.com/kms/latest/developerguide/create-keys.html).
#' Enter a key ID, key ARN, alias name, or alias ARN.
#' @param PricingPlan No longer used. If included, the only allowed value is
#' `RequestBasedUsage`.
#' @param PricingPlanDataSource This parameter is no longer used.
#' @param Tags Applies one or more tags to the geofence collection. A tag is a
#' key-value pair that helps manage, identify, search, and filter your
#' resources by labelling them.
#'
#' Format: `"key" : "value"`
#'
#' Restrictions:
#'
#' - Maximum 50 tags per resource
#'
#' - Each resource tag must be unique with a maximum of one value.
#'
#' - Maximum key length: 128 Unicode characters in UTF-8
#'
#' - Maximum value length: 256 Unicode characters in UTF-8
#'
#' - Can use alphanumeric characters (A–Z, a–z, 0–9), and the following
#' characters: + - = . _ : / @@.
#'
#' - Cannot use "aws:" as a prefix for a key.
#'
#' @keywords internal
#'
#' @rdname locationservice_create_geofence_collection
locationservice_create_geofence_collection <- function(CollectionName, Description = NULL, KmsKeyId = NULL, PricingPlan = NULL, PricingPlanDataSource = NULL, Tags = NULL) {
  # Describe the CreateGeofenceCollection HTTP operation.
  operation <- new_operation(
    name = "CreateGeofenceCollection",
    http_method = "POST",
    http_path = "/geofencing/v0/collections",
    paginator = list()
  )
  # Serialize caller-supplied arguments into the request shape.
  op_input <- .locationservice$create_geofence_collection_input(
    CollectionName = CollectionName,
    Description = Description,
    KmsKeyId = KmsKeyId,
    PricingPlan = PricingPlan,
    PricingPlanDataSource = PricingPlanDataSource,
    Tags = Tags
  )
  op_output <- .locationservice$create_geofence_collection_output()
  # Build a service client from the active configuration and dispatch.
  svc <- .locationservice$service(get_config())
  send_request(new_request(svc, operation, op_input, op_output))
}
.locationservice$operations$create_geofence_collection <- locationservice_create_geofence_collection
#' Creates an API key resource in your Amazon Web Services account, which
#' lets you grant geo:GetMap* actions for Amazon Location Map resources to
#' the API key bearer
#'
#' @description
#' Creates an API key resource in your Amazon Web Services account, which lets you grant `geo:GetMap*` actions for Amazon Location Map resources to the API key bearer.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_create_key/](https://www.paws-r-sdk.com/docs/locationservice_create_key/) for full documentation.
#'
#' @param Description An optional description for the API key resource.
#' @param ExpireTime The optional timestamp for when the API key resource will expire in [ISO
#' 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format:
#' `YYYY-MM-DDThh:mm:ss.sssZ`. One of `NoExpiry` or `ExpireTime` must be
#' set.
#' @param KeyName [required] A custom name for the API key resource.
#'
#' Requirements:
#'
#' - Contain only alphanumeric characters (A–Z, a–z, 0–9), hyphens (-),
#' periods (.), and underscores (_).
#'
#' - Must be a unique API key name.
#'
#' - No spaces allowed. For example, `ExampleAPIKey`.
#' @param NoExpiry Optionally set to `true` to set no expiration time for the API key. One
#' of `NoExpiry` or `ExpireTime` must be set.
#' @param Restrictions [required] The API key restrictions for the API key resource.
#' @param Tags Applies one or more tags to the map resource. A tag is a key-value pair
#' that helps manage, identify, search, and filter your resources by
#' labelling them.
#'
#' Format: `"key" : "value"`
#'
#' Restrictions:
#'
#' - Maximum 50 tags per resource
#'
#' - Each resource tag must be unique with a maximum of one value.
#'
#' - Maximum key length: 128 Unicode characters in UTF-8
#'
#' - Maximum value length: 256 Unicode characters in UTF-8
#'
#' - Can use alphanumeric characters (A–Z, a–z, 0–9), and the following
#' characters: + - = . _ : / @@.
#'
#' - Cannot use "aws:" as a prefix for a key.
#'
#' @keywords internal
#'
#' @rdname locationservice_create_key
locationservice_create_key <- function(Description = NULL, ExpireTime = NULL, KeyName, NoExpiry = NULL, Restrictions, Tags = NULL) {
  # Describe the CreateKey HTTP operation.
  operation <- new_operation(
    name = "CreateKey",
    http_method = "POST",
    http_path = "/metadata/v0/keys",
    paginator = list()
  )
  # Serialize caller-supplied arguments into the request shape.
  op_input <- .locationservice$create_key_input(
    Description = Description,
    ExpireTime = ExpireTime,
    KeyName = KeyName,
    NoExpiry = NoExpiry,
    Restrictions = Restrictions,
    Tags = Tags
  )
  op_output <- .locationservice$create_key_output()
  # Build a service client from the active configuration and dispatch.
  svc <- .locationservice$service(get_config())
  send_request(new_request(svc, operation, op_input, op_output))
}
.locationservice$operations$create_key <- locationservice_create_key
#' Creates a map resource in your Amazon Web Services account, which
#' provides map tiles of different styles sourced from global location data
#' providers
#'
#' @description
#' Creates a map resource in your Amazon Web Services account, which provides map tiles of different styles sourced from global location data providers.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_create_map/](https://www.paws-r-sdk.com/docs/locationservice_create_map/) for full documentation.
#'
#' @param Configuration [required] Specifies the `MapConfiguration`, including the map style, for the map
#' resource that you create. The map style defines the look of maps and the
#' data provider for your map resource.
#' @param Description An optional description for the map resource.
#' @param MapName [required] The name for the map resource.
#'
#' Requirements:
#'
#' - Must contain only alphanumeric characters (A–Z, a–z, 0–9), hyphens
#' (-), periods (.), and underscores (_).
#'
#' - Must be a unique map resource name.
#'
#' - No spaces allowed. For example, `ExampleMap`.
#' @param PricingPlan No longer used. If included, the only allowed value is
#' `RequestBasedUsage`.
#' @param Tags Applies one or more tags to the map resource. A tag is a
#' key-value pair that helps manage, identify, search, and filter your
#' resources by labelling them.
#'
#' Format: `"key" : "value"`
#'
#' Restrictions:
#'
#' - Maximum 50 tags per resource
#'
#' - Each resource tag must be unique with a maximum of one value.
#'
#' - Maximum key length: 128 Unicode characters in UTF-8
#'
#' - Maximum value length: 256 Unicode characters in UTF-8
#'
#' - Can use alphanumeric characters (A–Z, a–z, 0–9), and the following
#' characters: + - = . _ : / @@.
#'
#' - Cannot use "aws:" as a prefix for a key.
#'
#' @keywords internal
#'
#' @rdname locationservice_create_map
locationservice_create_map <- function(Configuration, Description = NULL, MapName, PricingPlan = NULL, Tags = NULL) {
  # Describe the CreateMap HTTP operation.
  operation <- new_operation(
    name = "CreateMap",
    http_method = "POST",
    http_path = "/maps/v0/maps",
    paginator = list()
  )
  # Serialize caller-supplied arguments into the request shape.
  op_input <- .locationservice$create_map_input(
    Configuration = Configuration,
    Description = Description,
    MapName = MapName,
    PricingPlan = PricingPlan,
    Tags = Tags
  )
  op_output <- .locationservice$create_map_output()
  # Build a service client from the active configuration and dispatch.
  svc <- .locationservice$service(get_config())
  send_request(new_request(svc, operation, op_input, op_output))
}
.locationservice$operations$create_map <- locationservice_create_map
#' Creates a place index resource in your Amazon Web Services account
#'
#' @description
#' Creates a place index resource in your Amazon Web Services account. Use a place index resource to geocode addresses and other text queries by using the [`search_place_index_for_text`][locationservice_search_place_index_for_text] operation, and reverse geocode coordinates by using the [`search_place_index_for_position`][locationservice_search_place_index_for_position] operation, and enable autosuggestions by using the [`search_place_index_for_suggestions`][locationservice_search_place_index_for_suggestions] operation.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_create_place_index/](https://www.paws-r-sdk.com/docs/locationservice_create_place_index/) for full documentation.
#'
#' @param DataSource [required] Specifies the geospatial data provider for the new place index.
#'
#' This field is case-sensitive. Enter the valid values as shown. For
#' example, entering `HERE` returns an error.
#'
#' Valid values include:
#'
#' - `Esri` – For additional information about
#' [Esri](https://docs.aws.amazon.com/location/latest/developerguide/esri.html)'s
#' coverage in your region of interest, see [Esri details on geocoding
#' coverage](https://developers.arcgis.com/rest/geocode/api-reference/geocode-coverage.htm).
#'
#' - `Grab` – Grab provides place index functionality for Southeast Asia.
#' For additional information about
#' [GrabMaps](https://docs.aws.amazon.com/location/latest/developerguide/grab.html)'
#' coverage, see [GrabMaps countries and areas
#' covered](https://docs.aws.amazon.com/location/latest/developerguide/grab.html#grab-coverage-area).
#'
#' - `Here` – For additional information about [HERE
#' Technologies](https://docs.aws.amazon.com/location/latest/developerguide/HERE.html)'
#'     coverage in your region of interest, see HERE details on geocoding
#'     coverage.
#'
#' If you specify HERE Technologies (`Here`) as the data provider, you
#' may not [store
#' results](https://docs.aws.amazon.com/location/latest/APIReference/API_DataSourceConfiguration.html)
#' for locations in Japan. For more information, see the [Amazon Web
#' Services Service Terms](https://aws.amazon.com/service-terms/) for
#' Amazon Location Service.
#'
#' For additional information , see [Data
#' providers](https://docs.aws.amazon.com/location/latest/developerguide/what-is-data-provider.html)
#' on the *Amazon Location Service Developer Guide*.
#' @param DataSourceConfiguration Specifies the data storage option requesting Places.
#' @param Description The optional description for the place index resource.
#' @param IndexName [required] The name of the place index resource.
#'
#' Requirements:
#'
#' - Contain only alphanumeric characters (A–Z, a–z, 0–9), hyphens (-),
#' periods (.), and underscores (_).
#'
#' - Must be a unique place index resource name.
#'
#' - No spaces allowed. For example, `ExamplePlaceIndex`.
#' @param PricingPlan No longer used. If included, the only allowed value is
#' `RequestBasedUsage`.
#' @param Tags Applies one or more tags to the place index resource. A tag is a
#' key-value pair that helps you manage, identify, search, and filter your
#' resources.
#'
#' Format: `"key" : "value"`
#'
#' Restrictions:
#'
#' - Maximum 50 tags per resource.
#'
#' - Each tag key must be unique and must have exactly one associated
#' value.
#'
#' - Maximum key length: 128 Unicode characters in UTF-8.
#'
#' - Maximum value length: 256 Unicode characters in UTF-8.
#'
#' - Can use alphanumeric characters (A–Z, a–z, 0–9), and the following
#' characters: + - = . _ : / @@
#'
#' - Cannot use "aws:" as a prefix for a key.
#'
#' @keywords internal
#'
#' @rdname locationservice_create_place_index
locationservice_create_place_index <- function(DataSource, DataSourceConfiguration = NULL, Description = NULL, IndexName, PricingPlan = NULL, Tags = NULL) {
  # Describe the CreatePlaceIndex HTTP operation.
  operation <- new_operation(
    name = "CreatePlaceIndex",
    http_method = "POST",
    http_path = "/places/v0/indexes",
    paginator = list()
  )
  # Serialize caller-supplied arguments into the request shape.
  op_input <- .locationservice$create_place_index_input(
    DataSource = DataSource,
    DataSourceConfiguration = DataSourceConfiguration,
    Description = Description,
    IndexName = IndexName,
    PricingPlan = PricingPlan,
    Tags = Tags
  )
  op_output <- .locationservice$create_place_index_output()
  # Build a service client from the active configuration and dispatch.
  svc <- .locationservice$service(get_config())
  send_request(new_request(svc, operation, op_input, op_output))
}
.locationservice$operations$create_place_index <- locationservice_create_place_index
#' Creates a route calculator resource in your Amazon Web Services account
#'
#' @description
#' Creates a route calculator resource in your Amazon Web Services account.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_create_route_calculator/](https://www.paws-r-sdk.com/docs/locationservice_create_route_calculator/) for full documentation.
#'
#' @param CalculatorName [required] The name of the route calculator resource.
#'
#' Requirements:
#'
#' - Can use alphanumeric characters (A–Z, a–z, 0–9) , hyphens (-),
#' periods (.), and underscores (_).
#'
#' - Must be a unique Route calculator resource name.
#'
#' - No spaces allowed. For example, `ExampleRouteCalculator`.
#' @param DataSource [required] Specifies the data provider of traffic and road network data.
#'
#' This field is case-sensitive. Enter the valid values as shown. For
#' example, entering `HERE` returns an error.
#'
#' Valid values include:
#'
#' - `Esri` – For additional information about
#' [Esri](https://docs.aws.amazon.com/location/latest/developerguide/esri.html)'s
#' coverage in your region of interest, see [Esri details on street
#' networks and traffic
#' coverage](https://doc.arcgis.com/en/arcgis-online/reference/network-coverage.htm).
#'
#' Route calculators that use Esri as a data source only calculate
#' routes that are shorter than 400 km.
#'
#' - `Grab` – Grab provides routing functionality for Southeast Asia. For
#' additional information about
#' [GrabMaps](https://docs.aws.amazon.com/location/latest/developerguide/grab.html)'
#' coverage, see [GrabMaps countries and areas
#' covered](https://docs.aws.amazon.com/location/latest/developerguide/grab.html#grab-coverage-area).
#'
#' - `Here` – For additional information about [HERE
#' Technologies](https://docs.aws.amazon.com/location/latest/developerguide/HERE.html)'
#' coverage in your region of interest, see HERE car routing coverage
#' and HERE truck routing coverage.
#'
#' For additional information , see [Data
#' providers](https://docs.aws.amazon.com/location/latest/developerguide/what-is-data-provider.html)
#' on the *Amazon Location Service Developer Guide*.
#' @param Description The optional description for the route calculator resource.
#' @param PricingPlan No longer used. If included, the only allowed value is
#' `RequestBasedUsage`.
#' @param Tags Applies one or more tags to the route calculator resource. A
#' tag is a key-value pair that helps manage, identify, search, and filter
#' your resources by labelling them.
#'
#' - For example: \{ `"tag1" : "value1"`, `"tag2" : "value2"`\}
#'
#' Format: `"key" : "value"`
#'
#' Restrictions:
#'
#' - Maximum 50 tags per resource
#'
#' - Each resource tag must be unique with a maximum of one value.
#'
#' - Maximum key length: 128 Unicode characters in UTF-8
#'
#' - Maximum value length: 256 Unicode characters in UTF-8
#'
#' - Can use alphanumeric characters (A–Z, a–z, 0–9), and the following
#' characters: + - = . _ : / @@.
#'
#' - Cannot use "aws:" as a prefix for a key.
#'
#' @keywords internal
#'
#' @rdname locationservice_create_route_calculator
locationservice_create_route_calculator <- function(CalculatorName, DataSource, Description = NULL, PricingPlan = NULL, Tags = NULL) {
  # Describe the CreateRouteCalculator REST operation (no pagination).
  operation <- new_operation(
    name = "CreateRouteCalculator",
    http_method = "POST",
    http_path = "/routes/v0/calculators",
    paginator = list()
  )
  # Serialize the caller's arguments and prepare the output shape.
  op_input <- .locationservice$create_route_calculator_input(CalculatorName = CalculatorName, DataSource = DataSource, Description = Description, PricingPlan = PricingPlan, Tags = Tags)
  op_output <- .locationservice$create_route_calculator_output()
  # Build a service client from the active configuration and send the call.
  svc <- .locationservice$service(get_config())
  resp <- send_request(new_request(svc, operation, op_input, op_output))
  resp
}
.locationservice$operations$create_route_calculator <- locationservice_create_route_calculator
#' Creates a tracker resource in your Amazon Web Services account, which
#' lets you retrieve current and historical location of devices
#'
#' @description
#' Creates a tracker resource in your Amazon Web Services account, which lets you retrieve current and historical location of devices.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_create_tracker/](https://www.paws-r-sdk.com/docs/locationservice_create_tracker/) for full documentation.
#'
#' @param Description An optional description for the tracker resource.
#' @param KmsKeyId A key identifier for an [Amazon Web Services KMS customer managed
#' key](https://docs.aws.amazon.com/kms/latest/developerguide/create-keys.html).
#' Enter a key ID, key ARN, alias name, or alias ARN.
#' @param PositionFiltering Specifies the position filtering for the tracker resource.
#'
#' Valid values:
#'
#' - `TimeBased` - Location updates are evaluated against linked geofence
#' collections, but not every location update is stored. If your update
#' frequency is more often than 30 seconds, only one update per 30
#' seconds is stored for each unique device ID.
#'
#' - `DistanceBased` - If the device has moved less than 30 m (98.4 ft),
#' location updates are ignored. Location updates within this area are
#' neither evaluated against linked geofence collections, nor stored.
#' This helps control costs by reducing the number of geofence
#' evaluations and historical device positions to paginate through.
#' Distance-based filtering can also reduce the effects of GPS noise
#' when displaying device trajectories on a map.
#'
#' - `AccuracyBased` - If the device has moved less than the measured
#' accuracy, location updates are ignored. For example, if two
#' consecutive updates from a device have a horizontal accuracy of 5 m
#' and 10 m, the second update is ignored if the device has moved less
#' than 15 m. Ignored location updates are neither evaluated against
#' linked geofence collections, nor stored. This can reduce the effects
#' of GPS noise when displaying device trajectories on a map, and can
#' help control your costs by reducing the number of geofence
#' evaluations.
#'
#' This field is optional. If not specified, the default value is
#' `TimeBased`.
#' @param PricingPlan No longer used. If included, the only allowed value is
#' `RequestBasedUsage`.
#' @param PricingPlanDataSource This parameter is no longer used.
#' @param Tags Applies one or more tags to the tracker resource. A tag is a key-value
#' pair that helps you manage, identify, search, and filter your resources
#' by labelling them.
#'
#' Format: `"key" : "value"`
#'
#' Restrictions:
#'
#' - Maximum 50 tags per resource
#'
#' - Each resource tag must be unique with a maximum of one value.
#'
#' - Maximum key length: 128 Unicode characters in UTF-8
#'
#' - Maximum value length: 256 Unicode characters in UTF-8
#'
#' - Can use alphanumeric characters (A–Z, a–z, 0–9), and the following
#' characters: + - = . _ : / @@.
#'
#' - Cannot use "aws:" as a prefix for a key.
#' @param TrackerName [required] The name for the tracker resource.
#'
#' Requirements:
#'
#' - Contain only alphanumeric characters (A-Z, a-z, 0-9) , hyphens (-),
#' periods (.), and underscores (_).
#'
#' - Must be a unique tracker resource name.
#'
#' - No spaces allowed. For example, `ExampleTracker`.
#'
#' @keywords internal
#'
#' @rdname locationservice_create_tracker
locationservice_create_tracker <- function(Description = NULL, KmsKeyId = NULL, PositionFiltering = NULL, PricingPlan = NULL, PricingPlanDataSource = NULL, Tags = NULL, TrackerName) {
  # Describe the CreateTracker REST operation (no pagination).
  operation <- new_operation(
    name = "CreateTracker",
    http_method = "POST",
    http_path = "/tracking/v0/trackers",
    paginator = list()
  )
  # Serialize the caller's arguments and prepare the output shape.
  op_input <- .locationservice$create_tracker_input(Description = Description, KmsKeyId = KmsKeyId, PositionFiltering = PositionFiltering, PricingPlan = PricingPlan, PricingPlanDataSource = PricingPlanDataSource, Tags = Tags, TrackerName = TrackerName)
  op_output <- .locationservice$create_tracker_output()
  # Build a service client from the active configuration and send the call.
  svc <- .locationservice$service(get_config())
  resp <- send_request(new_request(svc, operation, op_input, op_output))
  resp
}
.locationservice$operations$create_tracker <- locationservice_create_tracker
#' Deletes a geofence collection from your Amazon Web Services account
#'
#' @description
#' Deletes a geofence collection from your Amazon Web Services account.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_delete_geofence_collection/](https://www.paws-r-sdk.com/docs/locationservice_delete_geofence_collection/) for full documentation.
#'
#' @param CollectionName [required] The name of the geofence collection to be deleted.
#'
#' @keywords internal
#'
#' @rdname locationservice_delete_geofence_collection
locationservice_delete_geofence_collection <- function(CollectionName) {
  # Describe the DeleteGeofenceCollection REST operation (no pagination).
  operation <- new_operation(
    name = "DeleteGeofenceCollection",
    http_method = "DELETE",
    http_path = "/geofencing/v0/collections/{CollectionName}",
    paginator = list()
  )
  # Serialize the collection name and prepare the (empty) output shape.
  op_input <- .locationservice$delete_geofence_collection_input(CollectionName = CollectionName)
  op_output <- .locationservice$delete_geofence_collection_output()
  # Build a service client from the active configuration and send the call.
  svc <- .locationservice$service(get_config())
  resp <- send_request(new_request(svc, operation, op_input, op_output))
  resp
}
.locationservice$operations$delete_geofence_collection <- locationservice_delete_geofence_collection
#' Deletes the specified API key
#'
#' @description
#' Deletes the specified API key. The API key must have been deactivated more than 90 days previously.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_delete_key/](https://www.paws-r-sdk.com/docs/locationservice_delete_key/) for full documentation.
#'
#' @param KeyName [required] The name of the API key to delete.
#'
#' @keywords internal
#'
#' @rdname locationservice_delete_key
locationservice_delete_key <- function(KeyName) {
  # Describe the DeleteKey REST operation (no pagination).
  operation <- new_operation(
    name = "DeleteKey",
    http_method = "DELETE",
    http_path = "/metadata/v0/keys/{KeyName}",
    paginator = list()
  )
  # Serialize the key name and prepare the (empty) output shape.
  op_input <- .locationservice$delete_key_input(KeyName = KeyName)
  op_output <- .locationservice$delete_key_output()
  # Build a service client from the active configuration and send the call.
  svc <- .locationservice$service(get_config())
  resp <- send_request(new_request(svc, operation, op_input, op_output))
  resp
}
.locationservice$operations$delete_key <- locationservice_delete_key
#' Deletes a map resource from your Amazon Web Services account
#'
#' @description
#' Deletes a map resource from your Amazon Web Services account.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_delete_map/](https://www.paws-r-sdk.com/docs/locationservice_delete_map/) for full documentation.
#'
#' @param MapName [required] The name of the map resource to be deleted.
#'
#' @keywords internal
#'
#' @rdname locationservice_delete_map
locationservice_delete_map <- function(MapName) {
  # Describe the DeleteMap REST operation (no pagination).
  operation <- new_operation(
    name = "DeleteMap",
    http_method = "DELETE",
    http_path = "/maps/v0/maps/{MapName}",
    paginator = list()
  )
  # Serialize the map name and prepare the (empty) output shape.
  op_input <- .locationservice$delete_map_input(MapName = MapName)
  op_output <- .locationservice$delete_map_output()
  # Build a service client from the active configuration and send the call.
  svc <- .locationservice$service(get_config())
  resp <- send_request(new_request(svc, operation, op_input, op_output))
  resp
}
.locationservice$operations$delete_map <- locationservice_delete_map
#' Deletes a place index resource from your Amazon Web Services account
#'
#' @description
#' Deletes a place index resource from your Amazon Web Services account.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_delete_place_index/](https://www.paws-r-sdk.com/docs/locationservice_delete_place_index/) for full documentation.
#'
#' @param IndexName [required] The name of the place index resource to be deleted.
#'
#' @keywords internal
#'
#' @rdname locationservice_delete_place_index
locationservice_delete_place_index <- function(IndexName) {
  # Describe the DeletePlaceIndex REST operation (no pagination).
  operation <- new_operation(
    name = "DeletePlaceIndex",
    http_method = "DELETE",
    http_path = "/places/v0/indexes/{IndexName}",
    paginator = list()
  )
  # Serialize the index name and prepare the (empty) output shape.
  op_input <- .locationservice$delete_place_index_input(IndexName = IndexName)
  op_output <- .locationservice$delete_place_index_output()
  # Build a service client from the active configuration and send the call.
  svc <- .locationservice$service(get_config())
  resp <- send_request(new_request(svc, operation, op_input, op_output))
  resp
}
.locationservice$operations$delete_place_index <- locationservice_delete_place_index
#' Deletes a route calculator resource from your Amazon Web Services
#' account
#'
#' @description
#' Deletes a route calculator resource from your Amazon Web Services account.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_delete_route_calculator/](https://www.paws-r-sdk.com/docs/locationservice_delete_route_calculator/) for full documentation.
#'
#' @param CalculatorName [required] The name of the route calculator resource to be deleted.
#'
#' @keywords internal
#'
#' @rdname locationservice_delete_route_calculator
locationservice_delete_route_calculator <- function(CalculatorName) {
  # Describe the DeleteRouteCalculator REST operation (no pagination).
  operation <- new_operation(
    name = "DeleteRouteCalculator",
    http_method = "DELETE",
    http_path = "/routes/v0/calculators/{CalculatorName}",
    paginator = list()
  )
  # Serialize the calculator name and prepare the (empty) output shape.
  op_input <- .locationservice$delete_route_calculator_input(CalculatorName = CalculatorName)
  op_output <- .locationservice$delete_route_calculator_output()
  # Build a service client from the active configuration and send the call.
  svc <- .locationservice$service(get_config())
  resp <- send_request(new_request(svc, operation, op_input, op_output))
  resp
}
.locationservice$operations$delete_route_calculator <- locationservice_delete_route_calculator
#' Deletes a tracker resource from your Amazon Web Services account
#'
#' @description
#' Deletes a tracker resource from your Amazon Web Services account.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_delete_tracker/](https://www.paws-r-sdk.com/docs/locationservice_delete_tracker/) for full documentation.
#'
#' @param TrackerName [required] The name of the tracker resource to be deleted.
#'
#' @keywords internal
#'
#' @rdname locationservice_delete_tracker
locationservice_delete_tracker <- function(TrackerName) {
  # Describe the DeleteTracker REST operation (no pagination).
  operation <- new_operation(
    name = "DeleteTracker",
    http_method = "DELETE",
    http_path = "/tracking/v0/trackers/{TrackerName}",
    paginator = list()
  )
  # Serialize the tracker name and prepare the (empty) output shape.
  op_input <- .locationservice$delete_tracker_input(TrackerName = TrackerName)
  op_output <- .locationservice$delete_tracker_output()
  # Build a service client from the active configuration and send the call.
  svc <- .locationservice$service(get_config())
  resp <- send_request(new_request(svc, operation, op_input, op_output))
  resp
}
.locationservice$operations$delete_tracker <- locationservice_delete_tracker
#' Retrieves the geofence collection details
#'
#' @description
#' Retrieves the geofence collection details.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_describe_geofence_collection/](https://www.paws-r-sdk.com/docs/locationservice_describe_geofence_collection/) for full documentation.
#'
#' @param CollectionName [required] The name of the geofence collection.
#'
#' @keywords internal
#'
#' @rdname locationservice_describe_geofence_collection
locationservice_describe_geofence_collection <- function(CollectionName) {
  # Describe the DescribeGeofenceCollection REST operation (no pagination).
  operation <- new_operation(
    name = "DescribeGeofenceCollection",
    http_method = "GET",
    http_path = "/geofencing/v0/collections/{CollectionName}",
    paginator = list()
  )
  # Serialize the collection name and prepare the output shape.
  op_input <- .locationservice$describe_geofence_collection_input(CollectionName = CollectionName)
  op_output <- .locationservice$describe_geofence_collection_output()
  # Build a service client from the active configuration and send the call.
  svc <- .locationservice$service(get_config())
  resp <- send_request(new_request(svc, operation, op_input, op_output))
  resp
}
.locationservice$operations$describe_geofence_collection <- locationservice_describe_geofence_collection
#' Retrieves the API key resource details
#'
#' @description
#' Retrieves the API key resource details.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_describe_key/](https://www.paws-r-sdk.com/docs/locationservice_describe_key/) for full documentation.
#'
#' @param KeyName [required] The name of the API key resource.
#'
#' @keywords internal
#'
#' @rdname locationservice_describe_key
locationservice_describe_key <- function(KeyName) {
  # Describe the DescribeKey REST operation (no pagination).
  operation <- new_operation(
    name = "DescribeKey",
    http_method = "GET",
    http_path = "/metadata/v0/keys/{KeyName}",
    paginator = list()
  )
  # Serialize the key name and prepare the output shape.
  op_input <- .locationservice$describe_key_input(KeyName = KeyName)
  op_output <- .locationservice$describe_key_output()
  # Build a service client from the active configuration and send the call.
  svc <- .locationservice$service(get_config())
  resp <- send_request(new_request(svc, operation, op_input, op_output))
  resp
}
.locationservice$operations$describe_key <- locationservice_describe_key
#' Retrieves the map resource details
#'
#' @description
#' Retrieves the map resource details.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_describe_map/](https://www.paws-r-sdk.com/docs/locationservice_describe_map/) for full documentation.
#'
#' @param MapName [required] The name of the map resource.
#'
#' @keywords internal
#'
#' @rdname locationservice_describe_map
locationservice_describe_map <- function(MapName) {
  # Describe the DescribeMap REST operation (no pagination).
  operation <- new_operation(
    name = "DescribeMap",
    http_method = "GET",
    http_path = "/maps/v0/maps/{MapName}",
    paginator = list()
  )
  # Serialize the map name and prepare the output shape.
  op_input <- .locationservice$describe_map_input(MapName = MapName)
  op_output <- .locationservice$describe_map_output()
  # Build a service client from the active configuration and send the call.
  svc <- .locationservice$service(get_config())
  resp <- send_request(new_request(svc, operation, op_input, op_output))
  resp
}
.locationservice$operations$describe_map <- locationservice_describe_map
#' Retrieves the place index resource details
#'
#' @description
#' Retrieves the place index resource details.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_describe_place_index/](https://www.paws-r-sdk.com/docs/locationservice_describe_place_index/) for full documentation.
#'
#' @param IndexName [required] The name of the place index resource.
#'
#' @keywords internal
#'
#' @rdname locationservice_describe_place_index
locationservice_describe_place_index <- function(IndexName) {
  # Describe the DescribePlaceIndex REST operation (no pagination).
  operation <- new_operation(
    name = "DescribePlaceIndex",
    http_method = "GET",
    http_path = "/places/v0/indexes/{IndexName}",
    paginator = list()
  )
  # Serialize the index name and prepare the output shape.
  op_input <- .locationservice$describe_place_index_input(IndexName = IndexName)
  op_output <- .locationservice$describe_place_index_output()
  # Build a service client from the active configuration and send the call.
  svc <- .locationservice$service(get_config())
  resp <- send_request(new_request(svc, operation, op_input, op_output))
  resp
}
.locationservice$operations$describe_place_index <- locationservice_describe_place_index
#' Retrieves the route calculator resource details
#'
#' @description
#' Retrieves the route calculator resource details.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_describe_route_calculator/](https://www.paws-r-sdk.com/docs/locationservice_describe_route_calculator/) for full documentation.
#'
#' @param CalculatorName [required] The name of the route calculator resource.
#'
#' @keywords internal
#'
#' @rdname locationservice_describe_route_calculator
locationservice_describe_route_calculator <- function(CalculatorName) {
  # Describe the DescribeRouteCalculator REST operation (no pagination).
  operation <- new_operation(
    name = "DescribeRouteCalculator",
    http_method = "GET",
    http_path = "/routes/v0/calculators/{CalculatorName}",
    paginator = list()
  )
  # Serialize the calculator name and prepare the output shape.
  op_input <- .locationservice$describe_route_calculator_input(CalculatorName = CalculatorName)
  op_output <- .locationservice$describe_route_calculator_output()
  # Build a service client from the active configuration and send the call.
  svc <- .locationservice$service(get_config())
  resp <- send_request(new_request(svc, operation, op_input, op_output))
  resp
}
.locationservice$operations$describe_route_calculator <- locationservice_describe_route_calculator
#' Retrieves the tracker resource details
#'
#' @description
#' Retrieves the tracker resource details.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_describe_tracker/](https://www.paws-r-sdk.com/docs/locationservice_describe_tracker/) for full documentation.
#'
#' @param TrackerName [required] The name of the tracker resource.
#'
#' @keywords internal
#'
#' @rdname locationservice_describe_tracker
locationservice_describe_tracker <- function(TrackerName) {
  # Describe the DescribeTracker REST operation (no pagination).
  operation <- new_operation(
    name = "DescribeTracker",
    http_method = "GET",
    http_path = "/tracking/v0/trackers/{TrackerName}",
    paginator = list()
  )
  # Serialize the tracker name and prepare the output shape.
  op_input <- .locationservice$describe_tracker_input(TrackerName = TrackerName)
  op_output <- .locationservice$describe_tracker_output()
  # Build a service client from the active configuration and send the call.
  svc <- .locationservice$service(get_config())
  resp <- send_request(new_request(svc, operation, op_input, op_output))
  resp
}
.locationservice$operations$describe_tracker <- locationservice_describe_tracker
#' Removes the association between a tracker resource and a geofence
#' collection
#'
#' @description
#' Removes the association between a tracker resource and a geofence collection.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_disassociate_tracker_consumer/](https://www.paws-r-sdk.com/docs/locationservice_disassociate_tracker_consumer/) for full documentation.
#'
#' @param ConsumerArn [required] The Amazon Resource Name (ARN) for the geofence collection to be
#' disassociated from the tracker resource. Used when you need to specify a
#' resource across all Amazon Web Services.
#'
#' - Format example:
#' `arn:aws:geo:region:account-id:geofence-collection/ExampleGeofenceCollectionConsumer`
#' @param TrackerName [required] The name of the tracker resource to be dissociated from the consumer.
#'
#' @keywords internal
#'
#' @rdname locationservice_disassociate_tracker_consumer
locationservice_disassociate_tracker_consumer <- function(ConsumerArn, TrackerName) {
  # Describe the DisassociateTrackerConsumer REST operation (no pagination).
  operation <- new_operation(
    name = "DisassociateTrackerConsumer",
    http_method = "DELETE",
    http_path = "/tracking/v0/trackers/{TrackerName}/consumers/{ConsumerArn}",
    paginator = list()
  )
  # Serialize both identifiers and prepare the (empty) output shape.
  op_input <- .locationservice$disassociate_tracker_consumer_input(ConsumerArn = ConsumerArn, TrackerName = TrackerName)
  op_output <- .locationservice$disassociate_tracker_consumer_output()
  # Build a service client from the active configuration and send the call.
  svc <- .locationservice$service(get_config())
  resp <- send_request(new_request(svc, operation, op_input, op_output))
  resp
}
.locationservice$operations$disassociate_tracker_consumer <- locationservice_disassociate_tracker_consumer
#' Retrieves a device's most recent position according to its sample time
#'
#' @description
#' Retrieves a device's most recent position according to its sample time.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_get_device_position/](https://www.paws-r-sdk.com/docs/locationservice_get_device_position/) for full documentation.
#'
#' @param DeviceId [required] The device whose position you want to retrieve.
#' @param TrackerName [required] The tracker resource receiving the position update.
#'
#' @keywords internal
#'
#' @rdname locationservice_get_device_position
locationservice_get_device_position <- function(DeviceId, TrackerName) {
  # Describe the GetDevicePosition REST operation (no pagination).
  operation <- new_operation(
    name = "GetDevicePosition",
    http_method = "GET",
    http_path = "/tracking/v0/trackers/{TrackerName}/devices/{DeviceId}/positions/latest",
    paginator = list()
  )
  # Serialize the device and tracker identifiers and prepare the output shape.
  op_input <- .locationservice$get_device_position_input(DeviceId = DeviceId, TrackerName = TrackerName)
  op_output <- .locationservice$get_device_position_output()
  # Build a service client from the active configuration and send the call.
  svc <- .locationservice$service(get_config())
  resp <- send_request(new_request(svc, operation, op_input, op_output))
  resp
}
.locationservice$operations$get_device_position <- locationservice_get_device_position
#' Retrieves the device position history from a tracker resource within a
#' specified range of time
#'
#' @description
#' Retrieves the device position history from a tracker resource within a specified range of time.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_get_device_position_history/](https://www.paws-r-sdk.com/docs/locationservice_get_device_position_history/) for full documentation.
#'
#' @param DeviceId [required] The device whose position history you want to retrieve.
#' @param EndTimeExclusive Specify the end time for the position history in [ISO
#' 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format:
#' `YYYY-MM-DDThh:mm:ss.sssZ`. By default, the value will be the time that
#' the request is made.
#'
#' Requirement:
#'
#' - The time specified for `EndTimeExclusive` must be after the time for
#' `StartTimeInclusive`.
#' @param MaxResults An optional limit for the number of device positions returned in a
#' single call.
#'
#' Default value: `100`
#' @param NextToken The pagination token specifying which page of results to return in the
#' response. If no token is provided, the default page is the first page.
#'
#' Default value: `null`
#' @param StartTimeInclusive Specify the start time for the position history in [ISO
#' 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format:
#' `YYYY-MM-DDThh:mm:ss.sssZ`. By default, the value will be 24 hours prior
#' to the time that the request is made.
#'
#' Requirement:
#'
#' - The time specified for `StartTimeInclusive` must be before
#' `EndTimeExclusive`.
#' @param TrackerName [required] The tracker resource receiving the request for the device position
#' history.
#'
#' @keywords internal
#'
#' @rdname locationservice_get_device_position_history
locationservice_get_device_position_history <- function(DeviceId, EndTimeExclusive = NULL, MaxResults = NULL, NextToken = NULL, StartTimeInclusive = NULL, TrackerName) {
  # Describe the GetDevicePositionHistory REST operation (no pagination).
  operation <- new_operation(
    name = "GetDevicePositionHistory",
    http_method = "POST",
    http_path = "/tracking/v0/trackers/{TrackerName}/devices/{DeviceId}/list-positions",
    paginator = list()
  )
  # Serialize the caller's arguments and prepare the output shape.
  op_input <- .locationservice$get_device_position_history_input(DeviceId = DeviceId, EndTimeExclusive = EndTimeExclusive, MaxResults = MaxResults, NextToken = NextToken, StartTimeInclusive = StartTimeInclusive, TrackerName = TrackerName)
  op_output <- .locationservice$get_device_position_history_output()
  # Build a service client from the active configuration and send the call.
  svc <- .locationservice$service(get_config())
  resp <- send_request(new_request(svc, operation, op_input, op_output))
  resp
}
.locationservice$operations$get_device_position_history <- locationservice_get_device_position_history
#' Retrieves the geofence details from a geofence collection
#'
#' @description
#' Retrieves the geofence details from a geofence collection.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_get_geofence/](https://www.paws-r-sdk.com/docs/locationservice_get_geofence/) for full documentation.
#'
#' @param CollectionName [required] The geofence collection storing the target geofence.
#' @param GeofenceId [required] The geofence you're retrieving details for.
#'
#' @keywords internal
#'
#' @rdname locationservice_get_geofence
locationservice_get_geofence <- function(CollectionName, GeofenceId) {
  # Describe the GetGeofence REST operation (no pagination).
  operation <- new_operation(
    name = "GetGeofence",
    http_method = "GET",
    http_path = "/geofencing/v0/collections/{CollectionName}/geofences/{GeofenceId}",
    paginator = list()
  )
  # Serialize both identifiers and prepare the output shape.
  op_input <- .locationservice$get_geofence_input(CollectionName = CollectionName, GeofenceId = GeofenceId)
  op_output <- .locationservice$get_geofence_output()
  # Build a service client from the active configuration and send the call.
  svc <- .locationservice$service(get_config())
  resp <- send_request(new_request(svc, operation, op_input, op_output))
  resp
}
.locationservice$operations$get_geofence <- locationservice_get_geofence
#' Retrieves glyphs used to display labels on a map
#'
#' @description
#' Retrieves glyphs used to display labels on a map.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_get_map_glyphs/](https://www.paws-r-sdk.com/docs/locationservice_get_map_glyphs/) for full documentation.
#'
#' @param FontStack [required] A comma-separated list of fonts to load glyphs from in order of
#' preference. For example, `Noto Sans Regular, Arial Unicode`.
#'
#' Valid font stacks for
#' [Esri](https://docs.aws.amazon.com/location/latest/developerguide/esri.html)
#' styles:
#'
#' - VectorEsriDarkGrayCanvas – `Ubuntu Medium Italic` | `Ubuntu Medium`
#' | `Ubuntu Italic` | `Ubuntu Regular` | `Ubuntu Bold`
#'
#' - VectorEsriLightGrayCanvas – `Ubuntu Italic` | `Ubuntu Regular` |
#' `Ubuntu Light` | `Ubuntu Bold`
#'
#' - VectorEsriTopographic – `Noto Sans Italic` | `Noto Sans Regular` |
#' `Noto Sans Bold` | `Noto Serif Regular` |
#' `Roboto Condensed Light Italic`
#'
#' - VectorEsriStreets – `Arial Regular` | `Arial Italic` | `Arial Bold`
#'
#' - VectorEsriNavigation – `Arial Regular` | `Arial Italic` |
#' `Arial Bold`
#'
#' Valid font stacks for [HERE
#' Technologies](https://docs.aws.amazon.com/location/latest/developerguide/HERE.html)
#' styles:
#'
#' - VectorHereContrast – `Fira GO Regular` | `Fira GO Bold`
#'
#' - VectorHereExplore, VectorHereExploreTruck,
#' HybridHereExploreSatellite – `Fira GO Italic` | `Fira GO Map` |
#' `Fira GO Map Bold` | `Noto Sans CJK JP Bold` |
#' `Noto Sans CJK JP Light` | `Noto Sans CJK JP Regular`
#'
#' Valid font stacks for
#' [GrabMaps](https://docs.aws.amazon.com/location/latest/developerguide/grab.html)
#' styles:
#'
#' - VectorGrabStandardLight, VectorGrabStandardDark –
#' `Noto Sans Regular` | `Noto Sans Medium` | `Noto Sans Bold`
#'
#' Valid font stacks for [Open
#' Data](https://docs.aws.amazon.com/location/latest/developerguide/open-data.html)
#' styles:
#'
#' - VectorOpenDataStandardLight, VectorOpenDataStandardDark,
#' VectorOpenDataVisualizationLight, VectorOpenDataVisualizationDark –
#' `Amazon Ember Regular,Noto Sans Regular` |
#' `Amazon Ember Bold,Noto Sans Bold` |
#' `Amazon Ember Medium,Noto Sans Medium` |
#' `Amazon Ember Regular Italic,Noto Sans Italic` |
#' `Amazon Ember Condensed RC Regular,Noto Sans Regular` |
#' `Amazon Ember Condensed RC Bold,Noto Sans Bold` |
#' `Amazon Ember Regular,Noto Sans Regular,Noto Sans Arabic Regular` |
#' `Amazon Ember Condensed RC Bold,Noto Sans Bold,Noto Sans Arabic Condensed Bold`
#' | `Amazon Ember Bold,Noto Sans Bold,Noto Sans Arabic Bold` |
#' `Amazon Ember Regular Italic,Noto Sans Italic,Noto Sans Arabic Regular`
#' |
#' `Amazon Ember Condensed RC Regular,Noto Sans Regular,Noto Sans Arabic Condensed Regular`
#' | `Amazon Ember Medium,Noto Sans Medium,Noto Sans Arabic Medium`
#'
#' The fonts used by the Open Data map styles are combined fonts that use
#' `Amazon Ember` for most glyphs but `Noto Sans` for glyphs unsupported by
#' `Amazon Ember`.
#' @param FontUnicodeRange [required] A Unicode range of characters to download glyphs for. Each response will
#' contain 256 characters. For example, 0–255 includes all characters from
#' range `U+0000` to `00FF`. Must be aligned to multiples of 256.
#' @param Key The optional [API
#' key](https://docs.aws.amazon.com/location/latest/developerguide/using-apikeys.html)
#' to authorize the request.
#' @param MapName [required] The map resource associated with the glyph file.
#'
#' @keywords internal
#'
#' @rdname locationservice_get_map_glyphs
locationservice_get_map_glyphs <- function(FontStack, FontUnicodeRange, Key = NULL, MapName) {
  # Describe the GetMapGlyphs REST operation (no pagination).
  operation <- new_operation(
    name = "GetMapGlyphs",
    http_method = "GET",
    http_path = "/maps/v0/maps/{MapName}/glyphs/{FontStack}/{FontUnicodeRange}",
    paginator = list()
  )
  # Serialize the caller's arguments and prepare the output shape.
  op_input <- .locationservice$get_map_glyphs_input(FontStack = FontStack, FontUnicodeRange = FontUnicodeRange, Key = Key, MapName = MapName)
  op_output <- .locationservice$get_map_glyphs_output()
  # Build a service client from the active configuration and send the call.
  svc <- .locationservice$service(get_config())
  resp <- send_request(new_request(svc, operation, op_input, op_output))
  resp
}
.locationservice$operations$get_map_glyphs <- locationservice_get_map_glyphs
#' Retrieves the sprite sheet corresponding to a map resource
#'
#' @description
#' Retrieves the sprite sheet corresponding to a map resource. The sprite sheet is a PNG image paired with a JSON document describing the offsets of individual icons that will be displayed on a rendered map.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_get_map_sprites/](https://www.paws-r-sdk.com/docs/locationservice_get_map_sprites/) for full documentation.
#'
#' @param FileName [required] The name of the sprite file. Use the following file names for the sprite
#' sheet:
#'
#' - `sprites.png`
#'
#' - `sprites@@2x.png` for high pixel density displays
#'
#' For the JSON document containing image offsets. Use the following file
#' names:
#'
#' - `sprites.json`
#'
#' - `sprites@@2x.json` for high pixel density displays
#' @param Key The optional [API
#' key](https://docs.aws.amazon.com/location/latest/developerguide/using-apikeys.html)
#' to authorize the request.
#' @param MapName [required] The map resource associated with the sprite file.
#'
#' @keywords internal
#'
#' @rdname locationservice_get_map_sprites
locationservice_get_map_sprites <- function(FileName, Key = NULL, MapName) {
  # Describe the GetMapSprites REST operation (no pagination).
  operation <- new_operation(
    name = "GetMapSprites",
    http_method = "GET",
    http_path = "/maps/v0/maps/{MapName}/sprites/{FileName}",
    paginator = list()
  )
  # Serialize the caller's arguments and prepare the output shape.
  op_input <- .locationservice$get_map_sprites_input(FileName = FileName, Key = Key, MapName = MapName)
  op_output <- .locationservice$get_map_sprites_output()
  # Build a service client from the active configuration and send the call.
  svc <- .locationservice$service(get_config())
  resp <- send_request(new_request(svc, operation, op_input, op_output))
  resp
}
.locationservice$operations$get_map_sprites <- locationservice_get_map_sprites
#' Retrieves the map style descriptor from a map resource
#'
#' @description
#' Retrieves the map style descriptor from a map resource.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_get_map_style_descriptor/](https://www.paws-r-sdk.com/docs/locationservice_get_map_style_descriptor/) for full documentation.
#'
#' @param Key The optional [API
#' key](https://docs.aws.amazon.com/location/latest/developerguide/using-apikeys.html)
#' to authorize the request.
#' @param MapName [required] The map resource to retrieve the style descriptor from.
#'
#' @keywords internal
#'
#' @rdname locationservice_get_map_style_descriptor
locationservice_get_map_style_descriptor <- function(Key = NULL, MapName) {
  # Describe the GetMapStyleDescriptor REST operation (no pagination).
  operation <- new_operation(
    name = "GetMapStyleDescriptor",
    http_method = "GET",
    http_path = "/maps/v0/maps/{MapName}/style-descriptor",
    paginator = list()
  )
  # Serialize the caller's arguments and prepare the output shape.
  op_input <- .locationservice$get_map_style_descriptor_input(Key = Key, MapName = MapName)
  op_output <- .locationservice$get_map_style_descriptor_output()
  # Build a service client from the active configuration and send the call.
  svc <- .locationservice$service(get_config())
  resp <- send_request(new_request(svc, operation, op_input, op_output))
  resp
}
.locationservice$operations$get_map_style_descriptor <- locationservice_get_map_style_descriptor
#' Retrieves a vector data tile from the map resource
#'
#' @description
#' Retrieves a vector data tile from the map resource. Map tiles are used by clients to render a map. They're addressed using a grid arrangement with an X coordinate, Y coordinate, and Z (zoom) level.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_get_map_tile/](https://www.paws-r-sdk.com/docs/locationservice_get_map_tile/) for full documentation.
#'
#' @param Key The optional [API
#' key](https://docs.aws.amazon.com/location/latest/developerguide/using-apikeys.html)
#' to authorize the request.
#' @param MapName [required] The map resource to retrieve the map tiles from.
#' @param X [required] The X axis value for the map tile.
#' @param Y [required] The Y axis value for the map tile.
#' @param Z [required] The zoom value for the map tile.
#'
#' @keywords internal
#'
#' @rdname locationservice_get_map_tile
locationservice_get_map_tile <- function(Key = NULL, MapName, X, Y, Z) {
  # Describe the GetMapTile REST operation.
  operation <- new_operation(
    name = "GetMapTile",
    http_method = "GET",
    http_path = "/maps/v0/maps/{MapName}/tiles/{Z}/{X}/{Y}",
    paginator = list()
  )
  # Assemble the request payload and the expected response shape.
  req_input <- .locationservice$get_map_tile_input(Key = Key, MapName = MapName, X = X, Y = Y, Z = Z)
  req_output <- .locationservice$get_map_tile_output()
  # Build a service client from the current config and dispatch the call.
  svc <- .locationservice$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.locationservice$operations$get_map_tile <- locationservice_get_map_tile
#' Finds a place by its unique ID
#'
#' @description
#' Finds a place by its unique ID. A `PlaceId` is returned by other search operations.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_get_place/](https://www.paws-r-sdk.com/docs/locationservice_get_place/) for full documentation.
#'
#' @param IndexName [required] The name of the place index resource that you want to use for the
#' search.
#' @param Language The preferred language used to return results. The value must be a valid
#' BCP 47 language tag, for example, `en` for English.
#'
#' This setting affects the languages used in the results, but not the
#' results themselves. If no language is specified, or not supported for a
#' particular result, the partner automatically chooses a language for the
#' result.
#'
#' For an example, we'll use the Greek language. You search for a location
#' around Athens, Greece, with the `language` parameter set to `en`. The
#' `city` in the results will most likely be returned as `Athens`.
#'
#' If you set the `language` parameter to `el`, for Greek, then the `city`
#' in the results will more likely be returned as \eqn{A\Theta\eta\nu\alpha}.
#'
#' If the data provider does not have a value for Greek, the result will be
#' in a language that the provider does support.
#' @param PlaceId [required] The identifier of the place to find.
#'
#' @keywords internal
#'
#' @rdname locationservice_get_place
locationservice_get_place <- function(IndexName, Language = NULL, PlaceId) {
  # Describe the GetPlace REST operation.
  operation <- new_operation(
    name = "GetPlace",
    http_method = "GET",
    http_path = "/places/v0/indexes/{IndexName}/places/{PlaceId}",
    paginator = list()
  )
  # Assemble the request payload and the expected response shape.
  req_input <- .locationservice$get_place_input(IndexName = IndexName, Language = Language, PlaceId = PlaceId)
  req_output <- .locationservice$get_place_output()
  # Build a service client from the current config and dispatch the call.
  svc <- .locationservice$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.locationservice$operations$get_place <- locationservice_get_place
#' A batch request to retrieve all device positions
#'
#' @description
#' A batch request to retrieve all device positions.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_list_device_positions/](https://www.paws-r-sdk.com/docs/locationservice_list_device_positions/) for full documentation.
#'
#' @param MaxResults An optional limit for the number of entries returned in a single call.
#'
#' Default value: `100`
#' @param NextToken The pagination token specifying which page of results to return in the
#' response. If no token is provided, the default page is the first page.
#'
#' Default value: `null`
#' @param TrackerName [required] The tracker resource containing the requested devices.
#'
#' @keywords internal
#'
#' @rdname locationservice_list_device_positions
locationservice_list_device_positions <- function(MaxResults = NULL, NextToken = NULL, TrackerName) {
  # Describe the ListDevicePositions REST operation.
  operation <- new_operation(
    name = "ListDevicePositions",
    http_method = "POST",
    http_path = "/tracking/v0/trackers/{TrackerName}/list-positions",
    paginator = list()
  )
  # Assemble the request payload and the expected response shape.
  req_input <- .locationservice$list_device_positions_input(MaxResults = MaxResults, NextToken = NextToken, TrackerName = TrackerName)
  req_output <- .locationservice$list_device_positions_output()
  # Build a service client from the current config and dispatch the call.
  svc <- .locationservice$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.locationservice$operations$list_device_positions <- locationservice_list_device_positions
#' Lists geofence collections in your Amazon Web Services account
#'
#' @description
#' Lists geofence collections in your Amazon Web Services account.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_list_geofence_collections/](https://www.paws-r-sdk.com/docs/locationservice_list_geofence_collections/) for full documentation.
#'
#' @param MaxResults An optional limit for the number of resources returned in a single call.
#'
#' Default value: `100`
#' @param NextToken The pagination token specifying which page of results to return in the
#' response. If no token is provided, the default page is the first page.
#'
#' Default value: `null`
#'
#' @keywords internal
#'
#' @rdname locationservice_list_geofence_collections
locationservice_list_geofence_collections <- function(MaxResults = NULL, NextToken = NULL) {
  # Describe the ListGeofenceCollections REST operation.
  operation <- new_operation(
    name = "ListGeofenceCollections",
    http_method = "POST",
    http_path = "/geofencing/v0/list-collections",
    paginator = list()
  )
  # Assemble the request payload and the expected response shape.
  req_input <- .locationservice$list_geofence_collections_input(MaxResults = MaxResults, NextToken = NextToken)
  req_output <- .locationservice$list_geofence_collections_output()
  # Build a service client from the current config and dispatch the call.
  svc <- .locationservice$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.locationservice$operations$list_geofence_collections <- locationservice_list_geofence_collections
#' Lists geofences stored in a given geofence collection
#'
#' @description
#' Lists geofences stored in a given geofence collection.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_list_geofences/](https://www.paws-r-sdk.com/docs/locationservice_list_geofences/) for full documentation.
#'
#' @param CollectionName [required] The name of the geofence collection storing the list of geofences.
#' @param MaxResults An optional limit for the number of geofences returned in a single call.
#'
#' Default value: `100`
#' @param NextToken The pagination token specifying which page of results to return in the
#' response. If no token is provided, the default page is the first page.
#'
#' Default value: `null`
#'
#' @keywords internal
#'
#' @rdname locationservice_list_geofences
locationservice_list_geofences <- function(CollectionName, MaxResults = NULL, NextToken = NULL) {
  # Describe the ListGeofences REST operation.
  operation <- new_operation(
    name = "ListGeofences",
    http_method = "POST",
    http_path = "/geofencing/v0/collections/{CollectionName}/list-geofences",
    paginator = list()
  )
  # Assemble the request payload and the expected response shape.
  req_input <- .locationservice$list_geofences_input(CollectionName = CollectionName, MaxResults = MaxResults, NextToken = NextToken)
  req_output <- .locationservice$list_geofences_output()
  # Build a service client from the current config and dispatch the call.
  svc <- .locationservice$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.locationservice$operations$list_geofences <- locationservice_list_geofences
#' Lists API key resources in your Amazon Web Services account
#'
#' @description
#' Lists API key resources in your Amazon Web Services account.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_list_keys/](https://www.paws-r-sdk.com/docs/locationservice_list_keys/) for full documentation.
#'
#' @param Filter Optionally filter the list to only `Active` or `Expired` API keys.
#' @param MaxResults An optional limit for the number of resources returned in a single call.
#'
#' Default value: `100`
#' @param NextToken The pagination token specifying which page of results to return in the
#' response. If no token is provided, the default page is the first page.
#'
#' Default value: `null`
#'
#' @keywords internal
#'
#' @rdname locationservice_list_keys
locationservice_list_keys <- function(Filter = NULL, MaxResults = NULL, NextToken = NULL) {
  # Describe the ListKeys REST operation.
  operation <- new_operation(
    name = "ListKeys",
    http_method = "POST",
    http_path = "/metadata/v0/list-keys",
    paginator = list()
  )
  # Assemble the request payload and the expected response shape.
  req_input <- .locationservice$list_keys_input(Filter = Filter, MaxResults = MaxResults, NextToken = NextToken)
  req_output <- .locationservice$list_keys_output()
  # Build a service client from the current config and dispatch the call.
  svc <- .locationservice$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.locationservice$operations$list_keys <- locationservice_list_keys
#' Lists map resources in your Amazon Web Services account
#'
#' @description
#' Lists map resources in your Amazon Web Services account.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_list_maps/](https://www.paws-r-sdk.com/docs/locationservice_list_maps/) for full documentation.
#'
#' @param MaxResults An optional limit for the number of resources returned in a single call.
#'
#' Default value: `100`
#' @param NextToken The pagination token specifying which page of results to return in the
#' response. If no token is provided, the default page is the first page.
#'
#' Default value: `null`
#'
#' @keywords internal
#'
#' @rdname locationservice_list_maps
locationservice_list_maps <- function(MaxResults = NULL, NextToken = NULL) {
  # Describe the ListMaps REST operation.
  operation <- new_operation(
    name = "ListMaps",
    http_method = "POST",
    http_path = "/maps/v0/list-maps",
    paginator = list()
  )
  # Assemble the request payload and the expected response shape.
  req_input <- .locationservice$list_maps_input(MaxResults = MaxResults, NextToken = NextToken)
  req_output <- .locationservice$list_maps_output()
  # Build a service client from the current config and dispatch the call.
  svc <- .locationservice$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.locationservice$operations$list_maps <- locationservice_list_maps
#' Lists place index resources in your Amazon Web Services account
#'
#' @description
#' Lists place index resources in your Amazon Web Services account.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_list_place_indexes/](https://www.paws-r-sdk.com/docs/locationservice_list_place_indexes/) for full documentation.
#'
#' @param MaxResults An optional limit for the maximum number of results returned in a single
#' call.
#'
#' Default value: `100`
#' @param NextToken The pagination token specifying which page of results to return in the
#' response. If no token is provided, the default page is the first page.
#'
#' Default value: `null`
#'
#' @keywords internal
#'
#' @rdname locationservice_list_place_indexes
locationservice_list_place_indexes <- function(MaxResults = NULL, NextToken = NULL) {
  # Describe the ListPlaceIndexes REST operation.
  operation <- new_operation(
    name = "ListPlaceIndexes",
    http_method = "POST",
    http_path = "/places/v0/list-indexes",
    paginator = list()
  )
  # Assemble the request payload and the expected response shape.
  req_input <- .locationservice$list_place_indexes_input(MaxResults = MaxResults, NextToken = NextToken)
  req_output <- .locationservice$list_place_indexes_output()
  # Build a service client from the current config and dispatch the call.
  svc <- .locationservice$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.locationservice$operations$list_place_indexes <- locationservice_list_place_indexes
#' Lists route calculator resources in your Amazon Web Services account
#'
#' @description
#' Lists route calculator resources in your Amazon Web Services account.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_list_route_calculators/](https://www.paws-r-sdk.com/docs/locationservice_list_route_calculators/) for full documentation.
#'
#' @param MaxResults An optional maximum number of results returned in a single call.
#'
#' Default Value: `100`
#' @param NextToken The pagination token specifying which page of results to return in the
#' response. If no token is provided, the default page is the first page.
#'
#' Default Value: `null`
#'
#' @keywords internal
#'
#' @rdname locationservice_list_route_calculators
locationservice_list_route_calculators <- function(MaxResults = NULL, NextToken = NULL) {
  # Describe the ListRouteCalculators REST operation.
  operation <- new_operation(
    name = "ListRouteCalculators",
    http_method = "POST",
    http_path = "/routes/v0/list-calculators",
    paginator = list()
  )
  # Assemble the request payload and the expected response shape.
  req_input <- .locationservice$list_route_calculators_input(MaxResults = MaxResults, NextToken = NextToken)
  req_output <- .locationservice$list_route_calculators_output()
  # Build a service client from the current config and dispatch the call.
  svc <- .locationservice$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.locationservice$operations$list_route_calculators <- locationservice_list_route_calculators
#' Returns a list of tags that are applied to the specified Amazon Location
#' resource
#'
#' @description
#' Returns a list of tags that are applied to the specified Amazon Location resource.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_list_tags_for_resource/](https://www.paws-r-sdk.com/docs/locationservice_list_tags_for_resource/) for full documentation.
#'
#' @param ResourceArn [required] The Amazon Resource Name (ARN) of the resource whose tags you want to
#' retrieve.
#'
#' - Format example:
#' `arn:aws:geo:region:account-id:resourcetype/ExampleResource`
#'
#' @keywords internal
#'
#' @rdname locationservice_list_tags_for_resource
locationservice_list_tags_for_resource <- function(ResourceArn) {
  # Describe the ListTagsForResource REST operation.
  operation <- new_operation(
    name = "ListTagsForResource",
    http_method = "GET",
    http_path = "/tags/{ResourceArn}",
    paginator = list()
  )
  # Assemble the request payload and the expected response shape.
  req_input <- .locationservice$list_tags_for_resource_input(ResourceArn = ResourceArn)
  req_output <- .locationservice$list_tags_for_resource_output()
  # Build a service client from the current config and dispatch the call.
  svc <- .locationservice$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.locationservice$operations$list_tags_for_resource <- locationservice_list_tags_for_resource
#' Lists geofence collections currently associated to the given tracker
#' resource
#'
#' @description
#' Lists geofence collections currently associated to the given tracker resource.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_list_tracker_consumers/](https://www.paws-r-sdk.com/docs/locationservice_list_tracker_consumers/) for full documentation.
#'
#' @param MaxResults An optional limit for the number of resources returned in a single call.
#'
#' Default value: `100`
#' @param NextToken The pagination token specifying which page of results to return in the
#' response. If no token is provided, the default page is the first page.
#'
#' Default value: `null`
#' @param TrackerName [required] The tracker resource whose associated geofence collections you want to
#' list.
#'
#' @keywords internal
#'
#' @rdname locationservice_list_tracker_consumers
locationservice_list_tracker_consumers <- function(MaxResults = NULL, NextToken = NULL, TrackerName) {
  # Describe the ListTrackerConsumers REST operation.
  operation <- new_operation(
    name = "ListTrackerConsumers",
    http_method = "POST",
    http_path = "/tracking/v0/trackers/{TrackerName}/list-consumers",
    paginator = list()
  )
  # Assemble the request payload and the expected response shape.
  req_input <- .locationservice$list_tracker_consumers_input(MaxResults = MaxResults, NextToken = NextToken, TrackerName = TrackerName)
  req_output <- .locationservice$list_tracker_consumers_output()
  # Build a service client from the current config and dispatch the call.
  svc <- .locationservice$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.locationservice$operations$list_tracker_consumers <- locationservice_list_tracker_consumers
#' Lists tracker resources in your Amazon Web Services account
#'
#' @description
#' Lists tracker resources in your Amazon Web Services account.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_list_trackers/](https://www.paws-r-sdk.com/docs/locationservice_list_trackers/) for full documentation.
#'
#' @param MaxResults An optional limit for the number of resources returned in a single call.
#'
#' Default value: `100`
#' @param NextToken The pagination token specifying which page of results to return in the
#' response. If no token is provided, the default page is the first page.
#'
#' Default value: `null`
#'
#' @keywords internal
#'
#' @rdname locationservice_list_trackers
locationservice_list_trackers <- function(MaxResults = NULL, NextToken = NULL) {
  # Describe the ListTrackers REST operation.
  operation <- new_operation(
    name = "ListTrackers",
    http_method = "POST",
    http_path = "/tracking/v0/list-trackers",
    paginator = list()
  )
  # Assemble the request payload and the expected response shape.
  req_input <- .locationservice$list_trackers_input(MaxResults = MaxResults, NextToken = NextToken)
  req_output <- .locationservice$list_trackers_output()
  # Build a service client from the current config and dispatch the call.
  svc <- .locationservice$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.locationservice$operations$list_trackers <- locationservice_list_trackers
#' Stores a geofence geometry in a given geofence collection, or updates
#' the geometry of an existing geofence if a geofence ID is included in the
#' request
#'
#' @description
#' Stores a geofence geometry in a given geofence collection, or updates the geometry of an existing geofence if a geofence ID is included in the request.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_put_geofence/](https://www.paws-r-sdk.com/docs/locationservice_put_geofence/) for full documentation.
#'
#' @param CollectionName [required] The geofence collection to store the geofence in.
#' @param GeofenceId [required] An identifier for the geofence. For example, `ExampleGeofence-1`.
#' @param Geometry [required] Contains the details to specify the position of the geofence. Can be
#' either a polygon or a circle. Including both will return a validation
#' error.
#'
#' Each [geofence
#' polygon](https://docs.aws.amazon.com/location/latest/APIReference/API_GeofenceGeometry.html)
#' can have a maximum of 1,000 vertices.
#'
#' @keywords internal
#'
#' @rdname locationservice_put_geofence
locationservice_put_geofence <- function(CollectionName, GeofenceId, Geometry) {
  # Describe the PutGeofence REST operation.
  operation <- new_operation(
    name = "PutGeofence",
    http_method = "PUT",
    http_path = "/geofencing/v0/collections/{CollectionName}/geofences/{GeofenceId}",
    paginator = list()
  )
  # Assemble the request payload and the expected response shape.
  req_input <- .locationservice$put_geofence_input(CollectionName = CollectionName, GeofenceId = GeofenceId, Geometry = Geometry)
  req_output <- .locationservice$put_geofence_output()
  # Build a service client from the current config and dispatch the call.
  svc <- .locationservice$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.locationservice$operations$put_geofence <- locationservice_put_geofence
#' Reverse geocodes a given coordinate and returns a legible address
#'
#' @description
#' Reverse geocodes a given coordinate and returns a legible address. Allows you to search for Places or points of interest near a given position.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_search_place_index_for_position/](https://www.paws-r-sdk.com/docs/locationservice_search_place_index_for_position/) for full documentation.
#'
#' @param IndexName [required] The name of the place index resource you want to use for the search.
#' @param Language The preferred language used to return results. The value must be a valid
#' BCP 47 language tag, for example, `en` for English.
#'
#' This setting affects the languages used in the results, but not the
#' results themselves. If no language is specified, or not supported for a
#' particular result, the partner automatically chooses a language for the
#' result.
#'
#' For an example, we'll use the Greek language. You search for a location
#' around Athens, Greece, with the `language` parameter set to `en`. The
#' `city` in the results will most likely be returned as `Athens`.
#'
#' If you set the `language` parameter to `el`, for Greek, then the `city`
#' in the results will more likely be returned as \eqn{A\Theta\eta\nu\alpha}.
#'
#' If the data provider does not have a value for Greek, the result will be
#' in a language that the provider does support.
#' @param MaxResults An optional parameter. The maximum number of results returned per
#' request.
#'
#' Default value: `50`
#' @param Position [required] Specifies the longitude and latitude of the position to query.
#'
#' This parameter must contain a pair of numbers. The first number
#' represents the X coordinate, or longitude; the second number represents
#' the Y coordinate, or latitude.
#'
#' For example, `[-123.1174, 49.2847]` represents a position with longitude
#' `-123.1174` and latitude `49.2847`.
#'
#' @keywords internal
#'
#' @rdname locationservice_search_place_index_for_position
locationservice_search_place_index_for_position <- function(IndexName, Language = NULL, MaxResults = NULL, Position) {
  # Describe the SearchPlaceIndexForPosition REST operation.
  operation <- new_operation(
    name = "SearchPlaceIndexForPosition",
    http_method = "POST",
    http_path = "/places/v0/indexes/{IndexName}/search/position",
    paginator = list()
  )
  # Assemble the request payload and the expected response shape.
  req_input <- .locationservice$search_place_index_for_position_input(IndexName = IndexName, Language = Language, MaxResults = MaxResults, Position = Position)
  req_output <- .locationservice$search_place_index_for_position_output()
  # Build a service client from the current config and dispatch the call.
  svc <- .locationservice$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.locationservice$operations$search_place_index_for_position <- locationservice_search_place_index_for_position
#' Generates suggestions for addresses and points of interest based on
#' partial or misspelled free-form text
#'
#' @description
#' Generates suggestions for addresses and points of interest based on partial or misspelled free-form text. This operation is also known as autocomplete, autosuggest, or fuzzy matching.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_search_place_index_for_suggestions/](https://www.paws-r-sdk.com/docs/locationservice_search_place_index_for_suggestions/) for full documentation.
#'
#' @param BiasPosition An optional parameter that indicates a preference for place suggestions
#' that are closer to a specified position.
#'
#' If provided, this parameter must contain a pair of numbers. The first
#' number represents the X coordinate, or longitude; the second number
#' represents the Y coordinate, or latitude.
#'
#' For example, `[-123.1174, 49.2847]` represents the position with
#' longitude `-123.1174` and latitude `49.2847`.
#'
#' `BiasPosition` and `FilterBBox` are mutually exclusive. Specifying both
#' options results in an error.
#' @param FilterBBox An optional parameter that limits the search results by returning only
#' suggestions within a specified bounding box.
#'
#' If provided, this parameter must contain a total of four consecutive
#' numbers in two pairs. The first pair of numbers represents the X and Y
#' coordinates (longitude and latitude, respectively) of the southwest
#' corner of the bounding box; the second pair of numbers represents the X
#' and Y coordinates (longitude and latitude, respectively) of the
#' northeast corner of the bounding box.
#'
#' For example, `[-12.7935, -37.4835, -12.0684, -36.9542]` represents a
#' bounding box where the southwest corner has longitude `-12.7935` and
#' latitude `-37.4835`, and the northeast corner has longitude `-12.0684`
#' and latitude `-36.9542`.
#'
#' `FilterBBox` and `BiasPosition` are mutually exclusive. Specifying both
#' options results in an error.
#' @param FilterCountries An optional parameter that limits the search results by returning only
#' suggestions within the provided list of countries.
#'
#' - Use the [ISO 3166](https://www.iso.org/iso-3166-country-codes.html)
#' 3-digit country code. For example, Australia uses three upper-case
#' characters: `AUS`.
#' @param IndexName [required] The name of the place index resource you want to use for the search.
#' @param Language The preferred language used to return results. The value must be a valid
#' BCP 47 language tag, for example, `en` for English.
#'
#' This setting affects the languages used in the results. If no language
#' is specified, or not supported for a particular result, the partner
#' automatically chooses a language for the result.
#'
#' For an example, we'll use the Greek language. You search for
#' `Athens, Gr` to get suggestions with the `language` parameter set to
#' `en`. The results found will most likely be returned as
#' `Athens, Greece`.
#'
#' If you set the `language` parameter to `el`, for Greek, then the result
#' found will more likely be returned as \eqn{A\Theta\eta\nu\sigma, E\lambda\lambda\alpha\delta}.
#'
#' If the data provider does not have a value for Greek, the result will be
#' in a language that the provider does support.
#' @param MaxResults An optional parameter. The maximum number of results returned per
#' request.
#'
#' The default: `5`
#' @param Text [required] The free-form partial text to use to generate place suggestions. For
#' example, `eiffel tow`.
#'
#' @keywords internal
#'
#' @rdname locationservice_search_place_index_for_suggestions
locationservice_search_place_index_for_suggestions <- function(BiasPosition = NULL, FilterBBox = NULL, FilterCountries = NULL, IndexName, Language = NULL, MaxResults = NULL, Text) {
  # Describe the SearchPlaceIndexForSuggestions REST operation.
  operation <- new_operation(
    name = "SearchPlaceIndexForSuggestions",
    http_method = "POST",
    http_path = "/places/v0/indexes/{IndexName}/search/suggestions",
    paginator = list()
  )
  # Assemble the request payload and the expected response shape.
  req_input <- .locationservice$search_place_index_for_suggestions_input(BiasPosition = BiasPosition, FilterBBox = FilterBBox, FilterCountries = FilterCountries, IndexName = IndexName, Language = Language, MaxResults = MaxResults, Text = Text)
  req_output <- .locationservice$search_place_index_for_suggestions_output()
  # Build a service client from the current config and dispatch the call.
  svc <- .locationservice$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.locationservice$operations$search_place_index_for_suggestions <- locationservice_search_place_index_for_suggestions
#' Geocodes free-form text, such as an address, name, city, or region to
#' allow you to search for Places or points of interest
#'
#' @description
#' Geocodes free-form text, such as an address, name, city, or region to allow you to search for Places or points of interest.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_search_place_index_for_text/](https://www.paws-r-sdk.com/docs/locationservice_search_place_index_for_text/) for full documentation.
#'
#' @param BiasPosition An optional parameter that indicates a preference for places that are
#' closer to a specified position.
#'
#' If provided, this parameter must contain a pair of numbers. The first
#' number represents the X coordinate, or longitude; the second number
#' represents the Y coordinate, or latitude.
#'
#' For example, `[-123.1174, 49.2847]` represents the position with
#' longitude `-123.1174` and latitude `49.2847`.
#'
#' `BiasPosition` and `FilterBBox` are mutually exclusive. Specifying both
#' options results in an error.
#' @param FilterBBox An optional parameter that limits the search results by returning only
#' places that are within the provided bounding box.
#'
#' If provided, this parameter must contain a total of four consecutive
#' numbers in two pairs. The first pair of numbers represents the X and Y
#' coordinates (longitude and latitude, respectively) of the southwest
#' corner of the bounding box; the second pair of numbers represents the X
#' and Y coordinates (longitude and latitude, respectively) of the
#' northeast corner of the bounding box.
#'
#' For example, `[-12.7935, -37.4835, -12.0684, -36.9542]` represents a
#' bounding box where the southwest corner has longitude `-12.7935` and
#' latitude `-37.4835`, and the northeast corner has longitude `-12.0684`
#' and latitude `-36.9542`.
#'
#' `FilterBBox` and `BiasPosition` are mutually exclusive. Specifying both
#' options results in an error.
#' @param FilterCountries An optional parameter that limits the search results by returning only
#' places that are in a specified list of countries.
#'
#' - Valid values include [ISO
#' 3166](https://www.iso.org/iso-3166-country-codes.html) 3-digit
#' country codes. For example, Australia uses three upper-case
#' characters: `AUS`.
#' @param IndexName [required] The name of the place index resource you want to use for the search.
#' @param Language The preferred language used to return results. The value must be a valid
#' BCP 47 language tag, for example, `en` for English.
#'
#' This setting affects the languages used in the results, but not the
#' results themselves. If no language is specified, or not supported for a
#' particular result, the partner automatically chooses a language for the
#' result.
#'
#' For an example, we'll use the Greek language. You search for
#' `Athens, Greece`, with the `language` parameter set to `en`. The result
#' found will most likely be returned as `Athens`.
#'
#' If you set the `language` parameter to `el`, for Greek, then the result
#' found will more likely be returned as \eqn{A\Theta\eta\nu\alpha}.
#'
#' If the data provider does not have a value for Greek, the result will be
#' in a language that the provider does support.
#' @param MaxResults An optional parameter. The maximum number of results returned per
#' request.
#'
#' The default: `50`
#' @param Text [required] The address, name, city, or region to be used in the search in free-form
#' text format. For example, `123 Any Street`.
#'
#' @keywords internal
#'
#' @rdname locationservice_search_place_index_for_text
locationservice_search_place_index_for_text <- function(BiasPosition = NULL, FilterBBox = NULL, FilterCountries = NULL, IndexName, Language = NULL, MaxResults = NULL, Text) {
  # Describe the SearchPlaceIndexForText REST operation.
  operation <- new_operation(
    name = "SearchPlaceIndexForText",
    http_method = "POST",
    http_path = "/places/v0/indexes/{IndexName}/search/text",
    paginator = list()
  )
  # Assemble the request payload and the expected response shape.
  req_input <- .locationservice$search_place_index_for_text_input(BiasPosition = BiasPosition, FilterBBox = FilterBBox, FilterCountries = FilterCountries, IndexName = IndexName, Language = Language, MaxResults = MaxResults, Text = Text)
  req_output <- .locationservice$search_place_index_for_text_output()
  # Build a service client from the current config and dispatch the call.
  svc <- .locationservice$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.locationservice$operations$search_place_index_for_text <- locationservice_search_place_index_for_text
#' Assigns one or more tags (key-value pairs) to the specified Amazon
#' Location Service resource
#'
#' @description
#' Assigns one or more tags (key-value pairs) to the specified Amazon Location Service resource.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_tag_resource/](https://www.paws-r-sdk.com/docs/locationservice_tag_resource/) for full documentation.
#'
#' @param ResourceArn [required] The Amazon Resource Name (ARN) of the resource whose tags you want to
#' update.
#'
#' - Format example:
#' `arn:aws:geo:region:account-id:resourcetype/ExampleResource`
#' @param Tags [required] Applies one or more tags to specific resource. A tag is a key-value pair
#' that helps you manage, identify, search, and filter your resources.
#'
#' Format: `"key" : "value"`
#'
#' Restrictions:
#'
#' - Maximum 50 tags per resource.
#'
#' - Each tag key must be unique and must have exactly one associated
#' value.
#'
#' - Maximum key length: 128 Unicode characters in UTF-8.
#'
#' - Maximum value length: 256 Unicode characters in UTF-8.
#'
#' - Can use alphanumeric characters (A–Z, a–z, 0–9), and the following
#' characters: + - = . _ : / @@
#'
#' - Cannot use "aws:" as a prefix for a key.
#'
#' @keywords internal
#'
#' @rdname locationservice_tag_resource
locationservice_tag_resource <- function(ResourceArn, Tags) {
  # Describe the TagResource REST operation.
  operation <- new_operation(
    name = "TagResource",
    http_method = "POST",
    http_path = "/tags/{ResourceArn}",
    paginator = list()
  )
  # Assemble the request payload and the expected response shape.
  req_input <- .locationservice$tag_resource_input(ResourceArn = ResourceArn, Tags = Tags)
  req_output <- .locationservice$tag_resource_output()
  # Build a service client from the current config and dispatch the call.
  svc <- .locationservice$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.locationservice$operations$tag_resource <- locationservice_tag_resource
#' Removes one or more tags from the specified Amazon Location resource
#'
#' @description
#' Removes one or more tags from the specified Amazon Location resource.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_untag_resource/](https://www.paws-r-sdk.com/docs/locationservice_untag_resource/) for full documentation.
#'
#' @param ResourceArn [required] The Amazon Resource Name (ARN) of the resource from which you want to
#' remove tags.
#'
#' - Format example:
#' `arn:aws:geo:region:account-id:resourcetype/ExampleResource`
#' @param TagKeys [required] The list of tag keys to remove from the specified resource.
#'
#' @keywords internal
#'
#' @rdname locationservice_untag_resource
locationservice_untag_resource <- function(ResourceArn, TagKeys) {
  # Describe the UntagResource REST operation (DELETE on the tags endpoint).
  operation <- new_operation(
    name = "UntagResource",
    http_method = "DELETE",
    http_path = "/tags/{ResourceArn}",
    paginator = list()
  )
  # Marshal the caller's arguments into the request/response shapes.
  req_input <- .locationservice$untag_resource_input(ResourceArn = ResourceArn, TagKeys = TagKeys)
  req_output <- .locationservice$untag_resource_output()
  # Build a service client from the active config and dispatch the call.
  svc <- .locationservice$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.locationservice$operations$untag_resource <- locationservice_untag_resource
#' Updates the specified properties of a given geofence collection
#'
#' @description
#' Updates the specified properties of a given geofence collection.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_update_geofence_collection/](https://www.paws-r-sdk.com/docs/locationservice_update_geofence_collection/) for full documentation.
#'
#' @param CollectionName [required] The name of the geofence collection to update.
#' @param Description Updates the description for the geofence collection.
#' @param PricingPlan No longer used. If included, the only allowed value is
#' `RequestBasedUsage`.
#' @param PricingPlanDataSource This parameter is no longer used.
#'
#' @keywords internal
#'
#' @rdname locationservice_update_geofence_collection
locationservice_update_geofence_collection <- function(CollectionName, Description = NULL, PricingPlan = NULL, PricingPlanDataSource = NULL) {
  # Describe the UpdateGeofenceCollection REST operation (PATCH on the collection).
  operation <- new_operation(
    name = "UpdateGeofenceCollection",
    http_method = "PATCH",
    http_path = "/geofencing/v0/collections/{CollectionName}",
    paginator = list()
  )
  # Marshal the caller's arguments into the request/response shapes.
  req_input <- .locationservice$update_geofence_collection_input(CollectionName = CollectionName, Description = Description, PricingPlan = PricingPlan, PricingPlanDataSource = PricingPlanDataSource)
  req_output <- .locationservice$update_geofence_collection_output()
  # Build a service client from the active config and dispatch the call.
  svc <- .locationservice$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.locationservice$operations$update_geofence_collection <- locationservice_update_geofence_collection
#' Updates the specified properties of a given API key resource
#'
#' @description
#' Updates the specified properties of a given API key resource.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_update_key/](https://www.paws-r-sdk.com/docs/locationservice_update_key/) for full documentation.
#'
#' @param Description Updates the description for the API key resource.
#' @param ExpireTime Updates the timestamp for when the API key resource will expire in [ISO
#' 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format:
#' `YYYY-MM-DDThh:mm:ss.sssZ`.
#' @param ForceUpdate The boolean flag to be included for updating `ExpireTime` or
#' `Restrictions` details.
#'
#' Must be set to `true` to update an API key resource that has been used
#' in the past 7 days.
#'
#' `False` if force update is not preferred
#'
#' Default value: `False`
#' @param KeyName [required] The name of the API key resource to update.
#' @param NoExpiry Whether the API key should expire. Set to `true` to set the API key to
#' have no expiration time.
#' @param Restrictions Updates the API key restrictions for the API key resource.
#'
#' @keywords internal
#'
#' @rdname locationservice_update_key
locationservice_update_key <- function(Description = NULL, ExpireTime = NULL, ForceUpdate = NULL, KeyName, NoExpiry = NULL, Restrictions = NULL) {
  # Describe the UpdateKey REST operation (PATCH on the named API key).
  operation <- new_operation(
    name = "UpdateKey",
    http_method = "PATCH",
    http_path = "/metadata/v0/keys/{KeyName}",
    paginator = list()
  )
  # Marshal the caller's arguments into the request/response shapes.
  req_input <- .locationservice$update_key_input(Description = Description, ExpireTime = ExpireTime, ForceUpdate = ForceUpdate, KeyName = KeyName, NoExpiry = NoExpiry, Restrictions = Restrictions)
  req_output <- .locationservice$update_key_output()
  # Build a service client from the active config and dispatch the call.
  svc <- .locationservice$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.locationservice$operations$update_key <- locationservice_update_key
#' Updates the specified properties of a given map resource
#'
#' @description
#' Updates the specified properties of a given map resource.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_update_map/](https://www.paws-r-sdk.com/docs/locationservice_update_map/) for full documentation.
#'
#' @param ConfigurationUpdate Updates the parts of the map configuration that can be updated,
#' including the political view.
#' @param Description Updates the description for the map resource.
#' @param MapName [required] The name of the map resource to update.
#' @param PricingPlan No longer used. If included, the only allowed value is
#' `RequestBasedUsage`.
#'
#' @keywords internal
#'
#' @rdname locationservice_update_map
locationservice_update_map <- function(ConfigurationUpdate = NULL, Description = NULL, MapName, PricingPlan = NULL) {
  # Describe the UpdateMap REST operation (PATCH on the named map).
  operation <- new_operation(
    name = "UpdateMap",
    http_method = "PATCH",
    http_path = "/maps/v0/maps/{MapName}",
    paginator = list()
  )
  # Marshal the caller's arguments into the request/response shapes.
  req_input <- .locationservice$update_map_input(ConfigurationUpdate = ConfigurationUpdate, Description = Description, MapName = MapName, PricingPlan = PricingPlan)
  req_output <- .locationservice$update_map_output()
  # Build a service client from the active config and dispatch the call.
  svc <- .locationservice$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.locationservice$operations$update_map <- locationservice_update_map
#' Updates the specified properties of a given place index resource
#'
#' @description
#' Updates the specified properties of a given place index resource.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_update_place_index/](https://www.paws-r-sdk.com/docs/locationservice_update_place_index/) for full documentation.
#'
#' @param DataSourceConfiguration Updates the data storage option for the place index resource.
#' @param Description Updates the description for the place index resource.
#' @param IndexName [required] The name of the place index resource to update.
#' @param PricingPlan No longer used. If included, the only allowed value is
#' `RequestBasedUsage`.
#'
#' @keywords internal
#'
#' @rdname locationservice_update_place_index
locationservice_update_place_index <- function(DataSourceConfiguration = NULL, Description = NULL, IndexName, PricingPlan = NULL) {
  # Describe the UpdatePlaceIndex REST operation (PATCH on the named index).
  operation <- new_operation(
    name = "UpdatePlaceIndex",
    http_method = "PATCH",
    http_path = "/places/v0/indexes/{IndexName}",
    paginator = list()
  )
  # Marshal the caller's arguments into the request/response shapes.
  req_input <- .locationservice$update_place_index_input(DataSourceConfiguration = DataSourceConfiguration, Description = Description, IndexName = IndexName, PricingPlan = PricingPlan)
  req_output <- .locationservice$update_place_index_output()
  # Build a service client from the active config and dispatch the call.
  svc <- .locationservice$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.locationservice$operations$update_place_index <- locationservice_update_place_index
#' Updates the specified properties for a given route calculator resource
#'
#' @description
#' Updates the specified properties for a given route calculator resource.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_update_route_calculator/](https://www.paws-r-sdk.com/docs/locationservice_update_route_calculator/) for full documentation.
#'
#' @param CalculatorName [required] The name of the route calculator resource to update.
#' @param Description Updates the description for the route calculator resource.
#' @param PricingPlan No longer used. If included, the only allowed value is
#' `RequestBasedUsage`.
#'
#' @keywords internal
#'
#' @rdname locationservice_update_route_calculator
locationservice_update_route_calculator <- function(CalculatorName, Description = NULL, PricingPlan = NULL) {
  # Describe the UpdateRouteCalculator REST operation (PATCH on the calculator).
  operation <- new_operation(
    name = "UpdateRouteCalculator",
    http_method = "PATCH",
    http_path = "/routes/v0/calculators/{CalculatorName}",
    paginator = list()
  )
  # Marshal the caller's arguments into the request/response shapes.
  req_input <- .locationservice$update_route_calculator_input(CalculatorName = CalculatorName, Description = Description, PricingPlan = PricingPlan)
  req_output <- .locationservice$update_route_calculator_output()
  # Build a service client from the active config and dispatch the call.
  svc <- .locationservice$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.locationservice$operations$update_route_calculator <- locationservice_update_route_calculator
#' Updates the specified properties of a given tracker resource
#'
#' @description
#' Updates the specified properties of a given tracker resource.
#'
#' See [https://www.paws-r-sdk.com/docs/locationservice_update_tracker/](https://www.paws-r-sdk.com/docs/locationservice_update_tracker/) for full documentation.
#'
#' @param Description Updates the description for the tracker resource.
#' @param PositionFiltering Updates the position filtering for the tracker resource.
#'
#' Valid values:
#'
#' - `TimeBased` - Location updates are evaluated against linked geofence
#' collections, but not every location update is stored. If your update
#' frequency is more often than 30 seconds, only one update per 30
#' seconds is stored for each unique device ID.
#'
#' - `DistanceBased` - If the device has moved less than 30 m (98.4 ft),
#' location updates are ignored. Location updates within this distance
#' are neither evaluated against linked geofence collections, nor
#' stored. This helps control costs by reducing the number of geofence
#' evaluations and historical device positions to paginate through.
#' Distance-based filtering can also reduce the effects of GPS noise
#' when displaying device trajectories on a map.
#'
#' - `AccuracyBased` - If the device has moved less than the measured
#' accuracy, location updates are ignored. For example, if two
#' consecutive updates from a device have a horizontal accuracy of 5 m
#' and 10 m, the second update is ignored if the device has moved less
#' than 15 m. Ignored location updates are neither evaluated against
#' linked geofence collections, nor stored. This helps reduce the
#' effects of GPS noise when displaying device trajectories on a map,
#' and can help control costs by reducing the number of geofence
#' evaluations.
#' @param PricingPlan No longer used. If included, the only allowed value is
#' `RequestBasedUsage`.
#' @param PricingPlanDataSource This parameter is no longer used.
#' @param TrackerName [required] The name of the tracker resource to update.
#'
#' @keywords internal
#'
#' @rdname locationservice_update_tracker
locationservice_update_tracker <- function(Description = NULL, PositionFiltering = NULL, PricingPlan = NULL, PricingPlanDataSource = NULL, TrackerName) {
  # Describe the UpdateTracker REST operation (PATCH on the named tracker).
  operation <- new_operation(
    name = "UpdateTracker",
    http_method = "PATCH",
    http_path = "/tracking/v0/trackers/{TrackerName}",
    paginator = list()
  )
  # Marshal the caller's arguments into the request/response shapes.
  req_input <- .locationservice$update_tracker_input(Description = Description, PositionFiltering = PositionFiltering, PricingPlan = PricingPlan, PricingPlanDataSource = PricingPlanDataSource, TrackerName = TrackerName)
  req_output <- .locationservice$update_tracker_output()
  # Build a service client from the active config and dispatch the call.
  svc <- .locationservice$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.locationservice$operations$update_tracker <- locationservice_update_tracker
|
# Exploratory plot 4: a 2x2 panel of household power consumption for
# 2007-02-01 / 2007-02-02, rendered to screen and copied to plot4.png.
# Read the raw data; "?" marks missing values in this dataset.
data <- read.delim("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")
# Parse Date with %d (portable; %e is platform-dependent for input) and keep
# it as class Date -- POSIXlt columns inside a data.frame are fragile.
data$Date <- as.Date(data$Date, "%d/%m/%Y")
data2 <- data[data$Date >= as.Date("2007-02-01") & data$Date <= as.Date("2007-02-02"), ]
# Combine date and time into a single POSIXct timestamp for the x-axes.
data2$Date <- as.POSIXct(paste(data2$Date, data2$Time))
# main plotting: four panels, filled row by row
par(mfrow = c(2, 2), mar = c(5, 5, 2, 2))
plot(data2$Date, data2$Global_active_power, type = "l",
     xlab = "", ylab = "Global Active Power (kilowatts)")
plot(data2$Date, data2$Voltage, type = "l",
     xlab = "datetime", ylab = "Voltage")
# Empty frame first, then one line per sub-meter. (Fixes the stray trailing
# comma in the original plot() call, which passed an empty argument.)
plot(data2$Date, data2$Sub_metering_1, type = "n", xlab = "",
     ylab = "Energy sub metering",
     ylim = c(0, max(c(data2$Sub_metering_1, data2$Sub_metering_2,
                       data2$Sub_metering_3), na.rm = TRUE)))
lines(data2$Date, data2$Sub_metering_1, col = "black")
lines(data2$Date, data2$Sub_metering_2, col = "red")
lines(data2$Date, data2$Sub_metering_3, col = "blue")
legend("topright", pch = "-", lwd = 1, bty = "n", cex = .6, pt.cex = 0.6,
       col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
plot(data2$Date, data2$Global_reactive_power, type = "l",
     xlab = "datetime", ylab = "Global_reactive_power")
# saving into png
dev.copy(png, file = "plot4.png", width = 480, height = 480)
dev.off()
| /plot4.R | no_license | TroubleMak3r/ExData_Plotting1 | R | false | false | 1,253 | r |
# Exploratory plot 4: a 2x2 panel of household power consumption for
# 2007-02-01 / 2007-02-02, rendered to screen and copied to plot4.png.
# Read the raw data; "?" marks missing values in this dataset.
data <- read.delim("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")
# Parse Date with %d (portable; %e is platform-dependent for input) and keep
# it as class Date -- POSIXlt columns inside a data.frame are fragile.
data$Date <- as.Date(data$Date, "%d/%m/%Y")
data2 <- data[data$Date >= as.Date("2007-02-01") & data$Date <= as.Date("2007-02-02"), ]
# Combine date and time into a single POSIXct timestamp for the x-axes.
data2$Date <- as.POSIXct(paste(data2$Date, data2$Time))
# main plotting: four panels, filled row by row
par(mfrow = c(2, 2), mar = c(5, 5, 2, 2))
plot(data2$Date, data2$Global_active_power, type = "l",
     xlab = "", ylab = "Global Active Power (kilowatts)")
plot(data2$Date, data2$Voltage, type = "l",
     xlab = "datetime", ylab = "Voltage")
# Empty frame first, then one line per sub-meter. (Fixes the stray trailing
# comma in the original plot() call, which passed an empty argument.)
plot(data2$Date, data2$Sub_metering_1, type = "n", xlab = "",
     ylab = "Energy sub metering",
     ylim = c(0, max(c(data2$Sub_metering_1, data2$Sub_metering_2,
                       data2$Sub_metering_3), na.rm = TRUE)))
lines(data2$Date, data2$Sub_metering_1, col = "black")
lines(data2$Date, data2$Sub_metering_2, col = "red")
lines(data2$Date, data2$Sub_metering_3, col = "blue")
legend("topright", pch = "-", lwd = 1, bty = "n", cex = .6, pt.cex = 0.6,
       col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
plot(data2$Date, data2$Global_reactive_power, type = "l",
     xlab = "datetime", ylab = "Global_reactive_power")
# saving into png
dev.copy(png, file = "plot4.png", width = 480, height = 480)
dev.off()
|
#Not considered Shift and is.na(Client) <- 'AGS'
# Attrition analysis, data preparation: pulls active and inactive employee
# snapshots from the EDW over ODBC (requires the "PulseDB_EDW_BI" DSN) and
# derives the modelling frame for the Medical Coding department.
# install.packages("RODBC")
library(RODBC)
odbcCloseAll()
edw_bi <- odbcConnect("PulseDB_EDW_BI")
active <- sqlFetch(edw_bi, "AttAnalysis_tblActiveEmployees")
inactive <- sqlFetch(edw_bi, "AttAnalysis_tblInActiveEmployees")
# Stack current and former employees into a single frame.
dataa <- rbind(active, inactive)
AR <- subset(dataa, dataa$Department == "Medical Coding")
TM <- subset(AR, JobRole == "Team Member")
# Relieving year 1900 appears to be the placeholder for "still employed";
# keep those plus anyone relieved in 2015 or later.
Current <- subset(TM, as.numeric(format(TM$DateOfRelieving, '%Y')) == 1900 | as.numeric(format(TM$DateOfRelieving, '%Y')) >= 2015 )
################ Attrition ##########
# only 2 > on the basis that we'll not have date for current employees as told by sid (his discussion with IT) on 23 Dec
# Attrition = 1 for leavers, Available = 1 for still-employed; Status is the
# same split as a factor label.
Current$Attrition = ifelse(as.numeric(format(Current$DateOfRelieving, '%Y')) == 1900, 0, 1)
Current$Available = ifelse(as.numeric(format(Current$DateOfRelieving, '%Y')) != 1900, 0, 1)
Current$Status = as.factor(ifelse(as.numeric(format(Current$DateOfRelieving, '%Y')) == 1900, "Current", "Past"))
########## EmployeeAge #######
library(lubridate)
# Age in years: up to today for current staff, up to the relieving date for leavers.
Current$EmployeeAge <- ifelse(Current$Status=="Current", interval(Current$DateofBirth, today())/duration(num=1, units = "years"), interval(Current$DateofBirth, Current$DateOfRelieving)/duration(num=1, units = "years"))
############ modified variables ###############
# Recode missing Client as the explicit level "AGS".
y <- addNA(Current$Client)
levels(y)[is.na(levels(y))] <- "AGS" # Maximum number of categories (50) exceeded
Current$Client <- y
summary(Current$Client)
############### New variables ###############
# Calendar month of joining / relieving, kept as factors for the model.
JMonth <- month(Current$DateofJoin)
RMonth <- month(Current$DateOfRelieving)
Current$JMonth <- as.factor(JMonth)
Current$RMonth <- as.factor(RMonth)
##################### Use limited data ############
# Keep identifiers plus the candidate predictors and outcome columns.
# "Shift2",
dataset2 <- Current[c( "EmployeeCode", "EmployeeName", "AGSExperienceInMonths", "Gender", "EmployeeAge"
, "JMonth", "RMonth", "MaritalStatus", "WorkLocation", "ExperienceType", "ProdAvgLast3Months"
, "QualAvgLast3Months", "CourseLevels", "Last30DaysLeaveCount","TotalExtraHoursWorked"
, "LastReviewType", "LastReviewRating", "Client", "SubClient"
, "TransportMode","EngagementIndex", "FunctionName", "Status", "Attrition", "Available")]
summary(dataset2)
################# Imputation ###################
# Multiple imputation (mice) of the three predictors with missing values;
# seed fixed so the stochastic imputation is reproducible.
library(mice)
simple <- dataset2[c("MaritalStatus", "CourseLevels", "EngagementIndex")]
summary(simple)
set.seed(144)
imputed = complete(mice(simple))
summary(imputed)
dataset2$MaritalStatus = imputed$MaritalStatus
dataset2$CourseLevels = imputed$CourseLevels
dataset2$EngagementIndex = imputed$EngagementIndex
############## Chi Square Test ############
# Univariate association screen of each predictor against Attrition; the
# simulate.p.value variants use Monte Carlo p-values (presumably where
# expected cell counts are small -- confirm against the data).
chisq.test(dataset2$EngagementIndex, dataset2$Attrition)
chisq.test(dataset2$AGSExperienceInMonths, dataset2$Attrition, simulate.p.value = TRUE)
chisq.test(dataset2$Gender, dataset2$Attrition)
chisq.test(dataset2$QualAvgLast3Months, dataset2$Attrition, simulate.p.value = TRUE)
chisq.test(dataset2$FunctionName, dataset2$Attrition, simulate.p.value = TRUE)
# chisq.test(dataset2$Shift2, dataset2$Attrition, simulate.p.value = TRUE)
chisq.test(dataset2$WorkLocation, dataset2$Attrition, simulate.p.value = TRUE)
chisq.test(dataset2$CourseLevels, dataset2$Attrition, simulate.p.value = TRUE)
chisq.test(dataset2$ExperienceType, dataset2$Attrition)
chisq.test(dataset2$MaritalStatus, dataset2$Attrition)
chisq.test(dataset2$ProdAvgLast3Months, dataset2$Attrition, simulate.p.value = TRUE)
chisq.test(dataset2$Last30DaysLeaveCount, dataset2$Attrition, simulate.p.value = TRUE)
chisq.test(dataset2$EmployeeAge, dataset2$Attrition, simulate.p.value = TRUE)
chisq.test(dataset2$JMonth, dataset2$Attrition)
chisq.test(dataset2$RMonth, dataset2$Attrition)
chisq.test(dataset2$Client, dataset2$Attrition, simulate.p.value = TRUE)
chisq.test(dataset2$SubClient, dataset2$Attrition, simulate.p.value = TRUE)
chisq.test(dataset2$TotalExtraHoursWorked, dataset2$Attrition, simulate.p.value = TRUE)
chisq.test(dataset2$LastReviewType, dataset2$Attrition, simulate.p.value = TRUE)
chisq.test(dataset2$LastReviewRating, dataset2$Attrition, simulate.p.value = TRUE)
# chisq.test(dataset2$Distance, dataset2$Status, simulate.p.value = TRUE)
################# 4 Model 1 (all IMP) #######################
# Logistic regression of Attrition on the screened predictors.
# LastReviewType + LastReviewRating + Shift3
set.seed(144)
logit1 <- glm(Attrition ~ EngagementIndex + AGSExperienceInMonths + Gender
+ QualAvgLast3Months + FunctionName + WorkLocation + CourseLevels
+ ExperienceType + MaritalStatus + ProdAvgLast3Months + Last30DaysLeaveCount
+ EmployeeAge + JMonth + Client
+ TotalExtraHoursWorked
, family = binomial(link = "logit")
, data = dataset2)
summary(logit1)
##################### 5 prediction on all data ####################
# Predicted attrition probability for every employee in the modelling frame.
predAll1 <- predict(logit1, dataset2, "response")
# Confusion matrix at a 0.5 cutoff: rows = actual Status, cols = predicted
# leaver (TRUE). Accuracy is now computed from the table instead of the
# previously hard-coded counts, so it stays correct when the data changes.
cm <- table(dataset2$Status, predAll1 > 0.5)
print(cm)
accuracy <- sum(diag(cm)) / sum(cm) * 100
print(accuracy)
################# 6 Export Model Score #################
t <- cbind(dataset2, predAll1)
t$Prediction <- ifelse(predAll1 > 0.5, "Leave", "Stay")
write.csv(t, file = "MC_Logit_Score_R0.csv", row.names = FALSE)
| /20170112 V1R0 MC ().R | no_license | Makarand87/AttritionAnalysis2 | R | false | false | 5,173 | r | #Not considered Shift and is.na(Client) <- 'AGS'
# Attrition analysis, data preparation: pulls active and inactive employee
# snapshots from the EDW over ODBC (requires the "PulseDB_EDW_BI" DSN) and
# derives the modelling frame for the Medical Coding department.
# install.packages("RODBC")
library(RODBC)
odbcCloseAll()
edw_bi <- odbcConnect("PulseDB_EDW_BI")
active <- sqlFetch(edw_bi, "AttAnalysis_tblActiveEmployees")
inactive <- sqlFetch(edw_bi, "AttAnalysis_tblInActiveEmployees")
# Stack current and former employees into a single frame.
dataa <- rbind(active, inactive)
AR <- subset(dataa, dataa$Department == "Medical Coding")
TM <- subset(AR, JobRole == "Team Member")
# Relieving year 1900 appears to be the placeholder for "still employed";
# keep those plus anyone relieved in 2015 or later.
Current <- subset(TM, as.numeric(format(TM$DateOfRelieving, '%Y')) == 1900 | as.numeric(format(TM$DateOfRelieving, '%Y')) >= 2015 )
################ Attrition ##########
# only 2 > on the basis that we'll not have date for current employees as told by sid (his discussion with IT) on 23 Dec
# Attrition = 1 for leavers, Available = 1 for still-employed; Status is the
# same split as a factor label.
Current$Attrition = ifelse(as.numeric(format(Current$DateOfRelieving, '%Y')) == 1900, 0, 1)
Current$Available = ifelse(as.numeric(format(Current$DateOfRelieving, '%Y')) != 1900, 0, 1)
Current$Status = as.factor(ifelse(as.numeric(format(Current$DateOfRelieving, '%Y')) == 1900, "Current", "Past"))
########## EmployeeAge #######
library(lubridate)
# Age in years: up to today for current staff, up to the relieving date for leavers.
Current$EmployeeAge <- ifelse(Current$Status=="Current", interval(Current$DateofBirth, today())/duration(num=1, units = "years"), interval(Current$DateofBirth, Current$DateOfRelieving)/duration(num=1, units = "years"))
############ modified variables ###############
# Recode missing Client as the explicit level "AGS".
y <- addNA(Current$Client)
levels(y)[is.na(levels(y))] <- "AGS" # Maximum number of categories (50) exceeded
Current$Client <- y
summary(Current$Client)
############### New variables ###############
# Calendar month of joining / relieving, kept as factors for the model.
JMonth <- month(Current$DateofJoin)
RMonth <- month(Current$DateOfRelieving)
Current$JMonth <- as.factor(JMonth)
Current$RMonth <- as.factor(RMonth)
##################### Use limited data ############
# Keep identifiers plus the candidate predictors and outcome columns.
# "Shift2",
dataset2 <- Current[c( "EmployeeCode", "EmployeeName", "AGSExperienceInMonths", "Gender", "EmployeeAge"
, "JMonth", "RMonth", "MaritalStatus", "WorkLocation", "ExperienceType", "ProdAvgLast3Months"
, "QualAvgLast3Months", "CourseLevels", "Last30DaysLeaveCount","TotalExtraHoursWorked"
, "LastReviewType", "LastReviewRating", "Client", "SubClient"
, "TransportMode","EngagementIndex", "FunctionName", "Status", "Attrition", "Available")]
summary(dataset2)
################# Imputation ###################
# Multiple imputation (mice) of the three predictors with missing values;
# seed fixed so the stochastic imputation is reproducible.
library(mice)
simple <- dataset2[c("MaritalStatus", "CourseLevels", "EngagementIndex")]
summary(simple)
set.seed(144)
imputed = complete(mice(simple))
summary(imputed)
dataset2$MaritalStatus = imputed$MaritalStatus
dataset2$CourseLevels = imputed$CourseLevels
dataset2$EngagementIndex = imputed$EngagementIndex
############## Chi Square Test ############
# Univariate association screen of each predictor against Attrition; the
# simulate.p.value variants use Monte Carlo p-values (presumably where
# expected cell counts are small -- confirm against the data).
chisq.test(dataset2$EngagementIndex, dataset2$Attrition)
chisq.test(dataset2$AGSExperienceInMonths, dataset2$Attrition, simulate.p.value = TRUE)
chisq.test(dataset2$Gender, dataset2$Attrition)
chisq.test(dataset2$QualAvgLast3Months, dataset2$Attrition, simulate.p.value = TRUE)
chisq.test(dataset2$FunctionName, dataset2$Attrition, simulate.p.value = TRUE)
# chisq.test(dataset2$Shift2, dataset2$Attrition, simulate.p.value = TRUE)
chisq.test(dataset2$WorkLocation, dataset2$Attrition, simulate.p.value = TRUE)
chisq.test(dataset2$CourseLevels, dataset2$Attrition, simulate.p.value = TRUE)
chisq.test(dataset2$ExperienceType, dataset2$Attrition)
chisq.test(dataset2$MaritalStatus, dataset2$Attrition)
chisq.test(dataset2$ProdAvgLast3Months, dataset2$Attrition, simulate.p.value = TRUE)
chisq.test(dataset2$Last30DaysLeaveCount, dataset2$Attrition, simulate.p.value = TRUE)
chisq.test(dataset2$EmployeeAge, dataset2$Attrition, simulate.p.value = TRUE)
chisq.test(dataset2$JMonth, dataset2$Attrition)
chisq.test(dataset2$RMonth, dataset2$Attrition)
chisq.test(dataset2$Client, dataset2$Attrition, simulate.p.value = TRUE)
chisq.test(dataset2$SubClient, dataset2$Attrition, simulate.p.value = TRUE)
chisq.test(dataset2$TotalExtraHoursWorked, dataset2$Attrition, simulate.p.value = TRUE)
chisq.test(dataset2$LastReviewType, dataset2$Attrition, simulate.p.value = TRUE)
chisq.test(dataset2$LastReviewRating, dataset2$Attrition, simulate.p.value = TRUE)
# chisq.test(dataset2$Distance, dataset2$Status, simulate.p.value = TRUE)
################# 4 Model 1 (all IMP) #######################
# Logistic regression of Attrition on the screened predictors.
# LastReviewType + LastReviewRating + Shift3
set.seed(144)
logit1 <- glm(Attrition ~ EngagementIndex + AGSExperienceInMonths + Gender
+ QualAvgLast3Months + FunctionName + WorkLocation + CourseLevels
+ ExperienceType + MaritalStatus + ProdAvgLast3Months + Last30DaysLeaveCount
+ EmployeeAge + JMonth + Client
+ TotalExtraHoursWorked
, family = binomial(link = "logit")
, data = dataset2)
summary(logit1)
##################### 5 prediction on all data ####################
# Predicted attrition probability for every employee in the modelling frame.
predAll1 <- predict(logit1, dataset2, "response")
# Confusion matrix at a 0.5 cutoff: rows = actual Status, cols = predicted
# leaver (TRUE). Accuracy is now computed from the table instead of the
# previously hard-coded counts, so it stays correct when the data changes.
cm <- table(dataset2$Status, predAll1 > 0.5)
print(cm)
accuracy <- sum(diag(cm)) / sum(cm) * 100
print(accuracy)
################# 6 Export Model Score #################
t <- cbind(dataset2, predAll1)
t$Prediction <- ifelse(predAll1 > 0.5, "Leave", "Stay")
write.csv(t, file = "MC_Logit_Score_R0.csv", row.names = FALSE)
|
# Read numbers from stdin one at a time, appending each to `v`, until 0 (or
# end of input) is entered; returns the collected values, excluding the 0.
# NOTE: this shadows base::vector(). `v` now defaults to an empty numeric
# vector: previously `vector()` errored because `v` had no default, so the
# first `c(v, x)` (or `return(v)` when 0 was entered first) hit a missing
# argument.
vector <- function(v = numeric(0)){
  x <- scan(n = 1, quiet = TRUE, what = numeric())
  # length(x) guards against end-of-input, where scan() returns numeric(0)
  # and the bare comparison `x != 0` would error with a length-zero condition.
  while (length(x) == 1 && x != 0){
    v <- c(v, x)
    x <- scan(n = 1, quiet = TRUE, what = numeric())
  }
  return(v)
}
v <- vector()
| /1. FIRST YEAR/Introduction to Computing/Exercices_Laura/exercici80.R | no_license | laurajuliamelis/BachelorDegree_Statistics | R | false | false | 190 | r | vector <- function(v){
# Read numbers one at a time from stdin; 0 is the sentinel that stops input.
x<- scan(n=1, quiet=TRUE, what=numeric())
while (x != 0){
# Append each non-zero value to the accumulator v.
v <- c(v,x)
x <- scan(n=1, quiet=TRUE, what=numeric())
}
# Return the collected values (excluding the terminating 0).
return(v)
}
# NOTE(review): called with no argument, `v` has no default, so c(v, x) or
# return(v) will error on first use -- presumably `v` should default to
# numeric(0); confirm and fix the signature.
v<- vector()
|
#' Run a Linear Mixed Effects Model on all voxels of a NIfTI image and return parametric coefficients and residuals
#'
#'
#' This function is able to run a Linear Mixed Effect Model using the lmer() function.
#' The analysis will run in all voxels in in the mask and will return the model fit for each voxel.
#' The function relies on lmerTest to create p-values using the Satterthwaite Approximation.
#'
#'
#' @param image Input image of type 'nifti' or vector of path(s) to images. If multiple paths, the script will all mergeNifti() and merge across time.
#' @param mask Input mask of type 'nifti' or path to mask. Must be a binary mask
#' @param fourdOut To be passed to mergeNifti, This is the path and file name without the suffix to save the fourd file. Default (NULL) means script won't write out 4D image.
#' @param formula Must be a formula passed to lmer()
#' @param subjData Dataframe containing all the covariates used for the analysis
#' @param mc.preschedule Argument to be passed to mclapply, whether or not to preschedule the jobs. More info in parallel::mclapply
#' @param ncores Number of cores to use
#' @param ... Additional arguments passed to lmer()
#'
#' @keywords internal
#' @return Return list of parametric and spline coefficients (include standard errors and p-values) fitted to each voxel over the masked images passed to function.
#' @export
#'
#'
#'
#' @examples
#'
#'
#' image <- oro.nifti::nifti(img = array(1:1600, dim =c(4,4,4,25)))
#' mask <- oro.nifti::nifti(img = array(c(rep(0,15),1), dim = c(4,4,4,1)))
#' set.seed(1)
#' covs <- data.frame(x = runif(25), id = rep(1:5,5))
#' fm1 <- "~ x + (1|id)"
#' models <- rlmerParam(image, mask, formula = fm1, subjData = covs, ncores = 1)
#'
rlmerParam <- function(image, mask, fourdOut = NULL, formula, subjData, mc.preschedule = TRUE, ncores = 1, ...) {
  # Validate required arguments before doing any heavy work.
  if (missing(image)) { stop("image is missing") }
  if (missing(mask)) { stop("mask is missing") }
  if (missing(formula)) { stop("formula is missing") }
  if (missing(subjData)) { stop("subjData is missing") }
  # is.character()/&& instead of `class(x) == "character"` and scalar `&`:
  # class() may return a vector, which makes `==` inside if() fragile (a
  # hard error from R 4.2 onward), and `if` needs a scalar condition.
  if (!is.character(formula)) { stop("formula class must be character") }
  # Accept a single path (read the 4D file), several paths (merged over
  # time), or an in-memory 'nifti' object, which passes through unchanged.
  if (is.character(image) && length(image) == 1) {
    image <- oro.nifti::readNIfTI(fname = image)
  } else if (is.character(image) && length(image) > 1) {
    image <- mergeNiftis(inputPaths = image, direction = "t", outfile = fourdOut)
  }
  if (is.character(mask) && length(mask) == 1) {
    mask <- oro.nifti::readNIfTI(fname = mask)
  }
  # One column per voxel inside the mask; columns are named per voxel.
  imageMat <- ts2matrix(image, mask)
  voxNames <- as.character(names(imageMat))
  # Free the (potentially large) images before forking workers.
  rm(image)
  rm(mask)
  gc()
  print("Created time series to matrix")
  # Build one model formula per voxel (voxel name as the response).
  m <- parallel::mclapply(voxNames,
                          FUN = listFormula, formula, mc.cores = ncores)
  print("Created formula list")
  timeIn <- proc.time()
  imageMat <- cbind(imageMat, subjData)
  print("Running test model")
  # Fit one model eagerly so formula/data errors surface before the
  # (harder to debug) parallel run.
  model <- base::do.call(lmerTest::lmer, list(formula = m[[1]], data = imageMat, ...))
  print("Running parallel models")
  model <- parallel::mclapply(m,
                              FUN = function(x, data, ...) {
                                foo <- base::do.call(lmerTest::lmer, list(formula = x, data = data, ...))
                                # Keep only coefficients and residuals; full
                                # merMod objects would be very large per voxel.
                                return(list(summary(foo)$coefficients, summary(foo)$residuals))
                              }, data = imageMat, ..., mc.preschedule = mc.preschedule, mc.cores = ncores)
  timeOut <- proc.time() - timeIn
  print(timeOut[3])
  print("Parallel Models Ran")
  return(model)
}
| /R/rlmerParam.R | no_license | angelgar/voxel | R | false | false | 3,516 | r | #' Run a Linear Mixed Effects Model on all voxels of a NIfTI image and return parametric coefficients and residuals
#'
#'
#' This function is able to run a Linear Mixed Effect Model using the lmer() function.
#' The analysis will run in all voxels in in the mask and will return the model fit for each voxel.
#' The function relies on lmerTest to create p-values using the Satterthwaite Approximation.
#'
#'
#' @param image Input image of type 'nifti' or vector of path(s) to images. If multiple paths, the script will all mergeNifti() and merge across time.
#' @param mask Input mask of type 'nifti' or path to mask. Must be a binary mask
#' @param fourdOut To be passed to mergeNifti, This is the path and file name without the suffix to save the fourd file. Default (NULL) means script won't write out 4D image.
#' @param formula Must be a formula passed to lmer()
#' @param subjData Dataframe containing all the covariates used for the analysis
#' @param mc.preschedule Argument to be passed to mclapply, whether or not to preschedule the jobs. More info in parallel::mclapply
#' @param ncores Number of cores to use
#' @param ... Additional arguments passed to lmer()
#'
#' @keywords internal
#' @return Return list of parametric and spline coefficients (include standard errors and p-values) fitted to each voxel over the masked images passed to function.
#' @export
#'
#'
#'
#' @examples
#'
#'
#' image <- oro.nifti::nifti(img = array(1:1600, dim =c(4,4,4,25)))
#' mask <- oro.nifti::nifti(img = array(c(rep(0,15),1), dim = c(4,4,4,1)))
#' set.seed(1)
#' covs <- data.frame(x = runif(25), id = rep(1:5,5))
#' fm1 <- "~ x + (1|id)"
#' models <- rlmerParam(image, mask, formula = fm1, subjData = covs, ncores = 1)
#'
rlmerParam <- function(image, mask, fourdOut = NULL, formula, subjData, mc.preschedule = TRUE, ncores = 1, ...) {
  # Validate required arguments before doing any heavy work.
  if (missing(image)) { stop("image is missing") }
  if (missing(mask)) { stop("mask is missing") }
  if (missing(formula)) { stop("formula is missing") }
  if (missing(subjData)) { stop("subjData is missing") }
  # is.character()/&& instead of `class(x) == "character"` and scalar `&`:
  # class() may return a vector, which makes `==` inside if() fragile (a
  # hard error from R 4.2 onward), and `if` needs a scalar condition.
  if (!is.character(formula)) { stop("formula class must be character") }
  # Accept a single path (read the 4D file), several paths (merged over
  # time), or an in-memory 'nifti' object, which passes through unchanged.
  if (is.character(image) && length(image) == 1) {
    image <- oro.nifti::readNIfTI(fname = image)
  } else if (is.character(image) && length(image) > 1) {
    image <- mergeNiftis(inputPaths = image, direction = "t", outfile = fourdOut)
  }
  if (is.character(mask) && length(mask) == 1) {
    mask <- oro.nifti::readNIfTI(fname = mask)
  }
  # One column per voxel inside the mask; columns are named per voxel.
  imageMat <- ts2matrix(image, mask)
  voxNames <- as.character(names(imageMat))
  # Free the (potentially large) images before forking workers.
  rm(image)
  rm(mask)
  gc()
  print("Created time series to matrix")
  # Build one model formula per voxel (voxel name as the response).
  m <- parallel::mclapply(voxNames,
                          FUN = listFormula, formula, mc.cores = ncores)
  print("Created formula list")
  timeIn <- proc.time()
  imageMat <- cbind(imageMat, subjData)
  print("Running test model")
  # Fit one model eagerly so formula/data errors surface before the
  # (harder to debug) parallel run.
  model <- base::do.call(lmerTest::lmer, list(formula = m[[1]], data = imageMat, ...))
  print("Running parallel models")
  model <- parallel::mclapply(m,
                              FUN = function(x, data, ...) {
                                foo <- base::do.call(lmerTest::lmer, list(formula = x, data = data, ...))
                                # Keep only coefficients and residuals; full
                                # merMod objects would be very large per voxel.
                                return(list(summary(foo)$coefficients, summary(foo)$residuals))
                              }, data = imageMat, ..., mc.preschedule = mc.preschedule, mc.cores = ncores)
  timeOut <- proc.time() - timeIn
  print(timeOut[3])
  print("Parallel Models Ran")
  return(model)
}
|
## These functions provide an environment for a matrix
# and its pre-computed inverse
## Create a matrix object that has a special environment containing its
## pre-calculated inverse
makeCacheMatrix <- function(x = matrix()) {
  ## Closure state: the wrapped matrix 'x' and its cached inverse
  ## (NULL until setinverse() records one).
  cached_inverse <- NULL
  ## Store a new matrix; any previously cached inverse is now stale.
  set <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL
  }
  ## Return the stored matrix.
  get <- function() {
    x
  }
  ## Record a computed inverse in the cache.
  setinverse <- function(inverse_value) {
    cached_inverse <<- inverse_value
  }
  ## Return the cached inverse, or NULL when nothing has been cached.
  getinverse <- function() {
    cached_inverse
  }
  ## Expose the four accessors; all of them share this environment.
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## If the specified object does not already have a pre-computed inverse,
## this function computes, caches and returns this inverse
## If already computed, it simply returns the cached inverse
cacheSolve <- function(x, ...) {
  ## Return the inverse of the matrix wrapped by 'x' (an object built by
  ## makeCacheMatrix). The inverse is computed with solve() on the first
  ## call and served from the cache on later calls. Extra arguments in
  ## '...' are forwarded to solve().
  cached <- x$getinverse()
  if (is.null(cached)) {
    ## Cache miss: invert the stored matrix and remember the result.
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    ## Cache hit: announce it, exactly as the original implementation did.
    message("Getting cached inverse")
  }
  cached
}
| /cachematrix.R | no_license | sbarkin/ProgrammingAssignment2 | R | false | false | 1,366 | r | ## These functions provide an environment for a matrix
# and its pre-computed inverse
## Create a matrix object that has a special environment containing its
## pre-calculated inverse
## makeCacheMatrix(x): build a cache-aware wrapper around matrix 'x'.
## Returns a list of four closures that share one environment:
##   set(mat)        -- store a new matrix and invalidate the cached inverse
##   get()           -- return the stored matrix
##   setinverse(inv) -- record a computed inverse in the cache
##   getinverse()    -- return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(x = matrix()) {
inverse = NULL
set <- function(mat) { ## set new or initial value of matrix
x <<- mat
inverse <<- NULL ## any previously cached inverse is now stale
}
get <- function() x ##return matrix
setinverse <- function(invmat) {
inverse <<- invmat ##cache the inverse provided as an argument
}
getinverse <- function() inverse ## NULL until setinverse() is called
list(set=set, get=get,
setinverse=setinverse, getinverse=getinverse)
##vector of methods that
##operate on 'inverse' and 'x'
## in this environment
}
## If the specified object does not already have a pre-computed inverse,
## this function computes, caches and returns this inverse
## If already computed, it simply returns the cached inverse
## cacheSolve(x, ...): return the inverse of the matrix wrapped by 'x'
## (an object made by makeCacheMatrix). The inverse is computed with
## solve() on the first call and served from the cache afterwards.
## Extra arguments in '...' are passed through to solve().
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
xinv = x$getinverse()
if (!is.null(xinv)) {
message ("Getting cached inverse") ## cache hit: skip recomputation
return (xinv)
}
mat = x$get()
xinv = solve(mat, ...) ## calculate inverse of mat
x$setinverse(xinv) ## cache this inverse for next time
xinv ##return this inverse
}
|
#' Get PDL results
#'
#' @return Dataframe with all contents of the pdl_results table except the
#' timestamp column.
#'
#' @param conn database connection object
#' @import RODBC
#' @import stringi
#' @export
get_pdl_results <- function(conn) {
  # Fetch every row of pdl_results; keep strings as character vectors.
  pdl_results_df <- sqlQuery(conn, "select * from pdl_results",
                             stringsAsFactors = FALSE)
  # Drop the timestamp column, as promised by @return.
  pdl_results_df$TIMESTAMP <- NULL
  # Bug fix: when an assignment is the last expression, the function
  # returns the assigned value (NULL here), not the data frame. Return
  # the data frame explicitly.
  pdl_results_df
}
| /R/get_pdl_results.R | permissive | rmsharp/snprcspf | R | false | false | 402 | r | #' Get PDL results
#'
#' @return Dataframe with all contents of the pdl_results table except the
#' timestamp column.
#'
#' @param conn database connection object
#' @import RODBC
#' @import stringi
#' @export
get_pdl_results <- function(conn) {
  # Fetch every row of pdl_results; keep strings as character vectors.
  pdl_results_df <- sqlQuery(conn, "select * from pdl_results",
                             stringsAsFactors = FALSE)
  # Drop the timestamp column, as promised by @return.
  pdl_results_df$TIMESTAMP <- NULL
  # Bug fix: when an assignment is the last expression, the function
  # returns the assigned value (NULL here), not the data frame. Return
  # the data frame explicitly.
  pdl_results_df
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/peaks.R
\name{Getisotopologues}
\alias{Getisotopologues}
\title{Get the selected isotopologues at certain MS data}
\usage{
Getisotopologues(formula = "C12OH6Br4", charge = "1", width = 0.3)
}
\arguments{
\item{formula}{the molecular formula. C12OH6Br4 means BDE-47 as default}
\item{charge}{the charge of that molecule. 1 as default (EI mode)}
\item{width}{the width of the peak on the mass spectrum. 0.3 as default for low-resolution mass spectra.}
}
\description{
Get the selected isotopologues at certain MS data
}
\examples{
# show isotopologues for BDE-47
Getisotopologues(formula = 'C12OH6Br4')
}
| /man/Getisotopologues.Rd | no_license | AspirinCode/enviGCMS | R | false | true | 688 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/peaks.R
\name{Getisotopologues}
\alias{Getisotopologues}
\title{Get the selected isotopologues at certain MS data}
\usage{
Getisotopologues(formula = "C12OH6Br4", charge = "1", width = 0.3)
}
\arguments{
\item{formula}{the molecular formula. C12OH6Br4 means BDE-47 as default}
\item{charge}{the charge of that molecule. 1 as default (EI mode)}
\item{width}{the width of the peak on the mass spectrum. 0.3 as default for low-resolution mass spectra.}
}
\description{
Get the selected isotopologues at certain MS data
}
\examples{
# show isotopologues for BDE-47
Getisotopologues(formula = 'C12OH6Br4')
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EpiNow2-package.R
\docType{package}
\name{EpiNow2-package}
\alias{EpiNow2}
\alias{EpiNow2-package}
\title{EpiNow2: Estimate Real-Time Case Counts and Time-Varying Epidemiological Parameters}
\description{
Estimates the time-varying reproduction number, rate of spread, and doubling time using a range of open-source tools (Abbott et al. (2020) \doi{10.12688/wellcomeopenres.16006.1}), and current best practices (Gostic et al. (2020) \doi{10.1101/2020.06.18.20134858}). It aims to help users avoid some of the limitations of naive implementations in a framework that is informed by community feedback and is actively supported.
}
\seealso{
Useful links:
\itemize{
\item \url{https://epiforecasts.io/EpiNow2/}
\item \url{https://epiforecasts.io/EpiNow2/dev/}
\item \url{https://github.com/epiforecasts/EpiNow2}
\item Report bugs at \url{https://github.com/epiforecasts/EpiNow2/issues}
}
}
\author{
\strong{Maintainer}: Sam Abbott \email{sam.abbott@lshtm.ac.uk} (\href{https://orcid.org/0000-0001-8057-8037}{ORCID})
Authors:
\itemize{
\item Joel Hellewell \email{joel.hellewell@lshtm.ac.uk} (\href{https://orcid.org/0000-0003-2683-0849}{ORCID})
\item Katharine Sherratt \email{katharine.sherratt@lshtm.ac.uk}
\item Katelyn Gostic \email{kgostic@uchicago.edu}
\item Joe Hickson \email{joseph.hickson@metoffice.gov.uk}
\item Hamada S. Badr \email{badr@jhu.edu} (\href{https://orcid.org/0000-0002-9808-2344}{ORCID})
\item Michael DeWitt \email{me.dewitt.jr@gmail.com} (\href{https://orcid.org/0000-0001-8940-1967}{ORCID})
\item EpiForecasts
\item Sebastian Funk \email{sebastian.funk@lshtm.ac.uk} (\href{https://orcid.org/0000-0002-2842-3406}{ORCID})
}
Other contributors:
\itemize{
\item Robin Thompson \email{robin.thompson@lshtm.ac.uk} [contributor]
\item Sophie Meakin \email{sophie.meaking@lshtm.ac.uk} [contributor]
\item James Munday \email{james.munday@lshtm.ac.uk} [contributor]
\item Nikos Bosse [contributor]
\item Paul Mee \email{paul.mee@lshtm.ac.uk} [contributor]
\item Peter Ellis \email{peter.ellis2013nz@gmail.com} [contributor]
\item Pietro Monticone \email{pietro.monticone@edu.unito.it} [contributor]
\item Lloyd Chapman \email{lloyd.chapman1@lshtm.ac.uk } [contributor]
\item James M. Azam \email{james.azam@lshtm.ac.uk} (\href{https://orcid.org/0000-0001-5782-7330}{ORCID}) [contributor]
}
}
\keyword{internal}
| /man/EpiNow2-package.Rd | permissive | epiforecasts/EpiNow2 | R | false | true | 2,452 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EpiNow2-package.R
\docType{package}
\name{EpiNow2-package}
\alias{EpiNow2}
\alias{EpiNow2-package}
\title{EpiNow2: Estimate Real-Time Case Counts and Time-Varying Epidemiological Parameters}
\description{
Estimates the time-varying reproduction number, rate of spread, and doubling time using a range of open-source tools (Abbott et al. (2020) \doi{10.12688/wellcomeopenres.16006.1}), and current best practices (Gostic et al. (2020) \doi{10.1101/2020.06.18.20134858}). It aims to help users avoid some of the limitations of naive implementations in a framework that is informed by community feedback and is actively supported.
}
\seealso{
Useful links:
\itemize{
\item \url{https://epiforecasts.io/EpiNow2/}
\item \url{https://epiforecasts.io/EpiNow2/dev/}
\item \url{https://github.com/epiforecasts/EpiNow2}
\item Report bugs at \url{https://github.com/epiforecasts/EpiNow2/issues}
}
}
\author{
\strong{Maintainer}: Sam Abbott \email{sam.abbott@lshtm.ac.uk} (\href{https://orcid.org/0000-0001-8057-8037}{ORCID})
Authors:
\itemize{
\item Joel Hellewell \email{joel.hellewell@lshtm.ac.uk} (\href{https://orcid.org/0000-0003-2683-0849}{ORCID})
\item Katharine Sherratt \email{katharine.sherratt@lshtm.ac.uk}
\item Katelyn Gostic \email{kgostic@uchicago.edu}
\item Joe Hickson \email{joseph.hickson@metoffice.gov.uk}
\item Hamada S. Badr \email{badr@jhu.edu} (\href{https://orcid.org/0000-0002-9808-2344}{ORCID})
\item Michael DeWitt \email{me.dewitt.jr@gmail.com} (\href{https://orcid.org/0000-0001-8940-1967}{ORCID})
\item EpiForecasts
\item Sebastian Funk \email{sebastian.funk@lshtm.ac.uk} (\href{https://orcid.org/0000-0002-2842-3406}{ORCID})
}
Other contributors:
\itemize{
\item Robin Thompson \email{robin.thompson@lshtm.ac.uk} [contributor]
\item Sophie Meakin \email{sophie.meaking@lshtm.ac.uk} [contributor]
\item James Munday \email{james.munday@lshtm.ac.uk} [contributor]
\item Nikos Bosse [contributor]
\item Paul Mee \email{paul.mee@lshtm.ac.uk} [contributor]
\item Peter Ellis \email{peter.ellis2013nz@gmail.com} [contributor]
\item Pietro Monticone \email{pietro.monticone@edu.unito.it} [contributor]
\item Lloyd Chapman \email{lloyd.chapman1@lshtm.ac.uk } [contributor]
\item James M. Azam \email{james.azam@lshtm.ac.uk} (\href{https://orcid.org/0000-0001-5782-7330}{ORCID}) [contributor]
}
}
\keyword{internal}
|
#
subroutine wfcn1(ilf, ifcn, x)
#
# Decode the function name held in the shared string iopcon (declared
# in the included common files), starting at column ilf, into a numeric
# function code returned in ifcn. ifcn is set to -1 when the name is
# not recognized. For the two parameterized functions, x**c (7) and
# c**x (8), the constant c is decoded into x; otherwise x is 0.0.
#
implicit integer*4 (i-n)
include "../common/lbl4"
include "../common/lundefs"
character*80 ia
character*80 itemp
#
# There are some things this subroutine must handle
# correctly. The first is to accept a function name
# that can have any number of inserted blanks and
# still be recognized. For this, all blanks are
# compressed out by subroutine comprs.
# Secondly, functions 7 and 8 require a number to
# be decoded. This is done with a call to wjfren.
# Thirdly, if the input is not recognized, a
# negative function number will be returned.
#
itemp = ' '
ia = ' '
# Save the original iopcon so it can be restored before returning.
itemp = iopcon
x=0.0
#
# Work on the tail of iopcon from column ilf on, with blanks removed.
ia = iopcon(ilf:80)
call comprs(ia)
iopcon = ia
#
# Note: some compilers have a limit of 20 if-then-else
# so change to if and no else. This is technically
# less efficient, but machines are plenty fast these days.
# - RNC 12/22/2009
#
# Map the compressed name to its function code.
ifcn = -1
if (ia == 'exp') {
ifcn = 2
}
if (ia == 'ln') {
ifcn = 3
}
if (ia == 'log') {
ifcn = 4
}
if (ia == '10**x') {
ifcn = 5
}
if (ia == '1/x') {
ifcn = 6
}
if (ia(1:4) == 'x**c') {
ifcn = 7
}
if (ia(1:4) == 'c**x') {
ifcn = 8
}
if (ia == 'sin') {
ifcn = 9
}
if (ia == 'cos') {
ifcn = 10
}
if (ia == 'tan') {
ifcn = 11
}
if (ia == 'invcos') {
ifcn = 12
}
if (ia == 'invsin') {
ifcn = 13
}
if (ia == 'invtan') {
ifcn = 14
}
if (ia == 'sind') {
ifcn = 15
}
if (ia == 'cosd') {
ifcn = 16
}
if (ia == 'tand') {
ifcn = 17
}
if (ia == 'invcosd') {
ifcn = 18
}
if (ia == 'invsind') {
ifcn = 19
}
if (ia == 'invtand') {
ifcn = 20
}
if (ia == 'cosh') {
ifcn = 21
}
if (ia == 'sinh') {
ifcn = 22
}
if (ia == 'tanh') {
ifcn = 23
}
if (ia == 'abs') {
ifcn = 24
}
if (ia == 'int') {
ifcn = 25
}
if (ia == 'frac') {
ifcn = 26
}
if (ia == '1/xe') {
ifcn = 27
}
# Unrecognized name: report it (ifcn stays -1 for the caller).
if (ifcn == -1) {
write(ttyout,10)
}
# x**c and c**x carry a numeric constant after the 4-character prefix;
# decode it (starting at column 5) into x with wjfren.
if (ifcn == 7 || ifcn == 8) {
i=5
call wjfren(i,x,il)
}
iopcon = itemp
return
10 format(' invalid function name')
end
| /src-local/specpr/src.specpr/talg/wfcn1.r | no_license | ns-bak/tetracorder-tutorial | R | false | false | 2,649 | r | #
subroutine wfcn1(ilf, ifcn, x)
#
# Decode the function name held in the shared string iopcon (declared
# in the included common files), starting at column ilf, into a numeric
# function code returned in ifcn. ifcn is set to -1 when the name is
# not recognized. For the two parameterized functions, x**c (7) and
# c**x (8), the constant c is decoded into x; otherwise x is 0.0.
#
implicit integer*4 (i-n)
include "../common/lbl4"
include "../common/lundefs"
character*80 ia
character*80 itemp
#
# There are some things this subroutine must handle
# correctly. The first is to accept a function name
# that can have any number of inserted blanks and
# still be recognized. For this, all blanks are
# compressed out by subroutine comprs.
# Secondly, functions 7 and 8 require a number to
# be decoded. This is done with a call to wjfren.
# Thirdly, if the input is not recognized, a
# negative function number will be returned.
#
itemp = ' '
ia = ' '
# Save the original iopcon so it can be restored before returning.
itemp = iopcon
x=0.0
#
# Work on the tail of iopcon from column ilf on, with blanks removed.
ia = iopcon(ilf:80)
call comprs(ia)
iopcon = ia
#
# Note: some compilers have a limit of 20 if-then-else
# so change to if and no else. This is technically
# less efficient, but machines are plenty fast these days.
# - RNC 12/22/2009
#
# Map the compressed name to its function code.
ifcn = -1
if (ia == 'exp') {
ifcn = 2
}
if (ia == 'ln') {
ifcn = 3
}
if (ia == 'log') {
ifcn = 4
}
if (ia == '10**x') {
ifcn = 5
}
if (ia == '1/x') {
ifcn = 6
}
if (ia(1:4) == 'x**c') {
ifcn = 7
}
if (ia(1:4) == 'c**x') {
ifcn = 8
}
if (ia == 'sin') {
ifcn = 9
}
if (ia == 'cos') {
ifcn = 10
}
if (ia == 'tan') {
ifcn = 11
}
if (ia == 'invcos') {
ifcn = 12
}
if (ia == 'invsin') {
ifcn = 13
}
if (ia == 'invtan') {
ifcn = 14
}
if (ia == 'sind') {
ifcn = 15
}
if (ia == 'cosd') {
ifcn = 16
}
if (ia == 'tand') {
ifcn = 17
}
if (ia == 'invcosd') {
ifcn = 18
}
if (ia == 'invsind') {
ifcn = 19
}
if (ia == 'invtand') {
ifcn = 20
}
if (ia == 'cosh') {
ifcn = 21
}
if (ia == 'sinh') {
ifcn = 22
}
if (ia == 'tanh') {
ifcn = 23
}
if (ia == 'abs') {
ifcn = 24
}
if (ia == 'int') {
ifcn = 25
}
if (ia == 'frac') {
ifcn = 26
}
if (ia == '1/xe') {
ifcn = 27
}
# Unrecognized name: report it (ifcn stays -1 for the caller).
if (ifcn == -1) {
write(ttyout,10)
}
# x**c and c**x carry a numeric constant after the 4-character prefix;
# decode it (starting at column 5) into x with wjfren.
if (ifcn == 7 || ifcn == 8) {
i=5
call wjfren(i,x,il)
}
iopcon = itemp
return
10 format(' invalid function name')
end
|
# http://www.mbie.govt.nz/info-services/business/business-growth-agenda/regions
# The snippet below already relies on dplyr (%>% and filter()); attach it
# explicitly so the script runs in a fresh session.
library(dplyr)
# Download and unpack the MBIE Regional Economic Activity data.
download.file("http://www.mbie.govt.nz/info-services/business/business-growth-agenda/regions/documents-image-library/REAR-Webtool-20160610.zip",
              destfile = "tmp.zip", mode = "wb")
unzip("tmp.zip") # read the "regional-economic-activity-data.txt" file for description
rea <- read.csv("all-data.csv", stringsAsFactors = FALSE)
dim(rea) # 1.1 million rows, 14 columns
head(rea)
# cleanup
unlink("tmp.zip")
unlink("all-data.csv")
# inspect the categorical columns
unique(rea$indicator)
unique(rea$slice)
unique(rea$area)
unique(rea$feature)
# example query: annual-average unemployment rate series for Lower Hutt
rea %>%
  filter(slice == "unemployment rate, annual average" &
           area == "Lower Hutt")
| /_working/snippet - REA.R | no_license | ellisp/blog-source | R | false | false | 704 | r | # http://www.mbie.govt.nz/info-services/business/business-growth-agenda/regions
# NOTE(review): this snippet relies on dplyr already being attached --
# %>% and filter() used below are not base R.
download.file("http://www.mbie.govt.nz/info-services/business/business-growth-agenda/regions/documents-image-library/REAR-Webtool-20160610.zip",
destfile = "tmp.zip", mode = "wb")
unzip("tmp.zip") # read the "regional-economic-activity-data.txt" file for description
rea <- read.csv("all-data.csv", stringsAsFactors = FALSE)
dim(rea) # 1.1 million rows, 14 columns
head(rea)
# cleanup
unlink("tmp.zip")
unlink("all-data.csv")
# inspect the categorical columns
unique(rea$indicator)
unique(rea$slice)
unique(rea$area)
unique(rea$feature)
# example query: annual-average unemployment rate series for Lower Hutt
rea %>%
filter(slice == "unemployment rate, annual average" &
area == "Lower Hutt")
|
# load the census data from the csv file
dat = read.csv("https://raw.githubusercontent.com/peterkabai/dataScience/master/data/census.csv")
# print the datatype for each column
str(dat)
# use 'summary' to get some statistics on the 'education_num' attribute
summary(dat$education_num)
# since it is a categorical feature, you need to display 'education_num'
# with a barplot not a histogram
barplot(table(dat$education_num))
# plot 'capital_gain' with a density plot
plot(density(dat$capital_gain))
# look at 'workclass', and plot it in the most appropriate way
summary(dat$workclass)
par(mar=c(3,10,3,3))
barplot(table(dat$workclass), horiz=TRUE, las=1)
# use a bar plot to show the distribution of attribute 'sex'
par(mar=c(3,3,3,3))
barplot(table(dat$sex))
# use a horizontal bar plot to visualize attribute 'marital_status'
par(mar=c(3,10,3,3))
barplot(table(dat$marital_status), horiz=TRUE, las=1)
# visualize 'relationship'
par(mar=c(3,10,3,3))
barplot(table(dat$relationship), horiz=TRUE, las=1)
# percentage where native country is united states
table(dat$native_country)[["United_States"]]/nrow(dat)
# load the US News and World Report college data
dat = read.csv("https://raw.githubusercontent.com/peterkabai/dataScience/master/data/college.csv")
# create scatter plots to compare some of the variables
# do smaller colleges spend more
plot(Expend ~ F.Undergrad, data=dat)
# do smaller colleges charge more
plot(Outstate ~ F.Undergrad, data=dat, pch=".")
# plot the tuition vs the percent of applicants accepted
percent.accept = dat$Accept/dat$Apps
percent.accept
plot(dat$Outstate, percent.accept, pch=".")
# plot the tuition vs the percent of applicants enrolled
percent.enroll = dat$Enroll/dat$Apps
percent.enroll
plot(dat$Outstate, percent.enroll, pch=".")
# plot the number of accepted students vs the number of
# applications with a line of best fit
plot(Accept ~ Apps, data=dat)
fit = lm(Accept ~ Apps, data=dat)
abline(fit, lwd=1, col="blue")
# plot a mean, median, min, or max line
abline( h=mean(dat$Accept), lwd=2, col="red")
abline( h=min(dat$Accept), lwd=2, col="yellow")
abline( h=max(dat$Accept), lwd=2, col="green")
# plot the density plot for 'F.Undergrad' values
# and add lines to divide the data into thirds
top_of_small = quantile(dat$F.Undergrad, 1/3)[[1]]
top_of_medium = quantile(dat$F.Undergrad, 2/3)[[1]]
plot(density(dat$F.Undergrad))
abline( v=top_of_small, lwd=1, col="red")
abline( v=top_of_medium, lwd=1, col="red")
# add a column to the data, with small, medium and large values
# determined by the 'F.Undergrad' value and the thirds
# NOTE(review): dat["F.Undergrad"] is a one-column data frame (not a
# vector); ifelse() still evaluates the comparison elementwise here
dat$Size = ifelse(
dat["F.Undergrad"] < top_of_small, "small", ifelse(
dat["F.Undergrad"] < top_of_medium, "medium", "large"
)
)
# make a scatterplot with 'PhD' on the x axis and 'Outstate' on the y axis
# for each third using the 'Size' column
par(mfrow=c(3,1))
plot(dat$PhD[dat$Size == "small"], dat$Outstate[dat$Size == "small"])
plot(dat$PhD[dat$Size == "medium"], dat$Outstate[dat$Size == "medium"])
plot(dat$PhD[dat$Size == "large"], dat$Outstate[dat$Size == "large"])
# plot again on a single scatterplot, with color used to distinguish the thirds
par(mfrow=c(1,1))
plot(
dat$PhD,
dat$Outstate,
pch=20,
col=ifelse(dat["Size"] == "small", "green", ifelse(dat["Size"] == "medium", "yellow", "red"))
)
# show 3 histograms of the PhD variable, one each for
# small, medium, and large schools
par(mfrow=c(3,1))
hist(
dat$PhD[dat$Size == "small"],
main="Histogram of PhD's for Small Colleges",
xlab="PhD's",
xlim=c(0,100),
ylim=c(0,100)
)
hist(
dat$PhD[dat$Size == "medium"],
main="Histogram of PhD's for Medium Colleges",
xlab="PhD's",
xlim=c(0,100),
ylim=c(0,100)
)
hist(
dat$PhD[dat$Size == "large"],
main="Histogram of PhD's for Large Colleges",
xlab="PhD's",
xlim=c(0,100),
ylim=c(0,100)
)
| /dataExploration.R | no_license | peterkabai/dataScience | R | false | false | 3,853 | r | # load the census data from the csv file
# load the census data from the csv file
dat = read.csv("https://raw.githubusercontent.com/peterkabai/dataScience/master/data/census.csv")
# print the datatype for each column
str(dat)
# use 'summary' to get some statistics on the 'education_num' attribute
summary(dat$education_num)
# since it is a categorical feature, you need to display 'education_num'
# with a barplot not a histogram
barplot(table(dat$education_num))
# plot 'capital_gain' with a density plot
plot(density(dat$capital_gain))
# look at 'workclass', and plot it in the most appropriate way
summary(dat$workclass)
par(mar=c(3,10,3,3))
barplot(table(dat$workclass), horiz=TRUE, las=1)
# use a bar plot to show the distribution of attribute 'sex'
par(mar=c(3,3,3,3))
barplot(table(dat$sex))
# use a horizontal bar plot to visualize attribute 'marital_status'
par(mar=c(3,10,3,3))
barplot(table(dat$marital_status), horiz=TRUE, las=1)
# visualize 'relationship'
par(mar=c(3,10,3,3))
barplot(table(dat$relationship), horiz=TRUE, las=1)
# percentage where native country is united states
table(dat$native_country)[["United_States"]]/nrow(dat)
# load the US News and World Report college data
dat = read.csv("https://raw.githubusercontent.com/peterkabai/dataScience/master/data/college.csv")
# create scatter plots to compare some of the variables
# do smaller colleges spend more
plot(Expend ~ F.Undergrad, data=dat)
# do smaller colleges charge more
plot(Outstate ~ F.Undergrad, data=dat, pch=".")
# plot the tuition vs the percent of applicants accepted
percent.accept = dat$Accept/dat$Apps
percent.accept
plot(dat$Outstate, percent.accept, pch=".")
# plot the tuition vs the percent of applicants enrolled
percent.enroll = dat$Enroll/dat$Apps
percent.enroll
plot(dat$Outstate, percent.enroll, pch=".")
# plot the number of accepted students vs the number of
# applications with a line of best fit
plot(Accept ~ Apps, data=dat)
fit = lm(Accept ~ Apps, data=dat)
abline(fit, lwd=1, col="blue")
# plot a mean, median, min, or max line
abline( h=mean(dat$Accept), lwd=2, col="red")
abline( h=min(dat$Accept), lwd=2, col="yellow")
abline( h=max(dat$Accept), lwd=2, col="green")
# plot the density plot for 'F.Undergrad' values
# and add lines to divide the data into thirds
top_of_small = quantile(dat$F.Undergrad, 1/3)[[1]]
top_of_medium = quantile(dat$F.Undergrad, 2/3)[[1]]
plot(density(dat$F.Undergrad))
abline( v=top_of_small, lwd=1, col="red")
abline( v=top_of_medium, lwd=1, col="red")
# add a column to the data, with small, medium and large values
# determined by the 'F.Undergrad' value and the thirds
# NOTE(review): dat["F.Undergrad"] is a one-column data frame (not a
# vector); ifelse() still evaluates the comparison elementwise here
dat$Size = ifelse(
dat["F.Undergrad"] < top_of_small, "small", ifelse(
dat["F.Undergrad"] < top_of_medium, "medium", "large"
)
)
# make a scatterplot with 'PhD' on the x axis and 'Outstate' on the y axis
# for each third using the 'Size' column
par(mfrow=c(3,1))
plot(dat$PhD[dat$Size == "small"], dat$Outstate[dat$Size == "small"])
plot(dat$PhD[dat$Size == "medium"], dat$Outstate[dat$Size == "medium"])
plot(dat$PhD[dat$Size == "large"], dat$Outstate[dat$Size == "large"])
# plot again on a single scatterplot, with color used to distinguish the thirds
par(mfrow=c(1,1))
plot(
dat$PhD,
dat$Outstate,
pch=20,
col=ifelse(dat["Size"] == "small", "green", ifelse(dat["Size"] == "medium", "yellow", "red"))
)
# show 3 histograms of the PhD variable, one each for
# small, medium, and large schools
par(mfrow=c(3,1))
hist(
dat$PhD[dat$Size == "small"],
main="Histogram of PhD's for Small Colleges",
xlab="PhD's",
xlim=c(0,100),
ylim=c(0,100)
)
hist(
dat$PhD[dat$Size == "medium"],
main="Histogram of PhD's for Medium Colleges",
xlab="PhD's",
xlim=c(0,100),
ylim=c(0,100)
)
hist(
dat$PhD[dat$Size == "large"],
main="Histogram of PhD's for Large Colleges",
xlab="PhD's",
xlim=c(0,100),
ylim=c(0,100)
)
|
\name{STIKhat} \alias{STIKhat}
\title{Estimation of the Space-Time Inhomogeneous K-function}
\description{Compute an estimate of the Space-Time Inhomogeneous K-function.}
\usage{STIKhat(xyt, s.region, t.region, dist, times, lambda,
correction="isotropic", infectious=FALSE) }
\arguments{
\item{xyt}{Coordinates and times \eqn{(x,y,t)}{(x,y,t)} of the point pattern.}
\item{s.region}{Two-column matrix specifying polygonal region containing all data locations. If \code{s.region} is missing, the bounding box of \code{xyt[,1:2]} is considered.}
\item{t.region}{Vector containing the minimum and maximum values of the time interval. If \code{t.region} is missing, the range of \code{xyt[,3]} is considered.}
\item{dist}{Vector of distances \eqn{u}{u} at which \eqn{K(u,v)}{K(u,v)} is computed. If missing, the maximum of \code{dist} is given by \eqn{\min(S_x,S_y)/4}{min(S_x, S_y)/4}, where \eqn{S_x}{S_x} and \eqn{S_y}{S_y} represent the maximum width and height of the bounding box of \code{s.region}.}
\item{times}{Vector of times \eqn{v}{v} at which \eqn{K(u,v)}{K(u,v)} is computed. If missing, the maximum of \code{times} is given by \eqn{(T_{\max} - T_{\min})/4}{(T_max - T_min)/4}, where \eqn{T_{\min}}{T_min} and \eqn{T_{\max}}{T_max} are the minimum and maximum of the time interval \eqn{T}{T}.}
\item{lambda}{Vector of values of the space-time intensity function evaluated at the points \eqn{(x,y,t)}{(x,y,t)} in \eqn{S\times T}{S x T}. If \code{lambda} is missing, the estimate of the space-time K-function is computed as for the homogeneous case (Diggle et al., 1995), i.e. considering \eqn{n/|S \times T|}{n/|S x T|} as an estimate of the space-time intensity.}
\item{correction}{A character vector specifying the edge correction(s) to be applied among \code{"isotropic"}, \code{"border"}, \code{"modified.border"}, \code{"translate"} and \code{"none"} (see Details). The default is \code{"isotropic"}.}
\item{infectious}{Logical value. If \code{TRUE}, only future events are considered and the isotropic edge correction method is used. See Details.}}
\details{Gabriel (2014) proposes the following unbiased estimator for the STIK-function, based on data giving the locations of events \eqn{x_i: i=1,\ldots,n}{x_i: i = 1,...,n} on a spatio-temporal region \eqn{S\times T}{SxT}, where \eqn{S}{S} is an arbitrary polygon and \eqn{T}{T} is a time interval:
\deqn{\widehat{K}(u,v)=\sum_{i=1}^{n}\sum_{j\neq i}\frac{1}{w_{ij}}\frac{1}{\lambda(x_i)\lambda(x_j)}\mathbf{1}_{\lbrace \|s_i - s_j\| \leq u \ ; \ |t_i - t_j| \leq v \rbrace},}{K(u,v) = sum_{i = 1,...,n} sum_{j != i} 1/w_ij 1/(lambda(x_i) lambda(x_j)) 1{u_ij <= u ; t_j - t_i <= v},}
where \eqn{\lambda(x_i)}{lambda(x_i)} is the intensity at \eqn{x_i = (s_i,t_i)}{x_i = (s_i, t_i)} and \eqn{w_{ij}}{w_ij} is an edge correction factor to deal with spatial-temporal edge effects. The edge correction methods implemented are:
\code{isotropic}: \eqn{w_{ij} = |S \times T| w_{ij}^{(t)} w_{ij}^{(s)}}{w_ij = |S x T| w_ij^(s) w_ij^(t)}, where the temporal edge correction factor \eqn{w_{ij}^{(t)} = 1}{w_ij^(t) = 1} if both ends of the interval of length \eqn{2 |t_i - t_j|}{2|t_i - t_j|} centred at \eqn{t_i}{t_i} lie within \eqn{T}{T} and \eqn{w_{ij}^{(t)}=1/2}{w_ij^(t) = 1/2} otherwise and \eqn{w_{ij}^{(s)}}{w_ij^(s)} is the proportion of the circumference of a circle centred at the location \eqn{s_i}{s_i} with radius \eqn{\|s_i -s_j\|}{||s_i - s_j||} lying in \eqn{S}{S} (also called Ripley's edge correction factor).
\code{border}: \eqn{w_{ij}=\frac{\sum_{j=1}^{n}\mathbf{1}\lbrace d(s_j,S)>u \ ; \ d(t_j,T) >v\rbrace/\lambda(x_j)}{\mathbf{1}_{\lbrace d(s_i,S) > u \ ; \ d(t_i,T) >v \rbrace}}}{w_ij = (sum_{j = 1,...,n} 1{d(s_j, S) > u ; d(t_j, T) > v}/
lambda(x_j)) / 1{d(s_i, S) > u ; d(t_i, T) > v}}, where \eqn{d(s_i,S)}{d(s_i, S)} denotes the distance between \eqn{s_i}{s_i} and the boundary of \eqn{S}{S} and \eqn{d(t_i,T)}{d(t_i, T)} the distance between \eqn{t_i}{t_i} and the boundary of \eqn{T}{T}.
\code{modified.border}: \eqn{w_{ij} = \frac{|S_{\ominus u}|\times|T_{\ominus v}|}{\mathbf{1}_{\lbrace d(s_i,S) > u \ ; \ d(t_i,T) >v \rbrace}}}{w_ij = |S_(-u) x T_(-v)| / 1{d(s_i, S) > u ; d(t_i, T) > v}}, where \eqn{S_{\ominus u}}{S_(-u)} and \eqn{T_{\ominus v}}{T_(-v)} are the eroded spatial and temporal region respectively, obtained by trimming off a margin of width \eqn{u}{u} and \eqn{v}{v} from the border of the original region.
\code{translate}: \eqn{w_{ij} =|S \cap S_{s_i-s_j}| \times |T \cap T_{t_i-t_j}|}{w_ij = |S intersect S_(s_i - s_j)
x T intersect T_(t_i - t_j)|}, where \eqn{S_{s_i-s_j}}{S_(s_i - s_j)} and \eqn{T_{t_i-t_j}}{T_(t_ i - t_j)}
are the translated spatial and temporal regions.
\code{none}: No edge correction is performed and \eqn{w_{ij}=|S \times T|}{w_ij = |S x T|}.
If parameter \code{infectious = TRUE}, ony future events are considered and the estimator is, using an isotropic edge correction factor (Gabriel and Diggle, 2009):
\deqn{\widehat{K}(u,v)=\frac{1}{|S\times T|}\frac{n}{n_v}\sum_{i=1}^{n_v}\sum_{j=1; j > i}^{n_v} \frac{1}{w_{ij}} \frac{1}{\lambda(x_i) \lambda(x_j)}\mathbf{1}_{\left\lbrace u_{ij} \leq u\right\rbrace}\mathbf{1}_{\left\lbrace t_j - t_i \leq v \right\rbrace}.}{K(u,v) = 1/|SxT| n/n_v sum_{i = 1,...,n_v} sum_{j = 1,...,n_v; j > i} 1/w_ij 1/(lambda(x_i) lambda(x_j)) 1{u_ij <= u} 1{t_j - t_i <= v}.}
In this equation, the points \eqn{x_i=(s_i, t_i)}{x_i = (s_i, t_i)} are ordered so that \eqn{t_i < t_{i+1}}{t_i <
t_(i+1)}, with ties due to round-off error broken by randomly unrounding if necessary. To deal with temporal edge-effects, for each \eqn{v}{v}, \eqn{n_v}{n_v} denotes the number of events for which \eqn{t_i \leq T_1 -v}{t_i <= T_1 - v}, with \eqn{T=[T_0,T_1]}{T=[T_0, T_1]}. To deal with spatial edge-effects, we use Ripley's method.
If \code{lambda} is missing in argument, \code{STIKhat} computes an estimate of the space-time (homogeneous)
K-function: \deqn{\widehat{K}(u,v)=\frac{|S\times T|}{n_v(n-1)} \sum_{i=1}^{n_v}\sum_{j=1;j>i}^{n_v}\frac{1}{w_{ij}}\mathbf{1}_{\lbrace u_{ij}\leq u \rbrace}\mathbf{1}_{\lbrace t_j - t_i \leq v \rbrace}}{K(u,v) = |SxT|/(n_v (n-1)) sum_{i = 1,...,n_v} sum_{j = 1,...,n_v; j > i} 1/w_ij 1{u_ij <= u} 1{t_j - t_i <= v}}}
\value{ A list containing:
\item{Khat}{\code{ndist} x \code{ntimes} matrix containing values of \eqn{\hat{K}_{ST}(u,v).}{K(u,v)}.}
\item{Ktheo}{\code{ndist} x \code{ntimes} matrix containing theoretical values for a Poisson process; \eqn{\pi u^2 v}{pi u^2 v} for \eqn{K}{K} and \eqn{2 \pi u^2 v}{2 pi u^2 v} for \eqn{K^*}{K^*}.}
\item{dist, times, infectious}{Parameters passed in argument.}
\item{correction}{The name(s) of the edge correction method(s) passed in argument.}}
\references{
Baddeley A., Moller J. and Waagepetersen R. (2000). Non- and semi-parametric estimation of interaction in inhomogeneous point patterns. Statistica Neerlandica, 54, 329--350.
Baddeley, A., Rubak, E., Turner, R., (2015). Spatial Point Patterns: Methodology and Applications with R. CRC Press, Boca Raton.
Diggle P. , Chedwynd A., Haggkvist R. and Morris S. (1995). Second-order analysis of space-time clustering. Statistical Methods in Medical Research, 4, 124--136.
Gabriel E., Diggle P. (2009). Second-order analysis of inhomogeneous spatio-temporal point process data. Statistica Neerlandica, 63, 43--51.
Gabriel E., Rowlingson B., Diggle P. (2013). stpp: an R package for plotting, simulating and analyzing Spatio-Temporal Point Patterns. Journal of Statistical Software, 53(2), 1--29.
Gabriel E. (2014). Estimating second-order characteristics of inhomogeneous spatio-temporal point processes: influence of edge correction methods and intensity estimates. Methodology and computing in Applied Probabillity, 16(2), 411--431.
}
\author{Edith Gabriel <edith.gabriel@inrae.fr> }
\examples{\donttest{
# First example
data(fmd)
data(northcumbria)
FMD<-as.3dpoints(fmd[,1]/1000,fmd[,2]/1000,fmd[,3])
Northcumbria=northcumbria/1000
# estimation of the temporal intensity
Mt<-density(FMD[,3],n=1000)
mut<-Mt$y[findInterval(FMD[,3],Mt$x)]*dim(FMD)[1]
# estimation of the spatial intensity
h<-mse2d(as.points(FMD[,1:2]), Northcumbria, nsmse=50, range=4)
h<-h$h[which.min(h$mse)]
Ms<-kernel2d(as.points(FMD[,1:2]), Northcumbria, h, nx=5000, ny=5000)
atx<-findInterval(x=FMD[,1],vec=Ms$x)
aty<-findInterval(x=FMD[,2],vec=Ms$y)
mhat<-NULL
for(i in 1:length(atx)) mhat<-c(mhat,Ms$z[atx[i],aty[i]])
# estimation of the STIK function
u <- seq(0,10,by=1)
v <- seq(0,15,by=1)
stik1 <- STIKhat(xyt=FMD, s.region=northcumbria/1000,t.region=c(1,200),
lambda=mhat*mut/dim(FMD)[1], dist=u, times=v, infectious=TRUE)
# plotting the estimation
plotK(stik1)
plotK(stik1,type="persp",theta=-65,phi=35)
}
# Second example
xyt=rpp(lambda=200)
stik2=STIKhat(xyt$xyt,dist=seq(0,0.16,by=0.02),
times=seq(0,0.16,by=0.02),correction=c("border","translate"))
plotK(stik2,type="contour",legend=TRUE,which="translate")
}
| /stpp/man/STIKhat.Rd | no_license | albrizre/spatstat.revdep | R | false | false | 8,945 | rd | \name{STIKhat} \alias{STIKhat}
\title{Estimation of the Space-Time Inhomogeneous K-function}
\description{Compute an estimate of the Space-Time Inhomogeneous K-function.}
\usage{STIKhat(xyt, s.region, t.region, dist, times, lambda,
correction="isotropic", infectious=FALSE) }
\arguments{
\item{xyt}{Coordinates and times \eqn{(x,y,t)}{(x,y,t)} of the point pattern.}
\item{s.region}{Two-column matrix specifying polygonal region containing all data locations. If \code{s.region} is missing, the bounding box of \code{xyt[,1:2]} is considered.}
\item{t.region}{Vector containing the minimum and maximum values of the time interval. If \code{t.region} is missing, the range of \code{xyt[,3]} is considered.}
\item{dist}{Vector of distances \eqn{u}{u} at which \eqn{K(u,v)}{K(u,v)} is computed. If missing, the maximum of \code{dist} is given by \eqn{\min(S_x,S_y)/4}{min(S_x, S_y)/4}, where \eqn{S_x}{S_x} and \eqn{S_y}{S_y} represent the maximum width and height of the bounding box of \code{s.region}.}
\item{times}{Vector of times \eqn{v}{v} at which \eqn{K(u,v)}{K(u,v)} is computed. If missing, the maximum of \code{times} is given by \eqn{(T_{\max} - T_{\min})/4}{(T_max - T_min)/4}, where \eqn{T_{\min}}{T_min} and \eqn{T_{\max}}{T_max} are the minimum and maximum of the time interval \eqn{T}{T}.}
\item{lambda}{Vector of values of the space-time intensity function evaluated at the points \eqn{(x,y,t)}{(x,y,t)} in \eqn{S\times T}{S x T}. If \code{lambda} is missing, the estimate of the space-time K-function is computed as for the homogeneous case (Diggle et al., 1995), i.e. considering \eqn{n/|S \times T|}{n/|S x T|} as an estimate of the space-time intensity.}
\item{correction}{A character vector specifying the edge correction(s) to be applied among \code{"isotropic"}, \code{"border"}, \code{"modified.border"}, \code{"translate"} and \code{"none"} (see Details). The default is \code{"isotropic"}.}
\item{infectious}{Logical value. If \code{TRUE}, only future events are considered and the isotropic edge correction method is used. See Details.}}
\details{Gabriel (2014) proposes the following unbiased estimator for the STIK-function, based on data giving the locations of events \eqn{x_i: i=1,\ldots,n}{x_i: i = 1,...,n} on a spatio-temporal region \eqn{S\times T}{SxT}, where \eqn{S}{S} is an arbitrary polygon and \eqn{T}{T} is a time interval:
\deqn{\widehat{K}(u,v)=\sum_{i=1}^{n}\sum_{j\neq i}\frac{1}{w_{ij}}\frac{1}{\lambda(x_i)\lambda(x_j)}\mathbf{1}_{\lbrace \|s_i - s_j\| \leq u \ ; \ |t_i - t_j| \leq v \rbrace},}{K(u,v) = sum_{i = 1,...,n} sum_{j != i} 1/w_ij 1/(lambda(x_i) lambda(x_j)) 1{u_ij <= u ; t_j - t_i <= v},}
where \eqn{\lambda(x_i)}{lambda(x_i)} is the intensity at \eqn{x_i = (s_i,t_i)}{x_i = (s_i, t_i)} and \eqn{w_{ij}}{w_ij} is an edge correction factor to deal with spatial-temporal edge effects. The edge correction methods implemented are:
\code{isotropic}: \eqn{w_{ij} = |S \times T| w_{ij}^{(t)} w_{ij}^{(s)}}{w_ij = |S x T| w_ij^(s) w_ij^(t)}, where the temporal edge correction factor \eqn{w_{ij}^{(t)} = 1}{w_ij^(t) = 1} if both ends of the interval of length \eqn{2 |t_i - t_j|}{2|t_i - t_j|} centred at \eqn{t_i}{t_i} lie within \eqn{T}{T} and \eqn{w_{ij}^{(t)}=1/2}{w_ij^(t) = 1/2} otherwise and \eqn{w_{ij}^{(s)}}{w_ij^(s)} is the proportion of the circumference of a circle centred at the location \eqn{s_i}{s_i} with radius \eqn{\|s_i -s_j\|}{||s_i - s_j||} lying in \eqn{S}{S} (also called Ripley's edge correction factor).
\code{border}: \eqn{w_{ij}=\frac{\sum_{j=1}^{n}\mathbf{1}\lbrace d(s_j,S)>u \ ; \ d(t_j,T) >v\rbrace/\lambda(x_j)}{\mathbf{1}_{\lbrace d(s_i,S) > u \ ; \ d(t_i,T) >v \rbrace}}}{w_ij = (sum_{j = 1,...,n} 1{d(s_j, S) > u ; d(t_j, T) > v}/
lambda(x_j)) / 1{d(s_i, S) > u ; d(t_i, T) > v}}, where \eqn{d(s_i,S)}{d(s_i, S)} denotes the distance between \eqn{s_i}{s_i} and the boundary of \eqn{S}{S} and \eqn{d(t_i,T)}{d(t_i, T)} the distance between \eqn{t_i}{t_i} and the boundary of \eqn{T}{T}.
\code{modified.border}: \eqn{w_{ij} = \frac{|S_{\ominus u}|\times|T_{\ominus v}|}{\mathbf{1}_{\lbrace d(s_i,S) > u \ ; \ d(t_i,T) >v \rbrace}}}{w_ij = |S_(-u) x T_(-v)| / 1{d(s_i, S) > u ; d(t_i, T) > v}}, where \eqn{S_{\ominus u}}{S_(-u)} and \eqn{T_{\ominus v}}{T_(-v)} are the eroded spatial and temporal region respectively, obtained by trimming off a margin of width \eqn{u}{u} and \eqn{v}{v} from the border of the original region.
\code{translate}: \eqn{w_{ij} =|S \cap S_{s_i-s_j}| \times |T \cap T_{t_i-t_j}|}{w_ij = |S intersect S_(s_i - s_j)
x T intersect T_(t_i - t_j)|}, where \eqn{S_{s_i-s_j}}{S_(s_i - s_j)} and \eqn{T_{t_i-t_j}}{T_(t_ i - t_j)}
are the translated spatial and temporal regions.
\code{none}: No edge correction is performed and \eqn{w_{ij}=|S \times T|}{w_ij = |S x T|}.
If parameter \code{infectious = TRUE}, only future events are considered and the estimator is, using an isotropic edge correction factor (Gabriel and Diggle, 2009):
\deqn{\widehat{K}(u,v)=\frac{1}{|S\times T|}\frac{n}{n_v}\sum_{i=1}^{n_v}\sum_{j=1; j > i}^{n_v} \frac{1}{w_{ij}} \frac{1}{\lambda(x_i) \lambda(x_j)}\mathbf{1}_{\left\lbrace u_{ij} \leq u\right\rbrace}\mathbf{1}_{\left\lbrace t_j - t_i \leq v \right\rbrace}.}{K(u,v) = 1/|SxT| n/n_v sum_{i = 1,...,n_v} sum_{j = 1,...,n_v; j > i} 1/w_ij 1/(lambda(x_i) lambda(x_j)) 1{u_ij <= u} 1{t_j - t_i <= v}.}
In this equation, the points \eqn{x_i=(s_i, t_i)}{x_i = (s_i, t_i)} are ordered so that \eqn{t_i < t_{i+1}}{t_i <
t_(i+1)}, with ties due to round-off error broken by randomly unrounding if necessary. To deal with temporal edge-effects, for each \eqn{v}{v}, \eqn{n_v}{n_v} denotes the number of events for which \eqn{t_i \leq T_1 -v}{t_i <= T_1 - v}, with \eqn{T=[T_0,T_1]}{T=[T_0, T_1]}. To deal with spatial edge-effects, we use Ripley's method.
If \code{lambda} is missing in argument, \code{STIKhat} computes an estimate of the space-time (homogeneous)
K-function: \deqn{\widehat{K}(u,v)=\frac{|S\times T|}{n_v(n-1)} \sum_{i=1}^{n_v}\sum_{j=1;j>i}^{n_v}\frac{1}{w_{ij}}\mathbf{1}_{\lbrace u_{ij}\leq u \rbrace}\mathbf{1}_{\lbrace t_j - t_i \leq v \rbrace}}{K(u,v) = |SxT|/(n_v (n-1)) sum_{i = 1,...,n_v} sum_{j = 1,...,n_v; j > i} 1/w_ij 1{u_ij <= u} 1{t_j - t_i <= v}}}
\value{ A list containing:
\item{Khat}{\code{ndist} x \code{ntimes} matrix containing values of \eqn{\hat{K}_{ST}(u,v).}{K(u,v)}.}
\item{Ktheo}{\code{ndist} x \code{ntimes} matrix containing theoretical values for a Poisson process; \eqn{\pi u^2 v}{pi u^2 v} for \eqn{K}{K} and \eqn{2 \pi u^2 v}{2 pi u^2 v} for \eqn{K^*}{K^*}.}
\item{dist, times, infectious}{Parameters passed in argument.}
\item{correction}{The name(s) of the edge correction method(s) passed in argument.}}
\references{
Baddeley A., Moller J. and Waagepetersen R. (2000). Non- and semi-parametric estimation of interaction in inhomogeneous point patterns. Statistica Neerlandica, 54, 329--350.
Baddeley, A., Rubak, E., Turner, R., (2015). Spatial Point Patterns: Methodology and Applications with R. CRC Press, Boca Raton.
Diggle P. , Chedwynd A., Haggkvist R. and Morris S. (1995). Second-order analysis of space-time clustering. Statistical Methods in Medical Research, 4, 124--136.
Gabriel E., Diggle P. (2009). Second-order analysis of inhomogeneous spatio-temporal point process data. Statistica Neerlandica, 63, 43--51.
Gabriel E., Rowlingson B., Diggle P. (2013). stpp: an R package for plotting, simulating and analyzing Spatio-Temporal Point Patterns. Journal of Statistical Software, 53(2), 1--29.
Gabriel E. (2014). Estimating second-order characteristics of inhomogeneous spatio-temporal point processes: influence of edge correction methods and intensity estimates. Methodology and computing in Applied Probabillity, 16(2), 411--431.
}
\author{Edith Gabriel <edith.gabriel@inrae.fr> }
\examples{\donttest{
# First example
data(fmd)
data(northcumbria)
FMD<-as.3dpoints(fmd[,1]/1000,fmd[,2]/1000,fmd[,3])
Northcumbria=northcumbria/1000
# estimation of the temporal intensity
Mt<-density(FMD[,3],n=1000)
mut<-Mt$y[findInterval(FMD[,3],Mt$x)]*dim(FMD)[1]
# estimation of the spatial intensity
h<-mse2d(as.points(FMD[,1:2]), Northcumbria, nsmse=50, range=4)
h<-h$h[which.min(h$mse)]
Ms<-kernel2d(as.points(FMD[,1:2]), Northcumbria, h, nx=5000, ny=5000)
atx<-findInterval(x=FMD[,1],vec=Ms$x)
aty<-findInterval(x=FMD[,2],vec=Ms$y)
mhat<-NULL
for(i in 1:length(atx)) mhat<-c(mhat,Ms$z[atx[i],aty[i]])
# estimation of the STIK function
u <- seq(0,10,by=1)
v <- seq(0,15,by=1)
stik1 <- STIKhat(xyt=FMD, s.region=northcumbria/1000,t.region=c(1,200),
lambda=mhat*mut/dim(FMD)[1], dist=u, times=v, infectious=TRUE)
# plotting the estimation
plotK(stik1)
plotK(stik1,type="persp",theta=-65,phi=35)
}
# Second example
xyt=rpp(lambda=200)
stik2=STIKhat(xyt$xyt,dist=seq(0,0.16,by=0.02),
times=seq(0,0.16,by=0.02),correction=c("border","translate"))
plotK(stik2,type="contour",legend=TRUE,which="translate")
}
|
#' @name launch_app
#'
#' @title Launch the Covid-19 shiny application
#'
#' @description This function will find and load the shiny application from inst/app.
#'
#'
#' @source The dataset from the application comes from Ramikripsin coronavirus package
#'
#' @examples
#'\dontrun{
#'launch_app()
#' }
#'
#' @export
#'
library(shiny)
launch_app <- function(){
  # Locate the bundled shiny app shipped under inst/app of this package.
  app_path <- system.file("app", package="southeastcovid")
  # system.file() returns "" when the directory is missing.
  if (!nzchar(app_path)) {
    stop("Could not find the directory, please re-install southeastcovid")
  }
  # Launch the application in the default (normal) display mode.
  shiny::runApp(app_path, display.mode = "normal")
}
| /R/launch_app.R | permissive | etc5523-2020/r-package-assessment-whysptra | R | false | false | 576 | r | #' @name launch_app
#'
#' @title Launch the Covid-19 shiny application
#'
#' @description This function will find and load the shiny application from inst/app.
#'
#'
#' @source The dataset from the application comes from Ramikripsin coronavirus package
#'
#' @examples
#'\dontrun{
#'launch_app()
#' }
#'
#' @export
#'
library(shiny)
launch_app <- function(){
appDir <- system.file("app", package="southeastcovid")
if (appDir == ""){
stop("Could not find the directory, please re-install southeastcovid")
}
shiny::runApp(appDir, display.mode = "normal")
}
|
# Homework 1: SARIMA modelling of a weekly gold futures price series.
# NOTE(review): the paths below are machine-specific; confirm before running.
library(tseries)
library(forecast)
# Set the working directory and read the data
setwd('H:\\Program Products\\Python Files\\0 Jupyter\\FDM-Project-2021\\dataset\\期货价格数据\\使用数据\\')
# NOTE(review): `head=F` relies on partial matching of `header`; prefer header = FALSE.
da <- read.csv("./黄金周数据.csv",head=F)
names(da) <- c('date','price')
head(da)
price <- da$price
# Weekly series (frequency 52) starting in 1972
price = ts(da$price,frequency=52,start=c(1972,1,10))
head(price)
plot.ts(price)
tsdisplay(price)
# Hold out roughly the last year of observations as a test set
sprice<-ts(as.vector(price[1:500]),frequency=52,start=c(1972,1,10))
tsdisplay(sprice)
adf.test(sprice)
# There is a clear growth trend tied to the economy, so remove it by differencing
s1<-diff(sprice,1)
# Unit-root (ADF) test to check stationarity
adf.test(s1)
# Test passed: the differenced series is stationary
tsdisplay(s1)
# The plots show cut-off behaviour in both the ACF and the PACF
# From the PACF plot, p could be 2 or 100
# From the ACF plot, q could be 2, 90 or 140
# Fit the candidate models and compare which works best
a <- auto.arima(sprice)
summary(a)
arima(sprice,order=c(2,1,2))
arima(sprice,order=c(100,1,2))
arima(sprice,order=c(2,1,90))
arima(sprice,order=c(2,1,140))
arima(sprice,order=c(100,1,120))
arima(sprice,order=c(100,1,90))
# Fit the chosen model first
fit1<-arima(sprice,order=c(2,1,2),seasonal=list(order=c(1,0,0),period=52))
# Then inspect the diagnostics with tsdiag; the Ljung-Box p-values are all
# above 0.05, so the residuals look like white noise.
tsdiag(fit1)
fit1
# Forecast the held-out period and compare against the actual series
f.p1<-forecast(fit1,h=50,level=c(99.5))
plot(f.p1,ylim=c(100,500))
lines(f.p1$fitted,col="green")
lines(price,col="red")
# Refit the same model using the full data set
fit2<-arima(price,order=c(2,1,2),seasonal=list(order=c(1,0,0),period=52))
tsdiag(fit2)
# Forecast the next 7 weeks
f.p2<-forecast(fit2,h=7,level=c(99.5))
plot(f.p2,ylim=c(100,500))
lines(f.p2$fitted,col="green")
lines(price,col="red")
f.p2
library(forecast)
# 设置工作目录并读取数据
setwd('H:\\Program Products\\Python Files\\0 Jupyter\\FDM-Project-2021\\dataset\\期货价格数据\\使用数据\\')
da <- read.csv("./黄金周数据.csv",head=F)
names(da) <- c('date','price')
head(da)
price <- da$price
price = ts(da$price,frequency=52,start=c(1972,1,10))
head(price)
plot.ts(price)
tsdisplay(price)
# 拆掉最后一年做样本的测试集
sprice<-ts(as.vector(price[1:500]),frequency=52,start=c(1972,1,10))
tsdisplay(sprice)
adf.test(sprice)
# 明显存在按经济波动增长的趋势,故利用差法将其干掉
s1<-diff(sprice,1)
# 单位根检验判断是否平稳
adf.test(s1)
# 检验通过
tsdisplay(s1)
# 图像显示acf与pac均存在截尾情况
# 根据pacf图像可判断 p 取 2 或 100
# 根据acf图像可判断 q 取 2 90 或 140
# 进行模型拟合,看看哪个效果好
a <- auto.arima(sprice)
summary(a)
arima(sprice,order=c(2,1,2))
arima(sprice,order=c(100,1,2))
arima(sprice,order=c(2,1,90))
arima(sprice,order=c(2,1,140))
arima(sprice,order=c(100,1,120))
arima(sprice,order=c(100,1,90))
#先进行拟合
fit1<-arima(sprice,order=c(2,1,2),seasonal=list(order=c(1,0,0),period=52))
#然后tsdiag看一下各自的结果,Ljung-Box检验的p值都在0.05之上,结果不错。
tsdiag(fit1)
fit1
#预测
f.p1<-forecast(fit1,h=50,level=c(99.5))
plot(f.p1,ylim=c(100,500))
lines(f.p1$fitted,col="green")
lines(price,col="red")
# 利用全部数据进行建模
#先进行拟合
fit2<-arima(price,order=c(2,1,2),seasonal=list(order=c(1,0,0),period=52))
tsdiag(fit2)
#预测
f.p2<-forecast(fit2,h=7,level=c(99.5))
plot(f.p2,ylim=c(100,500))
lines(f.p2$fitted,col="green")
lines(price,col="red")
f.p2 |
#' Get notifications
#'
#' Fetches the "notifications" endpoint and optionally parses the response.
#'
#' @export
#' @template curl
#' @param parse (logical) If \code{TRUE} (the default), attempt to parse the
#'   response into data.frame's where possible; otherwise return the raw list.
#' @return either a data.frame or a list
#' @examples \dontrun{
#' notifications()
#' }
notifications <- function(parse = TRUE, ...) {
  # GET the endpoint with no query parameters, then hand off to the parser.
  asp_parse(asp_GET("notifications", list(), ...), parse)
}
| /R/notifications.R | no_license | sckott/aspacer | R | false | false | 354 | r | #' Get notifications
#'
#' @export
#' @template curl
#' @param parse (logical) Attempt to parse to data.frame's if possible. Default: \code{TRUE}
#' @return either a data.frame or a list
#' @examples \dontrun{
#' notifications()
#' }
notifications <- function(parse = TRUE, ...) {
res <- asp_GET("notifications", list(), ...)
asp_parse(res, parse)
}
|
library(sporm)
### Name: plotor
### Title: Empirical odds rate plot
### Aliases: plotor
### ** Examples
# Use radar tube life data
z<-RadarTube$Days
v<-RadarTube$Type
# Split lifetimes into the two tube types
x<-z[v==1]; y<-z[v==2]
# Dabrowska-Doksum's estimate of theta (second call swaps the samples)
theta0.hat<-dd.est(x,y)
vartheta0.hat<-dd.est(y,x)
# MRLE of theta
m<-length(x); n<-length(y)
N<-m+n; lambda<-m/N
phat0<-phi(N, theta0.hat, lambda)/N
theta.hat<-mrle.sporm(x, y, theta0.hat, phat0)$theta
## Empirical Odds Ratio Plot
plotor(x, y, main="Empirical Odds Ratio Plot", lwd=2, ylim=c(0,2))
# Reference lines for the three estimates (MRLE, reciprocal DD, DD)
abline(h=theta.hat, lwd=2,lty=2, col=2)
abline(h=1/vartheta0.hat, lwd=2,lty=3, col=3)
abline(h=theta0.hat, lwd=2,lty=4, col=4)
| /data/genthat_extracted_code/sporm/examples/plotor.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 661 | r | library(sporm)
### Name: plotor
### Title: Empirical odds rate plot
### Aliases: plotor
### ** Examples
# Use radar tube life data
z<-RadarTube$Days
v<-RadarTube$Type
x<-z[v==1]; y<-z[v==2]
# Dabrowska-Doksum's estimate of theta
theta0.hat<-dd.est(x,y)
vartheta0.hat<-dd.est(y,x)
# MRLE of theta
m<-length(x); n<-length(y)
N<-m+n; lambda<-m/N
phat0<-phi(N, theta0.hat, lambda)/N
theta.hat<-mrle.sporm(x, y, theta0.hat, phat0)$theta
## Empirical Odds Raio Plot
plotor(x, y, main="Empirical Odds Ratio Plot", lwd=2, ylim=c(0,2))
abline(h=theta.hat, lwd=2,lty=2, col=2)
abline(h=1/vartheta0.hat, lwd=2,lty=3, col=3)
abline(h=theta0.hat, lwd=2,lty=4, col=4)
|
library(STPGA)
### Name: GenAlgForSubsetSelectionMONoTest
### Title: Genetic algorithm for subset selection no given test with
### multiple criteria for Multi Objective Optimized Experimental Design.
### Aliases: GenAlgForSubsetSelectionMONoTest
### ** Examples
## Not run:
##D library(STPGA)
##D library(GenomicMating)
##D
##D data(WheatData)
##D
##D
##D Msvd<-svd(scale(Wheat.M, scale=F, center=T), nu=50, nv=50)
##D Dgeno<-as.matrix(dist(scale(Wheat.M, scale=F, center=T)))^2
##D P<-Wheat.M%*%Msvd$v
##D dim(P)
##D rownames(Dgeno)<-colnames(Dgeno)<-rownames(P)<-rownames(Wheat.M)
##D test<-sample(rownames(P), 25)
##D candidates<-setdiff(rownames(P), test)
##D outnewprog<-GenAlgForSubsetSelectionMONoTest(Pcs=P,Dist=Dgeno,
##D Candidates=candidates,ntoselect=75,
##D selectionstats=list("DOPT", "neg_dist_in_train2", "dist_to_test2"),
##D selectionstatstypes=c("Pcs", "Dist", "Dist"),
##D plotdirections=c(1,1,1),npopGA=300,
##D mutprob=1, mutintensity=2, nitGA=100,
##D plotiters=TRUE, mc.cores=1, InitPop=NULL)
##D
##D #####Best solution according to ideal solution concept
##D outnewprog[[1]][[which.min(disttoideal(outnewprog[[2]]))]]
##D
## End(Not run)
| /data/genthat_extracted_code/STPGA/examples/GenAlgForSubsetSelectionMONoTest.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,179 | r | library(STPGA)
### Name: GenAlgForSubsetSelectionMONoTest
### Title: Genetic algorithm for subset selection no given test with
### multiple criteria for Multi Objective Optimized Experimental Design.
### Aliases: GenAlgForSubsetSelectionMONoTest
### ** Examples
## Not run:
##D library(STPGA)
##D library(GenomicMating)
##D
##D data(WheatData)
##D
##D
##D Msvd<-svd(scale(Wheat.M, scale=F, center=T), nu=50, nv=50)
##D Dgeno<-as.matrix(dist(scale(Wheat.M, scale=F, center=T)))^2
##D P<-Wheat.M%*%Msvd$v
##D dim(P)
##D rownames(Dgeno)<-colnames(Dgeno)<-rownames(P)<-rownames(Wheat.M)
##D test<-sample(rownames(P), 25)
##D candidates<-setdiff(rownames(P), test)
##D outnewprog<-GenAlgForSubsetSelectionMONoTest(Pcs=P,Dist=Dgeno,
##D Candidates=candidates,ntoselect=75,
##D selectionstats=list("DOPT", "neg_dist_in_train2", "dist_to_test2"),
##D selectionstatstypes=c("Pcs", "Dist", "Dist"),
##D plotdirections=c(1,1,1),npopGA=300,
##D mutprob=1, mutintensity=2, nitGA=100,
##D plotiters=TRUE, mc.cores=1, InitPop=NULL)
##D
##D #####Best solution according to ideal solution concept
##D outnewprog[[1]][[which.min(disttoideal(outnewprog[[2]]))]]
##D
## End(Not run)
|
# Build the logistic-regression model and persist it to a .rda file.
dataset <- read.csv("Dataset.csv", stringsAsFactors = FALSE,
                    encoding = "UTF-8", header = TRUE)

# glm() needs the response and the categorical predictors as factors.
for (col in c("Clase", "Genero", "Union.padres")) {
  dataset[[col]] <- as.factor(dataset[[col]])
}

# Binomial GLM: model `Clase` from family/school covariates.
logistic <- glm(
  Clase ~ Union.padres + Presion.familiar + Ambiente.escolar + Promedio +
    Genero + Libros + Apoyo.academico + Edad.padre +
    Relacion.esfuerzo.exito,
  data = dataset,
  family = "binomial"
)

# Serialize the fitted model for later reuse.
save(logistic, file = "LogisticModel.rda")
| /Script.r | no_license | Charly52830/JovenesTalento | R | false | false | 675 | r | #Script para crear el modelo y guardarlo en un archivo .rda
dataset <- read.csv("Dataset.csv",stringsAsFactors=FALSE,encoding="UTF-8",header=TRUE)
#Ajustar el dataset para el entrenamiento.
dataset$Clase <- as.factor(dataset$Clase)
dataset$Genero <- as.factor(dataset$Genero)
dataset$Union.padres <- as.factor(dataset$Union.padres)
#Creación del modelo.
logistic <- glm( Clase ~
Union.padres +
Presion.familiar +
Ambiente.escolar +
Promedio +
Genero +
Libros +
Apoyo.academico +
Edad.padre +
Relacion.esfuerzo.exito,
data=dataset, family="binomial")
#Guardar el modelo
save(logistic,file="LogisticModel.rda")
|
library(mgcViz)
### Name: plot.multi.ptermFactor
### Title: Plotting factor or logical parametric effects
### Aliases: plot.multi.ptermFactor plot.multi.ptermLogical
###   plot.ptermFactor plot.ptermLogical
### ** Examples
# Simulate data and fit GAM
set.seed(3)
dat <- gamSim(1,n=2000,dist="normal",scale=20)
# Add an artificial factor and an artificial logical covariate
dat$fac <- as.factor( sample(c("A1", "A2", "A3"), nrow(dat), replace = TRUE) )
dat$logi <- as.logical( sample(c(TRUE, FALSE), nrow(dat), replace = TRUE) )
bs <- "cr"; k <- 12
b <- gam(y~fac + s(x0) + s(x1) + s(x2) + s(x3) + logi, data=dat)
o <- getViz(b, nsim = 0)
# Extract factor terms and plot it
pt <- pterm(o, 1)
plot(pt) + l_ciBar() + l_fitPoints(colour = 2) + l_rug(alpha = 0.2)
# Use barplot instead of points
pt <- pterm(o, 1)
plot(pt) + l_fitBar() + l_ciBar()
# Same with binary variable
pt <- pterm(o, 2)
plot(pt) + l_fitPoints() + l_ciBar()
| /data/genthat_extracted_code/mgcViz/examples/plot.ptermFactor.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 873 | r | library(mgcViz)
### Name: plot.multi.ptermFactor
### Title: Plotting factor or logical parametric effects
### Aliases: plot.multi.ptermFactor plot.multi.ptermLogical
### plot.ptermFactor plot.ptermLogical
### ** Examples
# Simulate data and fit GAM
set.seed(3)
dat <- gamSim(1,n=2000,dist="normal",scale=20)
dat$fac <- as.factor( sample(c("A1", "A2", "A3"), nrow(dat), replace = TRUE) )
dat$logi <- as.logical( sample(c(TRUE, FALSE), nrow(dat), replace = TRUE) )
bs <- "cr"; k <- 12
b <- gam(y~fac + s(x0) + s(x1) + s(x2) + s(x3) + logi, data=dat)
o <- getViz(b, nsim = 0)
# Extract factor terms and plot it
pt <- pterm(o, 1)
plot(pt) + l_ciBar() + l_fitPoints(colour = 2) + l_rug(alpha = 0.2)
# Use barplot instead of points
pt <- pterm(o, 1)
plot(pt) + l_fitBar() + l_ciBar()
# Same with binary varible
pt <- pterm(o, 2)
plot(pt) + l_fitPoints() + l_ciBar()
|
\name{AFtest}
\alias{AFtest}
\title{Exact test of equality of allele frequencies for males and females
}
\description{
Function \code{AFtest} tests equality of allele frequencies for males
and females for bi-allelic marker data by means of a Fisher exact test.
}
\usage{
AFtest(x, verbose = TRUE, ...)
}
\arguments{
\item{x}{a vector containg the genotypic counts c(A,B,AA,AB,BB) for a
bi-allelic X-chromosomal markers.}
\item{verbose}{verbose = TRUE prints results, verbose = FALSE is silent.}
\item{\dots}{additional arguments for function \code{fisher.test}.}
}
\details{
Function \code{AFtest} constructs the contingency table of sex by
allele, and call \code{fisher.test} to test for equality of allele
frequencies. The test assumes Hardy-Weinberg equilibrium.
}
\value{
\item{AC}{Two-way table of sex by allele}
\item{pval}{p-value of the test}
}
\author{ Jan Graffelman \email{jan.graffelman@upc.edu} }
\seealso{ \code{\link{HWChisq}}, \code{\link{HWExact}} }
\examples{
rs5968922 <- c(A=392, B=212, AA=275, AB=296, BB=80)
AFtest(rs5968922)
}
\keyword{htest}
| /HardyWeinberg/man/AFtest.Rd | no_license | akhikolla/InformationHouse | R | false | false | 1,128 | rd | \name{AFtest}
\alias{AFtest}
\title{Exact test of equality of allele frequencies for males and females
}
\description{
Function \code{AFtest} tests equality of allele frequencies for males
and females for bi-allelic marker data by means of a Fisher exact test.
}
\usage{
AFtest(x, verbose = TRUE, ...)
}
\arguments{
\item{x}{a vector containg the genotypic counts c(A,B,AA,AB,BB) for a
bi-allelic X-chromosomal markers.}
\item{verbose}{verbose = TRUE prints results, verbose = FALSE is silent.}
\item{\dots}{additional arguments for function \code{fisher.test}.}
}
\details{
Function \code{AFtest} constructs the contingency table of sex by
allele, and call \code{fisher.test} to test for equality of allele
frequencies. The test assumes Hardy-Weinberg equilibrium.
}
\value{
\item{AC}{Two-way table of sex by allele}
\item{pval}{p-value of the test}
}
\author{ Jan Graffelman \email{jan.graffelman@upc.edu} }
\seealso{ \code{\link{HWChisq}}, \code{\link{HWExact}} }
\examples{
rs5968922 <- c(A=392, B=212, AA=275, AB=296, BB=80)
AFtest(rs5968922)
}
\keyword{htest}
|
# Smoke tests for parseNum(): each test only checks that parsing does not
# error; no expectations are asserted on the parsed result.
# NOTE(review): the context string contains a typo ("derivarions"), but it is
# a runtime string and is therefore left untouched here.
context("Linear editrow derivarions")
test_that("Various rows work",{
  edt <- parse(text=c("x==y", "x+w ==y"))
  e <- parseNum(edt[[1]])
  #print(e)
})
test_that("Parsing a constant works",{
  edt <- parse(text=c("x < 2"))
  e <- parseNum(edt[[1]])
  #print(e)
})
test_that("Parsing a inequality works",{
  edt <- parse(text=c("x > 2"))
  e <- parseNum(edt[[1]])
  #print(e)
})
test_that("Parsing a negative coefficient works",{
  edt <- parse(text=c("x == -2"))
  # NOTE(review): the body below is commented out, so this test exercises
  # nothing beyond parse(); restore or delete it.
  #e <- makeEditRow(edt[[1]])
  #print(e)
})
| /data/genthat_extracted_code/editrules/tests/testEditRow.R | no_license | surayaaramli/typeRrh | R | false | false | 525 | r | context("Linear editrow derivarions")
test_that("Various rows work",{
edt <- parse(text=c("x==y", "x+w ==y"))
e <- parseNum(edt[[1]])
#print(e)
})
test_that("Parsing a constant works",{
edt <- parse(text=c("x < 2"))
e <- parseNum(edt[[1]])
#print(e)
})
test_that("Parsing a inequality works",{
edt <- parse(text=c("x > 2"))
e <- parseNum(edt[[1]])
#print(e)
})
test_that("Parsing a negative coefficient works",{
edt <- parse(text=c("x == -2"))
#e <- makeEditRow(edt[[1]])
#print(e)
})
|
context('test if there is output')
test_that("something is produced", {
  # Seed fixed so the random song pick is reproducible within this test.
  set.seed(12)
  result <- select_powerballad()
  # should be "Paradise by the dashboard light - Meat Loaf", but who knows when songs are added
  expect_true( is.character(result) )
})
| /tests/testthat/test_songs.R | permissive | Raoke/powrballad | R | false | false | 272 | r | context('test if there is output')
test_that("something is produced", {
set.seed(12)
result <- select_powerballad()
# should be "Paradise by the dashboard light - Meat Loaf", but who nows when songs are added
expect_true( is.character(result) )
})
|
# This is the user-interface definition of a Shiny web application.
# You can find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com
#
dashboardPage(
  # Header title: loadinglogo() (a project helper, defined elsewhere) builds a
  # clickable logo that doubles as a loading indicator.
  dashboardHeader(title = loadinglogo('https://www.kaggle.com/wendykan/lending-club-loan-data',
                                      'peertopeer.png',
                                      'loading.gif',
                                      height=50
  )
  ),
  dashboardSidebar(),
  dashboardBody(
    # Boxes need to be put in a row (or column)
    fluidRow(
      box(plotOutput("plot1", height = 250)),
      box(
        title = "Controls",
        sliderInput("slider", "Number of observations:", 1, 100, 50)
      )
    )
  )
)
| /shiny/lc/ui.R | no_license | MikeMorris89/mm | R | false | false | 786 | r |
# This is the user-interface definition of a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
dashboardPage(
dashboardHeader(title = loadinglogo('https://www.kaggle.com/wendykan/lending-club-loan-data',
'peertopeer.png',
'loading.gif',
height=50
)
),
dashboardSidebar(),
dashboardBody(
# Boxes need to be put in a row (or column)
fluidRow(
box(plotOutput("plot1", height = 250)),
box(
title = "Controls",
sliderInput("slider", "Number of observations:", 1, 100, 50)
)
)
)
)
|
test_that("percentage() works", {
  # percentage(x, total) should express x as a percent of total.
  input <- 1:10
  expected <- input / 5 * 100
  expect_identical(percentage(input, 5), expected)
})
| /tests/testthat/test-function-percentage.R | permissive | KWB-R/kwb.utils | R | false | false | 105 | r | test_that("percentage() works", {
x <- 1:10
expect_identical(percentage(x, 5), x / 5 * 100)
})
|
# Exploratory plot: histogram of Global Active Power for 2007-02-01/02,
# from the UCI "Individual household electric power consumption" data set.
# NOTE(review): the input path is machine-specific; adjust before running.
# The data set encodes missing values as "?"; declaring na.strings lets
# read.table parse the power columns numerically instead of as character.
file <- read.table("D:\\big_data\\household_power_consumption.txt",
                   sep = ";", header = TRUE, comment.char = "",
                   na.strings = "?")
# Keep only the two days of interest (dates are stored as d/m/Y strings).
slice <- file[file$Date %in% c("1/2/2007", "2/2/2007"), ]
png("rplot.png")
# as.numeric(as.character(...)) keeps this robust even if the column was
# read as character/factor on older R versions.
hist(as.numeric(as.character(slice$Global_active_power)),
     col = "red",
     xlab = "Global active power (kilowatts)",
     main = "Global active power")
dev.off()
slice = file[file[,1] == '1/2/2007' | file[,1] == '2/2/2007',]
png('rplot.png')
hist(as.numeric(as.character(slice$Global_active_power)), col = 'red', xlab = 'Global active power (kilowatts)', main = 'Global active power')
dev.off() |
# Test the plotting package on each of the emIRT functions
# Use the examples from the help files
# (emIRT_graph.R provides the plot()/plot.emIRT() methods exercised below.)
source('emIRT_graph.R')
# binIRT: binary IRT on the s109 roll-call data
data(s109)
rc <- convertRC(s109)
p <- makePriors(rc$n, rc$m, 1)
s <- getStarts(rc$n, rc$m, 1)
lout <- binIRT(.rc = rc,
               .starts = s,
               .priors = p,
               .control = {
                 list(threads = 1,
                      verbose = FALSE,
                      thresh = 1e-6)})
plot(lout,rc_data=rc)
# Add CIs (bootstrap the ideal-point estimates)
lout <- boot_emIRT(lout, .data = rc, .starts = s, .priors = p,
                   .control = list(threads = 1, verbose = FALSE, thresh = 1e-06), Ntrials=10, verbose=2)
plot(lout,rc_data=rc)
#show only republicans
plot(lout,rc_data=rc,subset_name='R')
#show only most liberal/most conservative senators.
plot(lout,rc_data=rc,subset_name=c('SESSIONS (R AL)','BOXER (D CA)'),subset_type='individual')
#adjust position of names for senators
plot(lout,rc_data=rc,hjust_bottom=-1,hjust_top=2)
# Function produces a ggplot object, so it can be further modified
outobj <- plot(lout,rc_data=rc)
outobj <- outobj + geom_vline(xintercept=0)
plot(outobj)
#hierIRT: hierarchical model on the DW-NOMINATE data
data(dwnom)
lout <- hierIRT(.data = dwnom$data.in,
                .starts = dwnom$cur,
                .priors = dwnom$priors,
                .control = {list(
                  threads = 2,
                  verbose = TRUE,
                  thresh = 1e-4,
                  maxit=200,
                  checkfreq=1)})
# Note that with this data in which different legislators have multiple observations, ggplot2
# will put all observations for each legislator on the same row
plot(lout,legis.names=dwnom$legis$name,parties=dwnom$nomres$party)
plot(lout,legis.names=dwnom$legis$name,parties=dwnom$nomres$party,subset_name='HATCH ',subset_type='individual')
#networkIRT: follower-network model on the US Twitter data
data(ustweet)
lout <- networkIRT(.y = ustweet$data,
                   .starts = ustweet$starts,
                   .priors = ustweet$priors,
                   .control = {list(verbose = TRUE,
                                    maxit = 100,
                                    convtype = 2,
                                    thresh = 1e-6,
                                    threads = 1)},
                   .anchor_item = 43)
plot(lout,legis.names=colnames(ustweet$data))
#ordIRT: ordinal IRT on the Asahi-Todai survey
data(AsahiTodai)
out.varinf <- ordIRT(.rc = AsahiTodai$dat.all, .starts = AsahiTodai$start.values,
                     .priors = AsahiTodai$priors, .D = 1,
                     .control = {list(verbose = TRUE,
                                      thresh = 1e-6, maxit = 500)})
plot(out.varinf)
#poisIRT: Poisson IRT on the manifesto word counts
data(manifesto)
lout <- poisIRT(.rc = manifesto$data.manif,
                i = 0:(ncol(manifesto$data.manif)-1),
                NI=ncol(manifesto$data.manif),
                .starts = manifesto$starts.manif,
                .priors = manifesto$priors.manif,
                .control = {list(
                  threads = 1,
                  verbose = TRUE,
                  thresh = 1e-6,
                  maxit=1000)})
plot(lout,legis.names=colnames(manifesto$data.manif))
#dynIRT: dynamic IRT on the Martin-Quinn Supreme Court data
#dynIRT requires different code to loop over the T time points and produce a facet.grid
#dynIRT objects do not inherit from emIRT objects, so the function has to be called explicitly
data("mq_data")
lout <- dynIRT(.data = mq_data$data.mq,
               .starts = mq_data$cur.mq,
               .priors = mq_data$priors.mq,
               .control = {list(
                 threads = 1,
                 verbose = TRUE,
                 thresh = 1e-6,
                 maxit=500)})
lout <- boot_emIRT(lout,Ntrials = 10,.data = mq_data$data.mq,
                   .starts = mq_data$cur.mq,
                   .priors = mq_data$priors.mq,
                   .control = {list(
                     threads = 1,
                     verbose = TRUE,
                     thresh = 1e-6,
                     maxit=500)})
# Function can only plot max six facets of time points. It will by default select 6 equally-spaced time points
outobj <- plot.emIRT(lout,legis.names=row.names(mq_data$data.mq$rc),timelabels=as.character(1937:2013))
outobj <- outobj + geom_vline(xintercept=0)
plot(outobj)
# You can add time point labels and specify particular time points to plot
plot.emIRT(lout,legis.names=row.names(mq_data$data.mq$rc),timelabels=as.character(1937:2013),timepoints=c('1937','1938',
                                                                                                          '1939','1940',
                                                                                                          '1941','1942',
                                                                                                          '1943','1944'))
# Also select a judge to see what that judge has done over time
plot.emIRT(lout,legis.names=row.names(mq_data$data.mq$rc),timelabels=as.character(1937:2013),
           timepoints=as.character(1994:2004),
           subset_name=c('Rehnquist','Breyer'),
           subset_type='individual')
| /R_Scripts/emIRT_plot_test.R | no_license | saudiwin/ARP_Research | R | false | false | 5,073 | r | # Test the plotting package on each of the emIRT functions
# Use the examples from the help files
source('emIRT_graph.R')
# binIRT
data(s109)
rc <- convertRC(s109)
p <- makePriors(rc$n, rc$m, 1)
s <- getStarts(rc$n, rc$m, 1)
lout <- binIRT(.rc = rc,
.starts = s,
.priors = p,
.control = {
list(threads = 1,
verbose = FALSE,
thresh = 1e-6)})
plot(lout,rc_data=rc)
# Add CIs
lout <- boot_emIRT(lout, .data = rc, .starts = s, .priors = p,
.control = list(threads = 1, verbose = FALSE, thresh = 1e-06), Ntrials=10, verbose=2)
plot(lout,rc_data=rc)
#show only republicans
plot(lout,rc_data=rc,subset_name='R')
#show only most liberal/most conservative senators.
plot(lout,rc_data=rc,subset_name=c('SESSIONS (R AL)','BOXER (D CA)'),subset_type='individual')
#adjust position of names for senators
plot(lout,rc_data=rc,hjust_bottom=-1,hjust_top=2)
# Function produces a ggplot object, so it can be further modified
outobj <- plot(lout,rc_data=rc)
outobj <- outobj + geom_vline(xintercept=0)
plot(outobj)
#hierIRT
data(dwnom)
lout <- hierIRT(.data = dwnom$data.in,
.starts = dwnom$cur,
.priors = dwnom$priors,
.control = {list(
threads = 2,
verbose = TRUE,
thresh = 1e-4,
maxit=200,
checkfreq=1)})
# Note that with this data in which different legislators have multiple observations, ggplot2
# will put all observations for each legislator on the same row
plot(lout,legis.names=dwnom$legis$name,parties=dwnom$nomres$party)
plot(lout,legis.names=dwnom$legis$name,parties=dwnom$nomres$party,subset_name='HATCH ',subset_type='individual')
#networkIRT
data(ustweet)
lout <- networkIRT(.y = ustweet$data,
.starts = ustweet$starts,
.priors = ustweet$priors,
.control = {list(verbose = TRUE,
maxit = 100,
convtype = 2,
thresh = 1e-6,
threads = 1)},
.anchor_item = 43)
plot(lout,legis.names=colnames(ustweet$data))
#ordIRT
data(AsahiTodai)
out.varinf <- ordIRT(.rc = AsahiTodai$dat.all, .starts = AsahiTodai$start.values,
.priors = AsahiTodai$priors, .D = 1,
.control = {list(verbose = TRUE,
thresh = 1e-6, maxit = 500)})
plot(out.varinf)
#poisIRT
data(manifesto)
lout <- poisIRT(.rc = manifesto$data.manif,
i = 0:(ncol(manifesto$data.manif)-1),
NI=ncol(manifesto$data.manif),
.starts = manifesto$starts.manif,
.priors = manifesto$priors.manif,
.control = {list(
threads = 1,
verbose = TRUE,
thresh = 1e-6,
maxit=1000)})
plot(lout,legis.names=colnames(manifesto$data.manif))
#dynIRT
#dynIRT requires different code to loop over the T time points and produce a facet.grid
#dynIRT objects do not inherit from emIRT objects, so the function has to be called explicitly
data("mq_data")
lout <- dynIRT(.data = mq_data$data.mq,
.starts = mq_data$cur.mq,
.priors = mq_data$priors.mq,
.control = {list(
threads = 1,
verbose = TRUE,
thresh = 1e-6,
maxit=500)})
lout <- boot_emIRT(lout,Ntrials = 10,.data = mq_data$data.mq,
.starts = mq_data$cur.mq,
.priors = mq_data$priors.mq,
.control = {list(
threads = 1,
verbose = TRUE,
thresh = 1e-6,
maxit=500)})
# Function can only plot max six facets of time points. It will by default select 6 equally-spaced time points
outobj <- plot.emIRT(lout,legis.names=row.names(mq_data$data.mq$rc),timelabels=as.character(1937:2013))
outobj <- outobj + geom_vline(xintercept=0)
plot(outobj)
# You can add time point labels and specify particular time points to plot
plot.emIRT(lout,legis.names=row.names(mq_data$data.mq$rc),timelabels=as.character(1937:2013),timepoints=c('1937','1938',
'1939','1940',
'1941','1942',
'1943','1944'))
# Also select a judge to see what that judge has done over time
plot.emIRT(lout,legis.names=row.names(mq_data$data.mq$rc),timelabels=as.character(1937:2013),
timepoints=as.character(1994:2004),
subset_name=c('Rehnquist','Breyer'),
subset_type='individual')
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lineSegmentsCRSConversion.R
\name{lineSegmentsCRSConversion}
\alias{lineSegmentsCRSConversion}
\title{Line segments Coordinate Reference System conversion}
\usage{
lineSegmentsCRSConversion(x)
}
\description{
Line segment conversion from an angles-length-centers representation to endpoint coordinates
}
\examples{
ae <- circular(c(0, 45, 90, 135),units="degrees",
template="geographics")
le <- c(2, sqrt(2) * 3, 3, sqrt(2) * 3)
ends <- data.frame(x0 = c(0, 1, 1, -1),
y0 = c(1, 1, 0, 1),
x1 = c(0, 4, 4, -4),
y1 = c(3, 4, 0, 4)
)
xe <- data.frame(a = ae, l = le,
x = (ends$x0 + ends$x1) / 2,
y = (ends$y0 + ends$y1) / 2)
plotSegments(
segment=list( g1 = list(x0 = ends$x0, y0 = ends$y0,
x1 = ends$x1, y1 = ends$y1)),
asp=1, main="Segments")
points.default(xe$x, xe$y, pch=20) # adding the midpoints to the current plot
print(ends)
lineSegmentsCRSConversion(x = xe)
}
\references{
equation 1.78, page 56 of D:/IMP/Transferencia/ReporteMetodologiasDFN_FmendozaT.pdf
}
| /man/lineSegmentsCRSConversion.Rd | no_license | mathphysmx/percolation | R | false | true | 1,173 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lineSegmentsCRSConversion.R
\name{lineSegmentsCRSConversion}
\alias{lineSegmentsCRSConversion}
\title{Line segments Coordinate Reference System conversion}
\usage{
lineSegmentsCRSConversion(x)
}
\description{
Line segments conversion from a angles-length-centers to endpoints coordinates
}
\examples{
ae <- circular(c(0, 45, 90, 135),units="degrees",
template="geographics")
le <- c(2, sqrt(2) * 3, 3, sqrt(2) * 3)
ends <- data.frame(x0 = c(0, 1, 1, -1),
y0 = c(1, 1, 0, 1),
x1 = c(0, 4, 4, -4),
y1 = c(3, 4, 0, 4)
)
xe <- data.frame(a = ae, l = le,
x = (ends$x0 + ends$x1) / 2,
y = (ends$y0 + ends$y1) / 2)
plotSegments(
segment=list( g1 = list(x0 = ends$x0, y0 = ends$y0,
x1 = ends$x1, y1 = ends$y1)),
asp=1, main="Segments")
points.default(xe$x, xe$y, pch=20) # adding the midpoints to the current plot
print(ends)
lineSegmentsCRSConversion(x = xe)
}
\references{
equation 1.78, page 56 of D:/IMP/Transferencia/ReporteMetodologiasDFN_FmendozaT.pdf
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/postSimPlots.R
\docType{data}
\name{multimodalDat}
\alias{multimodalDat}
\title{Plot data from simulated expression}
\format{
Object of class \code{"gtable"}.
}
\usage{
data("multimodalDat")
}
\description{
This data is used in the vignette to demonstrate the flexibility of the
Dino model to smoothly estimate arbitrary latent multimodal expression
distributions. These data are intended for internal use only.
}
\examples{
data("multimodalDat")
}
\keyword{datasets}
| /man/multimodalDat.Rd | no_license | JBrownBiostat/Dino | R | false | true | 547 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/postSimPlots.R
\docType{data}
\name{multimodalDat}
\alias{multimodalDat}
\title{Plot data from simulated expression}
\format{
Object of class \code{"gtable"}.
}
\usage{
data("multimodalDat")
}
\description{
This data is used in the vignette to demonstrate the flexibility of the
Dino model to smoothly estimate arbitrary latent multimodal expression
distributions. These data are intended for internal use only.
}
\examples{
data("multimodalDat")
}
\keyword{datasets}
|
#'generic call for reporting the parameter estimates from different regression
#'models
#'
#'Parameter estimates, confidence intervals, and p-values from regression
#'models. Results are presented in three forms, numeric matrix, character
#' matrix, and individual strings. The character matrix and strings are
#' intended to be used when knitting with LaTeX.
#'
#'%% ~~ If necessary, more details than the description above ~~
#'
#'@param fit a lm, glm, coxph, or survfit object
#'@param alpha significance level, 100(1-alpha)% CIs will be generated
#'@param \dots arguments to pass to params_frmtr as noted in the following
#'@param param if NULL (default) then a full matrix of all coefficients will be
#'returned. A character representation of the parameters of interest can be
#'returned if specified.
#'@param digits number of digits after the decimal point, included trailing
#'zeros, to print numbers to: see \code{\link{frmt}}
#'@param pdigits number of digits to format p-values: see \code{\link{frmtp}}
#'@param show.ci logical, return confidence intervals
#'@param show.pval logical, return the p-values
#'@param alpha significance level, reporting 100(1-alpha)% CIs
#'@param fun function for transforming results. Particularly useful is
#' \code{fun = exp} when working with logistic regression models, for example.
#'@param show.equal.sign passed to \code{frmtp}
#'@param unit can be added to the strings returned such that the string could be
#' xx mg (95% CI: yy, zz; p-value = 0.pppp) instead of just
#' xx (95% CI: yy, zz; p-value = 0.pppp)
#'@param big.mark passed to frmt
#'@param small.mark passed to frmt
#'@author Peter DeWitt
#'@seealso \code{\link{params_frmtr}}
#'@keywords regression results
#'@examples
#' fit <- lm(mpg ~ wt + cyl, data = mtcars)
#' params(fit)
#' params(fit, param = "wt")
#'
#' ## logistic regression
#' fit <- glm(I(mpg > 25) ~ wt + cyl, data = mtcars,
#' family = binomial(link = "logit"))
#' # log odds
#' params(fit)
#' # odds ratios
#' params(fit, fun = exp)
#'
#' @rdname params
#' @export params
# S3 generic for extracting formatted parameter estimates from a fitted
# regression model.  Dispatches on the class of `fit` to the matching
# method (lm, glm, coxph, survfit); `alpha` and `...` are consumed there.
params <- function(fit, alpha = getOption("qwraps.alpha", 0.05), ...) {
  UseMethod("params")
}
#' @rdname params
#' @method params coxph
#' @S3method params coxph
params.coxph <-
  function(fit, alpha = getOption("qwraps.alpha", 0.05), ...)
{
  # Drop the exp(coef) column from the Cox model summary; the remaining
  # four columns are estimate, se, z, p-value (assumes the standard
  # coef / exp(coef) / se(coef) / z / p layout of summary.coxph).
  out <- matrix(summary(fit)$coef[, -2], ncol = 4)
  # Replace the se and z columns with Wald confidence limits built from
  # normal quantiles, yielding: estimate, lower, upper, p-value.
  half_width <- qnorm(1 - alpha / 2) * out[, 2]
  lower <- out[, 1] - half_width
  upper <- out[, 1] + half_width
  out[, 2] <- lower
  out[, 3] <- upper
  # Delegate formatting (digits, CIs, p-values) to the shared formatter.
  params_frmtr(out, ...)
}
#' @rdname params
#' @method params glm
#' @S3method params glm
params.glm <-
  function(fit, alpha = getOption("qwraps.alpha", 0.05), ...)
{
  # Coefficient table from summary.glm (estimate, se, statistic, p).
  out <- summary(fit)$coef
  # Wald confidence limits on the link scale using t quantiles with the
  # model's residual degrees of freedom; se and statistic columns are
  # overwritten, giving: estimate, lower, upper, p-value.
  half_width <- qt(1 - alpha / 2, df.residual(fit)) * out[, 2]
  out[, 3] <- out[, 1] + half_width
  out[, 2] <- out[, 1] - half_width
  # Shared formatter handles digits, CI display, and p-value display.
  params_frmtr(out, ...)
}
#' @rdname params
#' @method params lm
#' @S3method params lm
params.lm <-
  function(fit, alpha = getOption("qwraps.alpha", 0.05), ...)
{
  # Coefficient table from summary.lm (estimate, se, t, p).
  coefs <- summary(fit)$coef
  # Two-sided t critical values for a 100 * (1 - alpha)% interval.
  crit <- qt(c(alpha / 2, 1 - alpha / 2), df.residual(fit))
  # n x 2 matrix of (lower, upper) limits; overwrite the se and t
  # columns so the result is: estimate, lower, upper, p-value.
  ci <- coefs[, 1] + outer(coefs[, 2], crit)
  coefs[, 2] <- ci[, 1]
  coefs[, 3] <- ci[, 2]
  # Shared formatter handles digits, CI display, and p-value display.
  params_frmtr(coefs, ...)
}
#' @rdname params
#' @method params survfit
#' @S3method params survfit
params.survfit <-
  function(fit, alpha = getOption("qwraps.alpha", 0.05), ...)
{
  # `1 - alpha` is a floating point computation, so an exact `!=` against
  # the stored confidence level can spuriously trigger a refit; compare
  # with a tolerance instead.
  if (!isTRUE(all.equal(1 - alpha, fit$conf.int))) {
    warning("Refitting survfit with requested confidence level")
    fit <- update(fit, conf.int = 1 - alpha)
  }
  # Pull median survival and its confidence limits from the summary
  # table (columns 5:7 -- presumably median, lower CL, upper CL; TODO
  # confirm against summary.survfit); the 7th column is duplicated and
  # then blanked so the matrix has the 4-column shape params_frmtr expects.
  rtn <- summary(fit)$table[, c(5:7, 7)]
  rtn[, 4] <- NA # placeholder p-value column; needed only for correct dims in params.frmt
  # NOTE(review): the original code assigned a local `show.pval <- FALSE`
  # here, which had no effect (never used).  The dead assignment has been
  # removed; to actually suppress p-values, pass `show.pval = FALSE` at
  # the call site so it reaches params_frmtr via `...`.
  return(params_frmtr(rtn, ...))
}
| /R/params.R | no_license | dewittpe/qwraps | R | false | false | 3,660 | r | #'generic call for reporting the parameter estiamtes from different regression
#'models
#'
#'Parameter estiamtes, confidence intervals, and p-values form regression
#'models. Results are presented in three forms, numeric matrix, character
#' matrix, and individual strings. The character matrix and strings are
#' intended to be used when knitting with LaTeX.
#'
#'%% ~~ If necessary, more details than the description above ~~
#'
#'@param fit a lm, glm, coxph, or survfit object
#'@param alpha significance level, 100(1-alpha)% CIs will be generated
#'@param \dots arguments to pass to params_frmtr as noted in the following
#'@param param if NULL (default) then a full matrix of of all coeffients will be
#'returned. A character represtation of the parameters of interest can be
#'returned if specified.
#'@param digits number of digits after the decimal point, included trailing
#'zeros, to print numbers to: see \code{\link{frmt}}
#'@param pdigits number of digits to format p-values: see \code{\link{frmtp}}
#'@param show.ci logical, return confidence intervals
#'@param show.pval logical, return the p-values
#'@param alpha significant level, reporting 100(1-alpha)% CIs
#'@param fun funciton for transforming results. Particularly useful is
#' \code{fun = exp} when working with logisitic regression models, for example.
#'@param show.equal.sign passed to \code{frmtp}
#'@param unit can be added to the strings returned such that the string could be
#' xx mg (95% CI: yy, zz; p-value = 0.pppp) instead of just
#' xx (95% CI: yy, zz; p-vaue = 0.pppp)
#'@param big.mark passed to frmt
#'@param small.mark passed to frmt
#'@author Peter DeWitt
#'@seealso \code{\link{params_frmtr}}
#'@keywords regression results
#'@examples
#' fit <- lm(mpg ~ wt + cyl, data = mtcars)
#' params(fit)
#' params(fit, param = "wt")
#'
#' ## logisitic regression
#' fit <- glm(I(mgp > 25) ~ wt + cyl, data = mtcars,
#' family = binomial(link = "logit"))
#' # log odds
#' params(fit)
#' # odds ratios
#' params(fit, fun = exp)
#'
#' @rdname params
#' @export params
params <-
function(fit, alpha = getOption("qwraps.alpha", 0.05), ...){
UseMethod("params")
}
#' @rdname params
#' @method params coxph
#' @S3method params coxph
params.coxph <-
function(fit, alpha = getOption("qwraps.alpha", 0.05), ...)
{
rtn <- matrix(summary(fit)$coef[, -2], ncol = 4)
rtn[, 3] <- rtn[, 1] + qnorm(1 - alpha / 2) * rtn[, 2]
rtn[, 2] <- rtn[, 1] + qnorm(alpha / 2) * rtn[, 2]
return(params_frmtr(rtn, ...))
}
#' @rdname params
#' @method params glm
#' @S3method params glm
params.glm <-
function(fit, alpha = getOption("qwraps.alpha", 0.05), ...)
{
rtn <- summary(fit)$coef
rtn[, 3] <- rtn[, 1] + qt(1 - alpha / 2, df.residual(fit)) * rtn[, 2]
rtn[, 2] <- rtn[, 1] + qt(alpha / 2, df.residual(fit)) * rtn[, 2]
return(params_frmtr(rtn, ...))
}
#' @rdname params
#' @method params lm
#' @S3method params lm
params.lm <-
function(fit, alpha = getOption("qwraps.alpha", 0.05), ...)
{
rtn <- summary(fit)$coef
rtn[, 3] <- rtn[, 1] + qt(1 - alpha / 2, df.residual(fit)) * rtn[, 2]
rtn[, 2] <- rtn[, 1] + qt(alpha / 2, df.residual(fit)) * rtn[, 2]
return(params_frmtr(rtn, ...))
}
#' @rdname params
#' @method params survfit
#' @S3method params survfit
params.survfit <-
function(fit, alpha = getOption("qwraps.alpha", 0.05), ...)
{
if (1 - alpha != fit$conf.int){
warning("Refitting survfit with requested confidence level")
fit <- update(fit, conf.int = 1 - alpha)
}
rtn <- summary(fit)$table[, c(5:7, 7)]
rtn[, 4] <- NA # needed only for correct dims in params.frmt
show.pval <- FALSE
return(params_frmtr(rtn, ...))
}
|
source("R/import_books.R")
source("R/import_characters.R")
source("R/import_alias.R")
books %>%
filter(str_detect(line, " Watch"))
# Remove titles from character names
# Replace aliases with names
# Remove dialogue
# Replace pronouns with names??
# Replace titles with names??
#
books %>%
unnest_tokens(bigram, line, token = "ngrams", n = 2) %>%
separate(bigram, c("word1", "word2"), sep = " ") %>%
filter(!word1 %in% stop_words$word) %>%
filter(!word2 %in% stop_words$word) %>%
count(word1, word2, sort = TRUE) %>% View
books %>%
unnest_tokens(trigram, line, token = "ngrams", n = 3) %>%
separate(trigram, c("word1", "word2", "word3"), sep = " ") %>%
filter(!word1 %in% stop_words$word,
!word2 %in% stop_words$word,
!word3 %in% stop_words$word) %>%
count(word1, word2, word3, sort = TRUE) %>% View
#method 1
books %>%
unnest_tokens(ngram, line, token = "ngrams", n = 8) %>%
group_by(book, chapter) %>%
mutate(section = row_number() %/% 3) %>%
ungroup() %>%
View()
#only keep ngrams with names
#reduce the number of columns until there are only collections of 2 columns of names
#Modeling through dialogue?
#remove all non dialogue
#have dialogue | Speaker | Listener data format
#algorithm for finding out who the speakers / listeners are?
#graph network
| /R/notes.R | no_license | visuelledata/malazannetwork | R | false | false | 1,328 | r | source("R/import_books.R")
source("R/import_characters.R")
source("R/import_alias.R")
books %>%
filter(str_detect(line, " Watch"))
# Remove titles from character names
# Replace aliases with names
# Remove dialogue
# Replace pronouns with names??
# Replace titles with names??
#
books %>%
unnest_tokens(bigram, line, token = "ngrams", n = 2) %>%
separate(bigram, c("word1", "word2"), sep = " ") %>%
filter(!word1 %in% stop_words$word) %>%
filter(!word2 %in% stop_words$word) %>%
count(word1, word2, sort = TRUE) %>% View
books %>%
unnest_tokens(trigram, line, token = "ngrams", n = 3) %>%
separate(trigram, c("word1", "word2", "word3"), sep = " ") %>%
filter(!word1 %in% stop_words$word,
!word2 %in% stop_words$word,
!word3 %in% stop_words$word) %>%
count(word1, word2, word3, sort = TRUE) %>% View
#method 1
books %>%
unnest_tokens(ngram, line, token = "ngrams", n = 8) %>%
group_by(book, chapter) %>%
mutate(section = row_number() %/% 3) %>%
ungroup() %>%
View()
#only keep ngrams with names
#reduce the number of columns until there are only collections of 2 columns of names
#Modeling through dialouge?
#remove all non dialogue
#have dialogue | Speaker | Listener data format
#algorithm for finding out who the speakers / listeners are?
#graph network
|
# Astronauts!
library(tidyverse)
library(tidytuesdayR)
library(ggtext)
library(gridExtra)
library(extrafont)
## Get the data ####
tt_data <- tt_load(2020, week = 29)
astronauts <- tt_data$astronauts %>%
mutate(age_at_mission = year_of_mission - year_of_birth)
## Build space theme ####
theme_astro <- function() {
  # Dark "night sky" theme built on top of theme_minimal(): a single deep
  # purple used for background, panel, and (hidden) grid lines, with white
  # text throughout.  Title uses a display face, body text a light face.
  night_sky <- "#1d1330"
  theme_minimal() %+replace%
    theme(
      plot.background  = element_rect(fill = night_sky, colour = night_sky),
      panel.background = element_rect(fill = night_sky, colour = night_sky),
      panel.grid       = element_line(color = night_sky),
      text             = element_text(colour = "white", family = "Corbel Light"),
      plot.title       = element_text(hjust = 0, size = 20, family = "AR DESTINE"),
      plot.subtitle    = element_markdown(hjust = 0, size = 13, lineheight = 1),
      axis.title       = element_text(color = "white", size = 10),
      axis.text        = element_text(color = "white", size = 10),
      axis.ticks       = element_blank()
    )
}
## Add text for annotation bubbles ####
texts <- tibble(
age = c(77, 72, 15, 20),
year = c(2010, 1985, 1968, 2005),
text = c(
"This is not a typo! Meet **John Herschel Glenn Jr.**, who travelled to space aged 77 in 1999. What a legend!",
"1985 was the year that saw the **most astronauts in space**, with a total of 62 on 28 missions.",
"The **two youngest astronauts** were Gherman Titov and Valentina Tereshkova, both aged 26. They each flew only one mission. It would be 1982 before the next female astronaut took to space.",
"**Sergei Krikalev** went on his first of six missions aged 30. Only two astronauts have been on more missions: Franklin R. Chang-Diaz and Jerry L. Ross, who both started their careers in the 1980 NASA-9 selection."),
vjust = c(.5, .5, .5, .5)
)
## Plot it! ####
ggplot(astronauts) +
geom_point(aes(y = year_of_mission, x = age_at_mission, colour = sex,
size = hours_mission, alpha = total_number_of_missions),
show.legend = F) +
scale_colour_manual(values = c(male = "#e1f7fa", female = "#ffa72b")) +
labs(title = "
Ages through Time and Space
",
subtitle = "
**Astronauts have got older, missions have got longer, and starting younger is no guarantee
of going more often.**
Each dot is an astronaut on a mission. The larger the dot, the more hours the mission took,
ranging from 0 to over 10,000 (14 months!). The more transparent the dot, the fewer times
that astronaut went to space.
The slope of age by year of mission is similar for <span style='color:#e1f7fa'>male</span> and <span style='color:#ffa72b'>female</span> astronauts, with a 20-year
time lag.
All this with a few notable exceptions...",
x = "Age at start of mission",
y = "",
caption = "\n\n#TidyTuesday | Graphic: @cararthompson | Data: Mariya Stavnichuk and Tatsuya Corlett") +
xlim(c(10, 85)) +
geom_textbox(data = texts,
aes(age, year,
label = text,
vjust = vjust),
colour = "white",
box.colour = "#1d1330",
size = 3.8,
fill = "#1d1330",
family = "Corbel Light",
maxwidth = unit(8, "lines"),
hjust = .5,
show.legend = F) +
annotate("curve", x = 77, xend = 77, y = 2005, yend = 1999.5, curvature = 0,
size = .75, arrow = arrow(length = unit(2, "mm")), colour = "#938ca1") +
annotate("curve", x = 65, xend = 60, y = 1985, yend = 1985, curvature = 0,
size = .75, arrow = arrow(length = unit(2, "mm")), colour = "#938ca1") +
annotate("curve", x = 21, xend = 34, y = 1963, yend = 1981, curvature = .3,
size = .5, linetype = 2, arrow = arrow(length = unit(3, "mm")), colour = "#ffa72b") +
annotate("curve", x = 21, xend = 26, y = 1970, yend = 1964, curvature = -0.4,
size = .75, arrow = arrow(length = unit(2, "mm")), colour = "#938ca1") +
annotate("curve", x = 25, xend = 30, y = 2000, yend = 1990, curvature = -0.3,
size = .75, arrow = arrow(length = unit(2, "mm")), colour = "#938ca1") +
theme_astro()
# Export to create making-of gif, adapting the approach used by Georgios Karamanis (@geokaramanis)
ggsave(filename = file.path("../making-of/temp", paste0("202007b_astronauts-", format(Sys.time(), "%Y%m%d_%H%M%S"), ".png")),
dpi = 400, width = 10, height = 10)
| /sources/13-temporals/2-change/38-scatter/archetypes/ages-in-space/202007b_astronauts.R | permissive | UN-AVT/kamino-source | R | false | false | 4,415 | r | # Astronauts!
library(tidyverse)
library(tidytuesdayR)
library(ggtext)
library(gridExtra)
library(extrafont)
## Get the data ####
tt_data <- tt_load(2020, week = 29)
astronauts <- tt_data$astronauts %>%
mutate(age_at_mission = year_of_mission - year_of_birth)
## Build space theme ####
theme_astro <- function() {
theme_minimal() %+replace%
theme(plot.background = element_rect(fill = "#1d1330", colour = "#1d1330"),
panel.grid = element_line(color = "#1d1330"),
panel.background = element_rect(fill = "#1d1330", colour = "#1d1330"),
text = element_text(colour = "white", family = "Corbel Light"),
plot.title = element_text(hjust = 0, size = 20, family = "AR DESTINE"),
axis.text = element_text(color = "white", size = 10),
plot.subtitle = element_markdown(hjust = 0, size = 13, lineheight = 1),
axis.title = element_text(color = "white", size = 10),
axis.ticks = element_blank())
}
## Add text for annotation bubbles ####
texts <- tibble(
age = c(77, 72, 15, 20),
year = c(2010, 1985, 1968, 2005),
text = c(
"This is not a typo! Meet **John Herschel Glenn Jr.**, who travelled to space aged 77 in 1999. What a legend!",
"1985 was the year that saw the **most astronauts in space**, with a total of 62 on 28 missions.",
"The **two youngest astronauts** were Gherman Titov and Valentina Tereshkova, both aged 26. They each flew only one mission. It would be 1982 before the next female astronaut took to space.",
"**Sergei Krikalev** went on his first of six missions aged 30. Only two astronauts have been on more missions: Franklin R. Chang-Diaz and Jerry L. Ross, who both started their careers in the 1980 NASA-9 selection."),
vjust = c(.5, .5, .5, .5)
)
## Plot it! ####
ggplot(astronauts) +
geom_point(aes(y = year_of_mission, x = age_at_mission, colour = sex,
size = hours_mission, alpha = total_number_of_missions),
show.legend = F) +
scale_colour_manual(values = c(male = "#e1f7fa", female = "#ffa72b")) +
labs(title = "
Ages through Time and Space
",
subtitle = "
**Astronauts have got older, missions have got longer, and starting younger is no guarantee
of going more often.**
Each dot is an astronaut on a mission. The larger the dot, the more hours the mission took,
ranging from 0 to over 10,000 (14 months!). The more transparent the dot, the fewer times
that astronaut went to space.
The slope of age by year of mission is similar for <span style='color:#e1f7fa'>male</span> and <span style='color:#ffa72b'>female</span> astronauts, with a 20-year
time lag.
All this with a few notable exceptions...",
x = "Age at start of mission",
y = "",
caption = "\n\n#TidyTuesday | Graphic: @cararthompson | Data: Mariya Stavnichuk and Tatsuya Corlett") +
xlim(c(10, 85)) +
geom_textbox(data = texts,
aes(age, year,
label = text,
vjust = vjust),
colour = "white",
box.colour = "#1d1330",
size = 3.8,
fill = "#1d1330",
family = "Corbel Light",
maxwidth = unit(8, "lines"),
hjust = .5,
show.legend = F) +
annotate("curve", x = 77, xend = 77, y = 2005, yend = 1999.5, curvature = 0,
size = .75, arrow = arrow(length = unit(2, "mm")), colour = "#938ca1") +
annotate("curve", x = 65, xend = 60, y = 1985, yend = 1985, curvature = 0,
size = .75, arrow = arrow(length = unit(2, "mm")), colour = "#938ca1") +
annotate("curve", x = 21, xend = 34, y = 1963, yend = 1981, curvature = .3,
size = .5, linetype = 2, arrow = arrow(length = unit(3, "mm")), colour = "#ffa72b") +
annotate("curve", x = 21, xend = 26, y = 1970, yend = 1964, curvature = -0.4,
size = .75, arrow = arrow(length = unit(2, "mm")), colour = "#938ca1") +
annotate("curve", x = 25, xend = 30, y = 2000, yend = 1990, curvature = -0.3,
size = .75, arrow = arrow(length = unit(2, "mm")), colour = "#938ca1") +
theme_astro()
# Export to create making-of gif, adapting the approach used by Georgios Karamanis (@geokaramanis)
ggsave(filename = file.path("../making-of/temp", paste0("202007b_astronauts-", format(Sys.time(), "%Y%m%d_%H%M%S"), ".png")),
dpi = 400, width = 10, height = 10)
|
\name{boa.save}
\alias{boa.save}
\title{Save Session}
\description{
Save the current state of the session lists of MCMC sequences and the global
parameters to a database object.
}
\usage{
boa.save(name, envir = globalenv(), replace = FALSE)
}
\arguments{
\item{name}{Character string giving the name of the object to which the
current session should be saved.}
\item{envir}{The 'environment' to which the object should be saved. For more
information, consult the help documentation in R on the assign() function.}
\item{replace}{Logical value indicating whether object \code{name} should be
replaced if it already exists.}
}
\value{
A logical value indicating that the session was successfully saved to the specified object.
}
\author{Brian J. Smith}
\keyword{utilities}
| /man/boa.save.Rd | no_license | brian-j-smith/boa | R | false | false | 795 | rd | \name{boa.save}
\alias{boa.save}
\title{Save Session}
\description{
Save the current state of the session lists of MCMC sequences and the global
parameters to a database object.
}
\usage{
boa.save(name, envir = globalenv(), replace = FALSE)
}
\arguments{
\item{name}{Character string giving the name of the object to which the
current session should be saved.}
\item{envir}{The 'environment' to which the object should be saved. For more
information, consult the help documentation in R on the assign() function.}
\item{replace}{Logical value indicating whether object \code{name} should be
replaced if it already exists.}
}
\value{
A logical value indicating that the session was successfully saved to the specified object.
}
\author{Brian J. Smith}
\keyword{utilities}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summarize.R
\name{summarize}
\alias{summarize}
\title{Summary}
\usage{
summarize(Data, Group)
}
\arguments{
\item{Data}{a numeric matrix or data frame}
\item{Group}{a vector of factors associated with group structure}
}
\value{
list with the following results:
\item{Global.summary}{ summary of global data}
\item{Group.summary}{ summary of group datasets}
\item{mean.between.data}{ matrix of Group mean}
\item{mean.within.data}{ matrix of group centered data}
}
\description{
Summary of multigroup data in global and group parts
}
\examples{
Data = iris[,-5]
Group = iris[,5]
res = summarize(Data, Group)
}
\seealso{
\code{\link{mgPCA}}, \code{\link{DGPA}}, \code{\link{DCCSWA}},
\code{\link{DSTATIS}}, \code{\link{BGC}},
\code{\link{TBWvariance}}, \code{\link{iris}}
}
| /man/summarize.Rd | no_license | cran/multigroup | R | false | true | 875 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summarize.R
\name{summarize}
\alias{summarize}
\title{Summary}
\usage{
summarize(Data, Group)
}
\arguments{
\item{Data}{a numeric matrix or data frame}
\item{Group}{a vector of factors associated with group structure}
}
\value{
list with the following results:
\item{Global.summary}{ summary of globala data}
\item{Group.summary}{ summary of group datasets}
\item{mean.between.data}{ matrix of Group mean}
\item{mean.within.data}{ matrix of group centered data}
}
\description{
Summary of multigroup data in global and group parts
}
\examples{
Data = iris[,-5]
Group = iris[,5]
res = summarize(Data, Group)
}
\seealso{
\code{\link{mgPCA}}, \code{\link{DGPA}}, \code{\link{DCCSWA}},
\code{\link{DSTATIS}}, \code{\link{BGC}},
\code{\link{TBWvariance}}, \code{\link{iris}}
}
|
library(tidyverse)
library(MASS)
library(ISLR)
library(caret)
dfDefault <- Default
dfDefault %>% dplyr::count(default)
p <- ggplot(dfDefault, aes(balance, fill = default)) +
geom_histogram(binwidth = 500)
p
pl1 <- ggplot(dfDefault, aes(balance, fill = default))
pl1 <- pl1 + geom_density(alpha = 0.2, adjust = 5 )
pl1
# ------------- Break for Bayesian Analysis going back to EDA
pg <- ggplot_build(p)
# this creates an R list, which is a little different data structure:(http://www.r-tutor.com/r-introduction/list)
# basically a way to store a bunch of different objects
pgData <- pg$data[[1]]
pgData %>% dplyr::select(x, density)
pgData <- pgData %>% mutate(prob = (xmax- xmin)*density)
ProbAnalysis = pgData %>% group_by(group, xmax) %>% summarize(GrpProb = sum(prob, na.rm = T))
sum(ProbAnalysis$GrpProb)
# default = 2
p2 = ggplot(ProbAnalysis, aes(xmax, y = GrpProb, fill = factor(group))) +
geom_bar(stat = "identity", position = "dodge")
p2
DefaultPop = filter(ProbAnalysis, group == 1)
# ----
p3 = ggplot(dfDefault, aes(balance)) +
geom_histogram(binwidth = 500)
p3
pg <- ggplot_build(p3)
# this creates an R list, which is a little different data structure:(http://www.r-tutor.com/r-introduction/list)
# basically a way to store a bunch of different objects
pgData <- pg$data[[1]]
pgData %>% dplyr::select(x, density)
pgData <- pgData %>% mutate(prob = (xmax- xmin)*density)
sum(pgData$prob)
pgData %>% filter(xmax == 1250) %>% dplyr::select(xmax, prob)
# -----------------
lda.fit <- lda(default ~ balance, data = dfDefault)
lda.fit
lda.pred <- predict(lda.fit)
dfPred <- data.frame(lda.pred)
dfPred %>% dplyr::count(class)
pl1 <- pl1 + geom_vline(xintercept = mean(lda.fit$means) )
pl1
p <- p + geom_vline(xintercept = mean(lda.fit$means) )
p
# get decision rule (don't worry about doing this - just FYI)
A <- A <- mean(lda.fit$means)
B <- log(lda.fit$prior[2]) - log(lda.fit$prior[1])
s2.k <- t(tapply(dfDefault$balance, dfDefault$default, var)) %*% lda.fit$prior
C <- s2.k/(lda.fit$means[1] - lda.fit$means[2])
dr <- A + B * C
dr
p <- p + geom_vline(xintercept = dr )
p
confusionMatrix(factor(lda.pred$class) , factor(dfDefault$default), positive = "Yes")
# look at the data again
firstAnalysis <- as_tibble(cbind(as.character(lda.pred$class), as.character(dfDefault$default), lda.pred$posterior))
firstAnalysis <- cbind(firstAnalysis,dplyr::select(dfDefault, student, balance, income))
write_csv(firstAnalysis, "firstAnalysis.csv")
# let's adjust the threshold
pred <- rep('No', nrow(dfDefault))
pred[lda.pred$posterior[,2] >= 0.2] <- 'Yes'
dfPred <- data.frame(pred)
dfPred %>% dplyr::count(pred)
confusionMatrix(factor(pred) , factor(dfDefault$default), positive = "Yes")
# now let's do this for real
testSplit <- .4
totalSampleSize <- nrow(dfDefault)
testSampleSize <- round(totalSampleSize*testSplit)
trainSampleSize <- totalSampleSize - testSampleSize
tindexes <- sample(1:nrow(dfDefault), testSampleSize)
indexes <- sample(1:nrow(dfDefault[-tindexes,]), trainSampleSize)
xTrain <- dfDefault[indexes, ]
xTest <- dfDefault[tindexes,]
#------------------- SMOTE (synthetic minority oversampling) ----------------#
library(DMwR)
# Class balance before resampling.
prop.table(table(xTrain$default))
# Rebalance the training set: perc.over controls minority oversampling,
# perc.under controls majority downsampling.  SMOTE requires the outcome
# to be a factor.
smoteData <- SMOTE(default ~ ., data = xTrain, perc.over = 350, perc.under=130) # SMOTE only works with factors
# Class balance after resampling.
prop.table(table(smoteData$default))
# Refit LDA on the balanced training data; evaluate on the untouched test set.
lda.fit <- lda(default ~ ., smoteData)
lda.pred <- predict(lda.fit, xTest)
confusionMatrix((lda.pred$class), factor(xTest$default), positive = "Yes")
#----------------------------------
lda.fit <- lda(default ~ balance, xTrain)
lda.fit
lda.pred <- predict(lda.fit, xTest)
# get decision rule
A <- A <- mean(lda.fit$means)
B <- log(lda.fit$prior[2]) - log(lda.fit$prior[1])
s2.k <- t(tapply(xTest$balance, xTest$default, var)) %*% lda.fit$prior
C <- s2.k/(lda.fit$means[1] - lda.fit$means[2])
dr <- A + B * C
dr
p <- p + geom_vline(xintercept = dr, color = 'red' )
p
# same place
confusionMatrix((lda.pred$class), factor(xTest$default), positive = "Yes")
# add more predictors (p)
# remember, visualization is gone in p>2
lda.fit <- lda(default ~ ., xTrain)
lda.fit
lda.pred <- predict(lda.fit, xTest)
confusionMatrix((lda.pred$class), factor(xTest$default), positive = "Yes")
# get back original data and look at it:
finalAnalysis <- as_tibble(cbind(as.character(lda.pred$class), as.character(xTest$default), lda.pred$posterior))
finalAnalysis <- cbind(finalAnalysis,dplyr::select(xTest, student, balance, income))
write_csv(finalAnalysis, "finalAnalysis.csv")
| /DA2_LDA w Smote Section.R | no_license | CollinCroskery/Foundations | R | false | false | 4,530 | r | library(tidyverse)
library(MASS)
library(ISLR)
library(caret)
dfDefault <- Default
dfDefault %>% dplyr::count(default)
p <- ggplot(dfDefault, aes(balance, fill = default)) +
geom_histogram(binwidth = 500)
p
pl1 <- ggplot(dfDefault, aes(balance, fill = default))
pl1 <- pl1 + geom_density(alpha = 0.2, adjust = 5 )
pl1
# ------------- Break for Bayesian Analysis going back to EDA
pg <- ggplot_build(p)
# this creates an R list, which is a little different data structure:(http://www.r-tutor.com/r-introduction/list)
# basically a way to store a bunch of different objects
pgData <- pg$data[[1]]
pgData %>% dplyr::select(x, density)
pgData <- pgData %>% mutate(prob = (xmax- xmin)*density)
ProbAnalysis = pgData %>% group_by(group, xmax) %>% summarize(GrpProb = sum(prob, na.rm = T))
sum(ProbAnalysis$GrpProb)
# default = 2
p2 = ggplot(ProbAnalysis, aes(xmax, y = GrpProb, fill = factor(group))) +
geom_bar(stat = "identity", position = "dodge")
p2
DefaultPop = filter(ProbAnalysis, group == 1)
# ----
p3 = ggplot(dfDefault, aes(balance)) +
geom_histogram(binwidth = 500)
p3
pg <- ggplot_build(p3)
# this creates an R list, which is a little different data structure:(http://www.r-tutor.com/r-introduction/list)
# basically a way to store a bunch of different objects
pgData <- pg$data[[1]]
pgData %>% dplyr::select(x, density)
pgData <- pgData %>% mutate(prob = (xmax- xmin)*density)
sum(pgData$prob)
pgData %>% filter(xmax == 1250) %>% dplyr::select(xmax, prob)
# -----------------
# Fit a one-predictor LDA (balance only) on the FULL data set (no split yet).
lda.fit <- lda(default ~ balance, data = dfDefault)
lda.fit
lda.pred <- predict(lda.fit)
dfPred <- data.frame(lda.pred)
dfPred %>% dplyr::count(class)
# Mark the midpoint of the two class means on both the density and histogram plots.
pl1 <- pl1 + geom_vline(xintercept = mean(lda.fit$means) )
pl1
p <- p + geom_vline(xintercept = mean(lda.fit$means) )
p
# get decision rule (don't worry about doing this - just FYI)
# FIX: was 'A <- A <- mean(...)' -- a duplicated (harmless but confusing) assignment.
A <- mean(lda.fit$means)
B <- log(lda.fit$prior[2]) - log(lda.fit$prior[1])
# Pooled within-class variance weighted by the class priors.
s2.k <- t(tapply(dfDefault$balance, dfDefault$default, var)) %*% lda.fit$prior
C <- s2.k/(lda.fit$means[1] - lda.fit$means[2])
dr <- A + B * C
dr
p <- p + geom_vline(xintercept = dr )
p
confusionMatrix(factor(lda.pred$class) , factor(dfDefault$default), positive = "Yes")
# look at the data again
firstAnalysis <- as_tibble(cbind(as.character(lda.pred$class), as.character(dfDefault$default), lda.pred$posterior))
firstAnalysis <- cbind(firstAnalysis,dplyr::select(dfDefault, student, balance, income))
write_csv(firstAnalysis, "firstAnalysis.csv")
# Lower the posterior threshold from the default 0.5 to 0.2 so more borderline
# customers are flagged as defaulters (trades precision for recall).
pred <- rep('No', nrow(dfDefault))
pred[lda.pred$posterior[,2] >= 0.2] <- 'Yes'
dfPred <- data.frame(pred)
dfPred %>% dplyr::count(pred)
confusionMatrix(factor(pred) , factor(dfDefault$default), positive = "Yes")
# now let's do this for real: hold out 40% of the rows as a test set
testSplit <- .4
totalSampleSize <- nrow(dfDefault)
testSampleSize <- round(totalSampleSize*testSplit)
trainSampleSize <- totalSampleSize - testSampleSize
tindexes <- sample(seq_len(totalSampleSize), testSampleSize)
# BUG FIX: the original sampled from 1:nrow(dfDefault[-tindexes,]) and then
# indexed the FULL data frame, so test rows could leak into the training set.
# The training set is simply every row not chosen for testing.
indexes <- setdiff(seq_len(totalSampleSize), tindexes)
xTrain <- dfDefault[indexes, ]
xTest <- dfDefault[tindexes,]
#------------------- SMOTE ----------------#
library(DMwR)
# Class balance before resampling.
prop.table(table(xTrain$default))
smoteData <- SMOTE(default ~ ., data = xTrain, perc.over = 350, perc.under=130) # SMOTE only works with factors
# Class balance after SMOTE oversampling/undersampling.
prop.table(table(smoteData$default))
# Refit LDA on the rebalanced training data; evaluate on the untouched test set.
lda.fit <- lda(default ~ ., smoteData)
lda.pred <- predict(lda.fit, xTest)
confusionMatrix((lda.pred$class), factor(xTest$default), positive = "Yes")
#----------------------------------
# Refit the single-predictor (balance-only) LDA on the training split and
# score the held-out test set.
lda.fit <- lda(default ~ balance, xTrain)
lda.fit
lda.pred <- predict(lda.fit, xTest)
# get decision rule
# FIX: was 'A <- A <- mean(...)' -- a duplicated (harmless but confusing) assignment.
A <- mean(lda.fit$means)
B <- log(lda.fit$prior[2]) - log(lda.fit$prior[1])
# Pooled within-class variance weighted by the class priors.
s2.k <- t(tapply(xTest$balance, xTest$default, var)) %*% lda.fit$prior
C <- s2.k/(lda.fit$means[1] - lda.fit$means[2])
dr <- A + B * C
dr
# Overlay the decision boundary on the histogram built earlier in the script.
p <- p + geom_vline(xintercept = dr, color = 'red' )
p
# same place
confusionMatrix((lda.pred$class), factor(xTest$default), positive = "Yes")
# add more predictors (p)
# remember, visualization is gone in p>2
lda.fit <- lda(default ~ ., xTrain)
lda.fit
lda.pred <- predict(lda.fit, xTest)
confusionMatrix((lda.pred$class), factor(xTest$default), positive = "Yes")
# get back original data and look at it:
finalAnalysis <- as_tibble(cbind(as.character(lda.pred$class), as.character(xTest$default), lda.pred$posterior))
finalAnalysis <- cbind(finalAnalysis,dplyr::select(xTest, student, balance, income))
write_csv(finalAnalysis, "finalAnalysis.csv")
|
#' Track intermediate points
#'
#' Calculate great circle intermediate points on longitude, latitude input vectors. A
#' spherical model is used, from the geosphere package.
#'
#' This function returns a list of data frames, with a data frame of interpolated locations
#' for every interval between input locations. There is a final empty data frame to ensure
#' the list is the same length as the inputs. See embedded usage of the tidyr function 'unnest()'
#' for ease of use.
#'
#' To use on multiple track ids, use a grouped data frame with tidyverse code like
#' `inter <- data %>% group_by(id) %>%
#' mutate(inter = track_intermediate(lon, lat, date = , distance = )`.
#'
#'
#' Then, un-nest this result for further use (the 'inter' above retains the information
#' about the parent locations for custom usage if needed), so the final location of each
#' group has invalid intermediates:
#' `dd <- inter %>% slice(-1) %>% unnest()`
#' @param x longitude
#' @param y latitude
#' @param date optional input date-time in POSIXct
#' @param distance optional minimum distance (metres) between interpolated points
#' @param duration optional minimum duration (seconds) between interpolated point,
#' if set then `distance` must be `NULL` and `date` must be input
#' @return a list of data frames of intermediate points (for use with `unnest()` from tidyr)
#' @export
#' @importFrom stats setNames
#' @examples
#' track_intermediate(trips0$x[1:10], trips0$y[1:10], distance = 15000)
#'
#' track_intermediate(trips0$x[1:10], trips0$y[1:10], date = trips0$date,
#' distance = 1500)
#'
#' inter_time <- track_intermediate(trips0$x[1:10], trips0$y[1:10],
#' date = trips0$date, duration = 1800)
#' \dontrun{
#' ## run with full workflow to expand into a new data frame with
#' ## `int_x`, `int_y`, and (optional) `int_date`
#' if (requireNamespace("tidyr") && requireNamespace("dplyr")) {
#' tr1 <- trips0[seq(1, nrow(trips0), by = 30), ]
#' dd <- tr1 %>% group_by(id) %>%
#' mutate(inter = track_intermediate(x, y, date = date, distance = 150000)) %>%
#' tidyr::unnest()
#' plot(dd$int_date, dd$int_y, pch = ".", cex = 2, main = "equidistant in space")
#' abline(v = tr1$date)
#'
#' dd1 <- tr1 %>% group_by(id) %>%
#' mutate(inter = track_intermediate(x, y, date = date, duration = 3600 * 12)) %>%
#' tidyr::unnest()
#' plot(dd1$int_date, dd1$int_y, pch = ".", cex = 2, main = "equispaced in time")
#' abline(v = tr1$date)
#' }
#' }
track_intermediate <- function(x, y, date = NULL, distance = NULL, duration = NULL) {
  ## Number of input locations; there are n - 1 segments between them.
  n <- length(x)
  if (!is.null(distance) && !is.null(duration)) stop("'distance' or 'duration' (or both) must be NULL")
  if (is.null(distance)) {
    ## no spacing criterion given: default to 15 intermediate points per segment
    npoints <- rep(15, n - 1)
  } else {
    ## enough points so consecutive ones are at most 'distance' apart (minimum 3);
    ## the leading element is dropped ([-1L]) to align per-segment counts
    npoints <- pmax(3, ceiling(track_distance(x, y) / distance))[-1L]
  }
  if (!is.null(duration)) {
    ## FIX: message previously ended in the literal "/n" instead of a newline
    if (is.null(date)) stop("if 'duration' is not NULL, 'date' must also be given\n")
    ## override with a time-based point count (equispaced in time)
    npoints <- pmax(3, ceiling(track_time(date) / duration))[-1L]
  }
  ## Great-circle interpolation between each pair of consecutive points.
  listm <- geosphere::gcIntermediate(cbind(x[-n], y[-n]), cbind(x[-1], y[-1]),
                                     n = npoints, addStartEnd = TRUE, sp = FALSE)
  ## gcIntermediate returns a bare matrix (not a list) when there is one segment.
  if (n == 2) listm <- list(listm)
  listm <- lapply(listm, as.data.frame)
  ## sometimes we get V1, V2 as column names; normalise to lon/lat
  listm <- lapply(listm, function(ddd) setNames(ddd, c("lon", "lat")))
  funa <- function(a) data.frame(int_x = a[["lon"]], int_y = a[["lat"]],
                                 int_date = a[["int_date"]])
  runfun <- function(a) data.frame(int_x = a[["lon"]], int_y = a[["lat"]])
  ## actual point counts can differ from the requested npoints
  actual_npoints <- unlist(lapply(listm, nrow))
  if (!is.null(date)) {
    runfun <- funa
    for (i in seq_along(listm)) {
      ## spread the interval's time span evenly over its interpolated points
      dts <- seq(date[i], date[i + 1], length.out = actual_npoints[i])
      listm[[i]]$int_date <- dts
    }
  }
  ## Trailing empty data frame keeps the output the same length as the input
  ## (see the roxygen notes about unnest()).
  c(lapply(listm, runfun), list(data.frame()))
}
| /R/track_intermediate.R | no_license | ianjonsen/traipse | R | false | false | 4,060 | r | #' Track intermediate points
#'
#' Calculate great circle intermediate points on longitude, latitude input vectors. A
#' spherical model is used, from the geosphere package.
#'
#' This function returns a list of data frames, with a data frame of interpolated locations
#' for every interval between input locations. There is a final empty data frame to ensure
#' the list is the same length as the inputs. See embedded usage of the tidyr function 'unnest()'
#' for ease of use.
#'
#' To use on multiple track ids, use a grouped data frame with tidyverse code like
#' `inter <- data %>% group_by(id) %>%
#' mutate(inter = track_intermediate(lon, lat, date = , distance = )`.
#'
#'
#' Then, un-nest this result for further use (the 'inter' above retains the information
#' about the parent locations for custom usage if needed), so the final location of each
#' group has invalid intermediates:
#' `dd <- inter %>% slice(-1) %>% unnest()`
#' @param x longitude
#' @param y latitude
#' @param date optional input date-time in POSIXct
#' @param distance optional minimum distance (metres) between interpolated points
#' @param duration optional minimum duration (seconds) between interpolated point,
#' if set then `distance` must be `NULL` and `date` must be input
#' @return a list of data frames of intermediate points (for use with `unnest()` from tidyr)
#' @export
#' @importFrom stats setNames
#' @examples
#' track_intermediate(trips0$x[1:10], trips0$y[1:10], distance = 15000)
#'
#' track_intermediate(trips0$x[1:10], trips0$y[1:10], date = trips0$date,
#' distance = 1500)
#'
#' inter_time <- track_intermediate(trips0$x[1:10], trips0$y[1:10],
#' date = trips0$date, duration = 1800)
#' \dontrun{
#' ## run with full workflow to expand into a new data frame with
#' ## `int_x`, `int_y`, and (optional) `int_date`
#' if (requireNamespace("tidyr") && requireNamespace("dplyr")) {
#' tr1 <- trips0[seq(1, nrow(trips0), by = 30), ]
#' dd <- tr1 %>% group_by(id) %>%
#' mutate(inter = track_intermediate(x, y, date = date, distance = 150000)) %>%
#' tidyr::unnest()
#' plot(dd$int_date, dd$int_y, pch = ".", cex = 2, main = "equidistant in space")
#' abline(v = tr1$date)
#'
#' dd1 <- tr1 %>% group_by(id) %>%
#' mutate(inter = track_intermediate(x, y, date = date, duration = 3600 * 12)) %>%
#' tidyr::unnest()
#' plot(dd1$int_date, dd1$int_y, pch = ".", cex = 2, main = "equispaced in time")
#' abline(v = tr1$date)
#' }
#' }
track_intermediate <- function(x, y, date = NULL, distance = NULL, duration = NULL) {
  ## Number of input locations; there are n - 1 segments between them.
  n <- length(x)
  if (!is.null(distance) && !is.null(duration)) stop("'distance' or 'duration' (or both) must be NULL")
  if (is.null(distance)) {
    ## no spacing criterion given: default to 15 intermediate points per segment
    npoints <- rep(15, n - 1)
  } else {
    ## enough points so consecutive ones are at most 'distance' apart (minimum 3);
    ## the leading element is dropped ([-1L]) to align per-segment counts
    npoints <- pmax(3, ceiling(track_distance(x, y) / distance))[-1L]
  }
  if (!is.null(duration)) {
    ## FIX: message previously ended in the literal "/n" instead of a newline
    if (is.null(date)) stop("if 'duration' is not NULL, 'date' must also be given\n")
    ## override with a time-based point count (equispaced in time)
    npoints <- pmax(3, ceiling(track_time(date) / duration))[-1L]
  }
  ## Great-circle interpolation between each pair of consecutive points.
  listm <- geosphere::gcIntermediate(cbind(x[-n], y[-n]), cbind(x[-1], y[-1]),
                                     n = npoints, addStartEnd = TRUE, sp = FALSE)
  ## gcIntermediate returns a bare matrix (not a list) when there is one segment.
  if (n == 2) listm <- list(listm)
  listm <- lapply(listm, as.data.frame)
  ## sometimes we get V1, V2 as column names; normalise to lon/lat
  listm <- lapply(listm, function(ddd) setNames(ddd, c("lon", "lat")))
  funa <- function(a) data.frame(int_x = a[["lon"]], int_y = a[["lat"]],
                                 int_date = a[["int_date"]])
  runfun <- function(a) data.frame(int_x = a[["lon"]], int_y = a[["lat"]])
  ## actual point counts can differ from the requested npoints
  actual_npoints <- unlist(lapply(listm, nrow))
  if (!is.null(date)) {
    runfun <- funa
    for (i in seq_along(listm)) {
      ## spread the interval's time span evenly over its interpolated points
      dts <- seq(date[i], date[i + 1], length.out = actual_npoints[i])
      listm[[i]]$int_date <- dts
    }
  }
  ## Trailing empty data frame keeps the output the same length as the input
  ## (see the roxygen notes about unnest()).
  c(lapply(listm, runfun), list(data.frame()))
}
|
# NOTE(review): clearing the global workspace at the top of a script is an
# anti-pattern; kept only to preserve this script's original behavior.
rm(list = ls(all.names = TRUE))
library(XML)
library(RCurl)
library(httr)
Sys.setlocale(category = "LC_ALL", locale = "cht")
# Weather.com location codes to scrape (append more codes to extend coverage).
urlCode <- c("TWXX0021:1:TW")
#,"TWXX0025:1:TW")
startNo <- 1
endNo <- length(urlCode)
subPath <- "https://weather.com/zh-TW/weather/hourbyhour/l/"
cityCode <- c("台北")
#"桃園")
alldata <- data.frame()
for (pid in seq_along(urlCode)) {
  urlPath <- paste0(subPath, urlCode[pid])
  temp <- getURL(urlPath, encoding = "big5")
  xmldoc <- htmlParse(temp)
  # Extract the hourly time stamps, temperatures and precipitation chances.
  hour <- xpathSApply(xmldoc, "//div[@class='hourly-time']//span", xmlValue)
  temperature <- xpathSApply(xmldoc, "//td[@class='temp']", xmlValue)
  rain <- xpathSApply(xmldoc, "//td[@class='precip']", xmlValue)
  # FIX: seq_along() is safe when the page yields no rows; the original
  # 1:length(hour) would loop over c(1, 0) and append NA rows.
  for (time in seq_along(hour)) {
    hour2 <- hour[time]
    temperature2 <- sub('.$', '', temperature[time])  # drop the trailing unit character
    rain2 <- rain[time]
    Erroresult <- tryCatch({
      subdata <- data.frame(hour2, temperature2, rain2)
      alldata <- rbind(alldata, subdata)
    }, warning = function(war) {
      print(paste("MY_WARNING: ", urlPath))
    }, error = function(err) {
      print(paste("MY_ERROR: ", urlPath))
    }, finally = {
      print(paste("End Try&Catch", urlPath))
    })
  }
}
print(nrow(alldata))
write.csv(alldata, file = "/Users/walter/Desktop/weather.csv") | /weather_crawler2.R | no_license | FTS152/ooha_bus_project | R | false | false | 1,320 | r | rm(list=ls(all.names=TRUE))
library(XML)
library(RCurl)
library(httr)
Sys.setlocale(category = "LC_ALL", locale = "cht")
# Weather.com location codes to scrape (append more codes to extend coverage).
urlCode <- c("TWXX0021:1:TW")
#,"TWXX0025:1:TW")
startNo <- 1
endNo <- length(urlCode)
subPath <- "https://weather.com/zh-TW/weather/hourbyhour/l/"
cityCode <- c("台北")
#"桃園")
alldata <- data.frame()
for (pid in seq_along(urlCode)) {
  urlPath <- paste0(subPath, urlCode[pid])
  temp <- getURL(urlPath, encoding = "big5")
  xmldoc <- htmlParse(temp)
  # Extract the hourly time stamps, temperatures and precipitation chances.
  hour <- xpathSApply(xmldoc, "//div[@class='hourly-time']//span", xmlValue)
  temperature <- xpathSApply(xmldoc, "//td[@class='temp']", xmlValue)
  rain <- xpathSApply(xmldoc, "//td[@class='precip']", xmlValue)
  # FIX: seq_along() is safe when the page yields no rows; the original
  # 1:length(hour) would loop over c(1, 0) and append NA rows.
  for (time in seq_along(hour)) {
    hour2 <- hour[time]
    temperature2 <- sub('.$', '', temperature[time])  # drop the trailing unit character
    rain2 <- rain[time]
    Erroresult <- tryCatch({
      subdata <- data.frame(hour2, temperature2, rain2)
      alldata <- rbind(alldata, subdata)
    }, warning = function(war) {
      print(paste("MY_WARNING: ", urlPath))
    }, error = function(err) {
      print(paste("MY_ERROR: ", urlPath))
    }, finally = {
      print(paste("End Try&Catch", urlPath))
    })
  }
}
print(nrow(alldata))
write.csv(alldata, file = "/Users/walter/Desktop/weather.csv") |
# Load the pre-processed power-consumption data (semicolon-separated, with header).
# FIX: use TRUE rather than the reassignable shorthand T.
cData <- read.table("./new_data.txt", header = TRUE, sep = ";")
# Open a 480x480 PNG graphics device with a transparent background.
png("plot1.png", width = 480, height = 480, bg = "transparent")
hist(cData$Global_active_power, col = "red", xlab = "Global Active Power (kilowatts)", main = "Global Active Power")
# Close the device so the file is flushed to disk.
dev.off()
| /plot1.R | no_license | amrsekilly/ExData_Plotting1 | R | false | false | 289 | r | # load data
# Load the pre-processed power-consumption data (semicolon-separated, with header).
# FIX: use TRUE rather than the reassignable shorthand T.
cData <- read.table("./new_data.txt", header = TRUE, sep = ";")
# Open a 480x480 PNG graphics device with a transparent background.
png("plot1.png", width = 480, height = 480, bg = "transparent")
hist(cData$Global_active_power, col = "red", xlab = "Global Active Power (kilowatts)", main = "Global Active Power")
# Close the device so the file is flushed to disk.
dev.off()
|
NULL
#' Stochastic Generation of a \code{PrecipitationOccurenceModel} or \code{PrecipitationOccurenceMultiSiteModel} model object
#'
#' It is an implementation of the \code{\link{generate}} method
#'
#' @param x model returned by \code{\link{PrecipitationOccurenceModel}} or \code{\link{PrecipitationOccurenceMultiSiteModel}}
#' @param newdata predictor or exogenous variables. See \code{\link{predict.PrecipitationOccurenceModel}}
#' @param exogen predictor or exogenous variables
#' @param monthly.factor vector of factors indicating the month of the days
#' @param random vector of random or calculated numbers ranging between 0 and 1
#' @param origin,end character strings (yyyy-dd-mm) indicating the start and/or end date of the daily weather generation.
#' @param n number of generations. See \code{\link{generate}}. Here it is ignored and the number of generations is given by \code{origin},\code{end} or \code{monthly.factor}.
#' @param previous logical vector containing previously occurred states
#' @param ... further arguments
#'
#' @seealso \code{\link{generate}},\code{\link{predict.glm}},\code{\link{PrecipitationOccurenceModel}},\code{\link{PrecipitationOccurenceMultiSiteModel}}
#' @export
#' @method generate PrecipitationOccurenceModel
#' @S3method generate PrecipitationOccurenceModel
#' @aliases generate generate.PrecipitationOccurenceModel
#' @rdname generate
#' @importFrom RGENERATE generate
#'
#' @references
#' D.S. Wilks (1998), Multisite Generalization of a Daily Stochastic Precipitation Generation Model, Journal of Hydrology, Volume 210, Issues 1-4, September 1998, Pages 178-191,
#' \url{http://www.sciencedirect.com/science/article/pii/S0022169498001863}
#'
#' Muamaraldin Mhanna and Willy Bauwens (2011) A Stochastic Space-Time Model for the Generation of Daily Rainfall in the Gaza Strip, International Journal of Climatology, Volume 32, Issue 7, pages 1098-1112,
#' \url{http://dx.doi.org/10.1002/joc.2305}
#'
#'
#'
#'
#' @examples
#'
#' library(RGENERATEPREC)
#'
#'
#' ## A function example can be found in the following script file:
#' scriptfile <- system.file("example.generate.R",package="RGENERATEPREC")
#' ## The current file path is given by the 'scriptfile' variable:
#' print(scriptfile)
#' ## To run the example file, launch the file with 'source' command (uncomment the following line)
#' #source(scriptfile)
#'
#' ## ALTERNATIVELY you can run the following lines:
#'
#'
#'
#' data(trentino)
#'
#' year_min <- 1961
#' year_max <- 1990
#'
#' origin <- paste(year_min,1,1,sep="-")
#' end <- paste(year_max,12,31,sep="-")
#'
#' period <- PRECIPITATION$year>=year_min & PRECIPITATION$year<=year_max
#' period_temp <- TEMPERATURE_MAX$year>=year_min & TEMPERATURE_MAX$year<=year_max
#'
#' prec_mes <- PRECIPITATION[period,]
#' Tx_mes <- TEMPERATURE_MAX[period_temp,]
#' Tn_mes <- TEMPERATURE_MIN[period_temp,]
#' ## removing nonworking stations (e.g. time series with NA)
#' accepted <- array(TRUE,length(names(prec_mes)))
#' names(accepted) <- names(prec_mes)
#' for (it in names(prec_mes)) {
#' acc <- TRUE
#' acc <- (length(which(!is.na(Tx_mes[,it])))==length(Tx_mes[,it]))
#' acc <- (length(which(!is.na(Tn_mes[,it])))==length(Tn_mes[,it])) & acc
#' accepted[it] <- (length(which(!is.na(prec_mes[,it])))==length(prec_mes[,it])) & acc
#'
#' }
#'
#' valmin <- 1.0
#' ###station <- names(PRECIPITATION)[!(names(PRECIPITATION) %in% c("day","month","year"))]
#' prec_mes <- prec_mes[,accepted]
#'
#'
#'
#' Tx_mes <- Tx_mes[,accepted]
#' Tn_mes <- Tn_mes[,accepted]
#' prec_occurence_mes <- prec_mes>=valmin
#'
#' station <- names(prec_mes)[!(names(prec_mes) %in% c("day","month","year"))]
#' it <- station[2]
#' vect <- Tx_mes[,it]-Tn_mes[,it]
#' months <- factor(prec_mes$month)
#'
#' #
#' ### Not Run!!!
#' ### Please uncomment the following lines to run them
#'
#'
#' #model <-
#' #PrecipitationOccurenceModel(x=prec_mes[,it],exogen=vect,
#' #monthly.factor=months,valmin=valmin)
#' #
#' #obs <- prec_mes[,it]>=valmin
#' #
#' #gen <- generate(model,exogen=vect,monthly.factor=months,n=length(months))
#'
#'
#' ### MultiSite Generation
#'
#'
#' station <- station[1:2]
#' exogen <- Tx_mes[,station]-Tn_mes[,station]
#'
#' months <- factor(prec_mes$month)
#'
#' #
#' ### Not Run!!!
#' ### Please uncomment the following lines to run them
#'
#' #model_multisite <-
#' #PrecipitationOccurenceMultiSiteModel(x=prec_mes[,station],
#' #exogen=exogen,origin=origin,multisite_type="wilks")
#' #
#' #
#' ## LOGIT-type Model
#' #model_multisite_logit <-
#' #PrecipitationOccurenceMultiSiteModel(x=prec_mes,exogen=exogen,
#' #origin=origin,multisite_type="logit",station=station)
#' #
#' #
#' #obs_multisite <- prec_mes[,station]>=valmin
#' #
#' #gen_multisite <- generate(model_multisite,exogen=exogen,origin=origin,end=end)
#' #
#' #gen_multisite_logit <- generate(model_multisite_logit,exogen=exogen,origin=origin,end=end)
generate.PrecipitationOccurenceModel <- function(x,newdata=NULL,previous=NULL,n=30,random=runif(n,min=0,max=1),exogen=NULL,monthly.factor=NULL,...) {
  # Order of the fitted occurrence model (number of previous days it conditions on).
  p <- x$p
  if (p < 1) previous <- NULL
  # 'exogen' is an alias for 'newdata': when supplied it takes precedence.
  if (!is.null(exogen)) newdata <- as.data.frame(exogen)
  # FIX: scalar condition -- use short-circuit && rather than elementwise &.
  if (is.null(newdata) && is.null(monthly.factor)) {
    # fall back on the predictors stored with the fitted model
    newdata <- x$predictor
  } else if (is.null(newdata)) {
    # zero-column data frame with one row per requested day
    newdata <- as.data.frame(array(NA, c(length(monthly.factor), 0)))
  }
  if (!is.null(monthly.factor)) newdata$month <- factor(monthly.factor)
  if (nrow(newdata) < n) {
    warning("Warning: n is reduced, insufficient numbers of predictors!")
    n <- nrow(newdata)
  }
  # Keep the first n rows, restoring column names lost by single-column subsetting.
  names_n <- names(newdata)
  newdata <- as.data.frame(newdata[seq_len(n), ])
  names(newdata) <- names_n
  if (is.null(previous)) {
    # random initial occurrence history when none is supplied
    previous <- rnorm(x$p) >= 0
  }
  out <- array(NA, n)
  # seq_len() is safe if n ends up 0 (the original 1:n would loop over c(1, 0)).
  for (i in seq_len(n)) {
    # probability of a dry day, compared against the supplied random draw
    prob <- 1 - predict(x, newdata = newdata[i, ], previous = previous, type = "response", ...)
    out[i] <- random[i] >= prob
    # shift the occurrence history forward by one day
    previous <- c(out[i], previous[-p])
  }
  return(out)
}
NULL
#'
#'
#'
#' @export
#' @method generate CCGammaObjectListPerEachMonth
#' @S3method generate CCGammaObjectListPerEachMonth
#' @aliases generate generate.CCGammaObjectListPerEachMonth
#' @rdname generate
#'
generate.CCGammaObjectListPerEachMonth <- function(x, ...) {
  # A CCGammaObjectListPerEachMonth is just a classed list: retag it as a
  # plain list so the generic dispatches to the list method, and forward
  # all remaining arguments unchanged.
  plain_list <- x
  class(plain_list) <- "list"
  generate(plain_list, ...)
}
NULL
#'
#'
#'
#' @export
#' @method generate PrecipitationOccurenceMultiSiteModel
#' @S3method generate PrecipitationOccurenceMultiSiteModel
#' @aliases generate generate.PrecipitationOccurenceMultiSiteModel
#' @rdname generate
#'
generate.PrecipitationOccurenceMultiSiteModel <- function(x,exogen,n=10,origin="1961-1-1",end="1990-1-1",previous=NULL,monthly.factor=NULL,...) {
# Simulate daily precipitation occurrence at several sites at once.
# The simulation horizon comes from 'monthly.factor' when given, otherwise
# from the origin/end date range; either way 'n' is overwritten below.
out <- NULL
if (is.null(monthly.factor)) {
# build the month index for every day between origin and end
dates <- as.Date(origin):as.Date(end)
months <- adddate(as.data.frame(dates),origin=origin)$month
n <- length(months)
} else {
months <- monthly.factor
n <- length(months)
}
if (x$type=="wilks") {
# Wilks-type model: draw spatially correlated Gaussian noise per calendar
# month from the stored CCGamma structure, then map it to uniform (0,1)
# values that drive each site's single-site generator.
monthsf <- sprintf("month%02d",months)
gen_wilks <- generate(x$ccgamma,FUN=rnorm,type="covariance",names=x$station,factor.series=monthsf)
for (c in 1:ncol(gen_wilks)) {
gen_wilks[,c] <- pnorm(gen_wilks[,c])
}
# default to per-station NULL placeholders when exogen/previous are missing
if (is.null(exogen)) {
exogen <- lapply(X=x$station,FUN=function(x){ NULL })
names(exogen) <- x$station
}
if (is.null(previous)) {
previous <- lapply(X=x$station,FUN=function(x){ NULL })
names(previous) <- x$station
}
# output: one column of generated occurrences per station
out <- as.data.frame(array(NA,dim(gen_wilks)))
names(out) <- names(gen_wilks)
for (it in x$station) {
# if (is.data.frame(exogen)) {
#
# exogen_loc <- exogen[,it]
#
# } else {
#
# exogen_loc <- exogen[[it]]
# }
# Pick this station's exogenous predictors: by column-name match when
# 'exogen' is a data frame, by element when it is a list, else as-is.
if (is.data.frame(exogen)) {
cols <- str_detect(names(exogen),it)
exogen_loc <- exogen[,cols]
} else if (is.list(exogen)) {
exogen_loc <- exogen[[it]]
} else {
exogen_loc <- exogen
}
if (is.data.frame(previous)) {
previous_loc <- previous[,it]
} else {
previous_loc <- previous[[it]]
}
###
###function(x,newdata=NULL,previous=NULL,n=30,random=runif(n,min=0,max=1),exogen=NULL,monthly.factor=NULL,...) {
message(paste("Processing",it))
# Delegate to the single-site method, feeding it the correlated uniforms.
out[,it] <- generate(x[[it]],previous=previous_loc,exogen=exogen_loc,monthly.factor=factor(months),random=gen_wilks[,it],n=n)
###
}
} else if (x$type=="logit") {
# Logit-type model: sites are generated jointly, one day at a time, with
# each site's model conditioned on all stations' recent occurrence history.
if (is.null(exogen)) {
exogen <- as.data.frame(array(NA,c(n,0)))
}
if (is.null(previous)) {
# random p-day occurrence history for all K stations
previous <- as.data.frame(array(rnorm(x$p*x$K)>=0,c(x$p,x$K)))
names(previous) <- x$station
} else {
previous <- previous[,x$station] ## ec 20141204
}
out <- as.data.frame(array(NA,c(n,length(x$station))))
names(out) <- x$station
# progress is reported at every 5% of the simulated days
percs <- seq(from=0,to=100,by=5)
npercs <- trunc(percs/100*n)
for (ncnt in 1:n) {
if (ncnt %in% npercs) {
valprec <- percs[npercs==ncnt]
# NOTE(review): the local 'message' string shadows base::message();
# the call below still resolves to the function, but the naming is confusing.
message <- paste(sprintf("Processing: %0.2f",valprec),"%",sep="")
message(message)
}
# One day for every station, then shift the shared history down by one row.
out[ncnt,] <- unlist(lapply(X=x[x$station],FUN=generate,previous=previous,endogenous=x$station,exogen=exogen[ncnt,],monthly.factor=factor(months)[ncnt],n=1,...))
previous[-1,] <- previous[-x$p,]
previous[1,] <- out[ncnt,]
}
#### out[,it] <- generate(x[[it]],previous=previous_loc,exogen=exogen,monthly.factor=factor(months),random=gen_wilks[,it],n=n)
}
### out <- NULL
## TO DO
## TO GO ON ....
return(out)
}
| /RGENERATEPREC/R/generate.PrecipitationOccurenceModel.R | no_license | ingted/R-Examples | R | false | false | 9,726 | r |
NULL
#' Stochastic Generation of a \code{PrecipitationOccurenceModel} or \code{PrecipitationOccurenceMultiSiteModel} model object
#'
#' It is an implementation of the \code{\link{generate}} method
#'
#' @param x model returned by \code{\link{PrecipitationOccurenceModel}} or \code{\link{PrecipitationOccurenceMultiSiteModel}}
#' @param newdata predictor or exogenous variables. See \code{\link{predict.PrecipitationOccurenceModel}}
#' @param exogen predictor or exogenous variables
#' @param monthly.factor vector of factors indicating the month of the days
#' @param random vector of random or calculated numbers ranging between 0 and 1
#' @param origin,end character strings (yyyy-dd-mm) indicating the start and/or end date of the daily weather generation.
#' @param n number of generations. See \code{\link{generate}}. Here it is ignored and the number of generations is given by \code{origin},\code{end} or \code{monthly.factor}.
#' @param previous logical vector containing previously occurred states
#' @param ... further arguments
#'
#' @seealso \code{\link{generate}},\code{\link{predict.glm}},\code{\link{PrecipitationOccurenceModel}},\code{\link{PrecipitationOccurenceMultiSiteModel}}
#' @export
#' @method generate PrecipitationOccurenceModel
#' @S3method generate PrecipitationOccurenceModel
#' @aliases generate generate.PrecipitationOccurenceModel
#' @rdname generate
#' @importFrom RGENERATE generate
#'
#' @references
#' D.S. Wilks (1998), Multisite Generalization of a Daily Stochastic Precipitation Generation Model, Journal of Hydrology, Volume 210, Issues 1-4, September 1998, Pages 178-191,
#' \url{http://www.sciencedirect.com/science/article/pii/S0022169498001863}
#'
#' Muamaraldin Mhanna and Willy Bauwens (2011) A Stochastic Space-Time Model for the Generation of Daily Rainfall in the Gaza Strip, International Journal of Climatology, Volume 32, Issue 7, pages 1098-1112,
#' \url{http://dx.doi.org/10.1002/joc.2305}
#'
#'
#'
#'
#' @examples
#'
#' library(RGENERATEPREC)
#'
#'
#' ## A function example can be found in the following script file:
#' scriptfile <- system.file("example.generate.R",package="RGENERATEPREC")
#' ## The current file path is given by the 'scriptfile' variable:
#' print(scriptfile)
#' ## To run the example file, launch the file with 'source' command (uncomment the following line)
#' #source(scriptfile)
#'
#' ## ALTERNATIVELY you can run the following lines:
#'
#'
#'
#' data(trentino)
#'
#' year_min <- 1961
#' year_max <- 1990
#'
#' origin <- paste(year_min,1,1,sep="-")
#' end <- paste(year_max,12,31,sep="-")
#'
#' period <- PRECIPITATION$year>=year_min & PRECIPITATION$year<=year_max
#' period_temp <- TEMPERATURE_MAX$year>=year_min & TEMPERATURE_MAX$year<=year_max
#'
#' prec_mes <- PRECIPITATION[period,]
#' Tx_mes <- TEMPERATURE_MAX[period_temp,]
#' Tn_mes <- TEMPERATURE_MIN[period_temp,]
#' ## removing nonworking stations (e.g. time series with NA)
#' accepted <- array(TRUE,length(names(prec_mes)))
#' names(accepted) <- names(prec_mes)
#' for (it in names(prec_mes)) {
#' acc <- TRUE
#' acc <- (length(which(!is.na(Tx_mes[,it])))==length(Tx_mes[,it]))
#' acc <- (length(which(!is.na(Tn_mes[,it])))==length(Tn_mes[,it])) & acc
#' accepted[it] <- (length(which(!is.na(prec_mes[,it])))==length(prec_mes[,it])) & acc
#'
#' }
#'
#' valmin <- 1.0
#' ###station <- names(PRECIPITATION)[!(names(PRECIPITATION) %in% c("day","month","year"))]
#' prec_mes <- prec_mes[,accepted]
#'
#'
#'
#' Tx_mes <- Tx_mes[,accepted]
#' Tn_mes <- Tn_mes[,accepted]
#' prec_occurence_mes <- prec_mes>=valmin
#'
#' station <- names(prec_mes)[!(names(prec_mes) %in% c("day","month","year"))]
#' it <- station[2]
#' vect <- Tx_mes[,it]-Tn_mes[,it]
#' months <- factor(prec_mes$month)
#'
#' #
#' ### Not Run!!!
#' ### Please uncomment the following lines to run them
#'
#'
#' #model <-
#' #PrecipitationOccurenceModel(x=prec_mes[,it],exogen=vect,
#' #monthly.factor=months,valmin=valmin)
#' #
#' #obs <- prec_mes[,it]>=valmin
#' #
#' #gen <- generate(model,exogen=vect,monthly.factor=months,n=length(months))
#'
#'
#' ### MultiSite Generation
#'
#'
#' station <- station[1:2]
#' exogen <- Tx_mes[,station]-Tn_mes[,station]
#'
#' months <- factor(prec_mes$month)
#'
#' #
#' ### Not Run!!!
#' ### Please uncomment the following lines to run them
#'
#' #model_multisite <-
#' #PrecipitationOccurenceMultiSiteModel(x=prec_mes[,station],
#' #exogen=exogen,origin=origin,multisite_type="wilks")
#' #
#' #
#' ## LOGIT-type Model
#' #model_multisite_logit <-
#' #PrecipitationOccurenceMultiSiteModel(x=prec_mes,exogen=exogen,
#' #origin=origin,multisite_type="logit",station=station)
#' #
#' #
#' #obs_multisite <- prec_mes[,station]>=valmin
#' #
#' #gen_multisite <- generate(model_multisite,exogen=exogen,origin=origin,end=end)
#' #
#' #gen_multisite_logit <- generate(model_multisite_logit,exogen=exogen,origin=origin,end=end)
generate.PrecipitationOccurenceModel <- function(x,newdata=NULL,previous=NULL,n=30,random=runif(n,min=0,max=1),exogen=NULL,monthly.factor=NULL,...) {
  # Order of the fitted occurrence model (number of previous days it conditions on).
  p <- x$p
  if (p < 1) previous <- NULL
  # 'exogen' is an alias for 'newdata': when supplied it takes precedence.
  if (!is.null(exogen)) newdata <- as.data.frame(exogen)
  # FIX: scalar condition -- use short-circuit && rather than elementwise &.
  if (is.null(newdata) && is.null(monthly.factor)) {
    # fall back on the predictors stored with the fitted model
    newdata <- x$predictor
  } else if (is.null(newdata)) {
    # zero-column data frame with one row per requested day
    newdata <- as.data.frame(array(NA, c(length(monthly.factor), 0)))
  }
  if (!is.null(monthly.factor)) newdata$month <- factor(monthly.factor)
  if (nrow(newdata) < n) {
    warning("Warning: n is reduced, insufficient numbers of predictors!")
    n <- nrow(newdata)
  }
  # Keep the first n rows, restoring column names lost by single-column subsetting.
  names_n <- names(newdata)
  newdata <- as.data.frame(newdata[seq_len(n), ])
  names(newdata) <- names_n
  if (is.null(previous)) {
    # random initial occurrence history when none is supplied
    previous <- rnorm(x$p) >= 0
  }
  out <- array(NA, n)
  # seq_len() is safe if n ends up 0 (the original 1:n would loop over c(1, 0)).
  for (i in seq_len(n)) {
    # probability of a dry day, compared against the supplied random draw
    prob <- 1 - predict(x, newdata = newdata[i, ], previous = previous, type = "response", ...)
    out[i] <- random[i] >= prob
    # shift the occurrence history forward by one day
    previous <- c(out[i], previous[-p])
  }
  return(out)
}
NULL
#'
#'
#'
#' @export
#' @method generate CCGammaObjectListPerEachMonth
#' @S3method generate CCGammaObjectListPerEachMonth
#' @aliases generate generate.CCGammaObjectListPerEachMonth
#' @rdname generate
#'
generate.CCGammaObjectListPerEachMonth <- function(x, ...) {
  # A CCGammaObjectListPerEachMonth is just a classed list: retag it as a
  # plain list so the generic dispatches to the list method, and forward
  # all remaining arguments unchanged.
  plain_list <- x
  class(plain_list) <- "list"
  generate(plain_list, ...)
}
NULL
#'
#'
#'
#' @export
#' @method generate PrecipitationOccurenceMultiSiteModel
#' @S3method generate PrecipitationOccurenceMultiSiteModel
#' @aliases generate generate.PrecipitationOccurenceMultiSiteModel
#' @rdname generate
#'
generate.PrecipitationOccurenceMultiSiteModel <- function(x,exogen,n=10,origin="1961-1-1",end="1990-1-1",previous=NULL,monthly.factor=NULL,...) {
# Simulate daily precipitation occurrence at several sites at once.
# The simulation horizon comes from 'monthly.factor' when given, otherwise
# from the origin/end date range; either way 'n' is overwritten below.
out <- NULL
if (is.null(monthly.factor)) {
# build the month index for every day between origin and end
dates <- as.Date(origin):as.Date(end)
months <- adddate(as.data.frame(dates),origin=origin)$month
n <- length(months)
} else {
months <- monthly.factor
n <- length(months)
}
if (x$type=="wilks") {
# Wilks-type model: draw spatially correlated Gaussian noise per calendar
# month from the stored CCGamma structure, then map it to uniform (0,1)
# values that drive each site's single-site generator.
monthsf <- sprintf("month%02d",months)
gen_wilks <- generate(x$ccgamma,FUN=rnorm,type="covariance",names=x$station,factor.series=monthsf)
for (c in 1:ncol(gen_wilks)) {
gen_wilks[,c] <- pnorm(gen_wilks[,c])
}
# default to per-station NULL placeholders when exogen/previous are missing
if (is.null(exogen)) {
exogen <- lapply(X=x$station,FUN=function(x){ NULL })
names(exogen) <- x$station
}
if (is.null(previous)) {
previous <- lapply(X=x$station,FUN=function(x){ NULL })
names(previous) <- x$station
}
# output: one column of generated occurrences per station
out <- as.data.frame(array(NA,dim(gen_wilks)))
names(out) <- names(gen_wilks)
for (it in x$station) {
# if (is.data.frame(exogen)) {
#
# exogen_loc <- exogen[,it]
#
# } else {
#
# exogen_loc <- exogen[[it]]
# }
# Pick this station's exogenous predictors: by column-name match when
# 'exogen' is a data frame, by element when it is a list, else as-is.
if (is.data.frame(exogen)) {
cols <- str_detect(names(exogen),it)
exogen_loc <- exogen[,cols]
} else if (is.list(exogen)) {
exogen_loc <- exogen[[it]]
} else {
exogen_loc <- exogen
}
if (is.data.frame(previous)) {
previous_loc <- previous[,it]
} else {
previous_loc <- previous[[it]]
}
###
###function(x,newdata=NULL,previous=NULL,n=30,random=runif(n,min=0,max=1),exogen=NULL,monthly.factor=NULL,...) {
message(paste("Processing",it))
# Delegate to the single-site method, feeding it the correlated uniforms.
out[,it] <- generate(x[[it]],previous=previous_loc,exogen=exogen_loc,monthly.factor=factor(months),random=gen_wilks[,it],n=n)
###
}
} else if (x$type=="logit") {
# Logit-type model: sites are generated jointly, one day at a time, with
# each site's model conditioned on all stations' recent occurrence history.
if (is.null(exogen)) {
exogen <- as.data.frame(array(NA,c(n,0)))
}
if (is.null(previous)) {
# random p-day occurrence history for all K stations
previous <- as.data.frame(array(rnorm(x$p*x$K)>=0,c(x$p,x$K)))
names(previous) <- x$station
} else {
previous <- previous[,x$station] ## ec 20141204
}
out <- as.data.frame(array(NA,c(n,length(x$station))))
names(out) <- x$station
# progress is reported at every 5% of the simulated days
percs <- seq(from=0,to=100,by=5)
npercs <- trunc(percs/100*n)
for (ncnt in 1:n) {
if (ncnt %in% npercs) {
valprec <- percs[npercs==ncnt]
# NOTE(review): the local 'message' string shadows base::message();
# the call below still resolves to the function, but the naming is confusing.
message <- paste(sprintf("Processing: %0.2f",valprec),"%",sep="")
message(message)
}
# One day for every station, then shift the shared history down by one row.
out[ncnt,] <- unlist(lapply(X=x[x$station],FUN=generate,previous=previous,endogenous=x$station,exogen=exogen[ncnt,],monthly.factor=factor(months)[ncnt],n=1,...))
previous[-1,] <- previous[-x$p,]
previous[1,] <- out[ncnt,]
}
#### out[,it] <- generate(x[[it]],previous=previous_loc,exogen=exogen,monthly.factor=factor(months),random=gen_wilks[,it],n=n)
}
### out <- NULL
## TO DO
## TO GO ON ....
return(out)
}
|
\name{print.LSDdata}
\alias{print.LSDdata}
\title{Prints the components of a list containing data on the LSDs for all pairwise differences of predictions.}
\description{Prints the components of an \code{LSDdata} \code{\link{list}} created by \code{\link{exploreLSDs}},
that contains data on the LSDs for all pairwise differences of predictions stored in an
\code{\link{alldiffs.object}}.}
\usage{\method{print}{LSDdata}(x, which.print = c("statistics", "false.pos", "false.neg"), ...)}
\arguments{
\item{x}{An object that, ideally, is of class \code{LSDdata}.}
\item{which.print}{Which components of the \code{LSDdata} \code{\link{list}} to print.
Possible values are any combination of \code{frequencies},
\code{distinct.vals}, \code{statistics}, \code{accuracy},
\code{false.pos}, \code{false.neg},
\code{per.pred.accuracy}, \code{LSD}, \code{summary} and \code{all},
except that \code{summary} and \code{all} cannot occur together.
For a description of the components, see \code{\link{alldiffs.object}}.
The default is to print \code{statistics}, \code{false.pos},
\code{false.neg}. The option \code{summary} results in the printing of
\code{distinct.vals}, \code{statistics}, \code{false.pos}, \code{false.neg}.}
\item{\dots}{further arguments passed to \code{print}.}
}
\value{No value is returned, but components of \code{x} are printed as specified in \code{which.print}.}
\author{Chris Brien}
\seealso{\code{\link{exploreLSDs.alldiffs}}, \code{\link{alldiffs.object}}}
\examples{\dontrun{
data(WaterRunoff.dat)
asreml.options(keep.order = TRUE) #required for asreml-R4 only
current.asr <- asreml(fixed = pH ~ Benches + (Sources * (Type + Species)),
random = ~ Benches:MainPlots,
keep.order=TRUE, data= WaterRunoff.dat)
current.asrt <- as.asrtests(current.asr, NULL, NULL)
diffs <- predictPlus(classify = "Sources:Type",
asreml.obj = current.asr,
wald.tab = current.asrt$wald.tab,
present = c("Sources", "Type", "Species"))
LSDdata <- exploreLSDs(diffs, LSDtype = "factor.combinations", LSDby = "Sources")
print(LSDdata)
}}
\keyword{asreml}
\keyword{htest} | /man/print.LSDdata.Rd | no_license | cran/asremlPlus | R | false | false | 2,438 | rd | \name{print.LSDdata}
\alias{print.LSDdata}
\title{Prints the components of a list containing data on the LSDs for all pairwise differences of predictions.}
\description{Prints the components of an \code{LSDdata} \code{\link{list}} created by \code{\link{exploreLSDs}},
that contains data on the LSDs for all pairwise differences of predictions stored in an
\code{\link{alldiffs.object}}.}
\usage{\method{print}{LSDdata}(x, which.print = c("statistics", "false.pos", "false.neg"), ...)}
\arguments{
\item{x}{An object that, ideally, is of class \code{LSDdata}.}
\item{which.print}{Which components of the \code{LSDdata} \code{\link{list}} to print.
Possible values are any combination of \code{frequencies},
\code{distinct.vals}, \code{statistics}, \code{accuracy},
\code{false.pos}, \code{false.neg},
\code{per.pred.accuracy}, \code{LSD}, \code{summary} and \code{all},
except that \code{summary} and \code{all} cannot occur together.
For a description of the components, see \code{\link{alldiffs.object}}.
The default is to print \code{statistics}, \code{false.pos},
\code{false.neg}. The option \code{summary} results in the printing of
\code{distinct.vals}, \code{statistics}, \code{false.pos}, \code{false.neg}.}
\item{\dots}{further arguments passed to \code{print}.}
}
\value{No value is returned, but components of \code{x} are printed as specified in \code{which.print}.}
\author{Chris Brien}
\seealso{\code{\link{exploreLSDs.alldiffs}}, \code{\link{alldiffs.object}}}
\examples{\dontrun{
data(WaterRunoff.dat)
asreml.options(keep.order = TRUE) #required for asreml-R4 only
current.asr <- asreml(fixed = pH ~ Benches + (Sources * (Type + Species)),
random = ~ Benches:MainPlots,
keep.order=TRUE, data= WaterRunoff.dat)
current.asrt <- as.asrtests(current.asr, NULL, NULL)
diffs <- predictPlus(classify = "Sources:Type",
asreml.obj = current.asr,
wald.tab = current.asrt$wald.tab,
present = c("Sources", "Type", "Species"))
LSDdata <- exploreLSDs(diffs, LSDtype = "factor.combinations", LSDby = "Sources")
print(LSDdata)
}}
\keyword{asreml}
\keyword{htest} |
#' Pie chart in kedata style
#'
#' @description create pie chart form 2 variabel in kedata style. Percentage computed from
#' each y varaibel value / total of y value * 100
#'
#' @importFrom stats cor
#'
#' @param data data frame with 2 variabel representing categorical and numeric data
#' @param x categorical variabel
#' @param y numeric variabel
#' @param color character of color hexagon for coloring x variabel
#' @param title character
#' @param subtitle character
#' @param data_source character
#'
#' @return ggplot and ggarrange object
#'
#' @examples
#' \dontrun{
#' library(dataplot)
#' library(ggplot2)
#'
#' df <- data.frame(
#' "brand" = c("Samsung", "Huawei", "Apple", "Xiaomi", "OPPO"),
#' "share" = c(10, 30, 20, 35, 5)
#' )
#'
#' mycolor <- c("#55DDE0", "#33658A", "#2F4858", "#F6AE2D", "#F26419")
#'
#' plot_pie(
#' data = df, x = "brand", y = "share", color = mycolor,
#' title = "Lorem Ipsum is simply dummy text",
#' subtitle = "Contrary to popular belief, Lorem Ipsum is not simply random text",
#' data_source = "Sumber: www.kedata.online"
#' )
#' }
#' @export
plot_pie <- function(data, x, y, color, title, subtitle, data_source) {
  # Validate inputs up front: the requested columns must exist in `data`.
  stopifnot(is.data.frame(data), all(c(x, y) %in% colnames(data)))
  # Keep only the category (x) and value (y) columns under fixed internal
  # names so the ggplot2 mappings below work regardless of caller's names.
  data <- data[, c(x, y)]
  colnames(data) <- c("col1", "col2")
  # Pre-compute percentage-of-total labels once, instead of mixing aes()
  # data masking with direct `data$col2` access inside the plot spec.
  data$pct_label <- paste0(round((data$col2 / sum(data$col2)) * 100, 2), "%")
  # Single stacked bar; geom_col() is the idiomatic equivalent of
  # geom_bar(stat = "identity").
  pie <- ggplot2::ggplot(data, ggplot2::aes(x = "", y = col2, fill = col1)) +
    ggplot2::geom_col(width = 1)
  # Convert the stacked bar to polar coordinates (a pie) and centre the
  # percentage label within each slice.
  pie <- pie + ggplot2::coord_polar("y", start = 0) +
    ggplot2::geom_text(ggplot2::aes(label = pct_label),
      position = ggplot2::position_stack(vjust = 0.5),
      color = "white"
    )
  # Colour slices with the caller-supplied hex palette (one colour per level).
  pie <- pie + ggplot2::scale_fill_manual(values = color)
  # Drop axis and legend titles; the finishing step supplies the headings.
  pie <- pie + ggplot2::labs(x = NULL, y = NULL, fill = NULL)
  # Apply the empty base theme FIRST, then the font/title tweaks.
  # Complete themes such as theme_void() replace every previously set
  # theme() element, so the original order (tweaks, then theme_void)
  # silently discarded the Roboto font and title styling.
  pie <- pie + ggplot2::theme_void() +
    ggplot2::theme(
      text = ggplot2::element_text(size = 10, family = "Roboto"),
      plot.title = ggplot2::element_text(hjust = 0.5, color = "#666666")
    )
  # Finishing: wrap with the kedata-style title, subtitle and source caption.
  kedata_final2(
    plotname = pie,
    title = paste0(title),
    subtitle = paste0(subtitle),
    data_source = paste0(data_source)
  )
}
| /R/plot_pie.R | no_license | eppofahmi/dataplot | R | false | false | 2,445 | r | #' Pie chart in kedata style
#'
#' @description create pie chart form 2 variabel in kedata style. Percentage computed from
#' each y varaibel value / total of y value * 100
#'
#' @importFrom stats cor
#'
#' @param data data frame with 2 variabel representing categorical and numeric data
#' @param x categorical variabel
#' @param y numeric variabel
#' @param color character of color hexagon for coloring x variabel
#' @param title character
#' @param subtitle character
#' @param data_source character
#'
#' @return ggplot and ggarrange object
#'
#' @examples
#' \dontrun{
#' library(dataplot)
#' library(ggplot2)
#'
#' df <- data.frame(
#' "brand" = c("Samsung", "Huawei", "Apple", "Xiaomi", "OPPO"),
#' "share" = c(10, 30, 20, 35, 5)
#' )
#'
#' mycolor <- c("#55DDE0", "#33658A", "#2F4858", "#F6AE2D", "#F26419")
#'
#' plot_pie(
#' data = df, x = "brand", y = "share", color = mycolor,
#' title = "Lorem Ipsum is simply dummy text",
#' subtitle = "Contrary to popular belief, Lorem Ipsum is not simply random text",
#' data_source = "Sumber: www.kedata.online"
#' )
#' }
#' @export
plot_pie <- function(data, x, y, color, title, subtitle, data_source) {
# Data
data <- data[, c(x, y)]
colnames(data) <- c("col1", "col2")
# Pie
pie <- ggplot2::ggplot(data, ggplot2::aes(x = "", y = col2, fill = col1)) +
ggplot2::geom_bar(stat = "identity", width = 1)
# Convert to pie (polar coordinates) and add labels
pie <- pie + ggplot2::coord_polar("y", start = 0) +
ggplot2::geom_text(ggplot2::aes(label = paste0(round((col2 / sum(data$col2)) * 100, 2), "%")),
position = ggplot2::position_stack(vjust = 0.5),
color = "white"
)
# Add color scale (hex colors)
pie <- pie + ggplot2::scale_fill_manual(values = color)
# Remove labels and add title
pie <- pie + ggplot2::labs(x = NULL, y = NULL, fill = NULL)
# Tidy up the theme
# ggplot2::theme_set(cowplot::theme_minimal_grid()) # pre-set the bw theme.
pie <- pie + ggplot2::theme(
axis.line = ggplot2::element_blank(),
text = ggplot2::element_text(size = 10, family = "Roboto"),
axis.text = ggplot2::element_blank(),
axis.ticks = ggplot2::element_blank(),
plot.title = ggplot2::element_text(hjust = 0.5, color = "#666666")
) +
ggplot2::theme_void()
# Finishing
kedata_final2(
plotname = pie,
title = paste0(title),
subtitle = paste0(subtitle),
data_source = paste0(data_source)
)
}
|
library(forcats)
# Axis-label formatter: renders breaks as plotmath scientific notation,
# e.g. "1e+05" is displayed as 1 %*% 10^+05 (i.e. 1 x 10^5) on the axis.
scientific_10 <- function(x) {
  # Format the break values as "<mantissa>e<exponent>" strings first.
  sci_labels <- scales::scientific_format()(x)
  # Rewrite the literal "e" into plotmath multiplication, then parse so
  # ggplot2 treats the labels as expressions rather than plain text.
  plotmath_text <- gsub("e", " %*% 10^", sci_labels, fixed = TRUE)
  parse(text = plotmath_text)
}
# Ribosome-antioxidant trade-off: mean ribosome count (R) against half the
# antioxidant pool (A/2; the axis labels this as MnSOD per cell -- the
# rationale for the halving is not shown here, confirm upstream), one point
# per dissolved-Mn level at fixed dFe = 1000 pM.
# Assumes `model_out_mnfe4_model1` and the dplyr/ggplot2 verbs used below
# are already in scope (this is a plotting script sourced after the model run).
a_r_trade_off <- model_out_mnfe4_model1 %>%
# keep only the high-iron runs so dissolved Mn is the varying driver
filter(Fex == 1000) %>%
# collapse to one point per (Mn, Fe) treatment by averaging all outputs
group_by(Mnx, Fex) %>%
summarize_all(mean) %>%
ggplot(aes(x = R, y = A/2,
fill = Mnx)) +
theme_bw() +
geom_point(size = 4, pch = 21) +
# scientific_10 (defined above) renders axis labels as n %*% 10^k
scale_y_continuous(label=scientific_10) +
scale_x_continuous(label=scientific_10) +
xlab("Ribosomes (per cell)") +
scale_fill_distiller(name = 'dMn (pM)',
palette = 'GnBu') +
# legend drawn inside the panel; x tick labels angled to avoid overlap
theme(legend.position = c(0.72, 0.68), axis.text.x = element_text(angle = 45, hjust = 1),
legend.background = element_blank()) +
ylab("Antioxidants (MnSOD per cell)");a_r_trade_off
# Translation allocation trade-off: proportion of ribosomes synthesizing
# ribosomes (beta_r) vs proportion synthesizing antioxidants (beta_a),
# coloured by growth rate (u_trans) and shaped by the dFe treatment,
# at fixed dMn = 1000 pM.
translation_allocation_tradeoff <- model_out_mnfe4_model1 %>%
filter(Fex == 1 | Fex == 50 | Fex == 100,
Mnx == 1000) %>%
ggplot(aes(x = beta_r,
y = beta_a,
colour = u_trans,
shape = factor(Fex))) +
geom_point(size = 4, alpha = 0.8) +
xlab("Proportion of Ribosomes synthesizing Ribosomes") +
ylab("Proportion of Ribosomes synthesizing\nAntioxidants") +
theme_bw() +
theme(legend.position = c(0.2, 0.76),
legend.direction = 'horizontal') +
scale_color_continuous('Growth Rate (per day)') +
scale_shape_discrete('dFe (pM)') +
# centre both legend titles above their keys
guides(colour = guide_colourbar(title.position="top", title.hjust = 0.5),
shape = guide_legend(title.position="top", title.hjust = 0.5)) +
theme(legend.background = element_blank(),
legend.key.size = unit(0.5, "cm"),
legend.margin = margin(0,0,0,0, unit="cm"));translation_allocation_tradeoff
# Facet label per iron treatment, e.g. "dFe = 50pM" (with sep = '' there is
# no space before "pM" -- cosmetic only).
model_out_mnfe4_model1$Fex_label <- paste('dFe = ', model_out_mnfe4_model1$Fex, 'pM', sep = '')
# Distribution of cellular Fe quotas for the same three dFe treatments,
# one density facet per treatment, facets ordered by the numeric Fex value.
amol_per_cell_tradeoff <- model_out_mnfe4_model1 %>%
filter(Fex == 50 | Fex == 100 | Fex == 1,
Mnx == 1000) %>%
ggplot() +
geom_density(aes(total_fe_amol)) +
facet_wrap(~fct_reorder(Fex_label, Fex), nrow = 3) +
xlab('Fe Quota (aMol per cell)') +
theme_bw() +
theme(strip.background = element_rect(fill = 'white')) +
ylab('Kernel Density')
## Aggregate the three panels into a single row (middle panel double width)
## and label them A-C for the composite figure.
lower_plots_internal_lim_rearrange <- ggarrange(a_r_trade_off, translation_allocation_tradeoff,
amol_per_cell_tradeoff,
widths = c(1, 2, 1), nrow = 1,
labels = c('A', 'B', 'C'), font.label = list(size = 9))
## Write the combined figure at print resolution (1000 dpi). The figures/
## directory is assumed to already exist relative to the working directory.
ggsave(lower_plots_internal_lim_rearrange,
filename = "figures/internal_lim_consequences_3.png",
width = 10.7,
height = 4.16, dpi = 1000)
| /scripts/plotting_antioxidant_tradeoffs_ribosomes.R | no_license | bertrand-lab/mn-fe-allocation | R | false | false | 2,646 | r | library(forcats)
scientific_10 <- function(x) {
parse(text=gsub("e", " %*% 10^", scales::scientific_format()(x)))
}
# plotting ribosomae antioxidant trade-off
a_r_trade_off <- model_out_mnfe4_model1 %>%
filter(Fex == 1000) %>%
group_by(Mnx, Fex) %>%
summarize_all(mean) %>%
ggplot(aes(x = R, y = A/2,
fill = Mnx)) +
theme_bw() +
geom_point(size = 4, pch = 21) +
scale_y_continuous(label=scientific_10) +
scale_x_continuous(label=scientific_10) +
xlab("Ribosomes (per cell)") +
scale_fill_distiller(name = 'dMn (pM)',
palette = 'GnBu') +
theme(legend.position = c(0.72, 0.68), axis.text.x = element_text(angle = 45, hjust = 1),
legend.background = element_blank()) +
ylab("Antioxidants (MnSOD per cell)");a_r_trade_off
translation_allocation_tradeoff <- model_out_mnfe4_model1 %>%
filter(Fex == 1 | Fex == 50 | Fex == 100,
Mnx == 1000) %>%
ggplot(aes(x = beta_r,
y = beta_a,
colour = u_trans,
shape = factor(Fex))) +
geom_point(size = 4, alpha = 0.8) +
xlab("Proportion of Ribosomes synthesizing Ribosomes") +
ylab("Proportion of Ribosomes synthesizing\nAntioxidants") +
theme_bw() +
theme(legend.position = c(0.2, 0.76),
legend.direction = 'horizontal') +
scale_color_continuous('Growth Rate (per day)') +
scale_shape_discrete('dFe (pM)') +
guides(colour = guide_colourbar(title.position="top", title.hjust = 0.5),
shape = guide_legend(title.position="top", title.hjust = 0.5)) +
theme(legend.background = element_blank(),
legend.key.size = unit(0.5, "cm"),
legend.margin = margin(0,0,0,0, unit="cm"));translation_allocation_tradeoff
model_out_mnfe4_model1$Fex_label <- paste('dFe = ', model_out_mnfe4_model1$Fex, 'pM', sep = '')
# plotting the distribution of Fe quotas
amol_per_cell_tradeoff <- model_out_mnfe4_model1 %>%
filter(Fex == 50 | Fex == 100 | Fex == 1,
Mnx == 1000) %>%
ggplot() +
geom_density(aes(total_fe_amol)) +
facet_wrap(~fct_reorder(Fex_label, Fex), nrow = 3) +
xlab('Fe Quota (aMol per cell)') +
theme_bw() +
theme(strip.background = element_rect(fill = 'white')) +
ylab('Kernel Density')
## aggregating plots
lower_plots_internal_lim_rearrange <- ggarrange(a_r_trade_off, translation_allocation_tradeoff,
amol_per_cell_tradeoff,
widths = c(1, 2, 1), nrow = 1,
labels = c('A', 'B', 'C'), font.label = list(size = 9))
ggsave(lower_plots_internal_lim_rearrange,
filename = "figures/internal_lim_consequences_3.png",
width = 10.7,
height = 4.16, dpi = 1000)
|
library(sp)
library(rgdal)
library(raster)
# Create a regular grid of points over the bounding box of the
# SpatialPolygonsDataFrame `mapa` (assumed already loaded by the caller;
# cellsize = 0.1 presumably means 0.1 decimal degrees -- confirm against
# mapa's CRS).
grid <- makegrid(mapa, cellsize = 0.1)
# makegrid() returns a plain data.frame; promote it to SpatialPoints in the
# same CRS as `mapa` so the spatial subsetting below is valid.
grid <- SpatialPoints(grid, proj4string = CRS(proj4string(mapa)))
# Keep only the grid points falling inside the polygons of `mapa`.
grid <- grid[mapa, ]
# Visual check: polygons with the retained grid points overlaid.
plot(mapa)
plot(grid, pch = ".", add = TRUE)  # TRUE, not the reassignable alias T
| /Subsetmapa.R | no_license | bmsw/Statistics | R | false | false | 405 | r | library(sp)
library(rgdal)
library(raster)
# Create a grid of points within the bbox of the SpatialPolygonsDataFrame
# colorado with decimal degrees as map units
grid <- makegrid(mapa, cellsize = 0.1)
# grid is a data.frame. To change it to a spatial data set we have to
grid <- SpatialPoints(grid, proj4string = CRS(proj4string(mapa)))
grid <- grid[mapa, ]
plot(mapa)
plot(grid, pch = ".", add = T)
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 22510
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 22510
c
c Input Parameter (command line, file):
c input filename QBFLIB/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#134.A#48.c#.w#7.s#48.asp.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 7685
c no.of clauses 22510
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 22510
c
c QBFLIB/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#134.A#48.c#.w#7.s#48.asp.qdimacs 7685 22510 E1 [] 0 134 7551 22510 NONE
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#134.A#48.c#.w#7.s#48.asp/ctrl.e#1.a#3.E#134.A#48.c#.w#7.s#48.asp.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 732 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 22510
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 22510
c
c Input Parameter (command line, file):
c input filename QBFLIB/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#134.A#48.c#.w#7.s#48.asp.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 7685
c no.of clauses 22510
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 22510
c
c QBFLIB/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#134.A#48.c#.w#7.s#48.asp.qdimacs 7685 22510 E1 [] 0 134 7551 22510 NONE
|
# utilize the PortfolioAnalytics package to optimize portfolios
library(PortfolioAnalytics)
library(ROI)
data("edhec")
# Use historical hedge fund data
ret <- edhec
names <- colnames(ret)
# Investigate hedge fund return data
# First look at the correlation
hf.cor <- cor(data)
library(corrplot)
corrplot.mixed(hf.cor, upper = "color", tl.col = "black")
# calculate expected returns, covariance, and portfolio returns given weights w
w <- as.matrix(c(rep(1 / ncol(ret), ncol(ret))))
mu <- as.matrix(colMeans(ret))
sigma <- cov(ret)
# expected returns of individual assets given weights w
mu * w
# Expected equal weight benchmark portfolio return
t(w) %*% mu
# Expected benchmark portfolio volatility
sqrt(t(w) %*% sigma %*% w)
# Create portfolio specification object
prt.spc <- portfolio.spec(names)
# Begin adding constraints to specifications
prt.spc <- add.constraint(portfolio = prt.spc, type = "long_only")
prt.spc <- add.constraint(portfolio = prt.spc, type = "full_investment")
# Add objectives to specification
prt.spc <- add.objective(prt.spc, type = "return", name = "mean")
prt.spc <- add.objective(prt.spc, type = "risk", name = "StdDev")
# View the specifications
prt.spc
# Run portfolio optimization using quadratic solution
opt <- optimize.portfolio(ret, portfolio = prt.spc, optimize_method = "ROI")
opt
# Plot individual assets, and optimized solution
chart.RiskReward(opt, risk.col = "StdDev", return.col = "mean", chart.assets = TRUE)
# Examine optimized portfolio weights
# No diversification
extractWeights(opt)
# Extract objective measures
om <- extractObjectiveMeasures(opt)
# Compare to benchmark returns
om$mean
t(w) %*% mu
# Compare to benchmark standard deviation
sqrt(t(w) %*% sigma %*% w)
om$StdDev[1]
# Examine the performance
opt.prt.rtn <- Return.portfolio(ret, weights = opt$weights)
prt.rtn <- Return.portfolio(ret, weights = as.numeric(w))
chart.CumReturns(opt.prt.rtn, col = "blue")
lines(cumprod(1 + prt.rtn) - 1, col = "red")
addLegend("topleft", legend.names = c("Optimized", "Benchmark"), col = c("blue", "red"), lwd = 2)
# It would appear the optimized portfolio outperformed the equal weighted portfolio
# However, it failed to reduce standard devivation or achieve diversification
sd(opt.prt.rtn)
sd(prt.rtn)
# Check weights for diversification, or lack thereof
barplot(opt$weights)
# Check risk budget
vol_budget <- StdDev(R = ret, portfolio_method = "component", weights = opt$weights)
barplot(vol_budget)
vol_budget
# The optimized portfolio suffered a larger max drawdown
maxDrawdown(opt.prt.rtn)
maxDrawdown(prt.rtn)
# Summarize results of optimized portfolio
charts.PerformanceSummary(opt.prt.rtn)
charts.PerformanceSummary(prt.rtn)
# Value at risk also increased
VaR(opt.prt.rtn)
VaR(prt.rtn)
# Add a risk budget constraint to increase diversification and reduce volatility
prt.spc2 <- add.objective(prt.spc, type = "risk_budget", name = "StdDev", min_prisk = 0, max_prisk = .15)
prt.spc2
# Run optimization using a random optimization solution
opt2 <- optimize.portfolio(R = ret, portfolio = prt.spc2,
optimize_method = "random", trace = TRUE)
opt2
# Make it easier to access benchmark
benchmark <- list(mean = t(w) %*% mu, StdDev = (sqrt(t(w) %*% sigma %*% w)))
# Check objective measures
om2 <- extractObjectiveMeasures(opt2)
om2$mean - benchmark$mean
om2$StdDev$StdDev - benchmark$StdDev
barplot(c(benchmark$mean, om$mean, om2$mean), ylim = c(0, 0.01))
barplot(c(benchmark$StdDev, om$StdDev, om2$StdDev$StdDev), ylim = c(0, 0.02))
# Chart the results of the generated portfolios and the efficient frontier
chart.RiskReward(opt2, risk.col = "StdDev", return.col = "mean", chart.assets = TRUE)
opt2.prt.rtn <- Return.portfolio(ret, weights = opt2$weights)
chart.CumReturns(opt2.prt.rtn, lwd = 2, ylim = range(cumprod(1 + opt.prt.rtn) - 1))
lines(cumprod(1 + opt.prt.rtn) - 1, col = "blue")
lines(cumprod(1 + prt.rtn) - 1, col = "red")
addLegend(legend.loc = "topleft", legend.names = c("Risk Budget", "Optimized", "Equal Weight"),
lty = 1, lwd = 2, col = c("black", "blue", "red"))
# Try a different non-quadratic optimization method
library(DEoptim)
opt3 <- optimize.portfolio(R = ret, portfolio = prt.spc2,
optimize_method = "DEoptim", trace = TRUE)
om3 <- extractObjectiveMeasures(opt3)
om3$mean - om2$mean
om3$mean - benchmark$mean
om3$StdDev$StdDev - om2$StdDev$StdDev
om3$StdDev$StdDev - benchmark$StdDev
barplot(c(benchmark$mean, om$mean, om2$mean, om3$mean), ylim = c(0, 0.01))
barplot(c(benchmark$StdDev, om$StdDev, om2$StdDev$StdDev, om3$StdDev$StdDev), ylim = c(0, 0.02))
# It would appear the Differential Evolution optimization found a higher mean than the
# random portfolio method, while also lowering standard deviation
chart.RiskReward(opt3, risk.col = "StdDev", return.col = "mean", chart.assets = TRUE)
# Compare metrics
VaR(Return.portfolio(R = ret, weights = as.numeric(w)))
VaR(Return.portfolio(R = ret, weights = opt$weights))
VaR(Return.portfolio(R = ret, weights = opt2$weights))
VaR(Return.portfolio(R = ret, weights = opt3$weights))
opt3.prt.rtn <- Return.portfolio(R = ret, weights = opt3$weights)
# Plot the returns of all portfolio solutions
chart.CumReturns(opt3.prt.rtn, lwd = 2, ylim = range(cumprod(1 + opt.prt.rtn) - 1))
lines(cumprod(1 + prt.rtn) - 1, col = "blue")
lines(cumprod(1 + opt.prt.rtn) - 1, col = "green")
lines(cumprod(1 + opt2.prt.rtn) - 1, col = "red")
addLegend(legend.loc = "topleft", legend.names = c("DEoptim", "Equal Weight", "Quadratic", "Random"),
lty = 1, lwd = 2, col = c("black", "blue", "green", "red"))
charts.PerformanceSummary(opt3.prt.rtn)
extractWeights(opt3)
barplot(opt3$weights)
chart.RollingPerformance(R = opt3.prt.rtn, width = 12, FUN = "SharpeRatio.annualized", Rf = 0.02 / 12)
charts.RollingPerformance(R = opt3.prt.rtn, width = 12, Rf = 0.02 / 12)
table.AnnualizedReturns(R = opt3.prt.rtn, scale = 12, Rf = 0.02 / 12)
# While the risk budget objective worked as expected the total returns were disappointing
# Add a sharpe ratio parameter to the optimization, and remove the risk budget
opt4 <- optimize.portfolio(R = ret, portfolio = prt.spc, optimize_method = "ROI", maxSR = TRUE)
barplot(opt4$weights)
opt4.prt.rtn <- Return.portfolio(ret, weights = opt4$weights)
# Check performance
charts.PerformanceSummary(R = opt4.prt.rtn, Rf = 0.02, main = "MaxSR")
# Looking for higher return than the risk budget optimization, but with more
# diversification than the original quadratic optimization
# Add a box constraint capping any single weight at 15% to force
# diversification while keeping the quadratic (ROI) solver.
prt.spc5 <- add.constraint(portfolio = prt.spc, type = "box", min = 0, max = 0.15)
opt5 <- optimize.portfolio(R = ret, portfolio = prt.spc5, optimize_method = "ROI")
opt5.prt.rtn <- Return.portfolio(R = ret, weights = opt5$weights)
barplot(opt5$weights)
# Bug fix: removed the premature `portfolios$Box <- opt5.prt.rtn` here --
# the `portfolios` object is only created further down (via cbind), so the
# assignment failed with "object 'portfolios' not found". The box-portfolio
# returns are still included when `portfolios` is built below.
# Per-asset risk contribution under the box-constrained weights.
vol_budget <- StdDev(R = ret, portfolio_method = "component", weights = opt5$weights)
barplot(vol_budget$pct_contrib_StdDev)
table.DownsideRiskRatio(opt5.prt.rtn)
# Plot the returns of all portfolio solutions
chart.CumReturns(prt.rtn, lwd = 2, ylim = range(cumprod(1 + opt.prt.rtn) - 1))
lines(cumprod(1 + opt.prt.rtn) - 1, col = "green")
lines(cumprod(1 + opt2.prt.rtn) - 1, col = "red")
lines(cumprod(1 + opt3.prt.rtn) - 1, col = "yellow")
lines(cumprod(1 + opt4.prt.rtn) - 1, col = "blue")
lines(cumprod(1 + opt5.prt.rtn) - 1, col = "grey")
addLegend(legend.loc = "topleft", legend.names = c("Equal Weight", "Quadratic", "Random", "DEoptim", "MaxSR", "Box"),
lty = 1, lwd = 2, col = c("black", "green", "red", "yellow", "blue", "grey"))
# Clean up the process of comparing portfolios
portfolios <- cbind(prt.rtn, opt.prt.rtn, opt2.prt.rtn, opt3.prt.rtn, opt4.prt.rtn, opt5.prt.rtn)
names(portfolios) <- c("Equal", "Quadratic", "Random", "DEoptim", "MaxSR", "Box")
lapply(portfolios, table.AnnualizedReturns, scale = 12, Rf = 0.02 / 12)
lapply(portfolios, FUN = SortinoRatio, MAR = 0.001)
lapply(portfolios, FUN = table.Drawdowns)
# Apply periodic rebalancing to all portfolios
# Start with a simple quarterly rebalance for the equal weight portfolio
equal.rebal <- Return.rebalancing(R = ret, weights = rep(1 / ncol(ret), ncol(ret)), rebalance_on = "quarters")
charts.PerformanceSummary(equal.rebal)
table.AnnualizedReturns(equal.rebal)
# Rebalance using the various optimization methods
quadratic.rebal <- optimize.portfolio.rebalancing(R = ret, portfolio = prt.spc,
optimize_method = "ROI", rebalance_on = "quarters",
training_period = 60, rolling_window = 60)
Return.portfolio(R = ret, weights = extractWeights(quadratic.rebal), rebalance_on = "quarters")
random.rebal <- optimize.portfolio.rebalancing(R = ret, portfolio = prt.spc2,
optimize_method = "random", rebalance_on = "quarters",
training_period = 60, rolling_window = 60, trace = TRUE)
random.rebal.returns <- Return.portfolio(R = ret, weights = extractWeights(random.rebal), rebalance_on = "quarters")
DEoptim.rebal <- optimize.portfolio.rebalancing(R = ret, portfolio = prt.spc2,
optimize_method = "DEoptim", rebalance_on = "quarters",
training_period = 60, rolling_window = 60, trace = TRUE)
DEoptim.rebal.returns <- Return.portfolio(R = ret, weights = extractWeights(DEoptim.rebal), rebalance_on = "quarters")
chart.CumReturns(equal.rebal["2002/"])
lines(cumprod(1 + random.rebal.returns) - 1, col = "blue")
lines(cumprod(1 + DEoptim.rebal.returns) - 1, col = "red")
addLegend(legend.loc = "topleft", legend.names = c("Equal Weight", "Random", "DEoptim"), lwd = 2, col = c("black", "blue", "red"))
# Maximum-Sharpe optimization with quarterly rebalancing.
# Bug fix: the flag is spelled `maxSR`; the original passed `maxSR.rebal = TRUE`,
# an argument optimize.portfolio.rebalancing() does not recognise.
maxSR.rebal <- optimize.portfolio.rebalancing(
  R = ret, portfolio = prt.spc,
  optimize_method = "ROI", rebalance_on = "quarters",
  training_period = 60, rolling_window = 60, maxSR = TRUE
)
maxSR.rebal.returns <- Return.portfolio(R = ret, weights = extractWeights(maxSR.rebal))
# Box-constrained rebalancing.
# Bug fix: use the box specification prt.spc5 -- the original passed prt.spc,
# silently re-running the unconstrained quadratic problem under a "box" name.
box.rebal <- optimize.portfolio.rebalancing(
  R = ret, portfolio = prt.spc5,
  optimize_method = "ROI", rebalance_on = "quarters",
  training_period = 60, rolling_window = 60
)
# Bug fix: date-subset the extracted xts weights, not the optimization
# object itself (quadratic.rebal["2001/ "] indexed a list with a date string
# -- note also the stray trailing space in the original subscript).
quadratic.rebal.returns <- Return.portfolio(R = ret, weights = extractWeights(quadratic.rebal)["2001/"])
charts.PerformanceSummary(quadratic.rebal.returns)
# Chart results of the rebalanced random-portfolio optimization (built from
# the risk-budget spec prt.spc2 above).
# Bug fix: the original referenced `opt2.rebal`, which is never defined
# anywhere in this script; the corresponding object is `random.rebal`.
chart.Weights(random.rebal)
rr2 <- Return.portfolio(ret, weights = extractWeights(random.rebal))
chart.CumReturns(rr2, ylim = c(0, 0.75))
# Inspect risk / reward metrics for the rebalanced series.
charts.PerformanceSummary(rr2)
| /PortfolioAnalytics.R | no_license | RomeoAlphaYankee/FinanceR | R | false | false | 11,007 | r | # utilize the PortfolioAnalytics package to optimize portfolios
library(PortfolioAnalytics)
library(ROI)
data("edhec")
# Use historical hedge fund data
ret <- edhec
names <- colnames(ret)
# Investigate hedge fund return data
# First look at the correlation
hf.cor <- cor(data)
library(corrplot)
corrplot.mixed(hf.cor, upper = "color", tl.col = "black")
# calculate expected returns, covariance, and portfolio returns given weights w
w <- as.matrix(c(rep(1 / ncol(ret), ncol(ret))))
mu <- as.matrix(colMeans(ret))
sigma <- cov(ret)
# expected returns of individual assets given weights w
mu * w
# Expected equal weight benchmark portfolio return
t(w) %*% mu
# Expected benchmark portfolio volatility
sqrt(t(w) %*% sigma %*% w)
# Create portfolio specification object
prt.spc <- portfolio.spec(names)
# Begin adding constraints to specifications
prt.spc <- add.constraint(portfolio = prt.spc, type = "long_only")
prt.spc <- add.constraint(portfolio = prt.spc, type = "full_investment")
# Add objectives to specification
prt.spc <- add.objective(prt.spc, type = "return", name = "mean")
prt.spc <- add.objective(prt.spc, type = "risk", name = "StdDev")
# View the specifications
prt.spc
# Run portfolio optimization using quadratic solution
opt <- optimize.portfolio(ret, portfolio = prt.spc, optimize_method = "ROI")
opt
# Plot individual assets, and optimized solution
chart.RiskReward(opt, risk.col = "StdDev", return.col = "mean", chart.assets = TRUE)
# Examine optimized portfolio weights
# No diversification
extractWeights(opt)
# Extract objective measures
om <- extractObjectiveMeasures(opt)
# Compare to benchmark returns
om$mean
t(w) %*% mu
# Compare to benchmark standard deviation
sqrt(t(w) %*% sigma %*% w)
om$StdDev[1]
# Examine the performance
opt.prt.rtn <- Return.portfolio(ret, weights = opt$weights)
prt.rtn <- Return.portfolio(ret, weights = as.numeric(w))
chart.CumReturns(opt.prt.rtn, col = "blue")
lines(cumprod(1 + prt.rtn) - 1, col = "red")
addLegend("topleft", legend.names = c("Optimized", "Benchmark"), col = c("blue", "red"), lwd = 2)
# It would appear the optimized portfolio outperformed the equal weighted portfolio
# However, it failed to reduce standard devivation or achieve diversification
sd(opt.prt.rtn)
sd(prt.rtn)
# Check weights for diversification, or lack thereof
barplot(opt$weights)
# Check risk budget
vol_budget <- StdDev(R = ret, portfolio_method = "component", weights = opt$weights)
barplot(vol_budget)
vol_budget
# The optimized portfolio suffered a larger max drawdown
maxDrawdown(opt.prt.rtn)
maxDrawdown(prt.rtn)
# Summarize results of optimized portfolio
charts.PerformanceSummary(opt.prt.rtn)
charts.PerformanceSummary(prt.rtn)
# Value at risk also increased
VaR(opt.prt.rtn)
VaR(prt.rtn)
# Add a risk budget constraint to increase diversification and reduce volatility
prt.spc2 <- add.objective(prt.spc, type = "risk_budget", name = "StdDev", min_prisk = 0, max_prisk = .15)
prt.spc2
# Run optimization using a random optimization solution
opt2 <- optimize.portfolio(R = ret, portfolio = prt.spc2,
optimize_method = "random", trace = TRUE)
opt2
# Make it easier to access benchmark
benchmark <- list(mean = t(w) %*% mu, StdDev = (sqrt(t(w) %*% sigma %*% w)))
# Check objective measures
om2 <- extractObjectiveMeasures(opt2)
om2$mean - benchmark$mean
om2$StdDev$StdDev - benchmark$StdDev
barplot(c(benchmark$mean, om$mean, om2$mean), ylim = c(0, 0.01))
barplot(c(benchmark$StdDev, om$StdDev, om2$StdDev$StdDev), ylim = c(0, 0.02))
# Chart the results of the generated portfolios and the efficient frontier
chart.RiskReward(opt2, risk.col = "StdDev", return.col = "mean", chart.assets = TRUE)
opt2.prt.rtn <- Return.portfolio(ret, weights = opt2$weights)
chart.CumReturns(opt2.prt.rtn, lwd = 2, ylim = range(cumprod(1 + opt.prt.rtn) - 1))
lines(cumprod(1 + opt.prt.rtn) - 1, col = "blue")
lines(cumprod(1 + prt.rtn) - 1, col = "red")
addLegend(legend.loc = "topleft", legend.names = c("Risk Budget", "Optimized", "Equal Weight"),
lty = 1, lwd = 2, col = c("black", "blue", "red"))
# --- Differential Evolution and max-Sharpe optimizations ------------------------
# Try a different non-quadratic optimization method
library(DEoptim)
opt3 <- optimize.portfolio(R = ret, portfolio = prt.spc2,
optimize_method = "DEoptim", trace = TRUE)
om3 <- extractObjectiveMeasures(opt3)
# Pairwise comparison of mean return and StdDev against the earlier solutions
om3$mean - om2$mean
om3$mean - benchmark$mean
om3$StdDev$StdDev - om2$StdDev$StdDev
om3$StdDev$StdDev - benchmark$StdDev
barplot(c(benchmark$mean, om$mean, om2$mean, om3$mean), ylim = c(0, 0.01))
barplot(c(benchmark$StdDev, om$StdDev, om2$StdDev$StdDev, om3$StdDev$StdDev), ylim = c(0, 0.02))
# It would appear the Differential Evolution optimization found a higher mean than the
# random portfolio method, while also lowering standard deviation
chart.RiskReward(opt3, risk.col = "StdDev", return.col = "mean", chart.assets = TRUE)
# Compare metrics: historical Value-at-Risk of each weight vector
VaR(Return.portfolio(R = ret, weights = as.numeric(w)))
VaR(Return.portfolio(R = ret, weights = opt$weights))
VaR(Return.portfolio(R = ret, weights = opt2$weights))
VaR(Return.portfolio(R = ret, weights = opt3$weights))
opt3.prt.rtn <- Return.portfolio(R = ret, weights = opt3$weights)
# Plot the returns of all portfolio solutions
chart.CumReturns(opt3.prt.rtn, lwd = 2, ylim = range(cumprod(1 + opt.prt.rtn) - 1))
lines(cumprod(1 + prt.rtn) - 1, col = "blue")
lines(cumprod(1 + opt.prt.rtn) - 1, col = "green")
lines(cumprod(1 + opt2.prt.rtn) - 1, col = "red")
addLegend(legend.loc = "topleft", legend.names = c("DEoptim", "Equal Weight", "Quadratic", "Random"),
lty = 1, lwd = 2, col = c("black", "blue", "green", "red"))
charts.PerformanceSummary(opt3.prt.rtn)
extractWeights(opt3)
barplot(opt3$weights)
# Rolling 12-month annualized Sharpe (Rf given as monthly rate)
chart.RollingPerformance(R = opt3.prt.rtn, width = 12, FUN = "SharpeRatio.annualized", Rf = 0.02 / 12)
charts.RollingPerformance(R = opt3.prt.rtn, width = 12, Rf = 0.02 / 12)
table.AnnualizedReturns(R = opt3.prt.rtn, scale = 12, Rf = 0.02 / 12)
# While the risk budget objective worked as expected the total returns were disappointing
# Add a sharpe ratio parameter to the optimization, and remove the risk budget
# (maxSR = TRUE asks optimize.portfolio for the maximum Sharpe ratio portfolio)
opt4 <- optimize.portfolio(R = ret, portfolio = prt.spc, optimize_method = "ROI", maxSR = TRUE)
barplot(opt4$weights)
opt4.prt.rtn <- Return.portfolio(ret, weights = opt4$weights)
# Check performance
charts.PerformanceSummary(R = opt4.prt.rtn, Rf = 0.02, main = "MaxSR")
# --- Box-constrained optimization and side-by-side comparison -------------------
# Looking for higher return than the risk budget optimization, but with more
# diversification than the original quadratic optimization
# Add a box constraint to the weights
prt.spc5 <- add.constraint(portfolio = prt.spc, type = "box", min = 0, max = 0.15)
opt5 <- optimize.portfolio(R = ret, portfolio = prt.spc5, optimize_method = "ROI")
opt5.prt.rtn <- Return.portfolio(R = ret, weights = opt5$weights)
barplot(opt5$weights)
# NOTE(review): `portfolios` is not created until the cbind() below, so this
# assignment errors ("object 'portfolios' not found") when the script is run
# top-to-bottom, and is overwritten later in any case.
portfolios$Box <- opt5.prt.rtn
# Component risk contributions of the box-constrained weights
vol_budget <- StdDev(R = ret, portfolio_method = "component", weights = opt5$weights)
barplot(vol_budget$pct_contrib_StdDev)
table.DownsideRiskRatio(opt5.prt.rtn)
# Plot the returns of all portfolio solutions
chart.CumReturns(prt.rtn, lwd = 2, ylim = range(cumprod(1 + opt.prt.rtn) - 1))
lines(cumprod(1 + opt.prt.rtn) - 1, col = "green")
lines(cumprod(1 + opt2.prt.rtn) - 1, col = "red")
lines(cumprod(1 + opt3.prt.rtn) - 1, col = "yellow")
lines(cumprod(1 + opt4.prt.rtn) - 1, col = "blue")
lines(cumprod(1 + opt5.prt.rtn) - 1, col = "grey")
addLegend(legend.loc = "topleft", legend.names = c("Equal Weight", "Quadratic", "Random", "DEoptim", "MaxSR", "Box"),
lty = 1, lwd = 2, col = c("black", "green", "red", "yellow", "blue", "grey"))
# Clean up the process of comparing portfolios
portfolios <- cbind(prt.rtn, opt.prt.rtn, opt2.prt.rtn, opt3.prt.rtn, opt4.prt.rtn, opt5.prt.rtn)
names(portfolios) <- c("Equal", "Quadratic", "Random", "DEoptim", "MaxSR", "Box")
lapply(portfolios, table.AnnualizedReturns, scale = 12, Rf = 0.02 / 12)
lapply(portfolios, FUN = SortinoRatio, MAR = 0.001)
lapply(portfolios, FUN = table.Drawdowns)
# --- Periodic rebalancing ------------------------------------------------------
# Apply periodic rebalancing to all portfolios
# Start with a simple quarterly rebalance for the equal weight portfolio
equal.rebal <- Return.rebalancing(R = ret, weights = rep(1 / ncol(ret), ncol(ret)), rebalance_on = "quarters")
charts.PerformanceSummary(equal.rebal)
table.AnnualizedReturns(equal.rebal)
# Rebalance using the various optimization methods
# (60-period training window, 60-period rolling estimation window)
quadratic.rebal <- optimize.portfolio.rebalancing(R = ret, portfolio = prt.spc,
optimize_method = "ROI", rebalance_on = "quarters",
training_period = 60, rolling_window = 60)
Return.portfolio(R = ret, weights = extractWeights(quadratic.rebal), rebalance_on = "quarters")
random.rebal <- optimize.portfolio.rebalancing(R = ret, portfolio = prt.spc2,
optimize_method = "random", rebalance_on = "quarters",
training_period = 60, rolling_window = 60, trace = TRUE)
random.rebal.returns <- Return.portfolio(R = ret, weights = extractWeights(random.rebal), rebalance_on = "quarters")
DEoptim.rebal <- optimize.portfolio.rebalancing(R = ret, portfolio = prt.spc2,
optimize_method = "DEoptim", rebalance_on = "quarters",
training_period = 60, rolling_window = 60, trace = TRUE)
DEoptim.rebal.returns <- Return.portfolio(R = ret, weights = extractWeights(DEoptim.rebal), rebalance_on = "quarters")
chart.CumReturns(equal.rebal["2002/"])
lines(cumprod(1 + random.rebal.returns) - 1, col = "blue")
lines(cumprod(1 + DEoptim.rebal.returns) - 1, col = "red")
addLegend(legend.loc = "topleft", legend.names = c("Equal Weight", "Random", "DEoptim"), lwd = 2, col = c("black", "blue", "red"))
# NOTE(review): `maxSR.rebal = TRUE` looks like a typo for `maxSR = TRUE` -- as
# written the argument name does not match anything and is silently swallowed
# by `...`, so this is a plain mean/StdDev optimization, not max-Sharpe.
maxSR.rebal <- optimize.portfolio.rebalancing(R = ret, portfolio = prt.spc,
optimize_method = "ROI", rebalance_on = "quarters",
training_period = 60, rolling_window = 60, maxSR.rebal = TRUE)
maxSR.rebal.returns <- Return.portfolio(R = ret, weights = extractWeights(maxSR.rebal))
# NOTE(review): `portfolio = prt.spc` here -- presumably the box-constrained
# spec `prt.spc5` was intended for box.rebal; verify.
box.rebal <- optimize.portfolio.rebalancing(R = ret, portfolio = prt.spc,
optimize_method = "ROI", rebalance_on = "quarters",
training_period = 60, rolling_window = 60)
# NOTE(review): "2001/ " contains a stray trailing space, and date-subsetting an
# optimize.portfolio.rebalancing object with `[` is dubious -- the subset likely
# belongs on extractWeights()' result instead. TODO confirm.
quadratic.rebal.returns <- Return.portfolio(R = ret, weights = extractWeights(quadratic.rebal["2001/ "]))
charts.PerformanceSummary(quadratic.rebal.returns)
# Chart results of rebalanced portfolio
# Plot results of second rebalanced portfolio optimization
# NOTE(review): `opt2.rebal` is never defined anywhere in this script -- these
# two lines error as written; `random.rebal` is probably what was meant.
chart.Weights(opt2.rebal)
rr2 <- Return.portfolio(ret, weights = extractWeights(opt2.rebal))
chart.CumReturns(rr2, ylim = c(0, 0.75))
# Compare results of prior optimizations
# Inspect risk / reward metrics
charts.PerformanceSummary(rr2)
|
###############
### COCA Climate Data Exploration ###
###############
# Exploratory script: builds projected SST rasters for the US Northeast shelf
# by adding CMIP5 ensemble anomalies to an OISST-derived monthly climatology.
# NOTE(review): raster::getData downloads GADM boundaries at run time (needs
# network access); maptools/rgeos are retired packages -- presumably sf/terra
# would be the modern replacements. TODO confirm before reuse.
## Libraries
library(ncdf4)
library(raster)
library(maptools)
library(ggplot2)
library(tidyverse)
library(rgeos)
library(zoo)
library(viridis)
## Some spatial stuff for data visualization
# Spatial projections (PROJ "+init" strings)
proj.wgs84<- CRS("+init=epsg:4326") #WGS84
proj.utm<- CRS("+init=epsg:2960") #UTM 19
# Bounds (lon/lat) used by the map plots below
xlim.use<- c(-77, -65)
ylim.use<- c(35.05, 45.2)
# NOTE(review): "West Virgina" is misspelled, so West Virginia never matches
# NAME_1 below and is silently dropped from the basemap.
states <- c("Maine", "New Hampshire", "Massachusetts", "Vermont", "New York", "Rhode Island", "Connecticut", "Delaware", "New Jersey", "Maryland", "Pennsylvania", "Virginia", "North Carolina", "South Carolina", "Georgia", "Florida", "District of Columbia", "West Virgina")
provinces <- c("Ontario", "Québec", "Nova Scotia", "New Brunswick")
# Download level-1 (state/province) boundaries and keep/simplify the subset
us <- raster::getData("GADM",country="USA",level=1)
us.states <- us[us$NAME_1 %in% states,]
us.states <- gSimplify(us.states, tol = 0.075, topologyPreserve = TRUE)
canada <- raster::getData("GADM",country="CAN",level=1)
ca.provinces <- canada[canada$NAME_1 %in% provinces,]
ca.provinces <- gSimplify(ca.provinces, tol = 0.075, topologyPreserve = TRUE)
# fortify() converts the SpatialPolygons to data.frames for ggplot2
us.states.f<- fortify(us.states, NAME_1)
ca.provinces.f<- fortify(ca.provinces, NAME_1)
#########
#### Applying anomalies -- mean
#########
## Inspect the file using the ncdf4 library
## Get sst anomaly (CMIP5 monthly mean SST anomaly, rotated from 0..360 to -180..180 lon)
sst.anom.temp<- raster::stack("~/GitHub/COCA/Data/SST.CMIP5.1982-2099.anom.nc", varname = "sstanom")
sst.anom<- raster::rotate(sst.anom.temp)
## Get oisst data (daily observed SST; hard-coded local path)
oisst.dat.temp<- raster::stack("~/Dropbox/Andrew/Work/GMRI/Projects/AllData/EC_sst_1981_2015_OISST-V2-AVHRR_agg_combined.nc")
oisst.dat<- raster::rotate(oisst.dat.temp)
# Need to get climatology from the OISST data -- set up OISST stack as time series
# Layer names look like "X1981.09.01"; strip the "X" and convert dots to dashes
# to recover Date objects for the first and last layers.
oisst.min<- gsub("X", "", min(names(oisst.dat)))
oisst.min.date<- as.Date(gsub("[.]", "-", oisst.min))
oisst.max<- gsub("X", "", max(names(oisst.dat)))
oisst.max.date<- as.Date(gsub("[.]", "-", oisst.max))
# Calculate monthly mean temperature -- this would be compared to the sstclim data (monthly climate ensemble)
# NOTE(review): assumes the stack holds one layer per calendar day with no gaps
# between min and max dates -- verify against the source NetCDF.
oisst.dates<- seq.Date(from = oisst.min.date, to = oisst.max.date, by = "day")
oisst.dat<- setZ(oisst.dat, oisst.dates)
# Aggregate daily to monthly data
oisst.monthly <- zApply(oisst.dat, by = as.yearmon, mean)
# Lets check that: the first monthly layer should equal a hand-computed Sep 1981 mean
test.dat<- subset(oisst.dat, which(getZ(oisst.dat) >="1981-09-01" & getZ(oisst.dat) <= "1981-09-30"))
sept1981.mu<- calc(test.dat, mean)
plot(oisst.monthly[[1]]-sept1981.mu)
# Everything seems fine there, now need the monthly average for each month across baseline years (1982-2011)
dates<- getZ(oisst.monthly)
subset.vec<- which(dates > "Dec 1981" & dates < "Jan 2012", arr.ind = TRUE)
oisst.monthly.sub<- oisst.monthly[[subset.vec]]
oisst.monthly.sub<- setZ(oisst.monthly.sub, dates[subset.vec])
oisst.clim<- stack()
months<- c("Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec")
# Build a 12-layer climatology: mean of every Jan, every Feb, ... over 1982-2011
for(i in seq_along(months)) {
# Get all the monthly raster
stack.temp<- subset(oisst.monthly.sub, which(grepl(months[i], names(oisst.monthly.sub))))
month.clim<- calc(stack.temp, fun = mean)
oisst.clim<- stack(oisst.clim, month.clim)
names(oisst.clim)[i]<- months[i]
}
# Check that
test.dat<- subset(oisst.monthly.sub, which(grepl("Jan", names(oisst.monthly.sub))))
jan.mu<- calc(test.dat, mean)
summary(oisst.clim[[1]] - jan.mu)
# Looks good -- time to apply the anomalies to the oisst.clim
# Resample climatology to the (coarser) climate-model grid
oisst.clim.coarse<- raster::resample(oisst.clim, sst.anom)
# raster prepends "X" to make these names syntactically valid, hence the
# gsub("X", "") when matching below.
names(oisst.clim.coarse)<- c("01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12")
# Okay, now good to apply the anomalies from the climate models to climatology and get raw values
# Anomaly layers are named like "X2055.09.16": the second dot-separated token
# is the month, which selects the matching climatology layer.
sst.model<- stack()
for(i in 1:nlayers(sst.anom)) {
index.match<- which(gsub("X", "", names(oisst.clim.coarse)) == unlist(strsplit(names(sst.anom)[i], "[.]"))[2])
rast.temp<- oisst.clim.coarse[[index.match]] + sst.anom[[i]]
sst.model<- stack(sst.model, rast.temp)
names(sst.model)[i]<- names(sst.anom)[i]
}
# One more step, need to fill in the missing coastline raster cell values.... Function below, i is corresponding to a three by three window
# Fill a missing focal-window value. `x` is the vector of cell values from a
# moving window and `i` indexes the focal cell (default 5 = centre of a 3x3
# window). Returns the rounded window mean when the focal cell is NA,
# otherwise the rounded focal value itself.
fill.na <- function(x, i = 5) {
  centre <- x[i]
  if (is.na(centre)) {
    round(mean(x, na.rm = TRUE), 0)
  } else {
    round(centre, 0)
  }
}
# Now apply that function to each raster stack
# (focal() with a 3x3 all-ones window hands fill.na the 9 neighbouring values)
for(i in 1:nlayers(sst.model)) {
new.rast<- focal(sst.model[[i]], w = matrix(1, 3, 3), fun = fill.na, pad = TRUE, na.rm = FALSE)
sst.model[[i]]<- new.rast
}
# Reproject to UTM for storage, then back to WGS84 for plotting
sst.model.proj<- projectRaster(sst.model, crs = proj.utm)
names(sst.model.proj)<- names(sst.anom)
sst.model<- projectRaster(sst.model.proj, crs = proj.wgs84)
# NOTE(review): `data.dir` is never defined in this script, so this writeRaster
# errors as written -- the other sections use the literal "~/GitHub/COCA/Data/".
writeRaster(sst.model.proj, filename = paste(data.dir, "climate.sst.proj.grd", sep = ""), format = "raster", overwrite = TRUE)
# Looking at seasonal changes
# Baseline seasons = Sep-Nov (fall) and Mar-May (spring) of 2011-2015;
# future season = the same months in 2055.
fall<- c("X2011.09.16", "X2011.10.16", "X2011.11.16", "X2012.09.16", "X2012.10.16", "X2012.11.16", "X2013.09.16", "X2013.10.16", "X2013.11.16", "X2014.09.16", "X2014.10.16", "X2014.11.16", "X2015.09.16", "X2015.10.16", "X2015.11.16")
spring<- c("X2011.03.16", "X2011.04.16", "X2011.05.16", "X2012.03.16", "X2012.04.16", "X2012.05.16", "X2013.03.16", "X2013.04.16", "X2013.05.16", "X2014.03.16", "X2014.04.16", "X2014.05.16", "X2015.03.16", "X2015.04.16", "X2015.05.16")
base.fall<- mean(sst.model[[which(names(sst.model) %in% fall)]])
base.spring<- mean(sst.model[[which(names(sst.model) %in% spring)]])
fall.f<- c("X2055.09.16", "X2055.10.16", "X2055.11.16")
fall.diffs<- mean(sst.model[[which(names(sst.model) %in% fall.f)]]) - base.fall
fall.diffs.df<- as.data.frame(fall.diffs, xy = T)
fall.diffs.df$Season<- rep("Fall", nrow(fall.diffs.df))
spring.f<- c("X2055.03.16", "X2055.04.16", "X2055.05.16")
spring.diffs<- mean(sst.model[[which(names(sst.model) %in% spring.f)]]) - base.spring
spring.diffs.df<- as.data.frame(spring.diffs, xy = T)
spring.diffs.df$Season<- rep("Spring", nrow(spring.diffs.df))
diffs.df<- bind_rows(fall.diffs.df, spring.diffs.df)
# Map of projected SST change (2055 minus 2011-2015 baseline), faceted by season
ggplot() +
geom_tile(data = diffs.df, aes(x = x, y = y, fill = layer)) +
scale_fill_viridis(name = "SST", option = "viridis", na.value = "white") +
geom_map(data = us.states.f, map = us.states.f,
aes(map_id = id, group = group),
fill = "gray65", color = "gray45", size = 0.15) +
geom_map(data = ca.provinces.f, map = ca.provinces.f,
aes(map_id = id, group = group),
fill = "gray65", color = "gray45", size = 0.15) +
ylim(ylim.use) + ylab("Lat") +
scale_x_continuous("Long", breaks = c(-75.0, -70.0, -65.0), labels = c("-75.0", "-70.0", "-65.0"), limits = xlim.use) +
coord_fixed(1.3) +
theme(panel.background = element_rect(fill = "white", color = "black"), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), strip.background = element_rect(fill="white", color = "black")) +
facet_wrap(~Season)
# Line plot
diffs.neslme<- diffs.df
#####
## Min and Max
#####
# Same workflow as the mean-anomaly section, repeated for the 5th-percentile
# anomaly variable ("sstpct05"); reuses oisst.clim.coarse built above.
## Get sst anomaly
sst.anom.temp<- raster::stack("~/GitHub/COCA/Data/SST.CMIP5.1982-2099.anom.nc", varname = "sstpct05")
sst.anom<- raster::rotate(sst.anom.temp)
# Okay, now good to apply the anomalies from the climate models to climatology and get raw values
sst.model<- stack()
for(i in 1:nlayers(sst.anom)) {
index.match<- which(gsub("X", "", names(oisst.clim.coarse)) == unlist(strsplit(names(sst.anom)[i], "[.]"))[2])
rast.temp<- oisst.clim.coarse[[index.match]] + sst.anom[[i]]
sst.model<- stack(sst.model, rast.temp)
names(sst.model)[i]<- names(sst.anom)[i]
}
# One more step, need to fill in the missing coastline raster cell values.... Function below, i is corresponding to a three by three window
# Fill a missing focal-window value (redefinition identical to the one above;
# kept for section independence). When the focal cell `x[i]` is NA, return the
# rounded mean of the window; otherwise return the rounded focal value.
fill.na <- function(x, i = 5) {
  centre <- x[i]
  if (is.na(centre)) {
    round(mean(x, na.rm = TRUE), 0)
  } else {
    round(centre, 0)
  }
}
# Now apply that function to each raster stack (coastline gap-fill via 3x3 focal mean)
for(i in 1:nlayers(sst.model)) {
new.rast<- focal(sst.model[[i]], w = matrix(1, 3, 3), fun = fill.na, pad = TRUE, na.rm = FALSE)
sst.model[[i]]<- new.rast
}
sst.model.proj<- projectRaster(sst.model, crs = proj.utm)
names(sst.model.proj)<- names(sst.anom)
sst.model<- projectRaster(sst.model.proj, crs = proj.wgs84)
# Persist the 5th-percentile projection (UTM) to disk
writeRaster(sst.model.proj, filename = paste("~/GitHub/COCA/Data/", "climate.sst.proj.pct05.grd", sep = ""), format = "raster", overwrite = TRUE)
## Get sst anomaly -- repeat once more for the 95th-percentile variable ("sstpct95")
sst.anom.temp<- raster::stack("~/GitHub/COCA/Data/SST.CMIP5.1982-2099.anom.nc", varname = "sstpct95")
sst.anom<- raster::rotate(sst.anom.temp)
# Okay, now good to apply the anomalies from the climate models to climatology and get raw values
sst.model<- stack()
for(i in 1:nlayers(sst.anom)) {
index.match<- which(gsub("X", "", names(oisst.clim.coarse)) == unlist(strsplit(names(sst.anom)[i], "[.]"))[2])
rast.temp<- oisst.clim.coarse[[index.match]] + sst.anom[[i]]
sst.model<- stack(sst.model, rast.temp)
names(sst.model)[i]<- names(sst.anom)[i]
}
# One more step, need to fill in the missing coastline raster cell values.... Function below, i is corresponding to a three by three window
# Fill a missing focal-window value (third identical redefinition; kept for
# section independence). NA focal cell -> rounded window mean, else rounded value.
fill.na <- function(x, i = 5) {
  centre <- x[i]
  if (is.na(centre)) {
    round(mean(x, na.rm = TRUE), 0)
  } else {
    round(centre, 0)
  }
}
# Now apply that function to each raster stack (coastline gap-fill via 3x3 focal mean)
for(i in 1:nlayers(sst.model)) {
new.rast<- focal(sst.model[[i]], w = matrix(1, 3, 3), fun = fill.na, pad = TRUE, na.rm = FALSE)
sst.model[[i]]<- new.rast
}
sst.model.proj<- projectRaster(sst.model, crs = proj.utm)
names(sst.model.proj)<- names(sst.anom)
sst.model<- projectRaster(sst.model.proj, crs = proj.wgs84)
# Persist the 95th-percentile projection (UTM) to disk
writeRaster(sst.model.proj, filename = paste("~/GitHub/COCA/Data/", "climate.sst.proj.pct95.grd", sep = ""), format = "raster", overwrite = TRUE)
| /Code/climate_dataexploration.R | no_license | aallyn/COCA | R | false | false | 9,658 | r | ###############
### COCA Climate Data Exploration ###
###############
## Libraries
library(ncdf4)
library(raster)
library(maptools)
library(ggplot2)
library(tidyverse)
library(rgeos)
library(zoo)
library(viridis)
## Some spatial stuff for data visualiztion
# Spatial projections
proj.wgs84<- CRS("+init=epsg:4326") #WGS84
proj.utm<- CRS("+init=epsg:2960") #UTM 19
#Bounds
xlim.use<- c(-77, -65)
ylim.use<- c(35.05, 45.2)
states <- c("Maine", "New Hampshire", "Massachusetts", "Vermont", "New York", "Rhode Island", "Connecticut", "Delaware", "New Jersey", "Maryland", "Pennsylvania", "Virginia", "North Carolina", "South Carolina", "Georgia", "Florida", "District of Columbia", "West Virgina")
provinces <- c("Ontario", "Québec", "Nova Scotia", "New Brunswick")
us <- raster::getData("GADM",country="USA",level=1)
us.states <- us[us$NAME_1 %in% states,]
us.states <- gSimplify(us.states, tol = 0.075, topologyPreserve = TRUE)
canada <- raster::getData("GADM",country="CAN",level=1)
ca.provinces <- canada[canada$NAME_1 %in% provinces,]
ca.provinces <- gSimplify(ca.provinces, tol = 0.075, topologyPreserve = TRUE)
us.states.f<- fortify(us.states, NAME_1)
ca.provinces.f<- fortify(ca.provinces, NAME_1)
#########
#### Applying anomalies -- mean
#########
## Inspect the file using the ncdf4 libaray
## Get sst anomaly
sst.anom.temp<- raster::stack("~/GitHub/COCA/Data/SST.CMIP5.1982-2099.anom.nc", varname = "sstanom")
sst.anom<- raster::rotate(sst.anom.temp)
## Get oisst data
oisst.dat.temp<- raster::stack("~/Dropbox/Andrew/Work/GMRI/Projects/AllData/EC_sst_1981_2015_OISST-V2-AVHRR_agg_combined.nc")
oisst.dat<- raster::rotate(oisst.dat.temp)
# Need to get climatology from the OISST data -- set up OISST stack as time series
oisst.min<- gsub("X", "", min(names(oisst.dat)))
oisst.min.date<- as.Date(gsub("[.]", "-", oisst.min))
oisst.max<- gsub("X", "", max(names(oisst.dat)))
oisst.max.date<- as.Date(gsub("[.]", "-", oisst.max))
# Calculate monthly mean temperature -- this would be compared to the sstclim data (monthly climate ensemble)
oisst.dates<- seq.Date(from = oisst.min.date, to = oisst.max.date, by = "day")
oisst.dat<- setZ(oisst.dat, oisst.dates)
# Aggregate daily to monthly data
oisst.monthly <- zApply(oisst.dat, by = as.yearmon, mean)
# Lets check that
test.dat<- subset(oisst.dat, which(getZ(oisst.dat) >="1981-09-01" & getZ(oisst.dat) <= "1981-09-30"))
sept1981.mu<- calc(test.dat, mean)
plot(oisst.monthly[[1]]-sept1981.mu)
# Everything seems fine there, now need the monthly average for each month across baseline years (1982-2011)
dates<- getZ(oisst.monthly)
subset.vec<- which(dates > "Dec 1981" & dates < "Jan 2012", arr.ind = TRUE)
oisst.monthly.sub<- oisst.monthly[[subset.vec]]
oisst.monthly.sub<- setZ(oisst.monthly.sub, dates[subset.vec])
oisst.clim<- stack()
months<- c("Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec")
for(i in seq_along(months)) {
# Get all the monthly raster
stack.temp<- subset(oisst.monthly.sub, which(grepl(months[i], names(oisst.monthly.sub))))
month.clim<- calc(stack.temp, fun = mean)
oisst.clim<- stack(oisst.clim, month.clim)
names(oisst.clim)[i]<- months[i]
}
# Check that
test.dat<- subset(oisst.monthly.sub, which(grepl("Jan", names(oisst.monthly.sub))))
jan.mu<- calc(test.dat, mean)
summary(oisst.clim[[1]] - jan.mu)
# Looks good -- time to apply the anomalies to the oisst.clim
oisst.clim.coarse<- raster::resample(oisst.clim, sst.anom)
names(oisst.clim.coarse)<- c("01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12")
# Okay, now good to apply the anomalies from the climate models to climatology and get raw values
sst.model<- stack()
for(i in 1:nlayers(sst.anom)) {
index.match<- which(gsub("X", "", names(oisst.clim.coarse)) == unlist(strsplit(names(sst.anom)[i], "[.]"))[2])
rast.temp<- oisst.clim.coarse[[index.match]] + sst.anom[[i]]
sst.model<- stack(sst.model, rast.temp)
names(sst.model)[i]<- names(sst.anom)[i]
}
# One more step, need to fill in the missing coastline raster cell values.... Function below, i is corresponding to a three by three window
# Fill a missing focal-window value: NA focal cell -> rounded window mean,
# otherwise the rounded focal value (`i` = 5 is the centre of a 3x3 window).
fill.na <- function(x, i = 5) {
  centre <- x[i]
  if (is.na(centre)) {
    round(mean(x, na.rm = TRUE), 0)
  } else {
    round(centre, 0)
  }
}
# Now apply that function to each raster stack
for(i in 1:nlayers(sst.model)) {
new.rast<- focal(sst.model[[i]], w = matrix(1, 3, 3), fun = fill.na, pad = TRUE, na.rm = FALSE)
sst.model[[i]]<- new.rast
}
sst.model.proj<- projectRaster(sst.model, crs = proj.utm)
names(sst.model.proj)<- names(sst.anom)
sst.model<- projectRaster(sst.model.proj, crs = proj.wgs84)
writeRaster(sst.model.proj, filename = paste(data.dir, "climate.sst.proj.grd", sep = ""), format = "raster", overwrite = TRUE)
# Looking at seasonal changes
fall<- c("X2011.09.16", "X2011.10.16", "X2011.11.16", "X2012.09.16", "X2012.10.16", "X2012.11.16", "X2013.09.16", "X2013.10.16", "X2013.11.16", "X2014.09.16", "X2014.10.16", "X2014.11.16", "X2015.09.16", "X2015.10.16", "X2015.11.16")
spring<- c("X2011.03.16", "X2011.04.16", "X2011.05.16", "X2012.03.16", "X2012.04.16", "X2012.05.16", "X2013.03.16", "X2013.04.16", "X2013.05.16", "X2014.03.16", "X2014.04.16", "X2014.05.16", "X2015.03.16", "X2015.04.16", "X2015.05.16")
base.fall<- mean(sst.model[[which(names(sst.model) %in% fall)]])
base.spring<- mean(sst.model[[which(names(sst.model) %in% spring)]])
fall.f<- c("X2055.09.16", "X2055.10.16", "X2055.11.16")
fall.diffs<- mean(sst.model[[which(names(sst.model) %in% fall.f)]]) - base.fall
fall.diffs.df<- as.data.frame(fall.diffs, xy = T)
fall.diffs.df$Season<- rep("Fall", nrow(fall.diffs.df))
spring.f<- c("X2055.03.16", "X2055.04.16", "X2055.05.16")
spring.diffs<- mean(sst.model[[which(names(sst.model) %in% spring.f)]]) - base.spring
spring.diffs.df<- as.data.frame(spring.diffs, xy = T)
spring.diffs.df$Season<- rep("Spring", nrow(spring.diffs.df))
diffs.df<- bind_rows(fall.diffs.df, spring.diffs.df)
ggplot() +
geom_tile(data = diffs.df, aes(x = x, y = y, fill = layer)) +
scale_fill_viridis(name = "SST", option = "viridis", na.value = "white") +
geom_map(data = us.states.f, map = us.states.f,
aes(map_id = id, group = group),
fill = "gray65", color = "gray45", size = 0.15) +
geom_map(data = ca.provinces.f, map = ca.provinces.f,
aes(map_id = id, group = group),
fill = "gray65", color = "gray45", size = 0.15) +
ylim(ylim.use) + ylab("Lat") +
scale_x_continuous("Long", breaks = c(-75.0, -70.0, -65.0), labels = c("-75.0", "-70.0", "-65.0"), limits = xlim.use) +
coord_fixed(1.3) +
theme(panel.background = element_rect(fill = "white", color = "black"), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), strip.background = element_rect(fill="white", color = "black")) +
facet_wrap(~Season)
# Line plot
diffs.neslme<- diffs.df
#####
## Min and Max
#####
## Get sst anomaly
sst.anom.temp<- raster::stack("~/GitHub/COCA/Data/SST.CMIP5.1982-2099.anom.nc", varname = "sstpct05")
sst.anom<- raster::rotate(sst.anom.temp)
# Okay, now good to apply the anomalies from the climate models to climatology and get raw values
sst.model<- stack()
for(i in 1:nlayers(sst.anom)) {
index.match<- which(gsub("X", "", names(oisst.clim.coarse)) == unlist(strsplit(names(sst.anom)[i], "[.]"))[2])
rast.temp<- oisst.clim.coarse[[index.match]] + sst.anom[[i]]
sst.model<- stack(sst.model, rast.temp)
names(sst.model)[i]<- names(sst.anom)[i]
}
# One more step, need to fill in the missing coastline raster cell values.... Function below, i is corresponding to a three by three window
# Fill a missing focal-window value: NA focal cell -> rounded window mean,
# otherwise the rounded focal value (`i` = 5 is the centre of a 3x3 window).
fill.na <- function(x, i = 5) {
  centre <- x[i]
  if (is.na(centre)) {
    round(mean(x, na.rm = TRUE), 0)
  } else {
    round(centre, 0)
  }
}
# Now apply that function to each raster stack
for(i in 1:nlayers(sst.model)) {
new.rast<- focal(sst.model[[i]], w = matrix(1, 3, 3), fun = fill.na, pad = TRUE, na.rm = FALSE)
sst.model[[i]]<- new.rast
}
sst.model.proj<- projectRaster(sst.model, crs = proj.utm)
names(sst.model.proj)<- names(sst.anom)
sst.model<- projectRaster(sst.model.proj, crs = proj.wgs84)
writeRaster(sst.model.proj, filename = paste("~/GitHub/COCA/Data/", "climate.sst.proj.pct05.grd", sep = ""), format = "raster", overwrite = TRUE)
## Get sst anomaly
sst.anom.temp<- raster::stack("~/GitHub/COCA/Data/SST.CMIP5.1982-2099.anom.nc", varname = "sstpct95")
sst.anom<- raster::rotate(sst.anom.temp)
# Okay, now good to apply the anomalies from the climate models to climatology and get raw values
sst.model<- stack()
for(i in 1:nlayers(sst.anom)) {
index.match<- which(gsub("X", "", names(oisst.clim.coarse)) == unlist(strsplit(names(sst.anom)[i], "[.]"))[2])
rast.temp<- oisst.clim.coarse[[index.match]] + sst.anom[[i]]
sst.model<- stack(sst.model, rast.temp)
names(sst.model)[i]<- names(sst.anom)[i]
}
# One more step, need to fill in the missing coastline raster cell values.... Function below, i is corresponding to a three by three window
# Fill a missing focal-window value: NA focal cell -> rounded window mean,
# otherwise the rounded focal value (`i` = 5 is the centre of a 3x3 window).
fill.na <- function(x, i = 5) {
  centre <- x[i]
  if (is.na(centre)) {
    round(mean(x, na.rm = TRUE), 0)
  } else {
    round(centre, 0)
  }
}
# Now apply that function to each raster stack
for(i in 1:nlayers(sst.model)) {
new.rast<- focal(sst.model[[i]], w = matrix(1, 3, 3), fun = fill.na, pad = TRUE, na.rm = FALSE)
sst.model[[i]]<- new.rast
}
sst.model.proj<- projectRaster(sst.model, crs = proj.utm)
names(sst.model.proj)<- names(sst.anom)
sst.model<- projectRaster(sst.model.proj, crs = proj.wgs84)
writeRaster(sst.model.proj, filename = paste("~/GitHub/COCA/Data/", "climate.sst.proj.pct95.grd", sep = ""), format = "raster", overwrite = TRUE)
|
#' Convert \code{survival::survfit} to \code{data.frame}
#'
#' @param model \code{survival::survfit} instance
#' @param surv.connect logical frag indicates whether connects survival curve to the origin
#' @param fun an arbitrary function defining a transformation of the survival curve
#' @inheritParams fortify_base
#' @return data.frame
#' @aliases fortify.survfit.cox
#' @examples
#' library(survival)
#' fortify(survfit(Surv(time, status) ~ sex, data = lung))
#' fortify(survfit(Surv(time, status) ~ 1, data = lung))
#' fortify(survfit(coxph(Surv(time, status) ~ sex, data = lung)))
#' fortify(survfit(coxph(Surv(time, status) ~ 1, data = lung)))
#' @export
fortify.survfit <- function(model, data = NULL, surv.connect = FALSE,
fun = NULL, ...) {
# Base columns shared by every survfit object: one row per event time.
d <- data.frame(time = model$time,
n.risk = model$n.risk,
n.event = model$n.event,
n.censor = model$n.censor,
surv = model$surv,
std.err = model$std.err,
upper = model$upper,
lower = model$lower)
if (is(model, 'survfit.cox')) {
# Cox fits additionally carry a cumulative-hazard column
d <- cbind_wraps(d, data.frame(cumhaz = model$cumhaz))
} else if (is(model, 'survfit')) {
if ('strata' %in% names(model)) {
# model$strata is a named vector of per-stratum row counts, with names
# like "sex=1"; strip the "var=" prefix and expand to one label per row
groupIDs <- gsub(".*=", '', names(model$strata))
groupIDs <- factor(rep(groupIDs, model$strata), levels = groupIDs)
d <- cbind_wraps(d, data.frame(strata = groupIDs))
}
} else {
stop(paste0('Unsupported class for fortify.survfit: ', class(model)))
}
# connect to the origin for plotting: prepend a time-0 row with surv = 1
# (one such row per stratum when strata are present)
if (surv.connect) {
base <- d[1, ]
# cumhaz is for survfit.cox cases
base[intersect(c('time', 'n.censor', 'std.err', 'cumhaz'), colnames(base))] <- 0
base[c('surv', 'upper', 'lower')] <- 1.0
if ('strata' %in% colnames(d)) {
strata <- levels(d$strata)
# NOTE(review): sapply() simplifies to a matrix before as.data.frame(),
# which coerces all columns to a common type -- appears to rely on the
# base row being all-numeric at this point; verify for edge cases.
base <- as.data.frame(sapply(base, rep.int, times = length(strata)))
base$strata <- strata
base$strata <- factor(base$strata, levels = base$strata)
}
d <- rbind(base, d)
}
# Optional transformation of the survival scale; the string shortcuts mirror
# survival::plot.survfit's `fun` argument.
if (!is.null(fun)) {
if (is.character(fun)) {
fun <- switch(fun, log = function(x) log(x),
event = function(x) 1 - x,
cumhaz = function(x) -log(x),
cloglog = function(x) log(-log(x)),
pct = function(x) x * 100,
logpct = function(x) 100 * x,
identity = function(x) x,
stop("Unrecognized function argument"))
}
else if (!is.function(fun)) {
stop("Invalid 'fun' argument")
}
# the same transform is applied to the estimate and both confidence bounds
d$surv <- fun(d$surv)
d$upper <- fun(d$upper)
d$lower <- fun(d$lower)
}
post_fortify(d)
}
#' Autoplot \code{survival::survfit}
#'
#' @param object \code{survival::survfit} instance
#' @param fun an arbitrary function defining a transformation of the survival curve
#' @param surv.geom geometric string for survival curve. 'step', 'line' or 'point'
#' @param surv.colour line colour for survival curve
#' @param surv.size point size for survival curve
#' @param surv.linetype line type for survival curve
#' @param surv.alpha alpha for survival curve
#' @param surv.fill fill colour survival curve
#' @param surv.shape point shape survival curve
#' @inheritParams fortify.survfit
#' @inheritParams plot_confint
#' @param censor Logical flag indicating whether to plot censors
#' @param censor.colour colour for censors
#' @param censor.size size for censors
#' @param censor.alpha alpha for censors
#' @param censor.shape shape for censors
#' @inheritParams apply_facets
#' @inheritParams post_autoplot
#' @param ... other arguments passed to methods
#' @return ggplot
#' @aliases autoplot.survfit.cox
#' @examples
#' library(survival)
#' autoplot(survfit(Surv(time, status) ~ sex, data = lung))
#' autoplot(survfit(Surv(time, status) ~ sex, data = lung), facets = TRUE)
#' autoplot(survfit(Surv(time, status) ~ 1, data = lung))
#' autoplot(survfit(Surv(time, status) ~ sex, data=lung), conf.int = FALSE, censor = FALSE)
#' autoplot(survfit(coxph(Surv(time, status) ~ sex, data = lung)))
#' @importFrom scales percent
#' @export
autoplot.survfit <- function(object, fun = NULL,
                             surv.geom = 'step',
                             surv.colour = NULL, surv.size = NULL, surv.linetype = NULL,
                             surv.alpha = NULL, surv.fill = NULL, surv.shape = NULL,
                             surv.connect = TRUE,
                             conf.int = TRUE,
                             conf.int.colour = '#0000FF', conf.int.linetype = 'none',
                             conf.int.fill = '#000000', conf.int.alpha = 0.3,
                             censor = TRUE, censor.colour = NULL, censor.size = 3,
                             censor.alpha = NULL, censor.shape = '+',
                             facets = FALSE, nrow = NULL, ncol = 1, scales = 'free_y',
                             xlim = c(NA, NA), ylim = c(NA, NA), log = "",
                             main = NULL, xlab = NULL, ylab = NULL, asp = NULL,
                             ...) {
  if (is_derived_from(object, 'aareg')) {
    # autoplot.aareg pre-fortifies and passes a melted data.frame here:
    # one facet / colour group per Aalen coefficient time series
    plot.data <- object
    mapping <- aes_string(x = 'time', y = 'value')
    facets_formula <- ~ variable
    if (is.null(surv.colour)) {
      surv.colour <- 'variable'
    }
    # use default axis labels
    scale_labels <- ggplot2::waiver()
  } else {
    plot.data <- fortify(object, surv.connect = surv.connect, fun = fun)
    mapping <- aes_string(x = 'time', y = 'surv')
    if ('strata' %in% colnames(plot.data)) {
      facets_formula <- ~ strata
      if (is.null(surv.colour)) {
        surv.colour <- 'strata'
      }
    } else {
      facets_formula <- NULL
    }
    # Percent labels only make sense while the curve is still on the [0, 1]
    # survival scale. BUGFIX: `fun` may be an arbitrary function (see
    # fortify.survfit), and `closure %in% character` raises a coercion error,
    # so only test `fun` against the shortcut names when it is a string.
    if (is.null(fun) || (is.character(fun) && fun %in% c('identity', 'event'))) {
      scale_labels <- scales::percent
    } else {
      scale_labels <- ggplot2::waiver()
    }
  }
  # Default the confidence-band fill to the grouping variable unless the user
  # supplied an explicit fill (scalar condition, hence &&).
  if (missing(conf.int.fill) && !is.null(surv.colour)) {
    conf.int.fill <- surv.colour
  }
  geomfunc <- get_geom_function(surv.geom, allowed = c('step', 'line', 'point'))
  p <- ggplot(data = plot.data, mapping = mapping) +
    scale_y_continuous(labels = scale_labels)
  p <- p + geom_factory(geomfunc, plot.data,
                        colour = surv.colour, size = surv.size, linetype = surv.linetype,
                        alpha = surv.alpha, fill = surv.fill, shape = surv.shape)
  # match the confidence-band geometry to the curve geometry
  if (surv.geom == 'step') {
    conf.int.geom <- 'step'
  } else {
    conf.int.geom <- 'line'
  }
  p <- plot_confint(p, data = plot.data,
                    conf.int = conf.int, conf.int.geom = conf.int.geom,
                    conf.int.colour = conf.int.colour,
                    conf.int.linetype = conf.int.linetype,
                    conf.int.fill = conf.int.fill, conf.int.alpha = conf.int.alpha)
  if (censor && 'n.censor' %in% colnames(plot.data)) {
    # mark times at which at least one observation was censored
    p <- p + geom_factory(geom_point, plot.data[plot.data$n.censor > 0, ],
                          colour = censor.colour, size = censor.size,
                          alpha = censor.alpha, shape = censor.shape)
  }
  if (facets) {
    p <- apply_facets(p, facets_formula, nrow = nrow, ncol = ncol, scales = scales)
  }
  p <- post_autoplot(p = p, xlim = xlim, ylim = ylim, log = log,
                     main = main, xlab = xlab, ylab = ylab, asp = asp)
  p
}
#' Convert \code{survival::aareg} to \code{data.frame}
#'
#' @param model \code{survival::aareg} instance
#' @param maxtime truncate the input to the model at time "maxtime"
#' @inheritParams fortify.survfit
#' @param melt Logical flag indicating whether to melt each timeseries as variable
#' @return data.frame
#' @examples
#' library(survival)
#' fortify(aareg(Surv(time, status) ~ age + sex + ph.ecog, data = lung, nmin = 1))
#' fortify(aareg(Surv(time, status) ~ age + sex + ph.ecog, data = lung, nmin = 1), melt = TRUE)
#' @export
fortify.aareg <- function(model, data = NULL,
                          maxtime = NULL,
                          surv.connect = TRUE,
                          melt = FALSE, ...) {
  # Indices of the event times to keep, optionally truncated at `maxtime`.
  # BUGFIX: use seq_along/seq_len rather than 1:n so that an empty selection
  # yields integer(0) instead of the bogus c(1, 0) produced by 1:0.
  if (is.null(maxtime)) {
    keep <- seq_along(model$time)
  } else {
    keep <- seq_len(sum(model$time <= maxtime))
  }
  if (is.matrix(model$coefficient) && ncol(model$coefficient) > 1) {
    # BUGFIX: drop = FALSE keeps a single kept row as a 1 x p matrix so the
    # data.frame below stays one-row-per-time instead of collapsing to a column
    coefs <- model$coefficient[keep, , drop = FALSE]
  } else {
    coefs <- model$coefficient[keep]
  }
  rownames(coefs) <- NULL
  coefs <- as.data.frame(coefs)
  cols <- colnames(coefs)
  if (melt) {
    # Long format: one (time, variable, coef) row per coefficient increment,
    # plus the cumulative value and a 95% pointwise band per variable.
    d <- cbind(data.frame(time = model$time[keep]), coefs)
    if (surv.connect) {
      # prepend an all-zero row so every curve starts at the origin
      d <- rbind(0, d)
    }
    d <- tidyr::gather_(d, 'variable', 'coef', cols)
    d <- d %>%
      dplyr::group_by_('variable') %>%
      dplyr::mutate_('se' = 'sqrt(cumsum(coef ^ 2))',
                     'value' = 'cumsum(coef)',
                     'upper' = 'value + se * 1.96',
                     'lower' = 'value - se * 1.96')
  } else {
    # Wide format: cumulative coefficients, keeping only the last row of each
    # run of tied event times.
    d <- cbind_wraps(data.frame(time = model$time[keep]),
                     apply(coefs, 2, cumsum))
    indexer <- 1 + length(d$time) - rev(match(unique(rev(d$time)), rev(d$time)))
    d <- d[indexer, ]
    if (surv.connect) {
      d <- rbind(0, d)
    }
  }
  post_fortify(d, klass = model)
}
#' Autoplot \code{survival::aareg}
#'
#' @param object \code{survival::aareg} instance
#' @param maxtime truncate the input to the model at time "maxtime"
#' @inheritParams autoplot.survfit
#' @param ... other arguments passed to \code{autoplot.survfit}
#' @return ggplot
#' @examples
#' library(survival)
#' autoplot(aareg(Surv(time, status) ~ age + sex + ph.ecog, data = lung, nmin = 1))
#' @export
autoplot.aareg <- function (object, maxtime = NULL,
                            surv.connect = TRUE,
                            facets = TRUE, ncol = NULL,
                            xlab = '', ylab = '',
                            ...) {
  # melt = TRUE yields one timeseries per covariate so that
  # autoplot.survfit can colour/facet by 'variable'.
  plot.data <- fortify(object, maxtime = maxtime,
                       surv.connect = surv.connect, melt = TRUE)
  # Forward the user-supplied axis labels; previously the literals ''
  # were passed, silently discarding the xlab/ylab arguments.
  # Defaults are unchanged, so default output is identical.
  autoplot.survfit(plot.data, facets = facets, ncol = ncol,
                   xlab = xlab, ylab = ylab, ...)
}
| /R/fortify_surv.R | no_license | yoursdearboy/ggfortify | R | false | false | 10,075 | r | #' Convert \code{survival::survfit} to \code{data.frame}
#'
#' @param model \code{survival::survfit} instance
#' @param surv.connect logical frag indicates whether connects survival curve to the origin
#' @param fun an arbitrary function defining a transformation of the survival curve
#' @inheritParams fortify_base
#' @return data.frame
#' @aliases fortify.survfit.cox
#' @examples
#' library(survival)
#' fortify(survfit(Surv(time, status) ~ sex, data = lung))
#' fortify(survfit(Surv(time, status) ~ 1, data = lung))
#' fortify(survfit(coxph(Surv(time, status) ~ sex, data = lung)))
#' fortify(survfit(coxph(Surv(time, status) ~ 1, data = lung)))
#' @export
fortify.survfit <- function(model, data = NULL, surv.connect = FALSE,
                            fun = NULL, ...) {
  # One row per event time, mirroring the components of summary(survfit).
  d <- data.frame(time = model$time,
                  n.risk = model$n.risk,
                  n.event = model$n.event,
                  n.censor = model$n.censor,
                  surv = model$surv,
                  std.err = model$std.err,
                  upper = model$upper,
                  lower = model$lower)
  if (is(model, 'survfit.cox')) {
    # Cox fits additionally expose the cumulative hazard.
    d <- cbind_wraps(d, data.frame(cumhaz = model$cumhaz))
  } else if (is(model, 'survfit')) {
    if ('strata' %in% names(model)) {
      # Strata names look like "sex=1"; keep only the value after '=' and
      # repeat each label for the rows of that stratum, preserving order.
      groupIDs <- gsub(".*=", '', names(model$strata))
      groupIDs <- factor(rep(groupIDs, model$strata), levels = groupIDs)
      d <- cbind_wraps(d, data.frame(strata = groupIDs))
    }
  } else {
    stop(paste0('Unsupported class for fortify.survfit: ', class(model)))
  }
  # connect to the origin for plotting
  if (surv.connect) {
    # Synthesise a t = 0 row (one per stratum) with survival 1.0 so the
    # plotted curves start at the origin.
    base <- d[1, ]
    # cumhaz is for survfit.cox cases
    base[intersect(c('time', 'n.censor', 'std.err', 'cumhaz'), colnames(base))] <- 0
    base[c('surv', 'upper', 'lower')] <- 1.0
    if ('strata' %in% colnames(d)) {
      strata <- levels(d$strata)
      # NOTE(review): sapply() over a one-row data.frame goes through a
      # matrix, so column types in the replicated base rows may be coerced —
      # confirm downstream code relies only on the numeric values.
      base <- as.data.frame(sapply(base, rep.int, times = length(strata)))
      base$strata <- strata
      base$strata <- factor(base$strata, levels = base$strata)
    }
    d <- rbind(base, d)
  }
  if (!is.null(fun)) {
    # Optional transformation of the curve; accepts the shortcut names used
    # by survival::plot.survfit, or an arbitrary function.
    if (is.character(fun)) {
      fun <- switch(fun, log = function(x) log(x),
                    event = function(x) 1 - x,
                    cumhaz = function(x) -log(x),
                    cloglog = function(x) log(-log(x)),
                    pct = function(x) x * 100,
                    logpct = function(x) 100 * x,
                    identity = function(x) x,
                    stop("Unrecognized function argument"))
      # NOTE(review): 'logpct' maps to plain 100 * x here, identical to
      # 'pct'; presumably the log axis is applied elsewhere — confirm.
    }
    else if (!is.function(fun)) {
      stop("Invalid 'fun' argument")
    }
    # Apply the same transformation to the estimate and both bounds.
    d$surv <- fun(d$surv)
    d$upper <- fun(d$upper)
    d$lower <- fun(d$lower)
  }
  post_fortify(d)
}
#' Autoplot \code{survival::survfit}
#'
#' @param object \code{survival::survfit} instance
#' @param fun an arbitrary function defining a transformation of the survival curve
#' @param surv.geom geometric string for survival curve. 'step', 'line' or 'point'
#' @param surv.colour line colour for survival curve
#' @param surv.size point size for survival curve
#' @param surv.linetype line type for survival curve
#' @param surv.alpha alpha for survival curve
#' @param surv.fill fill colour survival curve
#' @param surv.shape point shape survival curve
#' @inheritParams fortify.survfit
#' @inheritParams plot_confint
#' @param censor Logical flag indicating whether to plot censors
#' @param censor.colour colour for censors
#' @param censor.size size for censors
#' @param censor.alpha alpha for censors
#' @param censor.shape shape for censors
#' @inheritParams apply_facets
#' @inheritParams post_autoplot
#' @param ... other arguments passed to methods
#' @return ggplot
#' @aliases autoplot.survfit.cox
#' @examples
#' library(survival)
#' autoplot(survfit(Surv(time, status) ~ sex, data = lung))
#' autoplot(survfit(Surv(time, status) ~ sex, data = lung), facets = TRUE)
#' autoplot(survfit(Surv(time, status) ~ 1, data = lung))
#' autoplot(survfit(Surv(time, status) ~ sex, data=lung), conf.int = FALSE, censor = FALSE)
#' autoplot(survfit(coxph(Surv(time, status) ~ sex, data = lung)))
#' @importFrom scales percent
#' @export
autoplot.survfit <- function(object, fun = NULL,
                             surv.geom = 'step',
                             surv.colour = NULL, surv.size = NULL, surv.linetype = NULL,
                             surv.alpha = NULL, surv.fill = NULL, surv.shape = NULL,
                             surv.connect = TRUE,
                             conf.int = TRUE,
                             conf.int.colour = '#0000FF', conf.int.linetype = 'none',
                             conf.int.fill = '#000000', conf.int.alpha = 0.3,
                             censor = TRUE, censor.colour = NULL, censor.size = 3,
                             censor.alpha = NULL, censor.shape = '+',
                             facets = FALSE, nrow = NULL, ncol = 1, scales = 'free_y',
                             xlim = c(NA, NA), ylim = c(NA, NA), log = "",
                             main = NULL, xlab = NULL, ylab = NULL, asp = NULL,
                             ...) {
  if (is_derived_from(object, 'aareg')) {
    # for autoplot.aareg, object must be a data.frame
    # (autoplot.aareg pre-fortifies with melt = TRUE and delegates here);
    # each covariate is one 'variable' timeseries.
    plot.data <- object
    mapping <- aes_string(x = 'time', y = 'value')
    facets_formula <- ~ variable
    if (is.null(surv.colour)) {
      surv.colour <- 'variable'
    }
    # use default
    scale_labels <- ggplot2::waiver()
  } else {
    # Genuine survfit object: fortify it here, honouring surv.connect/fun.
    plot.data <- fortify(object, surv.connect = surv.connect, fun = fun)
    mapping <- aes_string(x = 'time', y = 'surv')
    if ('strata' %in% colnames(plot.data)) {
      facets_formula <- ~ strata
      if (is.null(surv.colour)) {
        surv.colour <- 'strata'
      }
    } else {
      facets_formula <- NULL
    }
    # Percent labels only make sense for untransformed (or event) curves.
    # NOTE(review): if fun is passed as a function rather than a string,
    # `fun %in% ...` cannot coerce the closure and will error — confirm
    # character-only usage is intended on this path.
    if (is.null(fun) || fun %in% c('identity', 'event')) {
      scale_labels <- scales::percent
    } else {
      scale_labels <- ggplot2::waiver()
    }
  }
  # When the caller did not set a fill explicitly, shade the confidence
  # band with the same variable used for the curve colour.
  if (missing(conf.int.fill) & !is.null(surv.colour)) {
    conf.int.fill <- surv.colour
  }
  # Resolve 'step'/'line'/'point' to the corresponding ggplot2 geom.
  geomfunc <- get_geom_function(surv.geom, allowed = c('step', 'line', 'point'))
  p <- ggplot(data = plot.data, mapping = mapping) +
    scale_y_continuous(labels = scale_labels)
  p <- p + geom_factory(geomfunc, plot.data,
                        colour = surv.colour, size = surv.size, linetype = surv.linetype,
                        alpha = surv.alpha, fill = surv.fill, shape = surv.shape)
  # The confidence band mirrors the survival curve's geometry.
  if (surv.geom == 'step') {
    conf.int.geom <- 'step'
  } else {
    conf.int.geom <- 'line'
  }
  p <- plot_confint(p, data = plot.data,
                    conf.int = conf.int, conf.int.geom = conf.int.geom,
                    conf.int.colour = conf.int.colour,
                    conf.int.linetype = conf.int.linetype,
                    conf.int.fill = conf.int.fill, conf.int.alpha = conf.int.alpha)
  # Mark censoring times (rows with at least one censored subject).
  if (censor & 'n.censor' %in% colnames(plot.data)) {
    p <- p + geom_factory(geom_point, plot.data[plot.data$n.censor > 0, ],
                          colour = censor.colour, size = censor.size,
                          alpha = censor.alpha, shape = censor.shape)
  }
  if (facets) {
    p <- apply_facets(p, facets_formula, nrow = nrow, ncol = ncol, scales = scales)
  }
  # Common post-processing: limits, log axes, titles, aspect ratio.
  p <- post_autoplot(p = p, xlim = xlim, ylim = ylim, log = log,
                     main = main, xlab = xlab, ylab = ylab, asp = asp)
  p
}
#' Convert \code{survival::aareg} to \code{data.frame}
#'
#' @param model \code{survival::aareg} instance
#' @param maxtime truncate the input to the model at time "maxtime"
#' @inheritParams fortify.survfit
#' @param melt Logical flag indicating whether to melt each timeseries as variable
#' @return data.frame
#' @examples
#' library(survival)
#' fortify(aareg(Surv(time, status) ~ age + sex + ph.ecog, data = lung, nmin = 1))
#' fortify(aareg(Surv(time, status) ~ age + sex + ph.ecog, data = lung, nmin = 1), melt = TRUE)
#' @export
fortify.aareg <- function(model, data = NULL,
                          maxtime = NULL,
                          surv.connect = TRUE,
                          melt = FALSE, ...) {
  # Indices of the event times to keep, optionally truncated at maxtime.
  # NOTE(review): 1:length()/1:sum() yield c(1, 0) when the count is zero
  # (e.g. maxtime before the first event time); seq_along()/seq_len()
  # would be the safe idiom — confirm this edge case cannot occur.
  if (is.null(maxtime)) {
    keep <- 1:length(model$time)
  } else {
    keep <- 1:sum(model$time <= maxtime)
  }
  # Per-event coefficient increments; a matrix for multiple covariates,
  # a plain vector when the model has a single term.
  if (is.matrix(model$coefficient) && ncol(model$coefficient) > 1) {
    coefs <- model$coefficient[keep, ]
  } else {
    coefs <- model$coefficient[keep]
  }
  rownames(coefs) <- NULL
  coefs <- as.data.frame(coefs)
  cols <- colnames(coefs)
  if (melt) {
    # Long format: one row per (time, covariate) with cumulative
    # coefficient, standard error and 95% normal-approximation band.
    d <- cbind(data.frame(time = model$time[keep]), coefs)
    if (surv.connect) {
      # Prepend an all-zero row so curves start at the origin.
      d <- rbind(0, d)
    }
    # NOTE(review): gather_/group_by_/mutate_ are deprecated underscore
    # (standard-evaluation) verbs; tidyr/dplyr recommend migrating to
    # gather()/pivot_longer(), group_by() and mutate() before removal.
    d <- tidyr::gather_(d, 'variable', 'coef', cols)
    d <- d %>%
      dplyr::group_by_('variable') %>%
      dplyr::mutate_('se' = 'sqrt(cumsum(coef ^ 2))',
                     'value' = 'cumsum(coef)',
                     'upper' = 'value + se * 1.96',
                     'lower' = 'value - se * 1.96')
  } else {
    # Wide format: one cumulative-coefficient column per covariate.
    d <- cbind_wraps(data.frame(time = model$time[keep]),
                     apply(coefs, 2, cumsum))
    # Keep only the last row for each duplicated time value.
    indexer <- 1 + length(d$time) - rev(match(unique(rev(d$time)), rev(d$time)))
    d <- d[indexer, ]
    if (surv.connect) {
      d <- rbind(0, d)
    }
  }
  post_fortify(d, klass = model)
}
#' Autoplot \code{survival::aareg}
#'
#' @param object \code{survival::aareg} instance
#' @param maxtime truncate the input to the model at time "maxtime"
#' @inheritParams autoplot.survfit
#' @param ... other arguments passed to \code{autoplot.survfit}
#' @return ggplot
#' @examples
#' library(survival)
#' autoplot(aareg(Surv(time, status) ~ age + sex + ph.ecog, data = lung, nmin = 1))
#' @export
autoplot.aareg <- function (object, maxtime = NULL,
                            surv.connect = TRUE,
                            facets = TRUE, ncol = NULL,
                            xlab = '', ylab = '',
                            ...) {
  # melt = TRUE yields one timeseries per covariate so that
  # autoplot.survfit can colour/facet by 'variable'.
  plot.data <- fortify(object, maxtime = maxtime,
                       surv.connect = surv.connect, melt = TRUE)
  # NOTE(review): the literals '' are passed instead of the xlab/ylab
  # arguments, so user-supplied labels are silently ignored — confirm and
  # forward xlab = xlab, ylab = ylab instead.
  autoplot.survfit(plot.data, facets = facets, ncol = ncol,
                   xlab = '', ylab = '', ...)
}
|
library(castor)
### Name: get_tips_for_mrcas
### Title: Find tips with specific most recent common ancestors.
### Aliases: get_tips_for_mrcas
### Keywords: MRCA
### ** Examples
# generate a random tree with 1000 tips
Ntips <- 1000
tree <- generate_random_tree(list(birth_rate_intercept = 1), Ntips)$tree
# pick 3 distinct internal nodes at random
focal_nodes <- sample.int(n = tree$Nnode, size = 3, replace = FALSE)
# get the descending tips for each focal node (the focal node is their MRCA)
tips_per_focal_node <- get_tips_for_mrcas(tree, focal_nodes)
# check correctness (i.e. calculate actual MRCAs of tips);
# seq_along() avoids the 1:length() pitfall (1:0 is c(1, 0) on empty input)
for (n in seq_along(focal_nodes)) {
  mrca <- get_mrca_of_set(tree, tips_per_focal_node[[n]])
  cat(sprintf("Focal node = %d, should match mrca of tips = %d\n", focal_nodes[n], mrca - Ntips))
}
| /data/genthat_extracted_code/castor/examples/get_tips_for_mrcas.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 702 | r | library(castor)
### Name: get_tips_for_mrcas
### Title: Find tips with specific most recent common ancestors.
### Aliases: get_tips_for_mrcas
### Keywords: MRCA
### ** Examples
# generate a random tree
Ntips = 1000
tree = generate_random_tree(list(birth_rate_intercept=1),Ntips)$tree
# pick random nodes
focal_nodes = sample.int(n=tree$Nnode, size=3, replace=FALSE)
# get tips for mrcas
tips_per_focal_node = get_tips_for_mrcas(tree, focal_nodes);
# check correctness (i.e. calculate actual MRCAs of tips)
# NOTE(review): 1:length(focal_nodes) would be c(1, 0) for an empty vector;
# seq_along() is the safe idiom (harmless here since size = 3 above)
for(n in 1:length(focal_nodes)){
  mrca = get_mrca_of_set(tree, tips_per_focal_node[[n]])
  cat(sprintf("Focal node = %d, should match mrca of tips = %d\n",focal_nodes[n],mrca-Ntips))
}
|
isInteger <- ttutils::isInteger
#' Moving average
#'
#' Create a new map/matrix by doing a moving average on a matrix
#'
#' @param Z matrix or all-numeric data.frame
#' @param r integer : the radius of the window of the moving average. Ex : r = 1 implies a window of 3*3
#'
#' @return matrix (side effects imply a smaller matrix than the Z matrix passed in parametre)
#' @export
#'
#' @examples
#' moving_average(matrix(rnorm(10*10, 0,1), nrow = 10), r=2)
moving_average <- function(Z, r){
  if (!isInteger(r)) {stop("r must be an integer")}
  if (r < 0) {stop("r must be >= 0")}
  # Coerce BEFORE the numeric check: is.numeric() is FALSE for every
  # data.frame, so checking first rejected valid all-numeric data frames
  # even though they are documented as accepted input.
  Z <- as.matrix(Z)
  if (!is.numeric(Z)) {stop("Z must be a matrix/dataframe of numbers")}
  # the (2r+1) x (2r+1) window must fit inside the grid
  if (2*r+1 > dim(Z)[1] || 2*r+1 > dim(Z)[2]) {stop("The grid must be larger than the window. Decrease r or increase the grid size")}
  nrowsZ <- nrow(Z)
  ncolsZ <- ncol(Z)
  # the output shrinks by r cells on each edge (no padding is applied)
  nrowsY <- nrowsZ - 2*r
  ncolsY <- ncolsZ - 2*r
  Y <- matrix(0, nrow = nrowsY, ncol = ncolsY)
  for (i in (r+1):(nrowsZ-r)) {
    for (j in (r+1):(ncolsZ-r)) {
      # mean over the window centred on Z[i, j]
      Y[i-r, j-r] <- mean(Z[(i-r):(i+r), (j-r):(j+r)])
    }
  }
  return(Y)
}
| /R/moving_average.R | permissive | C-Juliette/Geostatistic | R | false | false | 1,112 | r | isInteger <- ttutils::isInteger
#' Moving average
#'
#' Create a new map/matrix by doing a moving average on a matrix
#'
#' @param Z matrix/dataframe
#' @param r integer : the radius of the window of the moving average. Ex : r = 1 implies a window of 3*3
#'
#' @return matrix (side effects imply a smaller matrix than the Z matrix passed in parametre)
#' @export
#'
#' @examples
#' moving_average(matrix(rnorm(10*10, 0,1), nrow = 10), r=2)
moving_average <- function(Z, r){
  if (!isInteger(r)){stop("r must be an integer")}
  if (r < 0 ){stop("r must be >= 0")}
  # NOTE(review): is.numeric() is FALSE for every data.frame, so an
  # all-numeric data.frame is rejected here even though the docs advertise
  # data.frame input; coercing with as.matrix() BEFORE this check would
  # accept it — confirm the intended contract.
  if (!is.numeric(Z)){stop("Z must be a matrix/dataframe of numbers")}
  Z <- as.matrix(Z)
  # the (2r+1) x (2r+1) window must fit inside the grid
  if ( 2*r+1 > dim(Z)[1] | 2*r+1 > dim(Z)[2]){stop("The grid must be larger than the window. Decrease r or increase the grid size")}
  nrowsZ <- dim(Z)[1]
  ncolsZ <- dim(Z)[2]
  # the output shrinks by r cells on each edge (no padding is applied)
  nrowsY <- nrowsZ - 2*r
  ncolsY <- ncolsZ - 2*r
  Y <- matrix(data = rep(0, nrowsY*ncolsY), nrow = nrowsY)
  for(i in (r+1):(nrowsZ-r)){
    for(j in (r+1):(ncolsZ-r)){
      # mean over the window centred on Z[i, j]
      window <- Z[(i-r):(i+r), (j-r):(j+r)]
      Y[i-r,j-r] <- mean(window)
    }
  }
  return (Y)
}
|
## Constructor for a cache-aware "matrix" object used to avoid repeatedly
## inverting the same matrix. The return value is a plain list of four
## closures that share this call's environment, which holds the matrix `z`
## and the lazily cached inverse.
makematrix <- function(z = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    # Swapping in a new matrix invalidates any previously cached inverse.
    z <<- y
    cached_inverse <<- NULL
  }
  get <- function() z
  setsolve <- function(solve) cached_inverse <<- solve
  getsolve <- function() cached_inverse
  list(
    set = set,
    get = get,
    setsolve = setsolve,
    getsolve = getsolve
  )
}
## Return the inverse of the special "matrix" object created by makematrix():
## on the first call the inverse is computed with solve() and stored in the
## object's cache; later calls emit a message and serve the cached copy.
## (Despite the name, this caches the inverse, not a mean.)
cachemean <- function(z, ...) {
  cached <- z$getsolve()
  if (is.null(cached)) {
    # Cache miss: invert the stored matrix and remember the result.
    inverse <- solve(z$get(), ...)
    z$setsolve(inverse)
    inverse
  } else {
    message("getting cached data")
    cached
  }
}
| /cachematrix.R | no_license | Ferric2004/ProgrammingAssignment2 | R | false | false | 1,152 | r | ## These two function calculate the inverse of a matrix and cache its values, avoiding to repeat
## ...computations that would take too long to be executed each time.
## The first function, makematrix, takes an invertible matrix and creates a
## list of 4 closures that share one environment holding the matrix and
## the cached value of its inverse.
makematrix <- function(z = matrix()) {
  # m caches the inverse; NULL means "not computed yet"
  m <- NULL
  set <- function(y) {
    # replacing the matrix invalidates the cached inverse
    z <<- y
    m <<- NULL
  }
  get <- function() {z}
  setsolve <- function(solve) {m <<- solve}
  getsolve <- function() {m}
  # the returned list is the "cache matrix" object consumed by cachemean()
  list(set = set, get = get,
       setsolve = setsolve,
       getsolve = getsolve)
}
## The second function takes the list created by makematrix, checks whether
## the inverse has already been calculated and otherwise computes it with
## solve() and stores it back into the cache. If the inverse is already
## cached, a message is emitted and the cached value is returned.
## (Despite the name, this caches the inverse, not a mean.)
cachemean <- function(z, ...) {
  m <- z$getsolve()
  if(!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  # cache miss: invert the stored matrix and remember the result
  data <- z$get()
  m <- solve(data, ...)
  z$setsolve(m)
  m
}
|
#!/usr/bin/env Rscript
# mtbr pipline
# Load libraries
suppressPackageStartupMessages(library("optparse", quietly=TRUE, verbose=FALSE, warn.conflicts=FALSE))
suppressPackageStartupMessages(library("data.table", quietly=TRUE, verbose=FALSE, warn.conflicts=FALSE))
suppressPackageStartupMessages(library("zoo", quietly=TRUE, verbose=FALSE, warn.conflicts=FALSE))
suppressPackageStartupMessages(library("methyutils", quietly=TRUE, verbose=FALSE, warn.conflicts=FALSE))
suppressPackageStartupMessages(library("IRanges", quietly=TRUE, verbose=FALSE, warn.conflicts=FALSE))
# Locate the directory containing this script from the --file= argument
# that Rscript places on the command line; used to source sibling files.
getScriptPath <- function(){
  args <- commandArgs()
  file.arg <- regexpr("(?<=^--file=).+", args, perl = TRUE)
  dirs <- dirname(regmatches(args, file.arg))
  if (length(dirs) == 0) {
    stop("can't determine script dir: please call the script with Rscript")
  }
  if (length(dirs) > 1) {
    stop("can't determine script dir: more than one '--file' argument detected")
  }
  return(dirs)
}
source(paste(getScriptPath(),"/cgDensity.lib.R", sep = ""), chdir = TRUE)
## Specify desired command-line options in a list
option_list <- list(
  make_option(c("-l","--genome-library"), help="Bioconductor BSgenome library name", default = "BSgenome.Mmusculus.UCSC.mm9"),
  make_option(c("-n","--genome-name"), help="genome library object name. ex: \"Mmusculus\", \"Hsapiens\", \"Scerevisiae\"", default = "Mmusculus"),
  make_option(c("-t","--genome-type"), help="genome type , example mm9, mm10, hg19, hg18, default is NULL", default = ""),
  make_option(c("-r","--promoter-range"), help="Promoter range", default = 1000),
  make_option(c("-w","--window-size"), help="sliding window size , default is 2500", default = 2500)
)
# Get command line options: two positional args (gene db file, sample dir)
arguments <- parse_args(OptionParser(usage = "%prog [options] gene.csv samplePath", option_list = option_list), positional_arguments = 2)
opt <- arguments$options
kGenomeLibrary <- opt$`genome-library`
kGenomeName <- opt$`genome-name`
kGenomeType <- opt$`genome-type`
kPromoterRange <- opt$`promoter-range`
kWindowSize <- opt$`window-size`
kGenedb <- arguments$args[1]
kSamplePath <- arguments$args[2]
# load the genome library: known shortcut types map to BSgenome packages
kGenomeTypeList <- list(
  mm9 = list(genome.library="BSgenome.Mmusculus.UCSC.mm9",genome.name="Mmusculus"),
  mm10 = list(genome.library="BSgenome.Mmusculus.UCSC.mm10",genome.name="Mmusculus"),
  hg18 = list(genome.library="BSgenome.Hsapiens.UCSC.hg18",genome.name="Hsapiens"),
  hg19 = list(genome.library="BSgenome.Hsapiens.UCSC.hg19",genome.name="Hsapiens"),
  hg38 = list(genome.library="BSgenome.Hsapiens.UCSC.hg38",genome.name="Hsapiens")
)
kGenome <- NULL
if ( kGenomeType %in% names(kGenomeTypeList) ){
  suppressPackageStartupMessages(library(kGenomeTypeList[[kGenomeType]][["genome.library"]], character.only = TRUE, quietly=TRUE, verbose=FALSE, warn.conflicts=FALSE))
  kGenome <- get(kGenomeTypeList[[kGenomeType]][["genome.name"]])
}else {
  # fall back to the explicitly supplied library/object names
  suppressPackageStartupMessages(library(kGenomeLibrary, character.only = TRUE, quietly=TRUE, verbose=FALSE, warn.conflicts=FALSE))
  kGenome <- get(kGenomeName)
}
if ( is.null(kGenome)){
  stop( "Load Biocondutor Genome Library ERROR " )
}
# Get sample file path
if(!file.exists(kSamplePath)){
  stop("sample file path \"", kSamplePath ,"\" does not exist.")
}
sample.names <- list.files(kSamplePath)
if(!file.exists(kGenedb)){
  stop("gene database file \"", kGenedb ,"\" does not exist.")
}
# read and process gene csv file and tissues cgmtbr files;
# keep only genes with >= 2 transcripts (i.e. multiple promoters)
gene.csv <- read.table(kGenedb, header = TRUE, sep = "\t")
df.gene <- as.data.frame(table(gene.csv$geneSymbol))
df.gene.sel <- df.gene[df.gene$Freq >= 2,]
gene.multi.promoter <- gene.csv[gene.csv$geneSymbol %in% df.gene.sel$Var1,]
gene.multi.promoter$id <- 1:nrow(gene.multi.promoter)
# promoter window: TSS +/- kPromoterRange (txStart for '+' strand,
# txEnd for '-' strand transcripts)
gene.multi.promoter$promStart <- with(gene.multi.promoter, txStart - kPromoterRange)
gene.multi.promoter$promEnd <- with(gene.multi.promoter, txStart + kPromoterRange)
gene.multi.promoter$promStart[gene.multi.promoter$strand == "-"] <- with(gene.multi.promoter[gene.multi.promoter$strand == "-",], txEnd - kPromoterRange)
gene.multi.promoter$promEnd[gene.multi.promoter$strand == "-"] <- with(gene.multi.promoter[gene.multi.promoter$strand == "-",], txEnd + kPromoterRange)
gene.multi.promoter <- gene.multi.promoter[order(gene.multi.promoter$geneSymbol),]
# number the promoters 1..k within each gene
gene.multi.promoter$promId <- as.data.table(gene.multi.promoter)[,.(id = c(1:length(chrom))), by = geneSymbol]$id
# expand each promoter to one row per genomic position
gene.multi.promoter.expand <- as.data.table(gene.multi.promoter)[,.(posi = promStart:promEnd, chrom, geneSymbol), by = id]
# process by sample: one new score column per sample directory
for (sample.name in sample.names){
  gene.multi.promoter[, sample.name] <- 0
  sample.mtbr.path <- paste(kSamplePath, "/", sample.name, "/mtbr_cg/", sep="")
  message("[*] Processing ", sample.name, "\t", date())
  chr.mtbr.files <- list.files(sample.mtbr.path)
  for (chr.mtbr.file in chr.mtbr.files){
    mtbr.file <- paste(sample.mtbr.path, chr.mtbr.file, sep = "")
    # load() brings the per-chromosome object 'cg.mtbr' into scope
    load(mtbr.file)
    # chromosome name = file name up to the first '.'
    chr.name <- unlist(strsplit(basename(chr.mtbr.file), ".", fixed = TRUE))[1]
    # get CG Density
    message(chr.name, "\tgetting CG density ", "\t", date())
    dna.seq <- kGenome[[chr.name]]
    ref.length <- length(dna.seq)
    density <- GetDensity(cg.mtbr, kWindowSize, ref.length)
    # get Score
    message(chr.name, "\tgetting Score ", "\t", date())
    score <- GetScore(cg.mtbr, kWindowSize, ref.length)
    # rescale density and score onto a common scale (see cgDensity.lib.R)
    message(chr.name, "\trescaling ", "\t", date())
    rescale.data <- RescaleData(density, score)
    rescale.data$pos <- 1:nrow(rescale.data)
    dt.rescale.data <- data.table(rescale.data)
    # calculate region score: mean rescaled score over each promoter window
    chr.promoter.expand <- gene.multi.promoter.expand[gene.multi.promoter.expand$chrom == chr.name,]
    chr.promoter.expand$score <- dt.rescale.data$score[chr.promoter.expand$posi]
    rg.score <- as.data.table(chr.promoter.expand)[,.(score = mean(score)), by = id]$score
    # chained replacement: updates the matching rows of gene.multi.promoter
    gene.multi.promoter[gene.multi.promoter$chrom == chr.name, ][, sample.name] <- rg.score
  }
}
write.table(gene.multi.promoter, "./gene.multi.promoter.csv", row.names = FALSE, col.names = TRUE, quote = FALSE, sep = "\t")
| /DNAmethylation/CpGDenLowess-master/tissuePromoter.Rscript | no_license | qlcm/LILAB | R | false | false | 6,139 | rscript | #!/usr/bin/env Rscript
# mtbr pipeline: score promoter regions of multi-promoter genes by CG
# methylation density/score, producing one output column per sample
# Load libraries
suppressPackageStartupMessages(library("optparse", quietly=TRUE, verbose=FALSE, warn.conflicts=FALSE))
suppressPackageStartupMessages(library("data.table", quietly=TRUE, verbose=FALSE, warn.conflicts=FALSE))
suppressPackageStartupMessages(library("zoo", quietly=TRUE, verbose=FALSE, warn.conflicts=FALSE))
suppressPackageStartupMessages(library("methyutils", quietly=TRUE, verbose=FALSE, warn.conflicts=FALSE))
suppressPackageStartupMessages(library("IRanges", quietly=TRUE, verbose=FALSE, warn.conflicts=FALSE))
# Resolve the directory of this script from Rscript's --file= argument
getScriptPath <- function(){
  cmd.args <- commandArgs()
  m <- regexpr("(?<=^--file=).+", cmd.args, perl=TRUE)
  script.dir <- dirname(regmatches(cmd.args, m))
  if(length(script.dir) == 0) stop("can't determine script dir: please call the script with Rscript")
  if(length(script.dir) > 1) stop("can't determine script dir: more than one '--file' argument detected")
  return(script.dir)
}
source(paste(getScriptPath(),"/cgDensity.lib.R", sep = ""), chdir = TRUE)
## Specify desired command-line options in a list
option_list <- list(
  make_option(c("-l","--genome-library"), help="Bioconductor BSgenome library name", default = "BSgenome.Mmusculus.UCSC.mm9"),
  make_option(c("-n","--genome-name"), help="genome library object name. ex: \"Mmusculus\", \"Hsapiens\", \"Scerevisiae\"", default = "Mmusculus"),
  make_option(c("-t","--genome-type"), help="genome type , example mm9, mm10, hg19, hg18, default is NULL", default = ""),
  make_option(c("-r","--promoter-range"), help="Promoter range", default = 1000),
  make_option(c("-w","--window-size"), help="sliding window size , default is 2500", default = 2500)
)
# Get command line options: two positional args (gene db file, sample dir)
arguments <- parse_args(OptionParser(usage = "%prog [options] gene.csv samplePath", option_list = option_list), positional_arguments = 2)
opt <- arguments$options
kGenomeLibrary <- opt$`genome-library`
kGenomeName <- opt$`genome-name`
kGenomeType <- opt$`genome-type`
kPromoterRange <- opt$`promoter-range`
kWindowSize <- opt$`window-size`
kGenedb <- arguments$args[1]
kSamplePath <- arguments$args[2]
# load the genome library: known shortcut types map to BSgenome packages
kGenomeTypeList <- list(
  mm9 = list(genome.library="BSgenome.Mmusculus.UCSC.mm9",genome.name="Mmusculus"),
  mm10 = list(genome.library="BSgenome.Mmusculus.UCSC.mm10",genome.name="Mmusculus"),
  hg18 = list(genome.library="BSgenome.Hsapiens.UCSC.hg18",genome.name="Hsapiens"),
  hg19 = list(genome.library="BSgenome.Hsapiens.UCSC.hg19",genome.name="Hsapiens"),
  hg38 = list(genome.library="BSgenome.Hsapiens.UCSC.hg38",genome.name="Hsapiens")
)
kGenome <- NULL
if ( kGenomeType %in% names(kGenomeTypeList) ){
  suppressPackageStartupMessages(library(kGenomeTypeList[[kGenomeType]][["genome.library"]], character.only = TRUE, quietly=TRUE, verbose=FALSE, warn.conflicts=FALSE))
  kGenome <- get(kGenomeTypeList[[kGenomeType]][["genome.name"]])
}else {
  # fall back to the explicitly supplied library/object names
  suppressPackageStartupMessages(library(kGenomeLibrary, character.only = TRUE, quietly=TRUE, verbose=FALSE, warn.conflicts=FALSE))
  kGenome <- get(kGenomeName)
}
if ( is.null(kGenome)){
  stop( "Load Biocondutor Genome Library ERROR " )
}
# Get sample file path
if(!file.exists(kSamplePath)){
  stop("sample file path \"", kSamplePath ,"\" does not exist.")
}
sample.names <- list.files(kSamplePath)
if(!file.exists(kGenedb)){
  stop("gene database file \"", kGenedb ,"\" does not exist.")
}
# read and process gene csv file and tissues cgmtbr files;
# keep only genes with >= 2 transcripts (i.e. multiple promoters)
gene.csv <- read.table(kGenedb, header = TRUE, sep = "\t")
df.gene <- as.data.frame(table(gene.csv$geneSymbol))
df.gene.sel <- df.gene[df.gene$Freq >= 2,]
gene.multi.promoter <- gene.csv[gene.csv$geneSymbol %in% df.gene.sel$Var1,]
gene.multi.promoter$id <- 1:nrow(gene.multi.promoter)
# promoter window: TSS +/- kPromoterRange (txStart for '+', txEnd for '-')
gene.multi.promoter$promStart <- with(gene.multi.promoter, txStart - kPromoterRange)
gene.multi.promoter$promEnd <- with(gene.multi.promoter, txStart + kPromoterRange)
gene.multi.promoter$promStart[gene.multi.promoter$strand == "-"] <- with(gene.multi.promoter[gene.multi.promoter$strand == "-",], txEnd - kPromoterRange)
gene.multi.promoter$promEnd[gene.multi.promoter$strand == "-"] <- with(gene.multi.promoter[gene.multi.promoter$strand == "-",], txEnd + kPromoterRange)
gene.multi.promoter <- gene.multi.promoter[order(gene.multi.promoter$geneSymbol),]
# number the promoters 1..k within each gene
gene.multi.promoter$promId <- as.data.table(gene.multi.promoter)[,.(id = c(1:length(chrom))), by = geneSymbol]$id
# expand each promoter to one row per genomic position
gene.multi.promoter.expand <- as.data.table(gene.multi.promoter)[,.(posi = promStart:promEnd, chrom, geneSymbol), by = id]
# process by sample: one new score column per sample directory
for (sample.name in sample.names){
  gene.multi.promoter[, sample.name] <- 0
  sample.mtbr.path <- paste(kSamplePath, "/", sample.name, "/mtbr_cg/", sep="")
  message("[*] Processing ", sample.name, "\t", date())
  chr.mtbr.files <- list.files(sample.mtbr.path)
  for (chr.mtbr.file in chr.mtbr.files){
    mtbr.file <- paste(sample.mtbr.path, chr.mtbr.file, sep = "")
    # load() brings the per-chromosome object 'cg.mtbr' into scope
    load(mtbr.file)
    chr.name <- unlist(strsplit(basename(chr.mtbr.file), ".", fixed = TRUE))[1]
    # get CG Density
    message(chr.name, "\tgetting CG density ", "\t", date())
    dna.seq <- kGenome[[chr.name]]
    ref.length <- length(dna.seq)
    density <- GetDensity(cg.mtbr, kWindowSize, ref.length)
    # get Score
    message(chr.name, "\tgetting Score ", "\t", date())
    score <- GetScore(cg.mtbr, kWindowSize, ref.length)
    # rescale density and score onto a common scale (see cgDensity.lib.R)
    message(chr.name, "\trescaling ", "\t", date())
    rescale.data <- RescaleData(density, score)
    rescale.data$pos <- 1:nrow(rescale.data)
    dt.rescale.data <- data.table(rescale.data)
    # calculate region score: mean rescaled score over each promoter window
    chr.promoter.expand <- gene.multi.promoter.expand[gene.multi.promoter.expand$chrom == chr.name,]
    chr.promoter.expand$score <- dt.rescale.data$score[chr.promoter.expand$posi]
    rg.score <- as.data.table(chr.promoter.expand)[,.(score = mean(score)), by = id]$score
    # chained replacement: updates the matching rows of gene.multi.promoter
    gene.multi.promoter[gene.multi.promoter$chrom == chr.name, ][, sample.name] <- rg.score
  }
}
write.table(gene.multi.promoter, "./gene.multi.promoter.csv", row.names = FALSE, col.names = TRUE, quote = FALSE, sep = "\t")
|
library(ggplot2)
library(plyr)
# Toggle to re-download the raw CSV exports from the server.
DOWNLOAD <- FALSE
exportClicksFile <- "Download_ExportClicks_07172014.csv"
totalUsersFile <- "Download_TotalUsers_07162014.csv"
# Download files
if (DOWNLOAD) {
  download.file("https://terra-incognita.co/exportclicks/", method = "curl", destfile = exportClicksFile)
  download.file("https://terra-incognita.co/totalusers/", method = "curl", destfile = totalUsersFile)
}
# Get total Users (single-cell CSV)
totalUsersDF <- read.csv(totalUsersFile)
totalUsers <- totalUsersDF[1, 1]
# Read in click data and remove blank and null userID rows
df <- read.csv(exportClicksFile)
df <- df[!df$userID == "" & !df$userID == "null", ]
# SUMMARY
# % who clicked at all
userCount <- length(unique(df$userID))
print(paste0(round(userCount / totalUsers * 100, 1), "% of users have clicked on something in Terra Incognita"))
# % who clicked more than once
freqClicks <- as.data.frame(table(df$userID))
moreThanOnce <- nrow(freqClicks[freqClicks$Freq > 1, ])
print(paste0(round(moreThanOnce / totalUsers * 100, 1), "% of users have clicked more than once"))
# Summary clicks
print("-----------------------------------")
print(paste(round(min(freqClicks$Freq), 1), "is the minimum clicks someone did"))
print(paste(round(max(freqClicks$Freq), 1), "is the maximum clicks someone did"))
print(paste(round(median(freqClicks$Freq), 1), "is the median clicks per user"))
print(paste(round(mean(freqClicks$Freq), 1), "is the average clicks per user"))
# Group by ui_source (which UI element the click came from)
print("-----------------------------------")
groups <- aggregate(df$userID, by = list(df$ui_source), FUN = length)
redButtonClicks <- groups[groups$Group.1 == "redbutton", 2]
systemStoryClicks <- groups[groups$Group.1 == "system-story", 2]
userStoryClicks <- groups[groups$Group.1 == "user-story", 2]
totalClicks <- nrow(df)
print(paste("People clicked the red button", round(redButtonClicks / totalClicks * 100, 1), "% of the time"))
print(paste("People clicked on a recommended story", round(systemStoryClicks / totalClicks * 100, 1), "% of the time"))
print(paste("People clicked on a story they had already read", round(userStoryClicks / totalClicks * 100, 1), "% of the time"))
# Group by random_city (1 = city chosen by the system, 0 = by the user)
print("-----------------------------------")
groups <- aggregate(df$userID, by = list(df$random_city), FUN = length)
randomCityClicks <- groups[groups$Group.1 == 1, 2]
notRandomCityClicks <- groups[groups$Group.1 == 0, 2]
print(paste("People clicked on cities that they chose", round(notRandomCityClicks / totalClicks * 100, 1), "% of the time"))
print(paste("People clicked on cities chosen by the system", round(randomCityClicks / totalClicks * 100, 1), "% of the time"))
print("-----------------------------------")
# MAKE A BARPLOT TO SHOW INDIVIDUAL USER TOTALS FOR CLICKS
freqClicks <- freqClicks[with(freqClicks, order(-Freq)), ]
png("userClicksBarplot.png", width = 640, height = 480)
barplot(freqClicks$Freq, ylim = c(0, 100), xlab = "Users", ylab = "Total Clicks", main = "Total Recommendations Clicked By Users", col = "red")
abline(h = 5, col = "blue", lty = 1)
text(93.5, 7, "median = 5", col = "blue")
dev.off()
# MAKE A BARPLOT TO SHOW WHICH WAY USERS PREFERRED TO ACCESS RECS
png("userClicksPrefs.png", width = 1000, height = 480)
d1 <- ddply(df, c("userID", "ui_source"), function(x) c(count = nrow(x)))
# ggplot objects only render when print()ed; relying on top-level
# autoprinting fails when this script is source()d, leaving an empty PNG.
print(
  ggplot(d1, aes(factor(userID), count, fill = ui_source)) +
    geom_bar(stat = "identity", position = "dodge") +
    coord_cartesian(ylim = c(0, 35)) +
    xlab("Users") + ylab("Clicks") +
    scale_fill_brewer(palette = "Set1")
)
dev.off()
# SUMMARY STATS ABOUT INDIV USERS
freqSources <- as.data.frame(table(d1$userID))
moreThanOnce <- nrow(freqSources[freqSources$Freq > 1, ])
print(paste0(round(moreThanOnce / totalUsers * 100, 1), "% of users tried more than one way of accessing recommendations"))
# SUMMARY STATS ABOUT INDIV USER PREFS
# keep, per user, the source(s) they clicked most often
d2 <- ddply(d1, "userID", subset, count == max(count))
# users tied between sources appear more than once; keep only users with a
# single clear favourite
freqSources2 <- as.data.frame(table(d2$userID))
usersWithPrefs <- freqSources2[freqSources2$Freq == 1, ]
d3 <- d2[d2$userID %in% usersWithPrefs$Var1, ]
print(paste0(round(nrow(d3) / totalUsers * 100, 1), "% of users showed a clear preference for where they clicked"))
freqPrefs <- as.data.frame(table(d3$ui_source))
# SHOW INDIVIDUAL USER FAVORITE WAY OF ACCESSING RECS
png("userClicksPrefs2.png", width = 640, height = 480)
barplot(freqPrefs$Freq, main = "How did users prefer to get to recommendations?", xlab = "UI Source", col = "red", names.arg = c("Red Button", "Click on Headline", "Click on Story They Read"))
dev.off() | /analysis/summarizeClickData.R | no_license | mitmedialab/Terra-Incognita | R | false | false | 4,605 | r | library(ggplot2)
library(plyr)
# ggplot2 is also required (loaded at the very top of this script).

# Summarize Terra Incognita click-log exports: prints headline stats and
# writes three PNG barplots to the working directory.

# Toggle to re-download fresh exports from the live server before analyzing.
DOWNLOAD <- FALSE
exportClicksFile <- "Download_ExportClicks_07172014.csv"
totalUsersFile <- "Download_TotalUsers_07162014.csv"

# Download files
if (DOWNLOAD) {
  download.file("https://terra-incognita.co/exportclicks/", method = "curl", destfile = exportClicksFile)
  download.file("https://terra-incognita.co/totalusers/", method = "curl", destfile = totalUsersFile)
}

# Get total Users # (the export is a single-cell CSV)
totalUsersDF <- read.csv(totalUsersFile)
totalUsers <- totalUsersDF[1, 1]

# Read in click data
df <- read.csv(exportClicksFile)
# Remove blank and null userID rows
df <- df[!df$userID == "" & !df$userID == "null", ]

# SUMMARY
# % who clicked at all
userCount <- length(unique(df$userID))
print(paste(round(userCount / totalUsers * 100, 1), "% of users have clicked on something in Terra Incognita", sep = ""))
# % who clicked more than once
freqClicks <- as.data.frame(table(df$userID))
moreThanOnce <- nrow(freqClicks[freqClicks$Freq > 1, ])
print(paste(round(moreThanOnce / totalUsers * 100, 1), "% of users have clicked more than once", sep = ""))
# Summary clicks
print("-----------------------------------")
print(paste(round(min(freqClicks$Freq), 1), "is the minimum clicks someone did"))
print(paste(round(max(freqClicks$Freq), 1), "is the maximum clicks someone did"))
print(paste(round(median(freqClicks$Freq), 1), "is the median clicks per user"))
print(paste(round(mean(freqClicks$Freq), 1), "is the average clicks per user"))

# Group by ui_source: where in the UI did each click originate?
print("-----------------------------------")
groups <- aggregate(df$userID, by = list(df$ui_source), FUN = length)
redButtonClicks <- groups[groups$Group.1 == "redbutton", 2]
systemStoryClicks <- groups[groups$Group.1 == "system-story", 2]
userStoryClicks <- groups[groups$Group.1 == "user-story", 2]
totalClicks <- nrow(df)
print(paste("People clicked the red button", round(redButtonClicks / totalClicks * 100, 1), "% of the time"))
print(paste("People clicked on a recommended story", round(systemStoryClicks / totalClicks * 100, 1), "% of the time"))
print(paste("People clicked on a story they had already read", round(userStoryClicks / totalClicks * 100, 1), "% of the time"))

# Group by random_city: 1 = system-chosen city, 0 = user-chosen city.
print("-----------------------------------")
groups <- aggregate(df$userID, by = list(df$random_city), FUN = length)
randomCityClicks <- groups[groups$Group.1 == 1, 2]
notRandomCityClicks <- groups[groups$Group.1 == 0, 2]
print(paste("People clicked on cities that they chose", round(notRandomCityClicks / totalClicks * 100, 1), "% of the time"))
print(paste("People clicked on cities chosen by the system", round(randomCityClicks / totalClicks * 100, 1), "% of the time"))
print("-----------------------------------")

# MAKE A BARPLOT TO SHOW INDIVIDUAL USER TOTALS FOR CLICKS
# Sort users by descending click count so the plot is monotone.
freqClicks <- freqClicks[order(-freqClicks$Freq), ]
# FIX: compute the median instead of hard-coding 5, so the reference line
# and its label stay correct as the data grows.
medianClicks <- median(freqClicks$Freq)
png("userClicksBarplot.png", width = 640, height = 480)
barplot(freqClicks$Freq, ylim = c(0, 100), xlab = "Users", ylab = "Total Clicks", main = "Total Recommendations Clicked By Users", col = "red")
abline(h = medianClicks, col = "blue", lty = 1)
text(93.5, medianClicks + 2, paste("median =", medianClicks), col = "blue")
dev.off()

# MAKE A BARPLOT TO SHOW WHICH WAY USERS PREFERRED TO ACCESS RECS
png("userClicksPrefs.png", width = 1000, height = 480)
# d1: one row per (user, ui_source) pair with that user's click count there.
d1 <- ddply(df, c("userID", "ui_source"), function(x) c(count = nrow(x)))
# FIX: ggplot objects are not auto-printed when this script is source()d,
# which previously produced a blank PNG in that case — print explicitly.
print(
  ggplot(d1, aes(factor(userID), count, fill = ui_source)) +
    geom_bar(stat = "identity", position = "dodge") +
    coord_cartesian(ylim = c(0, 35)) +
    xlab("Users") +
    ylab("Clicks") +
    scale_fill_brewer(palette = "Set1")
)
dev.off()

# SUMMARY STATS ABOUT INDIV USERS
# freqSources$Freq = number of distinct ui_sources each user clicked through.
freqSources <- as.data.frame(table(d1$userID))
moreThanOnce <- nrow(freqSources[freqSources$Freq > 1, ])
print(paste(round(moreThanOnce / totalUsers * 100, 1), "% of users tried more than one way of accessing recommendations", sep = ""))

# SUMMARY STATS ABOUT INDIV USER PREFS
# whittles down to what users have clicked on the most
d2 <- ddply(d1, "userID", subset, count == max(count))
# still has duplicate userIDs if they were tied for clicks btw red button & jheadlines
freqSources2 <- as.data.frame(table(d2$userID))
# filter if they show a clear preference (Freq == 1 means no tie)
usersWithPrefs <- freqSources2[freqSources2$Freq == 1, ]
d3 <- d2[d2$userID %in% usersWithPrefs$Var1, ]
print(paste(round(nrow(d3) / totalUsers * 100, 1), "% of users showed a clear preference for where they clicked", sep = ""))
freqPrefs <- as.data.frame(table(d3$ui_source))

# SHOW INDIVIDUAL USER FAVORITE WAY OF ACCESSING RECS
# NOTE(review): names.arg assumes table() yields levels in the order
# redbutton, system-story, user-story — confirm against the data.
png("userClicksPrefs2.png", width = 640, height = 480)
barplot(freqPrefs$Freq, main = "How did users prefer to get to recommendations?", xlab = "UI Source", col = "red", names.arg = c("Red Button", "Click on Headline", "Click on Story They Read"))
dev.off() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/numeric_leading_zero_linter.R
\name{numeric_leading_zero_linter}
\alias{numeric_leading_zero_linter}
\title{Require usage of a leading zero in all fractional numerics}
\usage{
numeric_leading_zero_linter()
}
\description{
While .1 and 0.1 mean the same thing, the latter is easier to read due
to the small size of the '.' glyph.
}
\seealso{
\link{linters} for a complete list of linters available in lintr.
}
\section{Tags}{
\link[=consistency_linters]{consistency}, \link[=readability_linters]{readability}, \link[=style_linters]{style}
}
| /man/numeric_leading_zero_linter.Rd | permissive | russHyde/lintr | R | false | true | 618 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/numeric_leading_zero_linter.R
\name{numeric_leading_zero_linter}
\alias{numeric_leading_zero_linter}
\title{Require usage of a leading zero in all fractional numerics}
\usage{
numeric_leading_zero_linter()
}
\description{
While .1 and 0.1 mean the same thing, the latter is easier to read due
to the small size of the '.' glyph.
}
\seealso{
\link{linters} for a complete list of linters available in lintr.
}
\section{Tags}{
\link[=consistency_linters]{consistency}, \link[=readability_linters]{readability}, \link[=style_linters]{style}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.