content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hostname.R
\name{ip_to_hostname}
\alias{ip_to_hostname}
\alias{hostname_to_ip}
\title{Translate address to/from hostname}
\usage{
ip_to_hostname(x, multiple = FALSE)
hostname_to_ip(x, multiple = FALSE)
}
\arguments{
\item{x}{\itemize{
\item For \code{ip_to_hostname()}: An \code{\link{ip_address}} vector
\item For \code{hostname_to_ip()}: A character vector of hostnames
}}
\item{multiple}{A logical scalar indicating if \emph{all} resolved endpoints are
returned, or just the first endpoint (the default). This determines whether
a vector or list of vectors is returned.}
}
\value{
\itemize{
\item For \code{ip_to_hostname()}: A character vector (\code{multiple = FALSE}) or
a list of character vectors (\code{multiple = TRUE})
\item For \code{hostname_to_ip()}: An \code{\link{ip_address}} vector (\code{multiple = FALSE}) or
a list of \code{\link{ip_address}} vectors (\code{multiple = TRUE})
}
}
\description{
Perform reverse and forward DNS resolution
}
\details{
These functions require an internet connection. Before processing the input
vector, we first check that a known hostname can be resolved. If this fails,
an error is raised.
If DNS lookup cannot resolve an input, then \code{NA} is returned for that input.
If an error occurs during DNS lookup, then a warning is emitted and \code{NA} is
returned for that input.
DNS resolution performs a many-to-many mapping between IP addresses and
hostnames. For this reason, these two functions can potentially return
multiple values for each element of the input vector. The \code{multiple} argument
controls whether \emph{all} values are returned (a vector for each input), or
just the first value (a scalar for each input).
}
\examples{
\dontrun{
hostname_to_ip("r-project.org")
ip_to_hostname(hostname_to_ip("r-project.org"))
}
}
\seealso{
The base function \code{nsl()} provides forward DNS resolution to IPv4 addresses,
but only on Unix-like systems.
}
| /man/ip_to_hostname.Rd | permissive | DavisVaughan/ipaddress | R | false | true | 1,996 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hostname.R
\name{ip_to_hostname}
\alias{ip_to_hostname}
\alias{hostname_to_ip}
\title{Translate address to/from hostname}
\usage{
ip_to_hostname(x, multiple = FALSE)
hostname_to_ip(x, multiple = FALSE)
}
\arguments{
\item{x}{\itemize{
\item For \code{ip_to_hostname()}: An \code{\link{ip_address}} vector
\item For \code{hostname_to_ip()}: A character vector of hostnames
}}
\item{multiple}{A logical scalar indicating if \emph{all} resolved endpoints are
returned, or just the first endpoint (the default). This determines whether
a vector or list of vectors is returned.}
}
\value{
\itemize{
\item For \code{ip_to_hostname()}: A character vector (\code{multiple = FALSE}) or
a list of character vectors (\code{multiple = TRUE})
\item For \code{hostname_to_ip()}: A \code{\link{ip_address}} vector (\code{multiple = FALSE}) or
a list of \code{\link{ip_address}} vectors (\code{multiple = TRUE})
}
}
\description{
Perform reverse and forward DNS resolution
}
\details{
These functions require an internet connection. Before processing the input
vector, we first check that a known hostname can be resolved. If this fails,
an error is raised.
If DNS lookup cannot resolve an input, then \code{NA} is returned for that input.
If an error occurs during DNS lookup, then a warning is emitted and \code{NA} is
returned for that input.
DNS resolution performs a many-to-many mapping between IP addresses and
hostnames. For this reason, these two functions can potentially return
multiple values for each element of the input vector. The \code{multiple} argument
control whether \emph{all} values are returned (a vector for each input), or
just the first value (a scalar for each input).
}
\examples{
\dontrun{
hostname_to_ip("r-project.org")
ip_to_hostname(hostname_to_ip("r-project.org"))
}
}
\seealso{
The base function \code{nsl()} provides forward DNS resolution to IPv4 addresses,
but only on Unix-like systems.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{clust_all}
\alias{clust_all}
\title{Build mclust models for multiple fragments}
\usage{
clust_all(
data,
fragnames,
contrib_col_name = "Contribution",
mol_col_name = "MolID"
)
}
\arguments{
\item{data}{input data.frame}
\item{fragnames}{column of the data.frame containing fragments names (i.e. FragID or full_name)}
\item{contrib_col_name}{name of a column with contribution values}
\item{mol_col_name}{name of a column with names (ids) of molecules}
}
\value{
list containing mclust models for fragments contained in data.frame
}
\description{
Build mclust models for multiple fragments
}
\details{
If all contributions of a fragment are identical the model for that fragment
will not be built.
}
\examples{
file_name <- system.file("extdata", "BBB_frag_contributions.txt", package = "rspci")
df <- load_data(file_name)
df <- dplyr::filter(df, Model == "consensus", Property == "overall")
df <- add_full_names(df)
models <- clust_all(df, "full_name")
}
| /man/clust_all.Rd | no_license | DrrDom/rspci | R | false | true | 1,062 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{clust_all}
\alias{clust_all}
\title{Build mclust models for multiple fragments}
\usage{
clust_all(
data,
fragnames,
contrib_col_name = "Contribution",
mol_col_name = "MolID"
)
}
\arguments{
\item{data}{input data.frame}
\item{fragnames}{column of the data.frame containing fragments names (i.e. FragID or full_name)}
\item{contrib_col_name}{name of a column with contribution values}
\item{mol_col_name}{name of a column with names (ids) of molecules}
}
\value{
list containing mclust models for fragments contained in data.frame
}
\description{
Build mclust models for multiple fragments
}
\details{
If all contributions of a fragment are identical the model for that fragment
will not be built.
}
\examples{
file_name <- system.file("extdata", "BBB_frag_contributions.txt", package = "rspci")
df <- load_data(file_name)
df <- dplyr::filter(df, Model == "consensus", Property == "overall")
df <- add_full_names(df)
models <- clust_all(df, "full_name")
}
|
##-------------------------------------------------------------------------------
## Copyright (c) 2012 University of Illinois, NCSA.
## All rights reserved. This program and the accompanying materials
## are made available under the terms of the
## University of Illinois/NCSA Open Source License
## which accompanies this distribution, and is available at
## http://opensource.ncsa.illinois.edu/license.html
##-------------------------------------------------------------------------------
##--------------------------------------------------------------------------------------------------#
##' Reads output of sensitivity analysis runs
##'
##'
##' @title Read Sensitivity Analysis output
##' @return dataframe with one col per quantile analysed and one row per trait,
##' each cell is a list of AGB over time
##' @param traits model parameters included in the sensitivity analysis
##' @param quantiles quantiles selected for sensitivity analysis
##' @param pecandir specifies where pecan writes its configuration files
##' @param outdir directory with model output to use in sensitivity analysis
##' @param pft.name name of PFT used in sensitivity analysis (Optional)
##' @param start.year first year to include in sensitivity analysis
##' @param end.year last year to include in sensitivity analysis
##' @param variables variables to be read from model output
##' @export
#--------------------------------------------------------------------------------------------------#
read.sa.output <- function(traits, quantiles, pecandir, outdir, pft.name='',
                           start.year, end.year, variables){
  # Make sure the run-id bookkeeping object `runs.samples` is available.
  # If it is not already on the search path, restore it from
  # pecandir/samples.Rdata (which is expected to define `runs.samples`).
  if (!exists('runs.samples')) {
    samples.file <- file.path(pecandir, 'samples.Rdata')
    if(file.exists(samples.file)){
      load(samples.file)
    } else {
      logger.error(samples.file, "not found, this file is required by the read.sa.output function")
      # FIX: logger.error() does not halt execution, so previously control
      # fell through and failed later with a confusing
      # "object 'sa.runs' not found" error. Fail fast instead.
      stop(samples.file, " not found, this file is required by the read.sa.output function")
    }
  }
  # FIX: this assignment used to live only inside the !exists() branch, so a
  # pre-existing `runs.samples` caused an undefined-variable error below.
  sa.runs <- runs.samples$sa
  # One row per quantile, one column per trait; each cell receives the mean
  # of the model output for that run.
  # NOTE(review): `quantiles` is used both as dimnames and as a matrix index,
  # so it is assumed to be a vector of character labels matching the row
  # names of sa.runs[[pft.name]] -- confirm against the caller.
  sa.output <- matrix(nrow = length(quantiles),
                      ncol = length(traits),
                      dimnames = list(quantiles, traits))
  for(trait in traits){
    for(quantile in quantiles){
      run.id <- sa.runs[[pft.name]][quantile, trait]
      sa.output[quantile, trait] <- sapply(read.output(run.id, file.path(outdir, run.id),
                                                       start.year, end.year, variables),
                                           mean, na.rm=TRUE)
    } ## end loop over quantiles
    logger.info("reading sensitivity analysis output for model run at ", quantiles, "quantiles of trait", trait)
  } ## end loop over traits
  sa.output <- as.data.frame(sa.output)
  return(sa.output)
}
##==================================================================================================#
##' Write sensitivity analysis config files
##'
##' Writes config files for use in sensitivity analysis.
##' @title Write sensitivity analysis configs
##' @param defaults pft defaults passed through to the model-specific write.config function
##' @param quantile.samples named list with one matrix per pft; quantile labels as row names and trait names as column names
##' @param settings list of settings
##' @param model model name, used to resolve the model-specific \code{write.config.<model>} function, e.g. \link{write.config.ED}
##' @param clean if TRUE, remove any existing run directories before recreating them
##' @param write.to.db if TRUE, record the ensemble and runs in the PEcAn database
##' @return data frame of runids, writes sensitivity analysis configuration files as a side effect
##' @export
##' @author David LeBauer, Carl Davidson
write.sa.configs <- function(defaults, quantile.samples, settings, model,
                             clean=FALSE, write.to.db = TRUE){
  # Name of the model-specific config writer, e.g. "write.config.ED",
  # dispatched below via do.call().
  my.write.config <- paste("write.config.", model,sep="")
  # Optionally open a database connection. On failure try() returns a
  # character error string, which we convert to NULL so that all database
  # bookkeeping below is silently skipped.
  if(write.to.db){
    con <- try(db.open(settings$database), silent=TRUE)
    if(is.character(con)){
      con <- NULL
    }
  } else {
    con <- NULL
  }
  # Get the workflow id
  if ("workflow" %in% names(settings)) {
    workflow.id <- settings$workflow$id
  } else {
    workflow.id <- -1
  }
  runs <- data.frame()
  ##write median run
  # Baseline run: every trait of every pft held at its median quantile.
  # The label "50" must match a row name of each quantile.samples matrix.
  MEDIAN <- '50'
  median.samples <- list()
  for(i in 1:length(quantile.samples)){
    median.samples[[i]] <- quantile.samples[[i]][MEDIAN,]
  }
  names(median.samples) <- names(quantile.samples)
  if (!is.null(con)) {
    # Register the ensemble and the median run, then read back the generated
    # ids. NOTE(review): SQL is assembled by string pasting and rows are
    # re-fetched by created_at timestamp / parameter_list, which could
    # collide under concurrent writers -- confirm this is acceptable.
    now <- format(Sys.time(), "%Y-%m-%d %H:%M:%S")
    query.base(paste("INSERT INTO ensembles (created_at, runtype, workflow_id) values ('", now, "', 'sensitivity analysis', ", workflow.id, ")", sep=''), con)
    ensemble.id <- query.base(paste("SELECT id FROM ensembles WHERE created_at='", now, "'", sep=''), con)[['id']]
    paramlist <- paste("quantile=MEDIAN,trait=all,pft=", paste(lapply(settings$pfts, function(x) x[['name']]), sep=','), sep='')
    query.base(paste("INSERT INTO runs (model_id, site_id, start_time, finish_time, outdir, created_at, ensemble_id, parameter_list) values ('", settings$model$id, "', '", settings$run$site$id, "', '", settings$run$start.date, "', '", settings$run$end.date, "', '",settings$run$outdir , "', '", now, "', ", ensemble.id, ", '", paramlist, "')", sep=''), con)
    run.id <- query.base(paste("SELECT id FROM runs WHERE created_at='", now, "' AND parameter_list='", paramlist, "'", sep=''), con)[['id']]
  } else {
    # No database: fall back to locally generated identifiers.
    run.id <- get.run.id('SA', 'median')
    ensemble.id <- "NA"
  }
  medianrun <- run.id
  # create folders (cleaning up old ones if needed)
  if(clean) {
    unlink(file.path(settings$rundir, run.id))
    unlink(file.path(settings$modeloutdir, run.id))
  }
  dir.create(file.path(settings$rundir, run.id), recursive=TRUE)
  dir.create(file.path(settings$modeloutdir, run.id), recursive=TRUE)
  # write run information to disk
  ## TODO need to print list of pft names and trait names
  cat("runtype : sensitivity analysis\n",
      "workflow id : ", workflow.id, "\n",
      "ensemble id : ", ensemble.id, "\n",
      "pft name : ALL PFT", "\n",
      "quantile : MEDIAN\n",
      "trait : ALL TRAIT", "\n",
      "run id : ", run.id, "\n",
      "model : ", model, "\n",
      "model id : ", settings$model$id, "\n",
      "site : ", settings$run$site$name, "\n",
      "site id : ", settings$run$site$id, "\n",
      "met data : ", settings$run$site$met, "\n",
      "start date : ", settings$run$start.date, "\n",
      "end date : ", settings$run$end.date, "\n",
      "hostname : ", settings$run$host$name, "\n",
      "rundir : ", file.path(settings$run$host$rundir, run.id), "\n",
      "outdir : ", file.path(settings$run$host$outdir, run.id), "\n",
      file=file.path(settings$rundir, run.id, "README.txt"), sep='')
  # write configuration
  # Dispatch to the model-specific writer for the median parameter set.
  do.call(my.write.config, args=list(defaults = defaults,
                                     trait.values = median.samples,
                                     settings = settings,
                                     run.id = run.id))
  cat(run.id, file=file.path(settings$rundir, "runs.txt"), sep="\n", append=TRUE)
  ## loop over pfts
  # One-at-a-time perturbation: for each pft/trait/non-median quantile, start
  # from the median parameter set and replace just that single trait value.
  runs <- list()
  for(i in seq(names(quantile.samples))){
    pftname <- names(quantile.samples)[i]
    if (pftname == "env") {
      # "env" entries are skipped -- presumably environment samples rather
      # than pft traits; confirm.
      next
    }
    traits <- colnames(quantile.samples[[i]])
    quantiles.str <- rownames(quantile.samples[[i]])
    runs[[pftname]] <- data.frame()
    ## loop over variables
    for (trait in traits) {
      for(quantile.str in quantiles.str) {
        if (quantile.str != MEDIAN) {
          quantile <- as.numeric(quantile.str)/100
          trait.samples <- median.samples
          # Perturb only this trait of this pft away from the median.
          trait.samples[[i]][trait] <- quantile.samples[[i]][quantile.str, trait]
          if (!is.null(con)) {
            now <- format(Sys.time(), "%Y-%m-%d %H:%M:%S")
            paramlist <- paste("quantile=", quantile.str, ",trait=", trait, ",pft=", pftname, sep='')
            query.base(paste("INSERT INTO runs (model_id, site_id, start_time, finish_time, outdir, created_at, ensemble_id, parameter_list) values ('", settings$model$id, "', '", settings$run$site$id, "', '", settings$run$start.date, "', '", settings$run$end.date, "', '",settings$run$outdir , "', '", now, "', ", ensemble.id, ", '", paramlist, "')", sep=''), con)
            run.id <- query.base(paste("SELECT id FROM runs WHERE created_at='", now, "' AND parameter_list='", paramlist, "'", sep=''), con)[['id']]
          } else {
            run.id <- get.run.id('SA', round(quantile,3), trait=trait,
                                 pft.name=names(trait.samples)[i])
          }
          runs[[pftname]][quantile.str, trait] <- run.id
          # create folders (cleaning up old ones if needed)
          if(clean) {
            unlink(file.path(settings$rundir, run.id))
            unlink(file.path(settings$modeloutdir, run.id))
          }
          dir.create(file.path(settings$rundir, run.id), recursive=TRUE)
          dir.create(file.path(settings$modeloutdir, run.id), recursive=TRUE)
          # write run information to disk
          cat("runtype : sensitivity analysis\n",
              "workflow id : ", workflow.id, "\n",
              "ensemble id : ", ensemble.id, "\n",
              "pft name : ", names(trait.samples)[i], "\n",
              "quantile : ", quantile.str, "\n",
              "trait : ", trait, "\n",
              "run id : ", run.id, "\n",
              "model : ", model, "\n",
              "model id : ", settings$model$id, "\n",
              "site : ", settings$run$site$name, "\n",
              "site id : ", settings$run$site$id, "\n",
              "met data : ", settings$run$site$met, "\n",
              "start date : ", settings$run$start.date, "\n",
              "end date : ", settings$run$end.date, "\n",
              "hostname : ", settings$run$host$name, "\n",
              "rundir : ", file.path(settings$run$host$rundir, run.id), "\n",
              "outdir : ", file.path(settings$run$host$outdir, run.id), "\n",
              file=file.path(settings$rundir, run.id, "README.txt"), sep='')
          # write configuration
          # NOTE(review): run.id is passed positionally here, unlike the
          # median-run call above which names it (run.id = run.id). do.call
          # will bind it to the first unmatched formal of the writer --
          # confirm the writer's argument order makes this equivalent.
          do.call(my.write.config,
                  args = list(defaults = defaults,
                              trait.values = trait.samples,
                              settings = settings, run.id))
          cat(run.id, file=file.path(settings$rundir, "runs.txt"), sep="\n", append=TRUE)
        } else {
          # Median cell: reuse the shared median run rather than re-running it.
          runs[[pftname]][MEDIAN, trait] <- medianrun
        }
      }
    }
  }
  if (!is.null(con)) {
    db.close(con)
  }
  invisible(runs)
}
#==================================================================================================#
| /utils/R/sensitivity.R | permissive | kemball/pecan | R | false | false | 10,711 | r | ##-------------------------------------------------------------------------------
## Copyright (c) 2012 University of Illinois, NCSA.
## All rights reserved. This program and the accompanying materials
## are made available under the terms of the
## University of Illinois/NCSA Open Source License
## which accompanies this distribution, and is available at
## http://opensource.ncsa.illinois.edu/license.html
##-------------------------------------------------------------------------------
##--------------------------------------------------------------------------------------------------#
##' Reads output of sensitivity analysis runs
##'
##'
##' @title Read Sensitivity Analysis output
##' @return dataframe with one col per quantile analysed and one row per trait,
##' each cell is a list of AGB over time
##' @param traits model parameters included in the sensitivity analysis
##' @param quantiles quantiles selected for sensitivity analysis
##' @param pecandir specifies where pecan writes its configuration files
##' @param outdir directory with model output to use in sensitivity analysis
##' @param pft.name name of PFT used in sensitivity analysis (Optional)
##' @param start.year first year to include in sensitivity analysis
##' @param end.year last year to include in sensitivity analysis
##' @param variables variables to be read from model output
##' @export
#--------------------------------------------------------------------------------------------------#
read.sa.output <- function(traits, quantiles, pecandir, outdir, pft.name='',
start.year, end.year, variables){
if (!exists('runs.samples')) {
samples.file <- file.path(pecandir, 'samples.Rdata')
if(file.exists(samples.file)){
load(samples.file)
sa.runs <- runs.samples$sa
} else {
logger.error(samples.file, "not found, this file is required by the read.sa.output function")
}
}
sa.output <- matrix(nrow = length(quantiles),
ncol = length(traits),
dimnames = list(quantiles, traits))
for(trait in traits){
for(quantile in quantiles){
run.id <- sa.runs[[pft.name]][quantile, trait]
sa.output[quantile, trait] <- sapply(read.output(run.id, file.path(outdir, run.id),
start.year, end.year, variables),
mean, na.rm=TRUE)
} ## end loop over quantiles
logger.info("reading sensitivity analysis output for model run at ", quantiles, "quantiles of trait", trait)
} ## end loop over traits
sa.output <- as.data.frame(sa.output)
return(sa.output)
}
##==================================================================================================#
##' Write sensitivity analysis config files
##'
##' Writes config files for use in sensitivity analysis.
##' @title Write sensitivity analysis configs
##' @param pft pft id used to query PEcAn database
##' @param quantile.samples
##' @param settings list of settings
##' @param write.config a model-specific function to write config files, e.g. \link{write.config.ED}
##' @param convert.samples a model-specific function that transforms variables from units used in database to units used by model, e.g. \link{convert.samples.ED}
##' @param ensemble.samples list of lists supplied by \link{get.sa.samples}
##' @return data frame of runids, writes sensitivity analysis configuration files as a side effect
##' @export
##' @author David LeBauer, Carl Davidson
write.sa.configs <- function(defaults, quantile.samples, settings, model,
clean=FALSE, write.to.db = TRUE){
my.write.config <- paste("write.config.", model,sep="")
if(write.to.db){
con <- try(db.open(settings$database), silent=TRUE)
if(is.character(con)){
con <- NULL
}
} else {
con <- NULL
}
# Get the workflow id
if ("workflow" %in% names(settings)) {
workflow.id <- settings$workflow$id
} else {
workflow.id <- -1
}
runs <- data.frame()
##write median run
MEDIAN <- '50'
median.samples <- list()
for(i in 1:length(quantile.samples)){
median.samples[[i]] <- quantile.samples[[i]][MEDIAN,]
}
names(median.samples) <- names(quantile.samples)
if (!is.null(con)) {
now <- format(Sys.time(), "%Y-%m-%d %H:%M:%S")
query.base(paste("INSERT INTO ensembles (created_at, runtype, workflow_id) values ('", now, "', 'sensitivity analysis', ", workflow.id, ")", sep=''), con)
ensemble.id <- query.base(paste("SELECT id FROM ensembles WHERE created_at='", now, "'", sep=''), con)[['id']]
paramlist <- paste("quantile=MEDIAN,trait=all,pft=", paste(lapply(settings$pfts, function(x) x[['name']]), sep=','), sep='')
query.base(paste("INSERT INTO runs (model_id, site_id, start_time, finish_time, outdir, created_at, ensemble_id, parameter_list) values ('", settings$model$id, "', '", settings$run$site$id, "', '", settings$run$start.date, "', '", settings$run$end.date, "', '",settings$run$outdir , "', '", now, "', ", ensemble.id, ", '", paramlist, "')", sep=''), con)
run.id <- query.base(paste("SELECT id FROM runs WHERE created_at='", now, "' AND parameter_list='", paramlist, "'", sep=''), con)[['id']]
} else {
run.id <- get.run.id('SA', 'median')
ensemble.id <- "NA"
}
medianrun <- run.id
# create folders (cleaning up old ones if needed)
if(clean) {
unlink(file.path(settings$rundir, run.id))
unlink(file.path(settings$modeloutdir, run.id))
}
dir.create(file.path(settings$rundir, run.id), recursive=TRUE)
dir.create(file.path(settings$modeloutdir, run.id), recursive=TRUE)
# write run information to disk
## TODO need to print list of pft names and trait names
cat("runtype : sensitivity analysis\n",
"workflow id : ", workflow.id, "\n",
"ensemble id : ", ensemble.id, "\n",
"pft name : ALL PFT", "\n",
"quantile : MEDIAN\n",
"trait : ALL TRAIT", "\n",
"run id : ", run.id, "\n",
"model : ", model, "\n",
"model id : ", settings$model$id, "\n",
"site : ", settings$run$site$name, "\n",
"site id : ", settings$run$site$id, "\n",
"met data : ", settings$run$site$met, "\n",
"start date : ", settings$run$start.date, "\n",
"end date : ", settings$run$end.date, "\n",
"hostname : ", settings$run$host$name, "\n",
"rundir : ", file.path(settings$run$host$rundir, run.id), "\n",
"outdir : ", file.path(settings$run$host$outdir, run.id), "\n",
file=file.path(settings$rundir, run.id, "README.txt"), sep='')
# write configuration
do.call(my.write.config, args=list(defaults = defaults,
trait.values = median.samples,
settings = settings,
run.id = run.id))
cat(run.id, file=file.path(settings$rundir, "runs.txt"), sep="\n", append=TRUE)
## loop over pfts
runs <- list()
for(i in seq(names(quantile.samples))){
pftname <- names(quantile.samples)[i]
if (pftname == "env") {
next
}
traits <- colnames(quantile.samples[[i]])
quantiles.str <- rownames(quantile.samples[[i]])
runs[[pftname]] <- data.frame()
## loop over variables
for (trait in traits) {
for(quantile.str in quantiles.str) {
if (quantile.str != MEDIAN) {
quantile <- as.numeric(quantile.str)/100
trait.samples <- median.samples
trait.samples[[i]][trait] <- quantile.samples[[i]][quantile.str, trait]
if (!is.null(con)) {
now <- format(Sys.time(), "%Y-%m-%d %H:%M:%S")
paramlist <- paste("quantile=", quantile.str, ",trait=", trait, ",pft=", pftname, sep='')
query.base(paste("INSERT INTO runs (model_id, site_id, start_time, finish_time, outdir, created_at, ensemble_id, parameter_list) values ('", settings$model$id, "', '", settings$run$site$id, "', '", settings$run$start.date, "', '", settings$run$end.date, "', '",settings$run$outdir , "', '", now, "', ", ensemble.id, ", '", paramlist, "')", sep=''), con)
run.id <- query.base(paste("SELECT id FROM runs WHERE created_at='", now, "' AND parameter_list='", paramlist, "'", sep=''), con)[['id']]
} else {
run.id <- get.run.id('SA', round(quantile,3), trait=trait,
pft.name=names(trait.samples)[i])
}
runs[[pftname]][quantile.str, trait] <- run.id
# create folders (cleaning up old ones if needed)
if(clean) {
unlink(file.path(settings$rundir, run.id))
unlink(file.path(settings$modeloutdir, run.id))
}
dir.create(file.path(settings$rundir, run.id), recursive=TRUE)
dir.create(file.path(settings$modeloutdir, run.id), recursive=TRUE)
# write run information to disk
cat("runtype : sensitivity analysis\n",
"workflow id : ", workflow.id, "\n",
"ensemble id : ", ensemble.id, "\n",
"pft name : ", names(trait.samples)[i], "\n",
"quantile : ", quantile.str, "\n",
"trait : ", trait, "\n",
"run id : ", run.id, "\n",
"model : ", model, "\n",
"model id : ", settings$model$id, "\n",
"site : ", settings$run$site$name, "\n",
"site id : ", settings$run$site$id, "\n",
"met data : ", settings$run$site$met, "\n",
"start date : ", settings$run$start.date, "\n",
"end date : ", settings$run$end.date, "\n",
"hostname : ", settings$run$host$name, "\n",
"rundir : ", file.path(settings$run$host$rundir, run.id), "\n",
"outdir : ", file.path(settings$run$host$outdir, run.id), "\n",
file=file.path(settings$rundir, run.id, "README.txt"), sep='')
# write configuration
do.call(my.write.config,
args = list(defaults = defaults,
trait.values = trait.samples,
settings = settings, run.id))
cat(run.id, file=file.path(settings$rundir, "runs.txt"), sep="\n", append=TRUE)
} else {
runs[[pftname]][MEDIAN, trait] <- medianrun
}
}
}
}
if (!is.null(con)) {
db.close(con)
}
invisible(runs)
}
#==================================================================================================#
|
# Extracted example for censorcopula::Newloglik2 (generated example script).
library(censorcopula)
### Name: Newloglik2
### Title: likelihood function
### Aliases: Newloglik2
### Keywords: copula likelihood
### ** Examples
library(copula)
## generate sample: 50 draws from a Clayton copula with parameter 2
data <- rCopula(50, claytonCopula(2))
## return the value of the log-likelihood function for the selected params
Newloglik2(param=2, data, claytonCopula(2))
| /data/genthat_extracted_code/censorcopula/examples/Newloglik2.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 339 | r | library(censorcopula)
### Name: Newloglik2
### Title: likelihood function
### Aliases: Newloglik2
### Keywords: copula likelihood
### ** Examples
library(copula)
## generate sample
data <- rCopula(50, claytonCopula(2))
## return the value of log-likelihood funcion for selected params
Newloglik2(param=2, data, claytonCopula(2))
|
# Reformat the state/abbreviation lookup CSV and emit a CREATE TABLE statement.
# NOTE: setwd() ties this script to one machine; run it from the CSV directory.
setwd("~/DataVisualization/DV_RProject3/01 Data/CSVs")
file_path <- "statesAbbr.csv"
df <- read.csv(file_path, stringsAsFactors = FALSE)
# Replace "." (i.e., period) with "_" in the column names.
names(df) <- gsub("\\.+", "_", names(df))
str(df)
# Strip non-printable / non-ASCII characters from every column.
# stringsAsFactors = FALSE keeps the columns as character on R < 4.0 too,
# consistent with the read.csv() call above.
for(n in names(df)) {
  df[n] <- data.frame(lapply(df[n], gsub, pattern="[^ -~]",replacement= ""), stringsAsFactors = FALSE)
}
dimensions <- c("State", "Abbreviation")
# Make all state data to upper case
df["State"] <- data.frame(lapply(df["State"], toupper), stringsAsFactors = FALSE)
# FIX: ".csv" previously matched any character followed by "csv"; anchor and
# escape the dot so only the literal extension is removed.
write.csv(df, paste(gsub("\\.csv$", "", file_path), ".reformatted.csv", sep=""), row.names=FALSE, na = "")
# FIX: the old class [^A-z, 0-9, ] used the A-z range, which also keeps the
# punctuation characters between 'Z' and 'a' ([ \ ] ^ _ `). Keep only
# letters, digits, commas and spaces.
states <- gsub(" +", "_", gsub("[^A-Za-z0-9, ]", "", gsub("\\.csv$", "", file_path)))
# Build a CREATE TABLE statement with one varchar2 column per dimension;
# the last column is emitted without a trailing comma.
sql <- paste("CREATE TABLE", states, "(\n-- Change table_name to the table name you want.\n")
for(d in dimensions) {
  if(d != tail(dimensions, n=1)) sql <- paste(sql, paste(d, "varchar2(4000),\n"))
  else sql <- paste(sql, paste(d, "varchar2(4000)\n"))
}
sql <- paste(sql, ");")
cat(sql)
| /01 Data/Set_Up_StateAbbr.R | no_license | amurga1/DV_RProject3 | R | false | false | 969 | r | setwd("~/DataVisualization/DV_RProject3/01 Data/CSVs")
file_path <- "statesAbbr.csv"
df <- read.csv(file_path, stringsAsFactors = FALSE)
# Replace "." (i.e., period) with "_" in the column names.
names(df) <- gsub("\\.+", "_", names(df))
str(df)
for(n in names(df)) {
df[n] <- data.frame(lapply(df[n], gsub, pattern="[^ -~]",replacement= ""))
}
dimensions <- c("State", "Abbreviation")
# Make all state data to upper case
df["State"] <- data.frame(lapply(df["State"], toupper))
write.csv(df, paste(gsub(".csv", "", file_path), ".reformatted.csv", sep=""), row.names=FALSE, na = "")
states <- gsub(" +", "_", gsub("[^A-z, 0-9, ]", "", gsub(".csv", "", file_path)))
sql <- paste("CREATE TABLE", states, "(\n-- Change table_name to the table name you want.\n")
for(d in dimensions) {
if(d != tail(dimensions, n=1)) sql <- paste(sql, paste(d, "varchar2(4000),\n"))
else sql <- paste(sql, paste(d, "varchar2(4000)\n"))
}
sql <- paste(sql, ");")
cat(sql)
|
# Generic method declarations for the package's S4 classes. Each call only
# registers the generic; the class-specific methods are defined elsewhere.

# Accessors and model fitting for the "BinData" class -------------------------
setGeneric("chrID",       function(object, ...) standardGeneric("chrID"))
setGeneric("coord",       function(object, ...) standardGeneric("coord"))
setGeneric("tagCount",    function(object, ...) standardGeneric("tagCount"))
setGeneric("mappability", function(object, ...) standardGeneric("mappability"))
setGeneric("gcContent",   function(object, ...) standardGeneric("gcContent"))
setGeneric("input",       function(object, ...) standardGeneric("input"))
setGeneric("mosaicsFit",  function(object, ...) standardGeneric("mosaicsFit"))

# Estimation and peak calling for the "MosaicsFit" class ----------------------
setGeneric("estimates",     function(object, ...) standardGeneric("estimates"))
setGeneric("mosaicsPeak",   function(object, ...) standardGeneric("mosaicsPeak"))
setGeneric("mosaicsFitHMM", function(object, ...) standardGeneric("mosaicsFitHMM"))

# Peak calling for the "MosaicsHMM" class -------------------------------------
setGeneric("mosaicsPeakHMM", function(object, ...) standardGeneric("mosaicsPeakHMM"))

# Accessors and export helpers for the "MosaicsPeak" class --------------------
setGeneric("peakList", function(object, ...) standardGeneric("peakList"))
setGeneric("export",   function(object, ...) standardGeneric("export"))
setGeneric("bdBin",    function(object, ...) standardGeneric("bdBin"))
setGeneric("empFDR",   function(object, ...) standardGeneric("empFDR"))
| /R/AllGenerics.R | no_license | duydnguyen/mosaics | R | false | false | 1,564 | r |
# generic methods for "BinData" class
setGeneric( "chrID",
function( object, ... )
standardGeneric("chrID")
)
setGeneric( "coord",
function( object, ... )
standardGeneric("coord")
)
setGeneric( "tagCount",
function( object, ... )
standardGeneric("tagCount")
)
setGeneric( "mappability",
function( object, ... )
standardGeneric("mappability")
)
setGeneric( "gcContent",
function( object, ... )
standardGeneric("gcContent")
)
setGeneric( "input",
function( object, ... )
standardGeneric("input")
)
setGeneric( "mosaicsFit",
function( object, ... )
standardGeneric("mosaicsFit")
)
# generic methods for "MosaicsFit" class
setGeneric( "estimates",
function( object, ... )
standardGeneric("estimates")
)
setGeneric( "mosaicsPeak",
function( object, ... )
standardGeneric("mosaicsPeak")
)
setGeneric( "mosaicsFitHMM",
function( object, ... )
standardGeneric("mosaicsFitHMM")
)
# generic methods for "MosaicsHMM" class
setGeneric( "mosaicsPeakHMM",
function( object, ... )
standardGeneric("mosaicsPeakHMM")
)
# generic methods for "MosaicsPeak" class
setGeneric( "peakList",
function( object, ... )
standardGeneric("peakList")
)
setGeneric( "export",
function( object, ... )
standardGeneric("export")
)
setGeneric( "bdBin",
function( object, ... )
standardGeneric("bdBin")
)
setGeneric( "empFDR",
function( object, ... )
standardGeneric("empFDR")
)
|
# Fuzzer-generated regression input for the internal blorr:::blr_pairs_cpp
# routine. x is zero-length, so this exercises the empty-vector edge case;
# y carries an extreme double value near the top of the representable range.
testlist <- list(x = numeric(0), y = 1.00508743500389e+180)
result <- do.call(blorr:::blr_pairs_cpp,testlist)
str(result)
result <- do.call(blorr:::blr_pairs_cpp,testlist)
str(result) |
# For each of 5 multiply-imputed NHIS datasets and each survey year 2002-2009,
# create a per-(imputation, year) working directory, generate a small R driver
# script (load data -> shared lyg.R analysis -> save result), and submit it to
# the cluster scheduler via swarm.
load(file="mortality.model.RData")
for (i in 1:5){
  load(file=paste0("/home/cheunglc/lyg/nhis2002_09/nhis_imputed_", i,".RData"))
  for (j in 2002:2009) {
    # Subset the loaded `nhis` data to the current survey year.
    # (Replaces the old eval(parse(text=...)) construction, which built the
    # same call from strings.)
    x <- subset(nhis, year == j)
    # create directory and save subsetted file
    # NOTE(review): there is no separator between the base path and i, j, so
    # the directory name is e.g. ".../nhis2002_0912002" -- confirm intended.
    dirname <- paste0("/home/cheunglc/lyg/nhis2002_09", i, j)
    com <- paste("mkdir", dirname)
    system(com)
    save(x, file=paste0(dirname,"/x.RData"))
    # create R file
    write(paste0("load('",dirname,"/x.RData')"), paste0(dirname,"/lyg_",i,j,".R"))
    file.append(paste0(dirname,"/lyg_",i,j,".R"), "/home/cheunglc/lyg/lyg.R")
    # FIX: removed the stray space that used to follow the opening quote,
    # which made the generated script save to a path beginning with " ".
    com <- paste0("save(x,file='",dirname,"/x.RData')")
    # FIX: append=TRUE was previously passed inside paste0(), so it was
    # concatenated into the file name (".../lyg_ij.RTRUE") and cat()
    # overwrote the target instead of appending the save command.
    cat(com, file=paste0(dirname,"/lyg_",i,j,".R"), append=TRUE)
    # create swarm file
    command <- paste0("R --vanilla <",dirname,"/lyg_",i,j,".R >",dirname,"/lyg_",i,j,".out")
    write(command, paste0(dirname,"/swarm"))
    # submit swarm file
    com <- paste0("swarm -g 1 -f ",dirname,"/swarm"," --module R/3.3.2_gcc-6.2.0", " --time 3-00:00:00")
    system(com)
  }
}
# Batch driver: for each of 5 imputed NHIS datasets and each survey year
# 2002-2009, subset the data to that year, write a per-job R script, and
# submit it to the cluster scheduler via `swarm`.
for (i in 1:5){
  load(file=paste0("/home/cheunglc/lyg/nhis2002_09/nhis_imputed_", i,".RData"))
  for (j in 2002:2009) {
    # Subset the current year directly; no need for eval(parse()).
    x <- subset(nhis, year == j)
    # Create the per-job directory and save the subsetted data there.
    dirname <- paste0("/home/cheunglc/lyg/nhis2002_09", i, j)
    dir.create(dirname, showWarnings = FALSE)
    save(x, file = paste0(dirname, "/x.RData"))
    # Build the per-job R script: load the subset, append the shared analysis
    # code, then append a final line that re-saves x.
    job_script <- paste0(dirname, "/lyg_", i, j, ".R")
    write(paste0("load('", dirname, "/x.RData')"), job_script)
    file.append(job_script, "/home/cheunglc/lyg/lyg.R")
    # Fix 1: removed the stray space that made the saved path start with " ".
    # Fix 2: append = TRUE belongs to cat(), not paste0(); previously it was
    # pasted onto the filename (".RTRUE") so the save line went to the wrong
    # file and would have clobbered it rather than appending.
    com <- paste0("save(x,file='", dirname, "/x.RData')")
    cat(com, file = job_script, append = TRUE)
    # Create and submit the swarm file for this job.
    command <- paste0("R --vanilla <", job_script, " >", dirname, "/lyg_", i, j, ".out")
    write(command, paste0(dirname, "/swarm"))
    com <- paste0("swarm -g 1 -f ", dirname, "/swarm",
                  " --module R/3.3.2_gcc-6.2.0", " --time 3-00:00:00")
    system(com)
  }
}
# Hello, world!
#
# This is an example function named 'hello'
# which prints 'Hello, world!'.
#
# You can learn more about package authoring with RStudio at:
#
# http://r-pkgs.had.co.nz/
#
# Some useful keyboard shortcuts for package authoring:
#
# Build and Reload Package: 'Cmd + Shift + B'
# Check Package: 'Cmd + Shift + E'
# Test Package: 'Cmd + Shift + T'
#
# hello <- function() {
# print("Hello, world!")
# }
| /R/hello.R | no_license | mages/PSRWP | R | false | false | 455 | r | # Hello, world!
#
# This is an example function named 'hello'
# which prints 'Hello, world!'.
#
# You can learn more about package authoring with RStudio at:
#
# http://r-pkgs.had.co.nz/
#
# Some useful keyboard shortcuts for package authoring:
#
# Build and Reload Package: 'Cmd + Shift + B'
# Check Package: 'Cmd + Shift + E'
# Test Package: 'Cmd + Shift + T'
#
# hello <- function() {
# print("Hello, world!")
# }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/checkAtomic.R
\name{checkAtomic}
\alias{assertAtomic}
\alias{assert_atomic}
\alias{checkAtomic}
\alias{expect_atomic}
\alias{testAtomic}
\alias{test_atomic}
\title{Check that an argument is an atomic vector}
\usage{
checkAtomic(x, any.missing = TRUE, all.missing = TRUE, len = NULL,
min.len = NULL, max.len = NULL, unique = FALSE, names = NULL)
assertAtomic(x, any.missing = TRUE, all.missing = TRUE, len = NULL,
min.len = NULL, max.len = NULL, unique = FALSE, names = NULL,
.var.name = vname(x), add = NULL)
assert_atomic(x, any.missing = TRUE, all.missing = TRUE, len = NULL,
min.len = NULL, max.len = NULL, unique = FALSE, names = NULL,
.var.name = vname(x), add = NULL)
testAtomic(x, any.missing = TRUE, all.missing = TRUE, len = NULL,
min.len = NULL, max.len = NULL, unique = FALSE, names = NULL)
test_atomic(x, any.missing = TRUE, all.missing = TRUE, len = NULL,
min.len = NULL, max.len = NULL, unique = FALSE, names = NULL)
expect_atomic(x, any.missing = TRUE, all.missing = TRUE, len = NULL,
min.len = NULL, max.len = NULL, unique = FALSE, names = NULL,
info = NULL, label = vname(x))
}
\arguments{
\item{x}{[any]\cr
Object to check.}
\item{any.missing}{[\code{logical(1)}]\cr
Are vectors with missing values allowed? Default is \code{TRUE}.}
\item{all.missing}{[\code{logical(1)}]\cr
Are vectors with only missing values allowed? Default is \code{TRUE}.}
\item{len}{[\code{integer(1)}]\cr
Exact expected length of \code{x}.}
\item{min.len}{[\code{integer(1)}]\cr
Minimal length of \code{x}.}
\item{max.len}{[\code{integer(1)}]\cr
Maximal length of \code{x}.}
\item{unique}{[\code{logical(1)}]\cr
Must all values be unique? Default is \code{FALSE}.}
\item{names}{[\code{character(1)}]\cr
Check for names. See \code{\link{checkNamed}} for possible values.
Default is \dQuote{any} which performs no check at all.
Note that you can use \code{\link{checkSubset}} to check for a specific set of names.}
\item{.var.name}{[\code{character(1)}]\cr
Name of the checked object to print in assertions. Defaults to
the heuristic implemented in \code{\link{vname}}.}
\item{add}{[\code{AssertCollection}]\cr
Collection to store assertion messages. See \code{\link{AssertCollection}}.}
\item{info}{[character(1)]\cr
Extra information to be included in the message for the testthat reporter.
See \code{\link[testthat]{expect_that}}.}
\item{label}{[\code{character(1)}]\cr
Name of the checked object to print in messages. Defaults to
the heuristic implemented in \code{\link{vname}}.}
}
\value{
Depending on the function prefix:
If the check is successful, the functions return \code{TRUE}. If the check
is not successful, \code{assertAtomic}/\code{assert_atomic}
throws an error message, \code{testAtomic}/\code{test_atomic}
returns \code{FALSE},
and \code{checkAtomic} returns a string with the error message.
The function \code{expect_atomic} always returns an
\code{\link[testthat]{expectation}}.
}
\description{
For the definition of \dQuote{atomic}, see \code{\link[base]{is.atomic}}.
}
\examples{
testAtomic(letters, min.len = 1L, any.missing = FALSE)
}
\seealso{
Other atomicvector: \code{\link{checkAtomicVector}},
\code{\link{checkVector}}
Other basetypes: \code{\link{checkArray}},
\code{\link{checkCharacter}}, \code{\link{checkComplex}},
\code{\link{checkDataFrame}},
\code{\link{checkDataTable}},
\code{\link{checkEnvironment}},
\code{\link{checkFactor}}, \code{\link{checkFunction}},
\code{\link{checkIntegerish}},
\code{\link{checkInteger}}, \code{\link{checkList}},
\code{\link{checkLogical}}, \code{\link{checkMatrix}},
\code{\link{checkNumeric}}, \code{\link{checkVector}}
}
| /man/checkAtomic.Rd | no_license | jackwasey/checkmate | R | false | true | 3,726 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/checkAtomic.R
\name{checkAtomic}
\alias{assertAtomic}
\alias{assert_atomic}
\alias{checkAtomic}
\alias{expect_atomic}
\alias{testAtomic}
\alias{test_atomic}
\title{Check that an argument is an atomic vector}
\usage{
checkAtomic(x, any.missing = TRUE, all.missing = TRUE, len = NULL,
min.len = NULL, max.len = NULL, unique = FALSE, names = NULL)
assertAtomic(x, any.missing = TRUE, all.missing = TRUE, len = NULL,
min.len = NULL, max.len = NULL, unique = FALSE, names = NULL,
.var.name = vname(x), add = NULL)
assert_atomic(x, any.missing = TRUE, all.missing = TRUE, len = NULL,
min.len = NULL, max.len = NULL, unique = FALSE, names = NULL,
.var.name = vname(x), add = NULL)
testAtomic(x, any.missing = TRUE, all.missing = TRUE, len = NULL,
min.len = NULL, max.len = NULL, unique = FALSE, names = NULL)
test_atomic(x, any.missing = TRUE, all.missing = TRUE, len = NULL,
min.len = NULL, max.len = NULL, unique = FALSE, names = NULL)
expect_atomic(x, any.missing = TRUE, all.missing = TRUE, len = NULL,
min.len = NULL, max.len = NULL, unique = FALSE, names = NULL,
info = NULL, label = vname(x))
}
\arguments{
\item{x}{[any]\cr
Object to check.}
\item{any.missing}{[\code{logical(1)}]\cr
Are vectors with missing values allowed? Default is \code{TRUE}.}
\item{all.missing}{[\code{logical(1)}]\cr
Are vectors with only missing values allowed? Default is \code{TRUE}.}
\item{len}{[\code{integer(1)}]\cr
Exact expected length of \code{x}.}
\item{min.len}{[\code{integer(1)}]\cr
Minimal length of \code{x}.}
\item{max.len}{[\code{integer(1)}]\cr
Maximal length of \code{x}.}
\item{unique}{[\code{logical(1)}]\cr
Must all values be unique? Default is \code{FALSE}.}
\item{names}{[\code{character(1)}]\cr
Check for names. See \code{\link{checkNamed}} for possible values.
Default is \dQuote{any} which performs no check at all.
Note that you can use \code{\link{checkSubset}} to check for a specific set of names.}
\item{.var.name}{[\code{character(1)}]\cr
Name of the checked object to print in assertions. Defaults to
the heuristic implemented in \code{\link{vname}}.}
\item{add}{[\code{AssertCollection}]\cr
Collection to store assertion messages. See \code{\link{AssertCollection}}.}
\item{info}{[character(1)]\cr
Extra information to be included in the message for the testthat reporter.
See \code{\link[testthat]{expect_that}}.}
\item{label}{[\code{character(1)}]\cr
Name of the checked object to print in messages. Defaults to
the heuristic implemented in \code{\link{vname}}.}
}
\value{
Depending on the function prefix:
If the check is successful, the functions return \code{TRUE}. If the check
is not successful, \code{assertAtmoic}/\code{assert_atmoic}
throws an error message, \code{testAtmoic}/\code{test_atmoic}
returns \code{FALSE},
and \code{checkAtmoic} returns a string with the error message.
The function \code{expect_atmoic} always returns an
\code{\link[testthat]{expectation}}.
}
\description{
For the definition of \dQuote{atomic}, see \code{\link[base]{is.atomic}}.
}
\examples{
testAtomic(letters, min.len = 1L, any.missing = FALSE)
}
\seealso{
Other atomicvector: \code{\link{checkAtomicVector}},
\code{\link{checkVector}}
Other basetypes: \code{\link{checkArray}},
\code{\link{checkCharacter}}, \code{\link{checkComplex}},
\code{\link{checkDataFrame}},
\code{\link{checkDataTable}},
\code{\link{checkEnvironment}},
\code{\link{checkFactor}}, \code{\link{checkFunction}},
\code{\link{checkIntegerish}},
\code{\link{checkInteger}}, \code{\link{checkList}},
\code{\link{checkLogical}}, \code{\link{checkMatrix}},
\code{\link{checkNumeric}}, \code{\link{checkVector}}
}
|
# Hacked by Gareth Wilson - original function genomeVector forms part of the
# MEDIPS package by Lukas Chavez.
#
# Build a strand-specific genome-wide bin vector from a MEDIPSset: the genome
# is tiled into bin_size windows per chromosome and reads on the requested
# strand are extended and distributed over those windows via
# MEDIPS.distributeReads().
#
# Args:
#   data:       a MEDIPSset object (anything else raises an error).
#   extend:     read extension length forwarded to MEDIPS.distributeReads().
#   bin_size:   genomic window size in bp.
#   strandSpec: strand value to keep; only reads whose strand equals this
#               value are counted -- presumably "+" or "-", confirm with callers.
# Returns:
#   a new MEDIPSset whose genome_raw slot holds the per-bin signal.
genomeVectorStrand <- function (data = NULL, extend = 400, bin_size = 50, strandSpec = NULL)
{
    # inherits() is the robust class test; class() may return a vector,
    # which makes `class(x) != "..."` unreliable.
    if (!inherits(data, "MEDIPSset"))
        stop("Must specify a MEDIPSset object.")
    # Pull read coordinates and chromosome layout out of the MEDIPSset.
    # Locals renamed so they no longer shadow base::stop() or the
    # chr_lengths() accessor.
    read_chr = regions_chr(data)
    read_start = regions_start(data)
    read_stop = regions_stop(data)
    read_strand = regions_strand(data)
    chromosomes = chr_names(data)
    chrom_lengths = chr_lengths(data)
    # Number of bin_size windows per chromosome and their cumulative offsets
    # into the concatenated genome-wide vectors.
    no_chr_windows = ceiling(chrom_lengths/bin_size)
    supersize_chr = cumsum(no_chr_windows)
    cat("Create the genome vector...\n")
    genomeVec_chr = vector(length = supersize_chr[length(chromosomes)],
        mode = "character")
    genomeVec_pos = vector(length = supersize_chr[length(chromosomes)],
        mode = "numeric")
    total = length(chromosomes)
    pb <- txtProgressBar(min = 0, max = total, style = 3)
    for (i in seq_along(chromosomes)) {
        setTxtProgressBar(pb, i)
        # This chromosome's slice of the genome-wide vectors.
        idx <- if (i == 1) 1:no_chr_windows[1]
               else (supersize_chr[i - 1] + 1):supersize_chr[i]
        genomeVec_chr[idx] = chromosomes[i]
        genomeVec_pos[idx] = seq(1, chrom_lengths[i], bin_size)
    }
    genomeVec_signal = vector(length = supersize_chr[length(chromosomes)],
        mode = "numeric")
    cat("\nDistribute reads over genome...\n")
    for (i in seq_along(chromosomes)) {
        setTxtProgressBar(pb, i)
        # Keep only reads on the requested strand of this chromosome.
        sel <- read_chr == chromosomes[i] & read_strand == strandSpec
        genomeVec_signal[genomeVec_chr == chromosomes[i]] = MEDIPS.distributeReads(
            read_start[sel], read_stop[sel], read_strand[sel],
            genomeVec_pos[genomeVec_chr == chromosomes[i]], extend)
    }
    close(pb)  # release the progress-bar connection (was previously leaked)
    cat("\n")
    MEDIPSsetObj = new("MEDIPSset", genome_chr = genomeVec_chr,
        genome_pos = genomeVec_pos, genome_raw = genomeVec_signal,
        extend = extend, bin_size = bin_size, sample_name = sample_name(data),
        genome_name = genome_name(data), regions_chr = regions_chr(data),
        regions_start = regions_start(data), regions_stop = regions_stop(data),
        regions_strand = regions_strand(data), number_regions = number_regions(data),
        chr_names = chr_names(data), chr_lengths = chr_lengths(data))
    return(MEDIPSsetObj)
}
| /useq_pipe/v0_4/R_scripts/medips_strand_function.R | no_license | xflicsu/shared_scripts | R | false | false | 2,654 | r | # hacked by Gareth Wilson - original function genomeVector forms part of MEDIPS package by Lukas Chavez
genomeVectorStrand <- function (data = NULL, extend = 400, bin_size = 50,strandSpec = NULL)
{
if (class(data) != "MEDIPSset")
stop("Must specify a MEDIPSset object.")
chr = regions_chr(data)
start = regions_start(data)
stop = regions_stop(data)
strand = regions_strand(data)
chromosomes = chr_names(data)
chr_lengths = chr_lengths(data)
no_chr_windows = ceiling(chr_lengths/bin_size)
supersize_chr = cumsum(no_chr_windows)
cat("Create the genome vector...\n")
genomeVec_chr = vector(length = supersize_chr[length(chromosomes)],
mode = "character")
genomeVec_pos = vector(length = supersize_chr[length(chromosomes)],
mode = "numeric")
total = length(chromosomes)
pb <- txtProgressBar(min = 0, max = total, style = 3)
for (i in 1:length(chromosomes)) {
setTxtProgressBar(pb, i)
if (i == 1) {
genomeVec_chr[1:no_chr_windows[i]] = chromosomes[i]
genomeVec_pos[1:no_chr_windows[i]] = seq(1, chr_lengths[i],
bin_size)
}
if (i > 1) {
genomeVec_chr[(supersize_chr[i - 1] + 1):(supersize_chr[i -
1] + no_chr_windows[i])] = chromosomes[i]
genomeVec_pos[(supersize_chr[i - 1] + 1):(supersize_chr[i -
1] + no_chr_windows[i])] = seq(1, chr_lengths[i],
bin_size)
}
}
genomeVec_signal = vector(length = supersize_chr[length(chromosomes)],
mode = "numeric")
cat("\nDistribute reads over genome...\n")
for (i in 1:length(chromosomes)) {
setTxtProgressBar(pb, i)
genomeVec_signal[genomeVec_chr == chromosomes[i]] = MEDIPS.distributeReads(start[chr ==
chromosomes[i] & strand == strandSpec], stop[chr == chromosomes[i] & strand == strandSpec], strand[chr ==
chromosomes[i] & strand == strandSpec], genomeVec_pos[genomeVec_chr == chromosomes[i]],
extend)
}
cat("\n")
MEDIPSsetObj = new("MEDIPSset", genome_chr = genomeVec_chr,
genome_pos = genomeVec_pos, genome_raw = genomeVec_signal,
extend = extend, bin_size = bin_size, sample_name = sample_name(data),
genome_name = genome_name(data), regions_chr = regions_chr(data),
regions_start = regions_start(data), regions_stop = regions_stop(data),
regions_strand = regions_strand(data), number_regions = number_regions(data),
chr_names = chr_names(data), chr_lengths = chr_lengths(data))
return(MEDIPSsetObj)
}
|
# Reading the database (72,000 rows are enough to cover 1-2 Feb 2007).
# FIX: this script contained unresolved git merge conflict markers
# (<<<<<<< / ======= / >>>>>>>) which made it unparseable; the conflicts
# have been resolved keeping the HEAD side in each case.
power <- read.table("household_power_consumption.txt", sep =";", header = TRUE, 
                    na.strings = "?", nrows = 72000, colClasses = c("character",
                    "character","numeric","numeric","numeric","numeric","numeric",
                    "numeric","numeric"))
# Extract the observations between 1/2/2007 and 2/2/2007, both included.
nSelect <- power$Date == "1/2/2007" | power$Date == "2/2/2007"
powerSelect <- power[nSelect,]
# Combine the Date and Time variables into a single Date/Time column.
fecha <- paste(as.Date(powerSelect[,1],format = "%d/%m/%Y"),powerSelect[,2])
dateTime <- strptime(fecha, "%Y-%m-%d %H:%M:%S")
powerSelect <- cbind(dateTime, powerSelect)
# Open png device; create 'plot4.png' with a 2x2 panel layout.
png(filename = "plot4.png", width = 480, height = 480 )
par(mfcol = c(2,2))
# Graph 1: global active power over time.
x <- powerSelect$dateTime
y <- powerSelect$Global_active_power
plot(x,y, xlab = "", ylab = "Global Active Power (Kilowatts)", type = "l")
# Graph 2: the three sub-metering series on one set of axes.
sub1 <- powerSelect$Sub_metering_1
sub2 <- powerSelect$Sub_metering_2
sub3 <- powerSelect$Sub_metering_3
plot(x,sub1, xlab = "", ylab = "Energy sub metering", type = "n")
lines(x, sub1, col ="black")
lines(x, sub2, col = "red")
lines(x, sub3, col = "blue")
legend("topright", legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
       lty = c(1,1,1), col = c("black", "red", "blue"), bty = "n")
# Graph 3: voltage over time.
Vol <- powerSelect$Voltage
plot(x, Vol, xlab = "datetime", ylab = "Voltage", type = "l")
# Graph 4: global reactive power over time (line plus point markers).
Grp <- powerSelect$Global_reactive_power
plot(x, Grp, xlab = "datetime", ylab = "Global_reactive_power", type = "n")
lines(x, Grp, col ="dimgrey")
points(x, Grp, pch = 46, col = "black")
par(mfrow = c(1,1))
# Close the png file device.
dev.off()
| /plot4.R | no_license | DanielCabello/datasciencecoursera | R | false | false | 2,508 | r | # Reading the Database
power <- read.table("household_power_consumption.txt", sep =";", header = TRUE,
na.strings = "?", nrows = 72000, colClasses = c("character",
"character","numeric","numeric","numeric","numeric","numeric",
"numeric","numeric"))
# It is Extracted the observations between 1/2/2007 and 2/2/2007, both included
nSelect <- power$Date == "1/2/2007" | power$Date == "2/2/2007"
powerSelect <- power[nSelect,]
# The Date and Time variables are converted to Date/Time classes
fecha <- paste(as.Date(powerSelect[,1],format = "%d/%m/%Y"),powerSelect[,2])
dateTime <- strptime(fecha, "%Y-%m-%d %H:%M:%S")
powerSelect <- cbind(dateTime, powerSelect)
# Open png device; create 'plot4.png'
png(filename = "plot4.png", width = 480, height = 480 )
par(mfcol = c(2,2))
# Graph 1
# Getting the variables
<<<<<<< HEAD
x <- powerSelect$dateTime
=======
x <- powerSelect[,1]
>>>>>>> e7aeb92c9c67c3a780d3d8afd34e9659f81c3e5f
y <- powerSelect$Global_active_power
# Ploting the grahp
plot(x,y, xlab = "", ylab = "Global Active Power (Kilowatts)", type = "l")
# Graph 2
# Getting the variables
sub1 <- powerSelect$Sub_metering_1
sub2 <- powerSelect$Sub_metering_2
sub3 <- powerSelect$Sub_metering_3
# Plotting the graph
plot(x,sub1, xlab = "", ylab = "Energy sub metering", type = "n")
lines(x, sub1, col ="black")
lines(x, sub2, col = "red")
lines(x, sub3, col = "blue")
legend("topright", legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
<<<<<<< HEAD
lty = c(1,1,1), col = c("black", "red", "blue"), bty = "n")
=======
lty = c(1,1,1), col = c("black", "red", "blue"))
>>>>>>> e7aeb92c9c67c3a780d3d8afd34e9659f81c3e5f
# Graph 3
# Getting the variable
Vol <- powerSelect$Voltage
# Plotting the graph
plot(x, Vol, xlab = "datetime", ylab = "Voltage", type = "l")
# Graph 4
# Getting the variable
Grp <- powerSelect$Global_reactive_power
# Plotting the graph
<<<<<<< HEAD
plot(x, Grp, xlab = "datetime", ylab = "Global_reactive_power", type = "n")
lines(x, Grp, col ="dimgrey")
points(x, Grp, pch = 46, col = "black")
=======
plot(x, Grp, xlab = "datetime", ylab = "Global_reactive_power", type = "l")
>>>>>>> e7aeb92c9c67c3a780d3d8afd34e9659f81c3e5f
par(mfrow = c(1,1))
# Close the png file device
dev.off()
|
#' @title Create networks from social media data
#'
#' @description This function creates networks from social media data as produced from \code{\link{Collect}}.
#' \code{Create} is the final step of the \code{\link{Authenticate}}, \code{\link{Collect}} and \code{Create}
#' workflow.
#'
#' There are four types of networks that can be created from collected data: \code{activity}, \code{actor},
#' \code{twomode} or \code{semantic}.
#'
#' For \code{activity} networks refer to \code{\link{Create.activity.twitter}}, \code{\link{Create.activity.youtube}}
#' and \code{\link{Create.activity.reddit}} for parameters and usage.
#'
#' For \code{actor} networks refer to \code{\link{Create.actor.twitter}}, \code{\link{Create.actor.youtube}} and
#' \code{\link{Create.actor.reddit}}.
#'
#' For \code{twomode} and \code{semantic} networks refer to \code{\link{Create.twomode.twitter}} and
#' \code{\link{Create.semantic.twitter}} functions for parameters and usage respectively.
#'
#' @param datasource Collected social media data of class \code{"datasource"} and \code{socialmedia}.
#' @param type Character string. Type of network to be created, can be \code{"activity"}, \code{"actor"},
#' \code{"twomode"} or \code{"semantic"}.
#' @param ... Optional parameters to pass to functions providied by supporting R packages that are used for social media
#' network creation.
#'
#' @export
Create <- function(datasource, type, ...) {
  # Dispatch is on `type`, not on `datasource`: Create.default below tags the
  # character `type` with its own value as a class, so UseMethod can find the
  # type-specific method (Create.activity, Create.actor, ...).
  UseMethod("Create", type)
}
#' @export
Create.default <- function(datasource, type, ...) {
  # Validate the datasource: twitter collects arrive as a list containing a
  # "tweets" data frame; everything else is expected to be a plain data frame.
  if (is.list(datasource) && ("tweets" %in% names(datasource))) {
    if (check_df_n(datasource$tweets) < 1) {
      stop("Datasource passed to Create is invalid or empty.", call. = FALSE)
    }
  } else {
    if (check_df_n(datasource) < 1) {
      stop("Datasource passed to Create is invalid or empty.", call. = FALSE)
    }
  }
  # The network type must be a single character string; a longer vector would
  # silently corrupt the method lookup below (paste0 vectorises over it).
  if (!is.character(type) || length(type) != 1) {
    stop("Create network type should be a character string.", call. = FALSE)
  }
  # Check that a creation method exists for this network type.
  # todo: perhaps search create methods so this can be extensible
  func_name <- paste0("Create.", type)
  if (!exists(func_name, where = asNamespace("vosonSML"), mode = "function")) {
    stop("Unknown network type passed to create.", call. = FALSE)
  }
  # Tag `type` with its own value as a class so that UseMethod("Create", type)
  # in Create() dispatches to Create.<type>.
  class(type) <- append(class(type), type)
  # Re-dispatch, now landing on the type-specific method.
  Create(datasource, type, ...)
}
#' @title Create activity networks from social media data
#'
#' @noRd
#' @method Create activity
#' @export
Create.activity <- function(datasource, type, ...) {
  # Resolve the verbose flag from ... before dispatching on the datasource
  # class. NOTE(review): `msg` is assigned but never used here -- presumably
  # f_verbose() is wanted for a side effect; confirm.
  msg <- f_verbose(check_dots("verbose", ...))
  UseMethod("Create.activity", datasource)
}
#' @noRd
#' @export
Create.activity.default <- function(datasource, type, ...) {
  # Reached when the datasource class matches no supported social media type.
  stop("Unknown datasource passed to create activity network.", call. = FALSE)
}
#' @title Create actor network from social media data
#'
#' @noRd
#' @method Create actor
#' @export
Create.actor <- function(datasource, type, ...) {
  # Resolve the verbose flag from ... before dispatching on the datasource
  # class. NOTE(review): `msg` is assigned but never used here -- presumably
  # f_verbose() is wanted for a side effect; confirm.
  msg <- f_verbose(check_dots("verbose", ...))
  UseMethod("Create.actor", datasource)
}
#' @noRd
#' @export
Create.actor.default <- function(datasource, type, ...) {
  # Reached when the datasource class matches no supported social media type.
  # Fix: the message now names the network type, consistent with the other
  # Create.<type>.default methods (it previously said just "create.").
  stop("Unknown datasource passed to create actor network.", call. = FALSE)
}
#' @title Creates a semantic network from social media data
#'
#' @noRd
#' @method Create semantic
#' @export
Create.semantic <- function(datasource, type, ...) {
  # Resolve the verbose flag from ... before dispatching on the datasource
  # class. NOTE(review): `msg` is assigned but never used here -- presumably
  # f_verbose() is wanted for a side effect; confirm.
  msg <- f_verbose(check_dots("verbose", ...))
  UseMethod("Create.semantic", datasource)
}
#' @noRd
#' @export
Create.semantic.default <- function(datasource, type, ...) {
  # Reached when the datasource class matches no supported social media type.
  stop("Unknown datasource passed to create semantic network.", call. = FALSE)
}
#' @title Create 2-mode networks from social media data
#'
#' @noRd
#' @method Create twomode
#' @export
Create.twomode <- function(datasource, type, ...) {
  # NOTE(review): unlike Create.activity/Create.actor/Create.semantic, this
  # method does not call f_verbose(check_dots("verbose", ...)) before
  # dispatch -- confirm whether the omission is intentional.
  UseMethod("Create.twomode", datasource)
}
#' @noRd
#' @export
Create.twomode.default <- function(datasource, type, ...) {
  # Reached when the datasource class matches no supported social media type.
  stop("Unknown datasource passed to create twomode network.", call. = FALSE)
}
| /R/Create.R | no_license | cran/vosonSML | R | false | false | 4,284 | r | #' @title Create networks from social media data
#'
#' @description This function creates networks from social media data as produced from \code{\link{Collect}}.
#' \code{Create} is the final step of the \code{\link{Authenticate}}, \code{\link{Collect}} and \code{Create}
#' workflow.
#'
#' There are four types of networks that can be created from collected data: \code{activity}, \code{actor},
#' \code{twomode} or \code{semantic}.
#'
#' For \code{activity} networks refer to \code{\link{Create.activity.twitter}}, \code{\link{Create.activity.youtube}}
#' and \code{\link{Create.activity.reddit}} for parameters and usage.
#'
#' For \code{actor} networks refer to \code{\link{Create.actor.twitter}}, \code{\link{Create.actor.youtube}} and
#' \code{\link{Create.actor.reddit}}.
#'
#' For \code{twomode} and \code{semantic} networks refer to \code{\link{Create.twomode.twitter}} and
#' \code{\link{Create.semantic.twitter}} functions for parameters and usage respectively.
#'
#' @param datasource Collected social media data of class \code{"datasource"} and \code{socialmedia}.
#' @param type Character string. Type of network to be created, can be \code{"activity"}, \code{"actor"},
#' \code{"twomode"} or \code{"semantic"}.
#' @param ... Optional parameters to pass to functions providied by supporting R packages that are used for social media
#' network creation.
#'
#' @export
Create <- function(datasource, type, ...) {
# searches the class list of datasource for matching method
UseMethod("Create", type)
}
#' @export
Create.default <- function(datasource, type, ...) {
# check datasource
if (is.list(datasource) && ("tweets" %in% names(datasource))) {
if (check_df_n(datasource$tweets) < 1) {
stop("Datasource passed to Create is invalid or empty.", call. = FALSE)
}
} else {
if (check_df_n(datasource) < 1) {
stop("Datasource passed to Create is invalid or empty.", call. = FALSE)
}
}
# check if network type is a character string
if (!is.character(type)) {
stop("Create network type should be a character string.", call. = FALSE)
}
# check if function exists for network type
# todo: perhaps search create methods so this can be extensible
func_name <- paste0("Create", ".", type)
if (!exists(func_name, where = asNamespace("vosonSML"), mode = "function")) {
stop("Unknown network type passed to create.", call. = FALSE)
}
# add social media type to value class list
class(type) <- append(class(type), type)
# call create again
Create(datasource, type, ...)
}
#' @title Create activity networks from social media data
#'
#' @noRd
#' @method Create activity
#' @export
Create.activity <- function(datasource, type, ...) {
msg <- f_verbose(check_dots("verbose", ...))
UseMethod("Create.activity", datasource)
}
#' @noRd
#' @export
Create.activity.default <- function(datasource, type, ...) {
stop("Unknown datasource passed to create activity network.", call. = FALSE)
}
#' @title Create actor network from social media data
#'
#' @noRd
#' @method Create actor
#' @export
Create.actor <- function(datasource, type, ...) {
msg <- f_verbose(check_dots("verbose", ...))
UseMethod("Create.actor", datasource)
}
#' @noRd
#' @export
Create.actor.default <- function(datasource, type, ...) {
stop("Unknown datasource passed to create.", call. = FALSE)
}
#' @title Creates a semantic network from social media data
#'
#' @noRd
#' @method Create semantic
#' @export
Create.semantic <- function(datasource, type, ...) {
msg <- f_verbose(check_dots("verbose", ...))
UseMethod("Create.semantic", datasource)
}
#' @noRd
#' @export
Create.semantic.default <- function(datasource, type, ...) {
stop("Unknown datasource passed to create semantic network.", call. = FALSE)
}
#' @title Create 2-mode networks from social media data
#'
#' @noRd
#' @method Create twomode
#' @export
Create.twomode <- function(datasource, type, ...) {
UseMethod("Create.twomode", datasource)
}
#' @noRd
#' @export
Create.twomode.default <- function(datasource, type, ...) {
stop("Unknown datasource passed to create twomode network.", call. = FALSE)
}
|
set.seed(0)
### Test soft_threshold
# Check that a soft-thresholded vector lies inside the unit L2 ball and the
# L1 ball of radius s, up to a small numerical tolerance.
verify_constraints <- function(x, s, tol = 1e-12) {
  l2_norm <- norm(x, type = "2")
  l1_norm <- sum(abs(x))
  expect_lte(l2_norm, 1 + tol)
  expect_lte(l1_norm, s + tol)
}

test_that("soft_threshold returns a vector that satisfies the constraints", {
  cases <- list(list(x = rep(0, 12), s = 1),
                list(x = rnorm(42), s = 10),
                list(x = rnorm(1500), s = 127))
  for (case in cases) {
    verify_constraints(soft_threshold(case$x, case$s), case$s)
  }
})
| /tests/testthat/test_soft_threshold.r | no_license | cran/RGCCA | R | false | false | 435 | r | set.seed(0)
### Test soft_threshold
verify_constraints <- function(x, s, tol = 1e-12) {
expect_lte(norm(x, type = "2"), 1 + tol)
expect_lte(sum(abs(x)), s + tol)
}
test_that("soft_threshold returns a vector that satisfies the constraints", {
verify_constraints(soft_threshold(rep(0, 12), 1), 1)
verify_constraints(soft_threshold(rnorm(42), 10), 10)
verify_constraints(soft_threshold(rnorm(1500), 127), 127)
})
|
## ----setup, message=FALSE, echo = FALSE-------------------------------------------------
# Attach reporting/annotation helpers and set global output options.
library(BiocStyle)
library(knitr)
library(clusterProfiler)
options(digits=3)
options(width=90)
# NOTE(review): hard-coded setwd() makes this script machine-specific;
# prefer running from the project directory.
setwd('//DATA2/work/lbyybl/coorlaborate/YB/cutadapt/output/rna_graph')
## ----setup2, message=FALSE, eval=TRUE---------------------------------------------------
# Core RNA-seq DE stack: limma/edgeR for modelling, Glimma for interactive
# plots, org.Hs.eg.db for human gene ID mapping.
library(limma)
library(Glimma)
library(edgeR)
#library(Mus.musculus)
library(org.Hs.eg.db)
## ----import1----------------------------------------------------------------------------
# Load the transcript count matrix: 9 samples in 3 groups of 3 replicates
# (AD38 -dox, AD38 +dox, Ac12).
YB_file <- '/DATA2/work/lbyybl/coorlaborate/YB/cutadapt/output/transcript_count_matrix.csv'
YB_data <- read.csv(YB_file, stringsAsFactors = F)
YB_data2 <- as.matrix(YB_data[,2:10])
rownames(YB_data2) <- as.vector(YB_data[,1])
groups <- as.factor(c('AD38_dox','AD38_dox','AD38_dox','AD38jiadox','AD38jiadox','AD38jiadox','Ac12','Ac12','Ac12'))
# NOTE(review): YB_file held a file path above and is reused here for the
# DGEList object; a distinct name would avoid confusion.
YB_file <- DGEList(counts=YB_data2,group=groups,remove.zeros=T)
samplenames <- c('38_dox_1','38_dox_2','38_dox_3','38jiadox_1','38jiadox_2','38jiadox_3','Ac12_1','Ac12_2','Ac12_3')
## ----import2----------------------------------------------------------------------------
# `x` is the working DGEList from here on.
x <- YB_file
class(x)
dim(x)
#rownames <- gsub('MSTRG.','',rownames(x))
#rownames(x) <- rownames
## ----annotatesamples--------------------------------------------------------------------
# Record a batch ("lane") factor, one level per replicate set.
lane <- as.factor(rep(c("time1",'time2','time3'), each=3))
x$samples$lane <- lane
x$samples
## ----annotategenes, message=FALSE-------------------------------------------------------
# Map RefSeq transcript IDs to Ensembl/symbol/Entrez via org.Hs.eg.db.
geneid <- rownames(x)
# genes <- select(Mus.musculus, keys=geneid, columns=c("SYMBOL", "TXCHROM"),
#                keytype="REFSEQ")
genes <- bitr(geneid, fromType = "REFSEQ",
        toType = c("ENSEMBL", "SYMBOL",'ENTREZID'),
        OrgDb = org.Hs.eg.db)
head(genes)
# ## ----removedups-------------------------------------------------------------------------
# Keep the first mapping per RefSeq ID so annotation rows align 1:1 with
# count-matrix rows.
genes <- genes[!duplicated(genes$REFSEQ),]
# ## ----assigngeneanno---------------------------------------------------------------------
x$genes <- genes
x
## ----cpm--------------------------------------------------------------------------------
# Counts per million (raw and log2 with prior count 2) for QC and filtering.
cpm <- cpm(x)
lcpm <- cpm(x, log=TRUE, prior.count=2)
## ----lcpm-------------------------------------------------------------------------------
# Mean and median library sizes (in millions); used for the lcpm cutoff below.
L <- mean(x$samples$lib.size) * 1e-6
M <- median(x$samples$lib.size) * 1e-6
c(L, M)
summary(lcpm)
## ----filter-----------------------------------------------------------------------------
# Keep transcripts with CPM > 0.5 in at least 3 samples (one full group).
keep.exprs <- rowSums(cpm>0.5)>=3
x <- x[keep.exprs,, keep.lib.sizes=FALSE]
dim(x)
## ----filterplot1, fig.height=4, fig.width=8, fig.cap="每个样本过滤前的原始数据(A)和过滤后(B)的数据的log-CPM值密度。竖直虚线标出了过滤步骤中所用阈值(相当于CPM值为约0.2)。"----
# Per-sample log-CPM density before (A) and after (B) filtering; the dashed
# vertical line marks the filtering threshold.
pdf('gene_filter.pdf',width = 12,height = 6)
lcpm.cutoff <- log2(10/M + 2/L)
library(RColorBrewer)
nsamples <- ncol(x)
col <- brewer.pal(nsamples, "Paired")
par(mfrow=c(1,2))
plot(density(lcpm[,1]), col=col[1], lwd=2, ylim=c(0,0.26), las=2, main="", xlab="")
title(main="A. Raw data", xlab="Log-cpm")
abline(v=lcpm.cutoff, lty=3)
abline(v=0, lty=3)
for (i in 2:nsamples){
  den <- density(lcpm[,i])
  lines(den$x, den$y, col=col[i], lwd=2)
}
legend("topright", samplenames, text.col=col, bty="n")
# Recompute lcpm on the filtered object for panel B.
lcpm <- cpm(x, log=TRUE)
plot(density(lcpm[,1]), col=col[1], lwd=2, ylim=c(0,0.26), las=2, main="", xlab="")
title(main="B. Filtered data", xlab="Log-cpm")
abline(v=lcpm.cutoff, lty=3)
for (i in 2:nsamples){
  den <- density(lcpm[,i])
  lines(den$x, den$y, col=col[i], lwd=2)
}
legend("topright", samplenames, text.col=col, bty="n")
dev.off()
## ----normalize--------------------------------------------------------------------------
# TMM normalisation factors for library-composition differences.
x <- calcNormFactors(x, method = "TMM")
x$samples$norm.factors
## ----MDS1, fig.height=4, fig.width=8, fig.cap="log-CPM值在维度1和2的MDS图,以样品分组上色并标记(A)和维度3和4的MDS图,以测序道上色并标记(B)。图中的距离对应于最主要的倍数变化(fold change),默认情况下也就是前500个在每对样品之间差异最大的基因的平均(均方根)log2倍数变化。"----
# MDS of log-CPM: dims 1-2 coloured by sample group (A), dims 3-4 coloured
# by sequencing lane (B).
pdf('MDS_plot.pdf',width = 12,height = 6)
lcpm <- cpm(x, log=TRUE)
par(mfrow=c(1,2))
col.group <- groups
levels(col.group) <- brewer.pal(nlevels(col.group), "Set1")
col.group <- as.character(col.group)
col.lane <- lane
levels(col.lane) <- brewer.pal(nlevels(col.lane), "Set2")
col.lane <- as.character(col.lane)
plotMDS(lcpm, labels=groups, col=col.group)
title(main="A. Sample groups")
plotMDS(lcpm, labels=lane, col=col.lane, dim=c(3,4))
title(main="B. Sequencing lanes")
dev.off()
## ----GlimmaMDSplot----------------------------------------------------------------------
# Interactive MDS plot written to disk (not launched).
glMDSPlot(lcpm, labels=paste(groups, lane, sep="_"),
          groups=x$samples[,c(1,4)], launch=FALSE)
## ----design-----------------------------------------------------------------------------
# No-intercept design: one coefficient per group plus lane as a blocking
# factor. gsub strips the "group" prefix, leaving "sAc12", "sAD38_dox", ...
# (from the factor variable named `groups`), matching the contrast names below.
design <- model.matrix(~0+groups+lane)
colnames(design) <- gsub("group", "", colnames(design))
design
## ----contrasts--------------------------------------------------------------------------
# Pairwise contrasts between the three groups.
contr.matrix <- makeContrasts(
  AC12vsAD38noDox = sAc12-sAD38_dox,
  AC12vsAD38jiaDox = sAc12-sAD38jiadox,
  AD38noDoxvsAD38jiaDox = sAD38_dox-sAD38jiadox,
  levels = colnames(design))
contr.matrix
## ----voom, fig.height=4, fig.width=8, fig.cap="图中绘制了每个基因的均值(x轴)和方差(y轴),显示了在该数据上使用`voom`前它们之间的相关性(左),以及当运用`voom`的精确权重后这种趋势是如何消除的(右)。左侧的图是使用`voom`函数绘制的,它为进行log-CPM转换后的数据拟合线性模型从而提取残差方差。然后,对方差取平方根(或对标准差取平方根),并相对每个基因的平均表达作图。均值通过平均计数加上2再进行log2转换计算得到。右侧的图使用`plotSA`绘制了log2残差标准差与log-CPM均值的关系。平均log2残差标准差由水平蓝线标出。在这两幅图中,每个黑点表示一个基因,红线为对这些点的拟合。"----
# voom precision weights, then linear model fit and empirical Bayes
# moderation for the contrasts above.
#pdf('voom_plot.pdf',width = 12,height = 6)
par(mfrow=c(1,2))
v <- voom(x, design, plot=TRUE)
v
vfit <- lmFit(v, design)
vfit <- contrasts.fit(vfit, contrasts=contr.matrix)
efit <- eBayes(vfit)
plotSA(efit, main="Final model: Mean-variance trend")
#dev.off()
## ----decidetests------------------------------------------------------------------------
summary(decideTests(efit))
## ----treat------------------------------------------------------------------------------
tfit <- treat(vfit, lfc=0)
dt <- decideTests(tfit)
summary(dt)
## ----venn, fig.height=6, fig.width=6, fig.cap="Venn diagram showing the number of DE genes unique to each of the first two contrasts and common to both; the number of genes that are not DE in either contrast is marked bottom-right."----
# NOTE(review): despite its name, de.common holds genes DE in contrast 1
# only; the usual workflow uses dt[,1]!=0 & dt[,2]!=0 -- confirm intent.
de.common <- which(dt[,1]!=0)
length(de.common)
head(tfit$genes$SYMBOL[de.common], n=20)
#pdf('venn_plot.pdf',width = 16,height = 6)
vennDiagram(dt[,1:2], circle.col=c("turquoise", "salmon"))
#dev.off()
# Write the full fit plus DE status for every gene to a tab-delimited file.
write.fit(tfit, dt, file="results.txt")
## ----toptables--------------------------------------------------------------------------
# Ranked DE gene tables, one per contrast.
# Fix: the coef=2 table was previously assigned to AC12.vs.AD38noDOX,
# clobbering the coef=1 table and leaving AC12.vs.AD38jiaDOX (used by the
# heatmap section below) undefined.
AC12.vs.AD38noDOX <- topTreat(tfit, coef=1, n=Inf)
AC12.vs.AD38jiaDOX <- topTreat(tfit, coef=2, n=Inf)
AD38noDOX.vs.AD38jiaDOX <- topTreat(tfit, coef=3, n=Inf)
head(AC12.vs.AD38noDOX)
head(AC12.vs.AD38jiaDOX)
## ----MDplot, fig.keep='none'------------------------------------------------------------
# Mean-difference (MA) plots, one PDF per contrast, with DE genes from
# decideTests() highlighted.
pdf('AC12vsAD38nodoxMA_plot.pdf',width = 10,height = 6)
plotMD(tfit, column=1, status=dt[,1], main=colnames(tfit)[1],
xlim=c(-5,15))
dev.off()
pdf('AC12vsAD38jiadoxMA_plot.pdf',width = 10,height = 6)
plotMD(tfit, column=2, status=dt[,2], main=colnames(tfit)[2],
xlim=c(-5,15))
dev.off()
pdf('AD38nodoxvsAD38jiadoxMA_plot.pdf',width = 10,height = 6)
plotMD(tfit, column=3, status=dt[,3], main=colnames(tfit)[3],
xlim=c(-5,15))
dev.off()
## ----GlimmaMDplot-----------------------------------------------------------------------
# Interactive MD plot for contrast 1; launch=FALSE only writes the HTML.
glMDPlot(tfit, coef=1, status=dt, main=colnames(tfit)[1],anno = x,
side.main="REFSEQ", counts=lcpm, groups=groups, launch=FALSE)
## ----heatmap, fig.height=8, fig.width=5, fig.cap="Heatmap of log-CPM values for the top 200 DE genes of each contrast. Rows are scaled to mean 0 and standard deviation 1; red marks samples with relatively high expression of a gene, blue relatively low, lighter shades intermediate. Samples and genes are reordered by hierarchical clustering and the sample dendrogram is shown.", message=FALSE----
library(gplots)
# Top 200 genes (by REFSEQ id) from the AC12 vs AD38-noDox ranking.
AC12.vs.AD38noDOX.topgenes <- AC12.vs.AD38noDOX$REFSEQ[1:200]
i <- which(rownames(lcpm) %in% AC12.vs.AD38noDOX.topgenes)
mycol <- colorpanel(1000,"blue","white","red")
# heatmap.2(lcpm[i,], scale="row",
# labRow=rownames(v$E)[i], labCol=group,
# col=mycol, trace="none", density.info="none",
# margin=c(8,6), lhei=c(2,10), dendrogram="column")
pdf('38nodoxvsac12heatmap.pdf',width = 6,height = 16)
# Fix: this comparison is AD38 no-dox (samples 1-3) vs AC12 (samples 7-9);
# the previous code plotted columns 4:9 (the +dox samples) instead.
heatmap.2(lcpm[i,c(1:3,7:9)], scale="row",
hclustfun = hclust,
labCol=samplenames[c(1:3,7:9)],
col=mycol, trace="none", density.info="none",
margin=c(8,6), lhei=c(2,10), dendrogram="column")
dev.off()
pdf('38jiadoxvsac12heatmap.pdf',width = 6,height = 16)
AC12.vs.AD38jiaDOX.topgenes <- AC12.vs.AD38jiaDOX$REFSEQ[1:200]
i <- which(rownames(lcpm) %in% AC12.vs.AD38jiaDOX.topgenes)
mycol <- colorpanel(1000,"blue","white","red")
# Fix: this comparison is AD38 +dox (samples 4-6) vs AC12 (samples 7-9);
# the previous code plotted columns c(1:3,7:9) (the no-dox samples) instead.
heatmap.2(lcpm[i,c(4:9)], scale="row",
hclustfun = hclust,
labCol=samplenames[c(4:9)],
col=mycol, trace="none", density.info="none",
margin=c(8,6), lhei=c(2,10), dendrogram="column")
dev.off()
pdf('38nodoxvs38jiadoxheatmap.pdf',width = 6,height = 16)
# Fix: take the top genes from the AD38noDOX vs AD38jiaDOX ranking
# (contrast 3), not from the AC12 vs AD38noDOX table as before.
AD38noDOX.vs.AD38jiaDOX.topgenes <- AD38noDOX.vs.AD38jiaDOX$REFSEQ[1:200]
i <- which(rownames(lcpm) %in% AD38noDOX.vs.AD38jiaDOX.topgenes)
mycol <- colorpanel(1000,"blue","white","red")
# Columns 1:6 are the AD38 no-dox (1-3) and +dox (4-6) samples.
heatmap.2(lcpm[i,c(1:6)], scale="row",
hclustfun = hclust,
labCol=samplenames[c(1:6)],
col=mycol, trace="none", density.info="none",
margin=c(8,6), lhei=c(2,10), dendrogram="column")
dev.off()
#---- GO enrichment (cellular component) for down-regulated genes ----
# One enrichGO/dotplot run per contrast. This chunk was previously
# copy-pasted three times; output file names are unchanged:
# <contrast>_downenrich.pdf.
dn_contrasts <- c("AC12vsAD38noDox", "AC12vsAD38jiaDox", "AD38noDoxvsAD38jiaDox")
for (j in seq_along(dn_contrasts)) {
  # REFSEQ ids of genes called down (-1) in contrast j by decideTests().
  down <- tfit$genes$REFSEQ[dt[, j] == -1]
  ego2 <- enrichGO(gene = down,
                   OrgDb = org.Hs.eg.db,
                   keyType = 'REFSEQ',
                   ont = "CC",
                   pAdjustMethod = "BH",
                   pvalueCutoff = 1,
                   qvalueCutoff = 1)
  pdf(paste0(dn_contrasts[j], '_downenrich.pdf'), width = 9, height = 6)
  # print() is required: ggplot objects do not auto-print inside a loop.
  print(dotplot(ego2, showCategory = 30))
  dev.off()
}
#---- GO enrichment (cellular component) for up-regulated genes ----
# Mirrors the down-regulated chunk above; previously triplicated.
up_contrasts <- c("AC12vsAD38noDox", "AC12vsAD38jiaDox", "AD38noDoxvsAD38jiaDox")
for (j in seq_along(up_contrasts)) {
  # REFSEQ ids of genes called up (1) in contrast j by decideTests().
  up <- tfit$genes$REFSEQ[dt[, j] == 1]
  ego2 <- enrichGO(gene = up,
                   OrgDb = org.Hs.eg.db,
                   keyType = 'REFSEQ',
                   ont = "CC",
                   pAdjustMethod = "BH",
                   pvalueCutoff = 1,
                   qvalueCutoff = 1)
  pdf(paste0(up_contrasts[j], '_upenrich.pdf'), width = 9, height = 6)
  # print() is required: ggplot objects do not auto-print inside a loop.
  print(dotplot(ego2, showCategory = 30))
  dev.off()
}
dev.off() | /coorlabate/RNA-seq.r | no_license | Wenxue-PKU/script | R | false | false | 12,897 | r | ## ----setup, message=FALSE, echo = FALSE-------------------------------------------------
library(BiocStyle)
library(knitr)
library(clusterProfiler)
options(digits=3)
options(width=90)
# NOTE(review): setwd() with an absolute machine-specific path makes the
# script non-portable; prefer running from the project directory.
setwd('//DATA2/work/lbyybl/coorlaborate/YB/cutadapt/output/rna_graph')
## ----setup2, message=FALSE, eval=TRUE---------------------------------------------------
library(limma)
library(Glimma)
library(edgeR)
#library(Mus.musculus)
library(org.Hs.eg.db)
## ----import1----------------------------------------------------------------------------
# Transcript-level count matrix produced by StringTie (prepDE.py).
YB_file <- '/DATA2/work/lbyybl/coorlaborate/YB/cutadapt/output/transcript_count_matrix.csv'
YB_data <- read.csv(YB_file, stringsAsFactors = F)
# Columns 2:10 are the nine sample count columns; column 1 holds the ids.
YB_data2 <- as.matrix(YB_data[,2:10])
rownames(YB_data2) <- as.vector(YB_data[,1])
# Three replicates each of AD38 without dox, AD38 with dox, and Ac12.
groups <- as.factor(c('AD38_dox','AD38_dox','AD38_dox','AD38jiadox','AD38jiadox','AD38jiadox','Ac12','Ac12','Ac12'))
# NOTE(review): YB_file is reused here for a DGEList after holding a file
# path above -- consider a distinct name to avoid confusion.
YB_file <- DGEList(counts=YB_data2,group=groups,remove.zeros=T)
samplenames <- c('38_dox_1','38_dox_2','38_dox_3','38jiadox_1','38jiadox_2','38jiadox_3','Ac12_1','Ac12_2','Ac12_3')
## ----import2----------------------------------------------------------------------------
x <- YB_file
class(x)
dim(x)
#rownames <- gsub('MSTRG.','',rownames(x))
#rownames(x) <- rownames
## ----annotatesamples--------------------------------------------------------------------
# Record a "lane" covariate (here used as a batch/time factor, 3 levels).
lane <- as.factor(rep(c("time1",'time2','time3'), each=3))
x$samples$lane <- lane
x$samples
## ----annotategenes, message=FALSE-------------------------------------------------------
# Map the REFSEQ row ids to ENSEMBL/SYMBOL/ENTREZID via org.Hs.eg.db.
geneid <- rownames(x)
# genes <- select(Mus.musculus, keys=geneid, columns=c("SYMBOL", "TXCHROM"),
# keytype="REFSEQ")
genes <- bitr(geneid, fromType = "REFSEQ",
toType = c("ENSEMBL", "SYMBOL",'ENTREZID'),
OrgDb = org.Hs.eg.db)
head(genes)
# ## ----removedups-------------------------------------------------------------------------
# Keep only the first mapping per REFSEQ id to avoid duplicated rows.
genes <- genes[!duplicated(genes$REFSEQ),]
# ## ----assigngeneanno---------------------------------------------------------------------
x$genes <- genes
x
## ----cpm--------------------------------------------------------------------------------
# Counts per million, raw and log2 (prior.count damps log of small counts).
cpm <- cpm(x)
lcpm <- cpm(x, log=TRUE, prior.count=2)
## ----lcpm-------------------------------------------------------------------------------
# Mean and median library size in millions, used for the density cut-off.
L <- mean(x$samples$lib.size) * 1e-6
M <- median(x$samples$lib.size) * 1e-6
c(L, M)
summary(lcpm)
## ----filter-----------------------------------------------------------------------------
# Keep genes with CPM > 0.5 in at least 3 samples (one full group).
keep.exprs <- rowSums(cpm>0.5)>=3
x <- x[keep.exprs,, keep.lib.sizes=FALSE]
dim(x)
## ----filterplot1, fig.height=4, fig.width=8, fig.cap="Density of log-CPM values per sample for the raw data before filtering (A) and after filtering (B). Dotted vertical lines mark the threshold used in the filtering step (roughly CPM 0.2)."----
pdf('gene_filter.pdf',width = 12,height = 6)
# Density cut-off in log-CPM units derived from the median library size.
lcpm.cutoff <- log2(10/M + 2/L)
library(RColorBrewer)
nsamples <- ncol(x)
col <- brewer.pal(nsamples, "Paired")
par(mfrow=c(1,2))
# Panel A: raw data. First sample starts the plot; the loop overlays the
# rest.
plot(density(lcpm[,1]), col=col[1], lwd=2, ylim=c(0,0.26), las=2, main="", xlab="")
title(main="A. Raw data", xlab="Log-cpm")
abline(v=lcpm.cutoff, lty=3)
abline(v=0, lty=3)
for (i in 2:nsamples){
den <- density(lcpm[,i])
lines(den$x, den$y, col=col[i], lwd=2)
}
legend("topright", samplenames, text.col=col, bty="n")
# Panel B: same densities recomputed after the filtering step above.
lcpm <- cpm(x, log=TRUE)
plot(density(lcpm[,1]), col=col[1], lwd=2, ylim=c(0,0.26), las=2, main="", xlab="")
title(main="B. Filtered data", xlab="Log-cpm")
abline(v=lcpm.cutoff, lty=3)
for (i in 2:nsamples){
den <- density(lcpm[,i])
lines(den$x, den$y, col=col[i], lwd=2)
}
legend("topright", samplenames, text.col=col, bty="n")
dev.off()
## ----normalize--------------------------------------------------------------------------
# TMM normalisation: compute per-sample scaling factors, then show them.
x <- calcNormFactors(x, method = "TMM")
x$samples$norm.factors
## ----MDS1, fig.height=4, fig.width=8, fig.cap="MDS plots of log-CPM values over dimensions 1 and 2, coloured and labelled by sample group (A), and over dimensions 3 and 4, coloured and labelled by sequencing lane (B). Distances correspond to the leading fold change, by default the root-mean-square log2 fold change of the 500 genes most divergent between each pair of samples."----
pdf('MDS_plot.pdf',width = 12,height = 6)
lcpm <- cpm(x, log=TRUE)
par(mfrow=c(1,2))
# Map each group level / lane level to a distinct Brewer colour, then
# flatten to a per-sample character vector for plotMDS().
col.group <- groups
levels(col.group) <- brewer.pal(nlevels(col.group), "Set1")
col.group <- as.character(col.group)
col.lane <- lane
levels(col.lane) <- brewer.pal(nlevels(col.lane), "Set2")
col.lane <- as.character(col.lane)
plotMDS(lcpm, labels=groups, col=col.group)
title(main="A. Sample groups")
plotMDS(lcpm, labels=lane, col=col.lane, dim=c(3,4))
title(main="B. Sequencing lanes")
dev.off()
## ----GlimmaMDSplot----------------------------------------------------------------------
# Interactive MDS plot (HTML) via Glimma; launch=FALSE only writes the file.
glMDSPlot(lcpm, labels=paste(groups, lane, sep="_"),
groups=x$samples[,c(1,4)], launch=FALSE)
## ----design-----------------------------------------------------------------------------
# Design matrix without intercept: one column per group plus lane effects.
design <- model.matrix(~0+groups+lane)
# NOTE(review): the factor is named "groups", so columns are "groupsAc12"
# etc.; gsub("group", "") yields names like "sAc12", which is what the
# contrasts below refer to -- confirm this truncated naming is intentional.
colnames(design) <- gsub("group", "", colnames(design))
design
## ----contrasts--------------------------------------------------------------------------
# Pairwise comparisons of interest between the three cell conditions.
contr.matrix <- makeContrasts(
AC12vsAD38noDox = sAc12-sAD38_dox,
AC12vsAD38jiaDox = sAc12-sAD38jiadox,
AD38noDoxvsAD38jiaDox = sAD38_dox-sAD38jiadox,
levels = colnames(design))
contr.matrix
## ----voom, fig.height=4, fig.width=8, fig.cap="Mean (x-axis) and variance (y-axis) of each gene, showing their dependence before voom is applied (left) and how the trend is removed once voom's precision weights are used (right). The left panel is drawn by voom, which fits a linear model to the log-CPM data to extract residual variances; square-root standard deviations are plotted against average expression (mean count plus 2, log2-transformed). The right panel, drawn by plotSA, shows log2 residual standard deviations against mean log-CPM, with the average marked by a horizontal blue line. In both panels each black dot is a gene and the red line is the fit."----
#pdf('voom_plot.pdf',width = 12,height = 6)
par(mfrow=c(1,2))
# voom: transform counts to log-CPM with per-observation precision weights.
v <- voom(x, design, plot=TRUE)
v
# Fit the linear model, re-parameterise for the contrasts of interest, and
# moderate the t-statistics with empirical Bayes shrinkage.
vfit <- lmFit(v, design)
vfit <- contrasts.fit(vfit, contrasts=contr.matrix)
efit <- eBayes(vfit)
plotSA(efit, main="Final model: Mean-variance trend")
#dev.off()
## ----decidetests------------------------------------------------------------------------
summary(decideTests(efit))
## ----treat------------------------------------------------------------------------------
# treat() with lfc=0 matches eBayes here; dt flags each gene per contrast
# as up (1), down (-1) or not significant (0).
tfit <- treat(vfit, lfc=0)
dt <- decideTests(tfit)
summary(dt)
## ----venn, fig.height=6, fig.width=6, fig.cap="Venn diagram showing the number of DE genes unique to each of the first two contrasts and common to both; the number of genes that are not DE in either contrast is marked bottom-right."----
# NOTE(review): despite its name, de.common holds genes DE in contrast 1
# only; the usual workflow uses dt[,1]!=0 & dt[,2]!=0 -- confirm intent.
de.common <- which(dt[,1]!=0)
length(de.common)
head(tfit$genes$SYMBOL[de.common], n=20)
#pdf('venn_plot.pdf',width = 16,height = 6)
vennDiagram(dt[,1:2], circle.col=c("turquoise", "salmon"))
#dev.off()
# Write the full fit plus DE status for every gene to a tab-delimited file.
write.fit(tfit, dt, file="results.txt")
## ----toptables--------------------------------------------------------------------------
# Ranked DE gene tables, one per contrast.
# Fix: the coef=2 table was previously assigned to AC12.vs.AD38noDOX,
# clobbering the coef=1 table and leaving AC12.vs.AD38jiaDOX (used by the
# heatmap section below) undefined.
AC12.vs.AD38noDOX <- topTreat(tfit, coef=1, n=Inf)
AC12.vs.AD38jiaDOX <- topTreat(tfit, coef=2, n=Inf)
AD38noDOX.vs.AD38jiaDOX <- topTreat(tfit, coef=3, n=Inf)
head(AC12.vs.AD38noDOX)
head(AC12.vs.AD38jiaDOX)
## ----MDplot, fig.keep='none'------------------------------------------------------------
# Mean-difference (MA) plots, one PDF per contrast, with DE genes from
# decideTests() highlighted.
pdf('AC12vsAD38nodoxMA_plot.pdf',width = 10,height = 6)
plotMD(tfit, column=1, status=dt[,1], main=colnames(tfit)[1],
xlim=c(-5,15))
dev.off()
pdf('AC12vsAD38jiadoxMA_plot.pdf',width = 10,height = 6)
plotMD(tfit, column=2, status=dt[,2], main=colnames(tfit)[2],
xlim=c(-5,15))
dev.off()
pdf('AD38nodoxvsAD38jiadoxMA_plot.pdf',width = 10,height = 6)
plotMD(tfit, column=3, status=dt[,3], main=colnames(tfit)[3],
xlim=c(-5,15))
dev.off()
## ----GlimmaMDplot-----------------------------------------------------------------------
# Interactive MD plot for contrast 1; launch=FALSE only writes the HTML.
glMDPlot(tfit, coef=1, status=dt, main=colnames(tfit)[1],anno = x,
side.main="REFSEQ", counts=lcpm, groups=groups, launch=FALSE)
## ----heatmap, fig.height=8, fig.width=5, fig.cap="Heatmap of log-CPM values for the top 200 DE genes of each contrast. Rows are scaled to mean 0 and standard deviation 1; red marks samples with relatively high expression of a gene, blue relatively low, lighter shades intermediate. Samples and genes are reordered by hierarchical clustering and the sample dendrogram is shown.", message=FALSE----
library(gplots)
# Top 200 genes (by REFSEQ id) from the AC12 vs AD38-noDox ranking.
AC12.vs.AD38noDOX.topgenes <- AC12.vs.AD38noDOX$REFSEQ[1:200]
i <- which(rownames(lcpm) %in% AC12.vs.AD38noDOX.topgenes)
mycol <- colorpanel(1000,"blue","white","red")
# heatmap.2(lcpm[i,], scale="row",
# labRow=rownames(v$E)[i], labCol=group,
# col=mycol, trace="none", density.info="none",
# margin=c(8,6), lhei=c(2,10), dendrogram="column")
pdf('38nodoxvsac12heatmap.pdf',width = 6,height = 16)
# Fix: this comparison is AD38 no-dox (samples 1-3) vs AC12 (samples 7-9);
# the previous code plotted columns 4:9 (the +dox samples) instead.
heatmap.2(lcpm[i,c(1:3,7:9)], scale="row",
hclustfun = hclust,
labCol=samplenames[c(1:3,7:9)],
col=mycol, trace="none", density.info="none",
margin=c(8,6), lhei=c(2,10), dendrogram="column")
dev.off()
pdf('38jiadoxvsac12heatmap.pdf',width = 6,height = 16)
AC12.vs.AD38jiaDOX.topgenes <- AC12.vs.AD38jiaDOX$REFSEQ[1:200]
i <- which(rownames(lcpm) %in% AC12.vs.AD38jiaDOX.topgenes)
mycol <- colorpanel(1000,"blue","white","red")
# Fix: this comparison is AD38 +dox (samples 4-6) vs AC12 (samples 7-9);
# the previous code plotted columns c(1:3,7:9) (the no-dox samples) instead.
heatmap.2(lcpm[i,c(4:9)], scale="row",
hclustfun = hclust,
labCol=samplenames[c(4:9)],
col=mycol, trace="none", density.info="none",
margin=c(8,6), lhei=c(2,10), dendrogram="column")
dev.off()
pdf('38nodoxvs38jiadoxheatmap.pdf',width = 6,height = 16)
# Fix: take the top genes from the AD38noDOX vs AD38jiaDOX ranking
# (contrast 3), not from the AC12 vs AD38noDOX table as before.
AD38noDOX.vs.AD38jiaDOX.topgenes <- AD38noDOX.vs.AD38jiaDOX$REFSEQ[1:200]
i <- which(rownames(lcpm) %in% AD38noDOX.vs.AD38jiaDOX.topgenes)
mycol <- colorpanel(1000,"blue","white","red")
# Columns 1:6 are the AD38 no-dox (1-3) and +dox (4-6) samples.
heatmap.2(lcpm[i,c(1:6)], scale="row",
hclustfun = hclust,
labCol=samplenames[c(1:6)],
col=mycol, trace="none", density.info="none",
margin=c(8,6), lhei=c(2,10), dendrogram="column")
dev.off()
#---- GO enrichment (cellular component) for down-regulated genes ----
# One enrichGO/dotplot run per contrast. This chunk was previously
# copy-pasted three times; output file names are unchanged:
# <contrast>_downenrich.pdf.
dn_contrasts <- c("AC12vsAD38noDox", "AC12vsAD38jiaDox", "AD38noDoxvsAD38jiaDox")
for (j in seq_along(dn_contrasts)) {
  # REFSEQ ids of genes called down (-1) in contrast j by decideTests().
  down <- tfit$genes$REFSEQ[dt[, j] == -1]
  ego2 <- enrichGO(gene = down,
                   OrgDb = org.Hs.eg.db,
                   keyType = 'REFSEQ',
                   ont = "CC",
                   pAdjustMethod = "BH",
                   pvalueCutoff = 1,
                   qvalueCutoff = 1)
  pdf(paste0(dn_contrasts[j], '_downenrich.pdf'), width = 9, height = 6)
  # print() is required: ggplot objects do not auto-print inside a loop.
  print(dotplot(ego2, showCategory = 30))
  dev.off()
}
#---- GO enrichment (cellular component) for up-regulated genes ----
# Mirrors the down-regulated chunk above; previously triplicated.
up_contrasts <- c("AC12vsAD38noDox", "AC12vsAD38jiaDox", "AD38noDoxvsAD38jiaDox")
for (j in seq_along(up_contrasts)) {
  # REFSEQ ids of genes called up (1) in contrast j by decideTests().
  up <- tfit$genes$REFSEQ[dt[, j] == 1]
  ego2 <- enrichGO(gene = up,
                   OrgDb = org.Hs.eg.db,
                   keyType = 'REFSEQ',
                   ont = "CC",
                   pAdjustMethod = "BH",
                   pvalueCutoff = 1,
                   qvalueCutoff = 1)
  pdf(paste0(up_contrasts[j], '_upenrich.pdf'), width = 9, height = 6)
  # print() is required: ggplot objects do not auto-print inside a loop.
  print(dotplot(ego2, showCategory = 30))
  dev.off()
}
plot1 <- function() {
    ## Draw a histogram of Global_active_power for Feb. 1-2, 2007 and save
    ## it as "plot1.png" in the working directory. Returns nothing useful.
    ##
    ## Load data corresponding to Feb. 1-2, 2007. Row ranges were
    ## determined by manually looking at the data.
    ## (The unused col_class vector that used to sit here was removed.)
    ## NOTE(review): header=TRUE combined with skip= makes read.table treat
    ## the first non-skipped data row as the header; the manually chosen
    ## skip/nrows presumably account for that -- confirm against the file.
    data <- read.table("../household_power_consumption.txt", header=TRUE,
                       sep=";", skip=66636, nrows=2880, stringsAsFactors=FALSE)
    ## Need to read in the column names too since we skipped rows.
    header <- read.table("../household_power_consumption.txt", header=FALSE,
                         nrows=1, sep=";", stringsAsFactors=FALSE)
    colnames(data) <- unlist(header)
    ## Open PNG file device.
    png(file = "plot1.png")
    ## Make histogram.
    hist(data$Global_active_power, col="red",
         xlab="Global Active Power (kilowatts)",
         main="Global Active Power")
    ## Close PNG file and don't print anything about
    ## the graphics device.
    invisible(dev.off())
}
## Load data corresponding to Feb. 1-2, 2007. Row ranges were
## determined by manually looking at the data.
col_class = c("character","character","numeric","numeric","numeric",
"numeric","numeric","numeric","numeric")
data <- read.table("../household_power_consumption.txt", header=TRUE,
sep=";", skip=66636, nrows=2880, stringsAsFactors=FALSE)
## Need to read in the column names too since we skipped rows.
header <- read.table("../household_power_consumption.txt", header=FALSE,
nrows=1, sep=";", stringsAsFactors=FALSE)
colnames(data) <- unlist(header)
## Open PNG file device.
png(file = "plot1.png")
## Make histogram.
hist(data$Global_active_power, col="red",
xlab="Global Active Power (kilowatts)",
main="Global Active Power")
## Close PNG file and don't print anything about
## the graphics device.
invisible(dev.off())
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mediastore_operations.R
\name{mediastore_describe_container}
\alias{mediastore_describe_container}
\title{Retrieves the properties of the requested container}
\usage{
mediastore_describe_container(ContainerName)
}
\arguments{
\item{ContainerName}{The name of the container to query.}
}
\description{
Retrieves the properties of the requested container. This request is
commonly used to retrieve the endpoint of a container. An endpoint is a
value assigned by the service when a new container is created. A
container\'s endpoint does not change after it has been assigned. The
\code{DescribeContainer} request returns a single \code{Container} object based on
\code{ContainerName}. To return all \code{Container} objects that are associated
with a specified AWS account, use ListContainers.
}
\section{Request syntax}{
\preformatted{svc$describe_container(
ContainerName = "string"
)
}
}
\keyword{internal}
| /cran/paws.media.services/man/mediastore_describe_container.Rd | permissive | johnnytommy/paws | R | false | true | 987 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mediastore_operations.R
\name{mediastore_describe_container}
\alias{mediastore_describe_container}
\title{Retrieves the properties of the requested container}
\usage{
mediastore_describe_container(ContainerName)
}
\arguments{
\item{ContainerName}{The name of the container to query.}
}
\description{
Retrieves the properties of the requested container. This request is
commonly used to retrieve the endpoint of a container. An endpoint is a
value assigned by the service when a new container is created. A
container\'s endpoint does not change after it has been assigned. The
\code{DescribeContainer} request returns a single \code{Container} object based on
\code{ContainerName}. To return all \code{Container} objects that are associated
with a specified AWS account, use ListContainers.
}
\section{Request syntax}{
\preformatted{svc$describe_container(
ContainerName = "string"
)
}
}
\keyword{internal}
|
# This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common get_config new_operation new_request send_request
#' @include lexmodelbuildingservice_service.R
NULL
#' Creates a new version of the bot based on the $LATEST version
#'
#' Creates a new version of the bot based on the `$LATEST` version. If the
#' `$LATEST` version of this resource hasn\'t changed since you created the
#' last version, Amazon Lex doesn\'t create a new version. It returns the
#' last created version.
#'
#' You can update only the `$LATEST` version of the bot. You can\'t update
#' the numbered versions that you create with the `CreateBotVersion`
#' operation.
#'
#' When you create the first version of a bot, Amazon Lex sets the version
#' to 1. Subsequent versions increment by 1. For more information, see
#' versioning-intro.
#'
#' This operation requires permission for the `lex:CreateBotVersion`
#' action.
#'
#' @usage
#' lexmodelbuildingservice_create_bot_version(name, checksum)
#'
#' @param name [required] The name of the bot that you want to create a new version of. The name
#' is case sensitive.
#' @param checksum Identifies a specific revision of the `$LATEST` version of the bot. If
#' you specify a checksum and the `$LATEST` version of the bot has a
#' different checksum, a `PreconditionFailedException` exception is
#' returned and Amazon Lex doesn\'t publish a new version. If you don\'t
#' specify a checksum, Amazon Lex publishes the `$LATEST` version.
#'
#' @section Request syntax:
#' ```
#' svc$create_bot_version(
#' name = "string",
#' checksum = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_create_bot_version
lexmodelbuildingservice_create_bot_version <- function(name, checksum = NULL) {
  # Describe the REST operation this API call maps onto.
  operation <- new_operation(
    name = "CreateBotVersion",
    http_method = "POST",
    http_path = "/bots/{name}/versions",
    paginator = list()
  )
  # Marshal the caller's arguments and declare the expected output shape.
  op_input <- .lexmodelbuildingservice$create_bot_version_input(
    name = name, checksum = checksum
  )
  op_output <- .lexmodelbuildingservice$create_bot_version_output()
  # Build a client for the active configuration and dispatch the request.
  client <- .lexmodelbuildingservice$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.lexmodelbuildingservice$operations$create_bot_version <- lexmodelbuildingservice_create_bot_version
#' Creates a new version of an intent based on the $LATEST version of the
#' intent
#'
#' Creates a new version of an intent based on the `$LATEST` version of the
#' intent. If the `$LATEST` version of this intent hasn\'t changed since
#' you last updated it, Amazon Lex doesn\'t create a new version. It
#' returns the last version you created.
#'
#' You can update only the `$LATEST` version of the intent. You can\'t
#' update the numbered versions that you create with the
#' `CreateIntentVersion` operation.
#'
#' When you create a version of an intent, Amazon Lex sets the version to
#' 1. Subsequent versions increment by 1. For more information, see
#' versioning-intro.
#'
#' This operation requires permissions to perform the
#' `lex:CreateIntentVersion` action.
#'
#' @usage
#' lexmodelbuildingservice_create_intent_version(name, checksum)
#'
#' @param name [required] The name of the intent that you want to create a new version of. The
#' name is case sensitive.
#' @param checksum Checksum of the `$LATEST` version of the intent that should be used to
#' create the new version. If you specify a checksum and the `$LATEST`
#' version of the intent has a different checksum, Amazon Lex returns a
#' `PreconditionFailedException` exception and doesn\'t publish a new
#' version. If you don\'t specify a checksum, Amazon Lex publishes the
#' `$LATEST` version.
#'
#' @section Request syntax:
#' ```
#' svc$create_intent_version(
#' name = "string",
#' checksum = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_create_intent_version
lexmodelbuildingservice_create_intent_version <- function(name, checksum = NULL) {
  # Describe the REST operation this API call maps onto.
  operation <- new_operation(
    name = "CreateIntentVersion",
    http_method = "POST",
    http_path = "/intents/{name}/versions",
    paginator = list()
  )
  # Marshal the caller's arguments and declare the expected output shape.
  op_input <- .lexmodelbuildingservice$create_intent_version_input(
    name = name, checksum = checksum
  )
  op_output <- .lexmodelbuildingservice$create_intent_version_output()
  # Build a client for the active configuration and dispatch the request.
  client <- .lexmodelbuildingservice$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.lexmodelbuildingservice$operations$create_intent_version <- lexmodelbuildingservice_create_intent_version
#' Creates a new version of a slot type based on the $LATEST version of the
#' specified slot type
#'
#' Creates a new version of a slot type based on the `$LATEST` version of
#' the specified slot type. If the `$LATEST` version of this resource has
#' not changed since the last version that you created, Amazon Lex doesn\'t
#' create a new version. It returns the last version that you created.
#'
#' You can update only the `$LATEST` version of a slot type. You can\'t
#' update the numbered versions that you create with the
#' `CreateSlotTypeVersion` operation.
#'
#' When you create a version of a slot type, Amazon Lex sets the version to
#' 1. Subsequent versions increment by 1. For more information, see
#' versioning-intro.
#'
#' This operation requires permissions for the `lex:CreateSlotTypeVersion`
#' action.
#'
#' @usage
#' lexmodelbuildingservice_create_slot_type_version(name, checksum)
#'
#' @param name [required] The name of the slot type that you want to create a new version for. The
#' name is case sensitive.
#' @param checksum Checksum for the `$LATEST` version of the slot type that you want to
#' publish. If you specify a checksum and the `$LATEST` version of the slot
#' type has a different checksum, Amazon Lex returns a
#' `PreconditionFailedException` exception and doesn\'t publish the new
#' version. If you don\'t specify a checksum, Amazon Lex publishes the
#' `$LATEST` version.
#'
#' @section Request syntax:
#' ```
#' svc$create_slot_type_version(
#' name = "string",
#' checksum = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_create_slot_type_version
lexmodelbuildingservice_create_slot_type_version <- function(name, checksum = NULL) {
  # Describe the REST operation this API call maps onto.
  operation <- new_operation(
    name = "CreateSlotTypeVersion",
    http_method = "POST",
    http_path = "/slottypes/{name}/versions",
    paginator = list()
  )
  # Marshal the caller's arguments and declare the expected output shape.
  op_input <- .lexmodelbuildingservice$create_slot_type_version_input(
    name = name, checksum = checksum
  )
  op_output <- .lexmodelbuildingservice$create_slot_type_version_output()
  # Build a client for the active configuration and dispatch the request.
  client <- .lexmodelbuildingservice$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.lexmodelbuildingservice$operations$create_slot_type_version <- lexmodelbuildingservice_create_slot_type_version
#' Deletes all versions of the bot, including the $LATEST version
#'
#' Deletes all versions of the bot, including the `$LATEST` version. To
#' delete a specific version of the bot, use the DeleteBotVersion
#' operation. The `DeleteBot` operation doesn\'t immediately remove the bot
#' schema. Instead, it is marked for deletion and removed later.
#'
#' Amazon Lex stores utterances indefinitely for improving the ability of
#' your bot to respond to user inputs. These utterances are not removed
#' when the bot is deleted. To remove the utterances, use the
#' DeleteUtterances operation.
#'
#' If a bot has an alias, you can\'t delete it. Instead, the `DeleteBot`
#' operation returns a `ResourceInUseException` exception that includes a
#' reference to the alias that refers to the bot. To remove the reference
#' to the bot, delete the alias. If you get the same exception again,
#' delete the referring alias until the `DeleteBot` operation is
#' successful.
#'
#' This operation requires permissions for the `lex:DeleteBot` action.
#'
#' @usage
#' lexmodelbuildingservice_delete_bot(name)
#'
#' @param name [required] The name of the bot. The name is case sensitive.
#'
#' @section Request syntax:
#' ```
#' svc$delete_bot(
#' name = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_delete_bot
lexmodelbuildingservice_delete_bot <- function(name) {
  # Describe the REST operation this API call maps onto.
  operation <- new_operation(
    name = "DeleteBot",
    http_method = "DELETE",
    http_path = "/bots/{name}",
    paginator = list()
  )
  # Marshal the caller's arguments and declare the expected output shape.
  op_input <- .lexmodelbuildingservice$delete_bot_input(name = name)
  op_output <- .lexmodelbuildingservice$delete_bot_output()
  # Build a client for the active configuration and dispatch the request.
  client <- .lexmodelbuildingservice$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.lexmodelbuildingservice$operations$delete_bot <- lexmodelbuildingservice_delete_bot
#' Deletes an alias for the specified bot
#'
#' Deletes an alias for the specified bot.
#'
#' You can\'t delete an alias that is used in the association between a bot
#' and a messaging channel. If an alias is used in a channel association,
#' the `DeleteBot` operation returns a `ResourceInUseException` exception
#' that includes a reference to the channel association that refers to the
#' bot. You can remove the reference to the alias by deleting the channel
#' association. If you get the same exception again, delete the referring
#' association until the `DeleteBotAlias` operation is successful.
#'
#' @usage
#' lexmodelbuildingservice_delete_bot_alias(name, botName)
#'
#' @param name [required] The name of the alias to delete. The name is case sensitive.
#' @param botName [required] The name of the bot that the alias points to.
#'
#' @section Request syntax:
#' ```
#' svc$delete_bot_alias(
#' name = "string",
#' botName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_delete_bot_alias
lexmodelbuildingservice_delete_bot_alias <- function(name, botName) {
  # Describe the REST operation this API call maps onto.
  operation <- new_operation(
    name = "DeleteBotAlias",
    http_method = "DELETE",
    http_path = "/bots/{botName}/aliases/{name}",
    paginator = list()
  )
  # Marshal the caller's arguments and declare the expected output shape.
  op_input <- .lexmodelbuildingservice$delete_bot_alias_input(
    name = name, botName = botName
  )
  op_output <- .lexmodelbuildingservice$delete_bot_alias_output()
  # Build a client for the active configuration and dispatch the request.
  client <- .lexmodelbuildingservice$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.lexmodelbuildingservice$operations$delete_bot_alias <- lexmodelbuildingservice_delete_bot_alias
#' Deletes the association between an Amazon Lex bot and a messaging
#' platform
#'
#' Deletes the association between an Amazon Lex bot and a messaging
#' platform.
#'
#' This operation requires permission for the
#' `lex:DeleteBotChannelAssociation` action.
#'
#' @usage
#' lexmodelbuildingservice_delete_bot_channel_association(name, botName,
#' botAlias)
#'
#' @param name [required] The name of the association. The name is case sensitive.
#' @param botName [required] The name of the Amazon Lex bot.
#' @param botAlias [required] An alias that points to the specific version of the Amazon Lex bot to
#' which this association is being made.
#'
#' @section Request syntax:
#' ```
#' svc$delete_bot_channel_association(
#' name = "string",
#' botName = "string",
#' botAlias = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_delete_bot_channel_association
lexmodelbuildingservice_delete_bot_channel_association <- function(name, botName, botAlias) {
  # Describe the REST operation this API call maps onto. The {aliasName}
  # path placeholder is filled from the botAlias input by the marshaller.
  operation <- new_operation(
    name = "DeleteBotChannelAssociation",
    http_method = "DELETE",
    http_path = "/bots/{botName}/aliases/{aliasName}/channels/{name}",
    paginator = list()
  )
  # Marshal the caller's arguments and declare the expected output shape.
  op_input <- .lexmodelbuildingservice$delete_bot_channel_association_input(
    name = name, botName = botName, botAlias = botAlias
  )
  op_output <- .lexmodelbuildingservice$delete_bot_channel_association_output()
  # Build a client for the active configuration and dispatch the request.
  client <- .lexmodelbuildingservice$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.lexmodelbuildingservice$operations$delete_bot_channel_association <- lexmodelbuildingservice_delete_bot_channel_association
#' Deletes a specific version of a bot
#'
#' Deletes a specific version of a bot. To delete all versions of a bot,
#' use the DeleteBot operation.
#'
#' This operation requires permissions for the `lex:DeleteBotVersion`
#' action.
#'
#' @usage
#' lexmodelbuildingservice_delete_bot_version(name, version)
#'
#' @param name [required] The name of the bot.
#' @param version [required] The version of the bot to delete. You cannot delete the `$LATEST`
#' version of the bot. To delete the `$LATEST` version, use the DeleteBot
#' operation.
#'
#' @section Request syntax:
#' ```
#' svc$delete_bot_version(
#'   name = "string",
#'   version = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_delete_bot_version
lexmodelbuildingservice_delete_bot_version <- function(name, version) {
  # Describe the HTTP call for the DeleteBotVersion operation.
  operation <- new_operation(
    name = "DeleteBotVersion",
    http_method = "DELETE",
    http_path = "/bots/{name}/versions/{version}",
    paginator = list()
  )
  req_input <- .lexmodelbuildingservice$delete_bot_version_input(
    name = name, version = version
  )
  req_output <- .lexmodelbuildingservice$delete_bot_version_output()
  client <- .lexmodelbuildingservice$service(get_config())
  # Build and dispatch the request; the response is the function's value.
  send_request(new_request(client, operation, req_input, req_output))
}
.lexmodelbuildingservice$operations$delete_bot_version <- lexmodelbuildingservice_delete_bot_version
#' Deletes all versions of the intent, including the $LATEST version
#'
#' Deletes all versions of the intent, including the `$LATEST` version. To
#' delete a specific version of the intent, use the DeleteIntentVersion
#' operation.
#'
#' You can delete a version of an intent only if it is not referenced. To
#' delete an intent that is referred to in one or more bots (see
#' how-it-works), you must remove those references first.
#'
#' If you get the `ResourceInUseException` exception, it provides an
#' example reference that shows where the intent is referenced. To remove
#' the reference to the intent, either update the bot or delete it. If you
#' get the same exception when you attempt to delete the intent again,
#' repeat until the intent has no references and the call to `DeleteIntent`
#' is successful.
#'
#' This operation requires permission for the `lex:DeleteIntent` action.
#'
#' @usage
#' lexmodelbuildingservice_delete_intent(name)
#'
#' @param name [required] The name of the intent. The name is case sensitive.
#'
#' @section Request syntax:
#' ```
#' svc$delete_intent(
#'   name = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_delete_intent
lexmodelbuildingservice_delete_intent <- function(name) {
  # Describe the HTTP call for the DeleteIntent operation.
  operation <- new_operation(
    name = "DeleteIntent",
    http_method = "DELETE",
    http_path = "/intents/{name}",
    paginator = list()
  )
  req_input <- .lexmodelbuildingservice$delete_intent_input(name = name)
  req_output <- .lexmodelbuildingservice$delete_intent_output()
  client <- .lexmodelbuildingservice$service(get_config())
  # Build and dispatch the request; the response is the function's value.
  send_request(new_request(client, operation, req_input, req_output))
}
.lexmodelbuildingservice$operations$delete_intent <- lexmodelbuildingservice_delete_intent
#' Deletes a specific version of an intent
#'
#' Deletes a specific version of an intent. To delete all versions of an
#' intent, use the DeleteIntent operation.
#'
#' This operation requires permissions for the `lex:DeleteIntentVersion`
#' action.
#'
#' @usage
#' lexmodelbuildingservice_delete_intent_version(name, version)
#'
#' @param name [required] The name of the intent.
#' @param version [required] The version of the intent to delete. You cannot delete the `$LATEST`
#' version of the intent. To delete the `$LATEST` version, use the
#' DeleteIntent operation.
#'
#' @section Request syntax:
#' ```
#' svc$delete_intent_version(
#'   name = "string",
#'   version = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_delete_intent_version
lexmodelbuildingservice_delete_intent_version <- function(name, version) {
  # Describe the HTTP call for the DeleteIntentVersion operation.
  operation <- new_operation(
    name = "DeleteIntentVersion",
    http_method = "DELETE",
    http_path = "/intents/{name}/versions/{version}",
    paginator = list()
  )
  req_input <- .lexmodelbuildingservice$delete_intent_version_input(
    name = name, version = version
  )
  req_output <- .lexmodelbuildingservice$delete_intent_version_output()
  client <- .lexmodelbuildingservice$service(get_config())
  # Build and dispatch the request; the response is the function's value.
  send_request(new_request(client, operation, req_input, req_output))
}
.lexmodelbuildingservice$operations$delete_intent_version <- lexmodelbuildingservice_delete_intent_version
#' Deletes all versions of the slot type, including the $LATEST version
#'
#' Deletes all versions of the slot type, including the `$LATEST` version.
#' To delete a specific version of the slot type, use the
#' DeleteSlotTypeVersion operation.
#'
#' You can delete a version of a slot type only if it is not referenced. To
#' delete a slot type that is referred to in one or more intents, you must
#' remove those references first.
#'
#' If you get the `ResourceInUseException` exception, the exception
#' provides an example reference that shows the intent where the slot type
#' is referenced. To remove the reference to the slot type, either update
#' the intent or delete it. If you get the same exception when you attempt
#' to delete the slot type again, repeat until the slot type has no
#' references and the `DeleteSlotType` call is successful.
#'
#' This operation requires permission for the `lex:DeleteSlotType` action.
#'
#' @usage
#' lexmodelbuildingservice_delete_slot_type(name)
#'
#' @param name [required] The name of the slot type. The name is case sensitive.
#'
#' @section Request syntax:
#' ```
#' svc$delete_slot_type(
#'   name = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_delete_slot_type
lexmodelbuildingservice_delete_slot_type <- function(name) {
  # Describe the HTTP call for the DeleteSlotType operation.
  operation <- new_operation(
    name = "DeleteSlotType",
    http_method = "DELETE",
    http_path = "/slottypes/{name}",
    paginator = list()
  )
  req_input <- .lexmodelbuildingservice$delete_slot_type_input(name = name)
  req_output <- .lexmodelbuildingservice$delete_slot_type_output()
  client <- .lexmodelbuildingservice$service(get_config())
  # Build and dispatch the request; the response is the function's value.
  send_request(new_request(client, operation, req_input, req_output))
}
.lexmodelbuildingservice$operations$delete_slot_type <- lexmodelbuildingservice_delete_slot_type
#' Deletes a specific version of a slot type
#'
#' Deletes a specific version of a slot type. To delete all versions of a
#' slot type, use the DeleteSlotType operation.
#'
#' This operation requires permissions for the `lex:DeleteSlotTypeVersion`
#' action.
#'
#' @usage
#' lexmodelbuildingservice_delete_slot_type_version(name, version)
#'
#' @param name [required] The name of the slot type.
#' @param version [required] The version of the slot type to delete. You cannot delete the `$LATEST`
#' version of the slot type. To delete the `$LATEST` version, use the
#' DeleteSlotType operation.
#'
#' @section Request syntax:
#' ```
#' svc$delete_slot_type_version(
#'   name = "string",
#'   version = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_delete_slot_type_version
lexmodelbuildingservice_delete_slot_type_version <- function(name, version) {
  # Describe the HTTP call for the DeleteSlotTypeVersion operation.
  # NOTE: the singular "version" path segment matches the AWS API definition.
  operation <- new_operation(
    name = "DeleteSlotTypeVersion",
    http_method = "DELETE",
    http_path = "/slottypes/{name}/version/{version}",
    paginator = list()
  )
  req_input <- .lexmodelbuildingservice$delete_slot_type_version_input(
    name = name, version = version
  )
  req_output <- .lexmodelbuildingservice$delete_slot_type_version_output()
  client <- .lexmodelbuildingservice$service(get_config())
  # Build and dispatch the request; the response is the function's value.
  send_request(new_request(client, operation, req_input, req_output))
}
.lexmodelbuildingservice$operations$delete_slot_type_version <- lexmodelbuildingservice_delete_slot_type_version
#' Deletes stored utterances
#'
#' Deletes stored utterances.
#'
#' Amazon Lex stores the utterances that users send to your bot. Utterances
#' are stored for 15 days for use with the GetUtterancesView operation, and
#' then stored indefinitely for use in improving the ability of your bot to
#' respond to user input.
#'
#' Use the `DeleteUtterances` operation to manually delete stored
#' utterances for a specific user. When you use the `DeleteUtterances`
#' operation, utterances stored for improving your bot\'s ability to
#' respond to user input are deleted immediately. Utterances stored for use
#' with the `GetUtterancesView` operation are deleted after 15 days.
#'
#' This operation requires permissions for the `lex:DeleteUtterances`
#' action.
#'
#' @usage
#' lexmodelbuildingservice_delete_utterances(botName, userId)
#'
#' @param botName [required] The name of the bot that stored the utterances.
#' @param userId [required] The unique identifier for the user that made the utterances. This is the
#' user ID that was sent in the
#' [PostContent](http://docs.aws.amazon.com/lex/latest/dg/API_runtime_PostContent.html)
#' or
#' [PostText](http://docs.aws.amazon.com/lex/latest/dg/API_runtime_PostText.html)
#' operation request that contained the utterance.
#'
#' @section Request syntax:
#' ```
#' svc$delete_utterances(
#'   botName = "string",
#'   userId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_delete_utterances
lexmodelbuildingservice_delete_utterances <- function(botName, userId) {
  # Describe the HTTP call for the DeleteUtterances operation.
  operation <- new_operation(
    name = "DeleteUtterances",
    http_method = "DELETE",
    http_path = "/bots/{botName}/utterances/{userId}",
    paginator = list()
  )
  req_input <- .lexmodelbuildingservice$delete_utterances_input(
    botName = botName, userId = userId
  )
  req_output <- .lexmodelbuildingservice$delete_utterances_output()
  client <- .lexmodelbuildingservice$service(get_config())
  # Build and dispatch the request; the response is the function's value.
  send_request(new_request(client, operation, req_input, req_output))
}
.lexmodelbuildingservice$operations$delete_utterances <- lexmodelbuildingservice_delete_utterances
#' Returns metadata information for a specific bot
#'
#' Returns metadata information for a specific bot. You must provide the
#' bot name and the bot version or alias.
#'
#' This operation requires permissions for the `lex:GetBot` action.
#'
#' @usage
#' lexmodelbuildingservice_get_bot(name, versionOrAlias)
#'
#' @param name [required] The name of the bot. The name is case sensitive.
#' @param versionOrAlias [required] The version or alias of the bot.
#'
#' @section Request syntax:
#' ```
#' svc$get_bot(
#'   name = "string",
#'   versionOrAlias = "string"
#' )
#' ```
#'
#' @examples
#' # This example shows how to get configuration information for a bot.
#' \donttest{svc$get_bot(
#'   name = "DocOrderPizza",
#'   versionOrAlias = "$LATEST"
#' )}
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_get_bot
lexmodelbuildingservice_get_bot <- function(name, versionOrAlias) {
  # Describe the HTTP call for the GetBot operation.
  # NOTE: the lowercase "{versionoralias}" placeholder matches the AWS API path.
  operation <- new_operation(
    name = "GetBot",
    http_method = "GET",
    http_path = "/bots/{name}/versions/{versionoralias}",
    paginator = list()
  )
  req_input <- .lexmodelbuildingservice$get_bot_input(
    name = name, versionOrAlias = versionOrAlias
  )
  req_output <- .lexmodelbuildingservice$get_bot_output()
  client <- .lexmodelbuildingservice$service(get_config())
  # Build and dispatch the request; the response is the function's value.
  send_request(new_request(client, operation, req_input, req_output))
}
.lexmodelbuildingservice$operations$get_bot <- lexmodelbuildingservice_get_bot
#' Returns information about an Amazon Lex bot alias
#'
#' Returns information about an Amazon Lex bot alias. For more information
#' about aliases, see versioning-aliases.
#'
#' This operation requires permissions for the `lex:GetBotAlias` action.
#'
#' @usage
#' lexmodelbuildingservice_get_bot_alias(name, botName)
#'
#' @param name [required] The name of the bot alias. The name is case sensitive.
#' @param botName [required] The name of the bot.
#'
#' @section Request syntax:
#' ```
#' svc$get_bot_alias(
#'   name = "string",
#'   botName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_get_bot_alias
lexmodelbuildingservice_get_bot_alias <- function(name, botName) {
  # Describe the HTTP call for the GetBotAlias operation.
  operation <- new_operation(
    name = "GetBotAlias",
    http_method = "GET",
    http_path = "/bots/{botName}/aliases/{name}",
    paginator = list()
  )
  req_input <- .lexmodelbuildingservice$get_bot_alias_input(
    name = name, botName = botName
  )
  req_output <- .lexmodelbuildingservice$get_bot_alias_output()
  client <- .lexmodelbuildingservice$service(get_config())
  # Build and dispatch the request; the response is the function's value.
  send_request(new_request(client, operation, req_input, req_output))
}
.lexmodelbuildingservice$operations$get_bot_alias <- lexmodelbuildingservice_get_bot_alias
#' Returns a list of aliases for a specified Amazon Lex bot
#'
#' Returns a list of aliases for a specified Amazon Lex bot.
#'
#' This operation requires permissions for the `lex:GetBotAliases` action.
#'
#' @usage
#' lexmodelbuildingservice_get_bot_aliases(botName, nextToken, maxResults,
#'   nameContains)
#'
#' @param botName [required] The name of the bot.
#' @param nextToken A pagination token for fetching the next page of aliases. If the
#' response to this call is truncated, Amazon Lex returns a pagination
#' token in the response. To fetch the next page of aliases, specify the
#' pagination token in the next request.
#' @param maxResults The maximum number of aliases to return in the response. The default is
#' 50.
#' @param nameContains Substring to match in bot alias names. An alias will be returned if any
#' part of its name matches the substring. For example, \"xyz\" matches
#' both \"xyzabc\" and \"abcxyz.\"
#'
#' @section Request syntax:
#' ```
#' svc$get_bot_aliases(
#'   botName = "string",
#'   nextToken = "string",
#'   maxResults = 123,
#'   nameContains = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_get_bot_aliases
lexmodelbuildingservice_get_bot_aliases <- function(botName, nextToken = NULL, maxResults = NULL, nameContains = NULL) {
  # Describe the HTTP call for the GetBotAliases operation.
  operation <- new_operation(
    name = "GetBotAliases",
    http_method = "GET",
    http_path = "/bots/{botName}/aliases/",
    paginator = list()
  )
  req_input <- .lexmodelbuildingservice$get_bot_aliases_input(
    botName = botName, nextToken = nextToken,
    maxResults = maxResults, nameContains = nameContains
  )
  req_output <- .lexmodelbuildingservice$get_bot_aliases_output()
  client <- .lexmodelbuildingservice$service(get_config())
  # Build and dispatch the request; the response is the function's value.
  send_request(new_request(client, operation, req_input, req_output))
}
.lexmodelbuildingservice$operations$get_bot_aliases <- lexmodelbuildingservice_get_bot_aliases
#' Returns information about the association between an Amazon Lex bot and
#' a messaging platform
#'
#' Returns information about the association between an Amazon Lex bot and
#' a messaging platform.
#'
#' This operation requires permissions for the
#' `lex:GetBotChannelAssociation` action.
#'
#' @usage
#' lexmodelbuildingservice_get_bot_channel_association(name, botName,
#'   botAlias)
#'
#' @param name [required] The name of the association between the bot and the channel. The name is
#' case sensitive.
#' @param botName [required] The name of the Amazon Lex bot.
#' @param botAlias [required] An alias pointing to the specific version of the Amazon Lex bot to which
#' this association is being made.
#'
#' @section Request syntax:
#' ```
#' svc$get_bot_channel_association(
#'   name = "string",
#'   botName = "string",
#'   botAlias = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_get_bot_channel_association
lexmodelbuildingservice_get_bot_channel_association <- function(name, botName, botAlias) {
  # Describe the HTTP call for the GetBotChannelAssociation operation.
  operation <- new_operation(
    name = "GetBotChannelAssociation",
    http_method = "GET",
    http_path = "/bots/{botName}/aliases/{aliasName}/channels/{name}",
    paginator = list()
  )
  req_input <- .lexmodelbuildingservice$get_bot_channel_association_input(
    name = name, botName = botName, botAlias = botAlias
  )
  req_output <- .lexmodelbuildingservice$get_bot_channel_association_output()
  client <- .lexmodelbuildingservice$service(get_config())
  # Build and dispatch the request; the response is the function's value.
  send_request(new_request(client, operation, req_input, req_output))
}
.lexmodelbuildingservice$operations$get_bot_channel_association <- lexmodelbuildingservice_get_bot_channel_association
#' Returns a list of all of the channels associated with the specified bot
#'
#' Returns a list of all of the channels associated with the specified bot.
#'
#' The `GetBotChannelAssociations` operation requires permissions for the
#' `lex:GetBotChannelAssociations` action.
#'
#' @usage
#' lexmodelbuildingservice_get_bot_channel_associations(botName, botAlias,
#'   nextToken, maxResults, nameContains)
#'
#' @param botName [required] The name of the Amazon Lex bot in the association.
#' @param botAlias [required] An alias pointing to the specific version of the Amazon Lex bot to which
#' this association is being made.
#' @param nextToken A pagination token for fetching the next page of associations. If the
#' response to this call is truncated, Amazon Lex returns a pagination
#' token in the response. To fetch the next page of associations, specify
#' the pagination token in the next request.
#' @param maxResults The maximum number of associations to return in the response. The
#' default is 50.
#' @param nameContains Substring to match in channel association names. An association will be
#' returned if any part of its name matches the substring. For example,
#' \"xyz\" matches both \"xyzabc\" and \"abcxyz.\" To return all bot
#' channel associations, use a hyphen (\"-\") as the `nameContains`
#' parameter.
#'
#' @section Request syntax:
#' ```
#' svc$get_bot_channel_associations(
#'   botName = "string",
#'   botAlias = "string",
#'   nextToken = "string",
#'   maxResults = 123,
#'   nameContains = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_get_bot_channel_associations
lexmodelbuildingservice_get_bot_channel_associations <- function(botName, botAlias, nextToken = NULL, maxResults = NULL, nameContains = NULL) {
  # Describe the HTTP call for the GetBotChannelAssociations operation.
  operation <- new_operation(
    name = "GetBotChannelAssociations",
    http_method = "GET",
    http_path = "/bots/{botName}/aliases/{aliasName}/channels/",
    paginator = list()
  )
  req_input <- .lexmodelbuildingservice$get_bot_channel_associations_input(
    botName = botName, botAlias = botAlias, nextToken = nextToken,
    maxResults = maxResults, nameContains = nameContains
  )
  req_output <- .lexmodelbuildingservice$get_bot_channel_associations_output()
  client <- .lexmodelbuildingservice$service(get_config())
  # Build and dispatch the request; the response is the function's value.
  send_request(new_request(client, operation, req_input, req_output))
}
.lexmodelbuildingservice$operations$get_bot_channel_associations <- lexmodelbuildingservice_get_bot_channel_associations
#' Gets information about all of the versions of a bot
#'
#' Gets information about all of the versions of a bot.
#'
#' The `GetBotVersions` operation returns a `BotMetadata` object for each
#' version of a bot. For example, if a bot has three numbered versions, the
#' `GetBotVersions` operation returns four `BotMetadata` objects in the
#' response, one for each numbered version and one for the `$LATEST`
#' version.
#'
#' The `GetBotVersions` operation always returns at least one version, the
#' `$LATEST` version.
#'
#' This operation requires permissions for the `lex:GetBotVersions` action.
#'
#' @usage
#' lexmodelbuildingservice_get_bot_versions(name, nextToken, maxResults)
#'
#' @param name [required] The name of the bot for which versions should be returned.
#' @param nextToken A pagination token for fetching the next page of bot versions. If the
#' response to this call is truncated, Amazon Lex returns a pagination
#' token in the response. To fetch the next page of versions, specify the
#' pagination token in the next request.
#' @param maxResults The maximum number of bot versions to return in the response. The
#' default is 10.
#'
#' @section Request syntax:
#' ```
#' svc$get_bot_versions(
#'   name = "string",
#'   nextToken = "string",
#'   maxResults = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_get_bot_versions
lexmodelbuildingservice_get_bot_versions <- function(name, nextToken = NULL, maxResults = NULL) {
  # Describe the HTTP call for the GetBotVersions operation.
  operation <- new_operation(
    name = "GetBotVersions",
    http_method = "GET",
    http_path = "/bots/{name}/versions/",
    paginator = list()
  )
  req_input <- .lexmodelbuildingservice$get_bot_versions_input(
    name = name, nextToken = nextToken, maxResults = maxResults
  )
  req_output <- .lexmodelbuildingservice$get_bot_versions_output()
  client <- .lexmodelbuildingservice$service(get_config())
  # Build and dispatch the request; the response is the function's value.
  send_request(new_request(client, operation, req_input, req_output))
}
.lexmodelbuildingservice$operations$get_bot_versions <- lexmodelbuildingservice_get_bot_versions
#' Returns bot information as follows: - If you provide the nameContains
#' field, the response includes information for the $LATEST version of all
#' bots whose name contains the specified string
#'
#' Returns bot information as follows:
#'
#' -   If you provide the `nameContains` field, the response includes
#'     information for the `$LATEST` version of all bots whose name
#'     contains the specified string.
#'
#' -   If you don\'t specify the `nameContains` field, the operation
#'     returns information about the `$LATEST` version of all of your bots.
#'
#' This operation requires permission for the `lex:GetBots` action.
#'
#' @usage
#' lexmodelbuildingservice_get_bots(nextToken, maxResults, nameContains)
#'
#' @param nextToken A pagination token that fetches the next page of bots. If the response
#' to this call is truncated, Amazon Lex returns a pagination token in the
#' response. To fetch the next page of bots, specify the pagination token
#' in the next request.
#' @param maxResults The maximum number of bots to return in the response that the request
#' will return. The default is 10.
#' @param nameContains Substring to match in bot names. A bot will be returned if any part of
#' its name matches the substring. For example, \"xyz\" matches both
#' \"xyzabc\" and \"abcxyz.\"
#'
#' @section Request syntax:
#' ```
#' svc$get_bots(
#'   nextToken = "string",
#'   maxResults = 123,
#'   nameContains = "string"
#' )
#' ```
#'
#' @examples
#' # This example shows how to get a list of all of the bots in your account.
#' \donttest{svc$get_bots(
#'   maxResults = 5L,
#'   nextToken = ""
#' )}
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_get_bots
lexmodelbuildingservice_get_bots <- function(nextToken = NULL, maxResults = NULL, nameContains = NULL) {
  # Describe the HTTP call for the GetBots operation.
  operation <- new_operation(
    name = "GetBots",
    http_method = "GET",
    http_path = "/bots/",
    paginator = list()
  )
  req_input <- .lexmodelbuildingservice$get_bots_input(
    nextToken = nextToken, maxResults = maxResults, nameContains = nameContains
  )
  req_output <- .lexmodelbuildingservice$get_bots_output()
  client <- .lexmodelbuildingservice$service(get_config())
  # Build and dispatch the request; the response is the function's value.
  send_request(new_request(client, operation, req_input, req_output))
}
.lexmodelbuildingservice$operations$get_bots <- lexmodelbuildingservice_get_bots
#' Returns information about a built-in intent
#'
#' Returns information about a built-in intent.
#'
#' This operation requires permission for the `lex:GetBuiltinIntent`
#' action.
#'
#' @usage
#' lexmodelbuildingservice_get_builtin_intent(signature)
#'
#' @param signature [required] The unique identifier for a built-in intent. To find the signature for
#' an intent, see [Standard Built-in
#' Intents](https://developer.amazon.com/public/solutions/alexa/alexa-skills-kit/docs/built-in-intent-ref/standard-intents)
#' in the *Alexa Skills Kit*.
#'
#' @section Request syntax:
#' ```
#' svc$get_builtin_intent(
#'   signature = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_get_builtin_intent
lexmodelbuildingservice_get_builtin_intent <- function(signature) {
  # Describe the HTTP call for the GetBuiltinIntent operation.
  operation <- new_operation(
    name = "GetBuiltinIntent",
    http_method = "GET",
    http_path = "/builtins/intents/{signature}",
    paginator = list()
  )
  req_input <- .lexmodelbuildingservice$get_builtin_intent_input(signature = signature)
  req_output <- .lexmodelbuildingservice$get_builtin_intent_output()
  client <- .lexmodelbuildingservice$service(get_config())
  # Build and dispatch the request; the response is the function's value.
  send_request(new_request(client, operation, req_input, req_output))
}
.lexmodelbuildingservice$operations$get_builtin_intent <- lexmodelbuildingservice_get_builtin_intent
#' Gets a list of built-in intents that meet the specified criteria
#'
#' Gets a list of built-in intents that meet the specified criteria.
#'
#' This operation requires permission for the `lex:GetBuiltinIntents`
#' action.
#'
#' @usage
#' lexmodelbuildingservice_get_builtin_intents(locale, signatureContains,
#'   nextToken, maxResults)
#'
#' @param locale A list of locales that the intent supports.
#' @param signatureContains Substring to match in built-in intent signatures. An intent will be
#' returned if any part of its signature matches the substring. For
#' example, \"xyz\" matches both \"xyzabc\" and \"abcxyz.\" To find the
#' signature for an intent, see [Standard Built-in
#' Intents](https://developer.amazon.com/public/solutions/alexa/alexa-skills-kit/docs/built-in-intent-ref/standard-intents)
#' in the *Alexa Skills Kit*.
#' @param nextToken A pagination token that fetches the next page of intents. If this API
#' call is truncated, Amazon Lex returns a pagination token in the
#' response. To fetch the next page of intents, use the pagination token in
#' the next request.
#' @param maxResults The maximum number of intents to return in the response. The default is
#' 10.
#'
#' @section Request syntax:
#' ```
#' svc$get_builtin_intents(
#'   locale = "en-US"|"en-GB"|"de-DE",
#'   signatureContains = "string",
#'   nextToken = "string",
#'   maxResults = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_get_builtin_intents
lexmodelbuildingservice_get_builtin_intents <- function(locale = NULL, signatureContains = NULL, nextToken = NULL, maxResults = NULL) {
  # Describe the HTTP call for the GetBuiltinIntents operation.
  operation <- new_operation(
    name = "GetBuiltinIntents",
    http_method = "GET",
    http_path = "/builtins/intents/",
    paginator = list()
  )
  req_input <- .lexmodelbuildingservice$get_builtin_intents_input(
    locale = locale, signatureContains = signatureContains,
    nextToken = nextToken, maxResults = maxResults
  )
  req_output <- .lexmodelbuildingservice$get_builtin_intents_output()
  client <- .lexmodelbuildingservice$service(get_config())
  # Build and dispatch the request; the response is the function's value.
  send_request(new_request(client, operation, req_input, req_output))
}
.lexmodelbuildingservice$operations$get_builtin_intents <- lexmodelbuildingservice_get_builtin_intents
#' Gets a list of built-in slot types that meet the specified criteria
#'
#' Gets a list of built-in slot types that meet the specified criteria.
#'
#' For a list of built-in slot types, see [Slot Type
#' Reference](https://developer.amazon.com/public/solutions/alexa/alexa-skills-kit/docs/built-in-intent-ref/slot-type-reference)
#' in the *Alexa Skills Kit*.
#'
#' This operation requires permission for the `lex:GetBuiltInSlotTypes`
#' action.
#'
#' @usage
#' lexmodelbuildingservice_get_builtin_slot_types(locale,
#'   signatureContains, nextToken, maxResults)
#'
#' @param locale A list of locales that the slot type supports.
#' @param signatureContains Substring to match in built-in slot type signatures. A slot type will be
#' returned if any part of its signature matches the substring. For
#' example, \"xyz\" matches both \"xyzabc\" and \"abcxyz.\"
#' @param nextToken A pagination token that fetches the next page of slot types. If the
#' response to this API call is truncated, Amazon Lex returns a pagination
#' token in the response. To fetch the next page of slot types, specify the
#' pagination token in the next request.
#' @param maxResults The maximum number of slot types to return in the response. The default
#' is 10.
#'
#' @section Request syntax:
#' ```
#' svc$get_builtin_slot_types(
#'   locale = "en-US"|"en-GB"|"de-DE",
#'   signatureContains = "string",
#'   nextToken = "string",
#'   maxResults = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_get_builtin_slot_types
lexmodelbuildingservice_get_builtin_slot_types <- function(locale = NULL, signatureContains = NULL, nextToken = NULL, maxResults = NULL) {
  # Describe the HTTP call for the GetBuiltinSlotTypes operation.
  operation <- new_operation(
    name = "GetBuiltinSlotTypes",
    http_method = "GET",
    http_path = "/builtins/slottypes/",
    paginator = list()
  )
  req_input <- .lexmodelbuildingservice$get_builtin_slot_types_input(
    locale = locale, signatureContains = signatureContains,
    nextToken = nextToken, maxResults = maxResults
  )
  req_output <- .lexmodelbuildingservice$get_builtin_slot_types_output()
  client <- .lexmodelbuildingservice$service(get_config())
  # Build and dispatch the request; the response is the function's value.
  send_request(new_request(client, operation, req_input, req_output))
}
.lexmodelbuildingservice$operations$get_builtin_slot_types <- lexmodelbuildingservice_get_builtin_slot_types
#' Exports the contents of an Amazon Lex resource in a specified format
#'
#' Exports the contents of an Amazon Lex resource in a specified format.
#'
#' @usage
#' lexmodelbuildingservice_get_export(name, version, resourceType,
#'   exportType)
#'
#' @param name [required] The name of the bot to export.
#' @param version [required] The version of the bot to export.
#' @param resourceType [required] The type of resource to export.
#' @param exportType [required] The format of the exported data.
#'
#' @section Request syntax:
#' ```
#' svc$get_export(
#'   name = "string",
#'   version = "string",
#'   resourceType = "BOT"|"INTENT"|"SLOT_TYPE",
#'   exportType = "ALEXA_SKILLS_KIT"|"LEX"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_get_export
lexmodelbuildingservice_get_export <- function(name, version, resourceType, exportType) {
  # Describe the HTTP call for the GetExport operation; all parameters are
  # sent as query-string values against the /exports/ path.
  operation <- new_operation(
    name = "GetExport",
    http_method = "GET",
    http_path = "/exports/",
    paginator = list()
  )
  req_input <- .lexmodelbuildingservice$get_export_input(
    name = name, version = version,
    resourceType = resourceType, exportType = exportType
  )
  req_output <- .lexmodelbuildingservice$get_export_output()
  client <- .lexmodelbuildingservice$service(get_config())
  # Build and dispatch the request; the response is the function's value.
  send_request(new_request(client, operation, req_input, req_output))
}
.lexmodelbuildingservice$operations$get_export <- lexmodelbuildingservice_get_export
#' Gets information about an import job started with the StartImport
#' operation
#'
#' Gets information about an import job started with the `StartImport`
#' operation.
#'
#' @usage
#' lexmodelbuildingservice_get_import(importId)
#'
#' @param importId [required] The identifier of the import job information to return.
#'
#' @section Request syntax:
#' ```
#' svc$get_import(
#'   importId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_get_import
lexmodelbuildingservice_get_import <- function(importId) {
  # Describe the HTTP call for the GetImport operation.
  operation <- new_operation(
    name = "GetImport",
    http_method = "GET",
    http_path = "/imports/{importId}",
    paginator = list()
  )
  req_input <- .lexmodelbuildingservice$get_import_input(importId = importId)
  req_output <- .lexmodelbuildingservice$get_import_output()
  client <- .lexmodelbuildingservice$service(get_config())
  # Build and dispatch the request; the response is the function's value.
  send_request(new_request(client, operation, req_input, req_output))
}
.lexmodelbuildingservice$operations$get_import <- lexmodelbuildingservice_get_import
#' Returns information about an intent
#'
#' Returns information about an intent. In addition to the intent name, you
#' must specify the intent version.
#'
#' This operation requires permissions to perform the `lex:GetIntent`
#' action.
#'
#' @usage
#' lexmodelbuildingservice_get_intent(name, version)
#'
#' @param name [required] The name of the intent. The name is case sensitive.
#' @param version [required] The version of the intent.
#'
#' @section Request syntax:
#' ```
#' svc$get_intent(
#' name = "string",
#' version = "string"
#' )
#' ```
#'
#' @examples
#' # This example shows how to get information about an intent.
#' \donttest{svc$get_intent(
#' version = "$LATEST",
#' name = "DocOrderPizza"
#' )}
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_get_intent
lexmodelbuildingservice_get_intent <- function(name, version) {
  # Describe the GetIntent REST endpoint (no pagination).
  operation <- new_operation(
    name = "GetIntent",
    http_method = "GET",
    http_path = "/intents/{name}/versions/{version}",
    paginator = list()
  )
  # Marshal the call arguments and the expected response shape.
  op_input <- .lexmodelbuildingservice$get_intent_input(name = name, version = version)
  op_output <- .lexmodelbuildingservice$get_intent_output()
  # Build a client from the active configuration, then issue the request.
  client <- .lexmodelbuildingservice$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.lexmodelbuildingservice$operations$get_intent <- lexmodelbuildingservice_get_intent
#' Gets information about all of the versions of an intent
#'
#' Gets information about all of the versions of an intent.
#'
#' The `GetIntentVersions` operation returns an `IntentMetadata` object for
#' each version of an intent. For example, if an intent has three numbered
#' versions, the `GetIntentVersions` operation returns four
#' `IntentMetadata` objects in the response, one for each numbered version
#' and one for the `$LATEST` version.
#'
#' The `GetIntentVersions` operation always returns at least one version,
#' the `$LATEST` version.
#'
#' This operation requires permissions for the `lex:GetIntentVersions`
#' action.
#'
#' @usage
#' lexmodelbuildingservice_get_intent_versions(name, nextToken, maxResults)
#'
#' @param name [required] The name of the intent for which versions should be returned.
#' @param nextToken A pagination token for fetching the next page of intent versions. If the
#' response to this call is truncated, Amazon Lex returns a pagination
#' token in the response. To fetch the next page of versions, specify the
#' pagination token in the next request.
#' @param maxResults The maximum number of intent versions to return in the response. The
#' default is 10.
#'
#' @section Request syntax:
#' ```
#' svc$get_intent_versions(
#' name = "string",
#' nextToken = "string",
#' maxResults = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_get_intent_versions
lexmodelbuildingservice_get_intent_versions <- function(name, nextToken = NULL, maxResults = NULL) {
  # Describe the GetIntentVersions REST endpoint (no pagination).
  operation <- new_operation(
    name = "GetIntentVersions",
    http_method = "GET",
    http_path = "/intents/{name}/versions/",
    paginator = list()
  )
  # Marshal the call arguments and the expected response shape.
  op_input <- .lexmodelbuildingservice$get_intent_versions_input(name = name, nextToken = nextToken, maxResults = maxResults)
  op_output <- .lexmodelbuildingservice$get_intent_versions_output()
  # Build a client from the active configuration, then issue the request.
  client <- .lexmodelbuildingservice$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.lexmodelbuildingservice$operations$get_intent_versions <- lexmodelbuildingservice_get_intent_versions
#' Returns intent information as follows: - If you specify the nameContains
#' field, returns the $LATEST version of all intents that contain the
#' specified string
#'
#' Returns intent information as follows:
#'
#' - If you specify the `nameContains` field, returns the `$LATEST`
#' version of all intents that contain the specified string.
#'
#' - If you don\'t specify the `nameContains` field, returns information
#' about the `$LATEST` version of all intents.
#'
#' The operation requires permission for the `lex:GetIntents` action.
#'
#' @usage
#' lexmodelbuildingservice_get_intents(nextToken, maxResults, nameContains)
#'
#' @param nextToken A pagination token that fetches the next page of intents. If the
#' response to this API call is truncated, Amazon Lex returns a pagination
#' token in the response. To fetch the next page of intents, specify the
#' pagination token in the next request.
#' @param maxResults The maximum number of intents to return in the response. The default is
#' 10.
#' @param nameContains Substring to match in intent names. An intent will be returned if any
#' part of its name matches the substring. For example, \"xyz\" matches
#' both \"xyzabc\" and \"abcxyz.\"
#'
#' @section Request syntax:
#' ```
#' svc$get_intents(
#' nextToken = "string",
#' maxResults = 123,
#' nameContains = "string"
#' )
#' ```
#'
#' @examples
#' # This example shows how to get a list of all of the intents in your
#' # account.
#' \donttest{svc$get_intents(
#' maxResults = 10L,
#' nextToken = ""
#' )}
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_get_intents
lexmodelbuildingservice_get_intents <- function(nextToken = NULL, maxResults = NULL, nameContains = NULL) {
  # Describe the GetIntents REST endpoint (no pagination).
  operation <- new_operation(
    name = "GetIntents",
    http_method = "GET",
    http_path = "/intents/",
    paginator = list()
  )
  # Marshal the call arguments and the expected response shape.
  op_input <- .lexmodelbuildingservice$get_intents_input(nextToken = nextToken, maxResults = maxResults, nameContains = nameContains)
  op_output <- .lexmodelbuildingservice$get_intents_output()
  # Build a client from the active configuration, then issue the request.
  client <- .lexmodelbuildingservice$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.lexmodelbuildingservice$operations$get_intents <- lexmodelbuildingservice_get_intents
#' Returns information about a specific version of a slot type
#'
#' Returns information about a specific version of a slot type. In addition
#' to specifying the slot type name, you must specify the slot type
#' version.
#'
#' This operation requires permissions for the `lex:GetSlotType` action.
#'
#' @usage
#' lexmodelbuildingservice_get_slot_type(name, version)
#'
#' @param name [required] The name of the slot type. The name is case sensitive.
#' @param version [required] The version of the slot type.
#'
#' @section Request syntax:
#' ```
#' svc$get_slot_type(
#' name = "string",
#' version = "string"
#' )
#' ```
#'
#' @examples
#' # This example shows how to get information about a slot type.
#' \donttest{svc$get_slot_type(
#' version = "$LATEST",
#' name = "DocPizzaCrustType"
#' )}
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_get_slot_type
lexmodelbuildingservice_get_slot_type <- function(name, version) {
  # Describe the GetSlotType REST endpoint (no pagination).
  operation <- new_operation(
    name = "GetSlotType",
    http_method = "GET",
    http_path = "/slottypes/{name}/versions/{version}",
    paginator = list()
  )
  # Marshal the call arguments and the expected response shape.
  op_input <- .lexmodelbuildingservice$get_slot_type_input(name = name, version = version)
  op_output <- .lexmodelbuildingservice$get_slot_type_output()
  # Build a client from the active configuration, then issue the request.
  client <- .lexmodelbuildingservice$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.lexmodelbuildingservice$operations$get_slot_type <- lexmodelbuildingservice_get_slot_type
#' Gets information about all versions of a slot type
#'
#' Gets information about all versions of a slot type.
#'
#' The `GetSlotTypeVersions` operation returns a `SlotTypeMetadata` object
#' for each version of a slot type. For example, if a slot type has three
#' numbered versions, the `GetSlotTypeVersions` operation returns four
#' `SlotTypeMetadata` objects in the response, one for each numbered
#' version and one for the `$LATEST` version.
#'
#' The `GetSlotTypeVersions` operation always returns at least one version,
#' the `$LATEST` version.
#'
#' This operation requires permissions for the `lex:GetSlotTypeVersions`
#' action.
#'
#' @usage
#' lexmodelbuildingservice_get_slot_type_versions(name, nextToken,
#' maxResults)
#'
#' @param name [required] The name of the slot type for which versions should be returned.
#' @param nextToken A pagination token for fetching the next page of slot type versions. If
#' the response to this call is truncated, Amazon Lex returns a pagination
#' token in the response. To fetch the next page of versions, specify the
#' pagination token in the next request.
#' @param maxResults The maximum number of slot type versions to return in the response. The
#' default is 10.
#'
#' @section Request syntax:
#' ```
#' svc$get_slot_type_versions(
#' name = "string",
#' nextToken = "string",
#' maxResults = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_get_slot_type_versions
lexmodelbuildingservice_get_slot_type_versions <- function(name, nextToken = NULL, maxResults = NULL) {
  # Describe the GetSlotTypeVersions REST endpoint (no pagination).
  operation <- new_operation(
    name = "GetSlotTypeVersions",
    http_method = "GET",
    http_path = "/slottypes/{name}/versions/",
    paginator = list()
  )
  # Marshal the call arguments and the expected response shape.
  op_input <- .lexmodelbuildingservice$get_slot_type_versions_input(name = name, nextToken = nextToken, maxResults = maxResults)
  op_output <- .lexmodelbuildingservice$get_slot_type_versions_output()
  # Build a client from the active configuration, then issue the request.
  client <- .lexmodelbuildingservice$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.lexmodelbuildingservice$operations$get_slot_type_versions <- lexmodelbuildingservice_get_slot_type_versions
#' Returns slot type information as follows: - If you specify the
#' nameContains field, returns the $LATEST version of all slot types that
#' contain the specified string
#'
#' Returns slot type information as follows:
#'
#' - If you specify the `nameContains` field, returns the `$LATEST`
#' version of all slot types that contain the specified string.
#'
#' - If you don\'t specify the `nameContains` field, returns information
#' about the `$LATEST` version of all slot types.
#'
#' The operation requires permission for the `lex:GetSlotTypes` action.
#'
#' @usage
#' lexmodelbuildingservice_get_slot_types(nextToken, maxResults,
#' nameContains)
#'
#' @param nextToken A pagination token that fetches the next page of slot types. If the
#' response to this API call is truncated, Amazon Lex returns a pagination
#' token in the response. To fetch the next page of slot types, specify the
#' pagination token in the next request.
#' @param maxResults The maximum number of slot types to return in the response. The default
#' is 10.
#' @param nameContains Substring to match in slot type names. A slot type will be returned if
#' any part of its name matches the substring. For example, \"xyz\" matches
#' both \"xyzabc\" and \"abcxyz.\"
#'
#' @section Request syntax:
#' ```
#' svc$get_slot_types(
#' nextToken = "string",
#' maxResults = 123,
#' nameContains = "string"
#' )
#' ```
#'
#' @examples
#' # This example shows how to get a list of all of the slot types in your
#' # account.
#' \donttest{svc$get_slot_types(
#' maxResults = 10L,
#' nextToken = ""
#' )}
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_get_slot_types
lexmodelbuildingservice_get_slot_types <- function(nextToken = NULL, maxResults = NULL, nameContains = NULL) {
  # Describe the GetSlotTypes REST endpoint (no pagination).
  operation <- new_operation(
    name = "GetSlotTypes",
    http_method = "GET",
    http_path = "/slottypes/",
    paginator = list()
  )
  # Marshal the call arguments and the expected response shape.
  op_input <- .lexmodelbuildingservice$get_slot_types_input(nextToken = nextToken, maxResults = maxResults, nameContains = nameContains)
  op_output <- .lexmodelbuildingservice$get_slot_types_output()
  # Build a client from the active configuration, then issue the request.
  client <- .lexmodelbuildingservice$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.lexmodelbuildingservice$operations$get_slot_types <- lexmodelbuildingservice_get_slot_types
#' Use the GetUtterancesView operation to get information about the
#' utterances that your users have made to your bot
#'
#' Use the `GetUtterancesView` operation to get information about the
#' utterances that your users have made to your bot. You can use this list
#' to tune the utterances that your bot responds to.
#'
#' For example, say that you have created a bot to order flowers. After
#' your users have used your bot for a while, use the `GetUtterancesView`
#' operation to see the requests that they have made and whether they have
#' been successful. You might find that the utterance \"I want flowers\" is
#' not being recognized. You could add this utterance to the `OrderFlowers`
#' intent so that your bot recognizes that utterance.
#'
#' After you publish a new version of a bot, you can get information about
#' the old version and the new so that you can compare the performance
#' across the two versions.
#'
#' Utterance statistics are generated once a day. Data is available for the
#' last 15 days. You can request information for up to 5 versions of your
#' bot in each request. Amazon Lex returns the most frequent utterances
#' received by the bot in the last 15 days. The response contains
#' information about a maximum of 100 utterances for each version.
#'
#' If you set `childDirected` field to true when you created your bot, or
#' if you opted out of participating in improving Amazon Lex, utterances
#' are not available.
#'
#' This operation requires permissions for the `lex:GetUtterancesView`
#' action.
#'
#' @usage
#' lexmodelbuildingservice_get_utterances_view(botName, botVersions,
#' statusType)
#'
#' @param botName [required] The name of the bot for which utterance information should be returned.
#' @param botVersions [required] An array of bot versions for which utterance information should be
#' returned. The limit is 5 versions per request.
#' @param statusType [required] To return utterances that were recognized and handled, use `Detected`.
#' To return utterances that were not recognized, use `Missed`.
#'
#' @section Request syntax:
#' ```
#' svc$get_utterances_view(
#' botName = "string",
#' botVersions = list(
#' "string"
#' ),
#' statusType = "Detected"|"Missed"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_get_utterances_view
lexmodelbuildingservice_get_utterances_view <- function(botName, botVersions, statusType) {
  # Describe the GetUtterancesView REST endpoint (no pagination).
  # NOTE(review): the lowercase "{botname}" placeholder mirrors the AWS URI
  # member name and is intentional.
  operation <- new_operation(
    name = "GetUtterancesView",
    http_method = "GET",
    http_path = "/bots/{botname}/utterances?view=aggregation",
    paginator = list()
  )
  # Marshal the call arguments and the expected response shape.
  op_input <- .lexmodelbuildingservice$get_utterances_view_input(botName = botName, botVersions = botVersions, statusType = statusType)
  op_output <- .lexmodelbuildingservice$get_utterances_view_output()
  # Build a client from the active configuration, then issue the request.
  client <- .lexmodelbuildingservice$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.lexmodelbuildingservice$operations$get_utterances_view <- lexmodelbuildingservice_get_utterances_view
#' Creates an Amazon Lex conversational bot or replaces an existing bot
#'
#' Creates an Amazon Lex conversational bot or replaces an existing bot.
#' When you create or update a bot you are only required to specify a name,
#' a locale, and whether the bot is directed toward children under age 13.
#' You can use this to add intents later, or to remove intents from an
#' existing bot. When you create a bot with the minimum information, the
#' bot is created or updated but Amazon Lex returns the response
#' `FAILED`. You can build the bot after you add one or more intents. For
#' more information about Amazon Lex bots, see how-it-works.
#'
#' If you specify the name of an existing bot, the fields in the request
#' replace the existing values in the `$LATEST` version of the bot. Amazon
#' Lex removes any fields that you don\'t provide values for in the
#' request, except for the `idleTTLInSeconds` and `privacySettings` fields,
#' which are set to their default values. If you don\'t specify values for
#' required fields, Amazon Lex throws an exception.
#'
#' This operation requires permissions for the `lex:PutBot` action. For
#' more information, see security-iam.
#'
#' @usage
#' lexmodelbuildingservice_put_bot(name, description, intents,
#' clarificationPrompt, abortStatement, idleSessionTTLInSeconds, voiceId,
#' checksum, processBehavior, locale, childDirected, detectSentiment,
#' createVersion)
#'
#' @param name [required] The name of the bot. The name is *not* case sensitive.
#' @param description A description of the bot.
#' @param intents An array of `Intent` objects. Each intent represents a command that a
#' user can express. For example, a pizza ordering bot might support an
#' OrderPizza intent. For more information, see how-it-works.
#' @param clarificationPrompt When Amazon Lex doesn\'t understand the user\'s intent, it uses this
#' message to get clarification. To specify how many times Amazon Lex
#' should repeat the clarification prompt, use the `maxAttempts` field. If
#' Amazon Lex still doesn\'t understand, it sends the message in the
#' `abortStatement` field.
#'
#' When you create a clarification prompt, make sure that it suggests the
#' correct response from the user. for example, for a bot that orders pizza
#' and drinks, you might create this clarification prompt: \"What would you
#' like to do? You can say \'Order a pizza\' or \'Order a drink.\'\"
#'
#' If you have defined a fallback intent, it will be invoked if the
#' clarification prompt is repeated the number of times defined in the
#' `maxAttempts` field. For more information, see
#' [AMAZON.FallbackIntent](https://docs.aws.amazon.com/lex/latest/dg/built-in-intent-fallback.html).
#'
#' If you don\'t define a clarification prompt, at runtime Amazon Lex will
#' return a 400 Bad Request exception in three cases:
#'
#' - Follow-up prompt - When the user responds to a follow-up prompt but
#' does not provide an intent. For example, in response to a follow-up
#' prompt that says \"Would you like anything else today?\" the user
#' says \"Yes.\" Amazon Lex will return a 400 Bad Request exception
#' because it does not have a clarification prompt to send to the user
#' to get an intent.
#'
#' - Lambda function - When using a Lambda function, you return an
#' `ElicitIntent` dialog type. Since Amazon Lex does not have a
#' clarification prompt to get an intent from the user, it returns a
#' 400 Bad Request exception.
#'
#' - PutSession operation - When using the `PutSession` operation, you
#' send an `ElicitIntent` dialog type. Since Amazon Lex does not have a
#' clarification prompt to get an intent from the user, it returns a
#' 400 Bad Request exception.
#' @param abortStatement When Amazon Lex can\'t understand the user\'s input in context, it tries
#' to elicit the information a few times. After that, Amazon Lex sends the
#' message defined in `abortStatement` to the user, and then aborts the
#' conversation. To set the number of retries, use the
#' `valueElicitationPrompt` field for the slot type.
#'
#' For example, in a pizza ordering bot, Amazon Lex might ask a user \"What
#' type of crust would you like?\" If the user\'s response is not one of
#' the expected responses (for example, \"thin crust, \"deep dish,\" etc.),
#' Amazon Lex tries to elicit a correct response a few more times.
#'
#' For example, in a pizza ordering application, `OrderPizza` might be one
#' of the intents. This intent might require the `CrustType` slot. You
#' specify the `valueElicitationPrompt` field when you create the
#' `CrustType` slot.
#'
#' If you have defined a fallback intent the abort statement will not be
#' sent to the user, the fallback intent is used instead. For more
#' information, see
#' [AMAZON.FallbackIntent](https://docs.aws.amazon.com/lex/latest/dg/built-in-intent-fallback.html).
#' @param idleSessionTTLInSeconds The maximum time in seconds that Amazon Lex retains the data gathered in
#' a conversation.
#'
#' A user interaction session remains active for the amount of time
#' specified. If no conversation occurs during this time, the session
#' expires and Amazon Lex deletes any data provided before the timeout.
#'
#' For example, suppose that a user chooses the OrderPizza intent, but gets
#' sidetracked halfway through placing an order. If the user doesn\'t
#' complete the order within the specified time, Amazon Lex discards the
#' slot information that it gathered, and the user must start over.
#'
#' If you don\'t include the `idleSessionTTLInSeconds` element in a
#' `PutBot` operation request, Amazon Lex uses the default value. This is
#' also true if the request replaces an existing bot.
#'
#' The default is 300 seconds (5 minutes).
#' @param voiceId The Amazon Polly voice ID that you want Amazon Lex to use for voice
#' interactions with the user. The locale configured for the voice must
#' match the locale of the bot. For more information, see [Voices in Amazon
#' Polly](https://docs.aws.amazon.com/polly/latest/dg/voicelist.html) in
#' the *Amazon Polly Developer Guide*.
#' @param checksum Identifies a specific revision of the `$LATEST` version.
#'
#' When you create a new bot, leave the `checksum` field blank. If you
#' specify a checksum you get a `BadRequestException` exception.
#'
#' When you want to update a bot, set the `checksum` field to the checksum
#' of the most recent revision of the `$LATEST` version. If you don\'t
#' specify the ` checksum` field, or if the checksum does not match the
#' `$LATEST` version, you get a `PreconditionFailedException` exception.
#' @param processBehavior If you set the `processBehavior` element to `BUILD`, Amazon Lex builds
#' the bot so that it can be run. If you set the element to `SAVE` Amazon
#' Lex saves the bot, but doesn\'t build it.
#'
#' If you don\'t specify this value, the default value is `BUILD`.
#' @param locale [required] Specifies the target locale for the bot. Any intent used in the bot must
#' be compatible with the locale of the bot.
#'
#' The default is `en-US`.
#' @param childDirected [required] For each Amazon Lex bot created with the Amazon Lex Model Building
#' Service, you must specify whether your use of Amazon Lex is related to a
#' website, program, or other application that is directed or targeted, in
#' whole or in part, to children under age 13 and subject to the
#' Children\'s Online Privacy Protection Act (COPPA) by specifying `true`
#' or `false` in the `childDirected` field. By specifying `true` in the
#' `childDirected` field, you confirm that your use of Amazon Lex **is**
#' related to a website, program, or other application that is directed or
#' targeted, in whole or in part, to children under age 13 and subject to
#' COPPA. By specifying `false` in the `childDirected` field, you confirm
#' that your use of Amazon Lex **is not** related to a website, program, or
#' other application that is directed or targeted, in whole or in part, to
#' children under age 13 and subject to COPPA. You may not specify a
#' default value for the `childDirected` field that does not accurately
#' reflect whether your use of Amazon Lex is related to a website, program,
#' or other application that is directed or targeted, in whole or in part,
#' to children under age 13 and subject to COPPA.
#'
#' If your use of Amazon Lex relates to a website, program, or other
#' application that is directed in whole or in part, to children under age
#' 13, you must obtain any required verifiable parental consent under
#' COPPA. For information regarding the use of Amazon Lex in connection
#' with websites, programs, or other applications that are directed or
#' targeted, in whole or in part, to children under age 13, see the [Amazon
#' Lex FAQ.](https://aws.amazon.com/lex/faqs#data-security)
#' @param detectSentiment When set to `true` user utterances are sent to Amazon Comprehend for
#' sentiment analysis. If you don\'t specify `detectSentiment`, the default
#' is `false`.
#' @param createVersion When set to `true` a new numbered version of the bot is created. This is
#' the same as calling the `CreateBotVersion` operation. If you don\'t
#' specify `createVersion`, the default is `false`.
#'
#' @section Request syntax:
#' ```
#' svc$put_bot(
#' name = "string",
#' description = "string",
#' intents = list(
#' list(
#' intentName = "string",
#' intentVersion = "string"
#' )
#' ),
#' clarificationPrompt = list(
#' messages = list(
#' list(
#' contentType = "PlainText"|"SSML"|"CustomPayload",
#' content = "string",
#' groupNumber = 123
#' )
#' ),
#' maxAttempts = 123,
#' responseCard = "string"
#' ),
#' abortStatement = list(
#' messages = list(
#' list(
#' contentType = "PlainText"|"SSML"|"CustomPayload",
#' content = "string",
#' groupNumber = 123
#' )
#' ),
#' responseCard = "string"
#' ),
#' idleSessionTTLInSeconds = 123,
#' voiceId = "string",
#' checksum = "string",
#' processBehavior = "SAVE"|"BUILD",
#' locale = "en-US"|"en-GB"|"de-DE",
#' childDirected = TRUE|FALSE,
#' detectSentiment = TRUE|FALSE,
#' createVersion = TRUE|FALSE
#' )
#' ```
#'
#' @examples
#' # This example shows how to create a bot for ordering pizzas.
#' \donttest{svc$put_bot(
#' name = "DocOrderPizzaBot",
#' abortStatement = list(
#' messages = list(
#' list(
#' content = "I don't understand. Can you try again?",
#' contentType = "PlainText"
#' ),
#' list(
#' content = "I'm sorry, I don't understand.",
#' contentType = "PlainText"
#' )
#' )
#' ),
#' childDirected = TRUE,
#' clarificationPrompt = list(
#' maxAttempts = 1L,
#' messages = list(
#' list(
#'       content = "I'm sorry, I didn't hear that. Can you repeat what you just said?",
#' contentType = "PlainText"
#' ),
#' list(
#' content = "Can you say that again?",
#' contentType = "PlainText"
#' )
#' )
#' ),
#' description = "Orders a pizza from a local pizzeria.",
#' idleSessionTTLInSeconds = 300L,
#' intents = list(
#' list(
#' intentName = "DocOrderPizza",
#' intentVersion = "$LATEST"
#' )
#' ),
#' locale = "en-US",
#' processBehavior = "SAVE"
#' )}
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_put_bot
lexmodelbuildingservice_put_bot <- function(name, description = NULL, intents = NULL, clarificationPrompt = NULL, abortStatement = NULL, idleSessionTTLInSeconds = NULL, voiceId = NULL, checksum = NULL, processBehavior = NULL, locale, childDirected, detectSentiment = NULL, createVersion = NULL) {
  # Describe the PutBot REST endpoint; PUT always targets the $LATEST version.
  operation <- new_operation(
    name = "PutBot",
    http_method = "PUT",
    http_path = "/bots/{name}/versions/$LATEST",
    paginator = list()
  )
  # Marshal the call arguments and the expected response shape.
  op_input <- .lexmodelbuildingservice$put_bot_input(name = name, description = description, intents = intents, clarificationPrompt = clarificationPrompt, abortStatement = abortStatement, idleSessionTTLInSeconds = idleSessionTTLInSeconds, voiceId = voiceId, checksum = checksum, processBehavior = processBehavior, locale = locale, childDirected = childDirected, detectSentiment = detectSentiment, createVersion = createVersion)
  op_output <- .lexmodelbuildingservice$put_bot_output()
  # Build a client from the active configuration, then issue the request.
  client <- .lexmodelbuildingservice$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.lexmodelbuildingservice$operations$put_bot <- lexmodelbuildingservice_put_bot
#' Creates an alias for the specified version of the bot or replaces an
#' alias for the specified bot
#'
#' Creates an alias for the specified version of the bot or replaces an
#' alias for the specified bot. To change the version of the bot that the
#' alias points to, replace the alias. For more information about aliases,
#' see versioning-aliases.
#'
#' This operation requires permissions for the `lex:PutBotAlias` action.
#'
#' @usage
#' lexmodelbuildingservice_put_bot_alias(name, description, botVersion,
#' botName, checksum, conversationLogs)
#'
#' @param name [required] The name of the alias. The name is *not* case sensitive.
#' @param description A description of the alias.
#' @param botVersion [required] The version of the bot.
#' @param botName [required] The name of the bot.
#' @param checksum Identifies a specific revision of the `$LATEST` version.
#'
#' When you create a new bot alias, leave the `checksum` field blank. If
#' you specify a checksum you get a `BadRequestException` exception.
#'
#' When you want to update a bot alias, set the `checksum` field to the
#' checksum of the most recent revision of the `$LATEST` version. If you
#' don\'t specify the ` checksum` field, or if the checksum does not match
#' the `$LATEST` version, you get a `PreconditionFailedException`
#' exception.
#' @param conversationLogs Settings for conversation logs for the alias.
#'
#' @section Request syntax:
#' ```
#' svc$put_bot_alias(
#' name = "string",
#' description = "string",
#' botVersion = "string",
#' botName = "string",
#' checksum = "string",
#' conversationLogs = list(
#' logSettings = list(
#' list(
#' logType = "AUDIO"|"TEXT",
#' destination = "CLOUDWATCH_LOGS"|"S3",
#' kmsKeyArn = "string",
#' resourceArn = "string"
#' )
#' ),
#' iamRoleArn = "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_put_bot_alias
lexmodelbuildingservice_put_bot_alias <- function(name, description = NULL, botVersion, botName, checksum = NULL, conversationLogs = NULL) {
  # Describe the PutBotAlias REST endpoint (no pagination).
  operation <- new_operation(
    name = "PutBotAlias",
    http_method = "PUT",
    http_path = "/bots/{botName}/aliases/{name}",
    paginator = list()
  )
  # Marshal the call arguments and the expected response shape.
  op_input <- .lexmodelbuildingservice$put_bot_alias_input(name = name, description = description, botVersion = botVersion, botName = botName, checksum = checksum, conversationLogs = conversationLogs)
  op_output <- .lexmodelbuildingservice$put_bot_alias_output()
  # Build a client from the active configuration, then issue the request.
  client <- .lexmodelbuildingservice$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.lexmodelbuildingservice$operations$put_bot_alias <- lexmodelbuildingservice_put_bot_alias
#' Creates an intent or replaces an existing intent
#'
#' Creates an intent or replaces an existing intent.
#'
#' To define the interaction between the user and your bot, you use one or
#' more intents. For a pizza ordering bot, for example, you would create an
#' `OrderPizza` intent.
#'
#' To create an intent or replace an existing intent, you must provide the
#' following:
#'
#' - Intent name. For example, `OrderPizza`.
#'
#' - Sample utterances. For example, \"Can I order a pizza, please.\" and
#' \"I want to order a pizza.\"
#'
#' - Information to be gathered. You specify slot types for the
#' information that your bot will request from the user. You can
#' specify standard slot types, such as a date or a time, or custom
#' slot types such as the size and crust of a pizza.
#'
#' - How the intent will be fulfilled. You can provide a Lambda function
#' or configure the intent to return the intent information to the
#' client application. If you use a Lambda function, when all of the
#' intent information is available, Amazon Lex invokes your Lambda
#' function. If you configure your intent to return the intent
#' information to the client application.
#'
#' You can specify other optional information in the request, such as:
#'
#' - A confirmation prompt to ask the user to confirm an intent. For
#' example, \"Shall I order your pizza?\"
#'
#' - A conclusion statement to send to the user after the intent has been
#' fulfilled. For example, \"I placed your pizza order.\"
#'
#' - A follow-up prompt that asks the user for additional activity. For
#' example, asking \"Do you want to order a drink with your pizza?\"
#'
#' If you specify an existing intent name to update the intent, Amazon Lex
#' replaces the values in the `$LATEST` version of the intent with the
#' values in the request. Amazon Lex removes fields that you don\'t provide
#' in the request. If you don\'t specify the required fields, Amazon Lex
#' throws an exception. When you update the `$LATEST` version of an intent,
#' the `status` field of any bot that uses the `$LATEST` version of the
#' intent is set to `NOT_BUILT`.
#'
#' For more information, see how-it-works.
#'
#' This operation requires permissions for the `lex:PutIntent` action.
#'
#' @usage
#' lexmodelbuildingservice_put_intent(name, description, slots,
#' sampleUtterances, confirmationPrompt, rejectionStatement,
#' followUpPrompt, conclusionStatement, dialogCodeHook,
#' fulfillmentActivity, parentIntentSignature, checksum, createVersion)
#'
#' @param name [required] The name of the intent. The name is *not* case sensitive.
#'
#' The name can\'t match a built-in intent name, or a built-in intent name
#' with \"AMAZON.\" removed. For example, because there is a built-in
#' intent called `AMAZON.HelpIntent`, you can\'t create a custom intent
#' called `HelpIntent`.
#'
#' For a list of built-in intents, see [Standard Built-in
#' Intents](https://developer.amazon.com/public/solutions/alexa/alexa-skills-kit/docs/built-in-intent-ref/standard-intents)
#' in the *Alexa Skills Kit*.
#' @param description A description of the intent.
#' @param slots An array of intent slots. At runtime, Amazon Lex elicits required slot
#' values from the user using prompts defined in the slots. For more
#' information, see how-it-works.
#' @param sampleUtterances An array of utterances (strings) that a user might say to signal the
#' intent. For example, \"I want \{PizzaSize\} pizza\", \"Order \{Quantity\}
#' \{PizzaSize\} pizzas\".
#'
#' In each utterance, a slot name is enclosed in curly braces.
#' @param confirmationPrompt Prompts the user to confirm the intent. This question should have a yes
#' or no answer.
#'
#' Amazon Lex uses this prompt to ensure that the user acknowledges that
#' the intent is ready for fulfillment. For example, with the `OrderPizza`
#' intent, you might want to confirm that the order is correct before
#' placing it. For other intents, such as intents that simply respond to
#' user questions, you might not need to ask the user for confirmation
#' before providing the information.
#'
#' You must provide both the `rejectionStatement` and the
#' `confirmationPrompt`, or neither.
#' @param rejectionStatement When the user answers \"no\" to the question defined in
#' `confirmationPrompt`, Amazon Lex responds with this statement to
#' acknowledge that the intent was canceled.
#'
#' You must provide both the `rejectionStatement` and the
#' `confirmationPrompt`, or neither.
#' @param followUpPrompt Amazon Lex uses this prompt to solicit additional activity after
#' fulfilling an intent. For example, after the `OrderPizza` intent is
#' fulfilled, you might prompt the user to order a drink.
#'
#' The action that Amazon Lex takes depends on the user\'s response, as
#' follows:
#'
#' - If the user says \"Yes\" it responds with the clarification prompt
#' that is configured for the bot.
#'
#' - If the user says \"Yes\" and continues with an utterance that
#' triggers an intent it starts a conversation for the intent.
#'
#' - If the user says \"No\" it responds with the rejection statement
#' configured for the follow-up prompt.
#'
#' - If it doesn\'t recognize the utterance it repeats the follow-up
#' prompt again.
#'
#' The `followUpPrompt` field and the `conclusionStatement` field are
#' mutually exclusive. You can specify only one.
#' @param conclusionStatement The statement that you want Amazon Lex to convey to the user after the
#' intent is successfully fulfilled by the Lambda function.
#'
#' This element is relevant only if you provide a Lambda function in the
#' `fulfillmentActivity`. If you return the intent to the client
#' application, you can\'t specify this element.
#'
#' The `followUpPrompt` and `conclusionStatement` are mutually exclusive.
#' You can specify only one.
#' @param dialogCodeHook Specifies a Lambda function to invoke for each user input. You can
#' invoke this Lambda function to personalize user interaction.
#'
#' For example, suppose your bot determines that the user is John. Your
#' Lambda function might retrieve John\'s information from a backend
#' database and prepopulate some of the values. For example, if you find
#' that John is gluten intolerant, you might set the corresponding intent
#' slot, `GlutenIntolerant`, to true. You might find John\'s phone number
#' and set the corresponding session attribute.
#' @param fulfillmentActivity Required. Describes how the intent is fulfilled. For example, after a
#' user provides all of the information for a pizza order,
#' `fulfillmentActivity` defines how the bot places an order with a local
#' pizza store.
#'
#' You might configure Amazon Lex to return all of the intent information
#' to the client application, or direct it to invoke a Lambda function that
#' can process the intent (for example, place an order with a pizzeria).
#' @param parentIntentSignature A unique identifier for the built-in intent to base this intent on. To
#' find the signature for an intent, see [Standard Built-in
#' Intents](https://developer.amazon.com/public/solutions/alexa/alexa-skills-kit/docs/built-in-intent-ref/standard-intents)
#' in the *Alexa Skills Kit*.
#' @param checksum Identifies a specific revision of the `$LATEST` version.
#'
#' When you create a new intent, leave the `checksum` field blank. If you
#' specify a checksum you get a `BadRequestException` exception.
#'
#' When you want to update an intent, set the `checksum` field to the
#' checksum of the most recent revision of the `$LATEST` version. If you
#' don\'t specify the `checksum` field, or if the checksum does not match
#' the `$LATEST` version, you get a `PreconditionFailedException`
#' exception.
#' @param createVersion When set to `true` a new numbered version of the intent is created. This
#' is the same as calling the `CreateIntentVersion` operation. If you do
#' not specify `createVersion`, the default is `false`.
#'
#' @section Request syntax:
#' ```
#' svc$put_intent(
#' name = "string",
#' description = "string",
#' slots = list(
#' list(
#' name = "string",
#' description = "string",
#' slotConstraint = "Required"|"Optional",
#' slotType = "string",
#' slotTypeVersion = "string",
#' valueElicitationPrompt = list(
#' messages = list(
#' list(
#' contentType = "PlainText"|"SSML"|"CustomPayload",
#' content = "string",
#' groupNumber = 123
#' )
#' ),
#' maxAttempts = 123,
#' responseCard = "string"
#' ),
#' priority = 123,
#' sampleUtterances = list(
#' "string"
#' ),
#' responseCard = "string",
#' obfuscationSetting = "NONE"|"DEFAULT_OBFUSCATION"
#' )
#' ),
#' sampleUtterances = list(
#' "string"
#' ),
#' confirmationPrompt = list(
#' messages = list(
#' list(
#' contentType = "PlainText"|"SSML"|"CustomPayload",
#' content = "string",
#' groupNumber = 123
#' )
#' ),
#' maxAttempts = 123,
#' responseCard = "string"
#' ),
#' rejectionStatement = list(
#' messages = list(
#' list(
#' contentType = "PlainText"|"SSML"|"CustomPayload",
#' content = "string",
#' groupNumber = 123
#' )
#' ),
#' responseCard = "string"
#' ),
#' followUpPrompt = list(
#' prompt = list(
#' messages = list(
#' list(
#' contentType = "PlainText"|"SSML"|"CustomPayload",
#' content = "string",
#' groupNumber = 123
#' )
#' ),
#' maxAttempts = 123,
#' responseCard = "string"
#' ),
#' rejectionStatement = list(
#' messages = list(
#' list(
#' contentType = "PlainText"|"SSML"|"CustomPayload",
#' content = "string",
#' groupNumber = 123
#' )
#' ),
#' responseCard = "string"
#' )
#' ),
#' conclusionStatement = list(
#' messages = list(
#' list(
#' contentType = "PlainText"|"SSML"|"CustomPayload",
#' content = "string",
#' groupNumber = 123
#' )
#' ),
#' responseCard = "string"
#' ),
#' dialogCodeHook = list(
#' uri = "string",
#' messageVersion = "string"
#' ),
#' fulfillmentActivity = list(
#' type = "ReturnIntent"|"CodeHook",
#' codeHook = list(
#' uri = "string",
#' messageVersion = "string"
#' )
#' ),
#' parentIntentSignature = "string",
#' checksum = "string",
#' createVersion = TRUE|FALSE
#' )
#' ```
#'
#' @examples
#' # This example shows how to create an intent for ordering pizzas.
#' \donttest{svc$put_intent(
#' name = "DocOrderPizza",
#' conclusionStatement = list(
#' messages = list(
#' list(
#' content = "All right, I ordered you a {Crust} crust {Type} pizza with {Sauce} sauce...",
#' contentType = "PlainText"
#' ),
#' list(
#' content = "OK, your {Crust} crust {Type} pizza with {Sauce} sauce is on the way.",
#' contentType = "PlainText"
#' )
#' ),
#' responseCard = "foo"
#' ),
#' confirmationPrompt = list(
#' maxAttempts = 1L,
#' messages = list(
#' list(
#' content = "Should I order your {Crust} crust {Type} pizza with {Sauce} sauce?",
#' contentType = "PlainText"
#' )
#' )
#' ),
#' description = "Order a pizza from a local pizzeria.",
#' fulfillmentActivity = list(
#' type = "ReturnIntent"
#' ),
#' rejectionStatement = list(
#' messages = list(
#' list(
#' content = "Ok, I'll cancel your order.",
#' contentType = "PlainText"
#' ),
#' list(
#' content = "I cancelled your order.",
#' contentType = "PlainText"
#' )
#' )
#' ),
#' sampleUtterances = list(
#' "Order me a pizza.",
#' "Order me a {Type} pizza.",
#' "I want a {Crust} crust {Type} pizza",
#' "I want a {Crust} crust {Type} pizza with {Sauce} sauce."
#' ),
#' slots = list(
#' list(
#' name = "Type",
#' description = "The type of pizza to order.",
#' priority = 1L,
#' sampleUtterances = list(
#' "Get me a {Type} pizza.",
#' "A {Type} pizza please.",
#' "I'd like a {Type} pizza."
#' ),
#' slotConstraint = "Required",
#' slotType = "DocPizzaType",
#' slotTypeVersion = "$LATEST",
#' valueElicitationPrompt = list(
#' maxAttempts = 1L,
#' messages = list(
#' list(
#' content = "What type of pizza would you like?",
#' contentType = "PlainText"
#' ),
#' list(
#' content = "Vegie or cheese pizza?",
#' contentType = "PlainText"
#' ),
#' list(
#' content = "I can get you a vegie or a cheese pizza.",
#' contentType = "PlainText"
#' )
#' )
#' )
#' ),
#' list(
#' name = "Crust",
#' description = "The type of pizza crust to order.",
#' priority = 2L,
#' sampleUtterances = list(
#' "Make it a {Crust} crust.",
#' "I'd like a {Crust} crust."
#' ),
#' slotConstraint = "Required",
#' slotType = "DocPizzaCrustType",
#' slotTypeVersion = "$LATEST",
#' valueElicitationPrompt = list(
#' maxAttempts = 1L,
#' messages = list(
#' list(
#' content = "What type of crust would you like?",
#' contentType = "PlainText"
#' ),
#' list(
#' content = "Thick or thin crust?",
#' contentType = "PlainText"
#' )
#' )
#' )
#' ),
#' list(
#' name = "Sauce",
#' description = "The type of sauce to use on the pizza.",
#' priority = 3L,
#' sampleUtterances = list(
#' "Make it {Sauce} sauce.",
#' "I'd like {Sauce} sauce."
#' ),
#' slotConstraint = "Required",
#' slotType = "DocPizzaSauceType",
#' slotTypeVersion = "$LATEST",
#' valueElicitationPrompt = list(
#' maxAttempts = 1L,
#' messages = list(
#' list(
#' content = "White or red sauce?",
#' contentType = "PlainText"
#' ),
#' list(
#' content = "Garlic or tomato sauce?",
#' contentType = "PlainText"
#' )
#' )
#' )
#' )
#' )
#' )}
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_put_intent
lexmodelbuildingservice_put_intent <- function(name, description = NULL, slots = NULL, sampleUtterances = NULL, confirmationPrompt = NULL, rejectionStatement = NULL, followUpPrompt = NULL, conclusionStatement = NULL, dialogCodeHook = NULL, fulfillmentActivity = NULL, parentIntentSignature = NULL, checksum = NULL, createVersion = NULL) {
  # Describe the PutIntent endpoint: a PUT against the $LATEST version of
  # the named intent; the operation is not paginated.
  operation <- new_operation(
    name = "PutIntent",
    http_method = "PUT",
    http_path = "/intents/{name}/versions/$LATEST",
    paginator = list()
  )
  # Collect the caller-supplied fields into the operation's input shape.
  op_input <- .lexmodelbuildingservice$put_intent_input(
    name = name,
    description = description,
    slots = slots,
    sampleUtterances = sampleUtterances,
    confirmationPrompt = confirmationPrompt,
    rejectionStatement = rejectionStatement,
    followUpPrompt = followUpPrompt,
    conclusionStatement = conclusionStatement,
    dialogCodeHook = dialogCodeHook,
    fulfillmentActivity = fulfillmentActivity,
    parentIntentSignature = parentIntentSignature,
    checksum = checksum,
    createVersion = createVersion
  )
  op_output <- .lexmodelbuildingservice$put_intent_output()
  # Build a service client from the active configuration, then dispatch
  # the request and hand the parsed response back to the caller.
  client <- .lexmodelbuildingservice$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.lexmodelbuildingservice$operations$put_intent <- lexmodelbuildingservice_put_intent
#' Creates a custom slot type or replaces an existing custom slot type
#'
#' Creates a custom slot type or replaces an existing custom slot type.
#'
#' To create a custom slot type, specify a name for the slot type and a set
#' of enumeration values, which are the values that a slot of this type can
#' assume. For more information, see how-it-works.
#'
#' If you specify the name of an existing slot type, the fields in the
#' request replace the existing values in the `$LATEST` version of the slot
#' type. Amazon Lex removes the fields that you don\'t provide in the
#' request. If you don\'t specify required fields, Amazon Lex throws an
#' exception. When you update the `$LATEST` version of a slot type, if a
#' bot uses the `$LATEST` version of an intent that contains the slot type,
#' the bot\'s `status` field is set to `NOT_BUILT`.
#'
#' This operation requires permissions for the `lex:PutSlotType` action.
#'
#' @usage
#' lexmodelbuildingservice_put_slot_type(name, description,
#' enumerationValues, checksum, valueSelectionStrategy, createVersion)
#'
#' @param name [required] The name of the slot type. The name is *not* case sensitive.
#'
#' The name can\'t match a built-in slot type name, or a built-in slot type
#' name with \"AMAZON.\" removed. For example, because there is a built-in
#' slot type called `AMAZON.DATE`, you can\'t create a custom slot type
#' called `DATE`.
#'
#' For a list of built-in slot types, see [Slot Type
#' Reference](https://developer.amazon.com/public/solutions/alexa/alexa-skills-kit/docs/built-in-intent-ref/slot-type-reference)
#' in the *Alexa Skills Kit*.
#' @param description A description of the slot type.
#' @param enumerationValues A list of `EnumerationValue` objects that defines the values that the
#' slot type can take. Each value can have a list of `synonyms`, which are
#' additional values that help train the machine learning model about the
#' values that it resolves for a slot.
#'
#' When Amazon Lex resolves a slot value, it generates a resolution list
#' that contains up to five possible values for the slot. If you are using
#' a Lambda function, this resolution list is passed to the function. If
#' you are not using a Lambda function you can choose to return the value
#' that the user entered or the first value in the resolution list as the
#' slot value. The `valueSelectionStrategy` field indicates the option to
#' use.
#' @param checksum Identifies a specific revision of the `$LATEST` version.
#'
#' When you create a new slot type, leave the `checksum` field blank. If
#' you specify a checksum you get a `BadRequestException` exception.
#'
#' When you want to update a slot type, set the `checksum` field to the
#' checksum of the most recent revision of the `$LATEST` version. If you
#' don\'t specify the `checksum` field, or if the checksum does not match
#' the `$LATEST` version, you get a `PreconditionFailedException`
#' exception.
#' @param valueSelectionStrategy Determines the slot resolution strategy that Amazon Lex uses to return
#' slot type values. The field can be set to one of the following values:
#'
#' - `ORIGINAL_VALUE` - Returns the value entered by the user, if the
#' user value is similar to the slot value.
#'
#' - `TOP_RESOLUTION` - If there is a resolution list for the slot,
#' return the first value in the resolution list as the slot type
#' value. If there is no resolution list, null is returned.
#'
#' If you don\'t specify the `valueSelectionStrategy`, the default is
#' `ORIGINAL_VALUE`.
#' @param createVersion When set to `true` a new numbered version of the slot type is created.
#' This is the same as calling the `CreateSlotTypeVersion` operation. If
#' you do not specify `createVersion`, the default is `false`.
#'
#' @section Request syntax:
#' ```
#' svc$put_slot_type(
#' name = "string",
#' description = "string",
#' enumerationValues = list(
#' list(
#' value = "string",
#' synonyms = list(
#' "string"
#' )
#' )
#' ),
#' checksum = "string",
#' valueSelectionStrategy = "ORIGINAL_VALUE"|"TOP_RESOLUTION",
#' createVersion = TRUE|FALSE
#' )
#' ```
#'
#' @examples
#' # This example shows how to create a slot type that describes pizza
#' # sauces.
#' \donttest{svc$put_slot_type(
#' name = "PizzaSauceType",
#' description = "Available pizza sauces",
#' enumerationValues = list(
#' list(
#' value = "red"
#' ),
#' list(
#' value = "white"
#' )
#' )
#' )}
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_put_slot_type
lexmodelbuildingservice_put_slot_type <- function(name, description = NULL, enumerationValues = NULL, checksum = NULL, valueSelectionStrategy = NULL, createVersion = NULL) {
  # Describe the PutSlotType endpoint: a PUT against the $LATEST version
  # of the named slot type; the operation is not paginated.
  operation <- new_operation(
    name = "PutSlotType",
    http_method = "PUT",
    http_path = "/slottypes/{name}/versions/$LATEST",
    paginator = list()
  )
  # Collect the caller-supplied fields into the operation's input shape.
  op_input <- .lexmodelbuildingservice$put_slot_type_input(
    name = name,
    description = description,
    enumerationValues = enumerationValues,
    checksum = checksum,
    valueSelectionStrategy = valueSelectionStrategy,
    createVersion = createVersion
  )
  op_output <- .lexmodelbuildingservice$put_slot_type_output()
  # Build a service client from the active configuration, then dispatch
  # the request and hand the parsed response back to the caller.
  client <- .lexmodelbuildingservice$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.lexmodelbuildingservice$operations$put_slot_type <- lexmodelbuildingservice_put_slot_type
#' Starts a job to import a resource to Amazon Lex
#'
#' Starts a job to import a resource to Amazon Lex.
#'
#' @usage
#' lexmodelbuildingservice_start_import(payload, resourceType,
#' mergeStrategy)
#'
#' @param payload [required] A zip archive in binary format. The archive should contain one file, a
#' JSON file containing the resource to import. The resource should match
#' the type specified in the `resourceType` field.
#' @param resourceType [required] Specifies the type of resource to import. Each resource also imports any
#' resources that it depends on.
#'
#' - A bot exports dependent intents.
#'
#' - An intent exports dependent slot types.
#' @param mergeStrategy [required] Specifies the action that the `StartImport` operation should take when
#' there is an existing resource with the same name.
#'
#' - FAIL\\_ON\\_CONFLICT - The import operation is stopped on the first
#' conflict between a resource in the import file and an existing
#' resource. The name of the resource causing the conflict is in the
#' `failureReason` field of the response to the `GetImport` operation.
#'
#' - OVERWRITE\\_LATEST - The import operation proceeds even if there is
#' a conflict with an existing resource. The \\$LATEST version of the
#' existing resource is overwritten with the data from the import file.
#'
#' @section Request syntax:
#' ```
#' svc$start_import(
#' payload = raw,
#' resourceType = "BOT"|"INTENT"|"SLOT_TYPE",
#' mergeStrategy = "OVERWRITE_LATEST"|"FAIL_ON_CONFLICT"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_start_import
lexmodelbuildingservice_start_import <- function(payload, resourceType, mergeStrategy) {
  # Describe the StartImport endpoint: a POST to the imports collection;
  # the operation is not paginated.
  operation <- new_operation(
    name = "StartImport",
    http_method = "POST",
    http_path = "/imports/",
    paginator = list()
  )
  # Collect the caller-supplied fields into the operation's input shape.
  op_input <- .lexmodelbuildingservice$start_import_input(
    payload = payload,
    resourceType = resourceType,
    mergeStrategy = mergeStrategy
  )
  op_output <- .lexmodelbuildingservice$start_import_output()
  # Build a service client from the active configuration, then dispatch
  # the request and hand the parsed response back to the caller.
  client <- .lexmodelbuildingservice$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.lexmodelbuildingservice$operations$start_import <- lexmodelbuildingservice_start_import
| /paws/R/lexmodelbuildingservice_operations.R | permissive | ryanb8/paws | R | false | false | 99,372 | r | # This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common get_config new_operation new_request send_request
#' @include lexmodelbuildingservice_service.R
NULL
#' Creates a new version of the bot based on the $LATEST version
#'
#' Creates a new version of the bot based on the `$LATEST` version. If the
#' `$LATEST` version of this resource hasn\'t changed since you created the
#' last version, Amazon Lex doesn\'t create a new version. It returns the
#' last created version.
#'
#' You can update only the `$LATEST` version of the bot. You can\'t update
#' the numbered versions that you create with the `CreateBotVersion`
#' operation.
#'
#' When you create the first version of a bot, Amazon Lex sets the version
#' to 1. Subsequent versions increment by 1. For more information, see
#' versioning-intro.
#'
#' This operation requires permission for the `lex:CreateBotVersion`
#' action.
#'
#' @usage
#' lexmodelbuildingservice_create_bot_version(name, checksum)
#'
#' @param name [required] The name of the bot that you want to create a new version of. The name
#' is case sensitive.
#' @param checksum Identifies a specific revision of the `$LATEST` version of the bot. If
#' you specify a checksum and the `$LATEST` version of the bot has a
#' different checksum, a `PreconditionFailedException` exception is
#' returned and Amazon Lex doesn\'t publish a new version. If you don\'t
#' specify a checksum, Amazon Lex publishes the `$LATEST` version.
#'
#' @section Request syntax:
#' ```
#' svc$create_bot_version(
#' name = "string",
#' checksum = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_create_bot_version
lexmodelbuildingservice_create_bot_version <- function(name, checksum = NULL) {
  # Describe the CreateBotVersion endpoint: a POST to the named bot's
  # versions collection; the operation is not paginated.
  operation <- new_operation(
    name = "CreateBotVersion",
    http_method = "POST",
    http_path = "/bots/{name}/versions",
    paginator = list()
  )
  # Collect the caller-supplied fields into the operation's input shape.
  op_input <- .lexmodelbuildingservice$create_bot_version_input(
    name = name,
    checksum = checksum
  )
  op_output <- .lexmodelbuildingservice$create_bot_version_output()
  # Build a service client from the active configuration, then dispatch
  # the request and hand the parsed response back to the caller.
  client <- .lexmodelbuildingservice$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.lexmodelbuildingservice$operations$create_bot_version <- lexmodelbuildingservice_create_bot_version
#' Creates a new version of an intent based on the $LATEST version of the
#' intent
#'
#' Creates a new version of an intent based on the `$LATEST` version of the
#' intent. If the `$LATEST` version of this intent hasn\'t changed since
#' you last updated it, Amazon Lex doesn\'t create a new version. It
#' returns the last version you created.
#'
#' You can update only the `$LATEST` version of the intent. You can\'t
#' update the numbered versions that you create with the
#' `CreateIntentVersion` operation.
#'
#' When you create a version of an intent, Amazon Lex sets the version to
#' 1. Subsequent versions increment by 1. For more information, see
#' versioning-intro.
#'
#' This operation requires permissions to perform the
#' `lex:CreateIntentVersion` action.
#'
#' @usage
#' lexmodelbuildingservice_create_intent_version(name, checksum)
#'
#' @param name [required] The name of the intent that you want to create a new version of. The
#' name is case sensitive.
#' @param checksum Checksum of the `$LATEST` version of the intent that should be used to
#' create the new version. If you specify a checksum and the `$LATEST`
#' version of the intent has a different checksum, Amazon Lex returns a
#' `PreconditionFailedException` exception and doesn\'t publish a new
#' version. If you don\'t specify a checksum, Amazon Lex publishes the
#' `$LATEST` version.
#'
#' @section Request syntax:
#' ```
#' svc$create_intent_version(
#' name = "string",
#' checksum = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_create_intent_version
lexmodelbuildingservice_create_intent_version <- function(name, checksum = NULL) {
  # Describe the CreateIntentVersion endpoint: a POST to the named
  # intent's versions collection; the operation is not paginated.
  operation <- new_operation(
    name = "CreateIntentVersion",
    http_method = "POST",
    http_path = "/intents/{name}/versions",
    paginator = list()
  )
  # Collect the caller-supplied fields into the operation's input shape.
  op_input <- .lexmodelbuildingservice$create_intent_version_input(
    name = name,
    checksum = checksum
  )
  op_output <- .lexmodelbuildingservice$create_intent_version_output()
  # Build a service client from the active configuration, then dispatch
  # the request and hand the parsed response back to the caller.
  client <- .lexmodelbuildingservice$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.lexmodelbuildingservice$operations$create_intent_version <- lexmodelbuildingservice_create_intent_version
#' Creates a new version of a slot type based on the $LATEST version of the
#' specified slot type
#'
#' Creates a new version of a slot type based on the `$LATEST` version of
#' the specified slot type. If the `$LATEST` version of this resource has
#' not changed since the last version that you created, Amazon Lex doesn\'t
#' create a new version. It returns the last version that you created.
#'
#' You can update only the `$LATEST` version of a slot type. You can\'t
#' update the numbered versions that you create with the
#' `CreateSlotTypeVersion` operation.
#'
#' When you create a version of a slot type, Amazon Lex sets the version to
#' 1. Subsequent versions increment by 1. For more information, see
#' versioning-intro.
#'
#' This operation requires permissions for the `lex:CreateSlotTypeVersion`
#' action.
#'
#' @usage
#' lexmodelbuildingservice_create_slot_type_version(name, checksum)
#'
#' @param name [required] The name of the slot type that you want to create a new version for. The
#' name is case sensitive.
#' @param checksum Checksum for the `$LATEST` version of the slot type that you want to
#' publish. If you specify a checksum and the `$LATEST` version of the slot
#' type has a different checksum, Amazon Lex returns a
#' `PreconditionFailedException` exception and doesn\'t publish the new
#' version. If you don\'t specify a checksum, Amazon Lex publishes the
#' `$LATEST` version.
#'
#' @section Request syntax:
#' ```
#' svc$create_slot_type_version(
#' name = "string",
#' checksum = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_create_slot_type_version
lexmodelbuildingservice_create_slot_type_version <- function(name, checksum = NULL) {
  # Describe the CreateSlotTypeVersion endpoint: a POST to the named slot
  # type's versions collection; the operation is not paginated.
  operation <- new_operation(
    name = "CreateSlotTypeVersion",
    http_method = "POST",
    http_path = "/slottypes/{name}/versions",
    paginator = list()
  )
  # Collect the caller-supplied fields into the operation's input shape.
  op_input <- .lexmodelbuildingservice$create_slot_type_version_input(
    name = name,
    checksum = checksum
  )
  op_output <- .lexmodelbuildingservice$create_slot_type_version_output()
  # Build a service client from the active configuration, then dispatch
  # the request and hand the parsed response back to the caller.
  client <- .lexmodelbuildingservice$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.lexmodelbuildingservice$operations$create_slot_type_version <- lexmodelbuildingservice_create_slot_type_version
#' Deletes all versions of the bot, including the $LATEST version
#'
#' Deletes all versions of the bot, including the `$LATEST` version. To
#' delete a specific version of the bot, use the DeleteBotVersion
#' operation. The `DeleteBot` operation doesn\'t immediately remove the bot
#' schema. Instead, it is marked for deletion and removed later.
#'
#' Amazon Lex stores utterances indefinitely for improving the ability of
#' your bot to respond to user inputs. These utterances are not removed
#' when the bot is deleted. To remove the utterances, use the
#' DeleteUtterances operation.
#'
#' If a bot has an alias, you can\'t delete it. Instead, the `DeleteBot`
#' operation returns a `ResourceInUseException` exception that includes a
#' reference to the alias that refers to the bot. To remove the reference
#' to the bot, delete the alias. If you get the same exception again,
#' delete the referring alias until the `DeleteBot` operation is
#' successful.
#'
#' This operation requires permissions for the `lex:DeleteBot` action.
#'
#' @usage
#' lexmodelbuildingservice_delete_bot(name)
#'
#' @param name [required] The name of the bot. The name is case sensitive.
#'
#' @section Request syntax:
#' ```
#' svc$delete_bot(
#' name = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_delete_bot
lexmodelbuildingservice_delete_bot <- function(name) {
  # Describe the DeleteBot endpoint: a DELETE against the named bot
  # resource; the operation is not paginated.
  operation <- new_operation(
    name = "DeleteBot",
    http_method = "DELETE",
    http_path = "/bots/{name}",
    paginator = list()
  )
  # Collect the caller-supplied fields into the operation's input shape.
  op_input <- .lexmodelbuildingservice$delete_bot_input(name = name)
  op_output <- .lexmodelbuildingservice$delete_bot_output()
  # Build a service client from the active configuration, then dispatch
  # the request and hand the parsed response back to the caller.
  client <- .lexmodelbuildingservice$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.lexmodelbuildingservice$operations$delete_bot <- lexmodelbuildingservice_delete_bot
#' Deletes an alias for the specified bot
#'
#' Deletes an alias for the specified bot.
#'
#' You can\'t delete an alias that is used in the association between a bot
#' and a messaging channel. If an alias is used in a channel association,
#' the `DeleteBot` operation returns a `ResourceInUseException` exception
#' that includes a reference to the channel association that refers to the
#' bot. You can remove the reference to the alias by deleting the channel
#' association. If you get the same exception again, delete the referring
#' association until the `DeleteBotAlias` operation is successful.
#'
#' @usage
#' lexmodelbuildingservice_delete_bot_alias(name, botName)
#'
#' @param name [required] The name of the alias to delete. The name is case sensitive.
#' @param botName [required] The name of the bot that the alias points to.
#'
#' @section Request syntax:
#' ```
#' svc$delete_bot_alias(
#' name = "string",
#' botName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_delete_bot_alias
lexmodelbuildingservice_delete_bot_alias <- function(name, botName) {
  # Describe the DeleteBotAlias endpoint: a DELETE against the named
  # alias under its bot; the operation is not paginated.
  operation <- new_operation(
    name = "DeleteBotAlias",
    http_method = "DELETE",
    http_path = "/bots/{botName}/aliases/{name}",
    paginator = list()
  )
  # Collect the caller-supplied fields into the operation's input shape.
  op_input <- .lexmodelbuildingservice$delete_bot_alias_input(
    name = name,
    botName = botName
  )
  op_output <- .lexmodelbuildingservice$delete_bot_alias_output()
  # Build a service client from the active configuration, then dispatch
  # the request and hand the parsed response back to the caller.
  client <- .lexmodelbuildingservice$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.lexmodelbuildingservice$operations$delete_bot_alias <- lexmodelbuildingservice_delete_bot_alias
#' Deletes the association between an Amazon Lex bot and a messaging
#' platform
#'
#' Deletes the association between an Amazon Lex bot and a messaging
#' platform.
#'
#' This operation requires permission for the
#' `lex:DeleteBotChannelAssociation` action.
#'
#' @usage
#' lexmodelbuildingservice_delete_bot_channel_association(name, botName,
#' botAlias)
#'
#' @param name [required] The name of the association. The name is case sensitive.
#' @param botName [required] The name of the Amazon Lex bot.
#' @param botAlias [required] An alias that points to the specific version of the Amazon Lex bot to
#' which this association is being made.
#'
#' @section Request syntax:
#' ```
#' svc$delete_bot_channel_association(
#' name = "string",
#' botName = "string",
#' botAlias = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_delete_bot_channel_association
lexmodelbuildingservice_delete_bot_channel_association <- function(name, botName, botAlias) {
  # Describe the DeleteBotChannelAssociation endpoint: a DELETE against
  # the named channel association; the operation is not paginated.
  # NOTE(review): the path uses the {aliasName} placeholder while the R
  # argument is `botAlias` — presumably the URI locator is resolved from
  # the input shape's serialized member name; confirm against the API spec.
  operation <- new_operation(
    name = "DeleteBotChannelAssociation",
    http_method = "DELETE",
    http_path = "/bots/{botName}/aliases/{aliasName}/channels/{name}",
    paginator = list()
  )
  # Collect the caller-supplied fields into the operation's input shape.
  op_input <- .lexmodelbuildingservice$delete_bot_channel_association_input(
    name = name,
    botName = botName,
    botAlias = botAlias
  )
  op_output <- .lexmodelbuildingservice$delete_bot_channel_association_output()
  # Build a service client from the active configuration, then dispatch
  # the request and hand the parsed response back to the caller.
  client <- .lexmodelbuildingservice$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.lexmodelbuildingservice$operations$delete_bot_channel_association <- lexmodelbuildingservice_delete_bot_channel_association
#' Deletes a specific version of a bot
#'
#' Deletes a specific version of a bot. To delete all versions of a bot,
#' use the DeleteBot operation.
#'
#' This operation requires permissions for the `lex:DeleteBotVersion`
#' action.
#'
#' @usage
#' lexmodelbuildingservice_delete_bot_version(name, version)
#'
#' @param name [required] The name of the bot.
#' @param version [required] The version of the bot to delete. You cannot delete the `$LATEST`
#' version of the bot. To delete the `$LATEST` version, use the DeleteBot
#' operation.
#'
#' @section Request syntax:
#' ```
#' svc$delete_bot_version(
#' name = "string",
#' version = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_delete_bot_version
lexmodelbuildingservice_delete_bot_version <- function(name, version) {
  # Describe the DeleteBotVersion endpoint: a DELETE against a specific
  # numbered version of the named bot; the operation is not paginated.
  operation <- new_operation(
    name = "DeleteBotVersion",
    http_method = "DELETE",
    http_path = "/bots/{name}/versions/{version}",
    paginator = list()
  )
  # Collect the caller-supplied fields into the operation's input shape.
  op_input <- .lexmodelbuildingservice$delete_bot_version_input(
    name = name,
    version = version
  )
  op_output <- .lexmodelbuildingservice$delete_bot_version_output()
  # Build a service client from the active configuration, then dispatch
  # the request and hand the parsed response back to the caller.
  client <- .lexmodelbuildingservice$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.lexmodelbuildingservice$operations$delete_bot_version <- lexmodelbuildingservice_delete_bot_version
#' Deletes all versions of the intent, including the $LATEST version
#'
#' Deletes all versions of the intent, including the `$LATEST` version. To
#' delete a specific version of the intent, use the DeleteIntentVersion
#' operation.
#'
#' You can delete a version of an intent only if it is not referenced. To
#' delete an intent that is referred to in one or more bots (see
#' how-it-works), you must remove those references first.
#'
#' If you get the `ResourceInUseException` exception, it provides an
#' example reference that shows where the intent is referenced. To remove
#' the reference to the intent, either update the bot or delete it. If you
#' get the same exception when you attempt to delete the intent again,
#' repeat until the intent has no references and the call to `DeleteIntent`
#' is successful.
#'
#' This operation requires permission for the `lex:DeleteIntent` action.
#'
#' @usage
#' lexmodelbuildingservice_delete_intent(name)
#'
#' @param name [required] The name of the intent. The name is case sensitive.
#'
#' @section Request syntax:
#' ```
#' svc$delete_intent(
#' name = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_delete_intent
lexmodelbuildingservice_delete_intent <- function(name) {
  # Describe the DeleteIntent REST operation.
  operation <- new_operation(
    name = "DeleteIntent",
    http_method = "DELETE",
    http_path = "/intents/{name}",
    paginator = list()
  )
  # Marshal the request parameters and the expected (empty) response shape.
  req_input <- .lexmodelbuildingservice$delete_intent_input(name = name)
  req_output <- .lexmodelbuildingservice$delete_intent_output()
  # Build a client from the current configuration, then issue the call.
  client <- .lexmodelbuildingservice$service(get_config())
  response <- send_request(new_request(client, operation, req_input, req_output))
  response
}
.lexmodelbuildingservice$operations$delete_intent <- lexmodelbuildingservice_delete_intent
#' Deletes a specific version of an intent
#'
#' Deletes a specific version of an intent. To delete all versions of an
#' intent, use the DeleteIntent operation.
#'
#' This operation requires permissions for the `lex:DeleteIntentVersion`
#' action.
#'
#' @usage
#' lexmodelbuildingservice_delete_intent_version(name, version)
#'
#' @param name [required] The name of the intent.
#' @param version [required] The version of the intent to delete. You cannot delete the `$LATEST`
#' version of the intent. To delete the `$LATEST` version, use the
#' DeleteIntent operation.
#'
#' @section Request syntax:
#' ```
#' svc$delete_intent_version(
#' name = "string",
#' version = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_delete_intent_version
lexmodelbuildingservice_delete_intent_version <- function(name, version) {
  # Describe the DeleteIntentVersion REST operation.
  operation <- new_operation(
    name = "DeleteIntentVersion",
    http_method = "DELETE",
    http_path = "/intents/{name}/versions/{version}",
    paginator = list()
  )
  # Marshal the request parameters and the expected (empty) response shape.
  req_input <- .lexmodelbuildingservice$delete_intent_version_input(name = name, version = version)
  req_output <- .lexmodelbuildingservice$delete_intent_version_output()
  # Build a client from the current configuration, then issue the call.
  client <- .lexmodelbuildingservice$service(get_config())
  response <- send_request(new_request(client, operation, req_input, req_output))
  response
}
.lexmodelbuildingservice$operations$delete_intent_version <- lexmodelbuildingservice_delete_intent_version
#' Deletes all versions of the slot type, including the $LATEST version
#'
#' Deletes all versions of the slot type, including the `$LATEST` version.
#' To delete a specific version of the slot type, use the
#' DeleteSlotTypeVersion operation.
#'
#' You can delete a version of a slot type only if it is not referenced. To
#' delete a slot type that is referred to in one or more intents, you must
#' remove those references first.
#'
#' If you get the `ResourceInUseException` exception, the exception
#' provides an example reference that shows the intent where the slot type
#' is referenced. To remove the reference to the slot type, either update
#' the intent or delete it. If you get the same exception when you attempt
#' to delete the slot type again, repeat until the slot type has no
#' references and the `DeleteSlotType` call is successful.
#'
#' This operation requires permission for the `lex:DeleteSlotType` action.
#'
#' @usage
#' lexmodelbuildingservice_delete_slot_type(name)
#'
#' @param name [required] The name of the slot type. The name is case sensitive.
#'
#' @section Request syntax:
#' ```
#' svc$delete_slot_type(
#' name = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_delete_slot_type
lexmodelbuildingservice_delete_slot_type <- function(name) {
  # Describe the DeleteSlotType REST operation.
  operation <- new_operation(
    name = "DeleteSlotType",
    http_method = "DELETE",
    http_path = "/slottypes/{name}",
    paginator = list()
  )
  # Marshal the request parameters and the expected (empty) response shape.
  req_input <- .lexmodelbuildingservice$delete_slot_type_input(name = name)
  req_output <- .lexmodelbuildingservice$delete_slot_type_output()
  # Build a client from the current configuration, then issue the call.
  client <- .lexmodelbuildingservice$service(get_config())
  response <- send_request(new_request(client, operation, req_input, req_output))
  response
}
.lexmodelbuildingservice$operations$delete_slot_type <- lexmodelbuildingservice_delete_slot_type
#' Deletes a specific version of a slot type
#'
#' Deletes a specific version of a slot type. To delete all versions of a
#' slot type, use the DeleteSlotType operation.
#'
#' This operation requires permissions for the `lex:DeleteSlotTypeVersion`
#' action.
#'
#' @usage
#' lexmodelbuildingservice_delete_slot_type_version(name, version)
#'
#' @param name [required] The name of the slot type.
#' @param version [required] The version of the slot type to delete. You cannot delete the `$LATEST`
#' version of the slot type. To delete the `$LATEST` version, use the
#' DeleteSlotType operation.
#'
#' @section Request syntax:
#' ```
#' svc$delete_slot_type_version(
#' name = "string",
#' version = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_delete_slot_type_version
lexmodelbuildingservice_delete_slot_type_version <- function(name, version) {
  # Describe the DeleteSlotTypeVersion REST operation. Note the AWS API uses
  # the singular "version" segment here, unlike DeleteBotVersion.
  operation <- new_operation(
    name = "DeleteSlotTypeVersion",
    http_method = "DELETE",
    http_path = "/slottypes/{name}/version/{version}",
    paginator = list()
  )
  # Marshal the request parameters and the expected (empty) response shape.
  req_input <- .lexmodelbuildingservice$delete_slot_type_version_input(name = name, version = version)
  req_output <- .lexmodelbuildingservice$delete_slot_type_version_output()
  # Build a client from the current configuration, then issue the call.
  client <- .lexmodelbuildingservice$service(get_config())
  response <- send_request(new_request(client, operation, req_input, req_output))
  response
}
.lexmodelbuildingservice$operations$delete_slot_type_version <- lexmodelbuildingservice_delete_slot_type_version
#' Deletes stored utterances
#'
#' Deletes stored utterances.
#'
#' Amazon Lex stores the utterances that users send to your bot. Utterances
#' are stored for 15 days for use with the GetUtterancesView operation, and
#' then stored indefinitely for use in improving the ability of your bot to
#' respond to user input.
#'
#' Use the `DeleteUtterances` operation to manually delete stored
#' utterances for a specific user. When you use the `DeleteUtterances`
#' operation, utterances stored for improving your bot\'s ability to
#' respond to user input are deleted immediately. Utterances stored for use
#' with the `GetUtterancesView` operation are deleted after 15 days.
#'
#' This operation requires permissions for the `lex:DeleteUtterances`
#' action.
#'
#' @usage
#' lexmodelbuildingservice_delete_utterances(botName, userId)
#'
#' @param botName [required] The name of the bot that stored the utterances.
#' @param userId [required] The unique identifier for the user that made the utterances. This is the
#' user ID that was sent in the
#' [PostContent](http://docs.aws.amazon.com/lex/latest/dg/API_runtime_PostContent.html)
#' or
#' [PostText](http://docs.aws.amazon.com/lex/latest/dg/API_runtime_PostText.html)
#' operation request that contained the utterance.
#'
#' @section Request syntax:
#' ```
#' svc$delete_utterances(
#' botName = "string",
#' userId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_delete_utterances
lexmodelbuildingservice_delete_utterances <- function(botName, userId) {
  # Describe the DeleteUtterances REST operation.
  operation <- new_operation(
    name = "DeleteUtterances",
    http_method = "DELETE",
    http_path = "/bots/{botName}/utterances/{userId}",
    paginator = list()
  )
  # Marshal the request parameters and the expected (empty) response shape.
  req_input <- .lexmodelbuildingservice$delete_utterances_input(botName = botName, userId = userId)
  req_output <- .lexmodelbuildingservice$delete_utterances_output()
  # Build a client from the current configuration, then issue the call.
  client <- .lexmodelbuildingservice$service(get_config())
  response <- send_request(new_request(client, operation, req_input, req_output))
  response
}
.lexmodelbuildingservice$operations$delete_utterances <- lexmodelbuildingservice_delete_utterances
#' Returns metadata information for a specific bot
#'
#' Returns metadata information for a specific bot. You must provide the
#' bot name and the bot version or alias.
#'
#' This operation requires permissions for the `lex:GetBot` action.
#'
#' @usage
#' lexmodelbuildingservice_get_bot(name, versionOrAlias)
#'
#' @param name [required] The name of the bot. The name is case sensitive.
#' @param versionOrAlias [required] The version or alias of the bot.
#'
#' @section Request syntax:
#' ```
#' svc$get_bot(
#' name = "string",
#' versionOrAlias = "string"
#' )
#' ```
#'
#' @examples
#' # This example shows how to get configuration information for a bot.
#' \donttest{svc$get_bot(
#' name = "DocOrderPizza",
#' versionOrAlias = "$LATEST"
#' )}
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_get_bot
lexmodelbuildingservice_get_bot <- function(name, versionOrAlias) {
  # Describe the GetBot REST operation.
  operation <- new_operation(
    name = "GetBot",
    http_method = "GET",
    http_path = "/bots/{name}/versions/{versionoralias}",
    paginator = list()
  )
  # Marshal the request parameters and the expected response shape.
  req_input <- .lexmodelbuildingservice$get_bot_input(name = name, versionOrAlias = versionOrAlias)
  req_output <- .lexmodelbuildingservice$get_bot_output()
  # Build a client from the current configuration, then issue the call.
  client <- .lexmodelbuildingservice$service(get_config())
  response <- send_request(new_request(client, operation, req_input, req_output))
  response
}
.lexmodelbuildingservice$operations$get_bot <- lexmodelbuildingservice_get_bot
#' Returns information about an Amazon Lex bot alias
#'
#' Returns information about an Amazon Lex bot alias. For more information
#' about aliases, see versioning-aliases.
#'
#' This operation requires permissions for the `lex:GetBotAlias` action.
#'
#' @usage
#' lexmodelbuildingservice_get_bot_alias(name, botName)
#'
#' @param name [required] The name of the bot alias. The name is case sensitive.
#' @param botName [required] The name of the bot.
#'
#' @section Request syntax:
#' ```
#' svc$get_bot_alias(
#' name = "string",
#' botName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_get_bot_alias
lexmodelbuildingservice_get_bot_alias <- function(name, botName) {
  # Describe the GetBotAlias REST operation.
  operation <- new_operation(
    name = "GetBotAlias",
    http_method = "GET",
    http_path = "/bots/{botName}/aliases/{name}",
    paginator = list()
  )
  # Marshal the request parameters and the expected response shape.
  req_input <- .lexmodelbuildingservice$get_bot_alias_input(name = name, botName = botName)
  req_output <- .lexmodelbuildingservice$get_bot_alias_output()
  # Build a client from the current configuration, then issue the call.
  client <- .lexmodelbuildingservice$service(get_config())
  response <- send_request(new_request(client, operation, req_input, req_output))
  response
}
.lexmodelbuildingservice$operations$get_bot_alias <- lexmodelbuildingservice_get_bot_alias
#' Returns a list of aliases for a specified Amazon Lex bot
#'
#' Returns a list of aliases for a specified Amazon Lex bot.
#'
#' This operation requires permissions for the `lex:GetBotAliases` action.
#'
#' @usage
#' lexmodelbuildingservice_get_bot_aliases(botName, nextToken, maxResults,
#' nameContains)
#'
#' @param botName [required] The name of the bot.
#' @param nextToken A pagination token for fetching the next page of aliases. If the
#' response to this call is truncated, Amazon Lex returns a pagination
#' token in the response. To fetch the next page of aliases, specify the
#' pagination token in the next request.
#' @param maxResults The maximum number of aliases to return in the response. The default is
#' 50.
#' @param nameContains Substring to match in bot alias names. An alias will be returned if any
#' part of its name matches the substring. For example, \"xyz\" matches
#' both \"xyzabc\" and \"abcxyz.\"
#'
#' @section Request syntax:
#' ```
#' svc$get_bot_aliases(
#' botName = "string",
#' nextToken = "string",
#' maxResults = 123,
#' nameContains = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_get_bot_aliases
lexmodelbuildingservice_get_bot_aliases <- function(botName, nextToken = NULL, maxResults = NULL, nameContains = NULL) {
  # Describe the GetBotAliases REST operation.
  operation <- new_operation(
    name = "GetBotAliases",
    http_method = "GET",
    http_path = "/bots/{botName}/aliases/",
    paginator = list()
  )
  # Marshal the request parameters (optional ones pass through as NULL) and
  # the expected response shape.
  req_input <- .lexmodelbuildingservice$get_bot_aliases_input(botName = botName, nextToken = nextToken, maxResults = maxResults, nameContains = nameContains)
  req_output <- .lexmodelbuildingservice$get_bot_aliases_output()
  # Build a client from the current configuration, then issue the call.
  client <- .lexmodelbuildingservice$service(get_config())
  response <- send_request(new_request(client, operation, req_input, req_output))
  response
}
.lexmodelbuildingservice$operations$get_bot_aliases <- lexmodelbuildingservice_get_bot_aliases
#' Returns information about the association between an Amazon Lex bot and
#' a messaging platform
#'
#' Returns information about the association between an Amazon Lex bot and
#' a messaging platform.
#'
#' This operation requires permissions for the
#' `lex:GetBotChannelAssociation` action.
#'
#' @usage
#' lexmodelbuildingservice_get_bot_channel_association(name, botName,
#' botAlias)
#'
#' @param name [required] The name of the association between the bot and the channel. The name is
#' case sensitive.
#' @param botName [required] The name of the Amazon Lex bot.
#' @param botAlias [required] An alias pointing to the specific version of the Amazon Lex bot to which
#' this association is being made.
#'
#' @section Request syntax:
#' ```
#' svc$get_bot_channel_association(
#' name = "string",
#' botName = "string",
#' botAlias = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_get_bot_channel_association
lexmodelbuildingservice_get_bot_channel_association <- function(name, botName, botAlias) {
  # Describe the GetBotChannelAssociation REST operation.
  operation <- new_operation(
    name = "GetBotChannelAssociation",
    http_method = "GET",
    http_path = "/bots/{botName}/aliases/{aliasName}/channels/{name}",
    paginator = list()
  )
  # Marshal the request parameters and the expected response shape.
  req_input <- .lexmodelbuildingservice$get_bot_channel_association_input(name = name, botName = botName, botAlias = botAlias)
  req_output <- .lexmodelbuildingservice$get_bot_channel_association_output()
  # Build a client from the current configuration, then issue the call.
  client <- .lexmodelbuildingservice$service(get_config())
  response <- send_request(new_request(client, operation, req_input, req_output))
  response
}
.lexmodelbuildingservice$operations$get_bot_channel_association <- lexmodelbuildingservice_get_bot_channel_association
#' Returns a list of all of the channels associated with the specified bot
#'
#' Returns a list of all of the channels associated with the specified bot.
#'
#' The `GetBotChannelAssociations` operation requires permissions for the
#' `lex:GetBotChannelAssociations` action.
#'
#' @usage
#' lexmodelbuildingservice_get_bot_channel_associations(botName, botAlias,
#' nextToken, maxResults, nameContains)
#'
#' @param botName [required] The name of the Amazon Lex bot in the association.
#' @param botAlias [required] An alias pointing to the specific version of the Amazon Lex bot to which
#' this association is being made.
#' @param nextToken A pagination token for fetching the next page of associations. If the
#' response to this call is truncated, Amazon Lex returns a pagination
#' token in the response. To fetch the next page of associations, specify
#' the pagination token in the next request.
#' @param maxResults The maximum number of associations to return in the response. The
#' default is 50.
#' @param nameContains Substring to match in channel association names. An association will be
#' returned if any part of its name matches the substring. For example,
#' \"xyz\" matches both \"xyzabc\" and \"abcxyz.\" To return all bot
#' channel associations, use a hyphen (\"-\") as the `nameContains`
#' parameter.
#'
#' @section Request syntax:
#' ```
#' svc$get_bot_channel_associations(
#' botName = "string",
#' botAlias = "string",
#' nextToken = "string",
#' maxResults = 123,
#' nameContains = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_get_bot_channel_associations
lexmodelbuildingservice_get_bot_channel_associations <- function(botName, botAlias, nextToken = NULL, maxResults = NULL, nameContains = NULL) {
  # Describe the GetBotChannelAssociations REST operation.
  operation <- new_operation(
    name = "GetBotChannelAssociations",
    http_method = "GET",
    http_path = "/bots/{botName}/aliases/{aliasName}/channels/",
    paginator = list()
  )
  # Marshal the request parameters (optional ones pass through as NULL) and
  # the expected response shape.
  req_input <- .lexmodelbuildingservice$get_bot_channel_associations_input(botName = botName, botAlias = botAlias, nextToken = nextToken, maxResults = maxResults, nameContains = nameContains)
  req_output <- .lexmodelbuildingservice$get_bot_channel_associations_output()
  # Build a client from the current configuration, then issue the call.
  client <- .lexmodelbuildingservice$service(get_config())
  response <- send_request(new_request(client, operation, req_input, req_output))
  response
}
.lexmodelbuildingservice$operations$get_bot_channel_associations <- lexmodelbuildingservice_get_bot_channel_associations
#' Gets information about all of the versions of a bot
#'
#' Gets information about all of the versions of a bot.
#'
#' The `GetBotVersions` operation returns a `BotMetadata` object for each
#' version of a bot. For example, if a bot has three numbered versions, the
#' `GetBotVersions` operation returns four `BotMetadata` objects in the
#' response, one for each numbered version and one for the `$LATEST`
#' version.
#'
#' The `GetBotVersions` operation always returns at least one version, the
#' `$LATEST` version.
#'
#' This operation requires permissions for the `lex:GetBotVersions` action.
#'
#' @usage
#' lexmodelbuildingservice_get_bot_versions(name, nextToken, maxResults)
#'
#' @param name [required] The name of the bot for which versions should be returned.
#' @param nextToken A pagination token for fetching the next page of bot versions. If the
#' response to this call is truncated, Amazon Lex returns a pagination
#' token in the response. To fetch the next page of versions, specify the
#' pagination token in the next request.
#' @param maxResults The maximum number of bot versions to return in the response. The
#' default is 10.
#'
#' @section Request syntax:
#' ```
#' svc$get_bot_versions(
#' name = "string",
#' nextToken = "string",
#' maxResults = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_get_bot_versions
lexmodelbuildingservice_get_bot_versions <- function(name, nextToken = NULL, maxResults = NULL) {
  # Describe the GetBotVersions REST operation.
  operation <- new_operation(
    name = "GetBotVersions",
    http_method = "GET",
    http_path = "/bots/{name}/versions/",
    paginator = list()
  )
  # Marshal the request parameters (optional ones pass through as NULL) and
  # the expected response shape.
  req_input <- .lexmodelbuildingservice$get_bot_versions_input(name = name, nextToken = nextToken, maxResults = maxResults)
  req_output <- .lexmodelbuildingservice$get_bot_versions_output()
  # Build a client from the current configuration, then issue the call.
  client <- .lexmodelbuildingservice$service(get_config())
  response <- send_request(new_request(client, operation, req_input, req_output))
  response
}
.lexmodelbuildingservice$operations$get_bot_versions <- lexmodelbuildingservice_get_bot_versions
#' Returns bot information as follows: - If you provide the nameContains
#' field, the response includes information for the $LATEST version of all
#' bots whose name contains the specified string
#'
#' Returns bot information as follows:
#'
#' - If you provide the `nameContains` field, the response includes
#' information for the `$LATEST` version of all bots whose name
#' contains the specified string.
#'
#' - If you don\'t specify the `nameContains` field, the operation
#' returns information about the `$LATEST` version of all of your bots.
#'
#' This operation requires permission for the `lex:GetBots` action.
#'
#' @usage
#' lexmodelbuildingservice_get_bots(nextToken, maxResults, nameContains)
#'
#' @param nextToken A pagination token that fetches the next page of bots. If the response
#' to this call is truncated, Amazon Lex returns a pagination token in the
#' response. To fetch the next page of bots, specify the pagination token
#' in the next request.
#' @param maxResults The maximum number of bots to return in the response that the request
#' will return. The default is 10.
#' @param nameContains Substring to match in bot names. A bot will be returned if any part of
#' its name matches the substring. For example, \"xyz\" matches both
#' \"xyzabc\" and \"abcxyz.\"
#'
#' @section Request syntax:
#' ```
#' svc$get_bots(
#' nextToken = "string",
#' maxResults = 123,
#' nameContains = "string"
#' )
#' ```
#'
#' @examples
#' # This example shows how to get a list of all of the bots in your account.
#' \donttest{svc$get_bots(
#' maxResults = 5L,
#' nextToken = ""
#' )}
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_get_bots
lexmodelbuildingservice_get_bots <- function(nextToken = NULL, maxResults = NULL, nameContains = NULL) {
  # Describe the GetBots REST operation.
  operation <- new_operation(
    name = "GetBots",
    http_method = "GET",
    http_path = "/bots/",
    paginator = list()
  )
  # Marshal the request parameters (optional ones pass through as NULL) and
  # the expected response shape.
  req_input <- .lexmodelbuildingservice$get_bots_input(nextToken = nextToken, maxResults = maxResults, nameContains = nameContains)
  req_output <- .lexmodelbuildingservice$get_bots_output()
  # Build a client from the current configuration, then issue the call.
  client <- .lexmodelbuildingservice$service(get_config())
  response <- send_request(new_request(client, operation, req_input, req_output))
  response
}
.lexmodelbuildingservice$operations$get_bots <- lexmodelbuildingservice_get_bots
#' Returns information about a built-in intent
#'
#' Returns information about a built-in intent.
#'
#' This operation requires permission for the `lex:GetBuiltinIntent`
#' action.
#'
#' @usage
#' lexmodelbuildingservice_get_builtin_intent(signature)
#'
#' @param signature [required] The unique identifier for a built-in intent. To find the signature for
#' an intent, see [Standard Built-in
#' Intents](https://developer.amazon.com/public/solutions/alexa/alexa-skills-kit/docs/built-in-intent-ref/standard-intents)
#' in the *Alexa Skills Kit*.
#'
#' @section Request syntax:
#' ```
#' svc$get_builtin_intent(
#' signature = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_get_builtin_intent
lexmodelbuildingservice_get_builtin_intent <- function(signature) {
  # Describe the GetBuiltinIntent REST operation.
  operation <- new_operation(
    name = "GetBuiltinIntent",
    http_method = "GET",
    http_path = "/builtins/intents/{signature}",
    paginator = list()
  )
  # Marshal the request parameters and the expected response shape.
  req_input <- .lexmodelbuildingservice$get_builtin_intent_input(signature = signature)
  req_output <- .lexmodelbuildingservice$get_builtin_intent_output()
  # Build a client from the current configuration, then issue the call.
  client <- .lexmodelbuildingservice$service(get_config())
  response <- send_request(new_request(client, operation, req_input, req_output))
  response
}
.lexmodelbuildingservice$operations$get_builtin_intent <- lexmodelbuildingservice_get_builtin_intent
#' Gets a list of built-in intents that meet the specified criteria
#'
#' Gets a list of built-in intents that meet the specified criteria.
#'
#' This operation requires permission for the `lex:GetBuiltinIntents`
#' action.
#'
#' @usage
#' lexmodelbuildingservice_get_builtin_intents(locale, signatureContains,
#' nextToken, maxResults)
#'
#' @param locale A list of locales that the intent supports.
#' @param signatureContains Substring to match in built-in intent signatures. An intent will be
#' returned if any part of its signature matches the substring. For
#' example, \"xyz\" matches both \"xyzabc\" and \"abcxyz.\" To find the
#' signature for an intent, see [Standard Built-in
#' Intents](https://developer.amazon.com/public/solutions/alexa/alexa-skills-kit/docs/built-in-intent-ref/standard-intents)
#' in the *Alexa Skills Kit*.
#' @param nextToken A pagination token that fetches the next page of intents. If this API
#' call is truncated, Amazon Lex returns a pagination token in the
#' response. To fetch the next page of intents, use the pagination token in
#' the next request.
#' @param maxResults The maximum number of intents to return in the response. The default is
#' 10.
#'
#' @section Request syntax:
#' ```
#' svc$get_builtin_intents(
#' locale = "en-US"|"en-GB"|"de-DE",
#' signatureContains = "string",
#' nextToken = "string",
#' maxResults = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_get_builtin_intents
lexmodelbuildingservice_get_builtin_intents <- function(locale = NULL, signatureContains = NULL, nextToken = NULL, maxResults = NULL) {
  # Describe the GetBuiltinIntents REST operation.
  operation <- new_operation(
    name = "GetBuiltinIntents",
    http_method = "GET",
    http_path = "/builtins/intents/",
    paginator = list()
  )
  # Marshal the request parameters (optional ones pass through as NULL) and
  # the expected response shape.
  req_input <- .lexmodelbuildingservice$get_builtin_intents_input(locale = locale, signatureContains = signatureContains, nextToken = nextToken, maxResults = maxResults)
  req_output <- .lexmodelbuildingservice$get_builtin_intents_output()
  # Build a client from the current configuration, then issue the call.
  client <- .lexmodelbuildingservice$service(get_config())
  response <- send_request(new_request(client, operation, req_input, req_output))
  response
}
.lexmodelbuildingservice$operations$get_builtin_intents <- lexmodelbuildingservice_get_builtin_intents
#' Gets a list of built-in slot types that meet the specified criteria
#'
#' Gets a list of built-in slot types that meet the specified criteria.
#'
#' For a list of built-in slot types, see [Slot Type
#' Reference](https://developer.amazon.com/public/solutions/alexa/alexa-skills-kit/docs/built-in-intent-ref/slot-type-reference)
#' in the *Alexa Skills Kit*.
#'
#' This operation requires permission for the `lex:GetBuiltInSlotTypes`
#' action.
#'
#' @usage
#' lexmodelbuildingservice_get_builtin_slot_types(locale,
#' signatureContains, nextToken, maxResults)
#'
#' @param locale A list of locales that the slot type supports.
#' @param signatureContains Substring to match in built-in slot type signatures. A slot type will be
#' returned if any part of its signature matches the substring. For
#' example, \"xyz\" matches both \"xyzabc\" and \"abcxyz.\"
#' @param nextToken A pagination token that fetches the next page of slot types. If the
#' response to this API call is truncated, Amazon Lex returns a pagination
#' token in the response. To fetch the next page of slot types, specify the
#' pagination token in the next request.
#' @param maxResults The maximum number of slot types to return in the response. The default
#' is 10.
#'
#' @section Request syntax:
#' ```
#' svc$get_builtin_slot_types(
#' locale = "en-US"|"en-GB"|"de-DE",
#' signatureContains = "string",
#' nextToken = "string",
#' maxResults = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_get_builtin_slot_types
lexmodelbuildingservice_get_builtin_slot_types <- function(locale = NULL, signatureContains = NULL, nextToken = NULL, maxResults = NULL) {
  # Describe the GetBuiltinSlotTypes REST operation.
  operation <- new_operation(
    name = "GetBuiltinSlotTypes",
    http_method = "GET",
    http_path = "/builtins/slottypes/",
    paginator = list()
  )
  # Marshal the request parameters (optional ones pass through as NULL) and
  # the expected response shape.
  req_input <- .lexmodelbuildingservice$get_builtin_slot_types_input(locale = locale, signatureContains = signatureContains, nextToken = nextToken, maxResults = maxResults)
  req_output <- .lexmodelbuildingservice$get_builtin_slot_types_output()
  # Build a client from the current configuration, then issue the call.
  client <- .lexmodelbuildingservice$service(get_config())
  response <- send_request(new_request(client, operation, req_input, req_output))
  response
}
.lexmodelbuildingservice$operations$get_builtin_slot_types <- lexmodelbuildingservice_get_builtin_slot_types
#' Exports the contents of an Amazon Lex resource in a specified format
#'
#' Exports the contents of an Amazon Lex resource in a specified format.
#'
#' @usage
#' lexmodelbuildingservice_get_export(name, version, resourceType,
#' exportType)
#'
#' @param name [required] The name of the bot to export.
#' @param version [required] The version of the bot to export.
#' @param resourceType [required] The type of resource to export.
#' @param exportType [required] The format of the exported data.
#'
#' @section Request syntax:
#' ```
#' svc$get_export(
#' name = "string",
#' version = "string",
#' resourceType = "BOT"|"INTENT"|"SLOT_TYPE",
#' exportType = "ALEXA_SKILLS_KIT"|"LEX"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_get_export
lexmodelbuildingservice_get_export <- function(name, version, resourceType, exportType) {
  # Describe the GetExport REST operation.
  operation <- new_operation(
    name = "GetExport",
    http_method = "GET",
    http_path = "/exports/",
    paginator = list()
  )
  # Marshal the request parameters and the expected response shape.
  req_input <- .lexmodelbuildingservice$get_export_input(name = name, version = version, resourceType = resourceType, exportType = exportType)
  req_output <- .lexmodelbuildingservice$get_export_output()
  # Build a client from the current configuration, then issue the call.
  client <- .lexmodelbuildingservice$service(get_config())
  response <- send_request(new_request(client, operation, req_input, req_output))
  response
}
.lexmodelbuildingservice$operations$get_export <- lexmodelbuildingservice_get_export
#' Gets information about an import job started with the StartImport
#' operation
#'
#' Gets information about an import job started with the `StartImport`
#' operation.
#'
#' @usage
#' lexmodelbuildingservice_get_import(importId)
#'
#' @param importId [required] The identifier of the import job information to return.
#'
#' @section Request syntax:
#' ```
#' svc$get_import(
#' importId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_get_import
lexmodelbuildingservice_get_import <- function(importId) {
  # Describe the GetImport REST operation.
  operation <- new_operation(
    name = "GetImport",
    http_method = "GET",
    http_path = "/imports/{importId}",
    paginator = list()
  )
  # Marshal the request parameters and the expected response shape.
  req_input <- .lexmodelbuildingservice$get_import_input(importId = importId)
  req_output <- .lexmodelbuildingservice$get_import_output()
  # Build a client from the current configuration, then issue the call.
  client <- .lexmodelbuildingservice$service(get_config())
  response <- send_request(new_request(client, operation, req_input, req_output))
  response
}
.lexmodelbuildingservice$operations$get_import <- lexmodelbuildingservice_get_import
#' Returns information about an intent
#'
#' Returns information about an intent. In addition to the intent name, you
#' must specify the intent version.
#'
#' This operation requires permissions to perform the `lex:GetIntent`
#' action.
#'
#' @usage
#' lexmodelbuildingservice_get_intent(name, version)
#'
#' @param name [required] The name of the intent. The name is case sensitive.
#' @param version [required] The version of the intent.
#'
#' @section Request syntax:
#' ```
#' svc$get_intent(
#' name = "string",
#' version = "string"
#' )
#' ```
#'
#' @examples
#' # This example shows how to get information about an intent.
#' \donttest{svc$get_intent(
#' version = "$LATEST",
#' name = "DocOrderPizza"
#' )}
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_get_intent
lexmodelbuildingservice_get_intent <- function(name, version) {
  # Describe the GetIntent REST operation.
  operation <- new_operation(
    name = "GetIntent",
    http_method = "GET",
    http_path = "/intents/{name}/versions/{version}",
    paginator = list()
  )
  # Marshal the request parameters and the expected response shape.
  req_input <- .lexmodelbuildingservice$get_intent_input(name = name, version = version)
  req_output <- .lexmodelbuildingservice$get_intent_output()
  # Build a client from the current configuration, then issue the call.
  client <- .lexmodelbuildingservice$service(get_config())
  response <- send_request(new_request(client, operation, req_input, req_output))
  response
}
.lexmodelbuildingservice$operations$get_intent <- lexmodelbuildingservice_get_intent
#' Gets information about all of the versions of an intent
#'
#' Gets information about all of the versions of an intent.
#'
#' The `GetIntentVersions` operation returns an `IntentMetadata` object for
#' each version of an intent. For example, if an intent has three numbered
#' versions, the `GetIntentVersions` operation returns four
#' `IntentMetadata` objects in the response, one for each numbered version
#' and one for the `$LATEST` version.
#'
#' The `GetIntentVersions` operation always returns at least one version,
#' the `$LATEST` version.
#'
#' This operation requires permissions for the `lex:GetIntentVersions`
#' action.
#'
#' @usage
#' lexmodelbuildingservice_get_intent_versions(name, nextToken, maxResults)
#'
#' @param name [required] The name of the intent for which versions should be returned.
#' @param nextToken A pagination token for fetching the next page of intent versions. If the
#' response to this call is truncated, Amazon Lex returns a pagination
#' token in the response. To fetch the next page of versions, specify the
#' pagination token in the next request.
#' @param maxResults The maximum number of intent versions to return in the response. The
#' default is 10.
#'
#' @section Request syntax:
#' ```
#' svc$get_intent_versions(
#'   name = "string",
#'   nextToken = "string",
#'   maxResults = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_get_intent_versions
lexmodelbuildingservice_get_intent_versions <- function(name, nextToken = NULL, maxResults = NULL) {
  # Describe the GetIntentVersions REST call (empty paginator in this
  # service definition even though the API itself is token-paginated).
  operation <- new_operation(
    name = "GetIntentVersions",
    http_method = "GET",
    http_path = "/intents/{name}/versions/",
    paginator = list()
  )
  # Build the service client from the current configuration, then assemble
  # and dispatch the request, returning the parsed response.
  svc <- .lexmodelbuildingservice$service(get_config())
  op_input <- .lexmodelbuildingservice$get_intent_versions_input(name = name, nextToken = nextToken, maxResults = maxResults)
  op_output <- .lexmodelbuildingservice$get_intent_versions_output()
  send_request(new_request(svc, operation, op_input, op_output))
}
.lexmodelbuildingservice$operations$get_intent_versions <- lexmodelbuildingservice_get_intent_versions
#' Returns intent information as follows: - If you specify the nameContains
#' field, returns the $LATEST version of all intents that contain the
#' specified string
#'
#' Returns intent information as follows:
#'
#' -   If you specify the `nameContains` field, returns the `$LATEST`
#'     version of all intents that contain the specified string.
#'
#' -   If you don\'t specify the `nameContains` field, returns information
#'     about the `$LATEST` version of all intents.
#'
#' The operation requires permission for the `lex:GetIntents` action.
#'
#' @usage
#' lexmodelbuildingservice_get_intents(nextToken, maxResults, nameContains)
#'
#' @param nextToken A pagination token that fetches the next page of intents. If the
#' response to this API call is truncated, Amazon Lex returns a pagination
#' token in the response. To fetch the next page of intents, specify the
#' pagination token in the next request.
#' @param maxResults The maximum number of intents to return in the response. The default is
#' 10.
#' @param nameContains Substring to match in intent names. An intent will be returned if any
#' part of its name matches the substring. For example, \"xyz\" matches
#' both \"xyzabc\" and \"abcxyz.\"
#'
#' @section Request syntax:
#' ```
#' svc$get_intents(
#'   nextToken = "string",
#'   maxResults = 123,
#'   nameContains = "string"
#' )
#' ```
#'
#' @examples
#' # This example shows how to get a list of all of the intents in your
#' # account.
#' \donttest{svc$get_intents(
#'   maxResults = 10L,
#'   nextToken = ""
#' )}
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_get_intents
lexmodelbuildingservice_get_intents <- function(nextToken = NULL, maxResults = NULL, nameContains = NULL) {
  # Describe the GetIntents REST call (empty paginator in this service
  # definition even though the API itself is token-paginated).
  operation <- new_operation(
    name = "GetIntents",
    http_method = "GET",
    http_path = "/intents/",
    paginator = list()
  )
  # Build the service client from the current configuration, then assemble
  # and dispatch the request, returning the parsed response.
  svc <- .lexmodelbuildingservice$service(get_config())
  op_input <- .lexmodelbuildingservice$get_intents_input(nextToken = nextToken, maxResults = maxResults, nameContains = nameContains)
  op_output <- .lexmodelbuildingservice$get_intents_output()
  send_request(new_request(svc, operation, op_input, op_output))
}
.lexmodelbuildingservice$operations$get_intents <- lexmodelbuildingservice_get_intents
#' Returns information about a specific version of a slot type
#'
#' Returns information about a specific version of a slot type. In addition
#' to specifying the slot type name, you must specify the slot type
#' version.
#'
#' This operation requires permissions for the `lex:GetSlotType` action.
#'
#' @usage
#' lexmodelbuildingservice_get_slot_type(name, version)
#'
#' @param name [required] The name of the slot type. The name is case sensitive.
#' @param version [required] The version of the slot type.
#'
#' @section Request syntax:
#' ```
#' svc$get_slot_type(
#'   name = "string",
#'   version = "string"
#' )
#' ```
#'
#' @examples
#' # This example shows how to get information about a slot type.
#' \donttest{svc$get_slot_type(
#'   version = "$LATEST",
#'   name = "DocPizzaCrustType"
#' )}
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_get_slot_type
lexmodelbuildingservice_get_slot_type <- function(name, version) {
  # Describe the GetSlotType REST call (empty paginator: not a paginated operation).
  operation <- new_operation(
    name = "GetSlotType",
    http_method = "GET",
    http_path = "/slottypes/{name}/versions/{version}",
    paginator = list()
  )
  # Build the service client from the current configuration, then assemble
  # and dispatch the request, returning the parsed response.
  svc <- .lexmodelbuildingservice$service(get_config())
  op_input <- .lexmodelbuildingservice$get_slot_type_input(name = name, version = version)
  op_output <- .lexmodelbuildingservice$get_slot_type_output()
  send_request(new_request(svc, operation, op_input, op_output))
}
.lexmodelbuildingservice$operations$get_slot_type <- lexmodelbuildingservice_get_slot_type
#' Gets information about all versions of a slot type
#'
#' Gets information about all versions of a slot type.
#'
#' The `GetSlotTypeVersions` operation returns a `SlotTypeMetadata` object
#' for each version of a slot type. For example, if a slot type has three
#' numbered versions, the `GetSlotTypeVersions` operation returns four
#' `SlotTypeMetadata` objects in the response, one for each numbered
#' version and one for the `$LATEST` version.
#'
#' The `GetSlotTypeVersions` operation always returns at least one version,
#' the `$LATEST` version.
#'
#' This operation requires permissions for the `lex:GetSlotTypeVersions`
#' action.
#'
#' @usage
#' lexmodelbuildingservice_get_slot_type_versions(name, nextToken,
#'   maxResults)
#'
#' @param name [required] The name of the slot type for which versions should be returned.
#' @param nextToken A pagination token for fetching the next page of slot type versions. If
#' the response to this call is truncated, Amazon Lex returns a pagination
#' token in the response. To fetch the next page of versions, specify the
#' pagination token in the next request.
#' @param maxResults The maximum number of slot type versions to return in the response. The
#' default is 10.
#'
#' @section Request syntax:
#' ```
#' svc$get_slot_type_versions(
#'   name = "string",
#'   nextToken = "string",
#'   maxResults = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_get_slot_type_versions
lexmodelbuildingservice_get_slot_type_versions <- function(name, nextToken = NULL, maxResults = NULL) {
  # Describe the GetSlotTypeVersions REST call (empty paginator in this
  # service definition even though the API itself is token-paginated).
  operation <- new_operation(
    name = "GetSlotTypeVersions",
    http_method = "GET",
    http_path = "/slottypes/{name}/versions/",
    paginator = list()
  )
  # Build the service client from the current configuration, then assemble
  # and dispatch the request, returning the parsed response.
  svc <- .lexmodelbuildingservice$service(get_config())
  op_input <- .lexmodelbuildingservice$get_slot_type_versions_input(name = name, nextToken = nextToken, maxResults = maxResults)
  op_output <- .lexmodelbuildingservice$get_slot_type_versions_output()
  send_request(new_request(svc, operation, op_input, op_output))
}
.lexmodelbuildingservice$operations$get_slot_type_versions <- lexmodelbuildingservice_get_slot_type_versions
#' Returns slot type information as follows: - If you specify the
#' nameContains field, returns the $LATEST version of all slot types that
#' contain the specified string
#'
#' Returns slot type information as follows:
#'
#' -   If you specify the `nameContains` field, returns the `$LATEST`
#'     version of all slot types that contain the specified string.
#'
#' -   If you don\'t specify the `nameContains` field, returns information
#'     about the `$LATEST` version of all slot types.
#'
#' The operation requires permission for the `lex:GetSlotTypes` action.
#'
#' @usage
#' lexmodelbuildingservice_get_slot_types(nextToken, maxResults,
#'   nameContains)
#'
#' @param nextToken A pagination token that fetches the next page of slot types. If the
#' response to this API call is truncated, Amazon Lex returns a pagination
#' token in the response. To fetch the next page of slot types, specify the
#' pagination token in the next request.
#' @param maxResults The maximum number of slot types to return in the response. The default
#' is 10.
#' @param nameContains Substring to match in slot type names. A slot type will be returned if
#' any part of its name matches the substring. For example, \"xyz\" matches
#' both \"xyzabc\" and \"abcxyz.\"
#'
#' @section Request syntax:
#' ```
#' svc$get_slot_types(
#'   nextToken = "string",
#'   maxResults = 123,
#'   nameContains = "string"
#' )
#' ```
#'
#' @examples
#' # This example shows how to get a list of all of the slot types in your
#' # account.
#' \donttest{svc$get_slot_types(
#'   maxResults = 10L,
#'   nextToken = ""
#' )}
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_get_slot_types
lexmodelbuildingservice_get_slot_types <- function(nextToken = NULL, maxResults = NULL, nameContains = NULL) {
  # Describe the GetSlotTypes REST call (empty paginator in this service
  # definition even though the API itself is token-paginated).
  operation <- new_operation(
    name = "GetSlotTypes",
    http_method = "GET",
    http_path = "/slottypes/",
    paginator = list()
  )
  # Build the service client from the current configuration, then assemble
  # and dispatch the request, returning the parsed response.
  svc <- .lexmodelbuildingservice$service(get_config())
  op_input <- .lexmodelbuildingservice$get_slot_types_input(nextToken = nextToken, maxResults = maxResults, nameContains = nameContains)
  op_output <- .lexmodelbuildingservice$get_slot_types_output()
  send_request(new_request(svc, operation, op_input, op_output))
}
.lexmodelbuildingservice$operations$get_slot_types <- lexmodelbuildingservice_get_slot_types
#' Use the GetUtterancesView operation to get information about the
#' utterances that your users have made to your bot
#'
#' Use the `GetUtterancesView` operation to get information about the
#' utterances that your users have made to your bot. You can use this list
#' to tune the utterances that your bot responds to.
#'
#' For example, say that you have created a bot to order flowers. After
#' your users have used your bot for a while, use the `GetUtterancesView`
#' operation to see the requests that they have made and whether they have
#' been successful. You might find that the utterance \"I want flowers\" is
#' not being recognized. You could add this utterance to the `OrderFlowers`
#' intent so that your bot recognizes that utterance.
#'
#' After you publish a new version of a bot, you can get information about
#' the old version and the new so that you can compare the performance
#' across the two versions.
#'
#' Utterance statistics are generated once a day. Data is available for the
#' last 15 days. You can request information for up to 5 versions of your
#' bot in each request. Amazon Lex returns the most frequent utterances
#' received by the bot in the last 15 days. The response contains
#' information about a maximum of 100 utterances for each version.
#'
#' If you set `childDirected` field to true when you created your bot, or
#' if you opted out of participating in improving Amazon Lex, utterances
#' are not available.
#'
#' This operation requires permissions for the `lex:GetUtterancesView`
#' action.
#'
#' @usage
#' lexmodelbuildingservice_get_utterances_view(botName, botVersions,
#'   statusType)
#'
#' @param botName [required] The name of the bot for which utterance information should be returned.
#' @param botVersions [required] An array of bot versions for which utterance information should be
#' returned. The limit is 5 versions per request.
#' @param statusType [required] To return utterances that were recognized and handled, use `Detected`.
#' To return utterances that were not recognized, use `Missed`.
#'
#' @section Request syntax:
#' ```
#' svc$get_utterances_view(
#'   botName = "string",
#'   botVersions = list(
#'     "string"
#'   ),
#'   statusType = "Detected"|"Missed"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_get_utterances_view
lexmodelbuildingservice_get_utterances_view <- function(botName, botVersions, statusType) {
  # Describe the GetUtterancesView REST call.
  # NOTE(review): the URI placeholder is lowercase "{botname}" while the R
  # argument is "botName" -- this appears to mirror the AWS service
  # definition; confirm against the Lex Model Building API before changing.
  operation <- new_operation(
    name = "GetUtterancesView",
    http_method = "GET",
    http_path = "/bots/{botname}/utterances?view=aggregation",
    paginator = list()
  )
  # Build the service client from the current configuration, then assemble
  # and dispatch the request, returning the parsed response.
  svc <- .lexmodelbuildingservice$service(get_config())
  op_input <- .lexmodelbuildingservice$get_utterances_view_input(botName = botName, botVersions = botVersions, statusType = statusType)
  op_output <- .lexmodelbuildingservice$get_utterances_view_output()
  send_request(new_request(svc, operation, op_input, op_output))
}
.lexmodelbuildingservice$operations$get_utterances_view <- lexmodelbuildingservice_get_utterances_view
#' Creates an Amazon Lex conversational bot or replaces an existing bot
#'
#' Creates an Amazon Lex conversational bot or replaces an existing bot.
#' When you create or update a bot you are only required to specify a name,
#' a locale, and whether the bot is directed toward children under age 13.
#' You can use this to add intents later, or to remove intents from an
#' existing bot. When you create a bot with the minimum information, the
#' bot is created or updated but Amazon Lex returns the response
#' `FAILED`. You can build the bot after you add one or more intents. For
#' more information about Amazon Lex bots, see how-it-works.
#'
#' If you specify the name of an existing bot, the fields in the request
#' replace the existing values in the `$LATEST` version of the bot. Amazon
#' Lex removes any fields that you don\'t provide values for in the
#' request, except for the `idleTTLInSeconds` and `privacySettings` fields,
#' which are set to their default values. If you don\'t specify values for
#' required fields, Amazon Lex throws an exception.
#'
#' This operation requires permissions for the `lex:PutBot` action. For
#' more information, see security-iam.
#'
#' @usage
#' lexmodelbuildingservice_put_bot(name, description, intents,
#'   clarificationPrompt, abortStatement, idleSessionTTLInSeconds, voiceId,
#'   checksum, processBehavior, locale, childDirected, detectSentiment,
#'   createVersion)
#'
#' @param name [required] The name of the bot. The name is *not* case sensitive.
#' @param description A description of the bot.
#' @param intents An array of `Intent` objects. Each intent represents a command that a
#' user can express. For example, a pizza ordering bot might support an
#' OrderPizza intent. For more information, see how-it-works.
#' @param clarificationPrompt When Amazon Lex doesn\'t understand the user\'s intent, it uses this
#' message to get clarification. To specify how many times Amazon Lex
#' should repeat the clarification prompt, use the `maxAttempts` field. If
#' Amazon Lex still doesn\'t understand, it sends the message in the
#' `abortStatement` field.
#'
#' When you create a clarification prompt, make sure that it suggests the
#' correct response from the user. For example, for a bot that orders pizza
#' and drinks, you might create this clarification prompt: \"What would you
#' like to do? You can say \'Order a pizza\' or \'Order a drink.\'\"
#'
#' If you have defined a fallback intent, it will be invoked if the
#' clarification prompt is repeated the number of times defined in the
#' `maxAttempts` field. For more information, see
#' [AMAZON.FallbackIntent](https://docs.aws.amazon.com/lex/latest/dg/built-in-intent-fallback.html).
#'
#' If you don\'t define a clarification prompt, at runtime Amazon Lex will
#' return a 400 Bad Request exception in three cases:
#'
#' -   Follow-up prompt - When the user responds to a follow-up prompt but
#'     does not provide an intent. For example, in response to a follow-up
#'     prompt that says \"Would you like anything else today?\" the user
#'     says \"Yes.\" Amazon Lex will return a 400 Bad Request exception
#'     because it does not have a clarification prompt to send to the user
#'     to get an intent.
#'
#' -   Lambda function - When using a Lambda function, you return an
#'     `ElicitIntent` dialog type. Since Amazon Lex does not have a
#'     clarification prompt to get an intent from the user, it returns a
#'     400 Bad Request exception.
#'
#' -   PutSession operation - When using the `PutSession` operation, you
#'     send an `ElicitIntent` dialog type. Since Amazon Lex does not have a
#'     clarification prompt to get an intent from the user, it returns a
#'     400 Bad Request exception.
#' @param abortStatement When Amazon Lex can\'t understand the user\'s input in context, it tries
#' to elicit the information a few times. After that, Amazon Lex sends the
#' message defined in `abortStatement` to the user, and then aborts the
#' conversation. To set the number of retries, use the
#' `valueElicitationPrompt` field for the slot type.
#'
#' For example, in a pizza ordering bot, Amazon Lex might ask a user \"What
#' type of crust would you like?\" If the user\'s response is not one of
#' the expected responses (for example, \"thin crust, \"deep dish,\" etc.),
#' Amazon Lex tries to elicit a correct response a few more times.
#'
#' For example, in a pizza ordering application, `OrderPizza` might be one
#' of the intents. This intent might require the `CrustType` slot. You
#' specify the `valueElicitationPrompt` field when you create the
#' `CrustType` slot.
#'
#' If you have defined a fallback intent the abort statement will not be
#' sent to the user, the fallback intent is used instead. For more
#' information, see
#' [AMAZON.FallbackIntent](https://docs.aws.amazon.com/lex/latest/dg/built-in-intent-fallback.html).
#' @param idleSessionTTLInSeconds The maximum time in seconds that Amazon Lex retains the data gathered in
#' a conversation.
#'
#' A user interaction session remains active for the amount of time
#' specified. If no conversation occurs during this time, the session
#' expires and Amazon Lex deletes any data provided before the timeout.
#'
#' For example, suppose that a user chooses the OrderPizza intent, but gets
#' sidetracked halfway through placing an order. If the user doesn\'t
#' complete the order within the specified time, Amazon Lex discards the
#' slot information that it gathered, and the user must start over.
#'
#' If you don\'t include the `idleSessionTTLInSeconds` element in a
#' `PutBot` operation request, Amazon Lex uses the default value. This is
#' also true if the request replaces an existing bot.
#'
#' The default is 300 seconds (5 minutes).
#' @param voiceId The Amazon Polly voice ID that you want Amazon Lex to use for voice
#' interactions with the user. The locale configured for the voice must
#' match the locale of the bot. For more information, see [Voices in Amazon
#' Polly](https://docs.aws.amazon.com/polly/latest/dg/voicelist.html) in
#' the *Amazon Polly Developer Guide*.
#' @param checksum Identifies a specific revision of the `$LATEST` version.
#'
#' When you create a new bot, leave the `checksum` field blank. If you
#' specify a checksum you get a `BadRequestException` exception.
#'
#' When you want to update a bot, set the `checksum` field to the checksum
#' of the most recent revision of the `$LATEST` version. If you don\'t
#' specify the ` checksum` field, or if the checksum does not match the
#' `$LATEST` version, you get a `PreconditionFailedException` exception.
#' @param processBehavior If you set the `processBehavior` element to `BUILD`, Amazon Lex builds
#' the bot so that it can be run. If you set the element to `SAVE` Amazon
#' Lex saves the bot, but doesn\'t build it.
#'
#' If you don\'t specify this value, the default value is `BUILD`.
#' @param locale [required] Specifies the target locale for the bot. Any intent used in the bot must
#' be compatible with the locale of the bot.
#'
#' The default is `en-US`.
#' @param childDirected [required] For each Amazon Lex bot created with the Amazon Lex Model Building
#' Service, you must specify whether your use of Amazon Lex is related to a
#' website, program, or other application that is directed or targeted, in
#' whole or in part, to children under age 13 and subject to the
#' Children\'s Online Privacy Protection Act (COPPA) by specifying `true`
#' or `false` in the `childDirected` field. By specifying `true` in the
#' `childDirected` field, you confirm that your use of Amazon Lex **is**
#' related to a website, program, or other application that is directed or
#' targeted, in whole or in part, to children under age 13 and subject to
#' COPPA. By specifying `false` in the `childDirected` field, you confirm
#' that your use of Amazon Lex **is not** related to a website, program, or
#' other application that is directed or targeted, in whole or in part, to
#' children under age 13 and subject to COPPA. You may not specify a
#' default value for the `childDirected` field that does not accurately
#' reflect whether your use of Amazon Lex is related to a website, program,
#' or other application that is directed or targeted, in whole or in part,
#' to children under age 13 and subject to COPPA.
#'
#' If your use of Amazon Lex relates to a website, program, or other
#' application that is directed in whole or in part, to children under age
#' 13, you must obtain any required verifiable parental consent under
#' COPPA. For information regarding the use of Amazon Lex in connection
#' with websites, programs, or other applications that are directed or
#' targeted, in whole or in part, to children under age 13, see the [Amazon
#' Lex FAQ.](https://aws.amazon.com/lex/faqs#data-security)
#' @param detectSentiment When set to `true` user utterances are sent to Amazon Comprehend for
#' sentiment analysis. If you don\'t specify `detectSentiment`, the default
#' is `false`.
#' @param createVersion When set to `true` a new numbered version of the bot is created. This is
#' the same as calling the `CreateBotVersion` operation. If you don\'t
#' specify `createVersion`, the default is `false`.
#'
#' @section Request syntax:
#' ```
#' svc$put_bot(
#'   name = "string",
#'   description = "string",
#'   intents = list(
#'     list(
#'       intentName = "string",
#'       intentVersion = "string"
#'     )
#'   ),
#'   clarificationPrompt = list(
#'     messages = list(
#'       list(
#'         contentType = "PlainText"|"SSML"|"CustomPayload",
#'         content = "string",
#'         groupNumber = 123
#'       )
#'     ),
#'     maxAttempts = 123,
#'     responseCard = "string"
#'   ),
#'   abortStatement = list(
#'     messages = list(
#'       list(
#'         contentType = "PlainText"|"SSML"|"CustomPayload",
#'         content = "string",
#'         groupNumber = 123
#'       )
#'     ),
#'     responseCard = "string"
#'   ),
#'   idleSessionTTLInSeconds = 123,
#'   voiceId = "string",
#'   checksum = "string",
#'   processBehavior = "SAVE"|"BUILD",
#'   locale = "en-US"|"en-GB"|"de-DE",
#'   childDirected = TRUE|FALSE,
#'   detectSentiment = TRUE|FALSE,
#'   createVersion = TRUE|FALSE
#' )
#' ```
#'
#' @examples
#' # This example shows how to create a bot for ordering pizzas.
#' \donttest{svc$put_bot(
#'   name = "DocOrderPizzaBot",
#'   abortStatement = list(
#'     messages = list(
#'       list(
#'         content = "I don't understand. Can you try again?",
#'         contentType = "PlainText"
#'       ),
#'       list(
#'         content = "I'm sorry, I don't understand.",
#'         contentType = "PlainText"
#'       )
#'     )
#'   ),
#'   childDirected = TRUE,
#'   clarificationPrompt = list(
#'     maxAttempts = 1L,
#'     messages = list(
#'       list(
#'         content = "I'm sorry, I didn't hear that. Can you repeat what you just said?",
#'         contentType = "PlainText"
#'       ),
#'       list(
#'         content = "Can you say that again?",
#'         contentType = "PlainText"
#'       )
#'     )
#'   ),
#'   description = "Orders a pizza from a local pizzeria.",
#'   idleSessionTTLInSeconds = 300L,
#'   intents = list(
#'     list(
#'       intentName = "DocOrderPizza",
#'       intentVersion = "$LATEST"
#'     )
#'   ),
#'   locale = "en-US",
#'   processBehavior = "SAVE"
#' )}
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_put_bot
lexmodelbuildingservice_put_bot <- function(name, description = NULL, intents = NULL, clarificationPrompt = NULL, abortStatement = NULL, idleSessionTTLInSeconds = NULL, voiceId = NULL, checksum = NULL, processBehavior = NULL, locale, childDirected, detectSentiment = NULL, createVersion = NULL) {
  # Describe the PutBot REST call; bots are always written to the $LATEST
  # version (empty paginator: not a paginated operation).
  operation <- new_operation(
    name = "PutBot",
    http_method = "PUT",
    http_path = "/bots/{name}/versions/$LATEST",
    paginator = list()
  )
  # Build the service client from the current configuration, then assemble
  # and dispatch the request, returning the parsed response.
  svc <- .lexmodelbuildingservice$service(get_config())
  op_input <- .lexmodelbuildingservice$put_bot_input(name = name, description = description, intents = intents, clarificationPrompt = clarificationPrompt, abortStatement = abortStatement, idleSessionTTLInSeconds = idleSessionTTLInSeconds, voiceId = voiceId, checksum = checksum, processBehavior = processBehavior, locale = locale, childDirected = childDirected, detectSentiment = detectSentiment, createVersion = createVersion)
  op_output <- .lexmodelbuildingservice$put_bot_output()
  send_request(new_request(svc, operation, op_input, op_output))
}
.lexmodelbuildingservice$operations$put_bot <- lexmodelbuildingservice_put_bot
#' Creates an alias for the specified version of the bot or replaces an
#' alias for the specified bot
#'
#' Creates an alias for the specified version of the bot or replaces an
#' alias for the specified bot. To change the version of the bot that the
#' alias points to, replace the alias. For more information about aliases,
#' see versioning-aliases.
#'
#' This operation requires permissions for the `lex:PutBotAlias` action.
#'
#' @usage
#' lexmodelbuildingservice_put_bot_alias(name, description, botVersion,
#'   botName, checksum, conversationLogs)
#'
#' @param name [required] The name of the alias. The name is *not* case sensitive.
#' @param description A description of the alias.
#' @param botVersion [required] The version of the bot.
#' @param botName [required] The name of the bot.
#' @param checksum Identifies a specific revision of the `$LATEST` version.
#'
#' When you create a new bot alias, leave the `checksum` field blank. If
#' you specify a checksum you get a `BadRequestException` exception.
#'
#' When you want to update a bot alias, set the `checksum` field to the
#' checksum of the most recent revision of the `$LATEST` version. If you
#' don\'t specify the ` checksum` field, or if the checksum does not match
#' the `$LATEST` version, you get a `PreconditionFailedException`
#' exception.
#' @param conversationLogs Settings for conversation logs for the alias.
#'
#' @section Request syntax:
#' ```
#' svc$put_bot_alias(
#'   name = "string",
#'   description = "string",
#'   botVersion = "string",
#'   botName = "string",
#'   checksum = "string",
#'   conversationLogs = list(
#'     logSettings = list(
#'       list(
#'         logType = "AUDIO"|"TEXT",
#'         destination = "CLOUDWATCH_LOGS"|"S3",
#'         kmsKeyArn = "string",
#'         resourceArn = "string"
#'       )
#'     ),
#'     iamRoleArn = "string"
#'   )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_put_bot_alias
lexmodelbuildingservice_put_bot_alias <- function(name, description = NULL, botVersion, botName, checksum = NULL, conversationLogs = NULL) {
  # Describe the PutBotAlias REST call (empty paginator: not a paginated operation).
  operation <- new_operation(
    name = "PutBotAlias",
    http_method = "PUT",
    http_path = "/bots/{botName}/aliases/{name}",
    paginator = list()
  )
  # Build the service client from the current configuration, then assemble
  # and dispatch the request, returning the parsed response.
  svc <- .lexmodelbuildingservice$service(get_config())
  op_input <- .lexmodelbuildingservice$put_bot_alias_input(name = name, description = description, botVersion = botVersion, botName = botName, checksum = checksum, conversationLogs = conversationLogs)
  op_output <- .lexmodelbuildingservice$put_bot_alias_output()
  send_request(new_request(svc, operation, op_input, op_output))
}
.lexmodelbuildingservice$operations$put_bot_alias <- lexmodelbuildingservice_put_bot_alias
#' Creates an intent or replaces an existing intent
#'
#' Creates an intent or replaces an existing intent.
#'
#' To define the interaction between the user and your bot, you use one or
#' more intents. For a pizza ordering bot, for example, you would create an
#' `OrderPizza` intent.
#'
#' To create an intent or replace an existing intent, you must provide the
#' following:
#'
#' - Intent name. For example, `OrderPizza`.
#'
#' - Sample utterances. For example, \"Can I order a pizza, please.\" and
#' \"I want to order a pizza.\"
#'
#' - Information to be gathered. You specify slot types for the
#' information that your bot will request from the user. You can
#' specify standard slot types, such as a date or a time, or custom
#' slot types such as the size and crust of a pizza.
#'
#' - How the intent will be fulfilled. You can provide a Lambda function
#' or configure the intent to return the intent information to the
#' client application. If you use a Lambda function, when all of the
#' intent information is available, Amazon Lex invokes your Lambda
#' function. If you configure your intent to return the intent
#' information to the client application.
#'
#' You can specify other optional information in the request, such as:
#'
#' - A confirmation prompt to ask the user to confirm an intent. For
#' example, \"Shall I order your pizza?\"
#'
#' - A conclusion statement to send to the user after the intent has been
#' fulfilled. For example, \"I placed your pizza order.\"
#'
#' - A follow-up prompt that asks the user for additional activity. For
#' example, asking \"Do you want to order a drink with your pizza?\"
#'
#' If you specify an existing intent name to update the intent, Amazon Lex
#' replaces the values in the `$LATEST` version of the intent with the
#' values in the request. Amazon Lex removes fields that you don\'t provide
#' in the request. If you don\'t specify the required fields, Amazon Lex
#' throws an exception. When you update the `$LATEST` version of an intent,
#' the `status` field of any bot that uses the `$LATEST` version of the
#' intent is set to `NOT_BUILT`.
#'
#' For more information, see how-it-works.
#'
#' This operation requires permissions for the `lex:PutIntent` action.
#'
#' @usage
#' lexmodelbuildingservice_put_intent(name, description, slots,
#' sampleUtterances, confirmationPrompt, rejectionStatement,
#' followUpPrompt, conclusionStatement, dialogCodeHook,
#' fulfillmentActivity, parentIntentSignature, checksum, createVersion)
#'
#' @param name [required] The name of the intent. The name is *not* case sensitive.
#'
#' The name can\'t match a built-in intent name, or a built-in intent name
#' with \"AMAZON.\" removed. For example, because there is a built-in
#' intent called `AMAZON.HelpIntent`, you can\'t create a custom intent
#' called `HelpIntent`.
#'
#' For a list of built-in intents, see [Standard Built-in
#' Intents](https://developer.amazon.com/public/solutions/alexa/alexa-skills-kit/docs/built-in-intent-ref/standard-intents)
#' in the *Alexa Skills Kit*.
#' @param description A description of the intent.
#' @param slots An array of intent slots. At runtime, Amazon Lex elicits required slot
#' values from the user using prompts defined in the slots. For more
#' information, see how-it-works.
#' @param sampleUtterances An array of utterances (strings) that a user might say to signal the
#' intent. For example, \"I want \{PizzaSize\} pizza\", \"Order \{Quantity\}
#' \{PizzaSize\} pizzas\".
#'
#' In each utterance, a slot name is enclosed in curly braces.
#' @param confirmationPrompt Prompts the user to confirm the intent. This question should have a yes
#' or no answer.
#'
#' Amazon Lex uses this prompt to ensure that the user acknowledges that
#' the intent is ready for fulfillment. For example, with the `OrderPizza`
#' intent, you might want to confirm that the order is correct before
#' placing it. For other intents, such as intents that simply respond to
#' user questions, you might not need to ask the user for confirmation
#' before providing the information.
#'
#' You must provide both the `rejectionStatement` and the
#' `confirmationPrompt`, or neither.
#' @param rejectionStatement When the user answers \"no\" to the question defined in
#' `confirmationPrompt`, Amazon Lex responds with this statement to
#' acknowledge that the intent was canceled.
#'
#' You must provide both the `rejectionStatement` and the
#' `confirmationPrompt`, or neither.
#' @param followUpPrompt Amazon Lex uses this prompt to solicit additional activity after
#' fulfilling an intent. For example, after the `OrderPizza` intent is
#' fulfilled, you might prompt the user to order a drink.
#'
#' The action that Amazon Lex takes depends on the user\'s response, as
#' follows:
#'
#' - If the user says \"Yes\" it responds with the clarification prompt
#' that is configured for the bot.
#'
#' - if the user says \"Yes\" and continues with an utterance that
#' triggers an intent it starts a conversation for the intent.
#'
#' - If the user says \"No\" it responds with the rejection statement
#' configured for the the follow-up prompt.
#'
#' - If it doesn\'t recognize the utterance it repeats the follow-up
#' prompt again.
#'
#' The `followUpPrompt` field and the `conclusionStatement` field are
#' mutually exclusive. You can specify only one.
#' @param conclusionStatement The statement that you want Amazon Lex to convey to the user after the
#' intent is successfully fulfilled by the Lambda function.
#'
#' This element is relevant only if you provide a Lambda function in the
#' `fulfillmentActivity`. If you return the intent to the client
#' application, you can\'t specify this element.
#'
#' The `followUpPrompt` and `conclusionStatement` are mutually exclusive.
#' You can specify only one.
#' @param dialogCodeHook Specifies a Lambda function to invoke for each user input. You can
#' invoke this Lambda function to personalize user interaction.
#'
#' For example, suppose your bot determines that the user is John. Your
#' Lambda function might retrieve John\'s information from a backend
#' database and prepopulate some of the values. For example, if you find
#' that John is gluten intolerant, you might set the corresponding intent
#' slot, `GlutenIntolerant`, to true. You might find John\'s phone number
#' and set the corresponding session attribute.
#' @param fulfillmentActivity Required. Describes how the intent is fulfilled. For example, after a
#' user provides all of the information for a pizza order,
#' `fulfillmentActivity` defines how the bot places an order with a local
#' pizza store.
#'
#' You might configure Amazon Lex to return all of the intent information
#' to the client application, or direct it to invoke a Lambda function that
#' can process the intent (for example, place an order with a pizzeria).
#' @param parentIntentSignature A unique identifier for the built-in intent to base this intent on. To
#' find the signature for an intent, see [Standard Built-in
#' Intents](https://developer.amazon.com/public/solutions/alexa/alexa-skills-kit/docs/built-in-intent-ref/standard-intents)
#' in the *Alexa Skills Kit*.
#' @param checksum Identifies a specific revision of the `$LATEST` version.
#'
#' When you create a new intent, leave the `checksum` field blank. If you
#' specify a checksum you get a `BadRequestException` exception.
#'
#' When you want to update an intent, set the `checksum` field to the
#' checksum of the most recent revision of the `$LATEST` version. If you
#' don\'t specify the `checksum` field, or if the checksum does not match
#' the `$LATEST` version, you get a `PreconditionFailedException`
#' exception.
#' @param createVersion When set to `true` a new numbered version of the intent is created. This
#' is the same as calling the `CreateIntentVersion` operation. If you do
#' not specify `createVersion`, the default is `false`.
#'
#' @section Request syntax:
#' ```
#' svc$put_intent(
#' name = "string",
#' description = "string",
#' slots = list(
#' list(
#' name = "string",
#' description = "string",
#' slotConstraint = "Required"|"Optional",
#' slotType = "string",
#' slotTypeVersion = "string",
#' valueElicitationPrompt = list(
#' messages = list(
#' list(
#' contentType = "PlainText"|"SSML"|"CustomPayload",
#' content = "string",
#' groupNumber = 123
#' )
#' ),
#' maxAttempts = 123,
#' responseCard = "string"
#' ),
#' priority = 123,
#' sampleUtterances = list(
#' "string"
#' ),
#' responseCard = "string",
#' obfuscationSetting = "NONE"|"DEFAULT_OBFUSCATION"
#' )
#' ),
#' sampleUtterances = list(
#' "string"
#' ),
#' confirmationPrompt = list(
#' messages = list(
#' list(
#' contentType = "PlainText"|"SSML"|"CustomPayload",
#' content = "string",
#' groupNumber = 123
#' )
#' ),
#' maxAttempts = 123,
#' responseCard = "string"
#' ),
#' rejectionStatement = list(
#' messages = list(
#' list(
#' contentType = "PlainText"|"SSML"|"CustomPayload",
#' content = "string",
#' groupNumber = 123
#' )
#' ),
#' responseCard = "string"
#' ),
#' followUpPrompt = list(
#' prompt = list(
#' messages = list(
#' list(
#' contentType = "PlainText"|"SSML"|"CustomPayload",
#' content = "string",
#' groupNumber = 123
#' )
#' ),
#' maxAttempts = 123,
#' responseCard = "string"
#' ),
#' rejectionStatement = list(
#' messages = list(
#' list(
#' contentType = "PlainText"|"SSML"|"CustomPayload",
#' content = "string",
#' groupNumber = 123
#' )
#' ),
#' responseCard = "string"
#' )
#' ),
#' conclusionStatement = list(
#' messages = list(
#' list(
#' contentType = "PlainText"|"SSML"|"CustomPayload",
#' content = "string",
#' groupNumber = 123
#' )
#' ),
#' responseCard = "string"
#' ),
#' dialogCodeHook = list(
#' uri = "string",
#' messageVersion = "string"
#' ),
#' fulfillmentActivity = list(
#' type = "ReturnIntent"|"CodeHook",
#' codeHook = list(
#' uri = "string",
#' messageVersion = "string"
#' )
#' ),
#' parentIntentSignature = "string",
#' checksum = "string",
#' createVersion = TRUE|FALSE
#' )
#' ```
#'
#' @examples
#' # This example shows how to create an intent for ordering pizzas.
#' \donttest{svc$put_intent(
#' name = "DocOrderPizza",
#' conclusionStatement = list(
#' messages = list(
#' list(
#' content = "All right, I ordered you a {Crust} crust {Type} pizza with {Sauce} sauce...",
#' contentType = "PlainText"
#' ),
#' list(
#' content = "OK, your {Crust} crust {Type} pizza with {Sauce} sauce is on the way.",
#' contentType = "PlainText"
#' )
#' ),
#' responseCard = "foo"
#' ),
#' confirmationPrompt = list(
#' maxAttempts = 1L,
#' messages = list(
#' list(
#' content = "Should I order your {Crust} crust {Type} pizza with {Sauce} sauce?",
#' contentType = "PlainText"
#' )
#' )
#' ),
#' description = "Order a pizza from a local pizzeria.",
#' fulfillmentActivity = list(
#' type = "ReturnIntent"
#' ),
#' rejectionStatement = list(
#' messages = list(
#' list(
#' content = "Ok, I'll cancel your order.",
#' contentType = "PlainText"
#' ),
#' list(
#' content = "I cancelled your order.",
#' contentType = "PlainText"
#' )
#' )
#' ),
#' sampleUtterances = list(
#' "Order me a pizza.",
#' "Order me a {Type} pizza.",
#' "I want a {Crust} crust {Type} pizza",
#' "I want a {Crust} crust {Type} pizza with {Sauce} sauce."
#' ),
#' slots = list(
#' list(
#' name = "Type",
#' description = "The type of pizza to order.",
#' priority = 1L,
#' sampleUtterances = list(
#' "Get me a {Type} pizza.",
#' "A {Type} pizza please.",
#' "I'd like a {Type} pizza."
#' ),
#' slotConstraint = "Required",
#' slotType = "DocPizzaType",
#' slotTypeVersion = "$LATEST",
#' valueElicitationPrompt = list(
#' maxAttempts = 1L,
#' messages = list(
#' list(
#' content = "What type of pizza would you like?",
#' contentType = "PlainText"
#' ),
#' list(
#' content = "Vegie or cheese pizza?",
#' contentType = "PlainText"
#' ),
#' list(
#' content = "I can get you a vegie or a cheese pizza.",
#' contentType = "PlainText"
#' )
#' )
#' )
#' ),
#' list(
#' name = "Crust",
#' description = "The type of pizza crust to order.",
#' priority = 2L,
#' sampleUtterances = list(
#' "Make it a {Crust} crust.",
#' "I'd like a {Crust} crust."
#' ),
#' slotConstraint = "Required",
#' slotType = "DocPizzaCrustType",
#' slotTypeVersion = "$LATEST",
#' valueElicitationPrompt = list(
#' maxAttempts = 1L,
#' messages = list(
#' list(
#' content = "What type of crust would you like?",
#' contentType = "PlainText"
#' ),
#' list(
#' content = "Thick or thin crust?",
#' contentType = "PlainText"
#' )
#' )
#' )
#' ),
#' list(
#' name = "Sauce",
#' description = "The type of sauce to use on the pizza.",
#' priority = 3L,
#' sampleUtterances = list(
#' "Make it {Sauce} sauce.",
#' "I'd like {Sauce} sauce."
#' ),
#' slotConstraint = "Required",
#' slotType = "DocPizzaSauceType",
#' slotTypeVersion = "$LATEST",
#' valueElicitationPrompt = list(
#' maxAttempts = 1L,
#' messages = list(
#' list(
#' content = "White or red sauce?",
#' contentType = "PlainText"
#' ),
#' list(
#' content = "Garlic or tomato sauce?",
#' contentType = "PlainText"
#' )
#' )
#' )
#' )
#' )
#' )}
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_put_intent
lexmodelbuildingservice_put_intent <- function(name, description = NULL, slots = NULL, sampleUtterances = NULL, confirmationPrompt = NULL, rejectionStatement = NULL, followUpPrompt = NULL, conclusionStatement = NULL, dialogCodeHook = NULL, fulfillmentActivity = NULL, parentIntentSignature = NULL, checksum = NULL, createVersion = NULL) {
  # Describe the PutIntent HTTP call (method, path template, no pagination).
  operation <- new_operation(
    name = "PutIntent",
    http_method = "PUT",
    http_path = "/intents/{name}/versions/$LATEST",
    paginator = list()
  )
  # Marshal caller arguments into the request shape, and fetch the
  # corresponding response shape for unmarshalling.
  request_body <- .lexmodelbuildingservice$put_intent_input(name = name, description = description, slots = slots, sampleUtterances = sampleUtterances, confirmationPrompt = confirmationPrompt, rejectionStatement = rejectionStatement, followUpPrompt = followUpPrompt, conclusionStatement = conclusionStatement, dialogCodeHook = dialogCodeHook, fulfillmentActivity = fulfillmentActivity, parentIntentSignature = parentIntentSignature, checksum = checksum, createVersion = createVersion)
  response_shape <- .lexmodelbuildingservice$put_intent_output()
  # Build a client from the current configuration and dispatch the request.
  client <- .lexmodelbuildingservice$service(get_config())
  req <- new_request(client, operation, request_body, response_shape)
  resp <- send_request(req)
  resp
}
.lexmodelbuildingservice$operations$put_intent <- lexmodelbuildingservice_put_intent
#' Creates a custom slot type or replaces an existing custom slot type
#'
#' Creates a custom slot type or replaces an existing custom slot type.
#'
#' To create a custom slot type, specify a name for the slot type and a set
#' of enumeration values, which are the values that a slot of this type can
#' assume. For more information, see how-it-works.
#'
#' If you specify the name of an existing slot type, the fields in the
#' request replace the existing values in the `$LATEST` version of the slot
#' type. Amazon Lex removes the fields that you don\'t provide in the
#' request. If you don\'t specify required fields, Amazon Lex throws an
#' exception. When you update the `$LATEST` version of a slot type, if a
#' bot uses the `$LATEST` version of an intent that contains the slot type,
#' the bot\'s `status` field is set to `NOT_BUILT`.
#'
#' This operation requires permissions for the `lex:PutSlotType` action.
#'
#' @usage
#' lexmodelbuildingservice_put_slot_type(name, description,
#' enumerationValues, checksum, valueSelectionStrategy, createVersion)
#'
#' @param name [required] The name of the slot type. The name is *not* case sensitive.
#'
#' The name can\'t match a built-in slot type name, or a built-in slot type
#' name with \"AMAZON.\" removed. For example, because there is a built-in
#' slot type called `AMAZON.DATE`, you can\'t create a custom slot type
#' called `DATE`.
#'
#' For a list of built-in slot types, see [Slot Type
#' Reference](https://developer.amazon.com/public/solutions/alexa/alexa-skills-kit/docs/built-in-intent-ref/slot-type-reference)
#' in the *Alexa Skills Kit*.
#' @param description A description of the slot type.
#' @param enumerationValues A list of `EnumerationValue` objects that defines the values that the
#' slot type can take. Each value can have a list of `synonyms`, which are
#' additional values that help train the machine learning model about the
#' values that it resolves for a slot.
#'
#' When Amazon Lex resolves a slot value, it generates a resolution list
#' that contains up to five possible values for the slot. If you are using
#' a Lambda function, this resolution list is passed to the function. If
#' you are not using a Lambda function you can choose to return the value
#' that the user entered or the first value in the resolution list as the
#' slot value. The `valueSelectionStrategy` field indicates the option to
#' use.
#' @param checksum Identifies a specific revision of the `$LATEST` version.
#'
#' When you create a new slot type, leave the `checksum` field blank. If
#' you specify a checksum you get a `BadRequestException` exception.
#'
#' When you want to update a slot type, set the `checksum` field to the
#' checksum of the most recent revision of the `$LATEST` version. If you
#' don\'t specify the `checksum` field, or if the checksum does not match
#' the `$LATEST` version, you get a `PreconditionFailedException`
#' exception.
#' @param valueSelectionStrategy Determines the slot resolution strategy that Amazon Lex uses to return
#' slot type values. The field can be set to one of the following values:
#'
#' - `ORIGINAL_VALUE` - Returns the value entered by the user, if the
#' user value is similar to the slot value.
#'
#' - `TOP_RESOLUTION` - If there is a resolution list for the slot,
#' return the first value in the resolution list as the slot type
#' value. If there is no resolution list, null is returned.
#'
#' If you don\'t specify the `valueSelectionStrategy`, the default is
#' `ORIGINAL_VALUE`.
#' @param createVersion When set to `true` a new numbered version of the slot type is created.
#' This is the same as calling the `CreateSlotTypeVersion` operation. If
#' you do not specify `createVersion`, the default is `false`.
#'
#' @section Request syntax:
#' ```
#' svc$put_slot_type(
#' name = "string",
#' description = "string",
#' enumerationValues = list(
#' list(
#' value = "string",
#' synonyms = list(
#' "string"
#' )
#' )
#' ),
#' checksum = "string",
#' valueSelectionStrategy = "ORIGINAL_VALUE"|"TOP_RESOLUTION",
#' createVersion = TRUE|FALSE
#' )
#' ```
#'
#' @examples
#' # This example shows how to create a slot type that describes pizza
#' # sauces.
#' \donttest{svc$put_slot_type(
#' name = "PizzaSauceType",
#' description = "Available pizza sauces",
#' enumerationValues = list(
#' list(
#' value = "red"
#' ),
#' list(
#' value = "white"
#' )
#' )
#' )}
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_put_slot_type
lexmodelbuildingservice_put_slot_type <- function(name, description = NULL, enumerationValues = NULL, checksum = NULL, valueSelectionStrategy = NULL, createVersion = NULL) {
  # Describe the PutSlotType HTTP call (method, path template, no pagination).
  operation <- new_operation(
    name = "PutSlotType",
    http_method = "PUT",
    http_path = "/slottypes/{name}/versions/$LATEST",
    paginator = list()
  )
  # Marshal caller arguments into the request shape, and fetch the
  # corresponding response shape for unmarshalling.
  request_body <- .lexmodelbuildingservice$put_slot_type_input(name = name, description = description, enumerationValues = enumerationValues, checksum = checksum, valueSelectionStrategy = valueSelectionStrategy, createVersion = createVersion)
  response_shape <- .lexmodelbuildingservice$put_slot_type_output()
  # Build a client from the current configuration and dispatch the request.
  client <- .lexmodelbuildingservice$service(get_config())
  req <- new_request(client, operation, request_body, response_shape)
  resp <- send_request(req)
  resp
}
.lexmodelbuildingservice$operations$put_slot_type <- lexmodelbuildingservice_put_slot_type
#' Starts a job to import a resource to Amazon Lex
#'
#' Starts a job to import a resource to Amazon Lex.
#'
#' @usage
#' lexmodelbuildingservice_start_import(payload, resourceType,
#' mergeStrategy)
#'
#' @param payload [required] A zip archive in binary format. The archive should contain one file, a
#' JSON file containing the resource to import. The resource should match
#' the type specified in the `resourceType` field.
#' @param resourceType [required] Specifies the type of resource to export. Each resource also exports any
#' resources that it depends on.
#'
#' - A bot exports dependent intents.
#'
#' - An intent exports dependent slot types.
#' @param mergeStrategy [required] Specifies the action that the `StartImport` operation should take when
#' there is an existing resource with the same name.
#'
#' - FAIL\\_ON\\_CONFLICT - The import operation is stopped on the first
#' conflict between a resource in the import file and an existing
#' resource. The name of the resource causing the conflict is in the
#' `failureReason` field of the response to the `GetImport` operation.
#'
#' - OVERWRITE\\_LATEST - The import operation proceeds even if there is a
#' conflict with an existing resource. The \\$LATEST version of the
#' existing resource is overwritten with the data from the import file.
#'
#' @section Request syntax:
#' ```
#' svc$start_import(
#' payload = raw,
#' resourceType = "BOT"|"INTENT"|"SLOT_TYPE",
#' mergeStrategy = "OVERWRITE_LATEST"|"FAIL_ON_CONFLICT"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname lexmodelbuildingservice_start_import
lexmodelbuildingservice_start_import <- function(payload, resourceType, mergeStrategy) {
  # Describe the StartImport HTTP call (method, path, no pagination).
  operation <- new_operation(
    name = "StartImport",
    http_method = "POST",
    http_path = "/imports/",
    paginator = list()
  )
  # Marshal caller arguments into the request shape, and fetch the
  # corresponding response shape for unmarshalling.
  request_body <- .lexmodelbuildingservice$start_import_input(payload = payload, resourceType = resourceType, mergeStrategy = mergeStrategy)
  response_shape <- .lexmodelbuildingservice$start_import_output()
  # Build a client from the current configuration and dispatch the request.
  client <- .lexmodelbuildingservice$service(get_config())
  req <- new_request(client, operation, request_body, response_shape)
  resp <- send_request(req)
  resp
}
.lexmodelbuildingservice$operations$start_import <- lexmodelbuildingservice_start_import
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/event_list.R
\name{event_list}
\alias{event_list}
\alias{as_event_list}
\alias{as_event_list.list}
\alias{as_event_list.LogEvent}
\alias{as_event_list.data.frame}
\alias{as.data.table.event_list}
\alias{as.data.frame.event_list}
\title{A List of LogEvents}
\usage{
event_list(...)
as_event_list(x, ...)
\method{as_event_list}{list}(x, ..., scalarize = FALSE)
\method{as_event_list}{LogEvent}(x, ..., scalarize = FALSE)
\method{as_event_list}{data.frame}(x, na.rm = TRUE, ...)
as.data.table.event_list(x, na.rm = TRUE)
\method{as.data.frame}{event_list}(
x,
row.names = NULL,
optional = FALSE,
stringsAsFactors = FALSE,
na.rm = TRUE,
...
)
}
\arguments{
\item{...}{for \code{event} elements to be added to the list, for the \verb{as_*()}
functions parameters passed on to methods.}
\item{x}{any \code{R} object}
\item{scalarize}{\code{logical} scalar. Turn \link{LogEvents} with non-scalar \code{msg}
field into separate log events}
\item{na.rm}{remove \code{NA} values before coercing a data.frame to an \code{event_list()}.}
\item{row.names}{\code{NULL} or a character vector giving the row
names for the data frame. Missing values are not allowed.}
\item{optional}{currently ignored and only included for compatibility.}
\item{stringsAsFactors}{\code{logical} scalar: should \code{character} vectors be
converted to factors? Defaults to \code{FALSE} (as opposed to
\code{\link[base:as.data.frame]{base::as.data.frame()}}) and is only included for compatibility.}
}
\value{
\code{event_list()} and \code{as_event_list()} return a flat \code{list}
of \link{LogEvents}. Nested lists get automatically flattened.
\code{as.data.frame} and \code{as.data.table} return a \code{data.frame} or \code{data.table}
respectively
}
\description{
An event_list is a class for \code{list()}s whose only elements are \link{LogEvents}.
This structure is occasionally used internally in lgr (for example by
\link{AppenderBuffer}) and can be useful for developers that want to write
their own Appenders.
}
\details{
For convenience, \code{as.data.frame()} and \code{as.data.table()} methods
exist for event lists.
}
\examples{
e <- LogEvent$new(level = 300, msg = "a", logger = lgr)
as_event_list(e)
as_event_list(c(e, e))
# nested lists get automatically unnested
as_event_list(c(e, list(nested_event = e)))
# scalarize = TRUE "unpacks" events with vector log messages
e <- LogEvent$new(level = 300, msg = c("A", "B"), logger = lgr)
as_event_list(e, scalarize = FALSE)
as_event_list(e, scalarize = TRUE)
}
\seealso{
Other docs relevant for extending lgr:
\code{\link{LogEvent}},
\code{\link{as_LogEvent}()},
\code{\link{standardize_threshold}()}
}
\concept{docs relevant for extending lgr}
| /man/event_list.Rd | permissive | s-fleck/lgr | R | false | true | 2,787 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/event_list.R
\name{event_list}
\alias{event_list}
\alias{as_event_list}
\alias{as_event_list.list}
\alias{as_event_list.LogEvent}
\alias{as_event_list.data.frame}
\alias{as.data.table.event_list}
\alias{as.data.frame.event_list}
\title{A List of LogEvents}
\usage{
event_list(...)
as_event_list(x, ...)
\method{as_event_list}{list}(x, ..., scalarize = FALSE)
\method{as_event_list}{LogEvent}(x, ..., scalarize = FALSE)
\method{as_event_list}{data.frame}(x, na.rm = TRUE, ...)
as.data.table.event_list(x, na.rm = TRUE)
\method{as.data.frame}{event_list}(
x,
row.names = NULL,
optional = FALSE,
stringsAsFactors = FALSE,
na.rm = TRUE,
...
)
}
\arguments{
\item{...}{for \code{event} elements to be added to the list, for the \verb{as_*()}
functions parameters passed on to methods.}
\item{x}{any \code{R} object}
\item{scalarize}{\code{logical} scalar. Turn \link{LogEvents} with non-scalar \code{msg}
field into separate log events}
\item{na.rm}{remove \code{NA} values before coercing a data.frame to an \code{event_list()}.}
\item{row.names}{\code{NULL} or a character vector giving the row
names for the data frame. Missing values are not allowed.}
\item{optional}{currently ignored and only included for compatibility.}
\item{stringsAsFactors}{\code{logical} scalar: should \code{character} vectors be
converted to factors? Defaults to \code{FALSE} (as opposed to
\code{\link[base:as.data.frame]{base::as.data.frame()}}) and is only included for compatibility.}
}
\value{
an \code{event_list()} and \code{as_event_list()} return a flat \code{list}
of \link{LogEvents}. Nested lists get automatically flattened.
\code{as.data.frame} and \code{as.data.table} return a \code{data.frame} or \code{data.table}
respectively
}
\description{
An event_list is a class for \code{list()}s whose only elements are \link{LogEvents}.
This structure is occasionally used internally in lgr (for example by
\link{AppenderBuffer}) and can be useful for developers that want to write
their own Appenders.
}
\details{
For convenience, \code{as.data.frame()} and \code{as.data.table()} methods
exist for event lists.
}
\examples{
e <- LogEvent$new(level = 300, msg = "a", logger = lgr)
as_event_list(e)
as_event_list(c(e, e))
# nested lists get automatically unnested
as_event_list(c(e, list(nested_event = e)))
# scalarize = TRUE "unpacks" events with vector log messages
e <- LogEvent$new(level = 300, msg = c("A", "B"), logger = lgr)
as_event_list(e, scalarize = FALSE)
as_event_list(e, scalarize = TRUE)
}
\seealso{
Other docs relevant for extending lgr:
\code{\link{LogEvent}},
\code{\link{as_LogEvent}()},
\code{\link{standardize_threshold}()}
}
\concept{docs relevant for extending lgr}
|
# Parse a single user-supplied request argument (string) into an R value
# or expression.
#
# Accepted forms, in order of precedence:
#   * AsIs objects (already-parsed JSON) -- passed through unchanged
#   * the JSON literals "true" / "false" / "null"
#   * JSON objects/arrays (strings starting with "{" or "[")
#   * session keys (rewritten to a `<key>::.val` reference)
#   * arbitrary R code, but only when the `enable.post.code` option is set;
#     otherwise only primitives (string/logical/number/name) and namespaced
#     objects (`pkg::obj`) are allowed.
# Anything else raises an error.
parse_arg <- function(x){
  #special for json obj
  if(inherits(x, "AsIs")){
    # drop the "AsIs" wrapper class added by I() and return the
    # underlying object untouched
    class(x) <- utils::tail(class(x), -1);
    return(x);
  }
  #cast (e.g. for NULL)
  x <- as.character(x);
  #empty vector causes issues
  if(!length(x)){
    # character(0) (e.g. from NULL input) would break switch()/grepl()
    # below, so substitute a single blank string
    x <- " ";
  }
  #some special cases for json compatibility
  # NB: switch() on a character EXPR matches exactly; unmatched input
  # falls through (switch returns NULL invisibly) and parsing continues.
  switch(x,
    "true" = return(as.expression(TRUE)),
    "false" = return(as.expression(FALSE)),
    "null" = return(as.expression(NULL))
  );
  #if string starts with { or [ we test for json
  if(grepl("^[ \t\r\n]*(\\{|\\[)", x)) {
    if(validate(x)) {
      return(fromJSON(x));
    }
    # invalid JSON falls through and is treated as R code below
  }
  #check if it is a session key
  # NOTE(review): session_regex() is defined elsewhere; presumably it
  # matches bare session-key identifiers -- confirm in the package source.
  if(grepl(session_regex(), x)){
    x <- paste0(x, "::.val")
  }
  #try to parse code.
  myexpr <- parse_utf8(x)
  #inject code if enabled
  if(isTRUE(config("enable.post.code"))){
    #wrap in block if more than one call
    # (a single top-level `=` call is also wrapped, so that `a = 1` is
    # parsed as an assignment inside a block rather than rejected)
    if(length(myexpr) > 1 || (is.call(myexpr[[1]]) && identical(myexpr[[1]][[1]], quote(`=`)))){
      myexpr <- parse_utf8(paste("{", x, "}"))
    }
    collect_session_keys(myexpr)
    return(myexpr)
  }
  #otherwise check for primitive
  if(!length(myexpr)){
    # whitespace-only input parses to nothing; return an empty expression
    return(expression());
  }
  #check if it is a boolean, number or string
  if(identical(1L, length(myexpr))) {
    #parse primitives
    if(is.character(myexpr[[1]]) || is.logical(myexpr[[1]]) || is.numeric(myexpr[[1]]) || is.name(myexpr[[1]])) {
      return(myexpr);
    }
    #parse namespaced objects foo::bar
    if(is.call(myexpr[[1]]) && identical(myexpr[[1]][[1]], quote(`::`))){
      collect_session_keys(myexpr)
      return(myexpr)
    }
  }
  #failed to parse argument
  stop("Invalid argument: ", x, ".\nThis server has disabled posting R code in arguments.");
}
| /R/parse_arg.R | permissive | opencpu/opencpu | R | false | false | 1,699 | r | parse_arg <- function(x){
# Parse one HTTP request argument into an R value or expression.
# When the server config 'enable.post.code' is TRUE, arbitrary R code is
# parsed and returned; otherwise only primitives, JSON payloads and
# namespaced objects (foo::bar) are accepted, and anything else errors.
#special for json obj
if(inherits(x, "AsIs")){
# drop the "AsIs" wrapper class and pass the object through untouched
class(x) <- utils::tail(class(x), -1);
return(x);
}
#cast (e.g. for NULL)
x <- as.character(x);
#empty vector causes issues
if(!length(x)){
# substitute a single space so switch()/grepl() below receive a scalar
x <- " ";
}
#some special cases for json compatibility
# NOTE: switch() falls through silently (returns NULL) when x matches none
# of these literals, so execution simply continues below.
switch(x,
"true" = return(as.expression(TRUE)),
"false" = return(as.expression(FALSE)),
"null" = return(as.expression(NULL))
);
#if string starts with { or [ we test for json
# validate()/fromJSON() -- presumably jsonlite; TODO confirm package imports
if(grepl("^[ \t\r\n]*(\\{|\\[)", x)) {
if(validate(x)) {
# valid JSON: return the parsed data structure directly
return(fromJSON(x));
}
}
#check if it is a session key
if(grepl(session_regex(), x)){
# rewrite a bare session key as "<key>::.val" so it parses as an expression
x <- paste0(x, "::.val")
}
#try to parse code.
myexpr <- parse_utf8(x)
#inject code if enabled
if(isTRUE(config("enable.post.code"))){
#wrap in block if more than one call
# single top-level assignments of the form `a = b` are also wrapped so
# they evaluate as code rather than being treated as a named argument
if(length(myexpr) > 1 || (is.call(myexpr[[1]]) && identical(myexpr[[1]][[1]], quote(`=`)))){
myexpr <- parse_utf8(paste("{", x, "}"))
}
collect_session_keys(myexpr)
return(myexpr)
}
#otherwise check for primitive
if(!length(myexpr)){
return(expression());
}
#check if it is a boolean, number or string
if(identical(1L, length(myexpr))) {
#parse primitives
if(is.character(myexpr[[1]]) || is.logical(myexpr[[1]]) || is.numeric(myexpr[[1]]) || is.name(myexpr[[1]])) {
return(myexpr);
}
#parse namespaced objects foo::bar
if(is.call(myexpr[[1]]) && identical(myexpr[[1]][[1]], quote(`::`))){
collect_session_keys(myexpr)
return(myexpr)
}
}
#failed to parse argument
stop("Invalid argument: ", x, ".\nThis server has disabled posting R code in arguments.");
}
|
# Tidying the data from databases
# Natalie Cooper
# Dec 2018
#---------------------------------
# Load libraries
library(tidyverse)
library(lubridate)
library(purrr)
library(taxize)
# Short function to get decades
# Short function to get decades.
# Rounds a year down to the start of its decade (1995 -> "1990") and returns
# character so that purrr::map_chr() in the pipeline below does not rely on
# the deprecated automatic double-to-character coercion. NA input yields
# NA_character_, keeping the return type consistent across both branches;
# callers convert back with as.numeric().
floor_decade <- function(x){
  if (!is.na(x)) {
    as.character(x - x %% 10)
  } else {
    NA_character_
  }
}
#---------------------------------------------------------------
# Read in the GBIF data
#---------------------------------------------------------------
# One tab-delimited GBIF export per museum per class.
# Birds
amnh_b <- read_delim("raw-data/AMNH_birds.txt", delim = "\t")
fmnh_b <- read_delim("raw-data/FMNH_birds.txt", delim = "\t")
mnhn_b <- read_delim("raw-data/MNHN_birds.txt", delim = "\t")
nmnh_b <- read_delim("raw-data/NMNH_birds.txt", delim = "\t")
nhmuk_b <- read_delim("raw-data/NHMUK_birds.txt", delim = "\t")
# Mammals
amnh_m <- read_delim("raw-data/AMNH_mammals.txt", delim = "\t")
fmnh_m <- read_delim("raw-data/FMNH_mammals.txt", delim = "\t")
mnhn_m <- read_delim("raw-data/MNHN_mammals.txt", delim = "\t")
nmnh_m <- read_delim("raw-data/NMNH_mammals.txt", delim = "\t")
nhmuk_m <- read_delim("raw-data/NHMUK_mammals.txt", delim = "\t")
# Combine into a list
all <- list(amnh_b, fmnh_b, mnhn_b, nmnh_b, nhmuk_b,
amnh_m, fmnh_m, mnhn_m, nmnh_m, nhmuk_m)
# Combine based on shared columns
# and make a new binomial column
# NOTE(review): no binomial column is created below; the comment above looks
# stale. Each data frame is subset to the columns shared by ALL ten exports,
# then the ten frames are stacked row-wise.
ds <- all %>%
map(function(x) x[Reduce(intersect, map(all, colnames))]) %>%
reduce(rbind)
#----------------------------------
# Tidy up the data
#----------------------------------
ds2 <-
ds %>%
# Remove paleo collections from NHMUK data and other incorrect codes
# NOTE(review): != comparisons evaluate to NA for missing codes, so rows
# with an NA collectionCode are dropped here too -- confirm intended.
filter(collectionCode != "PAL" & collectionCode != "Birds - Eggs" &
collectionCode != "BOT" & collectionCode != "Fishes" &
collectionCode != "BMNH(E)" & collectionCode != "Entomology") %>%
# Remove weird collections that aren't the key datasets
filter(institutionCode == "USNM" | institutionCode == "FMNH" |
institutionCode == "AMNH" | institutionCode == "NHMUK" |
institutionCode == "MNHN" ) %>%
# Check that all included records are specimens not observations
filter(basisOfRecord == "PRESERVED_SPECIMEN") %>%
# Create a new column for specimen ID number
unite(col = specID, `institutionCode`, `catalogNumber`, sep = "_", remove = FALSE) %>%
# Remove records where specimen is definitely not an adult
# This will exclude the juveniles, fetuses and young
# All unknown age individuals (the majority) are kept, on the basis
# that juveniles are almost always coded as such, but adult status is not
filter(lifeStage == "ADULT" | is.na(lifeStage)) %>%
# Remove capitalised words
# Order matters: "FEMALE" must be replaced before "MALE", otherwise the
# "MALE" substring inside "FEMALE" would be rewritten first.
mutate(sex = str_replace(sex, "FEMALE", "Female")) %>%
mutate(sex = str_replace(sex, "MALE", "Male")) %>%
# Extract only male or female specimens
# Excludes specimens with ?, intersex and multiple sex specimens
# (records with missing sex are retained)
filter(sex == "Male" | sex == "Female" | is.na(sex)) %>%
# Replace the poorly coded year data with NAs
mutate(year = as.numeric(year)) %>%
# ifelse() with NA_character_ silently turns the column into character,
# hence the extra as.numeric() on the next step.
mutate(year = ifelse(year > 2018 | year < 1750, NA_character_, year)) %>%
# Coerce to numeric again
mutate(year = as.numeric(year)) %>%
# Add decade variable (function above)
# This maps to character to deal with NAs so needs coercing back to numeric
mutate(decade = map_chr(year, floor_decade)) %>%
mutate(decade = as.numeric(decade)) %>%
# Remove entries with no order designated (these are errors)
filter(!is.na(order)) %>%
# Remove classes that shouldn't be there
filter(class != "Actinopterygii" & class != "Insecta" & class != "MALE") %>%
# Add a name bearing type column and designate name bearing types as such
# Non types
mutate(type = if_else(is.na(typeStatus), "NonType", typeStatus)) %>%
# Name bearing types
# NOTE: the order of the str_replace() calls below is load-bearing, and
# str_replace() rewrites only the FIRST match per string. For example
# "PARALECTOTYPE" is hit by the LECTOTYPE rule here (becoming "PARAType"),
# which the dedicated "PARAType" rule further down then maps to
# NonNameType -- so the PARALECTOTYPE rule below never actually fires.
mutate(type = str_replace(type, "HOLOTYPE", "Type")) %>%
mutate(type = str_replace(type, "SYNTYPE", "Type")) %>%
mutate(type = str_replace(type, "LECTOTYPE", "Type")) %>%
mutate(type = str_replace(type, "NEOTYPE", "Type")) %>%
# Non name bearing types
mutate(type = str_replace(type, "COTYPE", "NonNameType")) %>%
mutate(type = str_replace(type, "ALLOTYPE", "NonNameType")) %>%
mutate(type = str_replace(type, "PARALECTOTYPE", "NonNameType")) %>%
mutate(type = str_replace(type, "PARAType", "NonNameType")) %>%
mutate(type = str_replace(type, "PARATYPE", "NonNameType")) %>%
mutate(type = str_replace(type, "TOPOTYPE", "NonNameType")) %>%
# It is unclear if these are name bearing or not
# (catch-all: any remaining "TYPE" becomes AmbiguousType)
mutate(type = str_replace(type, "TYPE", "AmbiguousType")) %>%
# Rename classes to english names
mutate(class = str_replace_all(class, "Aves", "Birds")) %>%
mutate(class = str_replace_all(class, "Mammalia", "Mammals")) %>%
# Remove other errors
filter(class == "Birds" | class == "Mammals") %>%
# Select just the columns of interest
select(institutionCode, specID, sex, class, order, family, genus,
continent, year, decade, typeStatus, type)
#----------------------------------------------------------------
# Taxonomy match will be tricky for Genera so I've left it out
#-----------------------------------------------------
# Write to file for analyses
#-----------------------------------------------------
# NOTE(review): `path =` is deprecated in readr >= 1.4 in favour of `file =`;
# kept as-is for compatibility with the readr version used originally.
write_csv(ds2, path = "data/genera-specimen-data.csv")
| /data-wrangling/03-revision-extract-genus-level-data.R | permissive | nhcooper123/sex-bias-museums | R | false | false | 5,298 | r | # Tidying the data from databases
# Natalie Cooper
# Dec 2018
#---------------------------------
# Load libraries
library(tidyverse)
library(lubridate)
library(purrr)
library(taxize)
# Short function to get decades
# Short function to get decades.
# Rounds a year down to the start of its decade (1995 -> "1990") and returns
# character so that purrr::map_chr() in the pipeline below does not rely on
# the deprecated automatic double-to-character coercion. NA input yields
# NA_character_, keeping the return type consistent across both branches;
# callers convert back with as.numeric().
floor_decade <- function(x){
  if (!is.na(x)) {
    as.character(x - x %% 10)
  } else {
    NA_character_
  }
}
#---------------------------------------------------------------
# Read in the GBIF data
#---------------------------------------------------------------
# One tab-delimited GBIF export per museum per class.
# Birds
amnh_b <- read_delim("raw-data/AMNH_birds.txt", delim = "\t")
fmnh_b <- read_delim("raw-data/FMNH_birds.txt", delim = "\t")
mnhn_b <- read_delim("raw-data/MNHN_birds.txt", delim = "\t")
nmnh_b <- read_delim("raw-data/NMNH_birds.txt", delim = "\t")
nhmuk_b <- read_delim("raw-data/NHMUK_birds.txt", delim = "\t")
# Mammals
amnh_m <- read_delim("raw-data/AMNH_mammals.txt", delim = "\t")
fmnh_m <- read_delim("raw-data/FMNH_mammals.txt", delim = "\t")
mnhn_m <- read_delim("raw-data/MNHN_mammals.txt", delim = "\t")
nmnh_m <- read_delim("raw-data/NMNH_mammals.txt", delim = "\t")
nhmuk_m <- read_delim("raw-data/NHMUK_mammals.txt", delim = "\t")
# Combine into a list
all <- list(amnh_b, fmnh_b, mnhn_b, nmnh_b, nhmuk_b,
amnh_m, fmnh_m, mnhn_m, nmnh_m, nhmuk_m)
# Combine based on shared columns
# and make a new binomial column
# NOTE(review): no binomial column is created below; the comment above looks
# stale. Each data frame is subset to the columns shared by ALL ten exports,
# then the ten frames are stacked row-wise.
ds <- all %>%
map(function(x) x[Reduce(intersect, map(all, colnames))]) %>%
reduce(rbind)
#----------------------------------
# Tidy up the data
#----------------------------------
ds2 <-
ds %>%
# Remove paleo collections from NHMUK data and other incorrect codes
# NOTE(review): != comparisons evaluate to NA for missing codes, so rows
# with an NA collectionCode are dropped here too -- confirm intended.
filter(collectionCode != "PAL" & collectionCode != "Birds - Eggs" &
collectionCode != "BOT" & collectionCode != "Fishes" &
collectionCode != "BMNH(E)" & collectionCode != "Entomology") %>%
# Remove weird collections that aren't the key datasets
filter(institutionCode == "USNM" | institutionCode == "FMNH" |
institutionCode == "AMNH" | institutionCode == "NHMUK" |
institutionCode == "MNHN" ) %>%
# Check that all included records are specimens not observations
filter(basisOfRecord == "PRESERVED_SPECIMEN") %>%
# Create a new column for specimen ID number
unite(col = specID, `institutionCode`, `catalogNumber`, sep = "_", remove = FALSE) %>%
# Remove records where specimen is definitely not an adult
# This will exclude the juveniles, fetuses and young
# All unknown age individuals (the majority) are kept, on the basis
# that juveniles are almost always coded as such, but adult status is not
filter(lifeStage == "ADULT" | is.na(lifeStage)) %>%
# Remove capitalised words
# Order matters: "FEMALE" must be replaced before "MALE", otherwise the
# "MALE" substring inside "FEMALE" would be rewritten first.
mutate(sex = str_replace(sex, "FEMALE", "Female")) %>%
mutate(sex = str_replace(sex, "MALE", "Male")) %>%
# Extract only male or female specimens
# Excludes specimens with ?, intersex and multiple sex specimens
# (records with missing sex are retained)
filter(sex == "Male" | sex == "Female" | is.na(sex)) %>%
# Replace the poorly coded year data with NAs
mutate(year = as.numeric(year)) %>%
# ifelse() with NA_character_ silently turns the column into character,
# hence the extra as.numeric() on the next step.
mutate(year = ifelse(year > 2018 | year < 1750, NA_character_, year)) %>%
# Coerce to numeric again
mutate(year = as.numeric(year)) %>%
# Add decade variable (function above)
# This maps to character to deal with NAs so needs coercing back to numeric
mutate(decade = map_chr(year, floor_decade)) %>%
mutate(decade = as.numeric(decade)) %>%
# Remove entries with no order designated (these are errors)
filter(!is.na(order)) %>%
# Remove classes that shouldn't be there
filter(class != "Actinopterygii" & class != "Insecta" & class != "MALE") %>%
# Add a name bearing type column and designate name bearing types as such
# Non types
mutate(type = if_else(is.na(typeStatus), "NonType", typeStatus)) %>%
# Name bearing types
# NOTE: the order of the str_replace() calls below is load-bearing, and
# str_replace() rewrites only the FIRST match per string. For example
# "PARALECTOTYPE" is hit by the LECTOTYPE rule here (becoming "PARAType"),
# which the dedicated "PARAType" rule further down then maps to
# NonNameType -- so the PARALECTOTYPE rule below never actually fires.
mutate(type = str_replace(type, "HOLOTYPE", "Type")) %>%
mutate(type = str_replace(type, "SYNTYPE", "Type")) %>%
mutate(type = str_replace(type, "LECTOTYPE", "Type")) %>%
mutate(type = str_replace(type, "NEOTYPE", "Type")) %>%
# Non name bearing types
mutate(type = str_replace(type, "COTYPE", "NonNameType")) %>%
mutate(type = str_replace(type, "ALLOTYPE", "NonNameType")) %>%
mutate(type = str_replace(type, "PARALECTOTYPE", "NonNameType")) %>%
mutate(type = str_replace(type, "PARAType", "NonNameType")) %>%
mutate(type = str_replace(type, "PARATYPE", "NonNameType")) %>%
mutate(type = str_replace(type, "TOPOTYPE", "NonNameType")) %>%
# It is unclear if these are name bearing or not
# (catch-all: any remaining "TYPE" becomes AmbiguousType)
mutate(type = str_replace(type, "TYPE", "AmbiguousType")) %>%
# Rename classes to english names
mutate(class = str_replace_all(class, "Aves", "Birds")) %>%
mutate(class = str_replace_all(class, "Mammalia", "Mammals")) %>%
# Remove other errors
filter(class == "Birds" | class == "Mammals") %>%
# Select just the columns of interest
select(institutionCode, specID, sex, class, order, family, genus,
continent, year, decade, typeStatus, type)
#----------------------------------------------------------------
# Taxonomy match will be tricky for Genera so I've left it out
#-----------------------------------------------------
# Write to file for analyses
#-----------------------------------------------------
# NOTE(review): `path =` is deprecated in readr >= 1.4 in favour of `file =`;
# kept as-is for compatibility with the readr version used originally.
write_csv(ds2, path = "data/genera-specimen-data.csv")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qtn_partially_pleiotropic.R
\name{qtn_partially_pleiotropic}
\alias{qtn_partially_pleiotropic}
\title{Select SNPs to be assigned as QTNs}
\usage{
qtn_partially_pleiotropic(
genotypes = NULL,
seed = NULL,
pleio_a = NULL,
pleio_d = NULL,
pleio_e = NULL,
trait_spec_a_QTN_num = NULL,
trait_spec_d_QTN_num = NULL,
trait_spec_e_QTN_num = NULL,
epi_type = NULL,
epi_interaction = 2,
ntraits = NULL,
constraints = list(maf_above = NULL, maf_below = NULL, hets = NULL),
rep = NULL,
rep_by = NULL,
export_gt = NULL,
same_add_dom_QTN = NULL,
add = NULL,
dom = NULL,
epi = NULL,
verbose = verbose
)
}
\arguments{
\item{genotypes}{= NULL,}
\item{seed}{= NULL,}
\item{pleio_a}{= NULL,}
\item{pleio_d}{= NULL,}
\item{pleio_e}{= NULL,}
\item{trait_spec_a_QTN_num}{= NULL,}
\item{trait_spec_d_QTN_num}{= NULL,}
\item{trait_spec_e_QTN_num}{= NULL,}
\item{epi_type}{= NULL,}
\item{epi_interaction}{= 2,}
\item{ntraits}{= NULL}
\item{constraints}{= list(maf_above = NULL, maf_below = NULL)}
\item{rep}{= 1,}
\item{rep_by}{= 'QTN',}
\item{export_gt}{= FALSE}
\item{same_add_dom_QTN}{= NULL,}
\item{add}{= NULL,}
\item{dom}{= NULL,}
\item{epi}{= NULL}
\item{verbose}{= verbose}
}
\value{
Genotype of selected SNPs
}
\description{
Select SNPs to be assigned as QTNs
}
\author{
Samuel Fernandes and Alexander Lipka
Last update: Apr 20, 2020
----------------------------- QTN_partially_pleiotropic ----------------------
}
\keyword{internal}
| /man/QTN_partially_pleiotropic.Rd | permissive | samuelbfernandes/simplePHENOTYPES | R | false | true | 1,553 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qtn_partially_pleiotropic.R
\name{qtn_partially_pleiotropic}
\alias{qtn_partially_pleiotropic}
\title{Select SNPs to be assigned as QTNs}
\usage{
qtn_partially_pleiotropic(
genotypes = NULL,
seed = NULL,
pleio_a = NULL,
pleio_d = NULL,
pleio_e = NULL,
trait_spec_a_QTN_num = NULL,
trait_spec_d_QTN_num = NULL,
trait_spec_e_QTN_num = NULL,
epi_type = NULL,
epi_interaction = 2,
ntraits = NULL,
constraints = list(maf_above = NULL, maf_below = NULL, hets = NULL),
rep = NULL,
rep_by = NULL,
export_gt = NULL,
same_add_dom_QTN = NULL,
add = NULL,
dom = NULL,
epi = NULL,
verbose = verbose
)
}
\arguments{
\item{genotypes}{= NULL,}
\item{seed}{= NULL,}
\item{pleio_a}{= NULL,}
\item{pleio_d}{= NULL,}
\item{pleio_e}{= NULL,}
\item{trait_spec_a_QTN_num}{= NULL,}
\item{trait_spec_d_QTN_num}{= NULL,}
\item{trait_spec_e_QTN_num}{= NULL,}
\item{epi_type}{= NULL,}
\item{epi_interaction}{= 2,}
\item{ntraits}{= NULL}
\item{constraints}{= list(maf_above = NULL, maf_below = NULL)}
\item{rep}{= 1,}
\item{rep_by}{= 'QTN',}
\item{export_gt}{= FALSE}
\item{same_add_dom_QTN}{= NULL,}
\item{add}{= NULL,}
\item{dom}{= NULL,}
\item{epi}{= NULL}
\item{verbose}{= verbose}
}
\value{
Genotype of selected SNPs
}
\description{
Select SNPs to be assigned as QTNs
}
\author{
Samuel Fernandes and Alexander Lipka
Last update: Apr 20, 2020
----------------------------- QTN_partially_pleiotropic ----------------------
}
\keyword{internal}
|
library(shiny)
library(dplyr)
library(shinybusy)
library(shinythemes)
# UI definition: one page with a week-of-season slider and a run button in
# the sidebar, plus five text outputs (each styled to 25px black) in the
# main panel reporting the result of the weekly injury simulation.
shinyUI(fluidPage(theme = shinytheme("flatly"),
  titlePanel(
    h1("Madden Injury Simulator", align = "center")
  ),
  sidebarLayout(
    sidebarPanel(width = 12,
      h4("Simulates whether or not an injury occurs
in franchise mode. Use after playing each
week's game."),
      sliderInput(
        "slider", "Week of Season",
        value = 1, min = 1, max = 20, step = 1
      ),
      actionButton(
        inputId = "run_it",
        label = "Run Simulator"
      )
    ),
    mainPanel(
      h2(""),
      textOutput("injury"),
      tags$head(tags$style("#injury{color: black;
font-size: 25px;
}"
      )
      ),
      h2(""),
      textOutput("position_hurt"),
      tags$head(tags$style("#position_hurt{color: black;
font-size: 25px;
}"
      )
      ),
      h2(""),
      textOutput("quarter_hurt"),
      tags$head(tags$style("#quarter_hurt{color: black;
font-size: 25px;
}"
      )
      ),
      h2(""),
      textOutput("weeks_out"),
      tags$head(tags$style("#weeks_out{color: black;
font-size: 25px;
}"
      )
      ),
      h2(""),
      textOutput("week_return"),
      tags$head(tags$style("#week_return{color: black;
font-size: 25px;
}"
      )
      )
      # FIX: removed the trailing comma that followed the last tags$head();
      # it created an empty argument to mainPanel(), which triggers
      # "argument is missing, with no default" on older htmltools.
    )
  )
)) | /Old App/ui.R | no_license | lqual/Madden_Injury_Simulator | R | false | false | 2,616 | r | library(shiny)
library(dplyr)
library(shinybusy)
library(shinythemes)
# UI definition: one page with a week-of-season slider and a run button in
# the sidebar, plus five text outputs (each styled to 25px black) in the
# main panel reporting the result of the weekly injury simulation.
shinyUI(fluidPage(theme = shinytheme("flatly"),
  titlePanel(
    h1("Madden Injury Simulator", align = "center")
  ),
  sidebarLayout(
    sidebarPanel(width = 12,
      h4("Simulates whether or not an injury occurs
in franchise mode. Use after playing each
week's game."),
      sliderInput(
        "slider", "Week of Season",
        value = 1, min = 1, max = 20, step = 1
      ),
      actionButton(
        inputId = "run_it",
        label = "Run Simulator"
      )
    ),
    mainPanel(
      h2(""),
      textOutput("injury"),
      tags$head(tags$style("#injury{color: black;
font-size: 25px;
}"
      )
      ),
      h2(""),
      textOutput("position_hurt"),
      tags$head(tags$style("#position_hurt{color: black;
font-size: 25px;
}"
      )
      ),
      h2(""),
      textOutput("quarter_hurt"),
      tags$head(tags$style("#quarter_hurt{color: black;
font-size: 25px;
}"
      )
      ),
      h2(""),
      textOutput("weeks_out"),
      tags$head(tags$style("#weeks_out{color: black;
font-size: 25px;
}"
      )
      ),
      h2(""),
      textOutput("week_return"),
      tags$head(tags$style("#week_return{color: black;
font-size: 25px;
}"
      )
      )
      # FIX: removed the trailing comma that followed the last tags$head();
      # it created an empty argument to mainPanel(), which triggers
      # "argument is missing, with no default" on older htmltools.
    )
  )
))
#' @import officer
#' @title Define flextable displayed values
#' @description Modify flextable displayed values by specifying a
#' string expression. Function is handling complex formatting as well as
#' image insertion.
#' @param x a flextable object
#' @param i rows selection
#' @param col_key column to modify, a single character
#' @param pattern string to format
#' @param formatters a list of formula, left side for the name,
#' right side for the content.
#' @param fprops a named list of \link[officer]{fp_text}
#' @param part partname of the table (one of 'all', 'body', 'header', 'footer')
#' @note
#' The function \code{display} only works with \code{flextable} objects,
#' use \code{\link{set_formatter}} for regulartable objects.
#' @section pattern:
#' It defines the template used to format the produced strings. Names enclosed
#' by double braces will be evaluated as R code, the corresponding R code is defined
#' with the argument \code{formatters}.
#' @section formatters:
#' Each compound is specifying the R code to execute to produce strings that will be
#' substituted in the \code{pattern} argument. An element must be a formula: the
#' left-hand side is a name (matching a name enclosed by double braces in
#' \code{pattern}) and the right-hand side is an R expression to be evaluated (that
#' will produce the corresponding strings).
#' @section fprops:
#' A named list of \link[officer]{fp_text}. It defines the formatting properties
#' associated to a compound in \code{formatters}. If not defined for an element
#' of \code{formatters}, the default formatting properties will be applied.
#' @examples
#' library(officer)
#' # Formatting data values example ------
#' ft <- flextable(head( mtcars, n = 10))
#' ft <- display(ft, col_key = "carb",
#' i = ~ drat > 3.5, pattern = "# {{carb}}",
#' formatters = list(carb ~ sprintf("%.1f", carb)),
#' fprops = list(carb = fp_text(color="orange") ) )
#' \donttest{ft <- autofit(ft)}
#' @export
display <- function(x, i = NULL, col_key,
                    pattern, formatters = list(), fprops = list(),
                    part = "body"){
  # Only these table parts are accepted.
  # NOTE(review): the roxygen docs mention 'all', but it is not allowed here.
  part <- match.arg(part, c("body", "header", "footer"), several.ok = FALSE )
  stopifnot(is.character(pattern), length(pattern)==1)
  # Every supplied formatting property must be an fp_text object.
  # vapply() (rather than sapply) guarantees a logical vector regardless of
  # the list contents, so the all() test cannot silently misbehave.
  if( length( fprops ) && !all(vapply( fprops, inherits, logical(1), "fp_text")) ){
    stop("argument fprops should be a list of fp_text")
  }
  check_formula_i_and_part(i, part)
  # Resolve the row/column selections to indices within the chosen part.
  i <- get_rows_id(x[[part]], i )
  j <- get_columns_id(x[[part]], col_key )
  # Parse the pattern into a display specification and register it on the
  # selected cells; fp_sign() identifies identical specs for reuse.
  obj <- display_parser$new( x = pattern,
                             formatters = formatters, fprops = fprops )
  lazy_f_id <- fp_sign(obj)
  x[[part]]$styles$formats$set_fp(i, x$col_keys[j], obj, lazy_f_id )
  # Return the (modified) flextable so calls can be chained.
  x
}
| /R/display.R | no_license | maislind/flextable | R | false | false | 2,724 | r | #' @import officer
#' @title Define flextable displayed values
#' @description Modify flextable displayed values by specifying a
#' string expression. Function is handling complex formatting as well as
#' image insertion.
#' @param x a flextable object
#' @param i rows selection
#' @param col_key column to modify, a single character
#' @param pattern string to format
#' @param formatters a list of formula, left side for the name,
#' right side for the content.
#' @param fprops a named list of \link[officer]{fp_text}
#' @param part partname of the table (one of 'all', 'body', 'header', 'footer')
#' @note
#' The function \code{display} only works with \code{flextable} objects,
#' use \code{\link{set_formatter}} for regulartable objects.
#' @section pattern:
#' It defines the template used to format the produced strings. Names enclosed
#' by double braces will be evaluated as R code, the corresponding R code is defined
#' with the argument \code{formatters}.
#' @section formatters:
#' Each compound is specifying the R code to execute to produce strings that will be
#' substituted in the \code{pattern} argument. An element must be a formula: the
#' left-hand side is a name (matching a name enclosed by double braces in
#' \code{pattern}) and the right-hand side is an R expression to be evaluated (that
#' will produce the corresponding strings).
#' @section fprops:
#' A named list of \link[officer]{fp_text}. It defines the formatting properties
#' associated to a compound in \code{formatters}. If not defined for an element
#' of \code{formatters}, the default formatting properties will be applied.
#' @examples
#' library(officer)
#' # Formatting data values example ------
#' ft <- flextable(head( mtcars, n = 10))
#' ft <- display(ft, col_key = "carb",
#' i = ~ drat > 3.5, pattern = "# {{carb}}",
#' formatters = list(carb ~ sprintf("%.1f", carb)),
#' fprops = list(carb = fp_text(color="orange") ) )
#' \donttest{ft <- autofit(ft)}
#' @export
display <- function(x, i = NULL, col_key,
                    pattern, formatters = list(), fprops = list(),
                    part = "body"){
  # Only these table parts are accepted.
  # NOTE(review): the roxygen docs mention 'all', but it is not allowed here.
  part <- match.arg(part, c("body", "header", "footer"), several.ok = FALSE )
  stopifnot(is.character(pattern), length(pattern)==1)
  # Every supplied formatting property must be an fp_text object.
  # vapply() (rather than sapply) guarantees a logical vector regardless of
  # the list contents, so the all() test cannot silently misbehave.
  if( length( fprops ) && !all(vapply( fprops, inherits, logical(1), "fp_text")) ){
    stop("argument fprops should be a list of fp_text")
  }
  check_formula_i_and_part(i, part)
  # Resolve the row/column selections to indices within the chosen part.
  i <- get_rows_id(x[[part]], i )
  j <- get_columns_id(x[[part]], col_key )
  # Parse the pattern into a display specification and register it on the
  # selected cells; fp_sign() identifies identical specs for reuse.
  obj <- display_parser$new( x = pattern,
                             formatters = formatters, fprops = fprops )
  lazy_f_id <- fp_sign(obj)
  x[[part]]$styles$formats$set_fp(i, x$col_keys[j], obj, lazy_f_id )
  # Return the (modified) flextable so calls can be chained.
  x
}
|
library(skater)
library(testthat)
# dibble(k) returns the degree tibble with k + 2 rows for supported degrees,
# warns at the edge of the supported range, and errors outside it.
test_that("Degree tibble", {
  degrees <- 3:9
  row_counts <- purrr::map_int(purrr::map(degrees, dibble), nrow)
  expect_equal(row_counts, degrees + 2)
  expect_error(dibble(12))
  expect_warning(dibble(10))
  expect_error(dibble(0))
  expect_warning(dibble(2))
})

# kin2degree() maps kinship coefficients to relatedness degree, is
# vectorized, and yields NA below the third-degree cutoff.
test_that("Degree inference from kinship coefficient", {
  expect_equal(kin2degree(0.25), 1L)
  expect_equal(kin2degree(0.125), 2L)
  expect_equal(kin2degree(0.0625), 3L)
  expect_identical(kin2degree(0), NA_integer_)
  kin <- c(0.5, 0.25, 0.125, 0.0625, 0.03125, 0, 0.25, 0.5)
  expect_identical(kin2degree(kin), c(0L, 1L, 2L, 3L, NA, NA, 1L, 0L))
})

# plot_pedigree() rejects each of these argument combinations.
test_that("Expect errors on on plot_pedigree()", {
  fam_path <- system.file("extdata", "3gens.fam", package = "skater", mustWork = TRUE)
  pedigrees <- fam2ped(read_fam(fam_path))
  pdf_out <- function() paste0(tempfile(), ".pdf")
  expect_error(plot_pedigree(pedigrees))
  expect_error(plot_pedigree(pedigrees, file = pdf_out()))
  expect_error(plot_pedigree(pedigrees$ped[[1]], file = pdf_out()))
})
| /tests/testthat/test-skater.R | permissive | signaturescience/skater | R | false | false | 954 | r | library(skater)
library(testthat)
# dibble(k) returns the degree tibble with k + 2 rows for supported degrees,
# warns at the edge of the supported range, and errors outside it.
test_that("Degree tibble", {
  degrees <- 3:9
  row_counts <- purrr::map_int(purrr::map(degrees, dibble), nrow)
  expect_equal(row_counts, degrees + 2)
  expect_error(dibble(12))
  expect_warning(dibble(10))
  expect_error(dibble(0))
  expect_warning(dibble(2))
})

# kin2degree() maps kinship coefficients to relatedness degree, is
# vectorized, and yields NA below the third-degree cutoff.
test_that("Degree inference from kinship coefficient", {
  expect_equal(kin2degree(0.25), 1L)
  expect_equal(kin2degree(0.125), 2L)
  expect_equal(kin2degree(0.0625), 3L)
  expect_identical(kin2degree(0), NA_integer_)
  kin <- c(0.5, 0.25, 0.125, 0.0625, 0.03125, 0, 0.25, 0.5)
  expect_identical(kin2degree(kin), c(0L, 1L, 2L, 3L, NA, NA, 1L, 0L))
})

# plot_pedigree() rejects each of these argument combinations.
test_that("Expect errors on on plot_pedigree()", {
  fam_path <- system.file("extdata", "3gens.fam", package = "skater", mustWork = TRUE)
  pedigrees <- fam2ped(read_fam(fam_path))
  pdf_out <- function() paste0(tempfile(), ".pdf")
  expect_error(plot_pedigree(pedigrees))
  expect_error(plot_pedigree(pedigrees, file = pdf_out()))
  expect_error(plot_pedigree(pedigrees$ped[[1]], file = pdf_out()))
})
|
### adding historical timeseries to the dashboard ###
# Progress: deploy
# Dependencies
library(reshape2)
# ScraperWiki helper: resolve a path for deployment. When d (deployed) is
# TRUE the path p is prefixed with the tool directory l, otherwise p is
# returned unchanged.
onSw <- function(p = NULL, d = TRUE, l = 'tool/ckan/') {
  if (d) {
    paste0(l, p)
  } else {
    p
  }
}
# config
# PATH resolves inside tool/ckan/ when deployed (see onSw above).
PATH = onSw('data/temp.csv')
db_table_name = 'ckan_dataset_data'
# Loading helper libraries
# write_tables.R appears to provide writeTable(); sw_status.R appears to
# provide changeSwStatus() -- inferred from the calls below, confirm.
source(onSw('code/write_tables.R'))
source(onSw('code/sw_status.R'))
# get data and insert in db
# Download the remote analytics CSV and load it into the local database.
#
# p:     local path to write the downloaded CSV to
# table: name of the destination database table
# url:   source CSV location; parameterized (new, optional) so the scraper
#        can be pointed elsewhere without editing this function. The default
#        preserves the original hard-coded endpoint.
getAndInsert <- function(p = NULL, table = NULL,
  url = 'https://ds-ec2.scraperwiki.com/zaflugd/iokwwtf3ldspuao/cgi-bin/csv/hdx_repo_analytics.csv') {
  # downloading the file from another box
  # NOTE(review): method = 'wget' requires the wget binary on the host and
  # is non-portable; kept as-is since the deployment box presumably has it.
  download.file(
    url,
    destfile = p,
    method = 'wget'
  )
  # adding data to local database
  data <- read.csv(p)
  # drop columns the dashboard no longer tracks
  data$Number_of_Licenses <- NULL
  data$Number_of_Countries <- NULL
  data$Number_of_Tags <- NULL
  # not present in the source CSV yet; created empty so the table schema
  # matches what the dashboard expects
  data$orgs_sharing_data <- NA
  # rename to the dashboard's column names -- assumes exactly these five
  # columns remain after the deletions above; TODO confirm source schema
  names(data) <- c('number_of_datasets', 'number_of_organizations', 'number_of_users', 'date', 'orgs_sharing_data')
  writeTable(data, table, 'scraperwiki', overwrite = TRUE) # overwriting the table name if it exists
}
# ScraperWiki wrapper: thin entry point used by the runner below; simply
# delegates to getAndInsert() with the configured path and table name.
runScraper <- function(p = NULL, table = NULL) {
  getAndInsert(p = p, table = table)
}
# ScraperWiki-specific error handler
# Changing the status of SW.
# Run the scraper; on any error, email an alert, flag the box as errored in
# the ScraperWiki UI, and abort so the failure is visible.
tryCatch(runScraper(p = PATH, table = db_table_name),
error = function(e) {
cat('Error detected ... sending notification.')
# NOTE(review): system('mail ...') silently does nothing if no MTA is
# installed on the box; the original error in `e` is also not logged.
system('mail -s "CKAN statistics: historic collection failed." luiscape@gmail.com')
changeSwStatus(type = "error", message = "Scraper failed.")
{ stop("!!") }
}
)
# If success:
changeSwStatus(type = 'ok') | /ckan/code/dataset_data_historical.R | no_license | luiscape/hdx_management_dashboard | R | false | false | 1,638 | r | ### adding historical timeseries to the dashboard ###
# Progress: deploy
# Dependencies
library(reshape2)
# ScraperWiki helper: resolve a path for deployment. When d (deployed) is
# TRUE the path p is prefixed with the tool directory l, otherwise p is
# returned unchanged.
onSw <- function(p = NULL, d = TRUE, l = 'tool/ckan/') {
  if (d) {
    paste0(l, p)
  } else {
    p
  }
}
# config
# PATH resolves inside tool/ckan/ when deployed (see onSw above).
PATH = onSw('data/temp.csv')
db_table_name = 'ckan_dataset_data'
# Loading helper libraries
# write_tables.R appears to provide writeTable(); sw_status.R appears to
# provide changeSwStatus() -- inferred from the calls below, confirm.
source(onSw('code/write_tables.R'))
source(onSw('code/sw_status.R'))
# get data and insert in db
# Download the remote analytics CSV and load it into the local database.
#
# p:     local path to write the downloaded CSV to
# table: name of the destination database table
# url:   source CSV location; parameterized (new, optional) so the scraper
#        can be pointed elsewhere without editing this function. The default
#        preserves the original hard-coded endpoint.
getAndInsert <- function(p = NULL, table = NULL,
  url = 'https://ds-ec2.scraperwiki.com/zaflugd/iokwwtf3ldspuao/cgi-bin/csv/hdx_repo_analytics.csv') {
  # downloading the file from another box
  # NOTE(review): method = 'wget' requires the wget binary on the host and
  # is non-portable; kept as-is since the deployment box presumably has it.
  download.file(
    url,
    destfile = p,
    method = 'wget'
  )
  # adding data to local database
  data <- read.csv(p)
  # drop columns the dashboard no longer tracks
  data$Number_of_Licenses <- NULL
  data$Number_of_Countries <- NULL
  data$Number_of_Tags <- NULL
  # not present in the source CSV yet; created empty so the table schema
  # matches what the dashboard expects
  data$orgs_sharing_data <- NA
  # rename to the dashboard's column names -- assumes exactly these five
  # columns remain after the deletions above; TODO confirm source schema
  names(data) <- c('number_of_datasets', 'number_of_organizations', 'number_of_users', 'date', 'orgs_sharing_data')
  writeTable(data, table, 'scraperwiki', overwrite = TRUE) # overwriting the table name if it exists
}
# ScraperWiki wraper function
# Thin entry point used by the runner below; simply delegates to
# getAndInsert() with the configured path and table name.
runScraper <- function(p = NULL, table = NULL) {
  getAndInsert(p = p, table = table)
}
# ScraperWiki-specific error handler
# Changing the status of SW.
# Run the scraper; on any error, email an alert, flag the box as errored in
# the ScraperWiki UI, and abort so the failure is visible.
tryCatch(runScraper(p = PATH, table = db_table_name),
error = function(e) {
cat('Error detected ... sending notification.')
# NOTE(review): system('mail ...') silently does nothing if no MTA is
# installed on the box; the original error in `e` is also not logged.
system('mail -s "CKAN statistics: historic collection failed." luiscape@gmail.com')
changeSwStatus(type = "error", message = "Scraper failed.")
{ stop("!!") }
}
)
# If success:
changeSwStatus(type = 'ok') |
library(MASS)
library(recipes)
library(rsample)
library(car)
library(DataExplorer)
library(polycor)
library(tidyverse)
library(ROCR)
library(caret)
library(glmnet)
# Load the HR churn data and take a first look: structure, missingness,
# and variable distributions.
# NOTE(review): HR_Churn is assumed to already exist in the session (e.g.
# loaded via an RStudio import) -- there is no read call for it here.
data_raw = HR_Churn
glimpse(data_raw)
plot_missing(data_raw)
plot_density(data_raw)
# Reproducible 70/30 train/test split.
set.seed(993)
train_test_split = initial_split(data_raw, prop=.70)
train_test_split
train_tbl = training(train_test_split)
test_tbl = testing(train_test_split)
# Preprocessing recipe, prepped on the training data only:
# one-hot encode nominal predictors, Box-Cox transform numeric predictors.
cake = recipe(Gone ~., data=train_tbl) %>%
step_dummy(all_nominal(), -all_outcomes(), one_hot=TRUE) %>%
step_BoxCox(all_predictors(), -all_outcomes()) %>%
prep(data=train_tbl)
cake
# Apply the recipe to both splits; the outcome must be a factor for the
# classifiers below.
train_clean = bake(cake,new_data=train_tbl)
test_clean = bake(cake,new_data=test_tbl)
train_clean$Gone = as.factor(train_clean$Gone)
test_clean$Gone = as.factor(test_clean$Gone)
glimpse(train_clean)
# First cross-validated lasso (alpha = 1) logistic fit over a wide lambda
# grid; used to screen out predictors shrunk to zero.
set.seed(3432)
train_x <- model.matrix(Gone ~ . -1, data = train_clean)
train_y <- train_clean$Gone
grid = 10^seq(10,-2,by=-.1)
cv.lasso <- cv.glmnet(train_x, train_y, family="binomial", alpha=1, lambda=grid)
plot(cv.lasso)
best_lambda = cv.lasso$lambda.min
coef(cv.lasso)
# Drop the predictors the first fit eliminated, then refit the lasso.
train_clean2 = train_clean %>% select(-PerformanceRating,-RelationshipSatisfaction,-YearsAtCompany,-YearsSinceLastPromotion)
glimpse(train_clean2)
test_clean2 = test_clean %>% select(-PerformanceRating,-RelationshipSatisfaction,-YearsAtCompany,-YearsSinceLastPromotion)
set.seed(3854)
train_x <- model.matrix(Gone ~ . -1, data = train_clean2)
train_y <- train_clean2$Gone
grid = 10^seq(10,-2,by=-.1)
cv.lasso2 <- cv.glmnet(train_x, train_y, family="binomial", alpha=1, lambda=grid)
plot(cv.lasso2)
###########################
# NOTE(review): predictions below are made on the TRAINING matrix (train_x)
# but are later compared with TEST labels; the vectors come from different
# rows (and lengths), so the accuracy figures here are not meaningful.
# NOTE(review): best_lambda at this point is still cv.lasso$lambda.min from
# the FIRST fit; it is only reassigned to cv.lasso2$lambda.min further down.
lasso.prob = predict(cv.lasso2, s = best_lambda, newx = data.matrix(train_x), type="response")
lasso.pred = ifelse(lasso.prob>0.5, "Yes", "No")
lasso.prob
lasso.pred
# NOTE(review): test_y is only defined a few lines below, so this line fails
# when the script runs top to bottom; also ConfusionMatrix (capital C) is
# MLmetrics' function (not loaded) -- caret's is confusionMatrix().
ConfusionMatrix(table(lasso.pred, test_y))
mean(lasso.pred==test_clean2$Gone)
###################
best_lambda = cv.lasso2$lambda.min
coef(cv.lasso2)
best_lambda
test_x <- model.matrix(Gone ~ . -1, data = test_clean2)
test_y <- test_clean2$Gone
# NOTE(review): newx is train_x again -- presumably test_x (just built above)
# was intended here.
lasso.pred = predict(cv.lasso2, newx = data.matrix(train_x), type="response")
lasso.pred
# LDA with 10-fold CV repeated 3 times, selected on ROC AUC, then evaluated
# on the held-out test set.
control = trainControl(method="repeatedcv", number=10, repeats=3, summaryFunction=twoClassSummary,classProbs=TRUE, savePredictions="final")
lda.fit = train(Gone ~., data=train_clean2, method="lda", metric="ROC", trControl=control)
lda.fit
lda.pred = predict(lda.fit, test_clean2)
confusionMatrix(lda.pred, test_clean2$Gone)
#grid = expand.grid(alpha=1, lambda=10^seq(10,-2,length=1000))
#control = trainControl(method="repeatedcv", number=10, repeats=3)
#cv.lasso = train(Gone ~., data=train_clean,
#method="glmnet",
#trControl=control,
#tuneGrid=grid)
#cv.lasso$lambda.min
#coef(cv.lasso, s=0.1)
| /HW 6 - Final - group.R | no_license | hamzaktk18/R-classwork | R | false | false | 2,847 | r | library(MASS)
library(recipes)
library(rsample)
library(car)
library(DataExplorer)
library(polycor)
library(tidyverse)
library(ROCR)
library(caret)
library(glmnet)
# Load the HR churn data and take a first look: structure, missingness,
# and variable distributions.
# NOTE(review): HR_Churn is assumed to already exist in the session (e.g.
# loaded via an RStudio import) -- there is no read call for it here.
data_raw = HR_Churn
glimpse(data_raw)
plot_missing(data_raw)
plot_density(data_raw)
# Reproducible 70/30 train/test split.
set.seed(993)
train_test_split = initial_split(data_raw, prop=.70)
train_test_split
train_tbl = training(train_test_split)
test_tbl = testing(train_test_split)
# Preprocessing recipe, prepped on the training data only:
# one-hot encode nominal predictors, Box-Cox transform numeric predictors.
cake = recipe(Gone ~., data=train_tbl) %>%
step_dummy(all_nominal(), -all_outcomes(), one_hot=TRUE) %>%
step_BoxCox(all_predictors(), -all_outcomes()) %>%
prep(data=train_tbl)
cake
# Apply the recipe to both splits; the outcome must be a factor for the
# classifiers below.
train_clean = bake(cake,new_data=train_tbl)
test_clean = bake(cake,new_data=test_tbl)
train_clean$Gone = as.factor(train_clean$Gone)
test_clean$Gone = as.factor(test_clean$Gone)
glimpse(train_clean)
# First cross-validated lasso (alpha = 1) logistic fit over a wide lambda
# grid; used to screen out predictors shrunk to zero.
set.seed(3432)
train_x <- model.matrix(Gone ~ . -1, data = train_clean)
train_y <- train_clean$Gone
grid = 10^seq(10,-2,by=-.1)
cv.lasso <- cv.glmnet(train_x, train_y, family="binomial", alpha=1, lambda=grid)
plot(cv.lasso)
best_lambda = cv.lasso$lambda.min
coef(cv.lasso)
# Drop the predictors the first fit eliminated, then refit the lasso.
train_clean2 = train_clean %>% select(-PerformanceRating,-RelationshipSatisfaction,-YearsAtCompany,-YearsSinceLastPromotion)
glimpse(train_clean2)
test_clean2 = test_clean %>% select(-PerformanceRating,-RelationshipSatisfaction,-YearsAtCompany,-YearsSinceLastPromotion)
set.seed(3854)
train_x <- model.matrix(Gone ~ . -1, data = train_clean2)
train_y <- train_clean2$Gone
grid = 10^seq(10,-2,by=-.1)
cv.lasso2 <- cv.glmnet(train_x, train_y, family="binomial", alpha=1, lambda=grid)
plot(cv.lasso2)
###########################
# NOTE(review): predictions below are made on the TRAINING matrix (train_x)
# but are later compared with TEST labels; the vectors come from different
# rows (and lengths), so the accuracy figures here are not meaningful.
# NOTE(review): best_lambda at this point is still cv.lasso$lambda.min from
# the FIRST fit; it is only reassigned to cv.lasso2$lambda.min further down.
lasso.prob = predict(cv.lasso2, s = best_lambda, newx = data.matrix(train_x), type="response")
lasso.pred = ifelse(lasso.prob>0.5, "Yes", "No")
lasso.prob
lasso.pred
# NOTE(review): test_y is only defined a few lines below, so this line fails
# when the script runs top to bottom; also ConfusionMatrix (capital C) is
# MLmetrics' function (not loaded) -- caret's is confusionMatrix().
ConfusionMatrix(table(lasso.pred, test_y))
mean(lasso.pred==test_clean2$Gone)
###################
best_lambda = cv.lasso2$lambda.min
coef(cv.lasso2)
best_lambda
test_x <- model.matrix(Gone ~ . -1, data = test_clean2)
test_y <- test_clean2$Gone
# NOTE(review): newx is train_x again -- presumably test_x (just built above)
# was intended here.
lasso.pred = predict(cv.lasso2, newx = data.matrix(train_x), type="response")
lasso.pred
# LDA with 10-fold CV repeated 3 times, selected on ROC AUC, then evaluated
# on the held-out test set.
control = trainControl(method="repeatedcv", number=10, repeats=3, summaryFunction=twoClassSummary,classProbs=TRUE, savePredictions="final")
lda.fit = train(Gone ~., data=train_clean2, method="lda", metric="ROC", trControl=control)
lda.fit
lda.pred = predict(lda.fit, test_clean2)
confusionMatrix(lda.pred, test_clean2$Gone)
#grid = expand.grid(alpha=1, lambda=10^seq(10,-2,length=1000))
#control = trainControl(method="repeatedcv", number=10, repeats=3)
#cv.lasso = train(Gone ~., data=train_clean,
#method="glmnet",
#trControl=control,
#tuneGrid=grid)
#cv.lasso$lambda.min
#coef(cv.lasso, s=0.1)
|
# Runner for this module's testthat suite (requires the testthat package).
# Please build your own test file from test-Template.R, and place it in tests folder
# please specify the package you need to run the sim function in the test files.
# to test all the test files in the tests folder:
# NOTE(review): the paths below are absolute and machine-specific; they will
# only resolve on the original author's machine — prefer a path relative to
# the module root, e.g. test_dir("tests/testthat").
test_dir("/Users/kaitlynschyrmann/Desktop/SpaDES GM Module 2018/trapsRepo/modules/loadTreeCover/tests/testthat")
# Alternative, you can use test_file to test individual test file, e.g.:
test_file("/Users/kaitlynschyrmann/Desktop/SpaDES GM Module 2018/trapsRepo/modules/loadTreeCover/tests/testthat/test-template.R")
| /modules/loadLcc2015/tests/unitTests.R | no_license | ianmseddy/GMRiskMapSpaDES | R | false | false | 535 | r |
# Runner for this module's testthat suite (requires the testthat package).
# Please build your own test file from test-Template.R, and place it in tests folder
# please specify the package you need to run the sim function in the test files.
# to test all the test files in the tests folder:
# NOTE(review): the paths below are absolute and machine-specific; they will
# only resolve on the original author's machine — prefer a path relative to
# the module root, e.g. test_dir("tests/testthat").
test_dir("/Users/kaitlynschyrmann/Desktop/SpaDES GM Module 2018/trapsRepo/modules/loadTreeCover/tests/testthat")
# Alternative, you can use test_file to test individual test file, e.g.:
test_file("/Users/kaitlynschyrmann/Desktop/SpaDES GM Module 2018/trapsRepo/modules/loadTreeCover/tests/testthat/test-template.R")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/temp_converter.R
\name{F_to_K}
\alias{F_to_K}
\title{Converts temperature from one unit to another}
\usage{
F_to_K(temp)
}
\arguments{
\item{temp}{Temperature in Fahrenheit}
}
\value{
The input temperature converted to Kelvin
}
\description{
Converts temperature from one unit to another
}
| /man/F_to_K.Rd | no_license | SherryLi1234/awapi | R | false | true | 369 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/temp_converter.R
\name{F_to_K}
\alias{F_to_K}
\title{Converts temperature from one unit to another}
\usage{
F_to_K(temp)
}
\arguments{
\item{temp}{Temperature in Fahrenheit}
}
\value{
The input temperature converted to Kelvin
}
\description{
Converts temperature from one unit to another
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/table-utils.R
\name{oneOfEach}
\alias{oneOfEach}
\title{Sampler of first observation of a particular table column.}
\usage{
oneOfEach(...)
}
\arguments{
\item{...}{Arguments to \code{nOfEach}.}
}
\value{
Data table with first occurrence of each unique value from the
indicated column in the given table.
}
\description{
\code{oneOfEach} grabs the first observation for each unique value
for a particular column within a table.
}
\seealso{
\code{\link{nOfEach}}
}
| /man/oneOfEach.Rd | no_license | vreuter/SwissR | R | false | true | 552 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/table-utils.R
\name{oneOfEach}
\alias{oneOfEach}
\title{Sampler of first observation of a particular table column.}
\usage{
oneOfEach(...)
}
\arguments{
\item{...}{Arguments to \code{nOfEach}.}
}
\value{
Data table with first occurrence of each unique value from the
indicated column in the given table.
}
\description{
\code{oneOfEach} grabs the first observation for each unique value
for a particular column within a table.
}
\seealso{
\code{\link{nOfEach}}
}
|
# The mortality data were obtained from DATASUS's Tabnet.
# The database is the Mortality Information System (SIM).
# The link below goes straight to the São Paulo data:
# http://tabnet.datasus.gov.br/cgi/deftohtm.exe?sim/cnv/obt10SP.def
# Selections used on the site: row 'Causa CID-BR-10' (CID-BR-10 cause),
# column 'Mês do Óbito' (month of death),
# content 'Óbitos por ocorrência' (deaths by place of occurrence),
# period '2018'.
# Click "mostrar" (show) and, at the bottom of the generated page,
# "COPIA COMO .CSV". This downloads a file named with a hash (here it was
# A133322193_52_24_15.csv); put it in the data folder and rename it to
# mortalidade.csv.
setwd('/home/mribeirodantas/Dropbox/Analyses/GIFcasosSP/')
# Read São Paulo mortality data ----------------------------------------
library(dplyr)
library(readr)
library(stringr)
library(purrr)
# The CSV has a 3-line preamble and a 10-line footer; trim both before
# parsing (read_csv2: semicolon-separated, latin1-encoded).
mortalidade_sp <- read_lines('data/mortalidade.csv') %>%
head(n = -10) %>%
paste(collapse = '\n') %>%
read_csv2(skip = 3, locale = locale(encoding = 'latin1'))
# Preprocess São Paulo mortality data ----------------------------------
glimpse(mortalidade_sp)
# Strip the leading dots and CID numbering from the disease names
mortalidade_sp %>%
select(`Causa - CID-BR-10`) %>%
pull %>%
str_replace('^\\.*\\s', '') %>%
str_replace('^[0-9]*[\\-]?[0-9]*[\\.]?[0-9]* ', '') -> mortalidade_sp$`Causa - CID-BR-10`
# You might think that without the numbers we lose track of the disease
# groups, but they can still be identified because group names are written
# entirely in upper case.
# List of diseases of interest (names must match the cleaned CID-BR-10 labels)
doencas_interesse = c('Tuberculose', 'Doenças virais',
'Neopl malig da traquéia,brônquios e pulmões',
'Neoplasia maligna da mama',
'Leucemia', 'Diabetes mellitus', 'Desnutrição',
'Doença de Alzheimer', 'Infarto agudo do miocárdio',
'Influenza (gripe)', 'Pneumonia', 'Fibrose e cirrose do fígado',
'CAUSAS EXTERNAS DE MORBIDADE E MORTALIDADE')
# (Author's note, left unfinished: planned to merge accidents, falls, ...)
# Sanity check: every name of interest exists in the cleaned data
all(doencas_interesse %in% mortalidade_sp$`Causa - CID-BR-10`)
# Months of interest.
# Since only March and April COVID data will be analysed, it is more
# appropriate to take the daily average of these causes over March and
# April, to try to correct for possible seasonality.
meses_interesse = c('Março', 'Abril')
# Keep only the months and diseases of interest
mortalidade_sp <- mortalidade_sp %>%
select(c(`Causa - CID-BR-10`, all_of(meses_interesse))) %>%
filter(`Causa - CID-BR-10` %in% doencas_interesse)
# Rename columns and declare data types
colnames(mortalidade_sp) <- c('causa', 'Março', 'Abril')
mortalidade_sp$Março <- as.numeric(mortalidade_sp$Março)
mortalidade_sp$Abril <- as.numeric(mortalidade_sp$Abril)
# Create the daily-average column (March + April = 61 days)
mortalidade_sp <- mortalidade_sp %>%
mutate(media_diaria = rowSums(.[2:3])/61)
rm(doencas_interesse, meses_interesse)
write_csv2(mortalidade_sp, 'scripts/outputs/mortalidade_preprocessada.csv')
| /scripts/preprocess_mortalidade.R | no_license | mribeirodantas/covid19gif | R | false | false | 3,129 | r | # Os dados de mortalidade foram obtidos através do Tabnet do DATASUS.
# The database is the Mortality Information System (SIM) from DATASUS.
# The link below goes straight to the São Paulo data:
# http://tabnet.datasus.gov.br/cgi/deftohtm.exe?sim/cnv/obt10SP.def
# Selections used on the site: row 'Causa CID-BR-10' (CID-BR-10 cause),
# column 'Mês do Óbito' (month of death),
# content 'Óbitos por ocorrência' (deaths by place of occurrence),
# period '2018'.
# Click "mostrar" (show) and, at the bottom of the generated page,
# "COPIA COMO .CSV". This downloads a file named with a hash (here it was
# A133322193_52_24_15.csv); put it in the data folder and rename it to
# mortalidade.csv.
setwd('/home/mribeirodantas/Dropbox/Analyses/GIFcasosSP/')
# Read São Paulo mortality data ----------------------------------------
library(dplyr)
library(readr)
library(stringr)
library(purrr)
# The CSV has a 3-line preamble and a 10-line footer; trim both before
# parsing (read_csv2: semicolon-separated, latin1-encoded).
mortalidade_sp <- read_lines('data/mortalidade.csv') %>%
head(n = -10) %>%
paste(collapse = '\n') %>%
read_csv2(skip = 3, locale = locale(encoding = 'latin1'))
# Preprocess São Paulo mortality data ----------------------------------
glimpse(mortalidade_sp)
# Strip the leading dots and CID numbering from the disease names
mortalidade_sp %>%
select(`Causa - CID-BR-10`) %>%
pull %>%
str_replace('^\\.*\\s', '') %>%
str_replace('^[0-9]*[\\-]?[0-9]*[\\.]?[0-9]* ', '') -> mortalidade_sp$`Causa - CID-BR-10`
# You might think that without the numbers we lose track of the disease
# groups, but they can still be identified because group names are written
# entirely in upper case.
# List of diseases of interest (names must match the cleaned CID-BR-10 labels)
doencas_interesse = c('Tuberculose', 'Doenças virais',
'Neopl malig da traquéia,brônquios e pulmões',
'Neoplasia maligna da mama',
'Leucemia', 'Diabetes mellitus', 'Desnutrição',
'Doença de Alzheimer', 'Infarto agudo do miocárdio',
'Influenza (gripe)', 'Pneumonia', 'Fibrose e cirrose do fígado',
'CAUSAS EXTERNAS DE MORBIDADE E MORTALIDADE')
# (Author's note, left unfinished: planned to merge accidents, falls, ...)
# Sanity check: every name of interest exists in the cleaned data
all(doencas_interesse %in% mortalidade_sp$`Causa - CID-BR-10`)
# Months of interest.
# Since only March and April COVID data will be analysed, it is more
# appropriate to take the daily average of these causes over March and
# April, to try to correct for possible seasonality.
meses_interesse = c('Março', 'Abril')
# Keep only the months and diseases of interest
mortalidade_sp <- mortalidade_sp %>%
select(c(`Causa - CID-BR-10`, all_of(meses_interesse))) %>%
filter(`Causa - CID-BR-10` %in% doencas_interesse)
# Rename columns and declare data types
colnames(mortalidade_sp) <- c('causa', 'Março', 'Abril')
mortalidade_sp$Março <- as.numeric(mortalidade_sp$Março)
mortalidade_sp$Abril <- as.numeric(mortalidade_sp$Abril)
# Create the daily-average column (March + April = 61 days)
mortalidade_sp <- mortalidade_sp %>%
mutate(media_diaria = rowSums(.[2:3])/61)
rm(doencas_interesse, meses_interesse)
write_csv2(mortalidade_sp, 'scripts/outputs/mortalidade_preprocessada.csv')
|
##
## run_analysis.R
## 1) Merges the training and the test sets to create one data set.
## 2) Extracts only the measurements on the mean and standard deviation for each measurement.
## 3) Uses descriptive activity names to name the activities in the data set
## 4) Appropriately labels the data set with descriptive variable names.
## 5) From the data set in step 4, creates a second, independent tidy data set with the average
##    of each variable for each activity and each subject.
##
library(dplyr)  # BUG FIX: dplyr (%>%, group_by, summarise) was used but never attached
#
# Step 1: Merge the training and test sets into one data set
#
allData <- rbind(read.table("X_test.txt"), read.table("X_train.txt"))
#
# Step 2: Pull in column & row headers, and other descriptors
# (col.names spelled out in full — the original relied on partial matching of "col.name")
#
activities <- read.table("activity_labels.txt", col.names = c("activity.code", "activity"))
features <- read.table("features.txt", col.names = c("column", "feature"))
actCodeByRow <- rbind(read.table("y_test.txt", col.names = "activity.code"),
                      read.table("y_train.txt", col.names = "activity.code"))
subjectByRow <- rbind(read.table("subject_test.txt", col.names = "Subject"),
                      read.table("subject_train.txt", col.names = "Subject"))
#
# Step 3: Set column names (features)
#
names(allData) <- features$feature
#
# Step 4: Keep only the "-mean()" and "-std()" columns
# (fixed = TRUE so "()" is matched literally, not as a regex group)
#
goodCols <- grep(pattern = "-mean()", x = features$feature, fixed = TRUE)
goodCols <- append(goodCols, grep(pattern = "-std()", x = features$feature, fixed = TRUE))
measuredData <- allData[goodCols]
#
# Step 5: Prepend the Subject column
#
measuredData <- cbind(Subject = subjectByRow, measuredData)
#
# Step 6: Prepend the Activity column (look up descriptive label per row)
#
activityByRow <- activities[actCodeByRow$activity.code, "activity"]
measuredData <- cbind(Activity = activityByRow, measuredData)
#
# Step 7: Create the tidy data set (mean of every measurement per
# Activity/Subject) and write it out.
# summarise_each(funs(mean)) is deprecated -> summarise_all(mean).
# BUG FIX: output filename typo ("tidayData.txt") and partial-matched
# "row.name" argument corrected.
#
tidyData <- measuredData %>% group_by(Activity, Subject) %>% summarise_all(mean)
write.table(tidyData, "tidyData.txt", row.names = FALSE)
#
# Step 8: Display the result
#
print(tidyData)
| /run_Analysis.R | no_license | alanfarkas/GCD-CourseProject | R | false | false | 2,152 | r | ##
## run_analysis.R
## 1) Merges the training and the test sets to create one data set.
## 2) Extracts only the measurements on the mean and standard deviation for each measurement.
## 3) Uses descriptive activity names to name the activities in the data set
## 4) Appropriately labels the data set with descriptive variable names.
## 5) From the data set in step 4, creates a second, independent tidy data set with the average
##    of each variable for each activity and each subject.
##
library(dplyr)  # BUG FIX: dplyr (%>%, group_by, summarise) was used but never attached
#
# Step 1: Merge the training and test sets into one data set
#
allData <- rbind(read.table("X_test.txt"), read.table("X_train.txt"))
#
# Step 2: Pull in column & row headers, and other descriptors
# (col.names spelled out in full — the original relied on partial matching of "col.name")
#
activities <- read.table("activity_labels.txt", col.names = c("activity.code", "activity"))
features <- read.table("features.txt", col.names = c("column", "feature"))
actCodeByRow <- rbind(read.table("y_test.txt", col.names = "activity.code"),
                      read.table("y_train.txt", col.names = "activity.code"))
subjectByRow <- rbind(read.table("subject_test.txt", col.names = "Subject"),
                      read.table("subject_train.txt", col.names = "Subject"))
#
# Step 3: Set column names (features)
#
names(allData) <- features$feature
#
# Step 4: Keep only the "-mean()" and "-std()" columns
# (fixed = TRUE so "()" is matched literally, not as a regex group)
#
goodCols <- grep(pattern = "-mean()", x = features$feature, fixed = TRUE)
goodCols <- append(goodCols, grep(pattern = "-std()", x = features$feature, fixed = TRUE))
measuredData <- allData[goodCols]
#
# Step 5: Prepend the Subject column
#
measuredData <- cbind(Subject = subjectByRow, measuredData)
#
# Step 6: Prepend the Activity column (look up descriptive label per row)
#
activityByRow <- activities[actCodeByRow$activity.code, "activity"]
measuredData <- cbind(Activity = activityByRow, measuredData)
#
# Step 7: Create the tidy data set (mean of every measurement per
# Activity/Subject) and write it out.
# summarise_each(funs(mean)) is deprecated -> summarise_all(mean).
# BUG FIX: output filename typo ("tidayData.txt") and partial-matched
# "row.name" argument corrected.
#
tidyData <- measuredData %>% group_by(Activity, Subject) %>% summarise_all(mean)
write.table(tidyData, "tidyData.txt", row.names = FALSE)
#
# Step 8: Display the result
#
print(tidyData)
|
# Snapshot tests for printing/displaying correlation data frames.
test_that("display and print method works - markdown", {
skip_on_cran()  # snapshot output is environment-sensitive
skip_if(getRversion() < "4.0.0")
skip_if_not_or_load_if_installed("gt")  # project helper: skip unless {gt} can be loaded
expect_snapshot(print(correlation(iris)))
expect_snapshot(display(correlation(iris)))
})
# display and print method works - HTML -----------------------------
test_that("display and print method works - HTML", {
skip_on_cran()
skip_if(getRversion() < "4.0.0")
skip_if_not_or_load_if_installed("gt")
# NOTE(review): display(print(..., format = "html")) wraps print()'s return
# value in display(), which looks odd — presumably
# display(correlation(...), format = "html") was intended. Confirm against
# the recorded snapshots before changing.
expect_snapshot(display(print(correlation(subset(mtcars, select = c("wt", "mpg"))), format = "html")))
expect_snapshot(print(correlation(subset(mtcars, select = c("wt", "mpg"))), format = "html"))
})
| /tests/testthat/test-display_print_dataframe.R | no_license | cran/correlation | R | false | false | 690 | r | test_that("display and print method works - markdown", {
skip_on_cran()  # snapshot output is environment-sensitive
skip_if(getRversion() < "4.0.0")
skip_if_not_or_load_if_installed("gt")  # project helper: skip unless {gt} can be loaded
expect_snapshot(print(correlation(iris)))
expect_snapshot(display(correlation(iris)))
})
# display and print method works - HTML -----------------------------
test_that("display and print method works - HTML", {
skip_on_cran()
skip_if(getRversion() < "4.0.0")
skip_if_not_or_load_if_installed("gt")
# NOTE(review): display(print(..., format = "html")) wraps print()'s return
# value in display(), which looks odd — presumably
# display(correlation(...), format = "html") was intended. Confirm against
# the recorded snapshots before changing.
expect_snapshot(display(print(correlation(subset(mtcars, select = c("wt", "mpg"))), format = "html")))
expect_snapshot(print(correlation(subset(mtcars, select = c("wt", "mpg"))), format = "html"))
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geom_timeline.R
\name{new_theme}
\alias{new_theme}
\title{new_theme}
\usage{
new_theme(...)
}
\arguments{
\item{...}{Extra Params.}
}
\value{
This function adds the lineGrobs and pointGrob into the current graphics device.
}
\description{
Creates a new ggplot2 theme used to visualize earthquake timeline plots
}
\examples{
\dontrun{
filename <- system.file("extdata", "signif.txt", package="earthquakeGeoms")
library(readr)
input <- readr::read_delim(filename, delim = "\\t")
sample <- input \%>\%
eq_clean_data() \%>\%
eq_location_clean("LOCATION_NAME")\%>\%
filter(YEAR >= 2000) \%>\%
filter(COUNTRY \%in\% c("USA", "MEXICO"))
ggplot2::ggplot(data=sample, aes(x = date, y = COUNTRY, color = DEATHS, size = EQ_PRIMARY)) +
geom_timeline() +
scale_size_continuous(name = 'Richter scale value', guide = guide_legend(order = 1)) +
scale_color_continuous(name = '# of Deaths', guide = guide_colorbar(order = 2))+
new_theme()
}
}
| /man/new_theme.Rd | no_license | marcelamu95/earthquakeGeoms | R | false | true | 987 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geom_timeline.R
\name{new_theme}
\alias{new_theme}
\title{new_theme}
\usage{
new_theme(...)
}
\arguments{
\item{...}{Extra Params.}
}
\value{
This function adds the lineGrobs and pointGrob into the current graphics device.
}
\description{
Creates a new ggplot2 theme used to visualize earthquake timeline plots
}
\examples{
\dontrun{
filename <- system.file("extdata", "signif.txt", package="earthquakeGeoms")
library(readr)
input <- readr::read_delim(filename, delim = "\\t")
sample <- input \%>\%
eq_clean_data() \%>\%
eq_location_clean("LOCATION_NAME")\%>\%
filter(YEAR >= 2000) \%>\%
filter(COUNTRY \%in\% c("USA", "MEXICO"))
ggplot2::ggplot(data=sample, aes(x = date, y = COUNTRY, color = DEATHS, size = EQ_PRIMARY)) +
geom_timeline() +
scale_size_continuous(name = 'Richter scale value', guide = guide_legend(order = 1)) +
scale_color_continuous(name = '# of Deaths', guide = guide_colorbar(order = 2))+
new_theme()
}
}
|
#!/usr/bin/env Rscript
# Snakemake-driven plotting script: summarises mean-average-precision (MAP)
# benchmark results across dimension-reduction methods and renders three
# figures — MAP-vs-dimensionality curves, per-dataset optimal-MAP bars, and
# an integrative cross-dataset bar chart.
source("../.Rprofile", chdir = TRUE)
suppressPackageStartupMessages({
library(jsonlite)
library(ggplot2)
library(ggsci)
library(reshape2)
library(dplyr)
library(extrafont)
})
source("../Utilities/utils.R")  # presumably provides mod_style() used below — confirm
# Read data
df <- read.csv(snakemake@input[["data"]], check.names = FALSE, stringsAsFactors = FALSE)
# Order datasets by cell count so facets/bars run smallest-to-largest
df$dataset <- factor(df$dataset, levels = df %>%
select(dataset, n_cell) %>%
arrange(n_cell) %>%
distinct() %>%
pull(dataset)
) # This determines dataset order
# Method order comes from the pipeline config; "_" and "." become spaces for display
df$method <- factor(df$method, levels = snakemake@config[["method"]])
levels(df$method) <- gsub("[_.]", " ", levels(df$method))
color_mapping <- unlist(fromJSON(snakemake@input[["palette"]]))
# MAP: mean +/- sd (across seeds) per method, faceted by dataset
gp <- ggplot(data = df %>% group_by(dataset, method, dimensionality) %>% summarise(
sd = sd(mean_average_precision),
mean_average_precision = mean(mean_average_precision)
), mapping = aes(
x = dimensionality, y = mean_average_precision,
ymin = mean_average_precision - sd,
ymax = mean_average_precision + sd,
col = method
)) + geom_line(alpha = 0.8) + geom_errorbar(width = 1.5, alpha = 0.6) + facet_wrap(
~dataset, nrow = 2
) + scale_color_manual(
name = "Method", values = color_mapping, limits = levels(df$method)
) + scale_x_continuous(
name = "Dimensionality"
) + scale_y_continuous(
name = "Mean average precision"
)
ggsave(snakemake@output[["map"]], mod_style(gp), width = 7.5, height = 5)
# Optimal MAP: pick, per method, the dimensionality with the best average MAP
optdims <- as.data.frame(df %>% group_by(method, dimensionality, dataset) %>% summarise(
mean_average_precision = mean(mean_average_precision)
) %>% group_by(method, dimensionality) %>% summarise(
mean_average_precision = mean(mean_average_precision)
) %>% group_by(method) %>% summarise(
dimensionality = dimensionality[which.max(mean_average_precision)]
))
# Manual override: Cell BLAST is pinned to 10 dimensions regardless of the argmax
optdims[optdims$method == "Cell BLAST", "dimensionality"] <- 10
# optdims[!(optdims$method %in% c("tSNE", "UMAP", "SAUCIE")), "dimensionality"] <- 10 # For addressing reviewer question
# Append the chosen dimensionality to each method's display label, e.g. "PCA (10)"
method_dim <- sprintf("%s (%d)", optdims$method, optdims$dimensionality)
names(method_dim) <- optdims$method
optdf <- merge(optdims, df)
levels(optdf$method) <- method_dim[levels(optdf$method)]
names(color_mapping) <- method_dim[names(color_mapping)]
# Per-dataset bars of MAP at each method's optimal dimensionality,
# with per-seed points jittered over the bars
gp <- ggplot(data = optdf %>% group_by(dataset, method) %>% summarise(
sd = sd(mean_average_precision),
mean_average_precision = mean(mean_average_precision)
), mapping = aes(
x = dataset, y = mean_average_precision,
ymin = mean_average_precision - sd,
ymax = mean_average_precision + sd,
fill = method
)) + geom_bar(
stat = "identity", position = position_dodge(0.85), width = 0.85
) + geom_point(
data = optdf, mapping = aes(
x = dataset, y = mean_average_precision, fill = method # fill is ineffective and only used for position dodge
), size = 0.5, color = "#808080",
position = position_jitterdodge(jitter.width = 0.2, dodge.width = 0.85),
inherit.aes = FALSE, show.legend = FALSE
) + geom_errorbar(
position = position_dodge(0.85), width = 0.2
) + scale_x_discrete(
name = "Dataset"
) + scale_y_continuous(
name = "Mean average precision"
) + scale_fill_manual(
name = "Method", values = color_mapping
) + coord_cartesian(ylim = c(0.4, 1.0))
ggsave(snakemake@output[["optmap"]], mod_style(gp), width = 11, height = 4.5)
# Integrative: average over datasets, one bar per method, sorted by MAP
optdf_summarize_seed <- optdf %>% group_by(method, dataset) %>% summarise(
mean_average_precision = mean(mean_average_precision)
) %>% arrange(method, dataset) %>% as.data.frame()
optdf_summarize_dataset <- optdf_summarize_seed %>% group_by(method) %>% summarise(
sd = sd(mean_average_precision),
mean_average_precision = mean(mean_average_precision)
) %>% as.data.frame()
gp <- ggplot(data = optdf_summarize_dataset, mapping = aes(
x = method, y = mean_average_precision, fill = method,
ymin = mean_average_precision - sd,
ymax = mean_average_precision + sd
)) + geom_bar(
stat = "identity", width = 0.65
) + geom_point(
data = optdf_summarize_seed, mapping = aes(
x = method, y = mean_average_precision
), size = 1, color = "#808080", position = position_jitter(0.2),
inherit.aes = FALSE, show.legend = FALSE
) + geom_errorbar(
width = 0.15
) + scale_x_discrete(
name = "Method", limits = optdf_summarize_dataset %>% arrange(
desc(mean_average_precision)
) %>% pull(method)
) + scale_y_continuous(
name = "Mean average precision"
) + scale_fill_manual(
values = color_mapping, name = "Method"
) + coord_cartesian(ylim = c(0.55, 1.0)) + guides(fill = FALSE)
ggsave(snakemake@output[["integrative"]], mod_style(gp, rotate.x = TRUE), width = 7, height = 4)
| /Evaluation/dimension_reduction_plot.R | permissive | JiahuaQu/Cell_BLAST | R | false | false | 4,735 | r | #!/usr/bin/env Rscript
# Snakemake-driven plotting script: summarises mean-average-precision (MAP)
# benchmark results across dimension-reduction methods and renders three
# figures — MAP-vs-dimensionality curves, per-dataset optimal-MAP bars, and
# an integrative cross-dataset bar chart.
source("../.Rprofile", chdir = TRUE)
suppressPackageStartupMessages({
library(jsonlite)
library(ggplot2)
library(ggsci)
library(reshape2)
library(dplyr)
library(extrafont)
})
source("../Utilities/utils.R")  # presumably provides mod_style() used below — confirm
# Read data
df <- read.csv(snakemake@input[["data"]], check.names = FALSE, stringsAsFactors = FALSE)
# Order datasets by cell count so facets/bars run smallest-to-largest
df$dataset <- factor(df$dataset, levels = df %>%
select(dataset, n_cell) %>%
arrange(n_cell) %>%
distinct() %>%
pull(dataset)
) # This determines dataset order
# Method order comes from the pipeline config; "_" and "." become spaces for display
df$method <- factor(df$method, levels = snakemake@config[["method"]])
levels(df$method) <- gsub("[_.]", " ", levels(df$method))
color_mapping <- unlist(fromJSON(snakemake@input[["palette"]]))
# MAP: mean +/- sd (across seeds) per method, faceted by dataset
gp <- ggplot(data = df %>% group_by(dataset, method, dimensionality) %>% summarise(
sd = sd(mean_average_precision),
mean_average_precision = mean(mean_average_precision)
), mapping = aes(
x = dimensionality, y = mean_average_precision,
ymin = mean_average_precision - sd,
ymax = mean_average_precision + sd,
col = method
)) + geom_line(alpha = 0.8) + geom_errorbar(width = 1.5, alpha = 0.6) + facet_wrap(
~dataset, nrow = 2
) + scale_color_manual(
name = "Method", values = color_mapping, limits = levels(df$method)
) + scale_x_continuous(
name = "Dimensionality"
) + scale_y_continuous(
name = "Mean average precision"
)
ggsave(snakemake@output[["map"]], mod_style(gp), width = 7.5, height = 5)
# Optimal MAP: pick, per method, the dimensionality with the best average MAP
optdims <- as.data.frame(df %>% group_by(method, dimensionality, dataset) %>% summarise(
mean_average_precision = mean(mean_average_precision)
) %>% group_by(method, dimensionality) %>% summarise(
mean_average_precision = mean(mean_average_precision)
) %>% group_by(method) %>% summarise(
dimensionality = dimensionality[which.max(mean_average_precision)]
))
# Manual override: Cell BLAST is pinned to 10 dimensions regardless of the argmax
optdims[optdims$method == "Cell BLAST", "dimensionality"] <- 10
# optdims[!(optdims$method %in% c("tSNE", "UMAP", "SAUCIE")), "dimensionality"] <- 10 # For addressing reviewer question
# Append the chosen dimensionality to each method's display label, e.g. "PCA (10)"
method_dim <- sprintf("%s (%d)", optdims$method, optdims$dimensionality)
names(method_dim) <- optdims$method
optdf <- merge(optdims, df)
levels(optdf$method) <- method_dim[levels(optdf$method)]
names(color_mapping) <- method_dim[names(color_mapping)]
# Per-dataset bars of MAP at each method's optimal dimensionality,
# with per-seed points jittered over the bars
gp <- ggplot(data = optdf %>% group_by(dataset, method) %>% summarise(
sd = sd(mean_average_precision),
mean_average_precision = mean(mean_average_precision)
), mapping = aes(
x = dataset, y = mean_average_precision,
ymin = mean_average_precision - sd,
ymax = mean_average_precision + sd,
fill = method
)) + geom_bar(
stat = "identity", position = position_dodge(0.85), width = 0.85
) + geom_point(
data = optdf, mapping = aes(
x = dataset, y = mean_average_precision, fill = method # fill is ineffective and only used for position dodge
), size = 0.5, color = "#808080",
position = position_jitterdodge(jitter.width = 0.2, dodge.width = 0.85),
inherit.aes = FALSE, show.legend = FALSE
) + geom_errorbar(
position = position_dodge(0.85), width = 0.2
) + scale_x_discrete(
name = "Dataset"
) + scale_y_continuous(
name = "Mean average precision"
) + scale_fill_manual(
name = "Method", values = color_mapping
) + coord_cartesian(ylim = c(0.4, 1.0))
ggsave(snakemake@output[["optmap"]], mod_style(gp), width = 11, height = 4.5)
# Integrative: average over datasets, one bar per method, sorted by MAP
optdf_summarize_seed <- optdf %>% group_by(method, dataset) %>% summarise(
mean_average_precision = mean(mean_average_precision)
) %>% arrange(method, dataset) %>% as.data.frame()
optdf_summarize_dataset <- optdf_summarize_seed %>% group_by(method) %>% summarise(
sd = sd(mean_average_precision),
mean_average_precision = mean(mean_average_precision)
) %>% as.data.frame()
gp <- ggplot(data = optdf_summarize_dataset, mapping = aes(
x = method, y = mean_average_precision, fill = method,
ymin = mean_average_precision - sd,
ymax = mean_average_precision + sd
)) + geom_bar(
stat = "identity", width = 0.65
) + geom_point(
data = optdf_summarize_seed, mapping = aes(
x = method, y = mean_average_precision
), size = 1, color = "#808080", position = position_jitter(0.2),
inherit.aes = FALSE, show.legend = FALSE
) + geom_errorbar(
width = 0.15
) + scale_x_discrete(
name = "Method", limits = optdf_summarize_dataset %>% arrange(
desc(mean_average_precision)
) %>% pull(method)
) + scale_y_continuous(
name = "Mean average precision"
) + scale_fill_manual(
values = color_mapping, name = "Method"
) + coord_cartesian(ylim = c(0.55, 1.0)) + guides(fill = FALSE)
ggsave(snakemake@output[["integrative"]], mod_style(gp, rotate.x = TRUE), width = 7, height = 4)
|
# QA script: households by household type (HHT) for the sub-regional
# forecast — builds region / jurisdiction / CPA frequency tables and one
# plot per geography.
library(scales)
library(sqldf)
library(rstudioapi)
library(RODBC)
library(dplyr)
library(reshape2)
library(ggplot2)
library(data.table)
library(stringr)
#library(wesanderson)
#library(RColorBrewer)
# NOTE: removed install.packages("reshape2") and a duplicate library(sqldf)
# call — package installation does not belong in an analysis script.

setwd(dirname(rstudioapi::getActiveDocumentContext()$path))

# Read household-by-household-type forecast output
HH_type<-read.csv('M:\\Technical Services\\QA Documents\\Projects\\Sub Regional Forecast\\Data Files\\HH_Household_Type\\HH_HHTYPE.csv',stringsAsFactors = FALSE,fileEncoding="UTF-8-BOM")

# Prefix the year with "y" so it is a valid factor level / column name
HH_type$yr <- as.factor(paste0("y", HH_type$yr))

# Aggregate households (N) by type at region / CPA / jurisdiction level
HH_type_reg<-aggregate(N~hht+yr, data=HH_type, sum)
HH_type_cpa<-aggregate(N~hht+yr+jcpa, data=HH_type, sum)
HH_type_jur<-aggregate(N~hht+yr+jurisdiction_id, data=HH_type, sum)

# jcpa codes <= 19 are jurisdictions (or NULL), not CPAs — keep CPA rows only
HH_type_cpa <- subset(HH_type_cpa, jcpa > 19)

# Wide tables (one column per year) for the QA spreadsheets
HH_type_cpa_cast <- dcast(HH_type_cpa, jcpa+hht~yr, value.var="N")
HH_type_jur_cast <- dcast(HH_type_jur, jurisdiction_id+hht~yr, value.var="N")
head(HH_type_cpa_cast)

#################
# Save frequency tables
#################
write.csv(HH_type_cpa_cast,"M:\\Technical Services\\QA Documents\\Projects\\Sub Regional Forecast\\Scripts\\output\\HH_type cpa freq.csv" )
write.csv(HH_type_jur_cast,"M:\\Technical Services\\QA Documents\\Projects\\Sub Regional Forecast\\Scripts\\output\\HH_type jur freq.csv" )
write.csv(HH_type_reg,"M:\\Technical Services\\QA Documents\\Projects\\Sub Regional Forecast\\Scripts\\output\\HH_type reg freq.csv" )

HH_type_cpa_omit<-na.omit(HH_type_cpa)

##################################################
# Graphs (saved to the QA output folder)
##################################################
results<-"M:\\Technical Services\\QA Documents\\Projects\\Sub Regional Forecast\\Scripts\\output\\"
# (removed stray reviewer note "BRI I HAVE STOPPED HERE" — bare text that
# made the script unparseable)

##################################
# Region-wide households by household type.
# BUG FIX: the original assigned the plot over the HH_type_reg data frame
# and then saved an undefined object (unittype_region); it also placed
# stat="count" inside aes() and coloured by the raw numeric hht.
hh_type_reg_plot <- ggplot(data=HH_type_reg, aes(x=yr, y=N, group=as.factor(hht), colour=as.factor(hht))) +
  geom_line(size=1) +
  labs(title="SD Regionwide Households by Household Type", y="Households by Household Type", x=" Year",
       caption="Source: isam.xpef03.household")+
  scale_y_continuous(labels= comma)+
  theme_bw(base_size = 16)+
  theme(legend.position = "bottom",
        legend.title=element_blank())
ggsave(hh_type_reg_plot, file=paste(results, "HH_type_reg.pdf"))

# Households by household type, one plot per jurisdiction.
# BUG FIX: the loop referenced unittype_jur / unittype, which do not exist
# in this script — it now uses HH_type_jur and the hht column.
jur_list<- c(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19)
jur_list2<- c("Carlsbad","Chula Vista","Coronado","Del Mar","El Cajon","Encinitas","Escondido","Imperial Beach","La Mesa","Lemon Grove",
              "National City","Oceanside","Poway","San Diego","San Marcos","Santee","Solana Beach","Vista","Unincorporated")
for(i in seq_along(jur_list)){
  jur_data <- subset(HH_type_jur, jurisdiction_id==jur_list[i])
  plot<-ggplot(jur_data,
               aes(x=yr, y=N, group=as.factor(hht), color=as.factor(hht))) +
    geom_line(size=1.25) +
    labs(title=paste("Households by Jurisdiction by Household Type\n", jur_list2[i]), y="Households by Household Type", x="Year",
         caption="Source: isam.xpef03.household+data_cafe.regional_forecast.sr13_final.mgra13")+
    expand_limits(y = c(1, 300000))+
    scale_y_continuous(labels= comma, limits = c(.75 * min(jur_data$N), 1.5 * max(jur_data$N)))+
    theme_bw(base_size = 16)+
    theme(legend.position = "bottom",
          legend.title=element_blank())
  ggsave(plot, file= paste(results, 'HH_type_jur', jur_list[i], ".pdf", sep=''), scale=2)
}

# Households by household type, one plot per CPA.
# BUG FIXES: the original (a) overwrote the plotting data frame with
# order(...), and (b) iterated `for(i in cpa_list)` while indexing
# `cpa_list[i]`, so e.g. cpa_list[1401] was NA and nothing was plotted.
cpa_list<- c(1401, 1402, 1403, 1404, 1405, 1406, 1407, 1408, 1409, 1410,
             1412, 1414, 1415, 1417, 1418, 1419, 1420, 1421, 1423, 1424,
             1425, 1426, 1427, 1428, 1429, 1430, 1431, 1432, 1433, 1434,
             1435, 1438, 1439, 1440, 1441, 1442, 1444, 1447, 1448, 1449,
             1450, 1455, 1456, 1457, 1458, 1459, 1461, 1462, 1463, 1464,
             1465, 1466, 1467, 1468, 1469, 1481, 1482, 1483, 1485, 1486,
             1488, 1491, 1901, 1902, 1903, 1904, 1906, 1907, 1908, 1909,
             1911, 1912, 1914, 1915, 1918, 1919, 1920, 1921, 1922, 1951,
             1952, 1953, 1954, 1955, 1998, 1999)
for(i in seq_along(cpa_list)){
  cpa_data <- subset(HH_type_cpa_omit, jcpa==cpa_list[i])
  plot<-ggplot(cpa_data,
               aes(x=yr, y=N, group=as.factor(hht), color=as.factor(hht)), na.rm=TRUE) +
    geom_line(size=1.25) +
    labs(title="Households by Household Type by CPA", y="Households by Household Type", x="Year",
         caption="Source: isam.xpef03.household+data_cafe.regional_forecast.sr13_final.mgra13")+
    expand_limits(y = c(1, 3000000))+
    scale_y_continuous(labels= comma, limits = c(100,50000))+
    theme_bw(base_size = 16)+
    theme(legend.position = "bottom",
          legend.title=element_blank())
  ggsave(plot, file= paste(results, 'HH_type_cpa', cpa_list[i], ".pdf", sep=''), scale=1)
}
} | /Unfiled/SR14Forecast/SubRegional/old scripts/HH_Househol_family_type (HHT).R | no_license | SANDAG/QA | R | false | false | 7,758 | r |
# Package setup for the household-type summaries and plots.
library(scales)      # comma() axis labels
library(sqldf)
library(rstudioapi)  # getActiveDocumentContext() for setwd() below
library(RODBC)
library(dplyr)
library(reshape2)    # dcast()
library(ggplot2)
library(data.table)
library(stringr)
#library(wesanderson)
#library(RColorBrewer)
# BUG FIX: the script ran install.packages("reshape2") unconditionally on
# every execution and attached sqldf a second time; install only if missing.
if (!requireNamespace("reshape2", quietly = TRUE)) install.packages("reshape2")
# NOTE(review): setwd() in a script is fragile, and every path below is
# absolute, so this call may be unnecessary.
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
#read in files
HH_type<-read.csv('M:\\Technical Services\\QA Documents\\Projects\\Sub Regional Forecast\\Data Files\\HH_Household_Type\\HH_HHTYPE.csv',stringsAsFactors = FALSE,fileEncoding="UTF-8-BOM")
#add y characterto year testes
HH_type$year<- "y"
HH_type$yr <- as.factor(paste( HH_type$year, HH_type$yr, sep = ""))
HH_type$year<- NULL
HH_type_reg<-aggregate(N~hht+yr, data=HH_type, sum)
HH_type_cpa<-aggregate(N~hht+yr+jcpa, data=HH_type, sum)
HH_type_jur<-aggregate(N~hht+yr+jurisdiction_id, data=HH_type, sum)
#if Andy's geography file doesn't include the jurisdiction you could exclude this statement or use it to exclude NULL
HH_type_cpa <- subset(HH_type_cpa, jcpa > 19)
HH_type_cpa_cast <- dcast(HH_type_cpa, jcpa+hht~yr, value.var="N")
HH_type_jur_cast <- dcast(HH_type_jur, jurisdiction_id+hht~yr, value.var="N")
head(HH_type_cpa_cast)
#################
#add percent change and absolute change and save as csv
#################
#unittype_cpa_cast$pct_chg <- (unittype_cpa_cast$y2050-unittype_cpa_cast$y2018)/unittype_cpa_cast$y2050
#unittype_jur_cast$pct_chg <- (unittype_jur_cast$y2050-unittype_jur_cast$y2018)/unittype_jur_cast$y2050
#unittype_cpa_cast$abs_chg <- unittype_cpa_cast$y2050-unittype_cpa_cast$y2018
#unittype_jur_cast$abs_chg <- unittype_jur_cast$y2050-unittype_jur_cast$y2018
#unittype_cpa_cast$pct_chg <- round(unittype_cpa_cast$pct_chg * 100, 2)
#unittype_jur_cast$pct_chg <- round(unittype_jur_cast$pct_chg * 100, 2)
write.csv(HH_type_cpa_cast,"M:\\Technical Services\\QA Documents\\Projects\\Sub Regional Forecast\\Scripts\\output\\HH_type cpa freq.csv" )
write.csv(HH_type_jur_cast,"M:\\Technical Services\\QA Documents\\Projects\\Sub Regional Forecast\\Scripts\\output\\HH_type jur freq.csv" )
write.csv(HH_type_reg,"M:\\Technical Services\\QA Documents\\Projects\\Sub Regional Forecast\\Scripts\\output\\HH_type reg freq.csv" )
HH_type_cpa_omit<-na.omit(HH_type_cpa)
#add figure script and write out file
##################################################
#graphs
##################################################
# Directory where all figures are written.
results <- "M:\\Technical Services\\QA Documents\\Projects\\Sub Regional Forecast\\Scripts\\output\\"
# BUG FIX: the reviewer note below was left as bare text, which is a parse
# error in R; keep it as a comment instead.
# BRI I HAVE STOPPED HERE
##################################
#household Unit Type region
# BUG FIX: the original overwrote the HH_type_reg data frame with the plot
# object and then called ggsave(unittype_region, ...) on an object that is
# never defined in this script; build the plot under its own name and save it.
HH_type_reg_plot <- ggplot(data = HH_type_reg, aes(x = yr, stat = "count", colour = hht), lab = c("0", "1")) +
  geom_bar(size = 1) +
  labs(title = "SD Regionwide Households by Unit Type", y = "Households by Unit Type", x = " Year",
       caption = "Source: isam.xpef03.household") +
  expand_limits(y = c(1, 3000000)) +
  scale_y_continuous(labels = comma, limits = c(1000, 500000)) +
  theme_bw(base_size = 16) +
  theme(legend.position = "bottom",
        legend.title = element_blank())
ggsave(HH_type_reg_plot, file = paste(results, "Unit Type_reg.pdf"))
# #household Unit Type region
# unittype_region<-ggplot(data=unittype_reg, aes(x=yr, y=N, group=unittype, colour=unittype,)) +
# geom_bar() +
# labs(title="SD Regionwide Households by Unit Type", y="Households by Unit Type", x=" Year",
# caption="Source: isam.xpef03.household")+
# expand_limits(y = c(1, 3000000))+
# scale_y_continuous(labels= comma, limits = c(1000,500000))+
# theme_bw(base_size = 16)+
# theme(legend.position = "bottom",
# legend.title=element_blank())
#
# ggsave(unittype_region, file=paste(results, "Unit Type_reg.pdf"))
#household Unit Type jurisdiction
#this creates the list for "i" which is what the loop relies on - like x in a do repeat
jur_list<- c(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19)
jur_list2<- c("Carlsbad","Chula Vista","Coronado","Del Mar","El Cajon","Encinitas","Escondido","Imperial Beach","La Mesa","Lemon Grove",
"National City","Oceanside","Poway","San Diego","San Marcos","Santee","Solana Beach","Vista","Unincorporated")
#this is the loop with the subset, the ggplot and the ggsave commands
for(i in 1:length(jur_list)){
plot<-ggplot(subset(unittype_jur, unittype_jur$jurisdiction_id==jur_list[i]),
aes(x=yr, y=N, group=as.factor(unittype), color=as.factor(unittype))) +
geom_line(size=1.25) +
labs(title=paste("Households by Jurisdiction by Unit Type\n", jur_list2[i]), y="Households by Unit Type category", x="Year",
caption="Source: isam.xpef03.household+data_cafe.regional_forecast.sr13_final.mgra13")+
expand_limits(y = c(1, 300000))+
scale_y_continuous(labels= comma, limits = c((.75 * min(subset(unittype_jur$N, unittype_jur$jurisdiction_id==jur_list[i]))),(1.5 * max(subset(unittype_jur$N, unittype_jur$jurisdiction_id==jur_list[i])))))+
theme_bw(base_size = 16)+
theme(legend.position = "bottom",
legend.title=element_blank())
ggsave(plot, file= paste(results, 'unittype_jur', jur_list[i], ".pdf", sep=''), scale=2)
}
#household Unit Type cpa
#this creates the list for "i" which is what the loop relies on - like x in a do repeat
cpa_list<- c(1401,
1402,
1403,
1404,
1405,
1406,
1407,
1408,
1409,
1410,
1412,
1414,
1415,
1417,
1418,
1419,
1420,
1421,
1423,
1424,
1425,
1426,
1427,
1428,
1429,
1430,
1431,
1432,
1433,
1434,
1435,
1438,
1439,
1440,
1441,
1442,
1444,
1447,
1448,
1449,
1450,
1455,
1456,
1457,
1458,
1459,
1461,
1462,
1463,
1464,
1465,
1466,
1467,
1468,
1469,
1481,
1482,
1483,
1485,
1486,
1488,
1491,
1901,
1902,
1903,
1904,
1906,
1907,
1908,
1909,
1911,
1912,
1914,
1915,
1918,
1919,
1920,
1921,
1922,
1951,
1952,
1953,
1954,
1955,
1998,
1999
)
unittype_cpa_omit<-order(unittype_cpa_omit$yr)
#this is the loop with the subset, the ggplot and the ggsave commands
for(i in cpa_list){
plot<-ggplot(subset(unittype_cpa_omit, unittype_cpa_omit$jcpa==cpa_list[i]),
aes(x=yr, y=N, group=unittype, color=unittype), na.rm=TRUE) +
geom_line(size=1.25) +
labs(title="Households by Unit Type by CPA", y="Households by Unit Type", x="Year",
caption="Source: isam.xpef03.household+data_cafe.regional_forecast.sr13_final.mgra13")+
expand_limits(y = c(1, 3000000))+
scale_y_continuous(labels= comma, limits = c(100,50000))+
theme_bw(base_size = 16)+
theme(legend.position = "bottom",
legend.title=element_blank())
ggsave(plot, file= paste(results, 'unittype_cpa', cpa_list[i], ".pdf", sep=''), scale=1)
} |
#' Compute the Average Variance (AVariance) of an estimator
#'
#' Computes the Average Variance (AVariance) of an estimator in a Monte Carlo
#' simulation study: for each observation the variance of its fitted value
#' across the simulation replications is computed, and these per-observation
#' variances are then averaged.
#' @param modelFit A matrix of fitted probabilities of dimensions (number of observations x
#' number of simulations).
#' @return A numeric scalar value.
#' @keywords PredictingBlackSwans
#' @export
AVar_fun <- function(modelFit) {
  # Mean fitted value per observation, taken across simulation columns.
  per_obs_mean <- rowMeans(modelFit)
  # Subtract each row's mean from that row (sweep over margin 1) and square;
  # this replaces the explicit matrix(rep(...)) expansion with the same result.
  squared_dev <- sweep(modelFit, 1, per_obs_mean)^2
  # Per-observation variance (denominator = number of simulations), then
  # averaged over observations.
  mean(rowMeans(squared_dev))
}
| /PredictingBlackSwans/R/AVar_fun.R | permissive | PabloRMira/PredictingBlackSwans | R | false | false | 641 | r | #' Compute the Average Variance (AVariance) of an estimator
#'
#' Computes the Average Variance (AVariance) of an estimator
#' in a Monte Carlo simulations study.
#' @param modelFit A matrix of fitted probabilities of dimensions (number of observations x
#' number of simulations).
#' @return A numeric scalar value.
#' @keywords PredictingBlackSwans
#' @export
# Average Variance of an estimator in a Monte Carlo study: per-observation
# variance of the fitted values across simulations, averaged over observations.
AVar_fun <- function(modelFit) {
nSim <- ncol(modelFit) # number of simulation replications (columns)
n <- nrow(modelFit) # number of observations (rows)
# Mean fitted value for each observation across replications.
expectedFit <- rowMeans(modelFit)
# Expand the row means into an n x nSim matrix for element-wise subtraction.
expFitMat <- matrix(rep(expectedFit, nSim), n, nSim)
sqDev <- (modelFit - expFitMat)^2 # squared deviation of each fitted value
# Per-observation variance (denominator nSim, not nSim - 1).
indVar <- rowMeans(sqDev)
aveVar <- mean(indVar) # average the variances over all observations
return(aveVar)
}
|
args = commandArgs(trailingOnly = T)
#i represent ethnic group
#j represent chromosome
#l represent the causal SNPs proportion
#m represent the training sample size
#i_rep represent simulation replication
#i1 represent the genetic architecture
i = as.numeric(args[[1]])
l = as.numeric(args[[2]])
# for(i in 2:3){
# for(l in 1:2){
library(dplyr)
library(data.table)
eth <- c("EUR","AFR","AMR")
trait_vec <-c("height","bmi")
trait = trait_vec[l]
out.dir.prs = paste0("/data/zhangh24/multi_ethnic/result/AOU/prs/PRSCSX/EUR/",trait,"/")
phi = c("1e+00","1e-02","1e-04","1e-06")
sid<-Sys.getenv('SLURM_JOB_ID')
dir.create(paste0('/lscratch/',sid,'/test'),showWarnings = FALSE)
temp.dir = paste0('/lscratch/',sid,'/test/')
system(paste0("mkdir ",temp.dir,"ukb"))
geno.data = paste0("/data/zhangh24/multi_ethnic/data/UKBB/genotype/all_data/")
system(paste0("cp ",geno.data,eth[i],"/all_chr.bed ",temp.dir,"ukb/all_chr.bed"))
system(paste0("cp ",geno.data,eth[i],"/all_chr.bim ",temp.dir,"ukb/all_chr.bim"))
system(paste0("cp ",geno.data,eth[i],"/all_chr.fam ",temp.dir,"ukb/all_chr.fam"))
v = 1
#find unique snp set
data_list = list()
for(i_eth in 1:3){
load(paste0(out.dir.prs,"sum_",eth[i_eth],"_pst_eff_a1_b0.5_phi",phi[v],".rdata"))
data_temp = data %>% select(V2, V4)
data_list[[i_eth]] = data_temp
}
snp_set = rbindlist(data_list) %>% distinct()
colnames(snp_set) = c("SNP", "A1")
BETA_mat = matrix(0,nrow(snp_set),length(phi)*length(eth))
temp = 1
for(v in 1:4){
for(i_eth in 1:length(eth)){
load(paste0(out.dir.prs,"sum_",eth[i_eth],"_pst_eff_a1_b0.5_phi",phi[v],".rdata"))
data_tar = data %>% select(V2, V6) %>%
rename(SNP = V2)
snp_set_temp = left_join(snp_set, data_tar) %>%
mutate(BETA = ifelse(is.na(V6),0,V6))
BETA_mat[, temp] = snp_set_temp$BETA
temp = temp + 1
}
}
prs_file = cbind(snp_set,BETA_mat)
n_col = ncol(prs_file)
#file_out = paste0("/data/zhangh24/multi_ethnic/result/AOU/prs/PRSCSx/",eth[i],"/",trait,"")
write.table(prs_file,file = paste0(temp.dir,"prs_prep"),col.names = T,row.names = F,quote=F)
ref_gene_pred = paste0(temp.dir,"ukb/all_chr")
res = system(paste0("/data/zhangh24/software/plink2_alpha ",
"--score-col-nums 3-",n_col," --threads 2 ",
"--score ",temp.dir,"prs_prep cols=+scoresums,-scoreavgs header no-mean-imputation ",
"--bfile ",ref_gene_pred,
" --out ",temp.dir,"PRS"))
out.dir.prs = paste0("/data/zhangh24/multi_ethnic/result/AOU/prs/PRSCSX_all/",eth[i],"/",trait_vec[l],"/")
system(paste0("cp ",temp.dir,"PRS.sscore ",out.dir.prs))
prs_mat = fread(paste0(temp.dir,"PRS.sscore"))
prs_score = prs_mat[,5:ncol(prs_mat)]
colnames(prs_mat)[2] = "id"
pheno.dir = "/data/zhangh24/multi_ethnic/data/UKBB/phenotype/"
pheno_tuning = as.data.frame(fread(paste0(pheno.dir,trait,"/tuning+validation/",eth[i],"_tuning.txt")))
pheno_tuning = pheno_tuning[,1:2]
covar <- as.data.frame(fread(paste0(pheno.dir,"/covariates/tuning+validation/",eth[i],"_all_data.txt")))
pheno_tuning <- left_join(pheno_tuning, covar)
colnames(pheno_tuning) = c('id','y','sex','age',paste0('pc',1:10))
pheno_tuning_com = pheno_tuning[complete.cases(pheno_tuning$y),]
pheno_tuning = left_join(pheno_tuning_com,prs_mat,by = "id")
model.null <- lm(y~pc1+pc2+pc3+pc4+pc5+pc6+pc7+pc8+pc9+pc10+age+sex,data=pheno_tuning)
y_tun = model.null$residual
prs_tun = pheno_tuning[,colnames(prs_score)]
pheno_vad = as.data.frame(fread(paste0(pheno.dir,trait,"/tuning+validation/",eth[i],"_validation.txt")))
pheno_vad = pheno_vad[,1:2]
pheno_vad <- left_join(pheno_vad, covar)
colnames(pheno_vad) = c('id','y','sex','age',paste0('pc',1:10))
pheno_vad_com = pheno_vad[complete.cases(pheno_vad$y),]
pheno_vad = left_join(pheno_vad_com,prs_mat,by = "id")
model.null <- lm(y~pc1+pc2+pc3+pc4+pc5+pc6+pc7+pc8+pc9+pc10+age+sex,data=pheno_vad)
y_vad = model.null$residual
prs_vad = pheno_vad[,colnames(prs_score)]
# Tuning step: for each PRS-CSx global shrinkage value in `phi`, regress the
# tuning-set phenotype residuals on the three ancestry-specific scores and
# record the model R^2 and the fitted score weights (intercept dropped).
# Columns of prs_tun are laid out as one triple (EUR, AFR, AMR) per phi value,
# so the v-th phi uses columns 3v-2, 3v-1, 3v.
n_phi <- length(phi)  # generalized from the hard-coded 4
r2_vec_tun <- rep(0, n_phi)
coef_mat <- matrix(0, n_phi, length(eth))
for (v in seq_len(n_phi)) {
  score1 <- prs_tun[, 3 * v - 2]
  score2 <- prs_tun[, 3 * v - 1]
  score3 <- prs_tun[, 3 * v]
  model <- lm(y_tun ~ score1 + score2 + score3)
  coef_mat[v, ] <- coefficients(model)[-1]  # drop the intercept
  r2_vec_tun[v] <- summary(model)$r.square
}
# Index of the phi value that maximizes tuning R^2.
max_ind <- which.max(r2_vec_tun)
score1 = prs_vad[, 3*max_ind-2]
score2= prs_vad[, 3*max_ind-1]
score3= prs_vad[, 3*max_ind]
coef = coef_mat[max_ind,]
prs_vad = cbind(score1, score2, score3)%*%coef
model = lm(y_vad~ prs_vad)
r2 = summary(model)$r.square
data = data.frame(y = y_vad, x = prs_vad)
# Bootstrap statistic for boot::boot(): R-squared of the simple linear
# regression y ~ x fitted to the resampled rows selected by `indices`.
R2Boot = function(data, indices) {
  resampled <- data[indices, ]
  fit <- lm(y ~ x, data = resampled)
  c(summary(fit)$r.square)
}
library(boot)
boot_r2 = boot(data = data, statistic = R2Boot, R = 10000)
ci_result = boot.ci(boot_r2, type = "bca")
r2.result = data.frame(eth = eth[i],
trait = trait_vec[l],
method = "PRS-CSx (three ancestries)",
r2 = r2,
r2_low = ci_result$bca[4],
r2_high = ci_result$bca[5]
)
out.dir = paste0("/data/zhangh24/multi_ethnic/result/AOU/PRSCSX/",eth[i],"/",trait,"/")
save(r2.result, file = paste0(out.dir, "prscsx_all.result"))
# system(paste0("rm -rf ", temp.dir))
# }
# }
prs_score = as.data.frame(prs_score)
score1 = prs_score[, 3*max_ind-2]
score2= prs_score[, 3*max_ind-1]
score3= prs_score[, 3*max_ind]
prs_max_score = cbind(score1, score2, score3)%*%coef
prs_max = cbind(prs_mat[,1:4],prs_max_score)
write.table(prs_max, file = paste0(out.dir.prs, "best_prs.sscore"),
row.names = F,
col.names = T,
quote = F)
| /code/AOU/PRSCSx/7_compute_prs_r2_all.R | no_license | andrewhaoyu/multi_ethnic | R | false | false | 5,661 | r | args = commandArgs(trailingOnly = T)
#i represent ethnic group
#j represent chromosome
#l represent the causal SNPs proportion
#m represent the training sample size
#i_rep represent simulation replication
#i1 represent the genetic architecture
i = as.numeric(args[[1]])
l = as.numeric(args[[2]])
# for(i in 2:3){
# for(l in 1:2){
library(dplyr)
library(data.table)
eth <- c("EUR","AFR","AMR")
trait_vec <-c("height","bmi")
trait = trait_vec[l]
out.dir.prs = paste0("/data/zhangh24/multi_ethnic/result/AOU/prs/PRSCSX/EUR/",trait,"/")
phi = c("1e+00","1e-02","1e-04","1e-06")
sid<-Sys.getenv('SLURM_JOB_ID')
dir.create(paste0('/lscratch/',sid,'/test'),showWarnings = FALSE)
temp.dir = paste0('/lscratch/',sid,'/test/')
system(paste0("mkdir ",temp.dir,"ukb"))
geno.data = paste0("/data/zhangh24/multi_ethnic/data/UKBB/genotype/all_data/")
system(paste0("cp ",geno.data,eth[i],"/all_chr.bed ",temp.dir,"ukb/all_chr.bed"))
system(paste0("cp ",geno.data,eth[i],"/all_chr.bim ",temp.dir,"ukb/all_chr.bim"))
system(paste0("cp ",geno.data,eth[i],"/all_chr.fam ",temp.dir,"ukb/all_chr.fam"))
v = 1
#find unique snp set
data_list = list()
for(i_eth in 1:3){
load(paste0(out.dir.prs,"sum_",eth[i_eth],"_pst_eff_a1_b0.5_phi",phi[v],".rdata"))
data_temp = data %>% select(V2, V4)
data_list[[i_eth]] = data_temp
}
snp_set = rbindlist(data_list) %>% distinct()
colnames(snp_set) = c("SNP", "A1")
BETA_mat = matrix(0,nrow(snp_set),length(phi)*length(eth))
temp = 1
for(v in 1:4){
for(i_eth in 1:length(eth)){
load(paste0(out.dir.prs,"sum_",eth[i_eth],"_pst_eff_a1_b0.5_phi",phi[v],".rdata"))
data_tar = data %>% select(V2, V6) %>%
rename(SNP = V2)
snp_set_temp = left_join(snp_set, data_tar) %>%
mutate(BETA = ifelse(is.na(V6),0,V6))
BETA_mat[, temp] = snp_set_temp$BETA
temp = temp + 1
}
}
prs_file = cbind(snp_set,BETA_mat)
n_col = ncol(prs_file)
#file_out = paste0("/data/zhangh24/multi_ethnic/result/AOU/prs/PRSCSx/",eth[i],"/",trait,"")
write.table(prs_file,file = paste0(temp.dir,"prs_prep"),col.names = T,row.names = F,quote=F)
ref_gene_pred = paste0(temp.dir,"ukb/all_chr")
res = system(paste0("/data/zhangh24/software/plink2_alpha ",
"--score-col-nums 3-",n_col," --threads 2 ",
"--score ",temp.dir,"prs_prep cols=+scoresums,-scoreavgs header no-mean-imputation ",
"--bfile ",ref_gene_pred,
" --out ",temp.dir,"PRS"))
out.dir.prs = paste0("/data/zhangh24/multi_ethnic/result/AOU/prs/PRSCSX_all/",eth[i],"/",trait_vec[l],"/")
system(paste0("cp ",temp.dir,"PRS.sscore ",out.dir.prs))
prs_mat = fread(paste0(temp.dir,"PRS.sscore"))
prs_score = prs_mat[,5:ncol(prs_mat)]
colnames(prs_mat)[2] = "id"
pheno.dir = "/data/zhangh24/multi_ethnic/data/UKBB/phenotype/"
pheno_tuning = as.data.frame(fread(paste0(pheno.dir,trait,"/tuning+validation/",eth[i],"_tuning.txt")))
pheno_tuning = pheno_tuning[,1:2]
covar <- as.data.frame(fread(paste0(pheno.dir,"/covariates/tuning+validation/",eth[i],"_all_data.txt")))
pheno_tuning <- left_join(pheno_tuning, covar)
colnames(pheno_tuning) = c('id','y','sex','age',paste0('pc',1:10))
pheno_tuning_com = pheno_tuning[complete.cases(pheno_tuning$y),]
pheno_tuning = left_join(pheno_tuning_com,prs_mat,by = "id")
model.null <- lm(y~pc1+pc2+pc3+pc4+pc5+pc6+pc7+pc8+pc9+pc10+age+sex,data=pheno_tuning)
y_tun = model.null$residual
prs_tun = pheno_tuning[,colnames(prs_score)]
pheno_vad = as.data.frame(fread(paste0(pheno.dir,trait,"/tuning+validation/",eth[i],"_validation.txt")))
pheno_vad = pheno_vad[,1:2]
pheno_vad <- left_join(pheno_vad, covar)
colnames(pheno_vad) = c('id','y','sex','age',paste0('pc',1:10))
pheno_vad_com = pheno_vad[complete.cases(pheno_vad$y),]
pheno_vad = left_join(pheno_vad_com,prs_mat,by = "id")
model.null <- lm(y~pc1+pc2+pc3+pc4+pc5+pc6+pc7+pc8+pc9+pc10+age+sex,data=pheno_vad)
y_vad = model.null$residual
prs_vad = pheno_vad[,colnames(prs_score)]
r2_vec_tun = rep(0, 4)
coef_mat = matrix(0, 4, length(eth))
for(v in 1:4){
score1 = prs_tun[,3*v-2]
score2 = prs_tun[,3*v-1]
score3 = prs_tun[,3*v]
model = lm(y_tun~score1+score2+score3)
coef_mat[v,] = coefficients(model)[-1]
r2_vec_tun[v] = summary(model)$r.square
}
max_ind = which.max(r2_vec_tun)
score1 = prs_vad[, 3*max_ind-2]
score2= prs_vad[, 3*max_ind-1]
score3= prs_vad[, 3*max_ind]
coef = coef_mat[max_ind,]
prs_vad = cbind(score1, score2, score3)%*%coef
model = lm(y_vad~ prs_vad)
r2 = summary(model)$r.square
data = data.frame(y = y_vad, x = prs_vad)
# Bootstrap statistic for boot::boot(): returns the R-squared of lm(y ~ x)
# fitted to the rows of `data` selected by `indices` (the bootstrap resample).
R2Boot = function(data,indices){
boot_data = data[indices, ] # resampled rows chosen by boot()
model = lm(y ~ x, data = boot_data)
result = summary(model)$r.square
return(c(result))
}
library(boot)
boot_r2 = boot(data = data, statistic = R2Boot, R = 10000)
ci_result = boot.ci(boot_r2, type = "bca")
r2.result = data.frame(eth = eth[i],
trait = trait_vec[l],
method = "PRS-CSx (three ancestries)",
r2 = r2,
r2_low = ci_result$bca[4],
r2_high = ci_result$bca[5]
)
out.dir = paste0("/data/zhangh24/multi_ethnic/result/AOU/PRSCSX/",eth[i],"/",trait,"/")
save(r2.result, file = paste0(out.dir, "prscsx_all.result"))
# system(paste0("rm -rf ", temp.dir))
# }
# }
prs_score = as.data.frame(prs_score)
score1 = prs_score[, 3*max_ind-2]
score2= prs_score[, 3*max_ind-1]
score3= prs_score[, 3*max_ind]
prs_max_score = cbind(score1, score2, score3)%*%coef
prs_max = cbind(prs_mat[,1:4],prs_max_score)
write.table(prs_max, file = paste0(out.dir.prs, "best_prs.sscore"),
row.names = F,
col.names = T,
quote = F)
|
# Plain R Script (can't do this in a notebook that
# loops through a designated region classification)
# [state, county, country, watershed]
# and extracts data from NCDC. Saves Data as
# CSV and netCDF.
library("rnoaa")
library("isdparser")
library("lubridate")
library("ncdf4")
library("dplyr")
# library("openair")
library("rlist")
library("readxl")
library("tidyverse")
library("tidycensus")
# my common loc ids : SD->FIPS:46
# AK->FIPS:02
# NC->FIPS:37
# CA->FIPS:06
# WI->FIPS:55
# PA->FIPS:42
# NM->FIPS:35
# NAMIBIA->FIPS:WA
# Mongolia->FIPS:MG
# CHEYENNE->HUC:101202 &
# HUC:101201
# CHEYENNE->HUC:101202 &
# HUC:101201
# Pennington->FIPS:46103
# Buncombe->FIPS:37021
# Onslow->FIPS:37133
ncdc_ids = ncdc_stations(locationid = 'FIPS:AS',
datasetid = 'GHCND',
extent = c(-89, 138, 89, 139),
limit = 1000)
#extent = c(-89.,119,89,120),
#extent = c(50,11,51,12),
target_data_directory_root = "./GHCN_DATA/AUS/"
dir.create(path = str_c(target_data_directory_root,"netCDF", sep=""),
recursive = TRUE)
dir.create(path = str_c(target_data_directory_root,"RData", sep=""),
recursive = TRUE)
rdata_bigfile_name = "GHCND-GLOBAL_"
n_stations = ncdc_ids$meta$pageCount
ncdc_ids = ncdc_ids$data
print(ncdc_ids)
ghcn_station_information = ncdc_ids
total_number_of_stations = length(ncdc_ids$name)
if (total_number_of_stations == 1000) {
print("too many files")
stop()
}
print("")
print(" Begin Looping")
print("")
ghcn_metadata = read_excel(path = "~/GitHub/NCDC_rnoaa_ISD/GHCN_Metadata.xlsx")
ghcn_station_information$mindate = as.Date(ghcn_station_information$mindate)
ghcn_station_information$maxdate = as.Date(ghcn_station_information$maxdate)
indexlist = 1:total_number_of_stations
for (ncdc_index in 1:total_number_of_stations ) {
station_name_label = ncdc_ids$name[ncdc_index]
station_latitude = ncdc_ids$latitude[ncdc_index]
station_longitude = ncdc_ids$longitude[ncdc_index]
station_altitude = ncdc_ids$elevation[ncdc_index]
ncdc_id_code = ncdc_ids$id[ncdc_index]
ghcn_station_code = unlist(strsplit(x = ncdc_id_code,
split = ":"))
ncdc_start_yymmdd = ncdc_ids$mindate[ncdc_index]
ncdc_end_yymmdd = ncdc_ids$maxdate[ncdc_index]
Date = seq(from = as.Date(ncdc_start_yymmdd),
to = as.Date(ncdc_end_yymmdd),
by = "days")
ncdc_data = ghcnd(stationid = ghcn_station_code[2])
available_datafields = unique(ncdc_data$element)
sorted_data = ghcnd_splitvars(ncdc_data)
filename_station_label = ncdc_ids$name[ncdc_index]
filename_station_label = gsub(" US", "", filename_station_label)
filename_station_label = gsub( ",", "", filename_station_label)
filename_station_label = gsub( " ", "_", filename_station_label)
filename_ghcn_label = ncdc_ids$id[ncdc_index]
filename_ghcn_label = gsub(":", "-", filename_ghcn_label)
file_title_string = paste(filename_ghcn_label,
"__",
filename_station_label,
sep="")
remove(ncdc_data)
print("--------------------------")
print(paste("Station # ",
ncdc_index,
" of ",
total_number_of_stations,
sep = ""))
print(filename_station_label)
ghcn_station = data.frame( station_name = as.character(ncdc_ids$name[ncdc_index]),
station_latitude = ncdc_ids$latitude[ncdc_index],
station_longitude = ncdc_ids$longitude[ncdc_index],
station_altitude = ncdc_ids$elevation[ncdc_index],
ncdc_id_code = as.character(ncdc_id_code),
time = Date)
print(available_datafields)
if ("TMAX" %in% available_datafields) {
tmax_full_field = sorted_data$tmax
ordered = order(tmax_full_field$date)
tmax_full_field$tmax[] = tmax_full_field$tmax[ordered] / 10
tmax_full_field$date[] = tmax_full_field$date[ordered]
sub = data.frame( station_name = as.character(ncdc_ids$name[ncdc_index]),
station_latitude = ncdc_ids$latitude[ncdc_index],
station_longitude = ncdc_ids$longitude[ncdc_index],
station_altitude = ncdc_ids$elevation[ncdc_index],
ncdc_id_code = as.character(ncdc_id_code),
time = tmax_full_field$date,
tmax = tmax_full_field$tmax)
ghcn_station = full_join(ghcn_station,
sub,
by=c("station_name",
"station_latitude",
"station_longitude",
"station_altitude",
"ncdc_id_code",
"time") )
}
if ("TMIN" %in% available_datafields) {
tmin_full_field = sorted_data$tmin
ordered = order(tmin_full_field$date)
tmin_full_field$tmin[] = tmin_full_field$tmin[ordered] / 10
tmin_full_field$date[] = tmin_full_field$date[ordered]
sub = data.frame( station_name = as.character(ncdc_ids$name[ncdc_index]),
station_latitude = ncdc_ids$latitude[ncdc_index],
station_longitude = ncdc_ids$longitude[ncdc_index],
station_altitude = ncdc_ids$elevation[ncdc_index],
ncdc_id_code = as.character(ncdc_id_code),
time = tmin_full_field$date,
tmin = tmin_full_field$tmin)
ghcn_station = full_join(ghcn_station,
sub,
by=c("station_name",
"station_latitude",
"station_longitude",
"station_altitude",
"ncdc_id_code",
"time") )
}
if ("TAVG" %in% available_datafields) {
tavg_full_field = sorted_data$tavg
ordered = order(tavg_full_field$date)
tavg_full_field$tavg[] = tavg_full_field$tavg[ordered] / 10
tavg_full_field$date[] = tavg_full_field$date[ordered]
sub = data.frame( station_name = as.character(ncdc_ids$name[ncdc_index]),
station_latitude = ncdc_ids$latitude[ncdc_index],
station_longitude = ncdc_ids$longitude[ncdc_index],
station_altitude = ncdc_ids$elevation[ncdc_index],
ncdc_id_code = as.character(ncdc_id_code),
time = tavg_full_field$date,
tavg = tavg_full_field$tavg)
ghcn_station = full_join(ghcn_station,
sub,
by=c("station_name",
"station_latitude",
"station_longitude",
"station_altitude",
"ncdc_id_code",
"time") )
}
if ("PRCP" %in% available_datafields) {
prcp_full_field = sorted_data$prcp
ordered = order(prcp_full_field$date)
prcp_full_field$prcp[] = prcp_full_field$prcp[ordered] / 10
prcp_full_field$date[] = prcp_full_field$date[ordered]
sub = data.frame( station_name = as.character(ncdc_ids$name[ncdc_index]),
station_latitude = ncdc_ids$latitude[ncdc_index],
station_longitude = ncdc_ids$longitude[ncdc_index],
station_altitude = ncdc_ids$elevation[ncdc_index],
ncdc_id_code = as.character(ncdc_id_code),
time = prcp_full_field$date,
prcp = prcp_full_field$prcp)
ghcn_station = full_join(ghcn_station,
sub,
by=c("station_name",
"station_latitude",
"station_longitude",
"station_altitude",
"ncdc_id_code",
"time") )
}
if ("SNOW" %in% available_datafields) {
snow_full_field = sorted_data$snow
ordered = order(snow_full_field$date)
snow_full_field$snow[] = snow_full_field$snow[ordered]
snow_full_field$date[] = snow_full_field$date[ordered]
sub = data.frame( station_name = as.character(ncdc_ids$name[ncdc_index]),
station_latitude = ncdc_ids$latitude[ncdc_index],
station_longitude = ncdc_ids$longitude[ncdc_index],
station_altitude = ncdc_ids$elevation[ncdc_index],
ncdc_id_code = as.character(ncdc_id_code),
time = snow_full_field$date,
snow = snow_full_field$snow)
ghcn_station = full_join(ghcn_station,
sub,
by=c("station_name",
"station_latitude",
"station_longitude",
"station_altitude",
"ncdc_id_code",
"time") )
}
if ("SNWD" %in% available_datafields) {
snwd_full_field = sorted_data$snwd
ordered = order(snwd_full_field$date)
snwd_full_field$snwd[] = snwd_full_field$snwd[ordered]
snwd_full_field$date[] = snwd_full_field$date[ordered]
sub = data.frame( station_name = as.character(ncdc_ids$name[ncdc_index]),
station_latitude = ncdc_ids$latitude[ncdc_index],
station_longitude = ncdc_ids$longitude[ncdc_index],
station_altitude = ncdc_ids$elevation[ncdc_index],
ncdc_id_code = as.character(ncdc_id_code),
time = snwd_full_field$date,
snwd = snwd_full_field$snwd)
ghcn_station = full_join(ghcn_station,
sub,
by=c("station_name",
"station_latitude",
"station_longitude",
"station_altitude",
"ncdc_id_code",
"time") )
}
if ("WESD" %in% available_datafields) {
wesd_full_field = sorted_data$wesd
ordered = order(wesd_full_field$date)
wesd_full_field$wesd[] = wesd_full_field$wesd[ordered] / 10
wesd_full_field$date[] = wesd_full_field$date[ordered]
sub = data.frame( station_name = as.character(ncdc_ids$name[ncdc_index]),
station_latitude = ncdc_ids$latitude[ncdc_index],
station_longitude = ncdc_ids$longitude[ncdc_index],
station_altitude = ncdc_ids$elevation[ncdc_index],
ncdc_id_code = as.character(ncdc_id_code),
time = wesd_full_field$date,
wesd = wesd_full_field$wesd)
ghcn_station = full_join(ghcn_station,
sub,
by=c("station_name",
"station_latitude",
"station_longitude",
"station_altitude",
"ncdc_id_code",
"time") )
}
if ("WESF" %in% available_datafields) {
wesf_full_field = sorted_data$wesf
ordered = order(wesf_full_field$date)
wesf_full_field$wesf[] = wesf_full_field$wesf[ordered] / 10
wesf_full_field$date[] = wesf_full_field$date[ordered]
sub = data.frame( station_name = as.character(ncdc_ids$name[ncdc_index]),
station_latitude = ncdc_ids$latitude[ncdc_index],
station_longitude = ncdc_ids$longitude[ncdc_index],
station_altitude = ncdc_ids$elevation[ncdc_index],
ncdc_id_code = as.character(ncdc_id_code),
time = wesf_full_field$date,
wesf = wesf_full_field$wesf)
ghcn_station = full_join(ghcn_station,
sub,
by=c("station_name",
"station_latitude",
"station_longitude",
"station_altitude",
"ncdc_id_code",
"time") )
}
if ("AWND" %in% available_datafields) {
awnd_full_field = sorted_data$awnd
ordered = order(awnd_full_field$date)
awnd_full_field$awnd[] = awnd_full_field$awnd[ordered] / 10
awnd_full_field$date[] = awnd_full_field$date[ordered]
sub = data.frame( station_name = as.character(ncdc_ids$name[ncdc_index]),
station_latitude = ncdc_ids$latitude[ncdc_index],
station_longitude = ncdc_ids$longitude[ncdc_index],
station_altitude = ncdc_ids$elevation[ncdc_index],
ncdc_id_code = as.character(ncdc_id_code),
time = awnd_full_field$date,
awnd = awnd_full_field$awnd)
ghcn_station = full_join(ghcn_station,
sub,
by=c("station_name",
"station_latitude",
"station_longitude",
"station_altitude",
"ncdc_id_code",
"time") )
}
if ("AWDR" %in% available_datafields) {
print(" AWDR")
awdr_full_field = sorted_data$awdr
ordered = order(awdr_full_field$date)
awdr_full_field$awdr[] = awdr_full_field$awdr[ordered]
awdr_full_field$date[] = awdr_full_field$date[ordered]
sub = data.frame( station_name = as.character(ncdc_ids$name[ncdc_index]),
station_latitude = ncdc_ids$latitude[ncdc_index],
station_longitude = ncdc_ids$longitude[ncdc_index],
station_altitude = ncdc_ids$elevation[ncdc_index],
ncdc_id_code = as.character(ncdc_id_code),
time = awdr_full_field$date,
awdr = awdr_full_field$awdr)
ghcn_station = full_join(ghcn_station,
sub,
by=c("station_name",
"station_latitude",
"station_longitude",
"station_altitude",
"ncdc_id_code",
"time") )
}
remove(sorted_data)
remove(ordered)
# Accumulate this station's table into the running all-stations table.
# NOTE(review): growing with bind_rows() inside the loop is O(n^2); for a
# large station count, collect into a list and bind once after the loop.
if (ncdc_index == 1) {
  ghcn_stations = ghcn_station
} else {
  ghcn_stations = bind_rows(ghcn_stations, ghcn_station)
}
# BUG FIX: the original wrote save(gchn_station = ghcn_station, ...); save()
# takes object names via `...`, and the misspelled `gchn_station =` tag is not
# a supported way to rename the object — pass the object name directly.
save(ghcn_station,
     file = paste0(target_data_directory_root, "RData/", file_title_string, ".Rdata"))
remove(ghcn_station)
Days_from_1970_01_01 = as.numeric( as.Date(Date) )
if ("TMAX" %in% available_datafields) {
ts_in = ts(data = tmax_full_field$tmax,
start = as.Date(tmax_full_field$date[1]),
end = as.Date(tmax_full_field$date[length(tmax_full_field$date)]),
deltat = 1,
ts.eps = getOption("ts.eps")
)
tmax = window(x = ts_in,
start = as.numeric(as.Date(ncdc_start_yymmdd)),
end = as.numeric(as.Date(ncdc_end_yymmdd)),
deltat = 1,
extend = TRUE)
remove(tmax_full_field,
ts_in)
}
if ("TMIN" %in% available_datafields) {
ts_in = ts(data = tmin_full_field$tmin,
start = as.Date(tmin_full_field$date[1]),
end = as.Date(tmin_full_field$date[length(tmin_full_field$date)]),
deltat = 1,
ts.eps = getOption("ts.eps")
)
tmin = window(x = ts_in,
start = as.numeric(as.Date(ncdc_start_yymmdd)),
end = as.numeric(as.Date(ncdc_end_yymmdd)),
deltat = 1,
extend = TRUE)
remove(tmin_full_field,
ts_in)
}
if ("TAVG" %in% available_datafields) {
ts_in = ts(data = tavg_full_field$tavg,
start = as.Date(tavg_full_field$date[1]),
end = as.Date(tavg_full_field$date[length(tavg_full_field$date)]),
deltat = 1,
ts.eps = getOption("ts.eps") )
tavg = window(x = ts_in,
start = as.numeric(as.Date(ncdc_start_yymmdd)),
end = as.numeric(as.Date(ncdc_end_yymmdd)),
deltat = 1,
extend = TRUE)
remove(tavg_full_field,
ts_in)
}
if ("PRCP" %in% available_datafields) {
ts_in = ts(data = prcp_full_field$prcp,
start = as.Date(prcp_full_field$date[1]),
end = as.Date(prcp_full_field$date[length(prcp_full_field$date)]),
deltat = 1,
ts.eps = getOption("ts.eps") )
prcp = window(x = ts_in,
start = as.numeric(as.Date(ncdc_start_yymmdd)),
end = as.numeric(as.Date(ncdc_end_yymmdd)),
deltat = 1,
extend = TRUE)
remove(prcp_full_field,
ts_in)
}
if ("SNOW" %in% available_datafields) {
ts_in = ts(data = snow_full_field$snow,
start = as.Date(snow_full_field$date[1]),
end = as.Date(snow_full_field$date[length(snow_full_field$date)]),
deltat = 1,
ts.eps = getOption("ts.eps") )
snow = window(x = ts_in,
start = as.numeric(as.Date(ncdc_start_yymmdd)),
end = as.numeric(as.Date(ncdc_end_yymmdd)),
deltat = 1,
extend = TRUE)
remove(snow_full_field,
ts_in)
}
if ("SNWD" %in% available_datafields) {
ts_in = ts(data = snwd_full_field$snwd,
start = as.Date(snwd_full_field$date[1]),
end = as.Date(snwd_full_field$date[length(snwd_full_field$date)]),
deltat = 1,
ts.eps = getOption("ts.eps") )
snwd = window(x = ts_in,
start = as.numeric(as.Date(ncdc_start_yymmdd)),
end = as.numeric(as.Date(ncdc_end_yymmdd)),
deltat = 1,
extend = TRUE)
remove(snwd_full_field,
ts_in)
}
if ("WESD" %in% available_datafields) {
ts_in = ts(data = wesd_full_field$wesd,
start = as.Date(wesd_full_field$date[1]),
end = as.Date(wesd_full_field$date[length(wesd_full_field$date)]),
deltat = 1,
ts.eps = getOption("ts.eps") )
wesd = window(x = ts_in,
start = as.numeric(as.Date(ncdc_start_yymmdd)),
end = as.numeric(as.Date(ncdc_end_yymmdd)),
deltat = 1,
extend = TRUE)
remove(wesd_full_field,
ts_in)
}
if ("WESF" %in% available_datafields) {
ts_in = ts(data = wesf_full_field$wesf,
start = as.Date(wesf_full_field$date[1]),
end = as.Date(wesf_full_field$date[length(wesf_full_field$date)]),
deltat = 1,
ts.eps = getOption("ts.eps") )
wesf = window(x = ts_in,
start = as.numeric(as.Date(ncdc_start_yymmdd)),
end = as.numeric(as.Date(ncdc_end_yymmdd)),
deltat = 1,
extend = TRUE)
remove(wesf_full_field,
ts_in)
}
if ("AWND" %in% available_datafields) {
ts_in = ts(data = awnd_full_field$awnd,
start = as.Date(awnd_full_field$date[1]),
end = as.Date(awnd_full_field$date[length(awnd_full_field$date)]),
deltat = 1,
ts.eps = getOption("ts.eps") )
awnd = window(x = ts_in,
start = as.numeric(as.Date(ncdc_start_yymmdd)),
end = as.numeric(as.Date(ncdc_end_yymmdd)),
deltat = 1,
extend = TRUE)
remove(awnd_full_field,
ts_in)
}
if ("AWDR" %in% available_datafields) {
ts_in = ts(data = awdr_full_field$awdr,
start = as.Date(awdr_full_field$date[1]),
end = as.Date(awdr_full_field$date[length(awdr_full_field$date)]),
deltat = 1,
ts.eps = getOption("ts.eps") )
awnd = window(x = ts_in,
start = as.numeric(as.Date(ncdc_start_yymmdd)),
end = as.numeric(as.Date(ncdc_end_yymmdd)),
deltat = 1,
extend = TRUE)
remove(awdr_full_field,
ts_in)
}
# Disabled diagnostic plotting: `1 < 0` is always FALSE, so this whole
# branch is dead code kept around as a manual debugging aid.  Flip the
# guard to TRUE (or delete the block) as needed.
if (1 < 0) {
  if ("TMAX" %in% available_datafields) {
    plot(x = Date,
         y = tmax,
         type = "p",
         pch = ".", # as points
         col = "red",
         lwd = 1.5,
         cex.lab = 1.25,
         xlab = "Date",
         ylab = "Max Temperature (degC)",
         main = station_name_label)
  }
  if ("TMIN" %in% available_datafields) {
    plot(x = Date,
         y = tmin,
         type = "p",
         pch = ".", # as points
         col = "darkgoldenrod1",
         lwd = 1.5,
         cex.lab = 1.25,
         xlab = "Date",
         ylab = "Min Temperature (degC)",
         main = station_name_label)
  }
  if ("TAVG" %in% available_datafields) {
    plot(x = Date,
         y = tavg,
         type = "p",
         pch = ".", # as points
         col = "orange",
         lwd = 1.5,
         cex.lab = 1.25,
         xlab = "Date",
         ylab = "Mean Temperature (degC)",
         main = station_name_label)
  }
  if ("PRCP" %in% available_datafields) {
    plot(x = Date,
         y = prcp,
         type = "p",
         pch = ".", # as points
         col = "green",
         lwd = 1.5,
         cex.lab = 1.25,
         xlab = "Date",
         ylab = "Daily Precip (mm)",
         main = station_name_label)
  }
  if ("SNOW" %in% available_datafields) {
    plot(x = Date,
         y = snow,
         type = "p",
         pch = ".", # as points
         col = "cyan",
         lwd = 1.5,
         cex.lab = 1.25,
         xlab = "Date",
         ylab = "Daily Snowfall (mm)",
         main = station_name_label)
  }
  if ("SNWD" %in% available_datafields) {
    plot(x = Date,
         y = snwd,
         type = "p",
         pch = ".", # as points
         col = "darkblue",
         lwd = 1.5,
         cex.lab = 1.25,
         xlab = "Date",
         # NOTE(review): ylab looks copy-pasted from the SNOW plot above;
         # this is snow *depth*, not daily snowfall.
         ylab = "Daily Snowfall (mm)",
         main = station_name_label)
  }
  if ("WESD" %in% available_datafields) {
    plot(x = Date,
         y = wesd,
         type = "p",
         pch = ".", # as points
         col = "blue",
         lwd = 1.5,
         cex.lab = 1.25,
         xlab = "Date",
         ylab = "Snow Water Equivalent Depth (mm)",
         main = station_name_label)
  }
  if ("AWDR" %in% available_datafields) {
    plot(x = Date,
         y = awdr,
         type = "p",
         pch = ".", # as points
         col = "blue",
         lwd = 1.5,
         cex.lab = 1.25,
         xlab = "Date",
         ylab = "Wind Direction (degrees from)",
         main = station_name_label)
  }
  if ("WESF" %in% available_datafields) {
    plot(x = Date,
         y = wesf,
         type = "p",
         pch = ".", # as points
         col = "deepskyblue4",
         lwd = 1.5,
         cex.lab = 1.25,
         xlab = "Date",
         ylab = "Snowfall Water Equivalent (mm)",
         main = station_name_label)
  }
  if ("AWND" %in% available_datafields) {
    plot(x = Date,
         y = awnd,
         type = "p",
         pch = ".", # as points
         col = "deeppink2",
         lwd = 1.5,
         cex.lab = 1.25,
         xlab = "Date",
         ylab = "Average Wind Speed (m s-1)",
         main = station_name_label)
  }
}
# Assemble a wide daily table of every available observation for this
# station.  The table only feeds the (currently commented-out) CSV export
# below, so it is discarded immediately afterwards.
daily_obs_table = data.frame(date = Date)
if ("TMAX" %in% available_datafields) {
  daily_obs_table$Max_Temperature = tmax
}
if ("TMIN" %in% available_datafields) {
  daily_obs_table$Min_Temperature = tmin
}
if ("TAVG" %in% available_datafields) {
  daily_obs_table$Mean_Temperature = tavg
}
if ("PRCP" %in% available_datafields) {
  daily_obs_table$Precipitation = prcp
}
if ("SNOW" %in% available_datafields) {
  daily_obs_table$SnowFall = snow
}
if ("SNWD" %in% available_datafields) {
  daily_obs_table$SnowDepth = snwd
}
if ("WESD" %in% available_datafields) {
  daily_obs_table$Snowdepth_Water_Equiv = wesd
}
if ("WESF" %in% available_datafields) {
  daily_obs_table$Snowfall_Water_Equiv = wesf
}
if ("AWND" %in% available_datafields) {
  daily_obs_table$Mean_Wind_Speed = awnd
}
if ("AWDR" %in% available_datafields) {
  daily_obs_table$Mean_Wind_From_Direction = awdr
}
# Optional CSV export, disabled:
# output_file_name = paste(file_title_string, ".csv", sep = "")
# write.csv(x = daily_obs_table,
#           file = output_file_name,
#           row.names = FALSE)
remove(daily_obs_table)
# ---- netCDF output: dimensions, coordinate variables, and data variables ----
# One file per station: <root>/netCDF/<GHCND-id>__<station>.nc
netcdf_output_file_name = paste(target_data_directory_root,
                                "netCDF/",
                                file_title_string,
                                ".nc",
                                sep="")
# Unlimited daily time dimension, CF "days since" epoch units.
netcdf_time_dim = ncdim_def(name = "time",
                            units = "days since 1970-01-01 00:00:00",
                            val = Days_from_1970_01_01,
                            unlim = TRUE,
                            calendar="standard")
# Character-length dimension for the station-name string variable.
netcdf_name_dim = ncdim_def(name = "name_strlen",
                            units = "",
                            val = 1:nchar(file_title_string),
                            unlim = FALSE,
                            create_dimvar=FALSE)
# Two-element dimension for time cell bounds.
netcdf_bounds_dim = ncdim_def(name = "bnds",
                              units = "",
                              val = 1:2,
                              unlim = FALSE,
                              create_dimvar = FALSE)
# Standard netCDF default fill values for float and double.
# NOTE(review): fill_value_double is never used in the visible code.
fill_value = 9.96921e+36
fill_value_double = 9.969209968386869e+36
# NOTE: `nam =` relies on R partial matching of ncvar_def's `name` argument.
netcdf_stn = ncvar_def(nam = "station_name",
                       units = "",
                       dim = netcdf_name_dim,
                       longname = "station name",
                       prec = "char")
# Scalar (dimensionless) coordinate variables for the station location.
netcdf_lat = ncvar_def(nam = "latitude",
                       units = "degrees_north",
                       dim = list(),
                       longname = "Latitude",
                       prec = "single")
netcdf_lon = ncvar_def(nam = "longitude",
                       units = "degrees_east",
                       dim = list(),
                       longname = "Longitude",
                       prec = "single")
netcdf_alt = ncvar_def(nam = "altitude",
                       units = "m",
                       dim = list(),
                       longname = "Elevation",
                       prec = "single")
# Each daily value covers [day, day+1): build the (2 x ntime) bounds array.
bnds = 1:2
time_bounds = array( 0,
                     dim = c(2,length(Days_from_1970_01_01)),
                     dimnames = list(bnds,Days_from_1970_01_01))
time_bounds[1,] = Days_from_1970_01_01
time_bounds[2,] = Days_from_1970_01_01 + 1
netcdf_time_bounds = ncvar_def(nam = "time_bnds",
                               units = "days since 1970-01-01 00:00:00",
                               dim = list(netcdf_bounds_dim,
                                          netcdf_time_dim),
                               longname = "Time Bounds",
                               prec = "double")
# Running list of variables to create; element variables are appended below
# only when the station actually reports them (list.append is from rlist).
netcdf_available_variables = list(netcdf_time_bounds,
                                  netcdf_lat,
                                  netcdf_lon,
                                  netcdf_alt,
                                  netcdf_stn)
if ("TMAX" %in% available_datafields) {
  netcdf_tmax = ncvar_def(nam = "maximum_air_temperature",
                          units = "degC",
                          dim = netcdf_time_dim,
                          missval = fill_value,
                          longname = "2-m Maximum Daily Air Temperature",
                          prec = "single")
  netcdf_available_variables = list.append(netcdf_available_variables,
                                           netcdf_tmax)
}
if ("TMIN" %in% available_datafields) {
  netcdf_tmin = ncvar_def(nam = "minimum_air_temperature",
                          units = "degC",
                          dim = netcdf_time_dim,
                          missval = fill_value,
                          longname = "2-m Minimium Daily Air Temperature",
                          prec = "single")
  netcdf_available_variables = list.append(netcdf_available_variables,
                                           netcdf_tmin)
}
if ("TAVG" %in% available_datafields) {
  netcdf_tavg = ncvar_def(nam = "mean_air_temperature",
                          units = "degC",
                          dim = netcdf_time_dim,
                          missval = fill_value,
                          longname = "2-m Mean Daily Air Temperature",
                          prec = "single")
  netcdf_available_variables = list.append(netcdf_available_variables,
                                           netcdf_tavg)
}
if ("PRCP" %in% available_datafields) {
  netcdf_prcp = ncvar_def(nam = "precipitation_amount",
                          units = "kg m-2",
                          dim = netcdf_time_dim,
                          missval = fill_value,
                          longname = "Daily Total Precipitation",
                          prec = "single")
  netcdf_available_variables = list.append(netcdf_available_variables,
                                           netcdf_prcp)
}
if ("SNOW" %in% available_datafields) {
  # Units are metres: values are divided by 1000 at write time (see ncvar_put).
  netcdf_snow = ncvar_def(nam = "thickness_of_snowfall_amount",
                          units = "m",
                          dim = netcdf_time_dim,
                          missval = fill_value,
                          longname = "Daily Total Snowfall",
                          prec = "single")
  netcdf_available_variables = list.append(netcdf_available_variables,
                                           netcdf_snow)
}
if ("SNWD" %in% available_datafields) {
  # Units are metres: values are divided by 1000 at write time (see ncvar_put).
  netcdf_snwd = ncvar_def(nam = "surface_snow_thickness",
                          units = "m",
                          dim = netcdf_time_dim,
                          missval = fill_value,
                          longname = "Snow Depth on Surface",
                          prec = "single")
  netcdf_available_variables = list.append(netcdf_available_variables,
                                           netcdf_snwd)
}
if ("WESD" %in% available_datafields) {
  netcdf_wesd = ncvar_def(nam = "liquid_water_content_of_surface_snow",
                          units = "kg m-2",
                          dim = netcdf_time_dim,
                          missval = fill_value,
                          longname = "Liquid Snow Water Equivalent Depth on Surface",
                          prec = "single")
  netcdf_available_variables = list.append(netcdf_available_variables,
                                           netcdf_wesd)
}
if ("WESF" %in% available_datafields) {
  netcdf_wesf = ncvar_def(nam = "liquid_water_equivalent_snowfall_amount",
                          units = "kg m-2",
                          dim = netcdf_time_dim,
                          missval = fill_value,
                          longname = "Liquid Snowfall Water Equivalent Depth on Surface",
                          prec = "single")
  netcdf_available_variables = list.append(netcdf_available_variables,
                                           netcdf_wesf)
}
if ("AWND" %in% available_datafields) {
  netcdf_awnd = ncvar_def(nam = "mean_wind_speed",
                          units = "m s-1",
                          dim = netcdf_time_dim,
                          missval = fill_value,
                          longname = "Mean Daily Wind Speed",
                          prec = "single")
  netcdf_available_variables = list.append(netcdf_available_variables,
                                           netcdf_awnd)
}
if ("AWDR" %in% available_datafields) {
  netcdf_awdr = ncvar_def(nam = "mean_wind_from_direction",
                          units = "degrees_from",
                          dim = netcdf_time_dim,
                          missval = fill_value,
                          longname = "Mean Daily Wind Origin Direction",
                          prec = "single")
  netcdf_available_variables = list.append(netcdf_available_variables,
                                           netcdf_awdr)
}
# Create the netCDF-3 file (force_v4 = FALSE) with all variables defined
# above, then attach global and coordinate-variable attributes.
nc_ghcn = nc_create(filename = netcdf_output_file_name,
                    vars = netcdf_available_variables,
                    force_v4 = FALSE,
                    verbose = FALSE )
# ---- Global attributes (varid = 0) ----
# NOTE(review): the Title says "Hourly" but the series written here are
# daily -- confirm intended wording before changing.
ncatt_put(nc = nc_ghcn,
          varid = 0,
          attname = "Title",
          attval = paste("NCEI Data Hourly Output for ",
                         station_name_label,
                         sep=""),
          prec = NA,
          verbose = FALSE,
          definemode = FALSE )
ncatt_put(nc = nc_ghcn,
          varid = 0,
          attname = "GHCN_Station_Code",
          attval = ncdc_id_code,
          prec = NA,
          verbose = FALSE,
          definemode = FALSE )
ncatt_put(nc = nc_ghcn,
          varid = 0,
          attname = "Station_Name",
          attval = station_name_label,
          prec = NA,
          verbose = FALSE,
          definemode = FALSE )
ncatt_put(nc = nc_ghcn,
          varid = 0,
          attname = "Station_Latitude",
          attval = station_latitude,
          prec = NA,
          verbose = FALSE,
          definemode = FALSE )
ncatt_put(nc = nc_ghcn,
          varid = 0,
          attname = "Station_Longitude",
          attval = station_longitude,
          prec = NA,
          verbose = FALSE,
          definemode = FALSE )
ncatt_put(nc = nc_ghcn,
          varid = 0,
          attname = "Station_Elevation_in_Meters",
          attval = station_altitude,
          prec = NA,
          verbose = FALSE,
          definemode = FALSE )
# CF discrete-sampling-geometry metadata: point time series, CF-1.6.
ncatt_put(nc = nc_ghcn,
          varid = 0,
          attname = "featureType",
          attval = "timeSeries",
          prec = NA,
          verbose = FALSE,
          definemode = FALSE)
ncatt_put(nc = nc_ghcn,
          varid = 0,
          attname = "Conventions",
          attval = "CF-1.6",
          prec = NA,
          verbose = FALSE,
          definemode = FALSE)
# ---- station_name attributes ----
ncatt_put(nc = nc_ghcn,
          varid = netcdf_stn,
          attname = "description",
          attval = "station name",
          prec = NA,
          verbose = FALSE,
          definemode = FALSE )
ncatt_put(nc = nc_ghcn,
          varid = netcdf_stn,
          attname = "cf_role",
          attval = "timeseries_id",
          prec = NA,
          verbose = FALSE,
          definemode = FALSE )
# ---- altitude attributes ----
ncatt_put(nc = nc_ghcn,
          varid = netcdf_alt,
          attname = "standard_name",
          attval = "altitude",
          prec = NA,
          verbose = FALSE,
          definemode = FALSE )
ncatt_put(nc = nc_ghcn,
          varid = netcdf_alt,
          attname = "axis",
          attval = "Z",
          prec = NA,
          verbose = FALSE,
          definemode = FALSE )
ncatt_put(nc = nc_ghcn,
          varid = netcdf_alt,
          attname = "positive",
          attval = "up",
          prec = NA,
          verbose = FALSE,
          definemode = FALSE )
ncatt_put(nc = nc_ghcn,
          varid = netcdf_alt,
          attname = "description",
          attval = "Elevation",
          prec = NA,
          verbose = FALSE,
          definemode = FALSE )
# ---- longitude attributes ----
ncatt_put(nc = nc_ghcn,
          varid = netcdf_lon,
          attname = "standard_name",
          attval = "longitude",
          prec = NA,
          verbose = FALSE,
          definemode = FALSE )
ncatt_put(nc = nc_ghcn,
          varid = netcdf_lon,
          attname = "axis",
          attval = "X",
          prec = NA,
          verbose = FALSE,
          definemode = FALSE )
ncatt_put(nc = nc_ghcn,
          varid = netcdf_lon,
          attname = "description",
          attval = "Longitude",
          prec = NA,
          verbose = FALSE,
          definemode = FALSE )
# ---- latitude attributes ----
ncatt_put(nc = nc_ghcn,
          varid = netcdf_lat,
          attname = "standard_name",
          attval = "latitude",
          prec = NA,
          verbose = FALSE,
          definemode = FALSE )
ncatt_put(nc = nc_ghcn,
          varid = netcdf_lat,
          attname = "description",
          attval = "Latitude",
          prec = NA,
          verbose = FALSE,
          definemode = FALSE )
ncatt_put(nc = nc_ghcn,
          varid = netcdf_lat,
          attname = "axis",
          attval = "Y",
          prec = NA,
          verbose = FALSE,
          definemode = FALSE )
# ---- time attributes (addressed by name, not by ncvar object) ----
ncatt_put(nc = nc_ghcn,
          varid = "time",
          attname = "description",
          attval = "time",
          prec = NA,
          verbose = FALSE,
          definemode = FALSE )
ncatt_put(nc = nc_ghcn,
          varid = "time",
          attname = "bounds",
          attval = "time_bnds",
          prec = NA,
          verbose = FALSE,
          definemode = FALSE )
ncatt_put(nc = nc_ghcn,
          varid = "time",
          attname = "axis",
          attval = "T",
          prec = NA,
          verbose = FALSE,
          definemode = FALSE )
# ---- time_bnds attributes ----
ncatt_put(nc = nc_ghcn,
          varid = netcdf_time_bounds,
          attname = "description",
          attval = "Time Bounds",
          prec = NA,
          verbose = FALSE,
          definemode = FALSE )
ncatt_put(nc = nc_ghcn,
          varid = netcdf_time_bounds,
          attname = "standard_name",
          attval = "time",
          prec = NA,
          verbose = FALSE,
          definemode = FALSE )
# Per-element CF attributes (standard_name, cell_methods, description,
# coordinates), written only for elements the station actually reports.
if ("TMAX" %in% available_datafields) {
  ncatt_put(nc = nc_ghcn,
            varid = netcdf_tmax,
            attname = "standard_name",
            attval = "air_temperature",
            prec = NA,
            verbose = FALSE,
            definemode = FALSE )
  ncatt_put(nc = nc_ghcn,
            varid = netcdf_tmax,
            attname = "cell_methods",
            attval = "time: maximum",
            prec = NA,
            verbose = FALSE,
            definemode = FALSE )
  ncatt_put(nc = nc_ghcn,
            varid = netcdf_tmax,
            attname = "description",
            attval = "2-m Maximum Daily Air Temperature",
            prec = NA,
            verbose = FALSE,
            definemode = FALSE )
  ncatt_put(nc = nc_ghcn,
            varid = netcdf_tmax,
            attname = "coordinates",
            attval = "time latitude longitude altitude station_name",
            prec = NA,
            verbose = FALSE,
            definemode = FALSE )
}
if ("TMIN" %in% available_datafields) {
  ncatt_put(nc = nc_ghcn,
            varid = netcdf_tmin,
            attname = "standard_name",
            attval = "air_temperature",
            prec = NA,
            verbose = FALSE,
            definemode = FALSE )
  ncatt_put(nc = nc_ghcn,
            varid = netcdf_tmin,
            attname = "cell_methods",
            attval = "time: minimum",
            prec = NA,
            verbose = FALSE,
            definemode = FALSE )
  ncatt_put(nc = nc_ghcn,
            varid = netcdf_tmin,
            attname = "description",
            attval = "2-m Minimium Daily Air Temperature",
            prec = NA,
            verbose = FALSE,
            definemode = FALSE )
  ncatt_put(nc = nc_ghcn,
            varid = netcdf_tmin,
            attname = "coordinates",
            attval = "time latitude longitude altitude station_name",
            prec = NA,
            verbose = FALSE,
            definemode = FALSE )
}
if ("TAVG" %in% available_datafields) {
  ncatt_put(nc = nc_ghcn,
            varid = netcdf_tavg,
            attname = "standard_name",
            attval = "air_temperature",
            prec = NA,
            verbose = FALSE,
            definemode = FALSE )
  ncatt_put(nc = nc_ghcn,
            varid = netcdf_tavg,
            attname = "cell_methods",
            attval = "time: mean",
            prec = NA,
            verbose = FALSE,
            definemode = FALSE )
  ncatt_put(nc = nc_ghcn,
            varid = netcdf_tavg,
            attname = "description",
            attval = "2-m Mean Daily Air Temperature",
            prec = NA,
            verbose = FALSE,
            definemode = FALSE )
  ncatt_put(nc = nc_ghcn,
            varid = netcdf_tavg,
            attname = "coordinates",
            attval = "time latitude longitude altitude station_name",
            prec = NA,
            verbose = FALSE,
            definemode = FALSE )
}
if ("PRCP" %in% available_datafields) {
  ncatt_put(nc = nc_ghcn,
            varid = netcdf_prcp,
            attname = "standard_name",
            attval = "precipitation_amount",
            prec = NA,
            verbose = FALSE,
            definemode = FALSE )
  ncatt_put(nc = nc_ghcn,
            varid = netcdf_prcp,
            attname = "cell_methods",
            attval = "time: sum",
            prec = NA,
            verbose = FALSE,
            definemode = FALSE )
  ncatt_put(nc = nc_ghcn,
            varid = netcdf_prcp,
            attname = "description",
            attval = "Daily Total Precipitation",
            prec = NA,
            verbose = FALSE,
            definemode = FALSE )
  ncatt_put(nc = nc_ghcn,
            varid = netcdf_prcp,
            attname = "coordinates",
            attval = "time latitude longitude altitude station_name",
            prec = NA,
            verbose = FALSE,
            definemode = FALSE )
}
if ("SNOW" %in% available_datafields) {
  ncatt_put(nc = nc_ghcn,
            varid = netcdf_snow,
            attname = "standard_name",
            attval = "thickness_of_snowfall_amount",
            prec = NA,
            verbose = FALSE,
            definemode = FALSE )
  ncatt_put(nc = nc_ghcn,
            varid = netcdf_snow,
            attname = "cell_methods",
            attval = "time: sum",
            prec = NA,
            verbose = FALSE,
            definemode = FALSE )
  ncatt_put(nc = nc_ghcn,
            varid = netcdf_snow,
            attname = "description",
            attval = "Daily Total Snowfall",
            prec = NA,
            verbose = FALSE,
            definemode = FALSE )
  ncatt_put(nc = nc_ghcn,
            varid = netcdf_snow,
            attname = "coordinates",
            attval = "time latitude longitude altitude station_name",
            prec = NA,
            verbose = FALSE,
            definemode = FALSE )
}
if ("SNWD" %in% available_datafields) {
  ncatt_put(nc = nc_ghcn,
            varid = netcdf_snwd,
            attname = "standard_name",
            attval = "surface_snow_thickness",
            prec = NA,
            verbose = FALSE,
            definemode = FALSE )
  # "time: point" -- snow depth is an instantaneous state, not a daily sum.
  ncatt_put(nc = nc_ghcn,
            varid = netcdf_snwd,
            attname = "cell_methods",
            attval = "time: point",
            prec = NA,
            verbose = FALSE,
            definemode = FALSE )
  ncatt_put(nc = nc_ghcn,
            varid = netcdf_snwd,
            attname = "description",
            attval = "Snow Depth on Surface",
            prec = NA,
            verbose = FALSE,
            definemode = FALSE )
  ncatt_put(nc = nc_ghcn,
            varid = netcdf_snwd,
            attname = "coordinates",
            attval = "time latitude longitude altitude station_name",
            prec = NA,
            verbose = FALSE,
            definemode = FALSE )
}
if ("WESD" %in% available_datafields) {
  ncatt_put(nc = nc_ghcn,
            varid = netcdf_wesd,
            attname = "standard_name",
            attval = "liquid_water_content_of_surface_snow",
            prec = NA,
            verbose = FALSE,
            definemode = FALSE )
  ncatt_put(nc = nc_ghcn,
            varid = netcdf_wesd,
            attname = "cell_methods",
            attval = "time: point",
            prec = NA,
            verbose = FALSE,
            definemode = FALSE )
  ncatt_put(nc = nc_ghcn,
            varid = netcdf_wesd,
            attname = "description",
            attval = "Liquid Snow Water Equivalent Depth on Surface",
            prec = NA,
            verbose = FALSE,
            definemode = FALSE )
  ncatt_put(nc = nc_ghcn,
            varid = netcdf_wesd,
            attname = "coordinates",
            attval = "time latitude longitude altitude station_name",
            prec = NA,
            verbose = FALSE,
            definemode = FALSE )
}
# BUG FIX: this guard previously tested "WSEF" (transposed letters), so the
# WESF attributes were never written even when the netcdf_wesf variable had
# been created -- leaving the variable in the file with no CF metadata.
if ("WESF" %in% available_datafields) {
  # NOTE(review): the variable holds liquid water *equivalent* of snowfall;
  # confirm whether "snowfall_amount" is the intended standard_name.
  ncatt_put(nc = nc_ghcn,
            varid = netcdf_wesf,
            attname = "standard_name",
            attval = "snowfall_amount",
            prec = NA,
            verbose = FALSE,
            definemode = FALSE )
  ncatt_put(nc = nc_ghcn,
            varid = netcdf_wesf,
            attname = "cell_methods",
            attval = "time: sum",
            prec = NA,
            verbose = FALSE,
            definemode = FALSE )
  ncatt_put(nc = nc_ghcn,
            varid = netcdf_wesf,
            attname = "description",
            attval = "Liquid Snowfall Water Equivalent",
            prec = NA,
            verbose = FALSE,
            definemode = FALSE )
  ncatt_put(nc = nc_ghcn,
            varid = netcdf_wesf,
            attname = "coordinates",
            attval = "time latitude longitude altitude station_name",
            prec = NA,
            verbose = FALSE,
            definemode = FALSE )
}
# Wind-element CF attributes, same pattern as the temperature/precip blocks.
if ("AWND" %in% available_datafields) {
  ncatt_put(nc = nc_ghcn,
            varid = netcdf_awnd,
            attname = "standard_name",
            attval = "wind_speed",
            prec = NA,
            verbose = FALSE,
            definemode = FALSE )
  ncatt_put(nc = nc_ghcn,
            varid = netcdf_awnd,
            attname = "cell_methods",
            attval = "time: mean",
            prec = NA,
            verbose = FALSE,
            definemode = FALSE )
  ncatt_put(nc = nc_ghcn,
            varid = netcdf_awnd,
            attname = "description",
            attval = "Mean Daily Wind Speed",
            prec = NA,
            verbose = FALSE,
            definemode = FALSE )
  ncatt_put(nc = nc_ghcn,
            varid = netcdf_awnd,
            attname = "coordinates",
            attval = "time latitude longitude altitude station_name",
            prec = NA,
            verbose = FALSE,
            definemode = FALSE )
}
if ("AWDR" %in% available_datafields) {
  ncatt_put(nc = nc_ghcn,
            varid = netcdf_awdr,
            attname = "standard_name",
            attval = "wind_from_direction",
            prec = NA,
            verbose = FALSE,
            definemode = FALSE )
  ncatt_put(nc = nc_ghcn,
            varid = netcdf_awdr,
            attname = "cell_methods",
            attval = "time: mean",
            prec = NA,
            verbose = FALSE,
            definemode = FALSE )
  ncatt_put(nc = nc_ghcn,
            varid = netcdf_awdr,
            attname = "description",
            attval = "Mean Daily Wind Origin Direction",
            prec = NA,
            verbose = FALSE,
            definemode = FALSE )
  ncatt_put(nc = nc_ghcn,
            varid = netcdf_awdr,
            attname = "coordinates",
            attval = "time latitude longitude altitude station_name",
            prec = NA,
            verbose = FALSE,
            definemode = FALSE )
}
# ---- Write data into the file, freeing each series as it is written ----
ncvar_put(nc = nc_ghcn,
          varid = netcdf_time_bounds,
          vals = time_bounds,
          verbose = FALSE )
ncvar_put(nc = nc_ghcn,
          varid = netcdf_lat,
          vals = station_latitude,
          verbose = FALSE )
remove(netcdf_lat,
       station_latitude)
ncvar_put(nc = nc_ghcn,
          varid = netcdf_lon,
          vals = station_longitude,
          verbose = FALSE )
remove(netcdf_lon,
       station_longitude)
ncvar_put(nc = nc_ghcn,
          varid = netcdf_alt,
          vals = station_altitude,
          verbose = FALSE )
ncvar_put(nc = nc_ghcn,
          varid = netcdf_stn,
          vals = file_title_string,
          verbose = FALSE )
remove(netcdf_alt,
       station_altitude)
if ("TMAX" %in% available_datafields) {
  ncvar_put(nc = nc_ghcn,
            varid = netcdf_tmax,
            vals = tmax,
            verbose = FALSE )
  remove(netcdf_tmax,
         tmax)
}
if ("TMIN" %in% available_datafields) {
  ncvar_put(nc = nc_ghcn,
            varid = netcdf_tmin,
            vals = tmin,
            verbose = FALSE )
  remove(netcdf_tmin,
         tmin)
}
if ("TAVG" %in% available_datafields) {
  ncvar_put(nc = nc_ghcn,
            varid = netcdf_tavg,
            vals = tavg,
            verbose = FALSE )
  remove(netcdf_tavg,
         tavg)
}
if ("PRCP" %in% available_datafields) {
  ncvar_put(nc = nc_ghcn,
            varid = netcdf_prcp,
            vals = prcp,
            verbose = FALSE )
  remove(netcdf_prcp,
         prcp)
}
if ("SNOW" %in% available_datafields) {
  # mm -> m to match the variable's declared "m" units.
  ncvar_put(nc = nc_ghcn,
            varid = netcdf_snow,
            vals = (snow/1000.0),
            verbose = FALSE )
  remove(netcdf_snow,
         snow)
}
if ("SNWD" %in% available_datafields) {
  # mm -> m to match the variable's declared "m" units.
  ncvar_put(nc = nc_ghcn,
            varid = netcdf_snwd,
            vals = (snwd/1000.0),
            verbose = FALSE )
  remove(netcdf_snwd,
         snwd)
}
if ("WESD" %in% available_datafields) {
  ncvar_put(nc = nc_ghcn,
            varid = netcdf_wesd,
            vals = wesd,
            verbose = FALSE )
  remove(netcdf_wesd,
         wesd)
}
if ("WESF" %in% available_datafields) {
  ncvar_put(nc = nc_ghcn,
            varid = netcdf_wesf,
            vals = wesf,
            verbose = FALSE )
  remove(netcdf_wesf,
         wesf)
}
if ("AWND" %in% available_datafields) {
  ncvar_put(nc = nc_ghcn,
            varid = netcdf_awnd,
            vals = awnd,
            verbose = FALSE )
  remove(netcdf_awnd,
         awnd)
}
if ("AWDR" %in% available_datafields) {
  # Requires `awdr` from the windowing step above.
  ncvar_put(nc = nc_ghcn,
            varid = netcdf_awdr,
            vals = awdr,
            verbose = FALSE )
  remove(netcdf_awdr,
         awdr)
}
# Flush and close the file, then drop the handle and the time dimension.
nc_close( nc_ghcn )
remove(nc_ghcn,
       netcdf_time_dim)
# Drop all remaining per-station objects before the next loop iteration.
# BUG FIX: `output_file_name` was removed from this list -- it is only
# created in the commented-out CSV-export code above, so remove()-ing it
# raised an "object not found" warning on every iteration.
remove(Date,
       Days_from_1970_01_01,
       available_datafields,
       file_title_string,
       filename_ghcn_label,
       filename_station_label,
       fill_value,
       ghcn_station_code,
       ncdc_end_yymmdd,
       ncdc_start_yymmdd,
       netcdf_available_variables,
       ncdc_id_code,
       netcdf_output_file_name,
       station_name_label)
}
| /rnoaa_GHCN_csv_netcdf_loop.R | no_license | wjcapehart/NCDC_rnoaa_ISD | R | false | false | 58,042 | r |
# Plain R Script (can't do this in a notebook that
# loops through a designated region classification)
# [state, county, country, watershed]
# and extracts data from NCDC. Saves Data as
# CSV and netCDF.
library("rnoaa")
library("isdparser")
library("lubridate")
library("ncdf4")
library("dplyr")
# library("openair")
library("rlist")
library("readxl")
library("tidyverse")
library("tidycensus")
# my common loc ids : SD->FIPS:46
# AK->FIPS:02
# NC->FIPS:37
# CA->FIPS:06
# WI->FIPS:55
# PA->FIPS:42
# NM->FIPS:35
# NAMIBIA->FIPS:WA
# Mongolia->FIPS:MG
# CHEYENNE->HUC:101202 &
# HUC:101201
# CHEYENNE->HUC:101202 &
# HUC:101201
# Pennington->FIPS:46103
# Buncombe->FIPS:37021
# Onslow->FIPS:37133
# Query NCDC for all GHCN-Daily stations in the target region
# (FIPS:AS = Australia), restricted to the given lat/lon extent.
# NOTE: the API caps results at limit = 1000; a full page is treated as
# overflow below.
ncdc_ids = ncdc_stations(locationid = 'FIPS:AS',
                         datasetid = 'GHCND',
                         extent = c(-89, 138, 89, 139),
                         limit = 1000)
#extent = c(-89.,119,89,120),
#extent = c(50,11,51,12),
# Output tree: <root>/netCDF/ for .nc files, <root>/RData/ for .Rdata files.
target_data_directory_root = "./GHCN_DATA/AUS/"
dir.create(path = str_c(target_data_directory_root,"netCDF", sep=""),
           recursive = TRUE)
dir.create(path = str_c(target_data_directory_root,"RData", sep=""),
           recursive = TRUE)
rdata_bigfile_name = "GHCND-GLOBAL_"
# NOTE(review): this reads the *pageCount* metadata field, not a station
# count, and n_stations is not used in the visible code -- verify intent.
n_stations = ncdc_ids$meta$pageCount
ncdc_ids = ncdc_ids$data
print(ncdc_ids)
ghcn_station_information = ncdc_ids
total_number_of_stations = length(ncdc_ids$name)
# A full page means the query likely truncated the station list: bail out.
if (total_number_of_stations == 1000) {
  print("too many files")
  stop()
}
print("")
print(" Begin Looping")
print("")
ghcn_metadata = read_excel(path = "~/GitHub/NCDC_rnoaa_ISD/GHCN_Metadata.xlsx")
ghcn_station_information$mindate = as.Date(ghcn_station_information$mindate)
ghcn_station_information$maxdate = as.Date(ghcn_station_information$maxdate)
# NOTE(review): indexlist is unused in the visible code; prefer
# seq_len(total_number_of_stations) over 1:n if it is kept.
indexlist = 1:total_number_of_stations
for (ncdc_index in 1:total_number_of_stations ) {
# ---- Per-station setup: metadata, download, and filename labels ----
station_name_label = ncdc_ids$name[ncdc_index]
station_latitude = ncdc_ids$latitude[ncdc_index]
station_longitude = ncdc_ids$longitude[ncdc_index]
station_altitude = ncdc_ids$elevation[ncdc_index]
ncdc_id_code = ncdc_ids$id[ncdc_index]
# Split "GHCND:XXXXXXXX" -> c("GHCND", "XXXXXXXX"); element 2 is the raw id.
ghcn_station_code = unlist(strsplit(x = ncdc_id_code,
                                    split = ":"))
ncdc_start_yymmdd = ncdc_ids$mindate[ncdc_index]
ncdc_end_yymmdd = ncdc_ids$maxdate[ncdc_index]
# Continuous daily axis spanning the station's period of record.
Date = seq(from = as.Date(ncdc_start_yymmdd),
           to = as.Date(ncdc_end_yymmdd),
           by = "days")
# Fetch the full GHCN-Daily record and split it into one tibble per element.
ncdc_data = ghcnd(stationid = ghcn_station_code[2])
available_datafields = unique(ncdc_data$element)
sorted_data = ghcnd_splitvars(ncdc_data)
# Build a filesystem-safe station label: strip " US", commas, and spaces.
filename_station_label = ncdc_ids$name[ncdc_index]
filename_station_label = gsub(" US", "", filename_station_label)
filename_station_label = gsub( ",", "", filename_station_label)
filename_station_label = gsub( " ", "_", filename_station_label)
filename_ghcn_label = ncdc_ids$id[ncdc_index]
filename_ghcn_label = gsub(":", "-", filename_ghcn_label)
file_title_string = paste(filename_ghcn_label,
                          "__",
                          filename_station_label,
                          sep="")
remove(ncdc_data)
print("--------------------------")
print(paste("Station # ",
            ncdc_index,
            " of ",
            total_number_of_stations,
            sep = ""))
print(filename_station_label)
# Skeleton table: station metadata repeated over the full daily axis;
# each available element is full_join-ed in on the shared key columns.
ghcn_station = data.frame( station_name = as.character(ncdc_ids$name[ncdc_index]),
                           station_latitude = ncdc_ids$latitude[ncdc_index],
                           station_longitude = ncdc_ids$longitude[ncdc_index],
                           station_altitude = ncdc_ids$elevation[ncdc_index],
                           ncdc_id_code = as.character(ncdc_id_code),
                           time = Date)
print(available_datafields)
# ---- Join in each available element (sorted by date, rescaled) ----
# The `/ 10` rescaling converts GHCN-Daily tenths (of degC / mm / m s-1)
# to whole units -- per the GHCN-Daily readme; confirm against current docs.
if ("TMAX" %in% available_datafields) {
  tmax_full_field = sorted_data$tmax
  ordered = order(tmax_full_field$date)
  tmax_full_field$tmax[] = tmax_full_field$tmax[ordered] / 10
  tmax_full_field$date[] = tmax_full_field$date[ordered]
  sub = data.frame( station_name = as.character(ncdc_ids$name[ncdc_index]),
                    station_latitude = ncdc_ids$latitude[ncdc_index],
                    station_longitude = ncdc_ids$longitude[ncdc_index],
                    station_altitude = ncdc_ids$elevation[ncdc_index],
                    ncdc_id_code = as.character(ncdc_id_code),
                    time = tmax_full_field$date,
                    tmax = tmax_full_field$tmax)
  ghcn_station = full_join(ghcn_station,
                           sub,
                           by=c("station_name",
                                "station_latitude",
                                "station_longitude",
                                "station_altitude",
                                "ncdc_id_code",
                                "time") )
}
if ("TMIN" %in% available_datafields) {
  tmin_full_field = sorted_data$tmin
  ordered = order(tmin_full_field$date)
  tmin_full_field$tmin[] = tmin_full_field$tmin[ordered] / 10
  tmin_full_field$date[] = tmin_full_field$date[ordered]
  sub = data.frame( station_name = as.character(ncdc_ids$name[ncdc_index]),
                    station_latitude = ncdc_ids$latitude[ncdc_index],
                    station_longitude = ncdc_ids$longitude[ncdc_index],
                    station_altitude = ncdc_ids$elevation[ncdc_index],
                    ncdc_id_code = as.character(ncdc_id_code),
                    time = tmin_full_field$date,
                    tmin = tmin_full_field$tmin)
  ghcn_station = full_join(ghcn_station,
                           sub,
                           by=c("station_name",
                                "station_latitude",
                                "station_longitude",
                                "station_altitude",
                                "ncdc_id_code",
                                "time") )
}
if ("TAVG" %in% available_datafields) {
  tavg_full_field = sorted_data$tavg
  ordered = order(tavg_full_field$date)
  tavg_full_field$tavg[] = tavg_full_field$tavg[ordered] / 10
  tavg_full_field$date[] = tavg_full_field$date[ordered]
  sub = data.frame( station_name = as.character(ncdc_ids$name[ncdc_index]),
                    station_latitude = ncdc_ids$latitude[ncdc_index],
                    station_longitude = ncdc_ids$longitude[ncdc_index],
                    station_altitude = ncdc_ids$elevation[ncdc_index],
                    ncdc_id_code = as.character(ncdc_id_code),
                    time = tavg_full_field$date,
                    tavg = tavg_full_field$tavg)
  ghcn_station = full_join(ghcn_station,
                           sub,
                           by=c("station_name",
                                "station_latitude",
                                "station_longitude",
                                "station_altitude",
                                "ncdc_id_code",
                                "time") )
}
if ("PRCP" %in% available_datafields) {
  prcp_full_field = sorted_data$prcp
  ordered = order(prcp_full_field$date)
  prcp_full_field$prcp[] = prcp_full_field$prcp[ordered] / 10
  prcp_full_field$date[] = prcp_full_field$date[ordered]
  sub = data.frame( station_name = as.character(ncdc_ids$name[ncdc_index]),
                    station_latitude = ncdc_ids$latitude[ncdc_index],
                    station_longitude = ncdc_ids$longitude[ncdc_index],
                    station_altitude = ncdc_ids$elevation[ncdc_index],
                    ncdc_id_code = as.character(ncdc_id_code),
                    time = prcp_full_field$date,
                    prcp = prcp_full_field$prcp)
  ghcn_station = full_join(ghcn_station,
                           sub,
                           by=c("station_name",
                                "station_latitude",
                                "station_longitude",
                                "station_altitude",
                                "ncdc_id_code",
                                "time") )
}
if ("SNOW" %in% available_datafields) {
  # SNOW is already in mm: no /10 rescale.
  snow_full_field = sorted_data$snow
  ordered = order(snow_full_field$date)
  snow_full_field$snow[] = snow_full_field$snow[ordered]
  snow_full_field$date[] = snow_full_field$date[ordered]
  sub = data.frame( station_name = as.character(ncdc_ids$name[ncdc_index]),
                    station_latitude = ncdc_ids$latitude[ncdc_index],
                    station_longitude = ncdc_ids$longitude[ncdc_index],
                    station_altitude = ncdc_ids$elevation[ncdc_index],
                    ncdc_id_code = as.character(ncdc_id_code),
                    time = snow_full_field$date,
                    snow = snow_full_field$snow)
  ghcn_station = full_join(ghcn_station,
                           sub,
                           by=c("station_name",
                                "station_latitude",
                                "station_longitude",
                                "station_altitude",
                                "ncdc_id_code",
                                "time") )
}
if ("SNWD" %in% available_datafields) {
  # SNWD is already in mm: no /10 rescale.
  snwd_full_field = sorted_data$snwd
  ordered = order(snwd_full_field$date)
  snwd_full_field$snwd[] = snwd_full_field$snwd[ordered]
  snwd_full_field$date[] = snwd_full_field$date[ordered]
  sub = data.frame( station_name = as.character(ncdc_ids$name[ncdc_index]),
                    station_latitude = ncdc_ids$latitude[ncdc_index],
                    station_longitude = ncdc_ids$longitude[ncdc_index],
                    station_altitude = ncdc_ids$elevation[ncdc_index],
                    ncdc_id_code = as.character(ncdc_id_code),
                    time = snwd_full_field$date,
                    snwd = snwd_full_field$snwd)
  ghcn_station = full_join(ghcn_station,
                           sub,
                           by=c("station_name",
                                "station_latitude",
                                "station_longitude",
                                "station_altitude",
                                "ncdc_id_code",
                                "time") )
}
if ("WESD" %in% available_datafields) {
  wesd_full_field = sorted_data$wesd
  ordered = order(wesd_full_field$date)
  wesd_full_field$wesd[] = wesd_full_field$wesd[ordered] / 10
  wesd_full_field$date[] = wesd_full_field$date[ordered]
  sub = data.frame( station_name = as.character(ncdc_ids$name[ncdc_index]),
                    station_latitude = ncdc_ids$latitude[ncdc_index],
                    station_longitude = ncdc_ids$longitude[ncdc_index],
                    station_altitude = ncdc_ids$elevation[ncdc_index],
                    ncdc_id_code = as.character(ncdc_id_code),
                    time = wesd_full_field$date,
                    wesd = wesd_full_field$wesd)
  ghcn_station = full_join(ghcn_station,
                           sub,
                           by=c("station_name",
                                "station_latitude",
                                "station_longitude",
                                "station_altitude",
                                "ncdc_id_code",
                                "time") )
}
if ("WESF" %in% available_datafields) {
  wesf_full_field = sorted_data$wesf
  ordered = order(wesf_full_field$date)
  wesf_full_field$wesf[] = wesf_full_field$wesf[ordered] / 10
  wesf_full_field$date[] = wesf_full_field$date[ordered]
  sub = data.frame( station_name = as.character(ncdc_ids$name[ncdc_index]),
                    station_latitude = ncdc_ids$latitude[ncdc_index],
                    station_longitude = ncdc_ids$longitude[ncdc_index],
                    station_altitude = ncdc_ids$elevation[ncdc_index],
                    ncdc_id_code = as.character(ncdc_id_code),
                    time = wesf_full_field$date,
                    wesf = wesf_full_field$wesf)
  ghcn_station = full_join(ghcn_station,
                           sub,
                           by=c("station_name",
                                "station_latitude",
                                "station_longitude",
                                "station_altitude",
                                "ncdc_id_code",
                                "time") )
}
if ("AWND" %in% available_datafields) {
  awnd_full_field = sorted_data$awnd
  ordered = order(awnd_full_field$date)
  awnd_full_field$awnd[] = awnd_full_field$awnd[ordered] / 10
  awnd_full_field$date[] = awnd_full_field$date[ordered]
  sub = data.frame( station_name = as.character(ncdc_ids$name[ncdc_index]),
                    station_latitude = ncdc_ids$latitude[ncdc_index],
                    station_longitude = ncdc_ids$longitude[ncdc_index],
                    station_altitude = ncdc_ids$elevation[ncdc_index],
                    ncdc_id_code = as.character(ncdc_id_code),
                    time = awnd_full_field$date,
                    awnd = awnd_full_field$awnd)
  ghcn_station = full_join(ghcn_station,
                           sub,
                           by=c("station_name",
                                "station_latitude",
                                "station_longitude",
                                "station_altitude",
                                "ncdc_id_code",
                                "time") )
}
if ("AWDR" %in% available_datafields) {
  print(" AWDR")
  # AWDR (degrees) is not in tenths: no /10 rescale.
  awdr_full_field = sorted_data$awdr
  ordered = order(awdr_full_field$date)
  awdr_full_field$awdr[] = awdr_full_field$awdr[ordered]
  awdr_full_field$date[] = awdr_full_field$date[ordered]
  sub = data.frame( station_name = as.character(ncdc_ids$name[ncdc_index]),
                    station_latitude = ncdc_ids$latitude[ncdc_index],
                    station_longitude = ncdc_ids$longitude[ncdc_index],
                    station_altitude = ncdc_ids$elevation[ncdc_index],
                    ncdc_id_code = as.character(ncdc_id_code),
                    time = awdr_full_field$date,
                    awdr = awdr_full_field$awdr)
  ghcn_station = full_join(ghcn_station,
                           sub,
                           by=c("station_name",
                                "station_latitude",
                                "station_longitude",
                                "station_altitude",
                                "ncdc_id_code",
                                "time") )
}
remove(sorted_data)
remove(ordered)
# Accumulate every station's table into one long frame.
# NOTE(review): growing via bind_rows inside the loop is O(n^2); collecting
# per-station tables in a list and bind_rows()-ing once after the loop
# would scale better.
if (ncdc_index == 1) {
  ghcn_stations = ghcn_station
} else {
  ghcn_stations = bind_rows(ghcn_stations,
                            ghcn_station)
}
# Persist this station's assembled table as <root>/RData/<title>.Rdata.
# BUG FIX: the call previously read `save(gchn_station = ghcn_station, ...)`.
# save() ignores argument tags on `...` (objects are stored under their
# deparsed names), so the misspelled "gchn_station" tag did nothing except
# mislead -- the object is, and was, saved as "ghcn_station".
save(ghcn_station,
     file = paste(target_data_directory_root,
                  "RData/",
                  file_title_string,
                  ".Rdata",
                  sep=""))
remove(ghcn_station)
# Numeric day-count time axis (days since 1970-01-01) for the netCDF output.
Days_from_1970_01_01 = as.numeric( as.Date(Date) )
# Regularize each available element onto a continuous daily time axis:
# ts() over the raw record, then window() onto the full period of record
# with NA padding (extend = TRUE).  Dates become numeric days-since-1970
# inside ts(), hence the as.numeric(as.Date(...)) window bounds.
if ("TMAX" %in% available_datafields) {
  ts_in = ts(data = tmax_full_field$tmax,
             start = as.Date(tmax_full_field$date[1]),
             end = as.Date(tmax_full_field$date[length(tmax_full_field$date)]),
             deltat = 1,
             ts.eps = getOption("ts.eps")
             )
  tmax = window(x = ts_in,
                start = as.numeric(as.Date(ncdc_start_yymmdd)),
                end = as.numeric(as.Date(ncdc_end_yymmdd)),
                deltat = 1,
                extend = TRUE)
  remove(tmax_full_field,
         ts_in)
}
if ("TMIN" %in% available_datafields) {
  ts_in = ts(data = tmin_full_field$tmin,
             start = as.Date(tmin_full_field$date[1]),
             end = as.Date(tmin_full_field$date[length(tmin_full_field$date)]),
             deltat = 1,
             ts.eps = getOption("ts.eps")
             )
  tmin = window(x = ts_in,
                start = as.numeric(as.Date(ncdc_start_yymmdd)),
                end = as.numeric(as.Date(ncdc_end_yymmdd)),
                deltat = 1,
                extend = TRUE)
  remove(tmin_full_field,
         ts_in)
}
if ("TAVG" %in% available_datafields) {
  ts_in = ts(data = tavg_full_field$tavg,
             start = as.Date(tavg_full_field$date[1]),
             end = as.Date(tavg_full_field$date[length(tavg_full_field$date)]),
             deltat = 1,
             ts.eps = getOption("ts.eps") )
  tavg = window(x = ts_in,
                start = as.numeric(as.Date(ncdc_start_yymmdd)),
                end = as.numeric(as.Date(ncdc_end_yymmdd)),
                deltat = 1,
                extend = TRUE)
  remove(tavg_full_field,
         ts_in)
}
if ("PRCP" %in% available_datafields) {
  ts_in = ts(data = prcp_full_field$prcp,
             start = as.Date(prcp_full_field$date[1]),
             end = as.Date(prcp_full_field$date[length(prcp_full_field$date)]),
             deltat = 1,
             ts.eps = getOption("ts.eps") )
  prcp = window(x = ts_in,
                start = as.numeric(as.Date(ncdc_start_yymmdd)),
                end = as.numeric(as.Date(ncdc_end_yymmdd)),
                deltat = 1,
                extend = TRUE)
  remove(prcp_full_field,
         ts_in)
}
if ("SNOW" %in% available_datafields) {
  ts_in = ts(data = snow_full_field$snow,
             start = as.Date(snow_full_field$date[1]),
             end = as.Date(snow_full_field$date[length(snow_full_field$date)]),
             deltat = 1,
             ts.eps = getOption("ts.eps") )
  snow = window(x = ts_in,
                start = as.numeric(as.Date(ncdc_start_yymmdd)),
                end = as.numeric(as.Date(ncdc_end_yymmdd)),
                deltat = 1,
                extend = TRUE)
  remove(snow_full_field,
         ts_in)
}
if ("SNWD" %in% available_datafields) {
  ts_in = ts(data = snwd_full_field$snwd,
             start = as.Date(snwd_full_field$date[1]),
             end = as.Date(snwd_full_field$date[length(snwd_full_field$date)]),
             deltat = 1,
             ts.eps = getOption("ts.eps") )
  snwd = window(x = ts_in,
                start = as.numeric(as.Date(ncdc_start_yymmdd)),
                end = as.numeric(as.Date(ncdc_end_yymmdd)),
                deltat = 1,
                extend = TRUE)
  remove(snwd_full_field,
         ts_in)
}
if ("WESD" %in% available_datafields) {
  ts_in = ts(data = wesd_full_field$wesd,
             start = as.Date(wesd_full_field$date[1]),
             end = as.Date(wesd_full_field$date[length(wesd_full_field$date)]),
             deltat = 1,
             ts.eps = getOption("ts.eps") )
  wesd = window(x = ts_in,
                start = as.numeric(as.Date(ncdc_start_yymmdd)),
                end = as.numeric(as.Date(ncdc_end_yymmdd)),
                deltat = 1,
                extend = TRUE)
  remove(wesd_full_field,
         ts_in)
}
if ("WESF" %in% available_datafields) {
  ts_in = ts(data = wesf_full_field$wesf,
             start = as.Date(wesf_full_field$date[1]),
             end = as.Date(wesf_full_field$date[length(wesf_full_field$date)]),
             deltat = 1,
             ts.eps = getOption("ts.eps") )
  wesf = window(x = ts_in,
                start = as.numeric(as.Date(ncdc_start_yymmdd)),
                end = as.numeric(as.Date(ncdc_end_yymmdd)),
                deltat = 1,
                extend = TRUE)
  remove(wesf_full_field,
         ts_in)
}
if ("AWND" %in% available_datafields) {
  ts_in = ts(data = awnd_full_field$awnd,
             start = as.Date(awnd_full_field$date[1]),
             end = as.Date(awnd_full_field$date[length(awnd_full_field$date)]),
             deltat = 1,
             ts.eps = getOption("ts.eps") )
  awnd = window(x = ts_in,
                start = as.numeric(as.Date(ncdc_start_yymmdd)),
                end = as.numeric(as.Date(ncdc_end_yymmdd)),
                deltat = 1,
                extend = TRUE)
  remove(awnd_full_field,
         ts_in)
}
if ("AWDR" %in% available_datafields) {
  # AWDR: average daily wind direction -- regularize to a daily ts and
  # window it onto the station's full period of record (NA-padded).
  ts_in = ts(data = awdr_full_field$awdr,
             start = as.Date(awdr_full_field$date[1]),
             end = as.Date(awdr_full_field$date[length(awdr_full_field$date)]),
             deltat = 1,
             ts.eps = getOption("ts.eps") )
  # BUG FIX: this windowed series was previously assigned to `awnd`,
  # clobbering the wind-speed series and leaving `awdr` undefined for the
  # plotting and netCDF-write steps that read it later in the loop.
  awdr = window(x = ts_in,
                start = as.numeric(as.Date(ncdc_start_yymmdd)),
                end = as.numeric(as.Date(ncdc_end_yymmdd)),
                deltat = 1,
                extend = TRUE)
  remove(awdr_full_field,
         ts_in)
}
# Diagnostic per-station plots. This whole block is deliberately disabled:
# the original guard was "1 < 0", which is always FALSE, so none of these
# plots are ever drawn; the guard is written as the explicit constant FALSE
# here. The block is kept for manual re-enabling.
# NOTE(review): the SNWD plot reuses the ylab "Daily Snowfall (mm)" although
# it shows snow *depth* -- confirm the intended label before re-enabling.
if (FALSE) {
# All plots share everything except the series, colour and y label.
plot_field = function(y, col, ylab) {
plot(x = Date,
y = y,
type = "p",
pch = ".", # as points
col = col,
lwd = 1.5,
cex.lab = 1.25,
xlab = "Date",
ylab = ylab,
main = station_name_label)
}
if ("TMAX" %in% available_datafields) plot_field(tmax, "red", "Max Temperature (degC)")
if ("TMIN" %in% available_datafields) plot_field(tmin, "darkgoldenrod1", "Min Temperature (degC)")
if ("TAVG" %in% available_datafields) plot_field(tavg, "orange", "Mean Temperature (degC)")
if ("PRCP" %in% available_datafields) plot_field(prcp, "green", "Daily Precip (mm)")
if ("SNOW" %in% available_datafields) plot_field(snow, "cyan", "Daily Snowfall (mm)")
if ("SNWD" %in% available_datafields) plot_field(snwd, "darkblue", "Daily Snowfall (mm)")
if ("WESD" %in% available_datafields) plot_field(wesd, "blue", "Snow Water Equivalent Depth (mm)")
if ("AWDR" %in% available_datafields) plot_field(awdr, "blue", "Wind Direction (degrees from)")
if ("WESF" %in% available_datafields) plot_field(wesf, "deepskyblue4", "Snowfall Water Equivalent (mm)")
if ("AWND" %in% available_datafields) plot_field(awnd, "deeppink2", "Average Wind Speed (m s-1)")
}
# Assemble a raw daily data frame, one column per field available at this
# station. NOTE(review): the CSV export below is commented out, so this
# frame is currently built and immediately discarded by the remove() at the
# end -- effectively dead work kept for easy re-enabling of the export.
targ_time_series_raw = data.frame(date = Date)
if ("TMAX" %in% available_datafields)
targ_time_series_raw$Max_Temperature = tmax
if ("TMIN" %in% available_datafields)
targ_time_series_raw$Min_Temperature = tmin
if ("TAVG" %in% available_datafields)
targ_time_series_raw$Mean_Temperature = tavg
if ("PRCP" %in% available_datafields)
targ_time_series_raw$Precipitation = prcp
if ("SNOW" %in% available_datafields)
targ_time_series_raw$SnowFall = snow
if ("SNWD" %in% available_datafields)
targ_time_series_raw$SnowDepth = snwd
if ("WESD" %in% available_datafields)
targ_time_series_raw$Snowdepth_Water_Equiv = wesd
if ("WESF" %in% available_datafields)
targ_time_series_raw$Snowfall_Water_Equiv = wesf
if ("AWND" %in% available_datafields)
targ_time_series_raw$Mean_Wind_Speed = awnd
if ("AWDR" %in% available_datafields)
targ_time_series_raw$Mean_Wind_From_Direction = awdr
# output_file_name = paste(file_title_string,
# ".csv",
# sep="")
# write.csv(x = targ_time_series_raw,
# file = output_file_name,
# row.names = FALSE)
remove(targ_time_series_raw)
# Destination NetCDF path: <root>/netCDF/<station title>.nc.
# paste0() replaces paste(..., sep = "") -- identical result, idiomatic form.
netcdf_output_file_name = paste0(target_data_directory_root,
"netCDF/",
file_title_string,
".nc")
# NetCDF dimensions: unlimited daily time axis (days since the Unix epoch),
# a string-length dimension sized to the station title, and a 2-element
# bounds dimension for per-day time-cell bounds.
netcdf_time_dim = ncdim_def(name = "time",
units = "days since 1970-01-01 00:00:00",
val = Days_from_1970_01_01,
unlim = TRUE,
calendar="standard")
# create_dimvar = FALSE: purely structural dimensions, no coordinate variable.
netcdf_name_dim = ncdim_def(name = "name_strlen",
units = "",
val = 1:nchar(file_title_string),
unlim = FALSE,
create_dimvar=FALSE)
netcdf_bounds_dim = ncdim_def(name = "bnds",
units = "",
val = 1:2,
unlim = FALSE,
create_dimvar = FALSE)
# Standard NetCDF default fill values for 4-byte float and 8-byte double.
fill_value = 9.96921e+36
fill_value_double = 9.969209968386869e+36
# Per-station metadata variables (scalar lat/lon/alt, station name string)
# and the time-bounds variable. FIX: the first argument of ncvar_def was
# previously spelled "nam", which only worked through R's partial argument
# matching against ncvar_def(name = ...); it is now written out in full.
netcdf_stn = ncvar_def(name = "station_name",
units = "",
dim = netcdf_name_dim,
longname = "station name",
prec = "char")
netcdf_lat = ncvar_def(name = "latitude",
units = "degrees_north",
dim = list(),
longname = "Latitude",
prec = "single")
netcdf_lon = ncvar_def(name = "longitude",
units = "degrees_east",
dim = list(),
longname = "Longitude",
prec = "single")
netcdf_alt = ncvar_def(name = "altitude",
units = "m",
dim = list(),
longname = "Elevation",
prec = "single")
# CF time-cell bounds: each daily cell spans [day, day + 1).
bnds = 1:2
time_bounds = array( 0,
dim = c(2,length(Days_from_1970_01_01)),
dimnames = list(bnds,Days_from_1970_01_01))
time_bounds[1,] = Days_from_1970_01_01
time_bounds[2,] = Days_from_1970_01_01 + 1
netcdf_time_bounds = ncvar_def(name = "time_bnds",
units = "days since 1970-01-01 00:00:00",
dim = list(netcdf_bounds_dim,
netcdf_time_dim),
longname = "Time Bounds",
prec = "double")
# Running list of every variable to create in the output file.
netcdf_available_variables = list(netcdf_time_bounds,
netcdf_lat,
netcdf_lon,
netcdf_alt,
netcdf_stn)
# Optional measured variables: define one NetCDF variable per GHCN field
# present at this station and append it to the variable list.
# FIXES: "nam" relied on partial matching of ncvar_def(name = ...) and is
# now spelled out; rlist::list.append is replaced by the equivalent base-R
# c(x, list(v)), removing the rlist dependency from this section.
# ("Minimium" below is kept as-is to match the description attribute
# written later in this script.)
if ("TMAX" %in% available_datafields) {
netcdf_tmax = ncvar_def(name = "maximum_air_temperature",
units = "degC",
dim = netcdf_time_dim,
missval = fill_value,
longname = "2-m Maximum Daily Air Temperature",
prec = "single")
netcdf_available_variables = c(netcdf_available_variables, list(netcdf_tmax))
}
if ("TMIN" %in% available_datafields) {
netcdf_tmin = ncvar_def(name = "minimum_air_temperature",
units = "degC",
dim = netcdf_time_dim,
missval = fill_value,
longname = "2-m Minimium Daily Air Temperature",
prec = "single")
netcdf_available_variables = c(netcdf_available_variables, list(netcdf_tmin))
}
if ("TAVG" %in% available_datafields) {
netcdf_tavg = ncvar_def(name = "mean_air_temperature",
units = "degC",
dim = netcdf_time_dim,
missval = fill_value,
longname = "2-m Mean Daily Air Temperature",
prec = "single")
netcdf_available_variables = c(netcdf_available_variables, list(netcdf_tavg))
}
if ("PRCP" %in% available_datafields) {
netcdf_prcp = ncvar_def(name = "precipitation_amount",
units = "kg m-2",
dim = netcdf_time_dim,
missval = fill_value,
longname = "Daily Total Precipitation",
prec = "single")
netcdf_available_variables = c(netcdf_available_variables, list(netcdf_prcp))
}
if ("SNOW" %in% available_datafields) {
netcdf_snow = ncvar_def(name = "thickness_of_snowfall_amount",
units = "m",
dim = netcdf_time_dim,
missval = fill_value,
longname = "Daily Total Snowfall",
prec = "single")
netcdf_available_variables = c(netcdf_available_variables, list(netcdf_snow))
}
if ("SNWD" %in% available_datafields) {
netcdf_snwd = ncvar_def(name = "surface_snow_thickness",
units = "m",
dim = netcdf_time_dim,
missval = fill_value,
longname = "Snow Depth on Surface",
prec = "single")
netcdf_available_variables = c(netcdf_available_variables, list(netcdf_snwd))
}
if ("WESD" %in% available_datafields) {
netcdf_wesd = ncvar_def(name = "liquid_water_content_of_surface_snow",
units = "kg m-2",
dim = netcdf_time_dim,
missval = fill_value,
longname = "Liquid Snow Water Equivalent Depth on Surface",
prec = "single")
netcdf_available_variables = c(netcdf_available_variables, list(netcdf_wesd))
}
if ("WESF" %in% available_datafields) {
netcdf_wesf = ncvar_def(name = "liquid_water_equivalent_snowfall_amount",
units = "kg m-2",
dim = netcdf_time_dim,
missval = fill_value,
longname = "Liquid Snowfall Water Equivalent Depth on Surface",
prec = "single")
netcdf_available_variables = c(netcdf_available_variables, list(netcdf_wesf))
}
if ("AWND" %in% available_datafields) {
netcdf_awnd = ncvar_def(name = "mean_wind_speed",
units = "m s-1",
dim = netcdf_time_dim,
missval = fill_value,
longname = "Mean Daily Wind Speed",
prec = "single")
netcdf_available_variables = c(netcdf_available_variables, list(netcdf_awnd))
}
if ("AWDR" %in% available_datafields) {
netcdf_awdr = ncvar_def(name = "mean_wind_from_direction",
units = "degrees_from",
dim = netcdf_time_dim,
missval = fill_value,
longname = "Mean Daily Wind Origin Direction",
prec = "single")
netcdf_available_variables = c(netcdf_available_variables, list(netcdf_awdr))
}
# Create the station's NetCDF file with every defined variable, then attach
# the global and coordinate-variable attributes (CF-1.6 discrete sampling
# geometry for a point time series). The condensed ncatt_put calls below
# rely on its documented defaults (prec = NA, verbose = FALSE,
# definemode = FALSE), which are exactly the values previously spelled out;
# call order -- and therefore attribute order in the file -- is unchanged.
nc_ghcn = nc_create(filename = netcdf_output_file_name,
vars = netcdf_available_variables,
force_v4 = FALSE,
verbose = FALSE )
# Global attributes (varid = 0).
ncatt_put(nc_ghcn, 0, "Title", paste0("NCEI Data Hourly Output for ", station_name_label))
ncatt_put(nc_ghcn, 0, "GHCN_Station_Code", ncdc_id_code)
ncatt_put(nc_ghcn, 0, "Station_Name", station_name_label)
ncatt_put(nc_ghcn, 0, "Station_Latitude", station_latitude)
ncatt_put(nc_ghcn, 0, "Station_Longitude", station_longitude)
ncatt_put(nc_ghcn, 0, "Station_Elevation_in_Meters", station_altitude)
ncatt_put(nc_ghcn, 0, "featureType", "timeSeries")
ncatt_put(nc_ghcn, 0, "Conventions", "CF-1.6")
# Station-name variable: the CF timeseries_id of this feature.
ncatt_put(nc_ghcn, netcdf_stn, "description", "station name")
ncatt_put(nc_ghcn, netcdf_stn, "cf_role", "timeseries_id")
# Altitude coordinate.
ncatt_put(nc_ghcn, netcdf_alt, "standard_name", "altitude")
ncatt_put(nc_ghcn, netcdf_alt, "axis", "Z")
ncatt_put(nc_ghcn, netcdf_alt, "positive", "up")
ncatt_put(nc_ghcn, netcdf_alt, "description", "Elevation")
# Longitude coordinate.
ncatt_put(nc_ghcn, netcdf_lon, "standard_name", "longitude")
ncatt_put(nc_ghcn, netcdf_lon, "axis", "X")
ncatt_put(nc_ghcn, netcdf_lon, "description", "Longitude")
# Latitude coordinate.
ncatt_put(nc_ghcn, netcdf_lat, "standard_name", "latitude")
ncatt_put(nc_ghcn, netcdf_lat, "description", "Latitude")
ncatt_put(nc_ghcn, netcdf_lat, "axis", "Y")
# Time coordinate and its cell bounds.
ncatt_put(nc_ghcn, "time", "description", "time")
ncatt_put(nc_ghcn, "time", "bounds", "time_bnds")
ncatt_put(nc_ghcn, "time", "axis", "T")
ncatt_put(nc_ghcn, netcdf_time_bounds, "description", "Time Bounds")
ncatt_put(nc_ghcn, netcdf_time_bounds, "standard_name", "time")
# Per-field CF attributes; each block fires only when that GHCN field exists
# at this station. The condensed ncatt_put calls rely on its defaults
# (prec = NA, verbose = FALSE, definemode = FALSE), identical to the values
# previously spelled out; attribute order is unchanged. All attribute
# strings are reproduced verbatim (including "Minimium" [sic]).
if ("TMAX" %in% available_datafields) {
ncatt_put(nc_ghcn, netcdf_tmax, "standard_name", "air_temperature")
ncatt_put(nc_ghcn, netcdf_tmax, "cell_methods", "time: maximum")
ncatt_put(nc_ghcn, netcdf_tmax, "description", "2-m Maximum Daily Air Temperature")
ncatt_put(nc_ghcn, netcdf_tmax, "coordinates", "time latitude longitude altitude station_name")
}
if ("TMIN" %in% available_datafields) {
ncatt_put(nc_ghcn, netcdf_tmin, "standard_name", "air_temperature")
ncatt_put(nc_ghcn, netcdf_tmin, "cell_methods", "time: minimum")
ncatt_put(nc_ghcn, netcdf_tmin, "description", "2-m Minimium Daily Air Temperature")
ncatt_put(nc_ghcn, netcdf_tmin, "coordinates", "time latitude longitude altitude station_name")
}
if ("TAVG" %in% available_datafields) {
ncatt_put(nc_ghcn, netcdf_tavg, "standard_name", "air_temperature")
ncatt_put(nc_ghcn, netcdf_tavg, "cell_methods", "time: mean")
ncatt_put(nc_ghcn, netcdf_tavg, "description", "2-m Mean Daily Air Temperature")
ncatt_put(nc_ghcn, netcdf_tavg, "coordinates", "time latitude longitude altitude station_name")
}
if ("PRCP" %in% available_datafields) {
ncatt_put(nc_ghcn, netcdf_prcp, "standard_name", "precipitation_amount")
ncatt_put(nc_ghcn, netcdf_prcp, "cell_methods", "time: sum")
ncatt_put(nc_ghcn, netcdf_prcp, "description", "Daily Total Precipitation")
ncatt_put(nc_ghcn, netcdf_prcp, "coordinates", "time latitude longitude altitude station_name")
}
if ("SNOW" %in% available_datafields) {
ncatt_put(nc_ghcn, netcdf_snow, "standard_name", "thickness_of_snowfall_amount")
ncatt_put(nc_ghcn, netcdf_snow, "cell_methods", "time: sum")
ncatt_put(nc_ghcn, netcdf_snow, "description", "Daily Total Snowfall")
ncatt_put(nc_ghcn, netcdf_snow, "coordinates", "time latitude longitude altitude station_name")
}
if ("SNWD" %in% available_datafields) {
ncatt_put(nc_ghcn, netcdf_snwd, "standard_name", "surface_snow_thickness")
ncatt_put(nc_ghcn, netcdf_snwd, "cell_methods", "time: point")
ncatt_put(nc_ghcn, netcdf_snwd, "description", "Snow Depth on Surface")
ncatt_put(nc_ghcn, netcdf_snwd, "coordinates", "time latitude longitude altitude station_name")
}
if ("WESD" %in% available_datafields) {
ncatt_put(nc_ghcn, netcdf_wesd, "standard_name", "liquid_water_content_of_surface_snow")
ncatt_put(nc_ghcn, netcdf_wesd, "cell_methods", "time: point")
ncatt_put(nc_ghcn, netcdf_wesd, "description", "Liquid Snow Water Equivalent Depth on Surface")
ncatt_put(nc_ghcn, netcdf_wesd, "coordinates", "time latitude longitude altitude station_name")
}
# CF attributes for the snowfall water equivalent variable.
# BUG FIX: the guard previously tested "WSEF" -- a transposition of the
# GHCN field code "WESF" used everywhere else in this script -- so these
# attributes were never written even when the field was present.
if ("WESF" %in% available_datafields) {
ncatt_put(nc = nc_ghcn,
varid = netcdf_wesf,
attname = "standard_name",
attval = "snowfall_amount",
prec = NA,
verbose = FALSE,
definemode = FALSE )
ncatt_put(nc = nc_ghcn,
varid = netcdf_wesf,
attname = "cell_methods",
attval = "time: sum",
prec = NA,
verbose = FALSE,
definemode = FALSE )
ncatt_put(nc = nc_ghcn,
varid = netcdf_wesf,
attname = "description",
attval = "Liquid Snowfall Water Equivalent",
prec = NA,
verbose = FALSE,
definemode = FALSE )
ncatt_put(nc = nc_ghcn,
varid = netcdf_wesf,
attname = "coordinates",
attval = "time latitude longitude altitude station_name",
prec = NA,
verbose = FALSE,
definemode = FALSE )
}
# CF attributes for the wind variables (condensed ncatt_put calls using its
# defaults prec = NA, verbose = FALSE, definemode = FALSE, exactly as the
# previous expanded form; order unchanged).
if ("AWND" %in% available_datafields) {
ncatt_put(nc_ghcn, netcdf_awnd, "standard_name", "wind_speed")
ncatt_put(nc_ghcn, netcdf_awnd, "cell_methods", "time: mean")
ncatt_put(nc_ghcn, netcdf_awnd, "description", "Mean Daily Wind Speed")
ncatt_put(nc_ghcn, netcdf_awnd, "coordinates", "time latitude longitude altitude station_name")
}
if ("AWDR" %in% available_datafields) {
ncatt_put(nc_ghcn, netcdf_awdr, "standard_name", "wind_from_direction")
ncatt_put(nc_ghcn, netcdf_awdr, "cell_methods", "time: mean")
ncatt_put(nc_ghcn, netcdf_awdr, "description", "Mean Daily Wind Origin Direction")
ncatt_put(nc_ghcn, netcdf_awdr, "coordinates", "time latitude longitude altitude station_name")
}
# Write the time bounds and station coordinates. The condensed ncvar_put
# calls use its default verbose = FALSE, matching the previous expanded
# form; the put/remove order is unchanged.
ncvar_put(nc_ghcn, netcdf_time_bounds, time_bounds)
ncvar_put(nc_ghcn, netcdf_lat, station_latitude)
remove(netcdf_lat,
station_latitude)
ncvar_put(nc_ghcn, netcdf_lon, station_longitude)
remove(netcdf_lon,
station_longitude)
ncvar_put(nc_ghcn, netcdf_alt, station_altitude)
ncvar_put(nc_ghcn, netcdf_stn, file_title_string)
remove(netcdf_alt,
station_altitude)
# Write each available field's daily values and free the series afterwards.
# Condensed ncvar_put calls (default verbose = FALSE); SNOW and SNWD are
# converted from GHCN millimetres to the file's metres, as before.
if ("TMAX" %in% available_datafields) {
ncvar_put(nc_ghcn, netcdf_tmax, tmax)
remove(netcdf_tmax,
tmax)
}
if ("TMIN" %in% available_datafields) {
ncvar_put(nc_ghcn, netcdf_tmin, tmin)
remove(netcdf_tmin,
tmin)
}
if ("TAVG" %in% available_datafields) {
ncvar_put(nc_ghcn, netcdf_tavg, tavg)
remove(netcdf_tavg,
tavg)
}
if ("PRCP" %in% available_datafields) {
ncvar_put(nc_ghcn, netcdf_prcp, prcp)
remove(netcdf_prcp,
prcp)
}
if ("SNOW" %in% available_datafields) {
ncvar_put(nc_ghcn, netcdf_snow, (snow/1000.0)) # mm -> m
remove(netcdf_snow,
snow)
}
if ("SNWD" %in% available_datafields) {
ncvar_put(nc_ghcn, netcdf_snwd, (snwd/1000.0)) # mm -> m
remove(netcdf_snwd,
snwd)
}
if ("WESD" %in% available_datafields) {
ncvar_put(nc_ghcn, netcdf_wesd, wesd)
remove(netcdf_wesd,
wesd)
}
if ("WESF" %in% available_datafields) {
ncvar_put(nc_ghcn, netcdf_wesf, wesf)
remove(netcdf_wesf,
wesf)
}
if ("AWND" %in% available_datafields) {
ncvar_put(nc_ghcn, netcdf_awnd, awnd)
remove(netcdf_awnd,
awnd)
}
if ("AWDR" %in% available_datafields) {
ncvar_put(nc_ghcn, netcdf_awdr, awdr)
remove(netcdf_awdr,
awdr)
}
# Flush and close the NetCDF file, then clear all per-station state before
# the next iteration.
# NOTE(review): "output_file_name" is only created by the commented-out
# write.csv block above; if it is not defined elsewhere, this rm() emits an
# "object not found" warning on every station -- confirm and drop the name
# if so.
nc_close( nc_ghcn )
remove(nc_ghcn,
netcdf_time_dim)
remove(Date,
Days_from_1970_01_01,
available_datafields,
file_title_string,
filename_ghcn_label,
filename_station_label,
fill_value,
ghcn_station_code,
ncdc_end_yymmdd,
ncdc_start_yymmdd,
netcdf_available_variables,
output_file_name,
ncdc_id_code,
netcdf_output_file_name,
station_name_label)
}
|
/VectorManipulation/Vdecode/Vdecode.r | no_license | selectedacre/Max_Objects | R | false | false | 505 | r | ||
# Libraries Used
library(dplyr)
# Read supporting Metadata: download and unpack the UCI HAR Dataset.
# NOTE(review): method = "curl" requires the curl binary (not available on
# stock Windows) -- confirm the target platform.
if(!file.exists("./data")) {dir.create("./data")}
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl, destfile = "./data/Dataset.zip", method = "curl")
unzip(zipfile = "./data/Dataset.zip", exdir = "./data")
path_rf<- file.path("./data" , "UCI HAR Dataset")
files <- list.files(path_rf, recursive = TRUE)
# Format training and test data sets
# BUG FIX: the archive is unpacked under ./data, so the bare relative paths
# "UCI HAR Dataset/..." used previously did not exist; all reads now go
# through path_rf. Also fixed the lowercase "x_train.txt"/"x_test.txt" --
# the dataset files are named X_train.txt / X_test.txt (case-sensitive).
# Read training data
subjectTrain <- read.table(file.path(path_rf, "train", "subject_train.txt"),
header = FALSE)
activityTrain <- read.table(file.path(path_rf, "train", "y_train.txt"),
header = FALSE)
featureTrain <- read.table(file.path(path_rf, "train", "X_train.txt"),
header = FALSE)
# Read test data
subjectTest <- read.table(file.path(path_rf, "test", "subject_test.txt"),
header = FALSE)
activityTest <- read.table(file.path(path_rf, "test", "y_test.txt"),
header = FALSE)
featureTest <- read.table(file.path(path_rf, "test", "X_test.txt"),
header = FALSE)
# Part 1 - Merge the training and the test set to create one data set
# Concatenate the data tables by rows
subject <- rbind(subjectTrain, subjectTest)
activity <- rbind(activityTrain, activityTest)
features <- rbind(featureTrain, featureTest)
# Merge the data
colnames(activity) <- c("Activity")
colnames(subject) <- c("Subject")
# BUG FIX: features.txt was read into "completeData" and then two undefined
# objects ("dataFeatures", "dataFeaturesNames") were referenced with the
# wrong-case column "$v2". The names file labels the columns of "features";
# "completeData" is the fully merged measurement set.
dataFeaturesNames <- read.table(file.path(path_rf, "features.txt"), header = FALSE)
colnames(features) <- dataFeaturesNames$V2
dataCombine <- cbind(subject, activity)
completeData <- cbind(features, dataCombine)
data <- completeData
# Part 2 - Extracts only the measurements on the mean and standard deviation
# for each measurement
# columnsWithMeanSTD holds the column *indices* of mean()/std() features;
# 562 and 563 are the appended Subject/Activity columns.
columnsWithMeanSTD <- grep("mean\\(\\)|std\\(\\)", dataFeaturesNames$V2)
requiredcolumns <- c(columnsWithMeanSTD, 562, 563)
extractedData <- completeData[, requiredcolumns]
# Subset the data frame by selected names of features
selectdNames <- c(as.character(dataFeaturesNames$V2[columnsWithMeanSTD]), "Activity", "Subject")
Data <- subset(completeData, select = selectdNames)
# Part 3 - Uses descriptive activity names to name the activities in the data set
# NOTE(review): this read clobbers "extractedData" (the mean/std subset built
# above) with the activity-label lookup table; it should almost certainly go
# into its own object (e.g. activityLabels). Confirm against the rest of the
# script before renaming.
extractedData <- read.table(file.path(path_rf, "activity_labels.txt"), header =
FALSE)
| /run_analysis.R | no_license | Ifiri800/Getting_and_Cleaning_Data | R | false | false | 2,435 | r | # Libraries Used
library(dplyr)
# Read supporting Metadata: download and unpack the UCI HAR Dataset.
# NOTE(review): method = "curl" requires the curl binary (not available on
# stock Windows) -- confirm the target platform.
if(!file.exists("./data")) {dir.create("./data")}
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl, destfile = "./data/Dataset.zip", method = "curl")
unzip(zipfile = "./data/Dataset.zip", exdir = "./data")
path_rf<- file.path("./data" , "UCI HAR Dataset")
files <- list.files(path_rf, recursive = TRUE)
# Format training and test data sets
# BUG FIX: the archive is unpacked under ./data, so the bare relative paths
# "UCI HAR Dataset/..." used previously did not exist; all reads now go
# through path_rf. Also fixed the lowercase "x_train.txt"/"x_test.txt" --
# the dataset files are named X_train.txt / X_test.txt (case-sensitive).
# Read training data
subjectTrain <- read.table(file.path(path_rf, "train", "subject_train.txt"),
header = FALSE)
activityTrain <- read.table(file.path(path_rf, "train", "y_train.txt"),
header = FALSE)
featureTrain <- read.table(file.path(path_rf, "train", "X_train.txt"),
header = FALSE)
# Read test data
subjectTest <- read.table(file.path(path_rf, "test", "subject_test.txt"),
header = FALSE)
activityTest <- read.table(file.path(path_rf, "test", "y_test.txt"),
header = FALSE)
featureTest <- read.table(file.path(path_rf, "test", "X_test.txt"),
header = FALSE)
# Part 1 - Merge the training and the test set to create one data set
# Concatenate the data tables by rows
subject <- rbind(subjectTrain, subjectTest)
activity <- rbind(activityTrain, activityTest)
features <- rbind(featureTrain, featureTest)
# Merge the data
colnames(activity) <- c("Activity")
colnames(subject) <- c("Subject")
# BUG FIX: features.txt was read into "completeData" and then two undefined
# objects ("dataFeatures", "dataFeaturesNames") were referenced with the
# wrong-case column "$v2". The names file labels the columns of "features";
# "completeData" is the fully merged measurement set.
dataFeaturesNames <- read.table(file.path(path_rf, "features.txt"), header = FALSE)
colnames(features) <- dataFeaturesNames$V2
dataCombine <- cbind(subject, activity)
completeData <- cbind(features, dataCombine)
data <- completeData
# Part 2 - Extracts only the measurements on the mean and standard deviation
# for each measurement
# columnsWithMeanSTD holds the column *indices* of mean()/std() features;
# 562 and 563 are the appended Subject/Activity columns.
columnsWithMeanSTD <- grep("mean\\(\\)|std\\(\\)", dataFeaturesNames$V2)
requiredcolumns <- c(columnsWithMeanSTD, 562, 563)
extractedData <- completeData[, requiredcolumns]
# Subset the data frame by selected names of features
selectdNames <- c(as.character(dataFeaturesNames$V2[columnsWithMeanSTD]), "Activity", "Subject")
Data <- subset(completeData, select = selectdNames)
# Part 3 - Uses descriptive activity names to name the activities in the data set
# NOTE(review): this read clobbers "extractedData" (the mean/std subset built
# above) with the activity-label lookup table; it should almost certainly go
# into its own object (e.g. activityLabels). Confirm against the rest of the
# script before renaming.
extractedData <- read.table(file.path(path_rf, "activity_labels.txt"), header =
FALSE)
|
plot3 <- function() {
# Reproduce plot3.png: sub-metering channels 1-3 vs. time for the two days
# 2007-02-01 and 2007-02-02 of the UCI household power consumption data.
raw.data<-read.table("./household_power_consumption.txt",header=TRUE,sep=";",na.strings = c("?"))
raw.data$Date<-strptime(raw.data$Date,format="%d/%m/%Y")
data<-raw.data[raw.data$Date == "2007-02-01" | raw.data$Date == "2007-02-02",]
data<-data[!is.na(data$Date),]
# BUG FIX: data$DateTime was referenced by every plot/lines call below but
# never created, so they all received NULL x values. Build the combined
# date-time column before Time is reformatted to a plain string.
data$DateTime<-as.POSIXct(paste(format(data$Date, "%Y-%m-%d"), data$Time))
data$Time<-strptime(data$Time,format="%H:%M:%S")
data$Time<-format(data$Time, "%H:%M:%OS")
png(filename = "plot3.png",width = 480, height = 480, units = "px")
plot(data$DateTime,data$Sub_metering_1,type="l", ylab="Energy sub metering", xlab= "")
lines(data$DateTime,data$Sub_metering_2,col="Red")
lines(data$DateTime,data$Sub_metering_3,col="Blue")
legend("topright",lty=1,col=c("Black","Red","Blue"),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
dev.off()
} | /plot3.R | no_license | ZhannaYurchuk/ExData_Plotting1 | R | false | false | 893 | r | plot3 <- function() {
# Reproduce plot3.png: sub-metering channels 1-3 vs. time for the two days
# 2007-02-01 and 2007-02-02 of the UCI household power consumption data.
raw.data<-read.table("./household_power_consumption.txt",header=TRUE,sep=";",na.strings = c("?"))
raw.data$Date<-strptime(raw.data$Date,format="%d/%m/%Y")
data<-raw.data[raw.data$Date == "2007-02-01" | raw.data$Date == "2007-02-02",]
data<-data[!is.na(data$Date),]
# BUG FIX: data$DateTime was referenced by every plot/lines call below but
# never created, so they all received NULL x values. Build the combined
# date-time column before Time is reformatted to a plain string.
data$DateTime<-as.POSIXct(paste(format(data$Date, "%Y-%m-%d"), data$Time))
data$Time<-strptime(data$Time,format="%H:%M:%S")
data$Time<-format(data$Time, "%H:%M:%OS")
png(filename = "plot3.png",width = 480, height = 480, units = "px")
plot(data$DateTime,data$Sub_metering_1,type="l", ylab="Energy sub metering", xlab= "")
lines(data$DateTime,data$Sub_metering_2,col="Red")
lines(data$DateTime,data$Sub_metering_3,col="Blue")
legend("topright",lty=1,col=c("Black","Red","Blue"),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
dev.off()
}
# Usage example extracted from the odds.converter package Rd file.
library(odds.converter)
### Name: odds.prob2dec
### Title: Convert Probabilities to Decimal odds
### Aliases: odds.prob2dec
### ** Examples
# Convert win probabilities 0.5 and 0.6 to decimal odds (presumably 1/p,
# i.e. 2.0 and ~1.667 -- confirm in the package documentation).
odds.prob2dec(c(0.5,0.6))
| /data/genthat_extracted_code/odds.converter/examples/odds.prob2dec.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 173 | r | library(odds.converter)
### Name: odds.prob2dec
### Title: Convert Probabilities to Decimal odds
### Aliases: odds.prob2dec
### ** Examples
odds.prob2dec(c(0.5,0.6))
|
# hw4_oop5
#'
# In this exercise, we will create a class `shakeshack_order`.
#'
# 1. Create a constructor `new_shakeshack_order(names, prices)` that:
# - takes in a vector of `names`
# - a vector of `price` attribute whose type is double.
# - instanciates an object of class `shakeshack_order` using `structure`.
# - and it should be a list with 2 elements: `names` and `prices`.
# Note: Use `stopifnot` to check the input.
# Use `new_shakeshack_order(names, prices)` to create a helper function `shakeshack_order`
# that coerces the arguments `names` and `prices` respectively to string and numeric
# using `as.character` and `as.double`.
## Do not modify this line!
new_shakeshack_order<-function(names, prices){
# Low-level constructor: tag the two parallel vectors with class
# "shakeshack_order". The spec above asks the constructor to validate its
# input; previously only "prices" was checked, so the item names are now
# validated as well (the shakeshack_order() helper coerces before calling
# here, so correct callers are unaffected).
stopifnot(is.character(names), is.double(prices))
structure(list(names=names,prices=prices),class="shakeshack_order")
}
shakeshack_order<-function(names, prices){
# User-facing helper: coerce both arguments to the expected types, then
# delegate to the low-level constructor.
new_shakeshack_order(as.character(names), as.double(prices))
}
# 2. Write a new `sum(..., na.rm = FALSE)` method for the class `shakeshack_order` that
# returns the sum of the prices in a given order. Note that:
# - the `sum` generic can take more than one argument via `...`, and you can capture
# it using `list(...)`.
# - the `na.rm` argument should be used to provide a way to sum
# when some prices are not available.
# For instance, the following code should work without error:
# ```
# o <- shakeshack_order(c("shack burger", "fries"), c(5, 2))
# o2 <- shakeshack_order(c("fries", "coke"), c(2, NA))
# sum(o)
# sum(o, o2)
# sum(o, o2, na.rm = TRUE)
# ```
# The first sum should be equal to 7, the second to `NA`, and the third to 9.
# Do NOT use a `for`, `while` or `repeat` loop!
# (Hint: a nice solution could use a combination of `map` and `reduce`.)
## Do not modify this line!
library(purrr)
sum.shakeshack_order<-function(..., na.rm = FALSE){
# Total price across one or more shakeshack_order objects: sum each order's
# prices (honouring na.rm), then add the per-order totals. With
# na.rm = FALSE an NA price propagates to the grand total, as before.
# FIXES: the original compared "na.rm == T" (T is a reassignable alias --
# always use TRUE/FALSE) and duplicated the whole expression across the two
# branches; plain base R replaces the purrr map/reduce pipeline, so this
# method no longer needs library(purrr).
order_totals <- vapply(list(...), function(o) sum(o[[2]], na.rm = na.rm), numeric(1))
sum(order_totals)
}
# 3. Write a new `print` method for the class `shakeshack_order` that prints
# `"Your order is <names>. The total price is sum(<prices>)."` using `print`.
# If `length(names)` is larger than one (e.g., 3), the function should print
# `"Your order is <names[1]>, <names[2]>, <names[3]>. The total price is sum(<prices>)."`
# For instance, printing the order `o` describe above should output
# `"Your order is shack burger. The total price is $5.29."`.
# Note that:
# - The `print` method should return the input invisibly.
# - The arguments of print are `x` and `...`, but `...` won't be used in the
# body of `print.shakeshack_order`.
## Do not modify this line!
print.shakeshack_order<-function(x,...){
# Human-readable order summary; the object is returned invisibly so the
# method composes cleanly.
item_list <- toString(x[[1]])
print(sprintf("Your order is %s. The total price is $%s.", item_list, sum(x)))
invisible(x)
}
# 4. Now, you need to create a combine operator for the class `shakeshack_order`.
# For example, `c(o, o2)` should equal
# `shakeshack_order(names = c('shack burger', 'fries', 'fries', 'coke'), prices = c(5, 2, 2, NA))`.
# Similarly as for `sum.shakeshack_order`, the `...` argument of `c.shakeshack_order`
# can be captured using `list(...)`.
# Do NOT use a `for`, `while` or `repeat` loop!
# (Hint: a nice solution could use a combination of `map2` and `reduce`.)
#'
## Do not modify this line!
c.shakeshack_order<-function(...){
# Combine several orders into one: concatenate the name vectors and the
# price vectors across all arguments, then rebuild through the helper so
# the usual coercions apply.
orders <- list(...)
all_names <- unlist(lapply(orders, function(o) o[[1]]))
all_prices <- unlist(lapply(orders, function(o) o[[2]]))
shakeshack_order(all_names, all_prices)
}
| /Lecture 4/hw4_oop5.R | no_license | Zijie-Xia/GR5206-Introduction-to-Data-Science | R | false | false | 3,480 | r | # hw4_oop4
#'
# In this exercise, we will create a class `shakeshack_order`.
#'
# 1. Create a constructor `new_shakeshack_order(names, prices)` that:
# - takes in a vector of `names`
# - a vector of `price` attribute whose type is double.
# - instanciates an object of class `shakeshack_order` using `structure`.
# - and it should be a list with 2 elements: `names` and `prices`.
# Note: Use `stopifnot` to check the input.
# Use `new_shakeshack_order(names, prices)` to create a helper function `shakeshack_order`
# that coerces the arguments `names` and `prices` respectively to string and numeric
# using `as.character` and `as.double`.
## Do not modify this line!
new_shakeshack_order<-function(names, prices){
# Low-level constructor: tag the two parallel vectors with class
# "shakeshack_order". The spec above asks the constructor to validate its
# input; previously only "prices" was checked, so the item names are now
# validated as well (the shakeshack_order() helper coerces before calling
# here, so correct callers are unaffected).
stopifnot(is.character(names), is.double(prices))
structure(list(names=names,prices=prices),class="shakeshack_order")
}
shakeshack_order<-function(names, prices){
# User-facing helper: coerce both arguments to the expected types, then
# delegate to the low-level constructor.
new_shakeshack_order(as.character(names), as.double(prices))
}
# 2. Write a new `sum(..., na.rm = FALSE)` method for the class `shakeshack_order` that
# returns the sum of the prices in a given order. Note that:
# - the `sum` generic can take more than one argument via `...`, and you can capture
# it using `list(...)`.
# - the `na.rm` argument should be used to provide a way to sum
# when some prices are not available.
# For instance, the following code should work without error:
# ```
# o <- shakeshack_order(c("shack burger", "fries"), c(5, 2))
# o2 <- shakeshack_order(c("fries", "coke"), c(2, NA))
# sum(o)
# sum(o, o2)
# sum(o, o2, na.rm = TRUE)
# ```
# The first sum should be equal to 7, the second to `NA`, and the third to 9.
# Do NOT use a `for`, `while` or `repeat` loop!
# (Hint: a nice solution could use a combination of `map` and `reduce`.)
## Do not modify this line!
library(purrr)
sum.shakeshack_order <- function(..., na.rm = FALSE) {
  # Sum the prices of one or more shakeshack_order objects.
  #
  # Args:
  #   ...   : shakeshack_order objects (lists with `names` and `prices`).
  #   na.rm : drop NA prices before summing (default FALSE, matching sum()).
  # Returns: a single numeric total (NA when a price is NA and na.rm = FALSE).
  #
  # Rewritten in base R: the original duplicated the two na.rm branches,
  # compared with `na.rm==T` (T is reassignable; use TRUE), and pulled in
  # purrr for what is a plain sum. Also returns 0 for zero orders, like sum().
  orders <- list(...)
  sum(unlist(lapply(orders, function(o) o$prices)), na.rm = na.rm)
}
# 3. Write a new `print` method for the class `shakeshack_order` that prints
# `"Your order is <names>. The total price is sum(<prices>)."` using `print`.
# If `length(names)` is larger than one (e.g., 3), the function should print
# `"Your order is <names[1]>, <names[2]>, <names[3]>. The total price is sum(<prices>)."`
# For instance, printing the order `o` describe above should output
# `"Your order is shack burger. The total price is $5.29."`.
# Note that:
# - The `print` method should return the input invisibly.
# - The arguments of print are `x` and `...`, but `...` won't be used in the
# body of `print.shakeshack_order`.
## Do not modify this line!
print.shakeshack_order <- function(x, ...) {
  # One-line summary: comma-separated item names plus the total price
  # (sum() dispatches to sum.shakeshack_order). Returns `x` invisibly.
  items <- toString(x[[1]])
  msg <- paste0("Your order is ", items, ". The total price is $", sum(x), ".")
  print(msg)
  invisible(x)
}
# 4. Now, you need to create a combine operator for the class `shakeshack_order`.
# For example, `c(o, o2)` should equal
# `shakeshack_order(names = c('shack burger', 'fries', 'fries', 'coke'), prices = c(5, 2, 2, NA))`.
# Similarly as for `sum.shakeshack_order`, the `...` argument of `c.shakeshack_order`
# can be captured using `list(...)`.
# Do NOT use a `for`, `while` or `repeat` loop!
# (Hint: a nice solution could use a combination of `map2` and `reduce`.)
#'
## Do not modify this line!
c.shakeshack_order <- function(...) {
  # Merge any number of orders into a single order. `map2(a, b, c)`
  # concatenates matching components (names with names, prices with prices);
  # `reduce` folds that pairwise merge over the whole argument list.
  orders <- list(...)
  combined <- reduce(orders, function(a, b) map2(a, b, c))
  shakeshack_order(combined[[1]], combined[[2]])
}
|
\name{pnmtrem}
\alias{pnmtrem}
\alias{pnmtrem}
\docType{package}
\title{
Probit-Normal Marginalized Transition Random Effects Models
}
\description{
Fits Probit-Normal Marginalized Transition Random Effects Models, which are proposed for modeling multivariate longitudinal binary data in
Asar, O., Ilk, O., Sezer, A. D. (2013). A marginalized multilevel model for analyzing multivariate longitudinal binary data. Submitted.
}
\details{
\tabular{ll}{
Package: \tab pnmtrem\cr
Type: \tab Package\cr
Version: \tab 1.3\cr
Date: \tab 2013-05-19\cr
License: \tab GPL (>=2)\cr
}
}
| /man/pnmtrem.Rd | no_license | cran/pnmtrem | R | false | false | 595 | rd | \name{pnmtrem}
\alias{pnmtrem}
\alias{pnmtrem}
\docType{package}
\title{
Probit-Normal Marginalized Transition Random Effects Models
}
\description{
Fits Probit-Normal Marginalized Transition Random Effects Models, which are proposed for modeling multivariate longitudinal binary data in
Asar, O., Ilk, O., Sezer, A. D. (2013). A marginalized multilevel model for analyzing multivariate longitudinal binary data. Submitted.
}
\details{
\tabular{ll}{
Package: \tab pnmtrem\cr
Type: \tab Package\cr
Version: \tab 1.3\cr
Date: \tab 2013-05-19\cr
License: \tab GPL (>=2)\cr
}
}
|
#' Remove null elements from a list
#' @param x Input list
#' @export
#' @keywords internal
suncompact <- function(x){
  # Keep only the non-NULL elements, preserving names and order.
  x[!vapply(x, is.null, logical(1))]
}
| /R/zzz.R | permissive | seankross/rsunlight | R | false | false | 151 | r | #' Remove null elements from a list
#' @param x Input list
#' @export
#' @keywords internal
suncompact <- function(x){
  # Filter out NULL entries; names and ordering of the survivors are kept.
  keep <- !vapply(x, is.null, logical(1))
  x[keep]
}
|
# Data simulation for genome dating
## Tree topology
### use
library(phangorn)

# Make each simulated topology ultrametric (root age fixed at 100) with
# penalized likelihood, then save the chronograms.
trees_topo <- read.tree('simtree1.tre')
for (i in seq_along(trees_topo)) {
  trees_topo[[i]] <- chronopl(trees_topo[[i]], lambda = 5, age.min = 100)
  print(max(branching.times(trees_topo[[i]])))
}
write.tree(trees_topo, file = 'sim_chrono.tre')

# Five reproducible "pacemaker" rate vectors (one multiplier per branch),
# drawn log-normally around exp(-6.9).
set.seed(123456)
pm1 <- rlnorm(38, -6.9, 0.2)
set.seed(654421)
pm2 <- rlnorm(38, -6.9, 0.2)
set.seed(222222)
pm3 <- rlnorm(38, -6.9, 0.2)
set.seed(33333)
pm4 <- rlnorm(38, -6.9, 0.2) # NOTE(review): pm4 and pm5 are drawn but unused below
set.seed(666666)
pm5 <- rlnorm(38, -6.9, 0.2)

# important q's:
# can we recover the clusters?
# How far are we from the gene tree

# One row per simulated locus: output file name and number of segregating sites.
var_sites_mat <- matrix(NA, 510, 2)
var_temp <- 0

# Simulation design, replacing 15 copy-pasted blocks. For each tree, the three
# pacemaker vectors pm1..pm3 are applied with a tree-specific scale factor and
# `n` alignments of 1000 bp are simulated per (tree, pacemaker) combination.
# Counts sum to 510, matching var_sites_mat. The per-locus RNG draws were not
# seed-pinned in the original either, so only the design must match exactly.
pm_list <- list(pm1, pm2, pm3)
design <- list(
  # tree 1: large wide cluster with slow pacemakers
  list(scales = c(1 / 1.5, 1, 2), n = c(50, 40, 30)),
  # tree 2: large narrow cluster with fast pacemakers
  list(scales = c(1 / 1.5, 1 / 1.5, 1.5), n = c(40, 30, 30)),
  # tree 3
  list(scales = c(1 / 1.5, 1 / 1.5, 3), n = c(40, 30, 30)),
  # tree 4
  list(scales = c(1 / 1.5, 1 / 2, 1 / 2), n = c(40, 30, 30)),
  # tree 5
  list(scales = c(2, 1, 3), n = c(40, 30, 20))
)

for (tr in seq_along(design)) {
  for (pm_i in seq_along(pm_list)) {
    # Rescale branch lengths by the pacemaker rates and the design factor.
    tree_temp <- trees_topo[[tr]]
    tree_temp$edge.length <- tree_temp$edge.length * pm_list[[pm_i]] * design[[tr]]$scales[pm_i]
    for (k in seq_len(design[[tr]]$n[pm_i])) {
      sim_temp_1 <- as.DNAbin(simSeq(tree_temp, l = 1000))
      fname <- paste0('tr_', tr, '_pm_', pm_i, '_', k, '.fasta')
      write.dna(sim_temp_1, file = fname, format = 'fasta', nbcol = -1, colsep = '')
      var_temp <- var_temp + 1
      var_sites_mat[var_temp, ] <- c(fname, length(seg.sites(sim_temp_1)))
    }
  }
}

# NOTE(review): 'var_stes.txt' (sic) kept for compatibility with downstream
# scripts; looks like a typo for 'var_sites.txt'.
write.table(var_sites_mat, file = 'var_stes.txt', row.names = F)
| /gdata_1/generate_data.R | no_license | sebastianduchene/genome_dating | R | false | false | 7,828 | r | # Data simulation for genome dating
## Tree topology
### use
library(phangorn)

# Make each simulated topology ultrametric (root age fixed at 100) with
# penalized likelihood, then save the chronograms.
trees_topo <- read.tree('simtree1.tre')
for (i in seq_along(trees_topo)) {
  trees_topo[[i]] <- chronopl(trees_topo[[i]], lambda = 5, age.min = 100)
  print(max(branching.times(trees_topo[[i]])))
}
write.tree(trees_topo, file = 'sim_chrono.tre')

# Five reproducible "pacemaker" rate vectors (one multiplier per branch),
# drawn log-normally around exp(-6.9).
set.seed(123456)
pm1 <- rlnorm(38, -6.9, 0.2)
set.seed(654421)
pm2 <- rlnorm(38, -6.9, 0.2)
set.seed(222222)
pm3 <- rlnorm(38, -6.9, 0.2)
set.seed(33333)
pm4 <- rlnorm(38, -6.9, 0.2) # NOTE(review): pm4 and pm5 are drawn but unused below
set.seed(666666)
pm5 <- rlnorm(38, -6.9, 0.2)

# important q's:
# can we recover the clusters?
# How far are we from the gene tree

# One row per simulated locus: output file name and number of segregating sites.
var_sites_mat <- matrix(NA, 510, 2)
var_temp <- 0

# Simulation design, replacing 15 copy-pasted blocks. For each tree, the three
# pacemaker vectors pm1..pm3 are applied with a tree-specific scale factor and
# `n` alignments of 1000 bp are simulated per (tree, pacemaker) combination.
# Counts sum to 510, matching var_sites_mat. The per-locus RNG draws were not
# seed-pinned in the original either, so only the design must match exactly.
pm_list <- list(pm1, pm2, pm3)
design <- list(
  # tree 1: large wide cluster with slow pacemakers
  list(scales = c(1 / 1.5, 1, 2), n = c(50, 40, 30)),
  # tree 2: large narrow cluster with fast pacemakers
  list(scales = c(1 / 1.5, 1 / 1.5, 1.5), n = c(40, 30, 30)),
  # tree 3
  list(scales = c(1 / 1.5, 1 / 1.5, 3), n = c(40, 30, 30)),
  # tree 4
  list(scales = c(1 / 1.5, 1 / 2, 1 / 2), n = c(40, 30, 30)),
  # tree 5
  list(scales = c(2, 1, 3), n = c(40, 30, 20))
)

for (tr in seq_along(design)) {
  for (pm_i in seq_along(pm_list)) {
    # Rescale branch lengths by the pacemaker rates and the design factor.
    tree_temp <- trees_topo[[tr]]
    tree_temp$edge.length <- tree_temp$edge.length * pm_list[[pm_i]] * design[[tr]]$scales[pm_i]
    for (k in seq_len(design[[tr]]$n[pm_i])) {
      sim_temp_1 <- as.DNAbin(simSeq(tree_temp, l = 1000))
      fname <- paste0('tr_', tr, '_pm_', pm_i, '_', k, '.fasta')
      write.dna(sim_temp_1, file = fname, format = 'fasta', nbcol = -1, colsep = '')
      var_temp <- var_temp + 1
      var_sites_mat[var_temp, ] <- c(fname, length(seg.sites(sim_temp_1)))
    }
  }
}

# NOTE(review): 'var_stes.txt' (sic) kept for compatibility with downstream
# scripts; looks like a typo for 'var_sites.txt'.
write.table(var_sites_mat, file = 'var_stes.txt', row.names = F)
|
# Print per-observation neighborhood estimates and return the class-weighted
# neighbor matrix.
#
# Args (inferred from usage -- confirm against callers):
#   g     : graph object forwarded to neighborsInGraph()
#   simv  : similarity input forwarded to neighborsInGraph()
#   Y     : numeric response vector, one value per node
#   C     : integer class labels per node, taking values 1..max(C)
#   alpha : NOTE(review): unused in this function body
#
# Returns: matrix ngvc -- the neighbor indicator matrix with each row scaled
# elementwise by C.
getEstimatorsVector <-
function (g,simv,Y,C,alpha)
{
y2d<-list()
# Neighborhood indicator matrix from the graph (one row per observation).
neighborsInGraph(g,simv)->ng
ent<-0
probv<-list()
# Row-wise products with the response and with the class labels.
# NOTE(review): ngvy is computed but never used afterwards.
t(apply(ng,1,function(x) (x*Y)))->ngvy
t(apply(ng,1,function(x) (x*C)))->ngvc
# Per row: entropy and probability of the positive (neighbor) entries.
# entropy2d() and prob2d() are package helpers defined elsewhere.
for (i in 1:nrow(ngvc))
{
ngvc[i,]->line
line[line > 0]->line
ent[i]<-entropy2d(line)
probv[[i]]<-prob2d(line)
}
for (i in 1:nrow(ngvc))
{
# Split the responses of row i's neighbors by class label j.
for (j in 1:max(C))
{
Y[ngvc[i,]==j]->y2d[[j]]
}
# Significance marker chosen from the neighborhood entropy.
if (ent[i]>0.5)
sig<-"*"
else
if (ent[i]>0.2)
sig<-"."
else
sig<-" "
ye<-vector()
if (probv[[i]]>0)
{ k=1
# Mean response for every non-empty class among the neighbors.
for (j in 1:max(ngvc[i,]))
{
if (length(y2d[[j]] ) > 0)
{
ye[k]<-mean(y2d[[j]])
k=k+1
}
}
if (ent[i]==0)
{
s<-paste0("p=",round(probv[[i]],3),",yhat=",round(as.numeric(ye),3))
cat(sig," ","entropy=",ent[i],s,"\n")
}
else
{
s<-paste0("(p=",round(probv[[i]],3),",yhat=",round(ye,3),")")
cat(sig," ","entropy=",ent[i],s,"\n")
}
}
else
{ sig=" "
# No neighbors in the graph: report probability 1 and zero entropy.
s<-paste0("p=",1)
cat(sig," ","entropy=",0,s,"-no neighbors in graph.\n")
}
}
#print(ngvc)
return (ngvc)
}
| /KiNN/R/getEstimatorsVector.R | no_license | jossiekat/KiNN | R | false | false | 1,239 | r | getEstimatorsVector <-
function (g,simv,Y,C,alpha)
{
# Prints per-observation neighborhood estimates and returns the
# class-weighted neighbor matrix ngvc.
# Args (inferred from usage -- confirm against callers): g and simv feed
# neighborsInGraph(); Y is a numeric response per node; C holds integer
# class labels 1..max(C). NOTE(review): alpha is unused in this body.
y2d<-list()
# Neighborhood indicator matrix from the graph (one row per observation).
neighborsInGraph(g,simv)->ng
ent<-0
probv<-list()
# Row-wise products with the response and with the class labels.
# NOTE(review): ngvy is computed but never used afterwards.
t(apply(ng,1,function(x) (x*Y)))->ngvy
t(apply(ng,1,function(x) (x*C)))->ngvc
# Per row: entropy and probability of the positive (neighbor) entries.
# entropy2d() and prob2d() are package helpers defined elsewhere.
for (i in 1:nrow(ngvc))
{
ngvc[i,]->line
line[line > 0]->line
ent[i]<-entropy2d(line)
probv[[i]]<-prob2d(line)
}
for (i in 1:nrow(ngvc))
{
# Split the responses of row i's neighbors by class label j.
for (j in 1:max(C))
{
Y[ngvc[i,]==j]->y2d[[j]]
}
# Significance marker chosen from the neighborhood entropy.
if (ent[i]>0.5)
sig<-"*"
else
if (ent[i]>0.2)
sig<-"."
else
sig<-" "
ye<-vector()
if (probv[[i]]>0)
{ k=1
# Mean response for every non-empty class among the neighbors.
for (j in 1:max(ngvc[i,]))
{
if (length(y2d[[j]] ) > 0)
{
ye[k]<-mean(y2d[[j]])
k=k+1
}
}
if (ent[i]==0)
{
s<-paste0("p=",round(probv[[i]],3),",yhat=",round(as.numeric(ye),3))
cat(sig," ","entropy=",ent[i],s,"\n")
}
else
{
s<-paste0("(p=",round(probv[[i]],3),",yhat=",round(ye,3),")")
cat(sig," ","entropy=",ent[i],s,"\n")
}
}
else
{ sig=" "
# No neighbors in the graph: report probability 1 and zero entropy.
s<-paste0("p=",1)
cat(sig," ","entropy=",0,s,"-no neighbors in graph.\n")
}
}
#print(ngvc)
return (ngvc)
}
|
#######################################################
# DOWNLOADS
################################################
# Download GDP long series
download.gdp <- function()
{
# Download the long quarterly GDP series from SIDRA (nominal, real NSA and
# real SA), convert each to a quarterly ts and cache them as a named list in
# ../data/gdp.rds. Requires an internet connection. Returns 1 on completion.
# urls
url1 <- "https://sidra.ibge.gov.br/geratabela?format=us.csv&name=tabela1846.csv&terr=N&rank=-&query=t/1846/n1/all/v/all/p/all/c11255/90707/d/v585%204/l/t%2Bc11255%2Bv,,p"
url2 <- "https://sidra.ibge.gov.br/geratabela?format=us.csv&name=tabela6612.csv&terr=N&rank=-&query=t/6612/n1/all/v/all/p/all/c11255/90707/d/v9318%204/l/t%2Bc11255%2Bv,,p"
url3 <- "https://sidra.ibge.gov.br/geratabela?format=us.csv&name=tabela6613.csv&terr=N&rank=-&query=t/6613/n1/all/v/all/p/all/c11255/90707/d/v9319%204/l/v%2Bt,c11255,p"
# Download Data CSV
download.file(url1, "../data/CNT-SIDRA-1846.csv") # current values
download.file(url2, "../data/CNT-SIDRA-6612.csv") # chained BRL from 1995 NSA
download.file(url3, "../data/CNT-SIDRA-6613.csv") # chained BRL from 1995 SA
# load trim CSV (drop 5 header and 9 footer lines of SIDRA boilerplate)
x1 <- load.trim("../data/CNT-SIDRA-1846.csv", 5, 9) # nominal
x2 <- load.trim("../data/CNT-SIDRA-6612.csv", 5, 9) # 1995=100 NSA
x3 <- load.trim("../data/CNT-SIDRA-6613.csv", 5, 9) # 1995=100 SA
# column 1 holds the period label, column 2 the value
dates <- x1[,1]
data1 <- x1[,2]; data2 <- x2[,2]; data3 <- x3[,2]
# names and dates: char 1 of the label is the quarter digit, chars 14-17 the
# year -- assumes SIDRA's "N trimestre YYYY" label format; TODO confirm.
Q <- substring(dates, 1,1)
Y <- substring(dates, 14,17)
names(data1) <- names(data2) <- names(data3) <- paste0(Y, ":","Q", Q)
data1 <- date.quarter(data1)
data2 <- date.quarter(data2)
data3 <- date.quarter(data3)
gdp.list <- list("nominal"=data1, "real.NSA"=data2, "real.SA"=data3)
# SAVE in RDS
saveRDS(gdp.list, "../data/gdp.rds" )
return(1)
}
####################################################
load.gdp <- function()
{
# Load the cached GDP list written by download.gdp() and enrich it with
# derived series: q/q growth of the SA series, y/y growth of the NSA series,
# rolling 4-quarter sums (nominal and real), and y/y growth of those sums.
gdp <- readRDS("../data/gdp.rds" )
gdp[["ret1"]] = ret1.q(gdp$real.SA)
gdp[["ret4"]] = ret4(gdp$real.NSA)
gdp[["sum4.nominal"]] = sum4(gdp$nominal)
gdp[["sum4.real"]] = sum4(gdp$real.NSA)
gdp[["ret.ac4q.nominal"]] = ret4(sum4(gdp$nominal))
gdp[["ret.ac4q.real"]] = ret4(sum4(gdp$real.NSA))
return(gdp)
}
####################################################
# LOAD files
load.trim <- function(filename, nhead, ntail)
{
  # Read a CSV while skipping the first `nhead` lines and dropping the last
  # `ntail` rows (SIDRA exports carry boilerplate at both ends).
  body_rows <- read.csv(filename, stringsAsFactors = FALSE, skip = nhead,
                        header = FALSE, sep = ",")
  head(body_rows, -ntail)
}
################################################
# Download ipca long series
download.ipca <- function()
{
# Download the long IPCA price-index series from SIDRA (table 1737,
# variable 2266), convert it to a monthly ts and cache it in
# ../data/ipca.rds. Requires an internet connection. Returns 1 on completion.
# urls
url1 <- "https://sidra.ibge.gov.br/geratabela?format=us.csv&name=tabela1737.csv&terr=N&rank=-&query=t/1737/n1/all/v/2266/p/all/d/v2266%2013/l/v%2Bt,,p"
# Download Data CSV
download.file(url1, "../data/IPCA-SIDRA-1737.csv")
# Load Data into R (drop 4 header and 11 footer boilerplate lines)
ipca <- load.trim("../data/IPCA-SIDRA-1737.csv", 4, 11)
ipca1 <- date.month.SIDRA(ipca)
# SAVE in RDS
saveRDS(ipca1, "../data/ipca.rds" )
return(1)
}
################################################
# Download industry long series
download.pimpfbr <- function()
{
  # Download the PIM-PF industrial production table from SIDRA and cache the
  # three monthly series (NSA, SA, 12-month accumulated) in ../data/ind.rds.
  # Requires an internet connection. Returns 1 on completion.
  filename <- "../data/PIMPFBR-SIDRA-3653.csv"
  # urls
  url1 <- "https://sidra.ibge.gov.br/geratabela?format=us.csv&name=tabela3653.csv&terr=N&rank=-&query=t/3653/n1/all/v/3134,3135,3138/p/all/c544/129314/d/v3134%201,v3135%201,v3138%201/l/t%2Bc544,v,p"
  # Download Data CSV
  download.file(url1, filename)
  # Load Data into R (drop SIDRA header/footer boilerplate)
  data <- load.trim(filename, nhead = 5, ntail = 9)
  # FIX: the original called fun.month.SIDRA(), which is not defined in this
  # file; date.month.SIDRA() is the SIDRA converter used by download.ipca().
  ind1 <- date.month.SIDRA(data[, c(1, 2)])
  ind2 <- date.month.SIDRA(data[, c(1, 3)])
  # Subtracting 100 presumably turns the accumulated index into a % rate,
  # and the first 23 rows are dropped -- TODO confirm against the raw table.
  ind3 <- date.month.SIDRA(data[-seq(1, 23), c(1, 4)]) - 100
  ind <- list(ind1, ind2, ind3); names(ind) <- c("NSA", "SA", "ac12")
  # SAVE in RDS
  saveRDS(ind, "../data/ind.rds")
  return(1)
}
################################################
# Download commerce long series
download.pmc <- function()
{
  # Download the PMC retail-sales table from SIDRA and cache the NSA and SA
  # monthly series in ../data/pmc.rds. Requires an internet connection.
  # Returns 1 on completion.
  filename <- "../data/PMC-SIDRA-3416.csv"
  # urls
  url1 <- "https://sidra.ibge.gov.br/geratabela?format=us.csv&name=tabela3416.csv&terr=N&rank=-&query=t/3416/n1/all/v/564/p/all/c11046/40311,40312/d/v564%201/l/v%2Bt,c11046,p"
  # Download Data CSV
  download.file(url1, filename)
  # Load Data into R (drop SIDRA header/footer boilerplate)
  data <- load.trim(filename, nhead = 5, ntail = 9)
  # FIX: fun.month.SIDRA() is not defined in this file; use
  # date.month.SIDRA(), the converter used by download.ipca().
  data1 <- date.month.SIDRA(data[, c(1, 2)])
  data2 <- date.month.SIDRA(data[, c(1, 3)])
  newdata <- list(data1, data2); names(newdata) <- c("NSA", "SA")
  # SAVE in RDS
  saveRDS(newdata, "../data/pmc.rds")
  return(1)
}
################################################
# Download services long series
download.pms <- function()
{
  # Download the PMS services table from SIDRA and cache the NSA and SA
  # monthly series in ../data/pms.rds. Requires an internet connection.
  # Returns 1 on completion.
  filename <- "../data/PMS-SIDRA-6442.csv"
  # urls
  url1 <- "https://sidra.ibge.gov.br/geratabela?format=us.csv&name=tabela6442.csv&terr=N&rank=-&query=t/6442/n1/all/v/8677/p/all/c11046/40311,40312/d/v8677%201/l/t%2Bv,c11046,p"
  # Download Data CSV
  download.file(url1, filename)
  # Load Data into R (drop SIDRA header/footer boilerplate)
  data <- load.trim(filename, nhead = 5, ntail = 9)
  # FIX: fun.month.SIDRA() is not defined in this file; use
  # date.month.SIDRA(), the converter used by download.ipca().
  data1 <- date.month.SIDRA(data[, c(1, 2)])
  data2 <- date.month.SIDRA(data[, c(1, 3)])
  newdata <- list(data1, data2); names(newdata) <- c("NSA", "SA")
  # SAVE in RDS
  saveRDS(newdata, "../data/pms.rds")
  return(1)
}
################################################
# Download IBC long series
download.ibc <- function()
{
  # Download the IBC-Br activity index (NSA and SA) from the BCB SGS API and
  # cache both monthly series in ../data/IBC.rds. Requires an internet
  # connection. Returns 1 on completion.
  file1 <- "../data/IBC-SGS-24363.csv"
  file2 <- "../data/IBC-SGS-24364.csv"
  # urls
  url1 <- "http://api.bcb.gov.br/dados/serie/bcdata.sgs.24363/dados?formato=csv"
  url2 <- "http://api.bcb.gov.br/dados/serie/bcdata.sgs.24364/dados?formato=csv"
  # Download Data CSV
  download.file(url1, file1) # NSA
  download.file(url2, file2) # SA
  # Load Data into R (SGS CSVs: semicolon-separated, comma decimal, 1 header line)
  x1 <- read.csv(file1, stringsAsFactors = F, skip = 1, header = F, sep = ";", dec = ",")
  x2 <- read.csv(file2, stringsAsFactors = F, skip = 1, header = F, sep = ";", dec = ",")
  # Adjust names and dates.
  # FIX: fun.month.sgs() is not defined in this file; date.month.sgs() is the
  # SGS converter defined below. Also dropped a dead `y <- list(x1, x2)` that
  # was immediately overwritten.
  x1 <- date.month.sgs(x1)
  x2 <- date.month.sgs(x2)
  y <- list(x1, x2)
  names(y) <- c("NSA", "SA")
  # SAVE in RDS
  saveRDS(y, "../data/IBC.rds")
  return(1)
}
################################################
# Download SELIC
download.selic <- function()
{
# Download four SELIC interest-rate series from the BCB SGS API into
# ../data/. Unlike the other download.* helpers, this one only saves the
# raw CSVs; it does not parse them or write an RDS. Returns 1 on completion.
#
file1 <- "../data/SELIC-SGS-11.csv"
file2 <- "../data/SELIC-SGS-1178.csv"
file3 <- "../data/SELIC-SGS-432.csv"
file4 <- "../data/SELIC-SGS-4189.csv"
# urls
url1 <- "https://api.bcb.gov.br/dados/serie/bcdata.sgs.11/dados?formato=csv" # selic diaria
url2 <- "https://api.bcb.gov.br/dados/serie/bcdata.sgs.1178/dados?formato=csv" # selic.an diaria
url3 <- "https://api.bcb.gov.br/dados/serie/bcdata.sgs.432/dados?formato=csv" # selic.meta diaria
url4 <- "https://api.bcb.gov.br/dados/serie/bcdata.sgs.4189/dados?formato=csv" # selic.an ac mes
# Download Data CSV
download.file(url1, file1) # selic
download.file(url2, file2) # selic.an
download.file(url3, file3) # selic.meta
download.file(url4, file4) # selic.an ac mes
return(1)
}
####################################################
# DATES
####################################################
# quarter
date.quarter <- function(data)
{
  # Turn a named numeric vector ("YYYY:Qn" names) into a quarterly ts object.
  lbl <- names(data)
  n <- NROW(data)
  yr <- as.numeric(substring(lbl, 1, 4)) # "YYYY" prefix
  qt <- as.numeric(substring(lbl, 7, 7)) # quarter digit after ":Q"
  ts(data, start = c(yr[1], qt[1]), end = c(yr[n], qt[n]), frequency = 4)
}
####################################################
# month
date.month <- function(data)
{
  # Convert a named numeric vector ("YYYY:MM" names) into a monthly ts object.
  #
  # Args:
  #   data: numeric vector whose names follow the "YYYY:MM" pattern.
  # Returns: ts with frequency 12 spanning the named dates.
  T <- NROW(data); dates <- names(data)
  Y <- as.numeric(substring(dates, 1,4))
  M <- as.numeric(substring(dates, 6,7))
  # BUG FIX: the original referenced an undefined `Q` (copy-paste from
  # date.quarter), so every call failed with "object 'Q' not found".
  newdata <- ts(data, start = c(Y[1], M[1]), end = c(Y[T], M[T]), frequency = 12)
  return(newdata)
}
####################################################
date.month.SIDRA <- function(data)
{
  # Convert a SIDRA table (column 1 = "<mes> YYYY" with Portuguese month
  # names, column 2 = value) into a monthly ts object named "YYYY:MM".
  vals <- as.numeric(as.character(data[, -1]))
  n <- NROW(data)
  lbl <- data[, 1]
  # Replace Portuguese month names with two-digit month numbers.
  meses <- c("janeiro", "fevereiro", "março", "abril", "maio", "junho",
             "julho", "agosto", "setembro", "outubro", "novembro", "dezembro")
  nums <- sprintf("%02d", 1:12)
  for (i in seq_along(meses)) {
    lbl <- gsub(meses[i], nums[i], lbl)
  }
  mm <- substring(lbl, 1, 2)
  yy <- substring(lbl, 4, 7)
  names(vals) <- paste0(yy, ":", mm)
  ts(vals, start = c(as.numeric(yy[1]), as.numeric(mm[1])),
     end = c(as.numeric(yy[n]), as.numeric(mm[n])), frequency = 12)
}
####################################################
date.month.sgs <- function(data)
{
  # Convert an SGS download (column 1 = "DD/MM/YYYY", column 2 = value)
  # into a monthly ts object named "YYYY:MM".
  vals <- as.numeric(data[, 2])
  n <- NROW(vals)
  mm <- substring(data[, 1], 4, 5)
  yy <- substring(data[, 1], 7, 10)
  names(vals) <- paste0(yy, ":", mm)
  ts(vals, start = c(as.numeric(yy[1]), as.numeric(mm[1])),
     end = c(as.numeric(yy[n]), as.numeric(mm[n])), frequency = 12)
}
####################################################
# SUMS and Returns
####################################################
# ac4Q # QUARTER
sum4 <- function(data)
{
  # Rolling 4-quarter sum: entry i covers quarters i..i+3 and is labelled
  # with the date of the window's last quarter.
  n <- NROW(data)
  out <- vapply(seq_len(n - 3),
                function(i) sum(as.numeric(data[i:(i + 3)])),
                numeric(1))
  names(out) <- names(data)[-(1:3)]
  date.quarter(out)
}
####################################################
# ret (t/t-4) # QUARTER
ret4 <- function(data)
{
  # Year-over-year growth (%) for quarterly data: 100 * (x[t]/x[t-4] - 1).
  n <- NROW(data)
  curr <- as.numeric(data[5:n])       # x[t]
  base <- as.numeric(data[1:(n - 4)]) # x[t-4]
  growth <- 100 * (curr / base - 1)
  names(growth) <- names(data)[5:n]
  date.quarter(growth)
}
####################################################
# y(t) - y(t-4) # QUARTER
dif4 <- function(data)
{
  # Year-over-year difference for quarterly data: x[t] - x[t-4].
  n <- NROW(data)
  d <- as.numeric(data[5:n]) - as.numeric(data[1:(n - 4)])
  names(d) <- names(data)[5:n]
  date.quarter(d)
}
####################################################
ret1.q <- function(data)
{
  # Quarter-over-quarter growth (%): 100 * (x[t]/x[t-1] - 1).
  n <- NROW(data)
  growth <- 100 * (as.numeric(data[-1]) / as.numeric(data[-n]) - 1)
  names(growth) <- names(data)[-1]
  date.quarter(growth)
}
####################################################
# sum 12 meses # MONTH
sum12 <- function(data)
{
# Rolling 12-month window over a named monthly vector; window i spans
# months i..i+11 and is labelled with the date of month i+11.
# NOTE(review): despite the name, the result is divided by 12, so this is
# the rolling 12-month MEAN, not the sum -- confirm which one callers expect.
T <- NROW(data); dates <- names(data)
sum.12 <- rep(NA, T-11)
for(i in 1:(T-11))
{
w1 <- seq(i,i+12-1)
sum.12[i] <- sum(data[w1])/12
}
names(sum.12) <- dates[-c(1:11)]
return(date.month(sum.12) )
}
####################################################
# ret t/t-12 # MONTH
ret12 <- function(data)
{
  # Year-over-year growth (%) for monthly data: 100 * (x[t]/x[t-12] - 1).
  n <- NROW(data)
  growth <- 100 * (data[13:n] / data[1:(n - 12)] - 1)
  names(growth) <- names(data)[13:n]
  date.month(growth)
}
####################################################
# dif y(t) - y(t-12) # MONTH
dif12 <- function(data)
{
  # Year-over-year difference for monthly data: x[t] - x[t-12].
  n <- NROW(data)
  d <- data[13:n] - data[1:(n - 12)]
  names(d) <- names(data)[13:n]
  date.month(d)
}
####################################################
# ret t/t-1 # MONTH
ret1.m <- function(data)
{
  # Month-over-month growth (%): 100 * (x[t]/x[t-1] - 1).
  n <- NROW(data)
  growth <- 100 * (data[-1] / data[-n] - 1)
  names(growth) <- names(data)[-1]
  date.month(growth)
}
####################################################
# ret anualizado (ret)^per
ret.an <- function(ret, per)
{
  # Annualize a percentage return by compounding it over `per` periods.
  gross <- 1 + ret / 100
  100 * (gross^per - 1)
}
#######################################################
# AC ANO
ac.yr <- function(index, year)
{
  # Cumulative growth (%) of an index over `year`: last observation whose
  # name matches the year vs. the observation immediately before the first.
  pos <- grep(year, names(index))
  last_in_year <- index[pos[length(pos)]]
  before_year <- index[pos[1] - 1]
  100 * (last_in_year / before_year - 1)
}
####################################################
normalize <- function(data, date)
{
  # Rebase a named series so the observation named `date` equals 100.
  base_value <- data[date]
  100 * data / rep(base_value, NROW(data))
}
####################################################
standard <- function(data)
{
  # Z-score: center by the mean and scale by the standard deviation.
  (data - mean(data)) / sd(data)
}
####################################################
#
fun.subserie <- function(data, pat)
{
  # Subset a named series to the entries whose names match `pat`
  # (e.g. a year such as "2020").
  data[grep(pat, names(data))]
}
####################################################
coef.lin <- function(data)
{
  # Fit a log-linear trend log(data) ~ t and return its coefficients
  # ("(Intercept)" and "t1"). Input is in levels, NOT logs.
  # The predictor must keep the name `t1` so the coefficient names match.
  t1 <- seq_len(NROW(data))
  coef(lm(log(data) ~ t1))
}
####################################################
trend.lin <- function(data)
{
# Log-linear trend decomposition of a quarterly series.
# Returns a list: fit (trend in levels), dif (level gap), pct (relative gap),
# g (trend growth, % q/q), gg (growth gap = actual minus trend growth).
# Relies on lm() carrying the response's "YYYY:Qn" names through to
# fitted.values so date.quarter() can re-date the trend -- TODO confirm.
# data is NOT in LOG
T <- NROW(data); t1 <- seq(1, T);
# But the regression IS in LOG
reg <- lm(log(data)~t1) # log linear
# answers are NOT in LOG
t1 <- exp(date.quarter(reg$fitted.values))
c1 <- data-t1
c2 <- c1/t1
# Growth Gap
g <- ret1.q(t1)
gg <- ret1.q(data) - g
return(list("fit"=t1, "dif"=c1, "pct"=c2, "g"=g, "gg"=gg))
}
####################################################
# Log-quadratic trend decomposition (same outputs as trend.lin, but the
# regression adds a squared time term).
trend.quad <- function(data)
{
# data is NOT in LOG
T <- NROW(data); t1 <- seq(1, T); t2 <- t1^2
# But the regression IS in LOG
reg <- lm(log(data)~t1+t2) # quad
# answers are NOT in LOG
t1 <- exp(date.quarter(reg$fitted.values))
c1 <- data-t1
c2 <- c1/t1
# Growth Gap
g <- ret1.q(t1)
gg <- ret1.q(data) - g
return(list("fit"=t1, "dif"=c1, "pct"=c2, "g"=g, "gg"=gg))
}
#############################################
# HF # Hamilton Filter
# Hamilton regression filter: regress y(t) on its own values p periods
# around lag h (defaults h=8, p=4 — presumably quarterly data; confirm).
# Returns: fit (fitted trend, in 100*log units), c1 (cycle residual),
# u (h-period log difference), g (trend q/q growth), gg (growth gap).
trend.hf <- function(data, h=8, p=4)
{
# data is NOT in LOG
T <- NROW(data); y <- 100*log(data)
# But the regression IS in LOG
y.00 <- y[(1+h+p-1):(T-0)] # y(t+h) or y(t)
x <- matrix(NA, nrow = length(y.00), ncol = p)
for(i in 1:p)
{
x[,i] <- y[i:(T-h-(p-i))] # y(t) or y(t-h-(p-i)) for i=1, ..., p
}
reg <- lm(y.00 ~ x)
# answers are NOT in LOG (fit/c1 stay in 100*log units; the names of the
# fitted values come from `data`, which date.quarter() needs)
t1 <- date.quarter(reg$fitted.values)
c1 <- date.quarter(reg$residuals)
#
u <- date.quarter(y.00 - x[,p]) # y(t) - y(t-h)
# Growth Gap (exp(t1/100) converts the 100*log trend back to levels)
g <- ret1.q(exp(t1/100))
gg <- ret1.q(data) - g
return(list("fit"=t1, "c1"=c1, "u"=u, "g"=g, "gg"=gg))
}
####################################################
# Hodrick-Prescott trend/cycle decomposition (filter run in logs).
# NOTE(review): `require()` silently returns FALSE if mFilter is missing;
# a requireNamespace() guard with an explicit error would be safer.
trend.hpf <- function(data, lambda)
{
require(mFilter)
# data is NOT in LOG
T <- NROW(data); t1 <- seq(1, T); t2 <- t1^2
# But the filter IS in LOG
hpf <- hpfilter(log(data), freq = lambda)
names(hpf$trend) <- names(data)
# answers are NOT in LOG
t1 <- exp(hpf$trend)
c1 <- data-t1
c2 <- c1/t1
# Growth Gap
g <- ret1.q(t1)
gg <- ret1.q(data) - g
return(list("fit"=t1, "dif"=c1, "pct"=c2, "g"=g, "gg"=gg))
}
####################################################
| /funs/download.R | no_license | pfnaibert/macro-report | R | false | false | 14,284 | r | #######################################################
# DOWNLOADS
################################################
# Download GDP long series
# Download the long quarterly GDP series from IBGE/SIDRA (tables 1846,
# 6612, 6613), convert them to quarterly ts objects and cache the result
# as ../data/gdp.rds. Returns 1 on completion (side-effect function).
download.gdp <- function()
{
# urls
url1 <- "https://sidra.ibge.gov.br/geratabela?format=us.csv&name=tabela1846.csv&terr=N&rank=-&query=t/1846/n1/all/v/all/p/all/c11255/90707/d/v585%204/l/t%2Bc11255%2Bv,,p"
url2 <- "https://sidra.ibge.gov.br/geratabela?format=us.csv&name=tabela6612.csv&terr=N&rank=-&query=t/6612/n1/all/v/all/p/all/c11255/90707/d/v9318%204/l/t%2Bc11255%2Bv,,p"
url3 <- "https://sidra.ibge.gov.br/geratabela?format=us.csv&name=tabela6613.csv&terr=N&rank=-&query=t/6613/n1/all/v/all/p/all/c11255/90707/d/v9319%204/l/v%2Bt,c11255,p"
# Download Data CSV
download.file(url1, "../data/CNT-SIDRA-1846.csv") # current values
download.file(url2, "../data/CNT-SIDRA-6612.csv") # chained BRL from 1995 NSA
download.file(url3, "../data/CNT-SIDRA-6613.csv") # chained BRL from 1995 SA
# load CSV, trimming the SIDRA header (5 lines) and footer (9 lines)
x1 <- load.trim("../data/CNT-SIDRA-1846.csv", 5, 9) # nominal
x2 <- load.trim("../data/CNT-SIDRA-6612.csv", 5, 9) # 1995=100 NSA
x3 <- load.trim("../data/CNT-SIDRA-6613.csv", 5, 9) # 1995=100 SA
# extract the date column and the value column of each table
dates <- x1[,1]
data1 <- x1[,2]; data2 <- x2[,2]; data3 <- x3[,2]
# build "YYYY:Qn" names from the SIDRA date labels
Q <- substring(dates, 1,1)
Y <- substring(dates, 14,17)
names(data1) <- names(data2) <- names(data3) <- paste0(Y, ":","Q", Q)
data1 <- date.quarter(data1)
data2 <- date.quarter(data2)
data3 <- date.quarter(data3)
gdp.list <- list("nominal"=data1, "real.NSA"=data2, "real.SA"=data3)
# SAVE in RDS
saveRDS(gdp.list, "../data/gdp.rds" )
return(1)
}
####################################################
# Load the cached GDP list (see download.gdp) and augment it with derived
# series: q/q and y/y growth, 4-quarter rolling sums and their growth.
load.gdp <- function()
{
gdp <- readRDS("../data/gdp.rds" )
gdp[["ret1"]] = ret1.q(gdp$real.SA)
gdp[["ret4"]] = ret4(gdp$real.NSA)
gdp[["sum4.nominal"]] = sum4(gdp$nominal)
gdp[["sum4.real"]] = sum4(gdp$real.NSA)
gdp[["ret.ac4q.nominal"]] = ret4(sum4(gdp$nominal))
gdp[["ret.ac4q.real"]] = ret4(sum4(gdp$real.NSA))
return(gdp)
}
####################################################
# LOAD files
load.trim <- function(filename, nhead, ntail)
{
# Read a comma-separated file while discarding the first `nhead` lines
# (header junk) and the last `ntail` rows (footer junk).
raw <- read.csv(filename, stringsAsFactors = FALSE, skip = nhead, header = FALSE, sep = ",")
# keep everything but the trailing ntail rows
raw[seq_len(nrow(raw) - ntail), , drop = FALSE]
}
################################################
# Download ipca long series
# Download the long monthly IPCA inflation series from IBGE/SIDRA
# (table 1737), convert it to a monthly ts and cache it as
# ../data/ipca.rds. Returns 1 on completion (side-effect function).
download.ipca <- function()
{
# urls
url1 <- "https://sidra.ibge.gov.br/geratabela?format=us.csv&name=tabela1737.csv&terr=N&rank=-&query=t/1737/n1/all/v/2266/p/all/d/v2266%2013/l/v%2Bt,,p"
# Download Data CSV
download.file(url1, "../data/IPCA-SIDRA-1737.csv")
# Load Data into R (trim 4 header lines and 11 footer lines)
ipca <- load.trim("../data/IPCA-SIDRA-1737.csv", 4, 11)
ipca1 <- date.month.SIDRA(ipca)
# SAVE in RDS
saveRDS(ipca1, "../data/ipca.rds" )
return(1)
}
################################################
# Download industry long series
# Download the industrial production survey (PIM-PF BR, SIDRA table 3653),
# build NSA, SA and 12-month-accumulated series and cache them as
# ../data/ind.rds. Returns 1 on completion (side-effect function).
download.pimpfbr <- function()
{
#
filename <- "../data/PIMPFBR-SIDRA-3653.csv"
# urls
url1 <- "https://sidra.ibge.gov.br/geratabela?format=us.csv&name=tabela3653.csv&terr=N&rank=-&query=t/3653/n1/all/v/3134,3135,3138/p/all/c544/129314/d/v3134%201,v3135%201,v3138%201/l/t%2Bc544,v,p"
# Download Data CSV
download.file(url1, filename)
# Load Data into R
# NOTE(review): fun.month.SIDRA is not defined in this file (only
# date.month.SIDRA is); confirm it exists elsewhere in the project.
data <- load.trim(filename,nhead=5,ntail=9)
ind1 <- fun.month.SIDRA(data[,c(1,2)])
ind2 <- fun.month.SIDRA(data[,c(1,3)])
# the 12-month accumulated series is an index around 100; subtracting 100
# turns it into a percent variation (first 23 rows dropped)
ind3 <- fun.month.SIDRA(data[-seq(1,23),c(1,4)]) - 100
ind <- list(ind1, ind2, ind3); names(ind) <- c("NSA", "SA", "ac12")
# SAVE in RDS
saveRDS(ind, "../data/ind.rds" )
return(1)
}
################################################
# Download commerce long series
# Download the monthly retail commerce survey (PMC, SIDRA table 3416),
# build NSA and SA series and cache them as ../data/pmc.rds.
# Returns 1 on completion (side-effect function).
download.pmc <- function()
{
#
filename <- "../data/PMC-SIDRA-3416.csv"
# urls
url1 <- "https://sidra.ibge.gov.br/geratabela?format=us.csv&name=tabela3416.csv&terr=N&rank=-&query=t/3416/n1/all/v/564/p/all/c11046/40311,40312/d/v564%201/l/v%2Bt,c11046,p"
# Download Data CSV
download.file(url1, filename)
# Load Data into R
# NOTE(review): fun.month.SIDRA is not defined in this file; confirm it
# exists elsewhere in the project.
data <- load.trim(filename,nhead=5,ntail=9)
data1 <- fun.month.SIDRA(data[,c(1,2)])
data2 <- fun.month.SIDRA(data[,c(1,3)])
newdata <- list(data1, data2); names(newdata) <- c("NSA", "SA")
# SAVE in RDS
saveRDS(newdata, "../data/pmc.rds" )
return(1)
}
################################################
# Download services long series
# Download the monthly services survey (PMS, SIDRA table 6442),
# build NSA and SA series and cache them as ../data/pms.rds.
# Returns 1 on completion (side-effect function).
download.pms <- function()
{
#
filename <- "../data/PMS-SIDRA-6442.csv"
# urls
url1 <- "https://sidra.ibge.gov.br/geratabela?format=us.csv&name=tabela6442.csv&terr=N&rank=-&query=t/6442/n1/all/v/8677/p/all/c11046/40311,40312/d/v8677%201/l/t%2Bv,c11046,p"
# Download Data CSV
download.file(url1, filename)
# Load Data into R
# NOTE(review): fun.month.SIDRA is not defined in this file; confirm it
# exists elsewhere in the project.
data <- load.trim(filename,nhead=5,ntail=9)
data1 <- fun.month.SIDRA(data[,c(1,2)])
data2 <- fun.month.SIDRA(data[,c(1,3)])
newdata <- list(data1, data2); names(newdata) <- c("NSA", "SA")
# SAVE in RDS
saveRDS(newdata, "../data/pms.rds" )
return(1)
}
################################################
# Download IBC long series
# Download the IBC-Br economic activity index from the BCB/SGS API
# (series 24363 NSA and 24364 SA), convert both to monthly ts objects and
# cache them as ../data/IBC.rds. Returns 1 on completion (side-effect
# function).
download.ibc <- function()
{
#
file1 <- "../data/IBC-SGS-24363.csv"
file2 <- "../data/IBC-SGS-24364.csv"
# urls (SGS API, CSV format)
url1 <- "http://api.bcb.gov.br/dados/serie/bcdata.sgs.24363/dados?formato=csv"
url2 <- "http://api.bcb.gov.br/dados/serie/bcdata.sgs.24364/dados?formato=csv"
# Download Data CSV
download.file(url1, file1) # NSA
download.file(url2, file2) # SA
# Load Data into R (semicolon-separated, decimal comma)
x1 <- read.csv(file1, stringsAsFactors = F,skip=1,header=F, sep=";", dec=",")
x2 <- read.csv(file2, stringsAsFactors = F,skip=1,header=F, sep=";", dec=",")
# Adjust names and dates
# (a dead `y <- list(x1, x2)` that preceded the conversion was removed --
# it was immediately overwritten below)
# NOTE(review): fun.month.sgs is not defined in this file (only
# date.month.sgs is); confirm it exists elsewhere in the project.
x1 <- fun.month.sgs(x1)
x2 <- fun.month.sgs(x2)
y <- list(x1, x2)
names(y) <- c("NSA", "SA")
# SAVE in RDS
saveRDS(y, "../data/IBC.rds" )
return(1)
}
################################################
# Download SELIC
# Download four SELIC interest-rate series from the BCB/SGS API to
# ../data/. Only downloads the raw CSVs (no parsing or caching here).
# Returns 1 on completion (side-effect function).
download.selic <- function()
{
#
file1 <- "../data/SELIC-SGS-11.csv"
file2 <- "../data/SELIC-SGS-1178.csv"
file3 <- "../data/SELIC-SGS-432.csv"
file4 <- "../data/SELIC-SGS-4189.csv"
# urls
url1 <- "https://api.bcb.gov.br/dados/serie/bcdata.sgs.11/dados?formato=csv" # selic diaria
url2 <- "https://api.bcb.gov.br/dados/serie/bcdata.sgs.1178/dados?formato=csv" # selic.an diaria
url3 <- "https://api.bcb.gov.br/dados/serie/bcdata.sgs.432/dados?formato=csv" # selic.meta diaria
url4 <- "https://api.bcb.gov.br/dados/serie/bcdata.sgs.4189/dados?formato=csv" # selic.an ac mes
# Download Data CSV
download.file(url1, file1) # selic
download.file(url2, file2) # selic.an
download.file(url3, file3) # selic.meta
download.file(url4, file4) # selic.an ac mes (annualized, accumulated in the month)
return(1)
}
####################################################
# DATES
####################################################
# quarter
date.quarter <- function(data)
{
# Convert a vector whose names follow "YYYY:Qn" into a quarterly ts object.
n_obs <- NROW(data)
lbl <- names(data)
# year = characters 1-4, quarter number = character 7 of "YYYY:Qn"
yr <- as.numeric(substring(lbl, 1, 4))
qt <- as.numeric(substring(lbl, 7, 7))
ts(data, start = c(yr[1], qt[1]), end = c(yr[n_obs], qt[n_obs]), frequency = 4)
}
####################################################
# month
# Convert a vector whose names follow "YYYY:MM" into a monthly ts object.
date.month <- function(data)
{
# number of observations and the "YYYY:MM" labels
n <- NROW(data); dates <- names(data)
# year = characters 1-4, month = characters 6-7
Y <- as.numeric(substring(dates, 1,4))
M <- as.numeric(substring(dates, 6,7))
# BUG FIX: the original used Q[1]/Q[T] in start/end, but Q is never
# defined here (copy-paste from date.quarter); the month vector M is the
# correct second element of start/end
newdata <- ts(data, start = c(Y[1], M[1]), end = c(Y[n], M[n]), frequency = 12)
return(newdata)
}
####################################################
# Convert a two-column SIDRA table (col 1: Portuguese month names like
# "janeiro 2020", col 2: values) into a monthly ts with "YYYY:MM" names.
date.month.SIDRA <- function(data)
{
# IPCA DATES (%B %Y) to %Y:%m
# format(Sys.Date(), "%B/%Y")
# format(Sys.Date(), "%Y:%m")
# numeric values (column 2); column 1 holds the date labels
data2 <- as.numeric(as.character(data[,-1]))
K <- NROW(data)
# dates
dates <- data[,1]
# map Portuguese month names to their two-digit numbers
m1 <- c("janeiro", "fevereiro", "março", "abril", "maio", "junho", "julho", "agosto", "setembro", "outubro", "novembro", "dezembro")
m2 <- c("01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12")
for(i in 1:12)
{
dates <- gsub(m1[i], m2[i], dates )
}
# after substitution labels read "MM YYYY": month = chars 1-2, year = 4-7
M <- substring(dates, 1,2); M1 <- as.numeric(M)
Y <- substring(dates, 4,7); Y1 <- as.numeric(Y)
names(data2) <- paste0(Y, ":", M)
# newdata
newdata <- ts(data2, start=c(Y1[1], M1[1]), end=c(Y1[K], M1[K]), frequency=12 )
return(newdata)
}
####################################################
date.month.sgs <- function(data)
{
# Convert an SGS two-column data frame (col 1: "dd/mm/YYYY" date strings,
# col 2: values) into a monthly ts with "YYYY:mm" names.
raw_dates <- data[, 1]
values <- as.numeric(data[, 2])
n <- NROW(values)
# slice month (chars 4-5) and year (chars 7-10) out of "dd/mm/YYYY"
mm <- substring(raw_dates, 4, 5)
yyyy <- substring(raw_dates, 7, 10)
names(values) <- paste0(yyyy, ":", mm)
mm_num <- as.numeric(mm)
yyyy_num <- as.numeric(yyyy)
ts(values, start = c(yyyy_num[1], mm_num[1]), end = c(yyyy_num[n], mm_num[n]), frequency = 12)
}
####################################################
# SUMS and Returns
####################################################
# ac4Q # QUARTER
sum4 <- function(data)
{
# Rolling 4-quarter sum: element i holds sum(data[i], ..., data[i+3]),
# labelled by the date of the last quarter in the window.
n <- NROW(data)
acc <- rep(NA, n - 3)
names(acc) <- names(data)[-(1:3)]
for (i in seq_len(n - 3)) {
acc[i] <- sum(as.numeric(data[i:(i + 3)]))
}
date.quarter(acc)
}
####################################################
# ret (t/t-4) # QUARTER
ret4 <- function(data)
{
# Year-over-year quarterly return, in percent: 100 * (x[t]/x[t-4] - 1).
n <- NROW(data)
num <- as.numeric(data[-(1:4)])
den <- as.numeric(data[-((n - 3):n)])
pct <- 100 * (num / den - 1)
names(pct) <- names(data)[-(1:4)]
date.quarter(pct)
}
####################################################
# y(t) - y(t-4) # QUARTER
dif4 <- function(data)
{
# Year-over-year quarterly difference: x[t] - x[t-4].
n <- NROW(data)
lead4 <- as.numeric(data[-(1:4)])
lag4 <- as.numeric(data[-((n - 3):n)])
out <- lead4 - lag4
names(out) <- names(data)[-(1:4)]
date.quarter(out)
}
####################################################
ret1.q <- function(data)
{
# Quarter-over-quarter return, in percent: 100 * (x[t]/x[t-1] - 1).
n <- NROW(data)
pct <- 100 * (as.numeric(data[-1]) / as.numeric(data[-n]) - 1)
names(pct) <- names(data)[-1]
date.quarter(pct)
}
####################################################
# sum 12 meses # MONTH
# 12-month rolling aggregate, labelled by the last month of each window.
# NOTE(review): the result is divided by 12, so despite the name this is a
# 12-month moving AVERAGE, not a sum -- confirm that is the intent.
sum12 <- function(data)
{
T <- NROW(data); dates <- names(data)
sum.12 <- rep(NA, T-11)
for(i in 1:(T-11))
{
# window of 12 consecutive months ending at position i+11
w1 <- seq(i,i+12-1)
sum.12[i] <- sum(data[w1])/12
}
names(sum.12) <- dates[-c(1:11)]
return(date.month(sum.12) )
}
####################################################
# ret t/t-12 # MONTH
ret12 <- function(data)
{
# Year-over-year monthly return, in percent: 100 * (x[t]/x[t-12] - 1).
n <- NROW(data)
num <- data[-(1:12)]
den <- data[-((n - 11):n)]
pct <- 100 * (num / den - 1)
names(pct) <- names(data)[-(1:12)]
date.month(pct)
}
####################################################
# dif y(t) - y(t-12) # MONTH
dif12 <- function(data)
{
# Year-over-year monthly difference: x[t] - x[t-12].
n <- NROW(data)
out <- data[-(1:12)] - data[-((n - 11):n)]
names(out) <- names(data)[-(1:12)]
date.month(out)
}
####################################################
# ret t/t-1 # MONTH
ret1.m <- function(data)
{
# Month-over-month return, in percent: 100 * (x[t]/x[t-1] - 1).
n <- NROW(data)
pct <- 100 * (data[-1] / data[-n] - 1)
names(pct) <- names(data)[-1]
date.month(pct)
}
####################################################
# ret anualizado (ret)^per
ret.an <- function(ret, per)
{
# Annualize (compound) a percent return over `per` periods:
# ((1 + r/100)^per - 1) * 100.
((1 + ret/100)^per - 1) * 100
}
#######################################################
# AC ANO
ac.yr <- function(index, year)
{
# Accumulated growth within `year`: last observation of the year versus
# the observation immediately before the year starts, in percent.
pos <- grep(year, names(index))
last_in_year <- index[tail(pos, 1)]
before_year <- index[head(pos, 1) - 1]
100 * (last_in_year / before_year - 1)
}
####################################################
normalize <- function(data, date)
{
# Rebase the series so the observation selected by `date` equals 100.
base_value <- data[date]
100 * data / rep(base_value, NROW(data))
}
####################################################
standard <- function(data)
{
# Z-score standardization: center on the mean, scale by the std deviation.
(data - mean(data)) / sd(data)
}
####################################################
#
fun.subserie <- function(data, pat)
{
# Keep only the elements whose names match the pattern `pat`.
data[grep(pat, names(data))]
}
####################################################
coef.lin <- function(data)
{
# Coefficients (intercept, slope) of a log-linear time trend: the series
# enters in levels, the regression is run on log(data) against 1..n.
# NOTE(review): the dotted name reads as an S3 method `coef` for class
# "lin"; consider renaming to avoid accidental S3 dispatch.
t1 <- seq_len(NROW(data))
coef(lm(log(data) ~ t1))
}
####################################################
# Log-linear trend decomposition. Returns a list:
#   fit - fitted trend in levels, dif - absolute gap, pct - relative gap,
#   g - trend q/q growth, gg - growth gap (actual q/q growth minus g).
trend.lin <- function(data)
{
# data is NOT in LOG
T <- NROW(data); t1 <- seq(1, T);
# But the regression IS in LOG
reg <- lm(log(data)~t1) # log linear
# answers are NOT in LOG (map fitted values back to levels; relies on the
# fitted values inheriting the "YYYY:Qn" names of `data`)
t1 <- exp(date.quarter(reg$fitted.values))
c1 <- data-t1
c2 <- c1/t1
# Growth Gap
g <- ret1.q(t1)
gg <- ret1.q(data) - g
return(list("fit"=t1, "dif"=c1, "pct"=c2, "g"=g, "gg"=gg))
}
####################################################
# Log-quadratic trend decomposition (same outputs as trend.lin, but the
# regression adds a squared time term).
trend.quad <- function(data)
{
# data is NOT in LOG
T <- NROW(data); t1 <- seq(1, T); t2 <- t1^2
# But the regression IS in LOG
reg <- lm(log(data)~t1+t2) # quad
# answers are NOT in LOG
t1 <- exp(date.quarter(reg$fitted.values))
c1 <- data-t1
c2 <- c1/t1
# Growth Gap
g <- ret1.q(t1)
gg <- ret1.q(data) - g
return(list("fit"=t1, "dif"=c1, "pct"=c2, "g"=g, "gg"=gg))
}
#############################################
# HF # Hamilton Filter
# Hamilton regression filter: regress y(t) on its own values p periods
# around lag h (defaults h=8, p=4 — presumably quarterly data; confirm).
# Returns: fit (fitted trend, in 100*log units), c1 (cycle residual),
# u (h-period log difference), g (trend q/q growth), gg (growth gap).
trend.hf <- function(data, h=8, p=4)
{
# data is NOT in LOG
T <- NROW(data); y <- 100*log(data)
# But the regression IS in LOG
y.00 <- y[(1+h+p-1):(T-0)] # y(t+h) or y(t)
x <- matrix(NA, nrow = length(y.00), ncol = p)
for(i in 1:p)
{
x[,i] <- y[i:(T-h-(p-i))] # y(t) or y(t-h-(p-i)) for i=1, ..., p
}
reg <- lm(y.00 ~ x)
# answers are NOT in LOG (fit/c1 stay in 100*log units; the names of the
# fitted values come from `data`, which date.quarter() needs)
t1 <- date.quarter(reg$fitted.values)
c1 <- date.quarter(reg$residuals)
#
u <- date.quarter(y.00 - x[,p]) # y(t) - y(t-h)
# Growth Gap (exp(t1/100) converts the 100*log trend back to levels)
g <- ret1.q(exp(t1/100))
gg <- ret1.q(data) - g
return(list("fit"=t1, "c1"=c1, "u"=u, "g"=g, "gg"=gg))
}
####################################################
# Hodrick-Prescott trend/cycle decomposition (filter run in logs).
#
# data: series in levels (NOT logged); lambda: HP smoothing parameter.
# Returns a list: fit (trend in levels), dif (level gap), pct (relative
# gap), g (trend q/q growth), gg (growth gap = actual growth minus g).
trend.hpf <- function(data, lambda)
{
# fail loudly if the suggested package is missing; the original
# require(mFilter) silently returns FALSE and then errors obscurely
if (!requireNamespace("mFilter", quietly = TRUE)) {
stop("trend.hpf() requires the 'mFilter' package", call. = FALSE)
}
# the filter runs on log(data); results are mapped back to levels below
# (unused leftover locals t1 <- seq(...), t2 <- t1^2 were removed)
hpf <- mFilter::hpfilter(log(data), freq = lambda)
names(hpf$trend) <- names(data)
t1 <- exp(hpf$trend)
c1 <- data - t1
c2 <- c1/t1
# Growth Gap
g <- ret1.q(t1)
gg <- ret1.q(data) - g
return(list("fit"=t1, "dif"=c1, "pct"=c2, "g"=g, "gg"=gg))
}
####################################################
|
# Unit tests: ptOwen() (noncentral Student-t CDF via Owen's algorithm)
# must agree with base R's pt() across df/noncentrality combinations.
context("ptOwen")
test_that("ptOwen = pt", {
  ncp <- 1
  expect_equal(ptOwen(2, nu=3, delta=ncp), pt(2, df=3, ncp=ncp), tolerance=1e-13)
  expect_equal(ptOwen(2, nu=4, delta=ncp), pt(2, df=4, ncp=ncp), tolerance=1e-12)
  expect_equal(ptOwen(2, nu=5, delta=ncp), pt(2, df=5, ncp=ncp), tolerance=1e-12)
  ncp <- 10
  expect_equal(ptOwen(2, nu=3, delta=ncp), pt(2, df=3, ncp=ncp), tolerance=1e-12)
  expect_equal(ptOwen(2, nu=4, delta=ncp), pt(2, df=4, ncp=ncp), tolerance=1e-13)
  expect_equal(ptOwen(2, nu=5, delta=ncp), pt(2, df=5, ncp=ncp), tolerance=1e-12)
  # negative quantile and negative noncentrality
  q <- -1; ncp <- 1
  expect_equal(ptOwen(q, nu=3, delta=ncp), pt(q, df=3, ncp=ncp), tolerance=1e-14)
  ncp <- -1
  expect_equal(ptOwen(q, nu=3, delta=ncp), pt(q, df=3, ncp=ncp), tolerance=1e-13)
})
# Extreme noncentrality: the CDF saturates at 0 (large +delta) / 1 (-delta).
test_that("ptOwen for infinite delta", {
  expect_true(ptOwen(2, nu=3, delta=100) == 0)
  expect_true(ptOwen(2, nu=3, delta=-100) == 1)
})
# Consistency with the Owen Q2 function when its R argument is zero.
test_that("ptOwen = Q2 for R=0", {
  expect_equal(ptOwen(2, 1, 3), OwenQ2(1, 2, 3, 0), tolerance=1e-14)
})
| /issuestests/OwenQ/tests/testthat/test-ptOwen.R | no_license | akhikolla/RcppDeepStateTest | R | false | false | 1,008 | r | context("ptOwen")
# Unit tests (duplicate copy of test-ptOwen.R): ptOwen() must agree with
# base R's pt() across df/noncentrality combinations.
test_that("ptOwen = pt", {
  ncp <- 1
  expect_equal(ptOwen(2, nu=3, delta=ncp), pt(2, df=3, ncp=ncp), tolerance=1e-13)
  expect_equal(ptOwen(2, nu=4, delta=ncp), pt(2, df=4, ncp=ncp), tolerance=1e-12)
  expect_equal(ptOwen(2, nu=5, delta=ncp), pt(2, df=5, ncp=ncp), tolerance=1e-12)
  ncp <- 10
  expect_equal(ptOwen(2, nu=3, delta=ncp), pt(2, df=3, ncp=ncp), tolerance=1e-12)
  expect_equal(ptOwen(2, nu=4, delta=ncp), pt(2, df=4, ncp=ncp), tolerance=1e-13)
  expect_equal(ptOwen(2, nu=5, delta=ncp), pt(2, df=5, ncp=ncp), tolerance=1e-12)
  # negative quantile and negative noncentrality
  q <- -1; ncp <- 1
  expect_equal(ptOwen(q, nu=3, delta=ncp), pt(q, df=3, ncp=ncp), tolerance=1e-14)
  ncp <- -1
  expect_equal(ptOwen(q, nu=3, delta=ncp), pt(q, df=3, ncp=ncp), tolerance=1e-13)
})
# Extreme noncentrality: the CDF saturates at 0 (large +delta) / 1 (-delta).
test_that("ptOwen for infinite delta", {
  expect_true(ptOwen(2, nu=3, delta=100) == 0)
  expect_true(ptOwen(2, nu=3, delta=-100) == 1)
})
# Consistency with the Owen Q2 function when its R argument is zero.
test_that("ptOwen = Q2 for R=0", {
  expect_equal(ptOwen(2, 1, 3), OwenQ2(1, 2, 3, 0), tolerance=1e-14)
})
# context("rivr")
# test_that("Streaming", {
# skip_if_no_rivr()
# ## TODO: need rivr-style json streams or something here?
# writeLines(jsonlite::toJSON(iris, collapse=FALSE),
# tmp <- tempfile())
# ## Start by building a file iterator:
# it_f <- rivr::file_iterator(tmp)
# it_j <- jq_iterator(it_f, '{len: ."Petal.Length"}')
# x <- it_j$yield()
# y <- sprintf('{"len":%s}', iris$Petal.Length[[1]])
# expect_that(x, equals(y))
# ## Then drain everything else:
# dat <- rivr:::drain(it_j)
# expect_that(unlist(dat),
# equals(sprintf('{"len":%s}', iris$Petal.Length[-1])))
# })
| /RLib/jqr/ignore/test-rivr.R | permissive | robkemp/RPackageUpdates | R | false | false | 633 | r | # context("rivr")
# test_that("Streaming", {
# skip_if_no_rivr()
# ## TODO: need rivr-style json streams or something here?
# writeLines(jsonlite::toJSON(iris, collapse=FALSE),
# tmp <- tempfile())
# ## Start by building a file iterator:
# it_f <- rivr::file_iterator(tmp)
# it_j <- jq_iterator(it_f, '{len: ."Petal.Length"}')
# x <- it_j$yield()
# y <- sprintf('{"len":%s}', iris$Petal.Length[[1]])
# expect_that(x, equals(y))
# ## Then drain everything else:
# dat <- rivr:::drain(it_j)
# expect_that(unlist(dat),
# equals(sprintf('{"len":%s}', iris$Petal.Length[-1])))
# })
|
# Auto-generated RcppDeepState/AFL fuzz harness: call multivariance's
# internal match_rows() with a crafted 1x3 / 1x1 matrix pair (including a
# subnormal double) and print the structure of whatever it returns.
testlist <- list(A = structure(c(1.38997190089718e-309, 3.81575932257168e-236, 3.81571422914747e-236), .Dim = c(1L, 3L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) | /multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613127267-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 226 | r | testlist <- list(A = structure(c(1.38997190089718e-309, 3.81575932257168e-236, 3.81571422914747e-236), .Dim = c(1L, 3L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) |
# 21/04/2021 - R_code_classification.r
# Unsupervised classification: pixels with similar spectral values are
# grouped together; each resulting group is a "class".
setwd("C:/lab/")
# Windows
# Image "Solar_Orbiter_s_first_views_of_the_Sun_pillars" (saved in C:/lab):
# ultraviolet view of energy levels in the Sun -- powerful gas eruptions on
# the right, lower-energy regions in the centre, intermediate situations in
# the grey area between them.
library(raster)
library(RStoolbox)
# brick() loads the multi-band image from outside R as a RasterBrick
so <- brick("Solar_Orbiter_s_first_views_of_the_Sun_pillars.jpg")
so
# class      : RasterBrick
# dimensions : 1157, 1920, 2221440, 3 (nrow, ncol, ncell, nlayers)
# resolution : 1, 1 (x, y); extent 0..1920 x 0..1157; crs NA
# values     : 0..255 in each of the three bands
# visualize RGB levels
plotRGB(so, 1,2,3,stretch="lin")
# How does the software classify the image? Each pixel has one value per
# band. A vegetation pixel, for instance, absorbs in the blue and red
# (low values, used for photosynthesis) and reflects in the green because
# of the leaf mesophyll (high value). Crossing the band values places each
# pixel in multi-spectral space, where sample pixels define classes and all
# remaining pixels are assigned to the most similar class (maximum
# likelihood). Here the software builds the training set by itself:
# unsupervised classification, with no user-defined classes.
# unsuperClass() performs the unsupervised classification
soc <- unsuperClass(so,nClasses=3)
# unsuperClass returns both a model and a map; plot only the map ($map)
plot(soc$map)
# set.seed() beforehand would make the result reproducible for everyone
so1 <- unsuperClass(so,nClasses=20)
plot(so1$map)
sun <- brick("Solar_Orbiter_s_first_view_of_the_Sun.png")
plotRGB(sun, 1,2,3,stretch="lin")
sun1 <- unsuperClass(sun,nClasses=3)
plot(sun1$map)
sun2 <- unsuperClass(sun,nClasses=20)
plot(sun2$map)
# The sensors used here are passive; active sensors (laser/radar) carry
# their own direct radiation source.
# 23/04/2021 - Grand Canyon
# https://landsat.visibleearth.nasa.gov/view.php?id=80948
# When John Wesley Powell led an expedition down the Colorado River and
# through the Grand Canyon in 1869, he was confronted with a daunting
# landscape: a serpentine gorge plunging up to 1,829 m (6,000 ft) from rim
# to river, cut in just 6 million years through rock layers representing
# more than 2 billion years of geological history.
setwd("C:/lab/")
library(raster)
library(RStoolbox)
# NOTE(review): `gc` shadows base::gc(); harmless here but worth renaming
gc <- brick("dolansprings_oli_2013088_canyon_lrg.jpg")
plotRGB(gc, r=1, g=2, b=3, stretch="lin")
plotRGB(gc, r=1, g=2, b=3, stretch="hist")
# once the classes are defined, the output holds a model and a map
gcc2 <- unsuperClass(gc, nClasses=2)
plot(gcc2$map)
# BUG FIX: the original printed `ggc2`, which does not exist (typo for
# gcc2) and raises "object 'ggc2' not found"
gcc2
# unsuperClass results: $map is a RasterLayer with values 1..nClasses
gcc4 <- unsuperClass(gc, nClasses=4)
plot(gcc4$map)
| /R_code_classification.r | no_license | Gp9610/telerilevamento_2021 | R | false | false | 4,681 | r | #21/04/2021
# R_code_classification.r (duplicate copy, 21/04/2021)
# Unsupervised classification: pixels with similar spectral values are
# grouped together; each resulting group is a "class".
setwd("C:/lab/")
# Windows
# Image "Solar_Orbiter_s_first_views_of_the_Sun_pillars" (saved in C:/lab):
# ultraviolet view of energy levels in the Sun -- powerful gas eruptions on
# the right, lower-energy regions in the centre, intermediate situations in
# the grey area between them.
library(raster)
library(RStoolbox)
# brick() loads the multi-band image from outside R as a RasterBrick
so <- brick("Solar_Orbiter_s_first_views_of_the_Sun_pillars.jpg")
so
# class      : RasterBrick
# dimensions : 1157, 1920, 2221440, 3 (nrow, ncol, ncell, nlayers)
# resolution : 1, 1 (x, y); extent 0..1920 x 0..1157; crs NA
# values     : 0..255 in each of the three bands
# visualize RGB levels
plotRGB(so, 1,2,3,stretch="lin")
# How does the software classify the image? Each pixel has one value per
# band. A vegetation pixel, for instance, absorbs in the blue and red
# (low values, used for photosynthesis) and reflects in the green because
# of the leaf mesophyll (high value). Crossing the band values places each
# pixel in multi-spectral space, where sample pixels define classes and all
# remaining pixels are assigned to the most similar class (maximum
# likelihood). Here the software builds the training set by itself:
# unsupervised classification, with no user-defined classes.
# unsuperClass() performs the unsupervised classification
soc <- unsuperClass(so,nClasses=3)
# unsuperClass returns both a model and a map; plot only the map ($map)
plot(soc$map)
# set.seed() beforehand would make the result reproducible for everyone
so1 <- unsuperClass(so,nClasses=20)
plot(so1$map)
sun <- brick("Solar_Orbiter_s_first_view_of_the_Sun.png")
plotRGB(sun, 1,2,3,stretch="lin")
sun1 <- unsuperClass(sun,nClasses=3)
plot(sun1$map)
sun2 <- unsuperClass(sun,nClasses=20)
plot(sun2$map)
# The sensors used here are passive; active sensors (laser/radar) carry
# their own direct radiation source.
# 23/04/2021 - Grand Canyon
# https://landsat.visibleearth.nasa.gov/view.php?id=80948
# When John Wesley Powell led an expedition down the Colorado River and
# through the Grand Canyon in 1869, he was confronted with a daunting
# landscape: a serpentine gorge plunging up to 1,829 m (6,000 ft) from rim
# to river, cut in just 6 million years through rock layers representing
# more than 2 billion years of geological history.
setwd("C:/lab/")
library(raster)
library(RStoolbox)
# NOTE(review): `gc` shadows base::gc(); harmless here but worth renaming
gc <- brick("dolansprings_oli_2013088_canyon_lrg.jpg")
plotRGB(gc, r=1, g=2, b=3, stretch="lin")
plotRGB(gc, r=1, g=2, b=3, stretch="hist")
# once the classes are defined, the output holds a model and a map
gcc2 <- unsuperClass(gc, nClasses=2)
plot(gcc2$map)
# BUG FIX: the original printed `ggc2`, which does not exist (typo for
# gcc2) and raises "object 'ggc2' not found"
gcc2
# unsuperClass results: $map is a RasterLayer with values 1..nClasses
gcc4 <- unsuperClass(gc, nClasses=4)
plot(gcc4$map)
|
test_GetPSI_FromTranRef <- function() {
    # An empty PathsxTranscript argument must raise its dedicated error.
    obs <- tryCatch(
        GetPSI_FromTranRef(Samples = "", PathsxTranscript = NULL,
                           Bootstrap = FALSE, Filter = TRUE, Qn = 0.25),
        error = conditionMessage)
    checkIdentical("PathsxTranscript field is empty", obs)
    # An empty Samples argument must raise its dedicated error.
    obs <- tryCatch(
        GetPSI_FromTranRef(Samples = NULL, PathsxTranscript = "",
                           Bootstrap = FALSE, Filter = TRUE, Qn = 0.25),
        error = conditionMessage)
    checkIdentical("Samples field is empty", obs)
    # End-to-end check: recompute PSI values from the bundled kallisto
    # bootstrap output and compare against the packaged reference (PSIss).
    data(EventXtrans)
    PathSamples <- system.file("extdata", package = "EventPointer")
    PathSamples <- paste0(PathSamples, "/output")
    PathSamples <- dir(PathSamples, full.names = TRUE)
    data_exp <- getbootstrapdata(PathSamples = PathSamples, type = "kallisto")
    # harmonise the transcript identifiers (strip the "|..." suffix)
    rownames(data_exp[[1]]) <- gsub("\\|.*", "", rownames(data_exp[[1]]))
    # obtain values of PSI
    PSIss2 <- GetPSI_FromTranRef(PathsxTranscript = EventXtrans,
                                 Samples = data_exp,
                                 Bootstrap = TRUE, Filter = FALSE)
    data("PSIss")
    checkIdentical(PSIss, PSIss2)
}
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qfars.R
\name{make_filename}
\alias{make_filename}
\title{Build the filename for a specific year}
\usage{
make_filename(year)
}
\arguments{
\item{year}{The year, expressed in four digit, for which the filename string needs to be returned}
}
\value{
This function returns a character vector which is the filename for the specified year
}
\description{
This function takes as argument a four digit number indicating a year to build the complete
filename for that year, according to FARS standard naming of datasets
}
\examples{
\dontrun{make_filename(2013)}
}
| /man/make_filename.Rd | no_license | ricrossi/qfars | R | false | true | 636 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qfars.R
\name{make_filename}
\alias{make_filename}
\title{Build the filename for a specific year}
\usage{
make_filename(year)
}
\arguments{
\item{year}{The year, expressed in four digit, for which the filename string needs to be returned}
}
\value{
This function returns a character vector which is the filename for the specified year
}
\description{
This function takes as argument a four digit number indicating a year to build the complete
filename for that year, according to FARS standard naming of datasets
}
\examples{
\dontrun{make_filename(2013)}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/leaveoneout.R
\name{mr_leaveoneout}
\alias{mr_leaveoneout}
\title{Leave one out sensitivity analysis}
\usage{
mr_leaveoneout(dat, parameters = default_parameters(), method = mr_ivw)
}
\arguments{
\item{dat}{Output from \code{harmonise_exposure_outcome}}
\item{parameters}{List of analysis parameters. Default is the output of \code{default_parameters()}}

\item{method}{Function used for the MR analysis of each leave-one-out subset. Default is \code{mr_ivw}}
}
\value{
List of data frames
}
\description{
Leave one out sensitivity analysis
}
| /man/mr_leaveoneout.Rd | permissive | jonellevillar/TwoSampleMR | R | false | true | 465 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/leaveoneout.R
\name{mr_leaveoneout}
\alias{mr_leaveoneout}
\title{Leave one out sensitivity analysis}
\usage{
mr_leaveoneout(dat, parameters = default_parameters(), method = mr_ivw)
}
\arguments{
\item{dat}{Output from \code{harmonise_exposure_outcome}}
\item{parameters}{List of analysis parameters. Default is the output of \code{default_parameters()}}

\item{method}{Function used for the MR analysis of each leave-one-out subset. Default is \code{mr_ivw}}
}
\value{
List of data frames
}
\description{
Leave one out sensitivity analysis
}
|
# Feature engineering (training set): derive four combined risk features
# (AgeRisk, default_time, DebtAmt, DepdRisk) from the outlier-cleaned data
# and write the reduced feature set to disk.
setwd('F:/Miaozhi/Academic/Data_Science/Bootcamp/Project_Capstone/nycdsa-capstone')
data = read.csv('./data/cs-training-outlier-f10.csv', header =T)
# Map age bands to a risk score of the form 1 - score/850; the per-band
# constants (637..747) are presumably average credit scores by age band --
# TODO confirm their source.  Row count 150000 is hard-coded to this file.
AgeRisk=1:150000
for(i in 1:150000){
  if(data$age[i]<29){
    AgeRisk[i]= 1-637/850
  }else{
    if(data$age[i]<39){
      AgeRisk[i]=1-654/850
    }else{
      if(data$age[i]<49){
        AgeRisk[i]=1-675/850
      }else{
        if(data$age[i]<59){
          AgeRisk[i]=1-697/850
        }else{
          if(data$age[i]<69){
            AgeRisk[i]=1-722/850
          }else{
            AgeRisk[i]=1-747/850
          }
        }
      }
    }
  }
}
# Normalized weights for the three delinquency counts (30-59 / 60-89 / 90+).
r=c(0.105,0.089,0.119)
# NOTE(review): this assignment shadows base::sum with a numeric; it works
# because sum(r) is evaluated before the rebinding, but renaming is safer.
sum = sum(r)
w1 = r[1]/sum
w2 = r[2]/sum
w3 = r[3]/sum
# Weighted past-due count across the three delinquency buckets.
default_time = w1 * data$NumberOfTime3059DaysPastDueNotWorse +w2 * data$NumberOfTime6089DaysPastDueNotWorse + w3 * data$NumberOfTimes90DaysLate
# Absolute debt amount: ratio times monthly income.
DebtAmt = data$DebtRatio * as.numeric(data$MonthlyIncome)
# Median income per number of dependents (zero-income rows excluded), used
# as an imputed denominator when MonthlyIncome == 0 below.
new = data %>% filter(MonthlyIncome != 0)%>%group_by(NumberOfDependents) %>% summarise(avg=median(MonthlyIncome,na.rm=T))
DepdRisk = 1:150000
for(i in 1:150000){
  if(is.na(data$MonthlyIncome[i])){
    DepdRisk[i] = NA
  }else{
    if(data$MonthlyIncome[i] == 0){
      DepdRisk[i] = data$NumberOfDependents[i]/(new[new$NumberOfDependents==data$NumberOfDependents[i],]$avg)
    }else{
      DepdRisk[i] = data$NumberOfDependents[i]/as.numeric(data$MonthlyIncome[i])
    }
  }
}
# Append the derived features and drop the raw columns they replace.
data = as.data.frame(cbind(data,AgeRisk,default_time,DebtAmt,DepdRisk))
data = data[,-c(4,5,6,7,9,11,12)]
write.csv(data,'cs-training-combine-f07.csv')
# Feature engineering (test set): same derived features as the training
# section above, applied to the test file (101503 rows, hard-coded).
# Reuses w1/w2/w3 computed in the training section.
test = read.csv('./data/cs-test-outlier-f10.csv')
AgeRisk=1:101503
for(i in 1:101503){
  if(test$age[i]<29){
    AgeRisk[i]= 1-637/850
  }else{
    if(test$age[i]<39){
      AgeRisk[i]=1-654/850
    }else{
      if(test$age[i]<49){
        AgeRisk[i]=1-675/850
      }else{
        if(test$age[i]<59){
          AgeRisk[i]=1-697/850
        }else{
          if(test$age[i]<69){
            AgeRisk[i]=1-722/850
          }else{
            AgeRisk[i]=1-747/850
          }
        }
      }
    }
  }
}
default_time = w1 * test$NumberOfTime3059DaysPastDueNotWorse +w2 * test$NumberOfTime6089DaysPastDueNotWorse + w3 * test$NumberOfTimes90DaysLate
DebtAmt = test$DebtRatio * as.numeric(test$MonthlyIncome)
# Median income per number of dependents, recomputed on the test set.
new = test %>% filter(MonthlyIncome != 0)%>%group_by(NumberOfDependents) %>% summarise(avg=median(MonthlyIncome,na.rm=T))
DepdRisk = 1:101503
for(i in 1:101503){
  if(is.na(test$MonthlyIncome[i])){
    DepdRisk[i] = NA
  }else{
    if(test$MonthlyIncome[i] == 0){
      DepdRisk[i] = test$NumberOfDependents[i]/(new[new$NumberOfDependents==test$NumberOfDependents[i],]$avg)
    }else{
      DepdRisk[i] = test$NumberOfDependents[i]/as.numeric(test$MonthlyIncome[i])
    }
  }
}
test = as.data.frame(cbind(test,AgeRisk,default_time,DebtAmt,DepdRisk))
# NOTE(review): the training section drops column 6 as well
# (-c(4,5,6,7,9,11,12)) while the test set keeps it -- confirm this
# asymmetry is intended.
test = test[,-c(4,5,7,9,11,12)]
write.csv(test,'cs-test-combine-f07.csv')
| /feature-eng-combine.R | no_license | MiaozhiYu/Project_Capstone | R | false | false | 2,975 | r | setwd('F:/Miaozhi/Academic/Data_Science/Bootcamp/Project_Capstone/nycdsa-capstone')
# Feature engineering (training set): derive four combined risk features
# (AgeRisk, default_time, DebtAmt, DepdRisk) from the outlier-cleaned data
# and write the reduced feature set to disk.
data = read.csv('./data/cs-training-outlier-f10.csv', header =T)
# Map age bands to a risk score of the form 1 - score/850; the per-band
# constants (637..747) are presumably average credit scores by age band --
# TODO confirm their source.  Row count 150000 is hard-coded to this file.
AgeRisk=1:150000
for(i in 1:150000){
  if(data$age[i]<29){
    AgeRisk[i]= 1-637/850
  }else{
    if(data$age[i]<39){
      AgeRisk[i]=1-654/850
    }else{
      if(data$age[i]<49){
        AgeRisk[i]=1-675/850
      }else{
        if(data$age[i]<59){
          AgeRisk[i]=1-697/850
        }else{
          if(data$age[i]<69){
            AgeRisk[i]=1-722/850
          }else{
            AgeRisk[i]=1-747/850
          }
        }
      }
    }
  }
}
# Normalized weights for the three delinquency counts (30-59 / 60-89 / 90+).
r=c(0.105,0.089,0.119)
# NOTE(review): this assignment shadows base::sum with a numeric; it works
# because sum(r) is evaluated before the rebinding, but renaming is safer.
sum = sum(r)
w1 = r[1]/sum
w2 = r[2]/sum
w3 = r[3]/sum
# Weighted past-due count across the three delinquency buckets.
default_time = w1 * data$NumberOfTime3059DaysPastDueNotWorse +w2 * data$NumberOfTime6089DaysPastDueNotWorse + w3 * data$NumberOfTimes90DaysLate
# Absolute debt amount: ratio times monthly income.
DebtAmt = data$DebtRatio * as.numeric(data$MonthlyIncome)
# Median income per number of dependents (zero-income rows excluded), used
# as an imputed denominator when MonthlyIncome == 0 below.
new = data %>% filter(MonthlyIncome != 0)%>%group_by(NumberOfDependents) %>% summarise(avg=median(MonthlyIncome,na.rm=T))
DepdRisk = 1:150000
for(i in 1:150000){
  if(is.na(data$MonthlyIncome[i])){
    DepdRisk[i] = NA
  }else{
    if(data$MonthlyIncome[i] == 0){
      DepdRisk[i] = data$NumberOfDependents[i]/(new[new$NumberOfDependents==data$NumberOfDependents[i],]$avg)
    }else{
      DepdRisk[i] = data$NumberOfDependents[i]/as.numeric(data$MonthlyIncome[i])
    }
  }
}
# Append the derived features and drop the raw columns they replace.
data = as.data.frame(cbind(data,AgeRisk,default_time,DebtAmt,DepdRisk))
data = data[,-c(4,5,6,7,9,11,12)]
write.csv(data,'cs-training-combine-f07.csv')
# Feature engineering (test set): same derived features as the training
# section above, applied to the test file (101503 rows, hard-coded).
# Reuses w1/w2/w3 computed in the training section.
test = read.csv('./data/cs-test-outlier-f10.csv')
AgeRisk=1:101503
for(i in 1:101503){
  if(test$age[i]<29){
    AgeRisk[i]= 1-637/850
  }else{
    if(test$age[i]<39){
      AgeRisk[i]=1-654/850
    }else{
      if(test$age[i]<49){
        AgeRisk[i]=1-675/850
      }else{
        if(test$age[i]<59){
          AgeRisk[i]=1-697/850
        }else{
          if(test$age[i]<69){
            AgeRisk[i]=1-722/850
          }else{
            AgeRisk[i]=1-747/850
          }
        }
      }
    }
  }
}
default_time = w1 * test$NumberOfTime3059DaysPastDueNotWorse +w2 * test$NumberOfTime6089DaysPastDueNotWorse + w3 * test$NumberOfTimes90DaysLate
DebtAmt = test$DebtRatio * as.numeric(test$MonthlyIncome)
# Median income per number of dependents, recomputed on the test set.
new = test %>% filter(MonthlyIncome != 0)%>%group_by(NumberOfDependents) %>% summarise(avg=median(MonthlyIncome,na.rm=T))
DepdRisk = 1:101503
for(i in 1:101503){
  if(is.na(test$MonthlyIncome[i])){
    DepdRisk[i] = NA
  }else{
    if(test$MonthlyIncome[i] == 0){
      DepdRisk[i] = test$NumberOfDependents[i]/(new[new$NumberOfDependents==test$NumberOfDependents[i],]$avg)
    }else{
      DepdRisk[i] = test$NumberOfDependents[i]/as.numeric(test$MonthlyIncome[i])
    }
  }
}
test = as.data.frame(cbind(test,AgeRisk,default_time,DebtAmt,DepdRisk))
# NOTE(review): the training section drops column 6 as well
# (-c(4,5,6,7,9,11,12)) while the test set keeps it -- confirm this
# asymmetry is intended.
test = test[,-c(4,5,7,9,11,12)]
write.csv(test,'cs-test-combine-f07.csv')
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qtl_postProcess.R
\name{extractQTLsFromList}
\alias{extractQTLsFromList}
\title{Extract all QTLs at a specific FDR level from a list of min pvalues by condition}
\usage{
extractQTLsFromList(min_pvalue_list, fdr_cutoff = 0.1)
}
\arguments{
\item{min_pvalue_list}{List of QTLs per condition}
\item{fdr_cutoff}{False discovery rate threshold used to select significant QTLs. Default is 0.1}
}
\value{
Data frame of QTLs
}
\description{
Multiple variants per gene are sorted by p-value
}
| /seqUtils/man/extractQTLsFromList.Rd | permissive | kauralasoo/macrophage-tuQTLs | R | false | true | 485 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qtl_postProcess.R
\name{extractQTLsFromList}
\alias{extractQTLsFromList}
\title{Extract all QTLs at a specific FDR level from a list of min pvalues by condition}
\usage{
extractQTLsFromList(min_pvalue_list, fdr_cutoff = 0.1)
}
\arguments{
\item{min_pvalue_list}{List of QTLs per condition}
\item{fdr_cutoff}{False discovery rate threshold used to select significant QTLs. Default is 0.1}
}
\value{
Data frame of QTLs
}
\description{
Multiple variants per gene are sorted by p-value
}
|
#Robert Dinterman
# Download and assemble IRS SOI county income data for 1989-2009: unzip each
# yearly archive, parse the per-state .xls files with read.POP1 (defined in
# 0-functions.R), normalize FIPS codes, and accumulate into `alldata`.
print(paste0("Started 0-IRS_Pop at ", Sys.time()))
options(scipen=999) #Turn off scientific notation for write.csv()
library(dplyr)
library(readr)
source("0-Data/0-functions.R")
# Create a directory for the data
localDir <- "0-Data/IRS"
data_source <- paste0(localDir, "/Raw")
if (!file.exists(localDir)) dir.create(localDir)
if (!file.exists(data_source)) dir.create(data_source)
tempDir <- tempfile()
unlink(tempDir, recursive = T)
#####
# IRS Population Data for 1989 to 2009
#http://www.irs.gov/uac/SOI-Tax-Stats-County-Data
url <- "http://www.irs.gov/file_source/pub/irs-soi/"
years <- 1989:2009
urls <- paste0(url, years, "countyincome.zip")
files <- paste(data_source, basename(urls), sep = "/")
# NOTE(review): downloads only when *no* file exists; a partially downloaded
# set skips re-download entirely -- confirm intended.
if (all(sapply(files, function(x) !file.exists(x)))) {
  mapply(download.file, url = urls, destfile = files)
}
# Documentation changes in 1997...added "Gross rents" and "Total money income"
alldata <- data.frame()
for (i in files){
  unlink(tempDir, recursive = T)
  unzip(i, exdir = tempDir)
  # some .zip do not have folders
  # Locate the .xls files, which may sit at the archive root, one folder
  # deep, or (for some years) two folders deep.
  xlscheck <- list.files(tempDir, pattern = "\\.xls$", full.names = T)
  if (length(xlscheck) == 0){
    j5 <- list.dirs(tempDir, recursive = F)
    xlscheck2 <- list.files(j5, pattern = "\\.xls$") #check if 2007 messes up
    if (length(xlscheck2) == 0){
      j5. <- list.dirs(j5, recursive = F)
      j6 <- list.files(j5., pattern = "\\.xls$", full.names = T)
    } else{
      j6 <- list.files(j5, pattern = "\\.xls$", full.names = T)
    }
  } else { # if .zip contains xls files in main folder...
    j5 <- NULL
    j6 <- xlscheck
  }
  ydata <- data.frame()
  for (j in j6){
    data <- read.POP1(j)
    # Coerce the numeric columns, stripping commas and stray letters.
    data[,c(1:2, 4:9)] <- lapply(data[,c(1:2, 4:9)],
                                 function(x){ # Sometimes characters in values
                                   as.numeric(
                                     gsub(",", "",
                                          gsub("[A-z]", "", x)))
                                 })
    data[, 3] <- sapply(data[, 3], function(x){as.character(x)})
    # Year is taken from the first 4 characters of the zip file name.
    year <- as.numeric(substr(basename(i), 1, 4))
    data$year <- year
    # PROBLEM, in 1989 IRS defines Cali STFIPS as 90, but it's 6
    # further...sometimes the State fips is NA when it shouldn't be
    st <- median(data$STFIPS, na.rm = T)
    data$STFIPS[is.na(data$STFIPS)] <- st
    data$CTYFIPS[is.na(data$CTYFIPS)] <- 0
    if (st == 90) {
      data$fips <- 6000 + data$CTYFIPS
    } else{
      data$fips <- st*1000 + data$CTYFIPS
    }
    # Drop rows that are entirely NA.
    ind <- apply(data, 1, function(x) all(is.na(x)))
    data <- data[!ind, ]
    ydata <- bind_rows(ydata, data)
    print(paste0("Finished ", basename(j), " at ", Sys.time()))
  }
  bfile <- gsub('.{4}$', '', basename(i))
  ydata <- ydata[!is.na(ydata$County_Name), ] #Remove the pesky NAs
  # Remove duplicates
  dupes <- duplicated(ydata)
  ydata <- ydata[!dupes, ]
  # Add in total
  # Sum the state-level rows (fips ending in 000) into a national total row.
  add <- ((ydata$fips %% 1000) == 0)
  addt <- apply(ydata[add, c(4:9)], 2, function(x) sum(x, na.rm = T))
  add <- c(0, 0, NA, addt, year, 0)
  names(add) <- names(ydata)
  ydata <- bind_rows(ydata, as.data.frame(t(add)))
  ydata$County_Name[ydata$fips == 0] <- "Total" #Correct for NA name
  write_csv(ydata, paste0(data_source, "/", bfile,".csv"))
  alldata <- bind_rows(alldata, ydata)
  print(paste0("Finished ", basename(i), " at ", Sys.time()))
}
# Issue with a few counties being messed up, leave it be
# alldata[!complete.cases(alldata),]
# alldata <- alldata[complete.cases(alldata),]
# write_csv(alldata, paste0(localDir, "/countyincome8909.csv"))
# Data from 2010 to 2013
# Problem with 2013, it's called county.zip not countydata.zip
# NOTE(review): years is 2010:2012 (3 entries); urls[4] extends the vector
# to append the differently named 2013 archive.
years <- 2010:2012
urls <- paste0(url, years, "countydata.zip")
urls[4]<- paste0(url, "county", 2013, ".zip")
files <- paste(data_source, basename(urls), sep = "/")
if (!all(sapply(files, function(x) file.exists(x)))) {
  mapply(download.file, url = urls, destfile = files)
}
tdata <- data.frame()
for (i in files){
  unlink(tempDir, recursive = T)
  unzip(i, exdir = tempDir)
  j5 <- list.files(tempDir, pattern = "*noagi.csv", full.names = T)
  # The 2010 and 2011 are .csv but 2012 is .xls
  if (length(j5) == 0){
    j5 <- list.files(tempDir, pattern = "*all.xls", full.names = T)
    data <- read_excel(j5, skip = 5)
    data <- data[, c(1, 3, 4, 5, 10, 12, 14, 18, 16)]
  } else{
    data <- read_csv(j5)
    data <- data[, c("STATEFIPS", "COUNTYFIPS", "COUNTYNAME", "N1", "N2",
                     "A00100", "A00200", "A00600", "A00300")]
  }
  # Rename to the same schema as the 1989-2009 data.
  names(data) <- c("STFIPS", "CTYFIPS", "County_Name", "Return_Num",
                   "Exmpt_Num", "Aggr_AGI", "Wages", "Dividends", "Interest")
  year <- as.numeric(substr(basename(i), 1, 4))
  if (is.na(year)) year <- 2013 # QUICK FIX
  data$year <- year
  data$fips <- data$STFIPS*1000 + data$CTYFIPS
  # Add in total
  add <- ((data$fips %% 1000) == 0)
  addt <- apply(data[add, c(4:9)], 2, function(x) sum(x, na.rm = T))
  add <- c(0, 0, NA, addt, year, 0)
  names(add) <- names(data)
  # 2012 already has a total...
  if (year != 2012) data <- bind_rows(data, as.data.frame(t(add)))
  tdata <- bind_rows(tdata, data)
  tdata$County_Name[tdata$fips == 0] <- "Total" #Correct for NA name
  print(paste0("Finished ", basename(i), " at ", Sys.time()))
}
# Remove NAs
tdata <- tdata[!is.na(tdata$STFIPS),]
# Combine both eras, then patch known FIPS quirks.
IRS_POP <- bind_rows(alldata, tdata)
rm(alldata, data, tdata, ydata)
IRS_POP <- filter(IRS_POP, !is.na(Return_Num), !is.na(Exmpt_Num))
# DC in 2012 only has the state-level row (11000); duplicate it as 11001.
IRS_POP %>% filter(fips == 11000, year == 2012) %>%
  mutate(fips = 11001, CTYFIPS = 1) %>% bind_rows(IRS_POP) -> IRS_POP
# Dade County (12025) was renamed Miami-Dade (12086).
IRS_POP$fips <- ifelse(IRS_POP$fips == 12025, 12086, IRS_POP$fips)
ind <- IRS_POP == -1 & !is.na(IRS_POP) # Turn suppressed into NA
IRS_POP[ind] <- NA
rm(ind)
# Add in state totals...?
IRS_POP %>%
  filter(fips %% 1000 != 0) %>%
  group_by(year, STFIPS) %>%
  summarise(CTYFIPS = 0, Return_Num = sum(Return_Num, na.rm = T),
            Exmpt_Num = sum(Exmpt_Num, na.rm = T),
            Aggr_AGI = sum(Aggr_AGI, na.rm = T),
            Wages = sum(Wages, na.rm = T),
            Dividends = sum(Dividends, na.rm = T),
            Interest = sum(Interest, na.rm = T)) -> states
states$fips <- 1000*states$STFIPS
states$County_Name <- "State Total"
IRS_POP %>%
  filter(fips %% 1000 != 0) %>%
  bind_rows(states) -> IRS_POP
IRS_POP <- select(IRS_POP, fips, year, Pop_IRS = Exmpt_Num,
                  HH_IRS = Return_Num, AGI_IRS = Aggr_AGI, Wages_IRS = Wages,
                  Dividends_IRS = Dividends, Interest_IRS = Interest)
# Problem with 51515, 51560, 51780:
# Merge Virginia independent cities into their counties (fipssues is
# defined in 0-functions.R).
IRS_POP <- fipssues(IRS_POP, 51019, c(51019, 51515))
IRS_POP <- fipssues(IRS_POP, 51005, c(51005, 51560))
IRS_POP <- fipssues(IRS_POP, 51083, c(51083, 51780))
write_csv(IRS_POP, paste0(localDir, "/countyincome8913.csv"))
save(IRS_POP, file = paste0(localDir, "/CTYPop.Rda"))
rm(list = ls())
print(paste0("Finished 0-IRS_Pop at ", Sys.time())) | /0-Data/0-IRS_Pop.R | no_license | yalunsu/test-counties | R | false | false | 7,089 | r | #Robert Dinterman
# Download and assemble IRS SOI county income data for 1989-2009: unzip each
# yearly archive, parse the per-state .xls files with read.POP1 (defined in
# 0-functions.R), normalize FIPS codes, and accumulate into `alldata`.
print(paste0("Started 0-IRS_Pop at ", Sys.time()))
options(scipen=999) #Turn off scientific notation for write.csv()
library(dplyr)
library(readr)
source("0-Data/0-functions.R")
# Create a directory for the data
localDir <- "0-Data/IRS"
data_source <- paste0(localDir, "/Raw")
if (!file.exists(localDir)) dir.create(localDir)
if (!file.exists(data_source)) dir.create(data_source)
tempDir <- tempfile()
unlink(tempDir, recursive = T)
#####
# IRS Population Data for 1989 to 2009
#http://www.irs.gov/uac/SOI-Tax-Stats-County-Data
url <- "http://www.irs.gov/file_source/pub/irs-soi/"
years <- 1989:2009
urls <- paste0(url, years, "countyincome.zip")
files <- paste(data_source, basename(urls), sep = "/")
# NOTE(review): downloads only when *no* file exists; a partially downloaded
# set skips re-download entirely -- confirm intended.
if (all(sapply(files, function(x) !file.exists(x)))) {
  mapply(download.file, url = urls, destfile = files)
}
# Documentation changes in 1997...added "Gross rents" and "Total money income"
alldata <- data.frame()
for (i in files){
  unlink(tempDir, recursive = T)
  unzip(i, exdir = tempDir)
  # some .zip do not have folders
  # Locate the .xls files, which may sit at the archive root, one folder
  # deep, or (for some years) two folders deep.
  xlscheck <- list.files(tempDir, pattern = "\\.xls$", full.names = T)
  if (length(xlscheck) == 0){
    j5 <- list.dirs(tempDir, recursive = F)
    xlscheck2 <- list.files(j5, pattern = "\\.xls$") #check if 2007 messes up
    if (length(xlscheck2) == 0){
      j5. <- list.dirs(j5, recursive = F)
      j6 <- list.files(j5., pattern = "\\.xls$", full.names = T)
    } else{
      j6 <- list.files(j5, pattern = "\\.xls$", full.names = T)
    }
  } else { # if .zip contains xls files in main folder...
    j5 <- NULL
    j6 <- xlscheck
  }
  ydata <- data.frame()
  for (j in j6){
    data <- read.POP1(j)
    # Coerce the numeric columns, stripping commas and stray letters.
    data[,c(1:2, 4:9)] <- lapply(data[,c(1:2, 4:9)],
                                 function(x){ # Sometimes characters in values
                                   as.numeric(
                                     gsub(",", "",
                                          gsub("[A-z]", "", x)))
                                 })
    data[, 3] <- sapply(data[, 3], function(x){as.character(x)})
    # Year is taken from the first 4 characters of the zip file name.
    year <- as.numeric(substr(basename(i), 1, 4))
    data$year <- year
    # PROBLEM, in 1989 IRS defines Cali STFIPS as 90, but it's 6
    # further...sometimes the State fips is NA when it shouldn't be
    st <- median(data$STFIPS, na.rm = T)
    data$STFIPS[is.na(data$STFIPS)] <- st
    data$CTYFIPS[is.na(data$CTYFIPS)] <- 0
    if (st == 90) {
      data$fips <- 6000 + data$CTYFIPS
    } else{
      data$fips <- st*1000 + data$CTYFIPS
    }
    # Drop rows that are entirely NA.
    ind <- apply(data, 1, function(x) all(is.na(x)))
    data <- data[!ind, ]
    ydata <- bind_rows(ydata, data)
    print(paste0("Finished ", basename(j), " at ", Sys.time()))
  }
  bfile <- gsub('.{4}$', '', basename(i))
  ydata <- ydata[!is.na(ydata$County_Name), ] #Remove the pesky NAs
  # Remove duplicates
  dupes <- duplicated(ydata)
  ydata <- ydata[!dupes, ]
  # Add in total
  # Sum the state-level rows (fips ending in 000) into a national total row.
  add <- ((ydata$fips %% 1000) == 0)
  addt <- apply(ydata[add, c(4:9)], 2, function(x) sum(x, na.rm = T))
  add <- c(0, 0, NA, addt, year, 0)
  names(add) <- names(ydata)
  ydata <- bind_rows(ydata, as.data.frame(t(add)))
  ydata$County_Name[ydata$fips == 0] <- "Total" #Correct for NA name
  write_csv(ydata, paste0(data_source, "/", bfile,".csv"))
  alldata <- bind_rows(alldata, ydata)
  print(paste0("Finished ", basename(i), " at ", Sys.time()))
}
# Issue with a few counties being messed up, leave it be
# alldata[!complete.cases(alldata),]
# alldata <- alldata[complete.cases(alldata),]
# write_csv(alldata, paste0(localDir, "/countyincome8909.csv"))
# Data from 2010 to 2013
# Problem with 2013, it's called county.zip not countydata.zip
# NOTE(review): years is 2010:2012 (3 entries); urls[4] extends the vector
# to append the differently named 2013 archive.
years <- 2010:2012
urls <- paste0(url, years, "countydata.zip")
urls[4]<- paste0(url, "county", 2013, ".zip")
files <- paste(data_source, basename(urls), sep = "/")
if (!all(sapply(files, function(x) file.exists(x)))) {
  mapply(download.file, url = urls, destfile = files)
}
tdata <- data.frame()
for (i in files){
  unlink(tempDir, recursive = T)
  unzip(i, exdir = tempDir)
  j5 <- list.files(tempDir, pattern = "*noagi.csv", full.names = T)
  # The 2010 and 2011 are .csv but 2012 is .xls
  if (length(j5) == 0){
    j5 <- list.files(tempDir, pattern = "*all.xls", full.names = T)
    data <- read_excel(j5, skip = 5)
    data <- data[, c(1, 3, 4, 5, 10, 12, 14, 18, 16)]
  } else{
    data <- read_csv(j5)
    data <- data[, c("STATEFIPS", "COUNTYFIPS", "COUNTYNAME", "N1", "N2",
                     "A00100", "A00200", "A00600", "A00300")]
  }
  # Rename to the same schema as the 1989-2009 data.
  names(data) <- c("STFIPS", "CTYFIPS", "County_Name", "Return_Num",
                   "Exmpt_Num", "Aggr_AGI", "Wages", "Dividends", "Interest")
  year <- as.numeric(substr(basename(i), 1, 4))
  if (is.na(year)) year <- 2013 # QUICK FIX
  data$year <- year
  data$fips <- data$STFIPS*1000 + data$CTYFIPS
  # Add in total
  add <- ((data$fips %% 1000) == 0)
  addt <- apply(data[add, c(4:9)], 2, function(x) sum(x, na.rm = T))
  add <- c(0, 0, NA, addt, year, 0)
  names(add) <- names(data)
  # 2012 already has a total...
  if (year != 2012) data <- bind_rows(data, as.data.frame(t(add)))
  tdata <- bind_rows(tdata, data)
  tdata$County_Name[tdata$fips == 0] <- "Total" #Correct for NA name
  print(paste0("Finished ", basename(i), " at ", Sys.time()))
}
# Remove NAs
tdata <- tdata[!is.na(tdata$STFIPS),]
# Combine both eras, then patch known FIPS quirks.
IRS_POP <- bind_rows(alldata, tdata)
rm(alldata, data, tdata, ydata)
IRS_POP <- filter(IRS_POP, !is.na(Return_Num), !is.na(Exmpt_Num))
# DC in 2012 only has the state-level row (11000); duplicate it as 11001.
IRS_POP %>% filter(fips == 11000, year == 2012) %>%
  mutate(fips = 11001, CTYFIPS = 1) %>% bind_rows(IRS_POP) -> IRS_POP
# Dade County (12025) was renamed Miami-Dade (12086).
IRS_POP$fips <- ifelse(IRS_POP$fips == 12025, 12086, IRS_POP$fips)
ind <- IRS_POP == -1 & !is.na(IRS_POP) # Turn suppressed into NA
IRS_POP[ind] <- NA
rm(ind)
# Add in state totals...?
IRS_POP %>%
  filter(fips %% 1000 != 0) %>%
  group_by(year, STFIPS) %>%
  summarise(CTYFIPS = 0, Return_Num = sum(Return_Num, na.rm = T),
            Exmpt_Num = sum(Exmpt_Num, na.rm = T),
            Aggr_AGI = sum(Aggr_AGI, na.rm = T),
            Wages = sum(Wages, na.rm = T),
            Dividends = sum(Dividends, na.rm = T),
            Interest = sum(Interest, na.rm = T)) -> states
states$fips <- 1000*states$STFIPS
states$County_Name <- "State Total"
IRS_POP %>%
  filter(fips %% 1000 != 0) %>%
  bind_rows(states) -> IRS_POP
IRS_POP <- select(IRS_POP, fips, year, Pop_IRS = Exmpt_Num,
                  HH_IRS = Return_Num, AGI_IRS = Aggr_AGI, Wages_IRS = Wages,
                  Dividends_IRS = Dividends, Interest_IRS = Interest)
# Problem with 51515, 51560, 51780:
# Merge Virginia independent cities into their counties (fipssues is
# defined in 0-functions.R).
IRS_POP <- fipssues(IRS_POP, 51019, c(51019, 51515))
IRS_POP <- fipssues(IRS_POP, 51005, c(51005, 51560))
IRS_POP <- fipssues(IRS_POP, 51083, c(51083, 51780))
write_csv(IRS_POP, paste0(localDir, "/countyincome8913.csv"))
save(IRS_POP, file = paste0(localDir, "/CTYPop.Rda"))
rm(list = ls())
print(paste0("Finished 0-IRS_Pop at ", Sys.time()))
# Compare observed MoodBar click shares (feedback / editing / share) against
# prior probabilities and report the resulting pairwise odds.
buckets <- read.table("data/buckets.tsv", sep = "\t", header = T)
# Prior bucket probabilities: feedback 0.8, editing 0.1, share 0.1.
priors <- list(0.8, 0.1, 0.1)
names(priors) <- c("feedback", "editing", "share")
# Restrict to the 2011-07-26 .. 2011-12-01 window.
# NOTE(review): the lower bound omits tz="UTC" -- confirm intended.
buckets$date <- as.POSIXct(buckets$date, tz="UTC")
buckets <- buckets[buckets$date < as.POSIXct("2011-12-01", tz="UTC")
                   & buckets$date > as.POSIXct("2011-07-26"),]
# Empirical likelihoods: share of total clicks landing in each bucket.
tot_clicks <- sum(buckets$num_clicks)
L = list(sum(buckets$num_feedback) / tot_clicks,
         sum(buckets$num_editing) / tot_clicks,
         sum(buckets$num_share) / tot_clicks)
names(L) <- c("feedback", "editing", "share")
# Pairwise log likelihood ratios, offset by the corresponding prior log ratios.
ef <- log(L$editing) - log(L$feedback) - log(priors$editing) + log(priors$feedback)
sf <- log(L$share) - log(L$feedback) - log(priors$share) + log(priors$feedback)
es <- log(L$editing) - log(L$share) - log(priors$editing) + log(priors$share)
odds <- exp(c(ef, sf, es))
names(odds) <- c("editing vs feedback", "share vs feedback", "editing vs share")
print(odds) | /reports/buckets/odds.R | no_license | glciampaglia/MoodBar | R | false | false | 941 | r | buckets <- read.table("data/buckets.tsv", sep = "\t", header = T)
priors <- list(0.8, 0.1, 0.1)
names(priors) <- c("feedback", "editing", "share")
buckets$date <- as.POSIXct(buckets$date, tz="UTC")
buckets <- buckets[buckets$date < as.POSIXct("2011-12-01", tz="UTC")
& buckets$date > as.POSIXct("2011-07-26"),]
tot_clicks <- sum(buckets$num_clicks)
L = list(sum(buckets$num_feedback) / tot_clicks,
sum(buckets$num_editing) / tot_clicks,
sum(buckets$num_share) / tot_clicks)
names(L) <- c("feedback", "editing", "share")
ef <- log(L$editing) - log(L$feedback) - log(priors$editing) + log(priors$feedback)
sf <- log(L$share) - log(L$feedback) - log(priors$share) + log(priors$feedback)
es <- log(L$editing) - log(L$share) - log(priors$editing) + log(priors$share)
odds <- exp(c(ef, sf, es))
names(odds) <- c("editing vs feedback", "share vs feedback", "editing vs share")
print(odds) |
##############################################################################
# title : function to generate input data to fragstat and fractal analyses;
# purpose : create separated grids by unit of analysis for fragstat and
# fractal analyses;
# producer : prepared by A. Coca;
# last update : in London, UK June 2015 / Updated in July 2015;;
# inputs : deforestation dataset by year, fishnet shapefile;
# outputs : split grids in GeoTIFF format (FRAGSTAT/FRACTAL INPUT);
# remarks 1 : associated with the 0_extract_grids_by_ua_parallel.R code;
###############################################################################
# Extract the deforestation raster for one fishnet cell and export it as a
# binary cumulative GeoTIFF: 1 = change within [date.ini, date.end],
# 0 = other observed values, bkg_value = background/NA.  A per-cell status
# ("data", "nodata" or "NA") is always written to a CSV log.
#
# Relies on objects defined by the calling script: det.data (raster),
# myshp (fishnet SpatialPolygonsDataFrame), date.ini, date.end, bkg_value,
# output.TIFF, log.path.
#
# @param i index of the fishnet polygon in myshp to process
# @return nothing useful; called for its side effects (GeoTIFF + log CSV)
GenUniGrid=function(i){
  db=data.frame(myshp[i,])
  # Record the processing outcome for this cell.  Factored out: the
  # original repeated this write.table call four times.
  log_status <- function(type){
    log.file <- paste0(log.path,"/",db$CELLID,".csv")
    write.table(data.frame("CELLID" = db$CELLID, "type" = type),
                file=log.file, append = F, sep=",", col.names=T, row.names = F)
  }
  ei = as(extent(det.data), "SpatialPolygons")
  # gRelate DE-9IM string starting with "2" => the cell intersects the
  # raster extent with a 2-dimensional (area) overlap.
  if (startsWith(gRelate(ei, myshp[i,]),"2")){ #check geometry
    # CROP: bounding box of the cell polygon.
    tryPoly <- myshp@polygons[[i]]@Polygons[[1]]@coords
    MaxY <- max(tryPoly[,2])
    MaxX <- max(tryPoly[,1])
    MinY <- min(tryPoly[,2])
    MinX <- min(tryPoly[,1])
    ext <- extent(cbind(c(MinX,MinY), c(MaxX,MaxY)))
    rawdet.masked <- raster::intersect(det.data,ext)
    # Pad partial edge cells so every exported grid covers the full extent.
    if (!(round(extent(rawdet.masked)) == round(ext))) {
      rawdet.masked = extend(rawdet.masked, ext)
    }
    # Avoid scientific notation in the exported filename.
    options(scipen=999)
    freq_table = freq(rawdet.masked,useNA='no')
    if (length(freq_table) > 0){ # skip rasters that are entirely NA
      # Indexes of non-target values (0 and 999).
      nonvalues_idx = which(freq_table[,1] %in% c(0,999))
      check_p = length(freq_table[,1]) - length(nonvalues_idx)
      # BUGFIX: the original tested length(check_p) > 0, which is always
      # TRUE because check_p is a scalar; test the count itself.  (The
      # observable outcome is unchanged: when check_p == 0 the old code
      # computed prop_nontarget == 1 and also logged "nodata".)
      if (check_p > 0){
        prop_nontarget= sum(freq_table[nonvalues_idx,2]) / sum(freq_table[,2])
        if (prop_nontarget < 1){
          # Reclassify: values within [date.ini, date.end] -> 1, remaining
          # non-background values -> 0, NA -> background.
          # NOTE(review): the second rule also matches the 1s produced by
          # the first rule unless 1 is in values_accum or equals bkg_value
          # -- confirm the value encoding makes this safe.
          index_accum = which(freq_table[,1] >= date.ini & freq_table[,1] <= date.end)
          values_accum = freq_table[index_accum,1]
          det.math.value0 <- function(x) { x[x %in% values_accum] <- 1; return(x) }
          det.initial.1 <- calc(rawdet.masked, det.math.value0)
          det.math.value1 <- function(x) { x[!x %in% values_accum & x != as.numeric(bkg_value)] <- 0; return(x) }
          det.initial.1 <- calc(det.initial.1, det.math.value1)
          det.initial.1[is.na(det.initial.1)] <- as.numeric(bkg_value)
          # Export the binary cumulative-change grid for this cell.
          writeRaster(det.initial.1, filename=paste(output.TIFF,"/",db$CELLID,"_cummulative_",date.ini,"to",date.end,".tif",sep=""), format="GTiff", overwrite=TRUE)
          log_status("data")
        } else {
          log_status("nodata")
        }
      } else {
        log_status("nodata")
      }
    } else {
      log_status("NA")
    }
  } else {
    log_status("NA")
  }
}
| /functions/2b_GenUniGrid.R | permissive | Alowis/postloss-Spattern | R | false | false | 3,833 | r | ##############################################################################
# title : function to generate input data to fragstat and fractal analyses;
# purpose : create separated grids by unit of analysis for fragstat and
# fractal analyses;
# producer : prepared by A. Coca;
# last update : in London, UK June 2015 / Updated in July 2015;;
# inputs : deforestation dataset by year, fishnet shapefile;
# outputs : split grids in GeoTIFF format (FRAGSTAT/FRACTAL INPUT);
# remarks 1 : associated with the 0_extract_grids_by_ua_parallel.R code;
###############################################################################
# Extract the deforestation raster for one fishnet cell and export it as a
# binary cumulative GeoTIFF: 1 = change within [date.ini, date.end],
# 0 = other observed values, bkg_value = background/NA.  A per-cell status
# ("data", "nodata" or "NA") is always written to a CSV log.
#
# Relies on objects defined by the calling script: det.data (raster),
# myshp (fishnet SpatialPolygonsDataFrame), date.ini, date.end, bkg_value,
# output.TIFF, log.path.
#
# @param i index of the fishnet polygon in myshp to process
# @return nothing useful; called for its side effects (GeoTIFF + log CSV)
GenUniGrid=function(i){
  db=data.frame(myshp[i,])
  # Record the processing outcome for this cell.  Factored out: the
  # original repeated this write.table call four times.
  log_status <- function(type){
    log.file <- paste0(log.path,"/",db$CELLID,".csv")
    write.table(data.frame("CELLID" = db$CELLID, "type" = type),
                file=log.file, append = F, sep=",", col.names=T, row.names = F)
  }
  ei = as(extent(det.data), "SpatialPolygons")
  # gRelate DE-9IM string starting with "2" => the cell intersects the
  # raster extent with a 2-dimensional (area) overlap.
  if (startsWith(gRelate(ei, myshp[i,]),"2")){ #check geometry
    # CROP: bounding box of the cell polygon.
    tryPoly <- myshp@polygons[[i]]@Polygons[[1]]@coords
    MaxY <- max(tryPoly[,2])
    MaxX <- max(tryPoly[,1])
    MinY <- min(tryPoly[,2])
    MinX <- min(tryPoly[,1])
    ext <- extent(cbind(c(MinX,MinY), c(MaxX,MaxY)))
    rawdet.masked <- raster::intersect(det.data,ext)
    # Pad partial edge cells so every exported grid covers the full extent.
    if (!(round(extent(rawdet.masked)) == round(ext))) {
      rawdet.masked = extend(rawdet.masked, ext)
    }
    # Avoid scientific notation in the exported filename.
    options(scipen=999)
    freq_table = freq(rawdet.masked,useNA='no')
    if (length(freq_table) > 0){ # skip rasters that are entirely NA
      # Indexes of non-target values (0 and 999).
      nonvalues_idx = which(freq_table[,1] %in% c(0,999))
      check_p = length(freq_table[,1]) - length(nonvalues_idx)
      # BUGFIX: the original tested length(check_p) > 0, which is always
      # TRUE because check_p is a scalar; test the count itself.  (The
      # observable outcome is unchanged: when check_p == 0 the old code
      # computed prop_nontarget == 1 and also logged "nodata".)
      if (check_p > 0){
        prop_nontarget= sum(freq_table[nonvalues_idx,2]) / sum(freq_table[,2])
        if (prop_nontarget < 1){
          # Reclassify: values within [date.ini, date.end] -> 1, remaining
          # non-background values -> 0, NA -> background.
          # NOTE(review): the second rule also matches the 1s produced by
          # the first rule unless 1 is in values_accum or equals bkg_value
          # -- confirm the value encoding makes this safe.
          index_accum = which(freq_table[,1] >= date.ini & freq_table[,1] <= date.end)
          values_accum = freq_table[index_accum,1]
          det.math.value0 <- function(x) { x[x %in% values_accum] <- 1; return(x) }
          det.initial.1 <- calc(rawdet.masked, det.math.value0)
          det.math.value1 <- function(x) { x[!x %in% values_accum & x != as.numeric(bkg_value)] <- 0; return(x) }
          det.initial.1 <- calc(det.initial.1, det.math.value1)
          det.initial.1[is.na(det.initial.1)] <- as.numeric(bkg_value)
          # Export the binary cumulative-change grid for this cell.
          writeRaster(det.initial.1, filename=paste(output.TIFF,"/",db$CELLID,"_cummulative_",date.ini,"to",date.end,".tif",sep=""), format="GTiff", overwrite=TRUE)
          log_status("data")
        } else {
          log_status("nodata")
        }
      } else {
        log_status("nodata")
      }
    } else {
      log_status("NA")
    }
  } else {
    log_status("NA")
  }
}
|
#' Generalize FAQ questions into similar-question variants
#'
#' @param faq FAQ data frame containing the standard questions, the
#'   business-object and operation-node templates, and their '~'-separated
#'   synonym lists
#'
#' @return the first 14 columns of \code{faq} plus a similar-questions
#'   column whose generated variants are joined by "||"
#' @export
#'
#' @examples
#' generator()
generator <- function(faq){
  #faq <- read_excel(file,skip = skip);
  # View(faq)
  # Extract the standard questions ------
  ques <- faq$标准问
  ques_count <- length(ques);
  # Extract the templates (the phrase to substitute in each question) ------
  busi_Obj <- faq$`业务对象(近义词)`;
  oper_Node <- faq$`操作节点(近义词)`;
  # Extract the generalization content (synonym lists)
  busi_Obj_gen<- faq$`业务对象(同义词)`;
  oper_Node_gen <- faq$`操作节点(同义词)`;
  # Split each synonym string on '~' --------
  busi_Obj_gen_multi <- str_split(busi_Obj_gen,'~');
  oper_Node_gen_multi <-str_split(oper_Node_gen,'~');
  # Handle each question; original note: logic could be tightened further.
  # NOTE(review): uses 1:ques_count, which misbehaves for an empty table;
  # the if() conditions also mix vectorized '&' with scalar branching.
  res <-lapply(1:ques_count,function(i){
    q1 <- ques[i];
    # standard business object
    busi_obj1 <- busi_Obj[i];
    # standard operation node
    oper_node1 <- oper_Node[i];
    # business-object synonyms
    busi_obj_set1 <-busi_Obj_gen_multi[[i]];
    # operation-node synonyms
    oper_node_set1 <-oper_Node_gen_multi[[i]]
    alpha <- length(busi_obj_set1) # number of business-object synonyms
    belta <- length(oper_node_set1) # number of operation-node synonyms;
    res_count <-alpha*belta;
    unit_res <-list_init(res_count);
    pcs <-1L # counter
    if(alpha==1 & is.na(busi_obj_set1)&belta >=1&!is.na(oper_node_set1)){
      # no business object: generalize the operation node only
      for (oper_unit in oper_node_set1) {
        q001_belta <- str_replace(q1,oper_node1,oper_unit);
        #q001_belta;
        unit_res[[pcs]] <-q001_belta;
        pcs<- pcs+1L;
      }
    }else if (alpha >=1 &!is.na(busi_obj_set1)& belta ==1 & is.na(oper_node_set1)){
      # no operation node: generalize the business object only
      for (busi_unit in busi_obj_set1) {
        q001_alpha <- str_replace(q1,busi_obj1,busi_unit);
        #q001_alpha;
        unit_res[[pcs]] <-q001_alpha;
        pcs<- pcs+1L;
      }
    }else if ((alpha >1 & belta >1)|(alpha ==1 & !is.na(busi_obj_set1))|(belta==1 & !is.na(oper_node_set1))){
      # both present: generate every combination of the two synonym sets
      for (busi_unit in busi_obj_set1) {
        for (oper_unit in oper_node_set1) {
          q001_alpha <- str_replace(q1,busi_obj1,busi_unit);
          #q001_alpha;
          q001_belta <- str_replace(q001_alpha,oper_node1,oper_unit);
          #q001_belta;
          unit_res[[pcs]] <-q001_belta;
          pcs<- pcs+1L;
        }
      }
    }else{
      unit_res[[pcs]] <-"";
    }
    # normalize: flatten the list, drop duplicates, join with "||"
    unit_res <- unique(unlist(unit_res))
    unit_res <- paste(unit_res,collapse = "||")
    return(unit_res)
  })
  res <- unlist(res);
  print(res)
  faq2 <- faq[,1:14];
  faq2$`相似问题` <- res
  return(faq2)
}
| /R/utility.R | no_license | takewiki/nsgenpkg | R | false | false | 2,651 | r | #' 处理问题泛化业务
#' Generalize FAQ questions into similar-question variants
#'
#' @param faq FAQ data frame containing the standard questions, the
#'   business-object and operation-node templates, and their '~'-separated
#'   synonym lists
#'
#' @return the first 14 columns of \code{faq} plus a similar-questions
#'   column whose generated variants are joined by "||"
#' @export
#'
#' @examples
#' generator()
generator <- function(faq){
  #faq <- read_excel(file,skip = skip);
  # View(faq)
  # Extract the standard questions ------
  ques <- faq$标准问
  ques_count <- length(ques);
  # Extract the templates (the phrase to substitute in each question) ------
  busi_Obj <- faq$`业务对象(近义词)`;
  oper_Node <- faq$`操作节点(近义词)`;
  # Extract the generalization content (synonym lists)
  busi_Obj_gen<- faq$`业务对象(同义词)`;
  oper_Node_gen <- faq$`操作节点(同义词)`;
  # Split each synonym string on '~' --------
  busi_Obj_gen_multi <- str_split(busi_Obj_gen,'~');
  oper_Node_gen_multi <-str_split(oper_Node_gen,'~');
  # Handle each question; original note: logic could be tightened further.
  # NOTE(review): uses 1:ques_count, which misbehaves for an empty table;
  # the if() conditions also mix vectorized '&' with scalar branching.
  res <-lapply(1:ques_count,function(i){
    q1 <- ques[i];
    # standard business object
    busi_obj1 <- busi_Obj[i];
    # standard operation node
    oper_node1 <- oper_Node[i];
    # business-object synonyms
    busi_obj_set1 <-busi_Obj_gen_multi[[i]];
    # operation-node synonyms
    oper_node_set1 <-oper_Node_gen_multi[[i]]
    alpha <- length(busi_obj_set1) # number of business-object synonyms
    belta <- length(oper_node_set1) # number of operation-node synonyms;
    res_count <-alpha*belta;
    unit_res <-list_init(res_count);
    pcs <-1L # counter
    if(alpha==1 & is.na(busi_obj_set1)&belta >=1&!is.na(oper_node_set1)){
      # no business object: generalize the operation node only
      for (oper_unit in oper_node_set1) {
        q001_belta <- str_replace(q1,oper_node1,oper_unit);
        #q001_belta;
        unit_res[[pcs]] <-q001_belta;
        pcs<- pcs+1L;
      }
    }else if (alpha >=1 &!is.na(busi_obj_set1)& belta ==1 & is.na(oper_node_set1)){
      # no operation node: generalize the business object only
      for (busi_unit in busi_obj_set1) {
        q001_alpha <- str_replace(q1,busi_obj1,busi_unit);
        #q001_alpha;
        unit_res[[pcs]] <-q001_alpha;
        pcs<- pcs+1L;
      }
    }else if ((alpha >1 & belta >1)|(alpha ==1 & !is.na(busi_obj_set1))|(belta==1 & !is.na(oper_node_set1))){
      # both present: generate every combination of the two synonym sets
      for (busi_unit in busi_obj_set1) {
        for (oper_unit in oper_node_set1) {
          q001_alpha <- str_replace(q1,busi_obj1,busi_unit);
          #q001_alpha;
          q001_belta <- str_replace(q001_alpha,oper_node1,oper_unit);
          #q001_belta;
          unit_res[[pcs]] <-q001_belta;
          pcs<- pcs+1L;
        }
      }
    }else{
      unit_res[[pcs]] <-"";
    }
    # normalize: flatten the list, drop duplicates, join with "||"
    unit_res <- unique(unlist(unit_res))
    unit_res <- paste(unit_res,collapse = "||")
    return(unit_res)
  })
  res <- unlist(res);
  print(res)
  faq2 <- faq[,1:14];
  faq2$`相似问题` <- res
  return(faq2)
}
# Compare occupation-level salaries between ROC survey years 104 and 107.
# NOTE(review): url107 is an absolute local path -- this script only runs on
# the original author's machine; confirm a portable data source.
library(jsonlite)
library(dplyr)
library(readr)
url104<-"http://ipgod.nchc.org.tw/dataset/b6f36b72-0c4a-4b60-9254-1904e180ddb1/resource/98d5094d-7481-44b5-876a-715a496f922c/download/a17000000j-020066-mah.csv"
url107<-"C:/Users/SAMUEL/Desktop/4133da254dbcdba28a2097de48d8d606_csv/job107.csv"
job104 <- read_csv(url104)
job107 <- read_csv(url107)
# Harmonise the occupation labels (大職業別) so both years share join keys.
job104$大職業別<-gsub("部門","",job104$大職業別)
job107$大職業別<-gsub("_","、",job107$大職業別)
job104$大職業別<-gsub("營造業","營建工程",job104$大職業別)
job107$大職業別<-gsub("出版、影音製作、傳播及資通訊服務業","資訊及通訊傳播業",job107$大職業別)
job107$大職業別<-gsub("教育業","教育服務業",job107$大職業別)
job107$大職業別<-gsub("醫療保健業","醫療保健服務業",job107$大職業別)
# Strip the "—" / "…" placeholder glyphs from numeric columns before coercion
# (otherwise as.numeric() below would yield NA for those cells).
job107$`大學-薪資`<-gsub("—","",job107$`大學-薪資`)
job104$`大學-薪資`<-gsub("—","",job104$`大學-薪資`)
job107$`大學-薪資`<-gsub("…","",job107$`大學-薪資`)
job104$`大學-女/男`<-gsub("—","",job104$`大學-女/男`)
job107$`大學-女/男`<-gsub("—","",job107$`大學-女/男`)
job104$`大學-女/男`<-gsub("…","",job104$`大學-女/男`)
job107$`大學-女/男`<-gsub("…","",job107$`大學-女/男`)
job107$`研究所-薪資`<-gsub("—","",job107$`研究所-薪資`)
job107$`研究所-薪資`<-gsub("…","",job107$`研究所-薪資`)
# Full join on occupation; suffix .x = year 104 columns, .y = year 107 columns.
jobJoin<-full_join(job104,job107,c("大職業別"))
# ---- Occupations whose bachelor-level salary rose from 104 to 107 ----
jobJoin$`大學-薪資.y`<-as.numeric(jobJoin$`大學-薪資.y`)
jobJoin$`大學-薪資.x`<-as.numeric(jobJoin$`大學-薪資.x`)
jobRaise<-filter(jobJoin,jobJoin$`大學-薪資.y`>jobJoin$`大學-薪資.x`)
jobRaise<-mutate(jobRaise,Ratio=jobRaise$`大學-薪資.y`/jobRaise$`大學-薪資.x`)
jobRaise<-arrange(jobRaise,desc(Ratio))
# Top-10 occupations by salary growth ratio.
knitr::kable(head(jobRaise[order(jobRaise$Ratio,decreasing=T),
c("大職業別","Ratio")],10))
# Occupations with more than 5% growth.
fivePercent <-filter(jobRaise,Ratio>1.05)
knitr::kable(fivePercent[,c("大職業別","Ratio")])
# Drop the trailing "-<sub-category>" (hyphen + CJK chars) to group by sector.
fivePercent$大職業別<-gsub("-+[\u4e00-\u9fa5]*","",fivePercent$大職業別)
table(fivePercent$大職業別)
# ---- Gender pay gap: 大學-女/男 is the female/male salary ratio (x100) ----
job104$`大學-女/男`<-as.numeric(job104$`大學-女/男`)
job107$`大學-女/男`<-as.numeric(job107$`大學-女/男`)
gender104<-filter(job104,`大學-女/男`>0)
gender107<-filter(job107,`大學-女/男`>0)
# Year 104: occupations where men out-earn women most (smallest ratio first).
knitr::kable(head(gender104[order(gender104$`大學-女/男`,decreasing=F),c("大職業別","大學-女/男")],10))
# Year 104: occupations where women out-earn men (ratio > 100).
gender104<-filter(gender104,`大學-女/男`>100)
knitr::kable(head(gender104[order(gender104$`大學-女/男`,decreasing=T),c("大職業別","大學-女/男")],10))
# Year 107: occupations where men out-earn women most.
knitr::kable(head(gender107[order(gender107$`大學-女/男`,decreasing=F),c("大職業別","大學-女/男")],10))
# Year 107: occupations where women out-earn men (ratio > 100).
# NOTE(review): filters job107 rather than gender107, unlike the year-104
# branch above -- confirm this asymmetry is intentional.
gender107<-filter(job107,`大學-女/男`>100)
knitr::kable(head(gender107[order(gender107$`大學-女/男`,decreasing=T),c("大職業別","大學-女/男")],10))
# ---- Graduate-school premium: Institute = grad salary / bachelor salary ----
job107$`大學-薪資`<-as.numeric(job107$`大學-薪資`)
job107$`研究所-薪資`<-as.numeric(job107$`研究所-薪資`)
job107<-mutate(job107,Institute=job107$`研究所-薪資`/job107$`大學-薪資`)
head(job107[order(job107$Institute,decreasing=T),c("大職業別","Institute")],10)
# Inspect three hand-picked rows (selected by row name).
favorite<-job107[c("78","99","127"),c("大職業別","大學-薪資","研究所-薪資")]
knitr::kable(favorite)
# Absolute salary increase from a graduate degree for those rows.
favorite<-mutate(favorite,Raise=favorite$`研究所-薪資`-favorite$`大學-薪資`)
| /DataAnalysis.R | no_license | CGUIM-BigDataAnalysis/108bigdatacguim-hw1-940223 | R | false | false | 3,622 | r | library(jsonlite)
library(dplyr)
library(readr)
url104<-"http://ipgod.nchc.org.tw/dataset/b6f36b72-0c4a-4b60-9254-1904e180ddb1/resource/98d5094d-7481-44b5-876a-715a496f922c/download/a17000000j-020066-mah.csv"
url107<-"C:/Users/SAMUEL/Desktop/4133da254dbcdba28a2097de48d8d606_csv/job107.csv"
job104 <- read_csv(url104)
job107 <- read_csv(url107)
job104$大職業別<-gsub("部門","",job104$大職業別)
job107$大職業別<-gsub("_","、",job107$大職業別)
job104$大職業別<-gsub("營造業","營建工程",job104$大職業別)
job107$大職業別<-gsub("出版、影音製作、傳播及資通訊服務業","資訊及通訊傳播業",job107$大職業別)
job107$大職業別<-gsub("教育業","教育服務業",job107$大職業別)
job107$大職業別<-gsub("醫療保健業","醫療保健服務業",job107$大職業別)
job107$`大學-薪資`<-gsub("—","",job107$`大學-薪資`)
job104$`大學-薪資`<-gsub("—","",job104$`大學-薪資`)
job107$`大學-薪資`<-gsub("…","",job107$`大學-薪資`)
job104$`大學-女/男`<-gsub("—","",job104$`大學-女/男`)
job107$`大學-女/男`<-gsub("—","",job107$`大學-女/男`)
job104$`大學-女/男`<-gsub("…","",job104$`大學-女/男`)
job107$`大學-女/男`<-gsub("…","",job107$`大學-女/男`)
job107$`研究所-薪資`<-gsub("—","",job107$`研究所-薪資`)
job107$`研究所-薪資`<-gsub("…","",job107$`研究所-薪資`)
jobJoin<-full_join(job104,job107,c("大職業別"))
#跳
jobJoin$`大學-薪資.y`<-as.numeric(jobJoin$`大學-薪資.y`)
jobJoin$`大學-薪資.x`<-as.numeric(jobJoin$`大學-薪資.x`)
jobRaise<-filter(jobJoin,jobJoin$`大學-薪資.y`>jobJoin$`大學-薪資.x`)
jobRaise<-mutate(jobRaise,Ratio=jobRaise$`大學-薪資.y`/jobRaise$`大學-薪資.x`)
jobRaise<-arrange(jobRaise,desc(Ratio))
knitr::kable(head(jobRaise[order(jobRaise$Ratio,decreasing=T),
c("大職業別","Ratio")],10))
#
fivePercent <-filter(jobRaise,Ratio>1.05)
knitr::kable(fivePercent[,c("大職業別","Ratio")])
#
fivePercent$大職業別<-gsub("-+[\u4e00-\u9fa5]*","",fivePercent$大職業別)
table(fivePercent$大職業別)
#
job104$`大學-女/男`<-as.numeric(job104$`大學-女/男`)
job107$`大學-女/男`<-as.numeric(job107$`大學-女/男`)
gender104<-filter(job104,`大學-女/男`>0)
gender107<-filter(job107,`大學-女/男`>0)
#104年男生薪資比女生薪資多的職業
knitr::kable(head(gender104[order(gender104$`大學-女/男`,decreasing=F),c("大職業別","大學-女/男")],10))
#104年女生生薪資比女男生薪資多的職業
gender104<-filter(gender104,`大學-女/男`>100)
knitr::kable(head(gender104[order(gender104$`大學-女/男`,decreasing=T),c("大職業別","大學-女/男")],10))
#107年男生薪資比女生薪資多的職業
knitr::kable(head(gender107[order(gender107$`大學-女/男`,decreasing=F),c("大職業別","大學-女/男")],10))
#107年女生薪資比男生薪資多的職業
gender107<-filter(job107,`大學-女/男`>100)
knitr::kable(head(gender107[order(gender107$`大學-女/男`,decreasing=T),c("大職業別","大學-女/男")],10))
#跳
job107$`大學-薪資`<-as.numeric(job107$`大學-薪資`)
job107$`研究所-薪資`<-as.numeric(job107$`研究所-薪資`)
job107<-mutate(job107,Institute=job107$`研究所-薪資`/job107$`大學-薪資`)
head(job107[order(job107$Institute,decreasing=T),c("大職業別","Institute")],10)
#
favorite<-job107[c("78","99","127"),c("大職業別","大學-薪資","研究所-薪資")]
knitr::kable(favorite)
#
favorite<-mutate(favorite,Raise=favorite$`研究所-薪資`-favorite$`大學-薪資`)
|
# Experiment: impute missing Petal.Length values with a simple linear
# regression on Petal.Width, then compare 3-NN classification accuracy on
# the original iris data vs. the imputed data.
# NOTE(review): knn() is provided by the 'class' package, which is not loaded
# here -- confirm library(class) is attached earlier in the full script.
original_data <-iris
set.seed(104)
####### Set initial parameters
portion <-0.2 # percent of missing values to occupy the data. 0.02 = 2 %
training_size <-0.7 # percent of data for training
data_length <-nrow(original_data)
missing_data <-original_data
# Blank out Petal.Length for the first `portion` share of rows (non-random:
# the NAs are introduced at the top of the data frame).
id <-portion*data_length
missing_data[1:id,'Petal.Length'] <-NA
missing_data
# Fit Petal.Length ~ Petal.Width; lm() drops the NA rows by default.
m <-lm(missing_data$Petal.Length ~ missing_data$Petal.Width, data=missing_data)
missing_data
# Fill each missing Petal.Length with the fitted intercept + slope prediction.
for(i in 1:nrow(missing_data))
{
if(is.na(missing_data$Petal.Length[i]))
{
missing_data$Petal.Length[i] = coef(m)[1] + coef(m)[2]*missing_data$Petal.Width[i]
}
}
# #root mean square between imputed and true values
# (non-imputed rows contribute zero difference, so this measures imputation
# error averaged over all rows)
rmse = sqrt(mean( (original_data$Petal.Length - missing_data$Petal.Length)^2, na.rm = TRUE) )
print("RMSE")
rmse
#Random splitting of iris data as 70% train and 30%test datasets
#first we normalize whole dataset
indexes <- sample(1:nrow(iris), floor(training_size*nrow(iris)))
iris.train <- iris[indexes,-5]
iris.train.target <- iris[indexes,5]
iris.test <- iris[-indexes,-5]
iris.test.target <- iris[-indexes,5]
# Baseline: 3-NN accuracy on the untouched data (column 5 is Species).
original_prediction <- knn(train=iris.train, test=iris.test, cl=iris.train.target, k=3)
confusion_matrix <- table(iris.test.target, original_prediction)
accuracy <- (sum(diag(confusion_matrix)))/sum(confusion_matrix)
accuracy
# Repeat the split and 3-NN classification on the imputed data (new seed, so
# the train/test partition differs from the baseline run).
set.seed(103)
indexes_imputed <- sample(1:nrow(missing_data), floor(training_size*nrow(missing_data)))
iris.imputed.train <- missing_data[indexes_imputed,-5]
iris.imputed.train.target <- missing_data[indexes_imputed,5]
iris.imputed.test <- missing_data[-indexes_imputed,-5]
iris.imputed.test.target <- missing_data[-indexes_imputed,5]
imputed_prediction <- knn(train=iris.imputed.train, test=iris.imputed.test, cl=iris.imputed.train.target, k=3)
imputed_confusion_matrix <- table(iris.imputed.test.target, imputed_prediction)
imputed_confusion_matrix
imputed.accuracy <- (sum(diag(imputed_confusion_matrix)))/sum(imputed_confusion_matrix)
imputed.accuracy
| /LinearRegressionNonRandom.R | no_license | Alex-Nguyen/CS5331R | R | false | false | 1,951 | r | original_data <-iris
set.seed(104)
####### Set initial parameters
portion <-0.2 # percent of missing values to occupy the data. 0.02 = 2 %
training_size <-0.7 # percent of data for training
data_length <-nrow(original_data)
missing_data <-original_data
id <-portion*data_length
missing_data[1:id,'Petal.Length'] <-NA
missing_data
m <-lm(missing_data$Petal.Length ~ missing_data$Petal.Width, data=missing_data)
missing_data
for(i in 1:nrow(missing_data))
{
if(is.na(missing_data$Petal.Length[i]))
{
missing_data$Petal.Length[i] = coef(m)[1] + coef(m)[2]*missing_data$Petal.Width[i]
}
}
# #root mean square between imputed and true values
rmse = sqrt(mean( (original_data$Petal.Length - missing_data$Petal.Length)^2, na.rm = TRUE) )
print("RMSE")
rmse
#Random splitting of iris data as 70% train and 30%test datasets
#first we normalize whole dataset
indexes <- sample(1:nrow(iris), floor(training_size*nrow(iris)))
iris.train <- iris[indexes,-5]
iris.train.target <- iris[indexes,5]
iris.test <- iris[-indexes,-5]
iris.test.target <- iris[-indexes,5]
original_prediction <- knn(train=iris.train, test=iris.test, cl=iris.train.target, k=3)
confusion_matrix <- table(iris.test.target, original_prediction)
accuracy <- (sum(diag(confusion_matrix)))/sum(confusion_matrix)
accuracy
set.seed(103)
indexes_imputed <- sample(1:nrow(missing_data), floor(training_size*nrow(missing_data)))
iris.imputed.train <- missing_data[indexes_imputed,-5]
iris.imputed.train.target <- missing_data[indexes_imputed,5]
iris.imputed.test <- missing_data[-indexes_imputed,-5]
iris.imputed.test.target <- missing_data[-indexes_imputed,5]
imputed_prediction <- knn(train=iris.imputed.train, test=iris.imputed.test, cl=iris.imputed.train.target, k=3)
imputed_confusion_matrix <- table(iris.imputed.test.target, imputed_prediction)
imputed_confusion_matrix
imputed.accuracy <- (sum(diag(imputed_confusion_matrix)))/sum(imputed_confusion_matrix)
imputed.accuracy
|
# Intrinio API
#
# Welcome to the Intrinio API! Through our Financial Data Marketplace, we offer a wide selection of financial data feed APIs sourced by our own proprietary processes as well as from many data vendors. For a complete API request / response reference please view the [Intrinio API documentation](https://docs.intrinio.com/documentation/api_v2). If you need additional help in using the API, please visit the [Intrinio website](https://intrinio.com) and click on the chat icon in the lower right corner.
#
# OpenAPI spec version: 2.45.0
#
# Generated by: https://github.com/swagger-api/swagger-codegen.git
#' ApiResponseSecurityRelativeStrengthIndex Class
#'
#' Container for the Intrinio "security relative strength index" API
#' response. Auto-generated by swagger-codegen; avoid hand-editing logic.
#'
#' @field technicals List of RelativeStrengthIndexTechnicalValue objects
#'   (also mirrored as a data frame in \code{technicals_data_frame} after
#'   \code{setFromList()}).
#' @field indicator TechnicalIndicator describing the RSI calculation.
#' @field security SecuritySummary for the security the values belong to.
#' @field next_page Token for the next page of results (\code{NA} when absent).
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
ApiResponseSecurityRelativeStrengthIndex <- R6::R6Class(
'ApiResponseSecurityRelativeStrengthIndex',
public = list(
`technicals` = NA,
`technicals_data_frame` = NULL,
`indicator` = NA,
`security` = NA,
`next_page` = NA,
# Assign only the fields the caller actually supplied; others keep NA.
initialize = function(`technicals`, `indicator`, `security`, `next_page`){
if (!missing(`technicals`)) {
self$`technicals` <- `technicals`
}
if (!missing(`indicator`)) {
self$`indicator` <- `indicator`
}
if (!missing(`security`)) {
self$`security` <- `security`
}
if (!missing(`next_page`)) {
self$`next_page` <- `next_page`
}
},
# Build a list representation: lists of R6 objects are serialized via each
# element's toJSON(); anything else goes through jsonlite::toJSON directly.
toJSON = function() {
ApiResponseSecurityRelativeStrengthIndexObject <- list()
if (!is.null(self$`technicals`)) {
# If the object is an empty list or a list of R6 Objects
if (is.list(self$`technicals`) && ((length(self$`technicals`) == 0) || ((length(self$`technicals`) != 0 && R6::is.R6(self$`technicals`[[1]]))))) {
ApiResponseSecurityRelativeStrengthIndexObject[['technicals']] <- lapply(self$`technicals`, function(x) x$toJSON())
} else {
ApiResponseSecurityRelativeStrengthIndexObject[['technicals']] <- jsonlite::toJSON(self$`technicals`, auto_unbox = TRUE)
}
}
if (!is.null(self$`indicator`)) {
# If the object is an empty list or a list of R6 Objects
if (is.list(self$`indicator`) && ((length(self$`indicator`) == 0) || ((length(self$`indicator`) != 0 && R6::is.R6(self$`indicator`[[1]]))))) {
ApiResponseSecurityRelativeStrengthIndexObject[['indicator']] <- lapply(self$`indicator`, function(x) x$toJSON())
} else {
ApiResponseSecurityRelativeStrengthIndexObject[['indicator']] <- jsonlite::toJSON(self$`indicator`, auto_unbox = TRUE)
}
}
if (!is.null(self$`security`)) {
# If the object is an empty list or a list of R6 Objects
if (is.list(self$`security`) && ((length(self$`security`) == 0) || ((length(self$`security`) != 0 && R6::is.R6(self$`security`[[1]]))))) {
ApiResponseSecurityRelativeStrengthIndexObject[['security']] <- lapply(self$`security`, function(x) x$toJSON())
} else {
ApiResponseSecurityRelativeStrengthIndexObject[['security']] <- jsonlite::toJSON(self$`security`, auto_unbox = TRUE)
}
}
if (!is.null(self$`next_page`)) {
# If the object is an empty list or a list of R6 Objects
if (is.list(self$`next_page`) && ((length(self$`next_page`) == 0) || ((length(self$`next_page`) != 0 && R6::is.R6(self$`next_page`[[1]]))))) {
ApiResponseSecurityRelativeStrengthIndexObject[['next_page']] <- lapply(self$`next_page`, function(x) x$toJSON())
} else {
ApiResponseSecurityRelativeStrengthIndexObject[['next_page']] <- jsonlite::toJSON(self$`next_page`, auto_unbox = TRUE)
}
}
ApiResponseSecurityRelativeStrengthIndexObject
},
# Populate fields from a JSON string; values are assigned as the raw parsed
# lists/data frames (no wrapping into R6 model objects -- contrast with
# fromJSONString(), which goes through setFromList()).
fromJSON = function(ApiResponseSecurityRelativeStrengthIndexJson) {
ApiResponseSecurityRelativeStrengthIndexObject <- jsonlite::fromJSON(ApiResponseSecurityRelativeStrengthIndexJson)
if (!is.null(ApiResponseSecurityRelativeStrengthIndexObject$`technicals`)) {
self$`technicals` <- ApiResponseSecurityRelativeStrengthIndexObject$`technicals`
}
if (!is.null(ApiResponseSecurityRelativeStrengthIndexObject$`indicator`)) {
self$`indicator` <- ApiResponseSecurityRelativeStrengthIndexObject$`indicator`
}
if (!is.null(ApiResponseSecurityRelativeStrengthIndexObject$`security`)) {
self$`security` <- ApiResponseSecurityRelativeStrengthIndexObject$`security`
}
if (!is.null(ApiResponseSecurityRelativeStrengthIndexObject$`next_page`)) {
self$`next_page` <- ApiResponseSecurityRelativeStrengthIndexObject$`next_page`
}
},
# Serialize the toJSON() list into a pretty-printed JSON string.
toJSONString = function() {
jsonlite::toJSON(self$toJSON(), auto_unbox = TRUE, pretty = TRUE)
},
# Parse a JSON string and delegate to setFromList(), which builds the full
# R6 object graph (model classes for technicals/indicator/security).
fromJSONString = function(ApiResponseSecurityRelativeStrengthIndexJson) {
ApiResponseSecurityRelativeStrengthIndexObject <- jsonlite::fromJSON(ApiResponseSecurityRelativeStrengthIndexJson, simplifyDataFrame = FALSE)
self$setFromList(ApiResponseSecurityRelativeStrengthIndexObject)
},
# Populate fields from a parsed list, wrapping nested payloads into their
# model classes and materializing technicals as a data frame.
setFromList = function(listObject) {
self$`technicals` <- lapply(listObject$`technicals`, function(x) {
RelativeStrengthIndexTechnicalValueObject <- RelativeStrengthIndexTechnicalValue$new()
RelativeStrengthIndexTechnicalValueObject$setFromList(x)
return(RelativeStrengthIndexTechnicalValueObject)
})
technicals_list <- lapply(self$`technicals`, function(x) {
return(x$getAsList())
})
self$`technicals_data_frame` <- do.call(rbind, lapply(technicals_list, data.frame))
self$`indicator` <- TechnicalIndicator$new()
self$`indicator`$setFromList(listObject$`indicator`)
self$`security` <- SecuritySummary$new()
self$`security`$setFromList(listObject$`security`)
if (!is.null(listObject$`next_page`)) {
self$`next_page` <- listObject$`next_page`
}
else {
self$`next_page` <- NA
}
},
# Flatten the object into a single named list, prefixing nested indicator/
# security fields ("indicator_*", "security_*"). Technicals are omitted
# (the generated code intentionally leaves that branch commented out).
getAsList = function() {
listObject = list()
# listObject[["technicals"]] <- lapply(self$`technicals`, function(o) {
# return(o$getAsList())
# })
indicator_list <- self$`indicator`$getAsList()
for (x in names(indicator_list)) {
listObject[[paste("indicator_",x, sep = "")]] <- self$`indicator`[[x]]
}
security_list <- self$`security`$getAsList()
for (x in names(security_list)) {
listObject[[paste("security_",x, sep = "")]] <- self$`security`[[x]]
}
listObject[["next_page"]] <- self$`next_page`
return(listObject)
}
)
)
| /R/ApiResponseSecurityRelativeStrengthIndex.r | no_license | intrinio/r-sdk | R | false | false | 6,747 | r | # Intrinio API
#
# Welcome to the Intrinio API! Through our Financial Data Marketplace, we offer a wide selection of financial data feed APIs sourced by our own proprietary processes as well as from many data vendors. For a complete API request / response reference please view the [Intrinio API documentation](https://docs.intrinio.com/documentation/api_v2). If you need additional help in using the API, please visit the [Intrinio website](https://intrinio.com) and click on the chat icon in the lower right corner.
#
# OpenAPI spec version: 2.45.0
#
# Generated by: https://github.com/swagger-api/swagger-codegen.git
#' ApiResponseSecurityRelativeStrengthIndex Class
#'
#' @field technicals
#' @field indicator
#' @field security
#' @field next_page
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
ApiResponseSecurityRelativeStrengthIndex <- R6::R6Class(
'ApiResponseSecurityRelativeStrengthIndex',
public = list(
`technicals` = NA,
`technicals_data_frame` = NULL,
`indicator` = NA,
`security` = NA,
`next_page` = NA,
initialize = function(`technicals`, `indicator`, `security`, `next_page`){
if (!missing(`technicals`)) {
self$`technicals` <- `technicals`
}
if (!missing(`indicator`)) {
self$`indicator` <- `indicator`
}
if (!missing(`security`)) {
self$`security` <- `security`
}
if (!missing(`next_page`)) {
self$`next_page` <- `next_page`
}
},
toJSON = function() {
ApiResponseSecurityRelativeStrengthIndexObject <- list()
if (!is.null(self$`technicals`)) {
# If the object is an empty list or a list of R6 Objects
if (is.list(self$`technicals`) && ((length(self$`technicals`) == 0) || ((length(self$`technicals`) != 0 && R6::is.R6(self$`technicals`[[1]]))))) {
ApiResponseSecurityRelativeStrengthIndexObject[['technicals']] <- lapply(self$`technicals`, function(x) x$toJSON())
} else {
ApiResponseSecurityRelativeStrengthIndexObject[['technicals']] <- jsonlite::toJSON(self$`technicals`, auto_unbox = TRUE)
}
}
if (!is.null(self$`indicator`)) {
# If the object is an empty list or a list of R6 Objects
if (is.list(self$`indicator`) && ((length(self$`indicator`) == 0) || ((length(self$`indicator`) != 0 && R6::is.R6(self$`indicator`[[1]]))))) {
ApiResponseSecurityRelativeStrengthIndexObject[['indicator']] <- lapply(self$`indicator`, function(x) x$toJSON())
} else {
ApiResponseSecurityRelativeStrengthIndexObject[['indicator']] <- jsonlite::toJSON(self$`indicator`, auto_unbox = TRUE)
}
}
if (!is.null(self$`security`)) {
# If the object is an empty list or a list of R6 Objects
if (is.list(self$`security`) && ((length(self$`security`) == 0) || ((length(self$`security`) != 0 && R6::is.R6(self$`security`[[1]]))))) {
ApiResponseSecurityRelativeStrengthIndexObject[['security']] <- lapply(self$`security`, function(x) x$toJSON())
} else {
ApiResponseSecurityRelativeStrengthIndexObject[['security']] <- jsonlite::toJSON(self$`security`, auto_unbox = TRUE)
}
}
if (!is.null(self$`next_page`)) {
# If the object is an empty list or a list of R6 Objects
if (is.list(self$`next_page`) && ((length(self$`next_page`) == 0) || ((length(self$`next_page`) != 0 && R6::is.R6(self$`next_page`[[1]]))))) {
ApiResponseSecurityRelativeStrengthIndexObject[['next_page']] <- lapply(self$`next_page`, function(x) x$toJSON())
} else {
ApiResponseSecurityRelativeStrengthIndexObject[['next_page']] <- jsonlite::toJSON(self$`next_page`, auto_unbox = TRUE)
}
}
ApiResponseSecurityRelativeStrengthIndexObject
},
fromJSON = function(ApiResponseSecurityRelativeStrengthIndexJson) {
ApiResponseSecurityRelativeStrengthIndexObject <- jsonlite::fromJSON(ApiResponseSecurityRelativeStrengthIndexJson)
if (!is.null(ApiResponseSecurityRelativeStrengthIndexObject$`technicals`)) {
self$`technicals` <- ApiResponseSecurityRelativeStrengthIndexObject$`technicals`
}
if (!is.null(ApiResponseSecurityRelativeStrengthIndexObject$`indicator`)) {
self$`indicator` <- ApiResponseSecurityRelativeStrengthIndexObject$`indicator`
}
if (!is.null(ApiResponseSecurityRelativeStrengthIndexObject$`security`)) {
self$`security` <- ApiResponseSecurityRelativeStrengthIndexObject$`security`
}
if (!is.null(ApiResponseSecurityRelativeStrengthIndexObject$`next_page`)) {
self$`next_page` <- ApiResponseSecurityRelativeStrengthIndexObject$`next_page`
}
},
toJSONString = function() {
jsonlite::toJSON(self$toJSON(), auto_unbox = TRUE, pretty = TRUE)
},
fromJSONString = function(ApiResponseSecurityRelativeStrengthIndexJson) {
ApiResponseSecurityRelativeStrengthIndexObject <- jsonlite::fromJSON(ApiResponseSecurityRelativeStrengthIndexJson, simplifyDataFrame = FALSE)
self$setFromList(ApiResponseSecurityRelativeStrengthIndexObject)
},
setFromList = function(listObject) {
self$`technicals` <- lapply(listObject$`technicals`, function(x) {
RelativeStrengthIndexTechnicalValueObject <- RelativeStrengthIndexTechnicalValue$new()
RelativeStrengthIndexTechnicalValueObject$setFromList(x)
return(RelativeStrengthIndexTechnicalValueObject)
})
technicals_list <- lapply(self$`technicals`, function(x) {
return(x$getAsList())
})
self$`technicals_data_frame` <- do.call(rbind, lapply(technicals_list, data.frame))
self$`indicator` <- TechnicalIndicator$new()
self$`indicator`$setFromList(listObject$`indicator`)
self$`security` <- SecuritySummary$new()
self$`security`$setFromList(listObject$`security`)
if (!is.null(listObject$`next_page`)) {
self$`next_page` <- listObject$`next_page`
}
else {
self$`next_page` <- NA
}
},
getAsList = function() {
listObject = list()
# listObject[["technicals"]] <- lapply(self$`technicals`, function(o) {
# return(o$getAsList())
# })
indicator_list <- self$`indicator`$getAsList()
for (x in names(indicator_list)) {
listObject[[paste("indicator_",x, sep = "")]] <- self$`indicator`[[x]]
}
security_list <- self$`security`$getAsList()
for (x in names(security_list)) {
listObject[[paste("security_",x, sep = "")]] <- self$`security`[[x]]
}
listObject[["next_page"]] <- self$`next_page`
return(listObject)
}
)
)
|
#
# THESE LOOKUP TABLES ARE CREATED IN THE SCRIPT:
# GENERATE_LOOKUPS.R
#
# Each CSV is loaded once with data.table::fread(), its row-index column V1
# dropped, and the table is then copy()-ed under several variable names whose
# columns are renamed. This lets the same per-period counts be joined as
# "current", "previous" (prev_*) and "penultimate" (penul_*) period features
# without column-name clashes. hour_* tables double as day-level (prev_day /
# penul_day) lookups; *_attr tables hold attributed-download counts.
# ---- overall (no entity key): per-minute counts ----
minute_lookup <- fread("lookups/minute_lookup.csv")
minute_lookup$V1 <- NULL
prev_minute_lookup <- copy(minute_lookup)
names(minute_lookup) <- c('min_label', 'min_count')
names(prev_minute_lookup) <- c('prev_min_lab', 'prev_min_count')
# ---- overall: per-ten-minute counts ----
ten_minute_lookup <- fread("lookups/ten_minute_lookup.csv")
ten_minute_lookup$V1 <- NULL
prev_ten_minute_lookup <- copy(ten_minute_lookup)
penul_ten_minute_lookup <- copy(ten_minute_lookup)
names(ten_minute_lookup) <- c('ten_min_label', 'ten_min_count')
names(prev_ten_minute_lookup) <- c('prev_ten_min_lab', 'prev_ten_min_count')
names(penul_ten_minute_lookup) <- c('penul_ten_min_lab', 'penul_ten_min_count')
# ---- overall: per-hour counts (reused for day-level features) ----
hour_lookup <- fread("lookups/hour_lookup.csv")
hour_lookup$V1 <- NULL
prev_hour_lookup <- copy(hour_lookup)
penul_hour_lookup <- copy(hour_lookup)
prev_day_lookup <- copy(hour_lookup)
penul_day_lookup <- copy(hour_lookup)
names(hour_lookup) <- c('hour_label', 'hour_count')
names(prev_hour_lookup) <- c('prev_hour_lab', 'prev_hour_count')
names(penul_hour_lookup) <- c('penul_hour_lab', 'penul_hour_count')
names(prev_day_lookup) <- c('prev_day_lab', 'prev_day_count')
names(penul_day_lookup) <- c('penul_day_lab', 'penul_day_count')
# ---- overall: attributed counts per hour label ----
prev_day_attr_lookup <- fread("lookups/hour_attr_lookup.csv")
prev_day_attr_lookup$V1 <- NULL
penul_day_attr_lookup <- copy( prev_day_attr_lookup )
names(prev_day_attr_lookup) <- c('prev_day_lab', 'prev_day_attr')
names(penul_day_attr_lookup) <- c('penul_day_lab', 'penul_day_attr')
# ---- per-IP counts ----
ip_ten_minute_lookup <- fread("lookups/ip_ten_minute_lookup.csv")
ip_ten_minute_lookup$V1 <- NULL
prev_ten_minute_ip_lookup <- copy(ip_ten_minute_lookup)
penul_ten_minute_ip_lookup <- copy(ip_ten_minute_lookup)
names(ip_ten_minute_lookup) <- c('ten_min_label', 'ip', 'ten_min_ip_count')
names(prev_ten_minute_ip_lookup) <- c('prev_ten_min_lab', 'ip', 'prev_ten_min_ip_count')
names(penul_ten_minute_ip_lookup) <- c('penul_ten_min_lab', 'ip', 'penul_ten_min_ip_count')
hour_ip_lookup <- fread("lookups/hour_ip_lookup.csv")
hour_ip_lookup$V1 <- NULL
prev_hour_ip_lookup <- copy(hour_ip_lookup)
penul_hour_ip_lookup <- copy(hour_ip_lookup)
prev_day_ip_lookup <- copy(hour_ip_lookup)
penul_day_ip_lookup <- copy(hour_ip_lookup)
names(hour_ip_lookup) <- c('hour_label', 'ip', 'hour_ip_count')
names(prev_hour_ip_lookup) <- c('prev_hour_lab', 'ip', 'prev_hour_ip_count')
names(penul_hour_ip_lookup) <- c('penul_hour_lab', 'ip', 'penul_hour_ip_count')
names(prev_day_ip_lookup) <- c('prev_day_lab', 'ip', 'prev_day_ip_count')
names(penul_day_ip_lookup) <- c('penul_day_lab', 'ip', 'penul_day_ip_count')
prev_day_ip_attr_lookup <- fread("lookups/hour_ip_attr_lookup.csv")
prev_day_ip_attr_lookup$V1 <- NULL
penul_day_ip_attr_lookup <- copy( prev_day_ip_attr_lookup )
names(prev_day_ip_attr_lookup) <- c('prev_day_lab', 'ip', 'prev_day_ip_attr')
names(penul_day_ip_attr_lookup) <- c('penul_day_lab', 'ip', 'penul_day_ip_attr')
# ---- per-app counts ----
app_ten_minute_lookup <- fread("lookups/app_ten_minute_lookup.csv")
app_ten_minute_lookup$V1 <- NULL
prev_ten_minute_app_lookup <- copy(app_ten_minute_lookup)
penul_ten_minute_app_lookup <- copy(app_ten_minute_lookup)
names(app_ten_minute_lookup) <- c('ten_min_label', 'app', 'ten_min_app_count')
names(prev_ten_minute_app_lookup) <- c('prev_ten_min_lab', 'app', 'prev_ten_min_app_count')
names(penul_ten_minute_app_lookup) <- c('penul_ten_min_lab', 'app', 'penul_ten_min_app_count')
hour_app_lookup <- fread("lookups/hour_app_lookup.csv")
hour_app_lookup$V1 <- NULL
prev_hour_app_lookup <- copy(hour_app_lookup)
penul_hour_app_lookup <- copy(hour_app_lookup)
prev_day_app_lookup <- copy(hour_app_lookup)
penul_day_app_lookup <- copy(hour_app_lookup)
names(hour_app_lookup) <- c('hour_label', 'app', 'hour_app_count')
names(prev_hour_app_lookup) <- c('prev_hour_lab', 'app', 'prev_hour_app_count')
names(penul_hour_app_lookup) <- c('penul_hour_lab', 'app', 'penul_hour_app_count')
names(prev_day_app_lookup) <- c('prev_day_lab', 'app', 'prev_day_app_count')
names(penul_day_app_lookup) <- c('penul_day_lab', 'app', 'penul_day_app_count')
prev_day_app_attr_lookup <- fread("lookups/hour_app_attr_lookup.csv")
prev_day_app_attr_lookup$V1 <- NULL
penul_day_app_attr_lookup <- copy( prev_day_app_attr_lookup )
names(prev_day_app_attr_lookup) <- c('prev_day_lab', 'app', 'prev_day_app_attr')
names(penul_day_app_attr_lookup) <- c('penul_day_lab', 'app', 'penul_day_app_attr')
# ---- per-OS counts ----
os_ten_minute_lookup <- fread("lookups/os_ten_minute_lookup.csv")
os_ten_minute_lookup$V1 <- NULL
prev_ten_minute_os_lookup <- copy(os_ten_minute_lookup)
penul_ten_minute_os_lookup <- copy(os_ten_minute_lookup)
names(os_ten_minute_lookup) <- c('ten_min_label', 'os', 'ten_min_os_count')
names(prev_ten_minute_os_lookup) <- c('prev_ten_min_lab', 'os', 'prev_ten_min_os_count')
names(penul_ten_minute_os_lookup) <- c('penul_ten_min_lab', 'os', 'penul_ten_min_os_count')
hour_os_lookup <- fread("lookups/hour_os_lookup.csv")
hour_os_lookup$V1 <- NULL
prev_hour_os_lookup <- copy(hour_os_lookup)
penul_hour_os_lookup <- copy(hour_os_lookup)
prev_day_os_lookup <- copy(hour_os_lookup)
penul_day_os_lookup <- copy(hour_os_lookup)
names(hour_os_lookup) <- c('hour_label', 'os', 'hour_os_count')
names(prev_hour_os_lookup) <- c('prev_hour_lab', 'os', 'prev_hour_os_count')
names(penul_hour_os_lookup) <- c('penul_hour_lab', 'os', 'penul_hour_os_count')
names(prev_day_os_lookup) <- c('prev_day_lab', 'os', 'prev_day_os_count')
names(penul_day_os_lookup) <- c('penul_day_lab', 'os', 'penul_day_os_count')
prev_day_os_attr_lookup <- fread("lookups/hour_os_attr_lookup.csv")
prev_day_os_attr_lookup$V1 <- NULL
penul_day_os_attr_lookup <- copy( prev_day_os_attr_lookup )
names(prev_day_os_attr_lookup) <- c('prev_day_lab', 'os', 'prev_day_os_attr')
names(penul_day_os_attr_lookup) <- c('penul_day_lab', 'os', 'penul_day_os_attr')
# ---- per-device counts ----
device_ten_minute_lookup <- fread("lookups/device_ten_minute_lookup.csv")
device_ten_minute_lookup$V1 <- NULL
prev_ten_minute_device_lookup <- copy(device_ten_minute_lookup)
penul_ten_minute_device_lookup <- copy(device_ten_minute_lookup)
names(device_ten_minute_lookup) <- c('ten_min_label', 'device', 'ten_min_device_count')
names(prev_ten_minute_device_lookup)<- c('prev_ten_min_lab', 'device', 'prev_ten_min_device_count')
names(penul_ten_minute_device_lookup)<- c('penul_ten_min_lab', 'device', 'penul_ten_min_device_count')
hour_device_lookup <- fread("lookups/hour_device_lookup.csv")
hour_device_lookup$V1 <- NULL
prev_hour_device_lookup <- copy(hour_device_lookup)
penul_hour_device_lookup <- copy(hour_device_lookup)
prev_day_device_lookup <- copy(hour_device_lookup)
penul_day_device_lookup <- copy(hour_device_lookup)
names(hour_device_lookup) <- c('hour_label', 'device', 'hour_device_count')
names(prev_hour_device_lookup) <- c('prev_hour_lab', 'device', 'prev_hour_device_count')
names(penul_hour_device_lookup) <- c('penul_hour_lab', 'device', 'penul_hour_device_count')
names(prev_day_device_lookup) <- c('prev_day_lab', 'device', 'prev_day_device_count')
names(penul_day_device_lookup) <- c('penul_day_lab', 'device', 'penul_day_device_count')
prev_day_device_attr_lookup <- fread("lookups/hour_device_attr_lookup.csv")
prev_day_device_attr_lookup$V1 <- NULL
penul_day_device_attr_lookup <- copy( prev_day_device_attr_lookup )
names(prev_day_device_attr_lookup) <- c('prev_day_lab', 'device', 'prev_day_device_attr')
names(penul_day_device_attr_lookup) <- c('penul_day_lab', 'device', 'penul_day_device_attr')
# ---- per-channel counts ----
channel_ten_minute_lookup <- fread("lookups/channel_ten_minute_lookup.csv")
channel_ten_minute_lookup$V1 <- NULL
prev_ten_minute_channel_lookup <- copy(channel_ten_minute_lookup)
penul_ten_minute_channel_lookup <- copy(channel_ten_minute_lookup)
names(channel_ten_minute_lookup) <- c('ten_min_label', 'channel', 'ten_min_channel_count')
names(prev_ten_minute_channel_lookup) <- c('prev_ten_min_lab', 'channel', 'prev_ten_min_channel_count')
names(penul_ten_minute_channel_lookup) <- c('penul_ten_min_lab', 'channel', 'penul_ten_min_channel_count')
hour_channel_lookup <- fread("lookups/hour_channel_lookup.csv")
hour_channel_lookup$V1 <- NULL
prev_hour_channel_lookup <- copy(hour_channel_lookup)
penul_hour_channel_lookup <- copy(hour_channel_lookup)
prev_day_channel_lookup <- copy(hour_channel_lookup)
penul_day_channel_lookup <- copy(hour_channel_lookup)
names(hour_channel_lookup) <- c('hour_label', 'channel', 'hour_channel_count')
names(prev_hour_channel_lookup) <- c('prev_hour_lab', 'channel', 'prev_hour_channel_count')
names(penul_hour_channel_lookup) <- c('penul_hour_lab', 'channel', 'penul_hour_channel_count')
names(prev_day_channel_lookup) <- c('prev_day_lab', 'channel', 'prev_day_channel_count')
names(penul_day_channel_lookup) <- c('penul_day_lab', 'channel', 'penul_day_channel_count')
prev_day_channel_attr_lookup <- fread("lookups/hour_channel_attr_lookup.csv")
prev_day_channel_attr_lookup$V1 <- NULL
penul_day_channel_attr_lookup <- copy( prev_day_channel_attr_lookup )
names(prev_day_channel_attr_lookup) <- c('prev_day_lab', 'channel', 'prev_day_channel_attr')
names(penul_day_channel_attr_lookup) <- c('penul_day_lab', 'channel', 'penul_day_channel_attr')
| /AppDownloads/LOAD_LOOKUPS.R | no_license | john-hawkins/Experiments | R | false | false | 10,941 | r | #
# THESE LOOKUP TABLES ARE CREATED IN THE SCRIPT:
# GENERATE_LOOKUPS.R
# Pattern for every table below: fread the CSV, drop the write.csv index
# column V1, data.table::copy() it once per time offset, then rename the
# columns so each copy can be merged under a distinct key/prefix
# (current / previous / penultimate window).
## Overall counts per minute
minute_lookup <- fread("lookups/minute_lookup.csv")
minute_lookup$V1 <- NULL
prev_minute_lookup <- copy(minute_lookup)
names(minute_lookup) <- c('min_label', 'min_count')
names(prev_minute_lookup) <- c('prev_min_lab', 'prev_min_count')
## Overall counts per ten-minute window
ten_minute_lookup <- fread("lookups/ten_minute_lookup.csv")
ten_minute_lookup$V1 <- NULL
prev_ten_minute_lookup <- copy(ten_minute_lookup)
penul_ten_minute_lookup <- copy(ten_minute_lookup)
names(ten_minute_lookup) <- c('ten_min_label', 'ten_min_count')
names(prev_ten_minute_lookup) <- c('prev_ten_min_lab', 'prev_ten_min_count')
names(penul_ten_minute_lookup) <- c('penul_ten_min_lab', 'penul_ten_min_count')
## Overall counts per hour (copies reused for day-level joins)
hour_lookup <- fread("lookups/hour_lookup.csv")
hour_lookup$V1 <- NULL
prev_hour_lookup <- copy(hour_lookup)
penul_hour_lookup <- copy(hour_lookup)
prev_day_lookup <- copy(hour_lookup)
penul_day_lookup <- copy(hour_lookup)
names(hour_lookup) <- c('hour_label', 'hour_count')
names(prev_hour_lookup) <- c('prev_hour_lab', 'prev_hour_count')
names(penul_hour_lookup) <- c('penul_hour_lab', 'penul_hour_count')
names(prev_day_lookup) <- c('prev_day_lab', 'prev_day_count')
names(penul_day_lookup) <- c('penul_day_lab', 'penul_day_count')
## Overall attribution by day (sourced from the hourly attr file)
prev_day_attr_lookup <- fread("lookups/hour_attr_lookup.csv")
prev_day_attr_lookup$V1 <- NULL
penul_day_attr_lookup <- copy( prev_day_attr_lookup )
names(prev_day_attr_lookup) <- c('prev_day_lab', 'prev_day_attr')
names(penul_day_attr_lookup) <- c('penul_day_lab', 'penul_day_attr')
## Per-IP counts per ten-minute window
ip_ten_minute_lookup <- fread("lookups/ip_ten_minute_lookup.csv")
ip_ten_minute_lookup$V1 <- NULL
prev_ten_minute_ip_lookup <- copy(ip_ten_minute_lookup)
penul_ten_minute_ip_lookup <- copy(ip_ten_minute_lookup)
names(ip_ten_minute_lookup) <- c('ten_min_label', 'ip', 'ten_min_ip_count')
names(prev_ten_minute_ip_lookup) <- c('prev_ten_min_lab', 'ip', 'prev_ten_min_ip_count')
names(penul_ten_minute_ip_lookup) <- c('penul_ten_min_lab', 'ip', 'penul_ten_min_ip_count')
## Per-IP counts per hour
hour_ip_lookup <- fread("lookups/hour_ip_lookup.csv")
hour_ip_lookup$V1 <- NULL
prev_hour_ip_lookup <- copy(hour_ip_lookup)
penul_hour_ip_lookup <- copy(hour_ip_lookup)
prev_day_ip_lookup <- copy(hour_ip_lookup)
penul_day_ip_lookup <- copy(hour_ip_lookup)
names(hour_ip_lookup) <- c('hour_label', 'ip', 'hour_ip_count')
names(prev_hour_ip_lookup) <- c('prev_hour_lab', 'ip', 'prev_hour_ip_count')
names(penul_hour_ip_lookup) <- c('penul_hour_lab', 'ip', 'penul_hour_ip_count')
names(prev_day_ip_lookup) <- c('prev_day_lab', 'ip', 'prev_day_ip_count')
names(penul_day_ip_lookup) <- c('penul_day_lab', 'ip', 'penul_day_ip_count')
## Per-IP attribution by day
prev_day_ip_attr_lookup <- fread("lookups/hour_ip_attr_lookup.csv")
prev_day_ip_attr_lookup$V1 <- NULL
penul_day_ip_attr_lookup <- copy( prev_day_ip_attr_lookup )
names(prev_day_ip_attr_lookup) <- c('prev_day_lab', 'ip', 'prev_day_ip_attr')
names(penul_day_ip_attr_lookup) <- c('penul_day_lab', 'ip', 'penul_day_ip_attr')
## Per-app counts per ten-minute window
app_ten_minute_lookup <- fread("lookups/app_ten_minute_lookup.csv")
app_ten_minute_lookup$V1 <- NULL
prev_ten_minute_app_lookup <- copy(app_ten_minute_lookup)
penul_ten_minute_app_lookup <- copy(app_ten_minute_lookup)
names(app_ten_minute_lookup) <- c('ten_min_label', 'app', 'ten_min_app_count')
names(prev_ten_minute_app_lookup) <- c('prev_ten_min_lab', 'app', 'prev_ten_min_app_count')
names(penul_ten_minute_app_lookup) <- c('penul_ten_min_lab', 'app', 'penul_ten_min_app_count')
## Per-app counts per hour
hour_app_lookup <- fread("lookups/hour_app_lookup.csv")
hour_app_lookup$V1 <- NULL
prev_hour_app_lookup <- copy(hour_app_lookup)
penul_hour_app_lookup <- copy(hour_app_lookup)
prev_day_app_lookup <- copy(hour_app_lookup)
penul_day_app_lookup <- copy(hour_app_lookup)
names(hour_app_lookup) <- c('hour_label', 'app', 'hour_app_count')
names(prev_hour_app_lookup) <- c('prev_hour_lab', 'app', 'prev_hour_app_count')
names(penul_hour_app_lookup) <- c('penul_hour_lab', 'app', 'penul_hour_app_count')
names(prev_day_app_lookup) <- c('prev_day_lab', 'app', 'prev_day_app_count')
names(penul_day_app_lookup) <- c('penul_day_lab', 'app', 'penul_day_app_count')
## Per-app attribution by day
prev_day_app_attr_lookup <- fread("lookups/hour_app_attr_lookup.csv")
prev_day_app_attr_lookup$V1 <- NULL
penul_day_app_attr_lookup <- copy( prev_day_app_attr_lookup )
names(prev_day_app_attr_lookup) <- c('prev_day_lab', 'app', 'prev_day_app_attr')
names(penul_day_app_attr_lookup) <- c('penul_day_lab', 'app', 'penul_day_app_attr')
## Per-OS counts per ten-minute window
os_ten_minute_lookup <- fread("lookups/os_ten_minute_lookup.csv")
os_ten_minute_lookup$V1 <- NULL
prev_ten_minute_os_lookup <- copy(os_ten_minute_lookup)
penul_ten_minute_os_lookup <- copy(os_ten_minute_lookup)
names(os_ten_minute_lookup) <- c('ten_min_label', 'os', 'ten_min_os_count')
names(prev_ten_minute_os_lookup) <- c('prev_ten_min_lab', 'os', 'prev_ten_min_os_count')
names(penul_ten_minute_os_lookup) <- c('penul_ten_min_lab', 'os', 'penul_ten_min_os_count')
## Per-OS counts per hour
hour_os_lookup <- fread("lookups/hour_os_lookup.csv")
hour_os_lookup$V1 <- NULL
prev_hour_os_lookup <- copy(hour_os_lookup)
penul_hour_os_lookup <- copy(hour_os_lookup)
prev_day_os_lookup <- copy(hour_os_lookup)
penul_day_os_lookup <- copy(hour_os_lookup)
names(hour_os_lookup) <- c('hour_label', 'os', 'hour_os_count')
names(prev_hour_os_lookup) <- c('prev_hour_lab', 'os', 'prev_hour_os_count')
names(penul_hour_os_lookup) <- c('penul_hour_lab', 'os', 'penul_hour_os_count')
names(prev_day_os_lookup) <- c('prev_day_lab', 'os', 'prev_day_os_count')
names(penul_day_os_lookup) <- c('penul_day_lab', 'os', 'penul_day_os_count')
## Per-OS attribution by day
prev_day_os_attr_lookup <- fread("lookups/hour_os_attr_lookup.csv")
prev_day_os_attr_lookup$V1 <- NULL
penul_day_os_attr_lookup <- copy( prev_day_os_attr_lookup )
names(prev_day_os_attr_lookup) <- c('prev_day_lab', 'os', 'prev_day_os_attr')
names(penul_day_os_attr_lookup) <- c('penul_day_lab', 'os', 'penul_day_os_attr')
## Per-device counts per ten-minute window
device_ten_minute_lookup <- fread("lookups/device_ten_minute_lookup.csv")
device_ten_minute_lookup$V1 <- NULL
prev_ten_minute_device_lookup <- copy(device_ten_minute_lookup)
penul_ten_minute_device_lookup <- copy(device_ten_minute_lookup)
names(device_ten_minute_lookup) <- c('ten_min_label', 'device', 'ten_min_device_count')
names(prev_ten_minute_device_lookup)<- c('prev_ten_min_lab', 'device', 'prev_ten_min_device_count')
names(penul_ten_minute_device_lookup)<- c('penul_ten_min_lab', 'device', 'penul_ten_min_device_count')
## Per-device counts per hour
hour_device_lookup <- fread("lookups/hour_device_lookup.csv")
hour_device_lookup$V1 <- NULL
prev_hour_device_lookup <- copy(hour_device_lookup)
penul_hour_device_lookup <- copy(hour_device_lookup)
prev_day_device_lookup <- copy(hour_device_lookup)
penul_day_device_lookup <- copy(hour_device_lookup)
names(hour_device_lookup) <- c('hour_label', 'device', 'hour_device_count')
names(prev_hour_device_lookup) <- c('prev_hour_lab', 'device', 'prev_hour_device_count')
names(penul_hour_device_lookup) <- c('penul_hour_lab', 'device', 'penul_hour_device_count')
names(prev_day_device_lookup) <- c('prev_day_lab', 'device', 'prev_day_device_count')
names(penul_day_device_lookup) <- c('penul_day_lab', 'device', 'penul_day_device_count')
## Per-device attribution by day
prev_day_device_attr_lookup <- fread("lookups/hour_device_attr_lookup.csv")
prev_day_device_attr_lookup$V1 <- NULL
penul_day_device_attr_lookup <- copy( prev_day_device_attr_lookup )
names(prev_day_device_attr_lookup) <- c('prev_day_lab', 'device', 'prev_day_device_attr')
names(penul_day_device_attr_lookup) <- c('penul_day_lab', 'device', 'penul_day_device_attr')
## Per-channel counts per ten-minute window
channel_ten_minute_lookup <- fread("lookups/channel_ten_minute_lookup.csv")
channel_ten_minute_lookup$V1 <- NULL
prev_ten_minute_channel_lookup <- copy(channel_ten_minute_lookup)
penul_ten_minute_channel_lookup <- copy(channel_ten_minute_lookup)
names(channel_ten_minute_lookup) <- c('ten_min_label', 'channel', 'ten_min_channel_count')
names(prev_ten_minute_channel_lookup) <- c('prev_ten_min_lab', 'channel', 'prev_ten_min_channel_count')
names(penul_ten_minute_channel_lookup) <- c('penul_ten_min_lab', 'channel', 'penul_ten_min_channel_count')
## Per-channel counts per hour
hour_channel_lookup <- fread("lookups/hour_channel_lookup.csv")
hour_channel_lookup$V1 <- NULL
prev_hour_channel_lookup <- copy(hour_channel_lookup)
penul_hour_channel_lookup <- copy(hour_channel_lookup)
prev_day_channel_lookup <- copy(hour_channel_lookup)
penul_day_channel_lookup <- copy(hour_channel_lookup)
names(hour_channel_lookup) <- c('hour_label', 'channel', 'hour_channel_count')
names(prev_hour_channel_lookup) <- c('prev_hour_lab', 'channel', 'prev_hour_channel_count')
names(penul_hour_channel_lookup) <- c('penul_hour_lab', 'channel', 'penul_hour_channel_count')
names(prev_day_channel_lookup) <- c('prev_day_lab', 'channel', 'prev_day_channel_count')
names(penul_day_channel_lookup) <- c('penul_day_lab', 'channel', 'penul_day_channel_count')
## Per-channel attribution by day
prev_day_channel_attr_lookup <- fread("lookups/hour_channel_attr_lookup.csv")
prev_day_channel_attr_lookup$V1 <- NULL
penul_day_channel_attr_lookup <- copy( prev_day_channel_attr_lookup )
names(prev_day_channel_attr_lookup) <- c('prev_day_lab', 'channel', 'prev_day_channel_attr')
names(penul_day_channel_attr_lookup) <- c('penul_day_lab', 'channel', 'penul_day_channel_attr')
|
#install.packages("dlnm")
library(dlnm)
# `drug` is an example dataset shipped with dlnm
head(drug,3)
#make the matrix of exposure histories (28 lag columns, lag0..lag27)
Qdrug<-as.matrix(drug[,rep(7:4,each=7)])
colnames(Qdrug)<-paste("lag",0:27,sep="")
head(Qdrug)
#define a cross-basis function: argvar defines the exposure-response function, and arglag defines the lag-response function
#"lin": linear function "ns": natural cubic spline
cbdrug<-crossbasis(Qdrug,lag=27,argvar=list("lin"),arglag=list(fun="ns",knots=c(9,18)))
summary(cbdrug)
#add the cross-basis matrix to the simple linear model (adjusting for sex)
mdrug<-lm(out~cbdrug+sex,drug)
#predicting specific effect summaries (doses 0,5,...,100) to interpret the estimated exposure-lag-response association
pdrug<-crosspred(cbdrug,mdrug,at=0:20*5)
#plots: 3-D surface, then slices at a fixed dose and at a fixed lag
plot(pdrug,zlab="Effect",xlab="Dose",ylab="Lag (days)")
plot(pdrug,var=60,ylab="Effect at dose 60",xlab="Lag (days)",ylim=c(-1,5))
plot(pdrug,lag=10,ylab="Effect at lag 10",xlab="Dose",ylim=c(-1,5))
#the outcome associated with an intake of a dose level of 10 five days earlier
pdrug$matfit["10","lag5"]
#overall cumulative effects associated with an exposure to 10
with(pdrug,cbind(allfit,alllow,allhigh)["10",]) | /DLM.R | no_license | benhhu/DLM | R | false | false | 1,119 | r | #install.packages("dlnm")
library(dlnm)
head(drug,3)
#make the matrix of exposure histories
Qdrug<-as.matrix(drug[,rep(7:4,each=7)])
colnames(Qdrug)<-paste("lag",0:27,sep="")
head(Qdrug)
#define a cross-basis function: argvar deines the exposure-response function, and arglag deine the lag-response function
#"lin": linear function "ns": natural cubic spline
cbdrug<-crossbasis(Qdrug,lag=27,argvar=list("lin"),arglag=list(fun="ns",knots=c(9,18)))
summary(cbdrug)
#add the cross-basis matrix to the simple linear model
mdrug<-lm(out~cbdrug+sex,drug)
#predicting specific effect summaries to interpret the estimated exsposure-lag-response association
pdrug<-crosspred(cbdrug,mdrug,at=0:20*5)
#plots
plot(pdrug,zlab="Effect",xlab="Dose",ylab="Lag (days)")
plot(pdrug,var=60,ylab="Effect at dose 60",xlab="Lag (days)",ylim=c(-1,5))
plot(pdrug,lag=10,ylab="Effect at lag 10",xlab="Dose",ylim=c(-1,5))
#the outcome associated with an intake of a dose level of 10 five days earlier
pdrug$matfit["10","lag5"]
#overall cumulative effects associated with an exposure to 10
with(pdrug,cbind(allfit,alllow,allhigh)["10",]) |
#-----FunMap-----
library(mvtnorm)
# Phenotype tables (rows = individuals, columns = time points) for the salt
# and control (ck) conditions, plus the SNP genotype table (rows = SNPs,
# coded 0/1/2 with 9 = missing).
salttable <- read.csv("./path/saltstable.csv",header = T,row.names = 1)
cktable <- read.csv("./path/ckstable.csv",header = T,row.names = 1)
genotable <- read.csv("./path/genostable.csv",header = T,row.names = 1)
get_miu = function(miu_par, t){
  # Mean growth curve at times t: a logistic rise minus an exponential
  # decay correction.  miu_par = (asymptote, logistic scale, logistic rate,
  # decay scale, decay rate).  Vectorised over t.
  rise  <- miu_par[1] / (1 + miu_par[2] * exp(-miu_par[3] * t))
  decay <- miu_par[4] * exp(-miu_par[5] * t)
  rise - decay
}
get_SAD1 <- function(x, t){
  # Block-diagonal SAD(1) covariance for the bivariate longitudinal trait.
  # x = (phi_1, nu_1, phi_2, nu_2): antedependence parameter and innovation
  # scale for each condition.  Returns a 2n x 2n matrix with zero
  # cross-condition covariance, n = length(t).
  n <- length(t)
  build_block <- function(phi, v){
    denom <- 1 - phi^2
    blk <- array(1, dim = c(n, n))
    for (row in 1:n){
      blk[row, row:n] <- phi^(c(row:n) - row) * (1 - phi^(2 * row)) / denom
      blk[row:n, row] <- blk[row, row:n]   # mirror to keep symmetry
    }
    blk * v^2
  }
  upper <- build_block(x[1], x[2])
  lower <- build_block(x[3], x[4])
  zeros <- array(0, dim = c(n, n))
  rbind(cbind(upper, zeros), cbind(zeros, lower))
}
merge_miu <- function(x, t){
  # Stack the two condition-specific mean curves: x[1:5] parameterises the
  # first (ck) curve, x[6:10] the second (salt); see get_miu().
  c(get_miu(x[1:5], t), get_miu(x[6:10], t))
}
L0 = function(par,t,y){
  # Negative joint log-likelihood under H0 (no QTL): one shared bivariate
  # mean curve for everyone.  par[1:10] = two 5-parameter mean curves
  # (ck, salt), par[11:14] = SAD(1) covariance parameters.  y is the
  # cbind(ck, salt) phenotype matrix.  Uses mvtnorm::dmvnorm.
  miu = merge_miu(par[1:10],t)
  SAD1 = get_SAD1(par[11:14],t)
  L0 = -sum(dmvnorm(y,miu,SAD1,log = T))
  L0
}
L1 = function(par,t,marker){
  # Negative log-likelihood under H1 with THREE genotype classes (0/1/2).
  # par[1:30] = three 10-parameter bivariate mean curves, par[31:34] = the
  # shared SAD(1) covariance.  Reads the global phenotype tables
  # cktable/salttable and splits rows by the SNP's genotype codes.
  ckgeno1 <- cktable[which(marker==0),]
  saltgeno1 <- salttable[which(marker==0),]
  ckgeno2 <- cktable[which(marker==1),]
  saltgeno2 <- salttable[which(marker==1),]
  ckgeno3 <- cktable[which(marker==2),]
  saltgeno3 <- salttable[which(marker==2),]
  SAD1 = get_SAD1(par[31:34],t)
  miu1 = merge_miu(par[1:10],t)
  miu2 = merge_miu(par[11:20],t)
  miu3 = merge_miu(par[21:30],t)
  # Each genotype group is scored against its own mean curve.
  l1_1 <- sum(dmvnorm(cbind(ckgeno1,saltgeno1),miu1,SAD1,log = T))
  l1_2 <- sum(dmvnorm(cbind(ckgeno2,saltgeno2),miu2,SAD1,log = T))
  l1_3 <- sum(dmvnorm(cbind(ckgeno3,saltgeno3),miu3,SAD1,log = T))
  L1 <- -(l1_1 + l1_2 + l1_3)
  L1
}
L2 = function(par,t,marker){
  # Negative log-likelihood under H1 with TWO genotype classes (0/1) —
  # used when genotype 2 is absent for the SNP.  par[1:20] = two bivariate
  # mean curves, par[21:24] = shared SAD(1) covariance.  Reads the global
  # tables cktable/salttable.
  ckgeno1 <- cktable[which(marker==0),]
  saltgeno1 <- salttable[which(marker==0),]
  ckgeno2 <- cktable[which(marker==1),]
  saltgeno2 <- salttable[which(marker==1),]
  SAD1 = get_SAD1(par[21:24],t)
  miu1 = merge_miu(par[1:10],t)
  miu2 = merge_miu(par[11:20],t)
  l1_1 <- sum(dmvnorm(cbind(ckgeno1,saltgeno1),miu1,SAD1,log = T))
  l1_2 <- sum(dmvnorm(cbind(ckgeno2,saltgeno2),miu2,SAD1,log = T))
  L2 <- -(l1_1 + l1_2)
  L2
}
# Measurement time points (days 13..78 in steps of 5); used globally by LR().
t = seq(13,78,5)
LR = function(marker){
  # Likelihood-ratio statistic for one SNP: 2 * (NLL_H0 - NLL_H1).
  # marker codes: 0/1/2 = genotype classes, 9 = missing (those rows are
  # dropped from the phenotype matrix).  Reads the globals cktable,
  # salttable and t, and relies on L0/L1/L2 defined above.
  if(length(which(marker==9))==0){
    y_all = cbind(cktable,salttable)
  }else{
    y_all <- cbind(cktable[-which(marker == 9),],salttable[-which(marker == 9),])
  }
  # The null fit is identical in both genotype-count branches, so fit it once
  # (the original duplicated this optim call in each branch).
  NH0 = optim(c(71,21,0.062,7.8,0.0165,71.8,16.3,0.0336,12,0.05,0.001,50,0.001,50),L0,t=t,y=y_all,method = "BFGS",control=list(maxit=50000))
  if(2 %in% marker){
    # Three observed genotypes: replicate the H0 curve parameters as the
    # starting values for each genotype-specific curve.
    pars <- c(NH0$par[1:10],NH0$par[1:10],NH0$par[1:10],NH0$par[11:12],NH0$par[11:12])
    NH1 <- optim(pars,L1,t=t,marker=marker,method="BFGS",control=list(maxit=50000))
  }else{
    # Only two genotypes observed.
    pars <- c(NH0$par[1:10],NH0$par[1:10],NH0$par[11:12],NH0$par[11:12])
    NH1 <- optim(pars,L2,t=t,marker=marker,method="BFGS",control=list(maxit=50000))
  }
  LR <- 2*(NH0$value - NH1$value)
  cat(i,NH0$value,NH1$value,LR,"\n")  # NOTE(review): i is the caller's loop index (a global)
  LR
}
# Genome scan: compute the LR statistic for every SNP row of genotable and
# save the resulting vector.
lr = rep(0,(dim(genotable)[1]))
for (i in 1:dim(genotable)[1]) {
  lr[i] = LR(as.numeric(genotable[i,]))
}
save(lr,file = "filename.Rdata")
#-----FunCluster-----
library(MASS)
setwd("./path")  # NOTE(review): setwd in a shared script is fragile; prefer absolute paths
SsquareDiff <<- 1.0e-5        # sigma^2 convergence tolerance (not referenced below — confirm)
IncLimit <<- 3                # consecutive likelihood drops before flipping rho search direction
REPEAT_LIMIT <<- 200          # maximum EM iterations
LGD_J <<- 5                   # Legendre order used for the mean curves
START_J <<- 8                 # smallest cluster count to fit
END_J <<- 15                  # largest cluster count to fit
LIKELIHOOD_DIFF <<- 0.5       # EM stopping threshold
rhoIncreasement <<- 0.002     # step size of the rho hill-climb (GetNewRho.b)
rhoStart <<-0.8               # initial autocorrelation
datafile.L <<- "ck_gene_effect.csv.csv"    # NOTE(review): double ".csv" extension — confirm file name
datafile.H <<- "salt_gene_effect.csv.csv"
set.seed(Sys.time())          # seeds from the clock (coerced to integer), so runs are not reproducible
# Legendre Polynominals of order 1..6; each term references the local
# variable `tt` and is evaluated inside GetMX via eval().
LgdP <- expression( tt,
                    ( 3* tt^2 - 1 )/2 ,
                    ( 5 * tt^3 - 3* tt )/2,
                    ( 35 * tt^4 - 30 * tt^2 + 3)/8,
                    ( 63 * tt^5 - 70 * tt^3 + 15 * tt )/8,
                    ( 231 * tt^6 - 315 * tt^4 + 105 * tt^2 - 5)/16 )
GetMR <- function(rho,times)
{
  # AR(1)-style correlation matrix: entry (i, j) = rho^|times[j] - times[i]|.
  # outer() builds the same matrix the original nested loops produced.
  outer(times, times, function(ti, tj) rho^(abs(tj - ti)))
}
GetMX <- function(times,r)
{
  # Design matrix of Legendre polynomials: an intercept column plus orders
  # 1..r evaluated at `times` rescaled onto [-1, 1].  Depends on the global
  # expression vector LgdP, whose terms read the local variable `tt`.
  tnum = length(times)
  X <- matrix(1,tnum,r+1)
  for(t in 1:tnum ){
    # linear map of times[t] onto [-1, 1]
    tt <- -1 + 2*(times[t] - times[1])/(times[tnum] - times[1])
    for(i in 1:r){
      X[t,i+1] <- eval(LgdP[i])
    }
  }
  return (X)
}
GetInitPij <- function(N,J)
{
  # Random initial posterior-membership matrix: each of the N rows is drawn
  # around the uniform value 1/J (sd = half of that) and renormalised to sum
  # to 1.  NOTE(review): rnorm can yield negative entries, so rows may
  # contain negative "probabilities" — confirm the first E-step tolerates this.
  P <- matrix(1/J,N,J)
  for (i in 1:N){
    P[i,] <- rnorm(J, mean=1/J, sd= 0.5 * 1/J )
    P[i,] <- P[i,]/sum(P[i,])
  }
  return (P)
}
GetMeanMatrix <- function(J,times,P,X,Asdata,InvMSigema)
{
  # M-step: posterior-weighted generalised-least-squares fit of the Legendre
  # coefficients for each cluster j, returning the J x length(times) matrix
  # of fitted mean curves (X %*% uj).  P = memberships, X = Legendre design,
  # InvMSigema = inverse covariance.
  m <- matrix(NA,J,length(times))
  N <- length(Asdata[,1])
  r <- length(X[1,])
  # Hoisted pieces of the normal equations: t(X) Sigma^-1 and t(X) Sigma^-1 X.
  xInvSigema <- t(X) %*% InvMSigema
  xInvSigemax <- xInvSigema%*% X
  for( j in 1:J){
    ud <- matrix(0, r, r)
    for( i in 1: N){
      ud <- ud + P[i,j]*xInvSigemax
    }
    ubd <- matrix(0, r, 1)
    for( i in 1: N){
      ubd <- ubd + P[i,j]*( xInvSigema %*% (Asdata[i,]) )
    }
    uj <- ginv(ud) %*% ubd   # MASS::ginv tolerates singular designs
    m[j,] <- X %*% uj
  }
  return(m)
}
GetNewSsquare <- function(Asdata,m,MR,times,P,J)
{
  # M-step update of the common variance sigma^2: posterior-weighted sum of
  # Mahalanobis residuals (w.r.t. the correlation matrix MR) over all
  # observations and clusters, divided by (#timepoints * N).
  N <- length(Asdata[,1])
  InvMR <- ginv(MR)
  newSsquare <- 0
  for(i in 1:N){
    SumJ <- 0
    for(j in 1:J){
      yi_mj <- Asdata[i,]-m[j,]
      SumJ <- SumJ + P[i,j] * ((yi_mj) %*% InvMR %*% (yi_mj) )
    }
    newSsquare <- newSsquare + SumJ
  }
  newSsquare <- as.numeric(newSsquare/(length(times)*N))
  return(newSsquare)
}
GetNewRho.b <- function(rho,rhoDir)
{
  # Hill-climbing rho update: step by the global rhoIncreasement in
  # direction rhoDir (+1 / -1), clamped to [0, 1].  Used by StepM.b; the
  # direction is flipped by RunEM.Joint when the likelihood keeps dropping.
  newrho <- as.numeric(rho + rhoIncreasement*rhoDir)
  if (newrho > 1) newrho <- 1
  if (newrho < 0) newrho <- 0
  return (newrho)
}
GetNewRho<- function(Asdata,m,MR,times,P,J,rho,Ssquare)
{
  # Closed-form M-step update of the autocorrelation rho from the
  # posterior-weighted residuals, scaled by (T-1) * N * sigma^2 and
  # clamped to +/-0.5 when it leaves (-1, 1).
  N <- length(Asdata[,1])
  newrho <- 0
  for(i in 1:N){
    SumJ <- 0
    for(j in 1:J){
      yi_mj <- Asdata[i,]-m[j,]
      # NOTE(review): uses MR directly, not its inverse — confirm against
      # the estimating-equation derivation.
      Item1 <- (1/(1 - rho*rho))*((yi_mj) %*% MR %*% (yi_mj) )
      # Sum of squared interior residuals, weighted by rho.
      Item2 <- 0
      for(k in 2:(length(times)-1) )
        Item2 <- Item2 + (yi_mj[k]^2)
      Item2 <- Item2 * rho
      # Sum of lag-1 cross-products.
      Item3 <- 0
      for(k in 1:(length(times)-1) )
        Item3 <- Item3 + yi_mj[k] * yi_mj[k+1]   # FIX: accumulate into Item3 (original assigned Item2, leaving Item3 == 0 and clobbering Item2)
      SumJ <- SumJ + P[i,j] * (Item1 + Item2 - Item3)
    }
    newrho <- newrho + SumJ
  }
  newrho <- as.numeric(newrho/( (length(times)-1)* N * Ssquare))
  if(abs(newrho) >= 1) return( sign(newrho)*.5)
  else return(newrho)
}
GetLikelihood <- function(Asdata.H, m.H, MSigema.H, Asdata.L, m.L, MSigema.L, omiga,P,J,times)
{
  # Posterior-weighted joint log-likelihood of the two-condition Gaussian
  # mixture (H and L datasets share cluster memberships P and weights omiga).
  N <- length(Asdata.H[,1])
  InvMSigema.H <- ginv(MSigema.H)
  InvMSigema.L <- ginv(MSigema.L)
  DetMSigema.H <- det(MSigema.H)
  DetMSigema.L <- det(MSigema.L)
  LogDetMSigema.H <- log(DetMSigema.H)/2
  LogDetMSigema.L <- log(DetMSigema.L)/2
  LogM2Pi <- length(times)*log(2*pi)
  # outer() + mapply evaluates the per-(i, j) weighted log-density term on
  # the full N x J grid.
  oneterm <- function(i, j) {
    f <- function(i,j)
      P[i,j]*(log(omiga[j]) - LogM2Pi - LogDetMSigema.H - LogDetMSigema.L
      - ( ((Asdata.H[i,]-m.H[j,])) %*% InvMSigema.H %*% (Asdata.H[i,]-m.H[j,])) /2
      - ( ((Asdata.L[i,]-m.L[j,])) %*% InvMSigema.L %*% (Asdata.L[i,]-m.L[j,])) /2)
    mapply(f, i, j)
  }
  tmp <- outer(1:N, 1:J, oneterm)
  # Clamp non-finite contributions (e.g. log(0) weights) to the smallest
  # finite term so the sum stays defined.
  tmp[!is.finite(tmp)] <- min(tmp[is.finite(tmp)])
  return(sum(tmp))
}
StepE <- function(Asdata.H, m.H, MSigema.H, Asdata.L, m.L, MSigema.L, omiga,P,J,times)
{
  # E-step: update posterior memberships P[i, j] proportional to
  # omiga[j] * f_H(y_i | cluster j) * f_L(y_i | cluster j), using unnormalised
  # Gaussian kernels (the shared normalising constants cancel in the ratio).
  InvMSigema.H <- ginv(MSigema.H)
  InvMSigema.L <- ginv(MSigema.L)
  N <- length(Asdata.H[,1])
  for( i in 1:N){
    Fi <- rep(0,J)
    for( j in 1:J){
      yi_mj.H <- Asdata.H[i,]-m.H[j,]
      Fi.H = exp( ( (yi_mj.H) %*% InvMSigema.H %*% (yi_mj.H) ) / -2)
      yi_mj.L <- Asdata.L[i,]-m.L[j,]
      Fi.L = exp( ( (yi_mj.L) %*% InvMSigema.L %*% (yi_mj.L) ) / -2)
      Fi[j] = Fi.H * Fi.L
    }
    OmigaF <- omiga %*% Fi
    P[i,] <- (omiga * Fi) / c(OmigaF)   # NaN when all kernels underflow to 0
  }
  # Patch NaN posteriors column-wise with that column's minimum entry.
  # NOTE(review): the guard `all(is.nan(P)) != TRUE` skips the repair only
  # when EVERY entry is NaN, and which.min silently ignores NaN — confirm
  # this fallback is intended rather than renormalising the affected rows.
  if (all(is.nan(P))!=TRUE) {
    for (q in 1:length(P[1,])) {
      sit <- which(is.nan(P[,q]) == TRUE)
      P[sit,q] <- P[which.min(P[,q]),q]
    }
  }
  return(P)
}
StepM <- function(Asdata,m,MR,times,Ssquare,P,rho,rhoDir,J,rpt)
{
  # Single-dataset M-step: closed-form sigma^2 update (GetNewSsquare) and,
  # after the first iteration, the closed-form rho update (GetNewRho).
  # Returns c(sigma^2, rho).  NOTE(review): the joint pipeline below uses
  # StepM.b instead — this variant appears to be kept for single-trait runs.
  newSsquare <- GetNewSsquare(Asdata,m,MR,times,P,J)
  if (rpt > 0)
    newrho <- GetNewRho(Asdata,m,MR,times,P,J,rho,Ssquare)
  else
    newrho <- rho
  return( c(newSsquare, newrho))
}
StepM.b <- function(Asdata.H,Asdata.L,m.H,m.L,MR,times,P,rho,rhoDir,J,rpt)
{
  # Joint M-step: separate sigma^2 updates for the H and L datasets plus a
  # hill-climbing rho step (GetNewRho.b) shared by both.  Returns
  # c(sigma^2_H, sigma^2_L, rho).
  newSsquare.H <- GetNewSsquare(Asdata.H,m.H,MR,times,P,J)
  newSsquare.L <- GetNewSsquare(Asdata.L,m.L,MR,times,P,J)
  if (rpt > 0){
    newrho <- GetNewRho.b(rho,rhoDir)
  }else{
    newrho <- rho
  }
  return( c(newSsquare.H, newSsquare.L, newrho))
}
RunEM.Joint <- function(Asdata.H,Asdata.L,times,X,P,MR,rho,MSigema.H,MSigema.L,omiga,m.H,m.L,J,r){
  # EM loop for the joint (H, L) functional mixture with J clusters and
  # Legendre order r.  Iterates E-step / M-step until the likelihood
  # criterion or REPEAT_LIMIT is reached, writes the final memberships and
  # mean curves to CSV, and returns c(rho, sigma^2_H, sigma^2_L, likelihood).
  rpt <- 1
  Likelihood <- -Inf
  rhoDir <- 1          # current direction of the rho hill-climb
  rhoIncCount <- 0     # consecutive iterations with a likelihood drop
  while(TRUE){
    OldLikelihood <- Likelihood
    P <- StepE(Asdata.H, m.H, MSigema.H, Asdata.L, m.L, MSigema.L, omiga,P,J,times)
    newpars <- StepM.b(Asdata.H, Asdata.L, m.H, m.L, MR, times,P,rho,rhoDir,J,rpt)
    Ssquare.H <- newpars[1]
    Ssquare.L <- newpars[2]
    rho <- newpars[3]
    # Rebuild the covariance matrices from the fresh (rho, sigma^2) values.
    MR <- GetMR(rho,times)
    MSigema.H <- Ssquare.H * MR
    InvMSigema.H <- ginv(MSigema.H)
    MSigema.L <- Ssquare.L * MR
    InvMSigema.L <- ginv(MSigema.L)
    N <- length(Asdata.H[,1])
    omiga <- colSums(P)/ N
    m.H <- GetMeanMatrix(J,times,P,X,Asdata.H,InvMSigema.H)
    m.L <- GetMeanMatrix(J,times,P,X,Asdata.L,InvMSigema.L)
    Likelihood <- GetLikelihood(Asdata.H, m.H, MSigema.H, Asdata.L, m.L, MSigema.L, omiga,P,J,times)
    # If the likelihood keeps dropping IncLimit times in a row, reverse the
    # rho search direction.
    if ( Likelihood >= OldLikelihood){
      rhoIncCount <- 0
    }else{
      rhoIncCount <- rhoIncCount + 1
      if (rhoIncCount >= IncLimit){
        rhoIncCount <- 0
        rhoDir <- rhoDir * -1
      }
    }
    cat("J:",J," rpt:", rpt, "\n")
    cat("Ssquare.H:", Ssquare.H, " Ssquare.L:", Ssquare.L, "\n")
    cat("rho:", rho, "\n")
    cat("omiga:",omiga,"\n")
    cat("Likelihood:",Likelihood,"\n\n")
    # NOTE(review): this compares |deltaL| - L against the threshold rather
    # than |deltaL| itself — looks suspicious; confirm the intended
    # convergence criterion.
    if( (abs(abs(OldLikelihood - Likelihood) - Likelihood) < LIKELIHOOD_DIFF) ) {
      cat("quit due to likelihood\n")
      cat("LIKELIHOOD_DIFF:",LIKELIHOOD_DIFF,"\n")
      break
    }
    if( rpt >= REPEAT_LIMIT ){
      cat("quit due to rpt\n")
      break
    }
    rpt <- rpt + 1
  }
  # Persist results: memberships (OpiP...) and per-condition mean curves (OpiM...).
  OpiPfileName <- sprintf("OpiP%02d.LGD%d.csv",J,r)
  write.csv(P,OpiPfileName,row.names = FALSE)
  OpiMfileName <- sprintf("OpiM%02d.LGD%d.H.csv",J,r)
  write.csv(m.H,OpiMfileName,row.names = FALSE)
  OpiMfileName <- sprintf("OpiM%02d.LGD%d.L.csv",J,r)
  write.csv(m.L,OpiMfileName,row.names = FALSE)
  return(c(rho,Ssquare.H,Ssquare.L,Likelihood))
}
InitAndRunEM.Joint <- function(J,r)
{
  # Load both datasets, build the initial EM state (random memberships,
  # shared starting covariance), and run RunEM.Joint for J clusters with
  # Legendre order r.  Reads the globals datafile.H/datafile.L/rhoStart.
  cat("r:",r,"\n")
  Asdata.H <- read.csv(datafile.H,row.names = 1)
  Asdata.H <- Asdata.H[,-1]          # drop the leading non-phenotype column
  Asdata.H <- as.matrix(Asdata.H)
  colnames(Asdata.H) <- NULL
  Asdata.L <- read.csv(datafile.L,row.names = 1)
  Asdata.L <- Asdata.L[,-1]
  Asdata.L <- as.matrix(Asdata.L)
  colnames(Asdata.L) <- NULL
  times <- seq(13,78,5)              # measurement days, matching FunMap
  rho <- rhoStart
  Ssquare <- 20                      # starting variance for both conditions
  X <- GetMX(times,r)
  N <- length(Asdata.H[,1])
  P <- GetInitPij(N,J)
  MR <- GetMR(rho,times)
  MSigema <- Ssquare * MR
  InvMSigema <- ginv(MSigema)
  DetMSigema <- (det(MSigema))^0.5   # NOTE(review): computed but never used
  omiga <- colSums(P)/ N
  m.H <- GetMeanMatrix(J,times,P,X,Asdata.H,InvMSigema)
  m.L <- GetMeanMatrix(J,times,P,X,Asdata.L,InvMSigema)
  EMResults <- RunEM.Joint(Asdata.H, Asdata.L,times,X,P,MR,rho,MSigema,MSigema,omiga,m.H,m.L,J,r)
  return(EMResults)
}
# Fit the joint mixture for each candidate cluster count J in
# START_J..END_J; each row of EMResults holds
# (J, rho, sigma^2_H, sigma^2_L, likelihood).
EMResults <- matrix(0, END_J - START_J + 1, 5)
for(j in START_J:END_J){
  EMResults[j - START_J + 1,1] <- j
  EMResults[j - START_J + 1,2:5] <- InitAndRunEM.Joint(J=j,r=LGD_J)
}
resultFileName <- sprintf("EMResults%02d~%02d.LGD%d.csv",START_J,END_J,LGD_J)
write.csv(EMResults,resultFileName,row.names = FALSE)
#-----NetRestructure-----
library(glmnet)
data_effect <- read.csv("./path/ck_gene_effect.csv")
# Hard cluster assignment per gene: the cluster with the largest posterior.
score <- matrix(NA,length(data_effect[,1]),1)
# NOTE(review): OpiPfileName is a local variable inside RunEM.Joint — it must
# already exist in the workspace (or be set manually) for this to run.
clusterscore <- read.csv(OpiPfileName,row.names = 1)
for (i in 1:length(clusterscore[,1])) {
  score[i] <- which.max(clusterscore[i,])
}
mod = 1   # the cluster (module) whose incoming edges are selected below
# Per-cluster mean effect curves (14 time points).
# NOTE(review): the row count is length(which(score == mod)) but the loop
# fills 1:length(table(score)) rows — these generally differ; confirm the
# intended dimension.
means <- matrix(NA,length(which(score == mod)),14)
for (i in 1:length(table(score))) {
  sit <- which(score == i)
  means[i,] = apply(data_effect[sit,],2,mean)
}
means <- t(means)
colnames(means) <- c(1:length(table(score)))
name <- c(1:length(means[1,]))
marker_list <- list()
# Adaptive LASSO: ridge fit supplies the penalty weights, then a weighted
# LASSO selects which other clusters predict cluster `col`.
for (col in mod) {
  ridge1_cv <- cv.glmnet(x = means[,-col], y = means[,col],type.measure = "mse",nfold = 10,alpha = 0,grouped=FALSE)
  best_ridge_coef <- as.numeric(coef(ridge1_cv, s = ridge1_cv$lambda.min))[-1]
  fit_res <- cv.glmnet(x = means[,-col], y = means[,col],type.measure = "mse",nfold = 10,alpha = 1,penalty.factor = 1 / abs(best_ridge_coef),keep = TRUE,grouped=FALSE)
  best_alasso_coef1 <- coef(fit_res, s = fit_res$lambda.min)
  marker_list_one <- list()
  marker_list_one[[1]] <- name[col]# element 1: name of the dependent (direct) QTL/cluster
  marker_list_one[[2]] <- as.numeric(best_alasso_coef1@Dimnames[[1]][best_alasso_coef1@i[-1]+1])# element 2: names of the selected (indirect) QTLs/clusters
  marker_list_one[[3]] <- best_alasso_coef1@x[-1]# element 3: the variable-selection coefficients
  marker_list[[col]] <- marker_list_one
  #proc.time() - tim
}
load("./path/Effect.Rdata")
get_LOPm <- function(X){
  # Legendre basis matrix: evaluates orders 0..6 on length(X) points spread
  # evenly over [-1, 1], then keeps the first ORDER columns (ORDER is a
  # global defined just below).
  len = length(X)
  LOP <- function(r){
    # Order-r Legendre polynomial via the closed-form sum
    # P_r(t) = sum_m (-1)^m (2r-2m)! / (2^r m! (r-m)! (r-2m)!) t^(r-2m),
    # with factorials written as gamma(k+1).
    t <- seq(-1,1,2/(len-1))
    temp <- rep(0,len)
    for (m in 0:as.integer(r/2)) {
      temp <- temp + (-1)^m*gamma(2*r - 2*m + 1)/(2^r*gamma(m+1)*gamma(r-m+1)*gamma(r-2*m + 1)) * t^(r-2*m)
    }
    return(temp)
  }
  LOPm <- cbind(LOP(0),LOP(1),LOP(2),LOP(3),LOP(4),LOP(5),LOP(6) )
  return(LOPm[,1:ORDER])
}
ORDER <- 6        # number of Legendre basis terms kept throughout
library(mvtnorm)
f1 <- function(x,t){
  # Growth curve at times x: logistic rise minus exponential decay.
  # t = (asymptote, logistic scale, logistic rate, decay scale, decay rate).
  logistic_part <- t[1] / (1 + t[2] * exp(-t[3] * x))
  decay_part <- t[4] * exp(-t[5] * x)
  logistic_part - decay_part
}
fy <- function(t,X){
  # Genetic-effect curve of one SNP evaluated at times X.  t packs per-
  # genotype curve parameters: t[1] = number of genotype classes (2 or 3),
  # followed by 5 curve parameters (for f1) and an allele-frequency-like
  # weight per genotype (t[7], t[13]).  NOTE(review): the exact layout is
  # inferred from the indexing — confirm against how `picture` entries are
  # constructed.
  if(t[1] == 2){
    # Two genotypes: additive effect only, no dominance.
    e1 = f1(X,t[2:6])
    e2 = f1(X,t[8:12])
    additive = 0.5 * (e1 - e2)
    dominant = 0
    y <- (2 * t[7] * t[13] * (additive + (t[7] - t[13]) * dominant)^2 + 4 * t[7]^2 * t[13]^2 * dominant^2) ^ 0.5
  }else{
    # Three genotypes: additive from the homozygote contrast, dominance from
    # the heterozygote deviation.
    e1 = f1(X,t[2:6])
    e2 = f1(X,t[8:12])
    e3 = f1(X,t[14:18])
    additive = 0.5 * (e1 - e3)
    dominant = e2 - 0.5 * (e1 + e3)
    y <- (2 * t[7] * t[13] * (additive + (t[7] - t[13]) * dominant)^2 + 4 * t[7]^2 * t[13]^2 * dominant^2) ^ 0.5
  }
}
get_origin <- function(dy,X,y0){
  # Reconstruct a trajectory from its derivative samples by forward Euler
  # integration on the evenly spaced grid X.
  #   dy: derivative at each step; X: grid (only its spacing is used);
  #   y0: initial value.  Returns a vector of length(X) - 1.
  y0 <- c(y0)
  step <- X[2] - X[1]   # hoisted loop invariant
  # seq_len guards the empty case: the original `2:(length(X)-1)` counts
  # DOWNWARD (2, 1) when length(X) <= 2 instead of doing nothing.
  for (i in seq_len(length(X) - 2) + 1) {
    slope <- dy[i-1]
    y_before <- y0[length(y0)]
    add <- y_before + slope*step
    y0 <- c(y0,add)
  }
  return(y0)
}
fl_new <- function(t,X,dep,ind,dep_per,ind_per,LOPm){
  # Objective for the ODE-network fit: t holds ORDER Legendre coefficients
  # for the dependent cluster's self term plus each independent cluster's
  # influence term.  Returns squared error between the observed derivative
  # of the dependent curve and the model, plus |1 - R^2| of the
  # reconstructed curve.  Reads the globals score and ORDER and calls
  # fy()/get_origin().
  # Mean effect curve of the dependent cluster.
  ydep <- matrix(NA,length(dep_per[,1]),length(X))
  for (i in 1:length(dep_per[,1])) {
    ydep[i,] <- fy(c(dep_per[i,]),X)
  }
  ydep <- apply(ydep, 2, mean)
  # NOTE(review): factor 2 makes this TWICE the forward difference — confirm
  # this matches the 2-part trapezoid-style RHS assembled below.
  d <- 2*(ydep[-1] - ydep[-length(ydep)])/(X[2]-X[1])
  tm <- matrix(t,ncol=ORDER,byrow = T)
  temp1 <- LOPm[-1,]%*%t(tm) # (length(X)-1) rows x (#ind + 1) fitted curves
  temp1 <- temp1*matrix(rep(ydep[-1],length(ind)+1),ncol = length(ind)+1,byrow = F)
  # Mean effect curve of each independent cluster; ind_per rows are stacked
  # per cluster, so `num` tracks the running row offset.
  num = 0
  fy1 <- matrix(NA,length(ind),length(X))
  for (i in 1:length(ind)) {
    sit = which(score == ind[i])
    leng = length(sit)
    raw = matrix(NA,leng,length(X))
    if (i == 1) {
      parameter = ind_per[1:leng,]
      for (o in 1:leng) {
        raw[o,] = fy(parameter[o,] ,X)
      }
      fy1[i,] <- apply(raw,2,mean)
      num = num + leng
    }else{
      parameter = ind_per[(num+1):(num +leng),]
      for (o in 1:leng) {
        raw[o,] = fy(parameter[o,] ,X)
      }
      fy1[i,] <- apply(raw,2,mean)
      num = num + leng
    }
  }
  # Columns 2.. are influence terms: scale by the independent curves.
  for (i in 1:length(ind)) {
    yind <- fy1[i,]
    temp1[,i+1] <- temp1[,i+1]*yind[-1]
  }
  x0 <- X[-length(X)]
  # Same construction evaluated at the left endpoints of each interval.
  temp0 <- LOPm[-length(X),]%*%t(tm)
  temp0 <- temp0*matrix(rep(ydep[-length(X)],length(ind)+1),ncol = length(ind)+1,byrow = F)
  for (i in 1:length(ind)) {
    yind <- fy1[i,]
    temp0[,i+1] <- temp0[,i+1]*yind[-length(X)]
  }
  #----------------
  # Model derivatives per component (self term in column 1, influences after).
  d_mat <- LOPm%*%t(tm)
  for (i in 1:length(tm[,1])) {
    if (i == 1) {
      d_mat[,i] <- d_mat[,i]*ydep
    }else{
      d_mat[,i] <- d_mat[,i]*fy1[i-1,]
    }
  }
  # Convert d_mat (derivatives) into o_mat (integrated original-scale curves).
  o_mat <- c()
  for (i in 1:length(d_mat[1,]) ) {
    if(i == 1){
      o_mat <- cbind(o_mat,get_origin(d_mat[,i],X,ydep[1]))
    }else{
      o_mat <- cbind(o_mat,get_origin(d_mat[,i],X,0))
    }
  }
  o_mat <- as.data.frame(o_mat)
  # R^2 of the reconstructed curve against the observed dependent curve.
  y <- ydep[-length(ydep)]
  e <- colSums(t(o_mat))
  ssr <- sum((y-e)^2)
  sst <- sum((y-mean(y))^2)
  r <- 1-(ssr/sst)
  #----------------
  return(sum((d - colSums(t(temp1 + temp0)) )^2) + abs(1-r))
}
# Fit the ODE-network coefficients for each selected module and store, per
# marker_list entry: [[4]] the coefficient matrix, [[5]] the integrated
# influence of each independent cluster.
# NOTE(review): this iterates over the SINGLE value length(marker_list),
# not 1:length(marker_list) — confirm whether all entries should be fitted.
for (col in length(marker_list) ) {
  cat(col,'\n')
  dep <- marker_list[[col]][[1]]
  dep <- which(score == dep)          # genes in the dependent cluster
  ind <- marker_list[[col]][[2]]      # indirect (predictor) clusters
  sit = list()
  for (i in 1:length(ind)) {
    sit[[i]] = which(score == ind[i])
  }
  sit = unlist(sit)
  if( length(sit) == 0 ){
    next
  }
  # Per-gene curve parameters (from the global `picture`), padded to 18 cols.
  dep_per <- matrix(NA, ncol = 18,nrow = length(dep))
  for (i in 1:length(dep)) {
    dep_per[i,1:length(picture[[dep[i]]])] <- picture[[dep[i]]]
  }
  ind_per <- matrix(NA, ncol = 18,nrow = length(sit))
  for (i in 1:length(sit)) {
    ind_per[i,1:length(picture[[sit[i]]])] <- picture[[sit[i]]]
  }
  X <- seq(0,78,78/((length(ind)+1)*ORDER*4))
  # The right-hand side of the equation has two parts, indexed 1..n and 0..n-1.
  t0 <- rep(0.001,(length(ind)+1)*ORDER)
  itimes <- 1
  # Repeated Nelder-Mead restarts from the previous optimum until the
  # parameters stop changing (or 10 rounds).
  repeat{
    s1 <- optim(t0,fl_new,method = 'Nelder-Mead',X = X,dep = dep,ind = ind,
                dep_per = dep_per, ind_per = ind_per, LOPm = get_LOPm(X))
    r1 <- s1$par
    s2 <- optim(r1,fl_new,method = 'Nelder-Mead',X = X,dep = dep,ind = ind,
                dep_per = dep_per, ind_per = ind_per, LOPm = get_LOPm(X))
    cat(col,'-',itimes,s2$value,'\n')
    itimes <- itimes + 1
    if(all( abs(r1-s2$par) == 0 )||itimes == 10){ #*** larger itimes = higher precision but slower; with cluster resources this should be much larger (e.g. >= 1000) ***#
      break
    }else{
      t0 <- s2$par
    }
  }
  marker_list[[col]][[4]] <- matrix(s2$par,ncol=ORDER,byrow=TRUE)
  # Rebuild the fitted component curves (mirrors the body of fl_new).
  tm <- matrix(s2$par,ncol=ORDER,byrow=TRUE)
  ydep <- matrix(NA,length(dep_per[,1]),length(X))
  for (i in 1:length(dep_per[,1])) {
    ydep[i,] <- fy(c(dep_per[i,]),X)
  }
  ydep <- apply(ydep, 2, mean)
  d_mat <- get_LOPm(X)%*%t(tm)
  num = 0
  fy1 <- matrix(NA,length(ind),length(X))
  for (i in 1:length(ind)) {
    sit = which(score == ind[i])
    leng = length(sit)
    raw = matrix(NA,leng,length(X))
    if (i == 1) {
      parameter = ind_per[1:leng,]
      for (o in 1:leng) {
        raw[o,] = fy(parameter[o,] ,X)
      }
      fy1[i,] <- apply(raw,2,mean)
      num = num + leng
    }else{
      parameter = ind_per[(num+1):(num +leng),]
      for (o in 1:leng) {
        raw[o,] = fy(parameter[o,] ,X)
      }
      fy1[i,] <- apply(raw,2,mean)
      num = num + leng
    }
  }
  for (i in 1:length(tm[,1])) {
    if (i == 1) {
      d_mat[,i] <- d_mat[,i]*ydep
    }else{
      d_mat[,i] <- d_mat[,i]*fy1[i-1,]
    }
  }
  # Convert d_mat (derivatives) into o_mat (integrated original-scale curves).
  o_mat <- c()
  for (i in 1:length(d_mat[1,]) ) {
    if(i == 1){
      o_mat <- cbind(o_mat,get_origin(d_mat[,i],X,ydep[1]))
    }else{
      o_mat <- cbind(o_mat,get_origin(d_mat[,i],X,0))
    }
  }
  o_mat <- as.data.frame(o_mat)
  # Integrated (rectangle-rule) influence of each independent cluster.
  if( dim(o_mat)[2] <= 2 ){
    marker_list[[col]][[5]] <- sum(o_mat[,length(o_mat[1,])]*(X[2]-X[1]))
  }else{
    marker_list[[col]][[5]] <- colSums(o_mat[,2:(length(ind)+1)]*(X[2]-X[1]))
  }
}
filename <- paste0("salt",mod,".Rdata")
save(marker_list,file = filename)
library(mvtnorm)
salttable <- read.csv("./path/saltstable.csv",header = T,row.names = 1)
cktable <- read.csv("./path/ckstable.csv",header = T,row.names = 1)
genotable <- read.csv("./path/genostable.csv",header = T,row.names = 1)
get_miu = function(miu_par, t){
  # Mean growth curve at times t: a logistic rise minus an exponential
  # decay correction.  miu_par = (asymptote, logistic scale, logistic rate,
  # decay scale, decay rate).  Vectorised over t.
  rise  <- miu_par[1] / (1 + miu_par[2] * exp(-miu_par[3] * t))
  decay <- miu_par[4] * exp(-miu_par[5] * t)
  rise - decay
}
get_SAD1 <- function(x, t){
  # Block-diagonal SAD(1) covariance for the bivariate longitudinal trait.
  # x = (phi_1, nu_1, phi_2, nu_2): antedependence parameter and innovation
  # scale for each condition.  Returns a 2n x 2n matrix with zero
  # cross-condition covariance, n = length(t).
  n <- length(t)
  build_block <- function(phi, v){
    denom <- 1 - phi^2
    blk <- array(1, dim = c(n, n))
    for (row in 1:n){
      blk[row, row:n] <- phi^(c(row:n) - row) * (1 - phi^(2 * row)) / denom
      blk[row:n, row] <- blk[row, row:n]   # mirror to keep symmetry
    }
    blk * v^2
  }
  upper <- build_block(x[1], x[2])
  lower <- build_block(x[3], x[4])
  zeros <- array(0, dim = c(n, n))
  rbind(cbind(upper, zeros), cbind(zeros, lower))
}
merge_miu <- function(x, t){
  # Stack the two condition-specific mean curves: x[1:5] parameterises the
  # first (ck) curve, x[6:10] the second (salt); see get_miu().
  c(get_miu(x[1:5], t), get_miu(x[6:10], t))
}
L0 = function(par,t,y){
miu = merge_miu(par[1:10],t)
SAD1 = get_SAD1(par[11:14],t)
L0 = -sum(dmvnorm(y,miu,SAD1,log = T))
L0
}
L1 = function(par,t,marker){
ckgeno1 <- cktable[which(marker==0),]
saltgeno1 <- salttable[which(marker==0),]
ckgeno2 <- cktable[which(marker==1),]
saltgeno2 <- salttable[which(marker==1),]
ckgeno3 <- cktable[which(marker==2),]
saltgeno3 <- salttable[which(marker==2),]
SAD1 = get_SAD1(par[31:34],t)
miu1 = merge_miu(par[1:10],t)
miu2 = merge_miu(par[11:20],t)
miu3 = merge_miu(par[21:30],t)
l1_1 <- sum(dmvnorm(cbind(ckgeno1,saltgeno1),miu1,SAD1,log = T))
l1_2 <- sum(dmvnorm(cbind(ckgeno2,saltgeno2),miu2,SAD1,log = T))
l1_3 <- sum(dmvnorm(cbind(ckgeno3,saltgeno3),miu3,SAD1,log = T))
L1 <- -(l1_1 + l1_2 + l1_3)
L1
}
L2 = function(par,t,marker){
ckgeno1 <- cktable[which(marker==0),]
saltgeno1 <- salttable[which(marker==0),]
ckgeno2 <- cktable[which(marker==1),]
saltgeno2 <- salttable[which(marker==1),]
SAD1 = get_SAD1(par[21:24],t)
miu1 = merge_miu(par[1:10],t)
miu2 = merge_miu(par[11:20],t)
l1_1 <- sum(dmvnorm(cbind(ckgeno1,saltgeno1),miu1,SAD1,log = T))
l1_2 <- sum(dmvnorm(cbind(ckgeno2,saltgeno2),miu2,SAD1,log = T))
L2 <- -(l1_1 + l1_2)
L2
}
t = seq(13,78,5)
LR = function(marker){
t = t
ckgeno1 <- cktable[which(marker==0),]
saltgeno1 <- salttable[which(marker==0),]
ckgeno2 <- cktable[which(marker==1),]
saltgeno2 <- salttable[which(marker==1),]
ckgeno3 <- cktable[which(marker==2),]
saltgeno3 <- salttable[which(marker==2),]
if(length(which(marker==9))==0){
y_all = cbind(cktable,salttable)
}else{
y_all <- cbind(cktable[-which(marker == 9),],salttable[-which(marker == 9),])
}
if(2 %in% marker){
NH0 = optim(c(71,21,0.062,7.8,0.0165,71.8,16.3,0.0336,12,0.05,0.001,50,0.001,50),L0,t=t,y=y_all,method = "BFGS",control=list(maxit=50000))
pars <- c(NH0$par[1:10],NH0$par[1:10],NH0$par[1:10],NH0$par[11:12],NH0$par[11:12])
NH1 <- optim(pars,L1,t=t,marker=marker,method="BFGS",control=list(maxit=50000))
LR<- 2*(NH0$value - NH1$value)
cat(i,NH0$value,NH1$value,LR,"\n")
LR
}else{
NH0 = optim(c(71,21,0.062,7.8,0.0165,71.8,16.3,0.0336,12,0.05,0.001,50,0.001,50),L0,t=t,y=y_all,method = "BFGS",control=list(maxit=50000))
pars <- c(NH0$par[1:10],NH0$par[1:10],NH0$par[11:12],NH0$par[11:12])
NH1 <- optim(pars,L2,t=t,marker=marker,method="BFGS",control=list(maxit=50000))
LR<- 2*(NH0$value - NH1$value)
cat(i,NH0$value,NH1$value,LR,"\n")
LR
}
}
lr = rep(0,(dim(genotable)[1]))
for (i in 1:dim(genotable)[1]) {
lr[i] = LR(as.numeric(genotable[i,]))
}
save(lr,file = "filename.Rdata")
#-----FunCluster-----
library(MASS)
setwd("./path")
SsquareDiff <<- 1.0e-5
IncLimit <<- 3
REPEAT_LIMIT <<- 200
LGD_J <<- 5
START_J <<- 8
END_J <<- 15
LIKELIHOOD_DIFF <<- 0.5
rhoIncreasement <<- 0.002
rhoStart <<-0.8
datafile.L <<- "ck_gene_effect.csv.csv"
datafile.H <<- "salt_gene_effect.csv.csv"
set.seed(Sys.time())
# Legendre Polynominals
LgdP <- expression( tt,
( 3* tt^2 - 1 )/2 ,
( 5 * tt^3 - 3* tt )/2,
( 35 * tt^4 - 30 * tt^2 + 3)/8,
( 63 * tt^5 - 70 * tt^3 + 15 * tt )/8,
( 231 * tt^6 - 315 * tt^4 + 105 * tt^2 - 5)/16 )
GetMR <- function(rho,times)
{
MR <- matrix(1,length(times),length(times))
for ( i in 1:length(times)){
for(j in 1:length(times)){
MR[i,j]= rho^(abs(times[j] - times[i]))
}
}
return (MR)
}
GetMX <- function(times,r)
{
tnum = length(times)
X <- matrix(1,tnum,r+1)
for(t in 1:tnum ){
tt <- -1 + 2*(times[t] - times[1])/(times[tnum] - times[1])
for(i in 1:r){
X[t,i+1] <- eval(LgdP[i])
}
}
return (X)
}
GetInitPij <- function(N,J)
{
P <- matrix(1/J,N,J)
for (i in 1:N){
P[i,] <- rnorm(J, mean=1/J, sd= 0.5 * 1/J )
P[i,] <- P[i,]/sum(P[i,])
}
return (P)
}
GetMeanMatrix <- function(J,times,P,X,Asdata,InvMSigema)
{
m <- matrix(NA,J,length(times))
N <- length(Asdata[,1])
r <- length(X[1,])
xInvSigema <- t(X) %*% InvMSigema
xInvSigemax <- xInvSigema%*% X
for( j in 1:J){
ud <- matrix(0, r, r)
for( i in 1: N){
ud <- ud + P[i,j]*xInvSigemax
}
ubd <- matrix(0, r, 1)
for( i in 1: N){
ubd <- ubd + P[i,j]*( xInvSigema %*% (Asdata[i,]) )
}
uj <- ginv(ud) %*% ubd
m[j,] <- X %*% uj
}
return(m)
}
GetNewSsquare <- function(Asdata,m,MR,times,P,J)
{
N <- length(Asdata[,1])
InvMR <- ginv(MR)
newSsquare <- 0
for(i in 1:N){
SumJ <- 0
for(j in 1:J){
yi_mj <- Asdata[i,]-m[j,]
SumJ <- SumJ + P[i,j] * ((yi_mj) %*% InvMR %*% (yi_mj) )
}
newSsquare <- newSsquare + SumJ
}
newSsquare <- as.numeric(newSsquare/(length(times)*N))
return(newSsquare)
}
GetNewRho.b <- function(rho,rhoDir)
{
newrho <- as.numeric(rho + rhoIncreasement*rhoDir)
if (newrho > 1) newrho <- 1
if (newrho < 0) newrho <- 0
return (newrho)
}
GetNewRho<- function(Asdata,m,MR,times,P,J,rho,Ssquare)
{
N <- length(Asdata[,1])
newrho <- 0
for(i in 1:N){
SumJ <- 0
for(j in 1:J){
yi_mj <- Asdata[i,]-m[j,]
Item1 <- (1/(1 - rho*rho))*((yi_mj) %*% MR %*% (yi_mj) )
Item2 <- 0
for(k in 2:(length(times)-1) )
Item2 <- Item2 + (yi_mj[k]^2)
Item2 <- Item2 * rho
Item3 <- 0
for(k in 1:(length(times)-1) )
Item2 <- Item3 + yi_mj[k] * yi_mj[k+1]
SumJ <- SumJ + P[i,j] * (Item1 + Item2 - Item3)
}
newrho <- newrho + SumJ
}
newrho <- as.numeric(newrho/( (length(times)-1)* N * Ssquare))
if(abs(newrho) >= 1) return( sign(newrho)*.5)
else return(newrho)
}
GetLikelihood <- function(Asdata.H, m.H, MSigema.H, Asdata.L, m.L, MSigema.L, omiga,P,J,times)
{
N <- length(Asdata.H[,1])
InvMSigema.H <- ginv(MSigema.H)
InvMSigema.L <- ginv(MSigema.L)
DetMSigema.H <- det(MSigema.H)
DetMSigema.L <- det(MSigema.L)
LogDetMSigema.H <- log(DetMSigema.H)/2
LogDetMSigema.L <- log(DetMSigema.L)/2
LogM2Pi <- length(times)*log(2*pi)
oneterm <- function(i, j) {
f <- function(i,j)
P[i,j]*(log(omiga[j]) - LogM2Pi - LogDetMSigema.H - LogDetMSigema.L
- ( ((Asdata.H[i,]-m.H[j,])) %*% InvMSigema.H %*% (Asdata.H[i,]-m.H[j,])) /2
- ( ((Asdata.L[i,]-m.L[j,])) %*% InvMSigema.L %*% (Asdata.L[i,]-m.L[j,])) /2)
mapply(f, i, j)
}
tmp <- outer(1:N, 1:J, oneterm)
tmp[!is.finite(tmp)] <- min(tmp[is.finite(tmp)])
return(sum(tmp))
}
StepE <- function(Asdata.H, m.H, MSigema.H, Asdata.L, m.L, MSigema.L, omiga,P,J,times)
{
InvMSigema.H <- ginv(MSigema.H)
InvMSigema.L <- ginv(MSigema.L)
N <- length(Asdata.H[,1])
for( i in 1:N){
Fi <- rep(0,J)
for( j in 1:J){
yi_mj.H <- Asdata.H[i,]-m.H[j,]
Fi.H = exp( ( (yi_mj.H) %*% InvMSigema.H %*% (yi_mj.H) ) / -2)
yi_mj.L <- Asdata.L[i,]-m.L[j,]
Fi.L = exp( ( (yi_mj.L) %*% InvMSigema.L %*% (yi_mj.L) ) / -2)
Fi[j] = Fi.H * Fi.L
}
OmigaF <- omiga %*% Fi
P[i,] <- (omiga * Fi) / c(OmigaF)
}
if (all(is.nan(P))!=TRUE) {
for (q in 1:length(P[1,])) {
sit <- which(is.nan(P[,q]) == TRUE)
P[sit,q] <- P[which.min(P[,q]),q]
}
}
return(P)
}
StepM <- function(Asdata,m,MR,times,Ssquare,P,rho,rhoDir,J,rpt)
{
newSsquare <- GetNewSsquare(Asdata,m,MR,times,P,J)
if (rpt > 0)
newrho <- GetNewRho(Asdata,m,MR,times,P,J,rho,Ssquare)
else
newrho <- rho
return( c(newSsquare, newrho))
}
StepM.b <- function(Asdata.H,Asdata.L,m.H,m.L,MR,times,P,rho,rhoDir,J,rpt)
{
newSsquare.H <- GetNewSsquare(Asdata.H,m.H,MR,times,P,J)
newSsquare.L <- GetNewSsquare(Asdata.L,m.L,MR,times,P,J)
if (rpt > 0){
newrho <- GetNewRho.b(rho,rhoDir)
}else{
newrho <- rho
}
return( c(newSsquare.H, newSsquare.L, newrho))
}
RunEM.Joint <- function(Asdata.H,Asdata.L,times,X,P,MR,rho,MSigema.H,MSigema.L,omiga,m.H,m.L,J,r){
rpt <- 1
Likelihood <- -Inf
rhoDir <- 1
rhoIncCount <- 0
while(TRUE){
OldLikelihood <- Likelihood
P <- StepE(Asdata.H, m.H, MSigema.H, Asdata.L, m.L, MSigema.L, omiga,P,J,times)
newpars <- StepM.b(Asdata.H, Asdata.L, m.H, m.L, MR, times,P,rho,rhoDir,J,rpt)
Ssquare.H <- newpars[1]
Ssquare.L <- newpars[2]
rho <- newpars[3]
MR <- GetMR(rho,times)
MSigema.H <- Ssquare.H * MR
InvMSigema.H <- ginv(MSigema.H)
MSigema.L <- Ssquare.L * MR
InvMSigema.L <- ginv(MSigema.L)
N <- length(Asdata.H[,1])
omiga <- colSums(P)/ N
m.H <- GetMeanMatrix(J,times,P,X,Asdata.H,InvMSigema.H)
m.L <- GetMeanMatrix(J,times,P,X,Asdata.L,InvMSigema.L)
Likelihood <- GetLikelihood(Asdata.H, m.H, MSigema.H, Asdata.L, m.L, MSigema.L, omiga,P,J,times)
if ( Likelihood >= OldLikelihood){
rhoIncCount <- 0
}else{
rhoIncCount <- rhoIncCount + 1
if (rhoIncCount >= IncLimit){
rhoIncCount <- 0
rhoDir <- rhoDir * -1
}
}
cat("J:",J," rpt:", rpt, "\n")
cat("Ssquare.H:", Ssquare.H, " Ssquare.L:", Ssquare.L, "\n")
cat("rho:", rho, "\n")
cat("omiga:",omiga,"\n")
cat("Likelihood:",Likelihood,"\n\n")
if( (abs(abs(OldLikelihood - Likelihood) - Likelihood) < LIKELIHOOD_DIFF) ) {
cat("quit due to likelihood\n")
cat("LIKELIHOOD_DIFF:",LIKELIHOOD_DIFF,"\n")
break
}
if( rpt >= REPEAT_LIMIT ){
cat("quit due to rpt\n")
break
}
rpt <- rpt + 1
}
OpiPfileName <- sprintf("OpiP%02d.LGD%d.csv",J,r)
write.csv(P,OpiPfileName,row.names = FALSE)
OpiMfileName <- sprintf("OpiM%02d.LGD%d.H.csv",J,r)
write.csv(m.H,OpiMfileName,row.names = FALSE)
OpiMfileName <- sprintf("OpiM%02d.LGD%d.L.csv",J,r)
write.csv(m.L,OpiMfileName,row.names = FALSE)
return(c(rho,Ssquare.H,Ssquare.L,Likelihood))
}
InitAndRunEM.Joint <- function(J,r)
{
cat("r:",r,"\n")
Asdata.H <- read.csv(datafile.H,row.names = 1)
Asdata.H <- Asdata.H[,-1]
Asdata.H <- as.matrix(Asdata.H)
colnames(Asdata.H) <- NULL
Asdata.L <- read.csv(datafile.L,row.names = 1)
Asdata.L <- Asdata.L[,-1]
Asdata.L <- as.matrix(Asdata.L)
colnames(Asdata.L) <- NULL
times <- seq(13,78,5)
rho <- rhoStart
Ssquare <- 20
X <- GetMX(times,r)
N <- length(Asdata.H[,1])
P <- GetInitPij(N,J)
MR <- GetMR(rho,times)
MSigema <- Ssquare * MR
InvMSigema <- ginv(MSigema)
DetMSigema <- (det(MSigema))^0.5
omiga <- colSums(P)/ N
m.H <- GetMeanMatrix(J,times,P,X,Asdata.H,InvMSigema)
m.L <- GetMeanMatrix(J,times,P,X,Asdata.L,InvMSigema)
EMResults <- RunEM.Joint(Asdata.H, Asdata.L,times,X,P,MR,rho,MSigema,MSigema,omiga,m.H,m.L,J,r)
return(EMResults)
}
EMResults <- matrix(0, END_J - START_J + 1, 5)
for(j in START_J:END_J){
EMResults[j - START_J + 1,1] <- j
EMResults[j - START_J + 1,2:5] <- InitAndRunEM.Joint(J=j,r=LGD_J)
}
resultFileName <- sprintf("EMResults%02d~%02d.LGD%d.csv",START_J,END_J,LGD_J)
write.csv(EMResults,resultFileName,row.names = FALSE)
#-----NetRestructure-----
library(glmnet)
data_effect <- read.csv("./path/ck_gene_effect.csv")
score <- matrix(NA,length(data_effect[,1]),1)
clusterscore <- read.csv(OpiPfileName,row.names = 1)
for (i in 1:length(clusterscore[,1])) {
score[i] <- which.max(clusterscore[i,])
}
mod = 1
means <- matrix(NA,length(which(score == mod)),14)
for (i in 1:length(table(score))) {
sit <- which(score == i)
means[i,] = apply(data_effect[sit,],2,mean)
}
means <- t(means)
colnames(means) <- c(1:length(table(score)))
name <- c(1:length(means[1,]))
marker_list <- list()
for (col in mod) {
ridge1_cv <- cv.glmnet(x = means[,-col], y = means[,col],type.measure = "mse",nfold = 10,alpha = 0,grouped=FALSE)
best_ridge_coef <- as.numeric(coef(ridge1_cv, s = ridge1_cv$lambda.min))[-1]
fit_res <- cv.glmnet(x = means[,-col], y = means[,col],type.measure = "mse",nfold = 10,alpha = 1,penalty.factor = 1 / abs(best_ridge_coef),keep = TRUE,grouped=FALSE)
best_alasso_coef1 <- coef(fit_res, s = fit_res$lambda.min)
marker_list_one <- list()
marker_list_one[[1]] <- name[col]#第一个列表是直接qtl的名字
marker_list_one[[2]] <- as.numeric(best_alasso_coef1@Dimnames[[1]][best_alasso_coef1@i[-1]+1])#第二个列表是间接qtl的名字
marker_list_one[[3]] <- best_alasso_coef1@x[-1]#第三个列表是变量选择系数
marker_list[[col]] <- marker_list_one
#proc.time() - tim
}
load("./path/Effect.Rdata")
get_LOPm <- function(X){
len = length(X)
LOP <- function(r){
t <- seq(-1,1,2/(len-1))
temp <- rep(0,len)
for (m in 0:as.integer(r/2)) {
temp <- temp + (-1)^m*gamma(2*r - 2*m + 1)/(2^r*gamma(m+1)*gamma(r-m+1)*gamma(r-2*m + 1)) * t^(r-2*m)
}
return(temp)
}
LOPm <- cbind(LOP(0),LOP(1),LOP(2),LOP(3),LOP(4),LOP(5),LOP(6) )
return(LOPm[,1:ORDER])
}
ORDER <- 6
library(mvtnorm)
f1 <- function(x,t){
y = t[1]/(1 + t[2] * exp(-t[3] * x)) - (t[4] * exp(-t[5] * x))
return(y)
}
fy <- function(t,X){
if(t[1] == 2){
e1 = f1(X,t[2:6])
e2 = f1(X,t[8:12])
additive = 0.5 * (e1 - e2)
dominant = 0
y <- (2 * t[7] * t[13] * (additive + (t[7] - t[13]) * dominant)^2 + 4 * t[7]^2 * t[13]^2 * dominant^2) ^ 0.5
}else{
e1 = f1(X,t[2:6])
e2 = f1(X,t[8:12])
e3 = f1(X,t[14:18])
additive = 0.5 * (e1 - e3)
dominant = e2 - 0.5 * (e1 + e3)
y <- (2 * t[7] * t[13] * (additive + (t[7] - t[13]) * dominant)^2 + 4 * t[7]^2 * t[13]^2 * dominant^2) ^ 0.5
}
}
get_origin <- function(dy,X,y0){
y0 <- c(y0)
for (i in 2:(length(X)-1)) {
slope <- dy[i-1]
y_before <- y0[length(y0)]
add <- y_before + slope*(X[2]-X[1])
y0 <- c(y0,add)
}
return(y0)
}
fl_new <- function(t,X,dep,ind,dep_per,ind_per,LOPm){
ydep <- matrix(NA,length(dep_per[,1]),length(X))
for (i in 1:length(dep_per[,1])) {
ydep[i,] <- fy(c(dep_per[i,]),X)
}
ydep <- apply(ydep, 2, mean)
d <- 2*(ydep[-1] - ydep[-length(ydep)])/(X[2]-X[1])
tm <- matrix(t,ncol=ORDER,byrow = T)
temp1 <- LOPm[-1,]%*%t(tm) # mp * m个线
temp1 <- temp1*matrix(rep(ydep[-1],length(ind)+1),ncol = length(ind)+1,byrow = F)
num = 0
fy1 <- matrix(NA,length(ind),length(X))
for (i in 1:length(ind)) {
sit = which(score == ind[i])
leng = length(sit)
raw = matrix(NA,leng,length(X))
if (i == 1) {
parameter = ind_per[1:leng,]
for (o in 1:leng) {
raw[o,] = fy(parameter[o,] ,X)
}
fy1[i,] <- apply(raw,2,mean)
num = num + leng
}else{
parameter = ind_per[(num+1):(num +leng),]
for (o in 1:leng) {
raw[o,] = fy(parameter[o,] ,X)
}
fy1[i,] <- apply(raw,2,mean)
num = num + leng
}
}
for (i in 1:length(ind)) {
yind <- fy1[i,]
temp1[,i+1] <- temp1[,i+1]*yind[-1]
}
x0 <- X[-length(X)]
temp0 <- LOPm[-length(X),]%*%t(tm)
temp0 <- temp0*matrix(rep(ydep[-length(X)],length(ind)+1),ncol = length(ind)+1,byrow = F)
for (i in 1:length(ind)) {
yind <- fy1[i,]
temp0[,i+1] <- temp0[,i+1]*yind[-length(X)]
}
#----------------
d_mat <- LOPm%*%t(tm)
for (i in 1:length(tm[,1])) {
if (i == 1) {
d_mat[,i] <- d_mat[,i]*ydep
}else{
d_mat[,i] <- d_mat[,i]*fy1[i-1,]
}
}
# 将d_mat转化为o_mat
o_mat <- c()
for (i in 1:length(d_mat[1,]) ) {
if(i == 1){
o_mat <- cbind(o_mat,get_origin(d_mat[,i],X,ydep[1]))
}else{
o_mat <- cbind(o_mat,get_origin(d_mat[,i],X,0))
}
}
o_mat <- as.data.frame(o_mat)
y <- ydep[-length(ydep)]
e <- colSums(t(o_mat))
ssr <- sum((y-e)^2)
sst <- sum((y-mean(y))^2)
r <- 1-(ssr/sst)
#----------------
return(sum((d - colSums(t(temp1 + temp0)) )^2) + abs(1-r))
}
for (col in length(marker_list) ) {
cat(col,'\n')
dep <- marker_list[[col]][[1]]
dep <- which(score == dep)
ind <- marker_list[[col]][[2]]
sit = list()
for (i in 1:length(ind)) {
sit[[i]] = which(score == ind[i])
}
sit = unlist(sit)
if( length(sit) == 0 ){
next
}
dep_per <- matrix(NA, ncol = 18,nrow = length(dep))
for (i in 1:length(dep)) {
dep_per[i,1:length(picture[[dep[i]]])] <- picture[[dep[i]]]
}
ind_per <- matrix(NA, ncol = 18,nrow = length(sit))
for (i in 1:length(sit)) {
ind_per[i,1:length(picture[[sit[i]]])] <- picture[[sit[i]]]
}
X <- seq(0,78,78/((length(ind)+1)*ORDER*4))
# 等式的右边有两部分,1~n/0~n-1
t0 <- rep(0.001,(length(ind)+1)*ORDER)
itimes <- 1
repeat{
s1 <- optim(t0,fl_new,method = 'Nelder-Mead',X = X,dep = dep,ind = ind,
dep_per = dep_per, ind_per = ind_per, LOPm = get_LOPm(X))
r1 <- s1$par
s2 <- optim(r1,fl_new,method = 'Nelder-Mead',X = X,dep = dep,ind = ind,
dep_per = dep_per, ind_per = ind_per, LOPm = get_LOPm(X))
cat(col,'-',itimes,s2$value,'\n')
itimes <- itimes + 1
if(all( abs(r1-s2$par) == 0 )||itimes == 10){ #*** itimes越高精度越高,计算速度越慢,有条件部署在集群时,应该尽可能大与1000 ***#
break
}else{
t0 <- s2$par
}
}
marker_list[[col]][[4]] <- matrix(s2$par,ncol=ORDER,byrow=TRUE)
tm <- matrix(s2$par,ncol=ORDER,byrow=TRUE)
ydep <- matrix(NA,length(dep_per[,1]),length(X))
for (i in 1:length(dep_per[,1])) {
ydep[i,] <- fy(c(dep_per[i,]),X)
}
ydep <- apply(ydep, 2, mean)
d_mat <- get_LOPm(X)%*%t(tm)
num = 0
fy1 <- matrix(NA,length(ind),length(X))
for (i in 1:length(ind)) {
sit = which(score == ind[i])
leng = length(sit)
raw = matrix(NA,leng,length(X))
if (i == 1) {
parameter = ind_per[1:leng,]
for (o in 1:leng) {
raw[o,] = fy(parameter[o,] ,X)
}
fy1[i,] <- apply(raw,2,mean)
num = num + leng
}else{
parameter = ind_per[(num+1):(num +leng),]
for (o in 1:leng) {
raw[o,] = fy(parameter[o,] ,X)
}
fy1[i,] <- apply(raw,2,mean)
num = num + leng
}
}
for (i in 1:length(tm[,1])) {
if (i == 1) {
d_mat[,i] <- d_mat[,i]*ydep
}else{
d_mat[,i] <- d_mat[,i]*fy1[i-1,]
}
}
# 将d_mat转化为o_mat
o_mat <- c()
for (i in 1:length(d_mat[1,]) ) {
if(i == 1){
o_mat <- cbind(o_mat,get_origin(d_mat[,i],X,ydep[1]))
}else{
o_mat <- cbind(o_mat,get_origin(d_mat[,i],X,0))
}
}
o_mat <- as.data.frame(o_mat)
if( dim(o_mat)[2] <= 2 ){
marker_list[[col]][[5]] <- sum(o_mat[,length(o_mat[1,])]*(X[2]-X[1]))
}else{
marker_list[[col]][[5]] <- colSums(o_mat[,2:(length(ind)+1)]*(X[2]-X[1]))
}
}
filename <- paste0("salt",mod,".Rdata")
save(marker_list,file = filename) |
library(RSAGA)
### Name: rsaga.target
### Title: Define target grid for interpolation
### Aliases: rsaga.target
### Keywords: interface spatial
### ** Examples
## Not run:
##D # IDW interpolation of attribute "z" from the point shapefile
##D # 'points.shp' to a grid with the same extent and resolution
##D # as the (pre-existing) geology grid:
##D rsaga.inverse.distance("points", "dem", field = "z", maxdist = 1000,
##D target = rsaga.target(target="target.grid",
##D target.grid = "geology"))
## End(Not run)
| /data/genthat_extracted_code/RSAGA/examples/rsaga.target.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 528 | r | library(RSAGA)
### Name: rsaga.target
### Title: Define target grid for interpolation
### Aliases: rsaga.target
### Keywords: interface spatial
### ** Examples
## Not run:
##D # IDW interpolation of attribute "z" from the point shapefile
##D # 'points.shp' to a grid with the same extent and resolution
##D # as the (pre-existing) geology grid:
##D rsaga.inverse.distance("points", "dem", field = "z", maxdist = 1000,
##D target = rsaga.target(target="target.grid",
##D target.grid = "geology"))
## End(Not run)
|
library("rvest")
library(dplyr)
# Accumulator for scraped box-score rows: one row per team per game.
# Starts empty with the 19 stat columns each game page contributes.
main.table <- data.frame(
  Minutes     = integer(),
  FG          = integer(),
  FGA         = integer(),
  FGP         = double(),
  ThreePoint  = integer(),
  ThreePointA = integer(),
  ThreePointP = double(),
  FT          = integer(),
  FTA         = integer(),
  FTP         = double(),
  ORB         = integer(),
  DRB         = integer(),
  TRB         = integer(),
  AST         = integer(),
  STL         = integer(),
  BLK         = integer(),
  TOV         = integer(),
  PF          = integer(),
  PTS         = integer()
)
#October Games
# Scrape the "Team Totals" box-score footer for both teams in every October
# game of the 2017-18 season and append them (one row per team per game) to
# the global `main.table`.
url <- 'https://www.basketball-reference.com/leagues/NBA_2018_games.html'
session <- html_session(url)
team.names <- read.csv("Data/teamsOctober.csv")
# Footer column names as rendered on basketball-reference; the trailing
# "Blank" cell is dropped after renaming.
stat.cols <- c("Minutes", "FG", "FGA", "FGP", "ThreePoint",
               "ThreePointA", "ThreePointP", "FT", "FTA",
               "FTP", "ORB", "DRB", "TRB", "AST",
               "STL", "BLK", "TOV", "PF", "PTS", "Blank")
# Extract one team's basic box-score footer from an open game page and return
# it as a one-row data frame whose row name is the full team name.
scrape.team.totals <- function(game.link, team.abbr) {
  team <- game.link %>%
    html_nodes(paste("#all_box_", team.abbr, "_basic h2", sep = "")) %>%
    html_text()
  stats <- game.link %>%
    html_nodes(paste("#box_", team.abbr, "_basic tfoot .right", sep = "")) %>%
    html_text()
  row <- stats %>% as.data.frame()
  names(row) <- c(team)
  row <- data.frame(t(row))          # transpose: one row, stats as columns
  colnames(row) <- stat.cols
  subset(row, select = -c(Blank))
}
for (i in 1:104) {  # 104 October games; bound must match rows of teamsOctober.csv
  game.link <- follow_link(session, css = paste("tr:nth-child(", i, ") .center a"))
  main.table <- rbind(main.table,
                      scrape.team.totals(game.link, team.names$Team1[[i]]),
                      scrape.team.totals(game.link, team.names$Team2[[i]]))
  # NOTE(review): the original assigned `session.return <- html_session(url)`
  # here every iteration; that value was never used, so the redundant HTTP
  # request has been removed.
}
#November Games
# Scrape the "Team Totals" box-score footer for both teams in every November
# game and append them (one row per team per game) to the global `main.table`.
url <- 'https://www.basketball-reference.com/leagues/NBA_2018_games-november.html'
session <- html_session(url)
team.names <- read.csv("Data/teamsNovember.csv")
# Footer column names as rendered on basketball-reference; the trailing
# "Blank" cell is dropped after renaming.
stat.cols <- c("Minutes", "FG", "FGA", "FGP", "ThreePoint",
               "ThreePointA", "ThreePointP", "FT", "FTA",
               "FTP", "ORB", "DRB", "TRB", "AST",
               "STL", "BLK", "TOV", "PF", "PTS", "Blank")
# Extract one team's basic box-score footer from an open game page and return
# it as a one-row data frame whose row name is the full team name.
scrape.team.totals <- function(game.link, team.abbr) {
  team <- game.link %>%
    html_nodes(paste("#all_box_", team.abbr, "_basic h2", sep = "")) %>%
    html_text()
  stats <- game.link %>%
    html_nodes(paste("#box_", team.abbr, "_basic tfoot .right", sep = "")) %>%
    html_text()
  row <- stats %>% as.data.frame()
  names(row) <- c(team)
  row <- data.frame(t(row))          # transpose: one row, stats as columns
  colnames(row) <- stat.cols
  subset(row, select = -c(Blank))
}
for (i in 1:213) {  # 213 November games; bound must match rows of teamsNovember.csv
  game.link <- follow_link(session, css = paste("tr:nth-child(", i, ") .center a"))
  main.table <- rbind(main.table,
                      scrape.team.totals(game.link, team.names$Team1[[i]]),
                      scrape.team.totals(game.link, team.names$Team2[[i]]))
  # NOTE(review): dropped the unused `session.return <- html_session(url)`
  # assignment -- it only issued a redundant request each iteration.
}
#December Games
# Scrape the "Team Totals" box-score footer for both teams in every December
# game and append them (one row per team per game) to the global `main.table`.
url <- 'https://www.basketball-reference.com/leagues/NBA_2018_games-december.html'
session <- html_session(url)
team.names <- read.csv("Data/teamsDecember.csv")
# Footer column names as rendered on basketball-reference; the trailing
# "Blank" cell is dropped after renaming.
stat.cols <- c("Minutes", "FG", "FGA", "FGP", "ThreePoint",
               "ThreePointA", "ThreePointP", "FT", "FTA",
               "FTP", "ORB", "DRB", "TRB", "AST",
               "STL", "BLK", "TOV", "PF", "PTS", "Blank")
# Extract one team's basic box-score footer from an open game page and return
# it as a one-row data frame whose row name is the full team name.
scrape.team.totals <- function(game.link, team.abbr) {
  team <- game.link %>%
    html_nodes(paste("#all_box_", team.abbr, "_basic h2", sep = "")) %>%
    html_text()
  stats <- game.link %>%
    html_nodes(paste("#box_", team.abbr, "_basic tfoot .right", sep = "")) %>%
    html_text()
  row <- stats %>% as.data.frame()
  names(row) <- c(team)
  row <- data.frame(t(row))          # transpose: one row, stats as columns
  colnames(row) <- stat.cols
  subset(row, select = -c(Blank))
}
for (i in 1:227) {  # 227 December games; bound must match rows of teamsDecember.csv
  game.link <- follow_link(session, css = paste("tr:nth-child(", i, ") .center a"))
  main.table <- rbind(main.table,
                      scrape.team.totals(game.link, team.names$Team1[[i]]),
                      scrape.team.totals(game.link, team.names$Team2[[i]]))
  # NOTE(review): dropped the unused `session.return <- html_session(url)`
  # assignment -- it only issued a redundant request each iteration.
}
#January Games
# Scrape the "Team Totals" box-score footer for both teams in every January
# game and append them (one row per team per game) to the global `main.table`.
url <- 'https://www.basketball-reference.com/leagues/NBA_2018_games-january.html'
session <- html_session(url)
team.names <- read.csv("Data/teamsJanuary.csv")
# Footer column names as rendered on basketball-reference; the trailing
# "Blank" cell is dropped after renaming.
stat.cols <- c("Minutes", "FG", "FGA", "FGP", "ThreePoint",
               "ThreePointA", "ThreePointP", "FT", "FTA",
               "FTP", "ORB", "DRB", "TRB", "AST",
               "STL", "BLK", "TOV", "PF", "PTS", "Blank")
# Extract one team's basic box-score footer from an open game page and return
# it as a one-row data frame whose row name is the full team name.
scrape.team.totals <- function(game.link, team.abbr) {
  team <- game.link %>%
    html_nodes(paste("#all_box_", team.abbr, "_basic h2", sep = "")) %>%
    html_text()
  stats <- game.link %>%
    html_nodes(paste("#box_", team.abbr, "_basic tfoot .right", sep = "")) %>%
    html_text()
  row <- stats %>% as.data.frame()
  names(row) <- c(team)
  row <- data.frame(t(row))          # transpose: one row, stats as columns
  colnames(row) <- stat.cols
  subset(row, select = -c(Blank))
}
for (i in 1:216) {  # 216 January games; bound must match rows of teamsJanuary.csv
  game.link <- follow_link(session, css = paste("tr:nth-child(", i, ") .center a"))
  main.table <- rbind(main.table,
                      scrape.team.totals(game.link, team.names$Team1[[i]]),
                      scrape.team.totals(game.link, team.names$Team2[[i]]))
  # NOTE(review): dropped the unused `session.return <- html_session(url)`
  # assignment -- it only issued a redundant request each iteration.
}
#February Games
# Scrape the "Team Totals" box-score footer for both teams in every February
# game and append them (one row per team per game) to the global `main.table`.
url <- 'https://www.basketball-reference.com/leagues/NBA_2018_games-february.html'
session <- html_session(url)
team.names <- read.csv("Data/teamsFebruary.csv")
# Footer column names as rendered on basketball-reference; the trailing
# "Blank" cell is dropped after renaming.
stat.cols <- c("Minutes", "FG", "FGA", "FGP", "ThreePoint",
               "ThreePointA", "ThreePointP", "FT", "FTA",
               "FTP", "ORB", "DRB", "TRB", "AST",
               "STL", "BLK", "TOV", "PF", "PTS", "Blank")
# Extract one team's basic box-score footer from an open game page and return
# it as a one-row data frame whose row name is the full team name.
scrape.team.totals <- function(game.link, team.abbr) {
  team <- game.link %>%
    html_nodes(paste("#all_box_", team.abbr, "_basic h2", sep = "")) %>%
    html_text()
  stats <- game.link %>%
    html_nodes(paste("#box_", team.abbr, "_basic tfoot .right", sep = "")) %>%
    html_text()
  row <- stats %>% as.data.frame()
  names(row) <- c(team)
  row <- data.frame(t(row))          # transpose: one row, stats as columns
  colnames(row) <- stat.cols
  subset(row, select = -c(Blank))
}
for (i in 1:160) {  # 160 February games; bound must match rows of teamsFebruary.csv
  game.link <- follow_link(session, css = paste("tr:nth-child(", i, ") .center a"))
  main.table <- rbind(main.table,
                      scrape.team.totals(game.link, team.names$Team1[[i]]),
                      scrape.team.totals(game.link, team.names$Team2[[i]]))
  # NOTE(review): dropped the unused `session.return <- html_session(url)`
  # assignment -- it only issued a redundant request each iteration.
}
#March Games
## Note: not all march games are in. Goes up to 222
# Scrape the "Team Totals" box-score footer for both teams in the first 83
# March games and append them to the global `main.table`. The 1:83 bound is
# intentionally smaller than the 222 listed games (see note above).
url <- 'https://www.basketball-reference.com/leagues/NBA_2018_games-march.html'
session <- html_session(url)
team.names <- read.csv("Data/teamsMarch.csv")
# Footer column names as rendered on basketball-reference; the trailing
# "Blank" cell is dropped after renaming.
stat.cols <- c("Minutes", "FG", "FGA", "FGP", "ThreePoint",
               "ThreePointA", "ThreePointP", "FT", "FTA",
               "FTP", "ORB", "DRB", "TRB", "AST",
               "STL", "BLK", "TOV", "PF", "PTS", "Blank")
# Extract one team's basic box-score footer from an open game page and return
# it as a one-row data frame whose row name is the full team name.
scrape.team.totals <- function(game.link, team.abbr) {
  team <- game.link %>%
    html_nodes(paste("#all_box_", team.abbr, "_basic h2", sep = "")) %>%
    html_text()
  stats <- game.link %>%
    html_nodes(paste("#box_", team.abbr, "_basic tfoot .right", sep = "")) %>%
    html_text()
  row <- stats %>% as.data.frame()
  names(row) <- c(team)
  row <- data.frame(t(row))          # transpose: one row, stats as columns
  colnames(row) <- stat.cols
  subset(row, select = -c(Blank))
}
for (i in 1:83) {  # deliberately partial: only games played so far
  game.link <- follow_link(session, css = paste("tr:nth-child(", i, ") .center a"))
  main.table <- rbind(main.table,
                      scrape.team.totals(game.link, team.names$Team1[[i]]),
                      scrape.team.totals(game.link, team.names$Team2[[i]]))
  # NOTE(review): dropped the unused `session.return <- html_session(url)`
  # assignment -- it only issued a redundant request each iteration.
}
#All data is in. Clean table and make into csv.
# Drop the Minutes column from the accumulated table, then write the
# season-to-date box-score rows to disk.
main.table$Minutes <- NULL
write.csv(main.table, "Data/GameData.csv")
| /basketball_win_prediction/basketball_scrape.R | no_license | thpossidente/COGS-298-Project | R | false | false | 11,514 | r | library("rvest")
library(dplyr)
# Accumulator for scraped box-score rows: one row per team per game.
# Starts empty with the 19 stat columns each game page contributes.
main.table <- data.frame(
  Minutes     = integer(),
  FG          = integer(),
  FGA         = integer(),
  FGP         = double(),
  ThreePoint  = integer(),
  ThreePointA = integer(),
  ThreePointP = double(),
  FT          = integer(),
  FTA         = integer(),
  FTP         = double(),
  ORB         = integer(),
  DRB         = integer(),
  TRB         = integer(),
  AST         = integer(),
  STL         = integer(),
  BLK         = integer(),
  TOV         = integer(),
  PF          = integer(),
  PTS         = integer()
)
#October Games
# Scrape the "Team Totals" box-score footer for both teams in every October
# game of the 2017-18 season and append them (one row per team per game) to
# the global `main.table`.
url <- 'https://www.basketball-reference.com/leagues/NBA_2018_games.html'
session <- html_session(url)
team.names <- read.csv("Data/teamsOctober.csv")
# Footer column names as rendered on basketball-reference; the trailing
# "Blank" cell is dropped after renaming.
stat.cols <- c("Minutes", "FG", "FGA", "FGP", "ThreePoint",
               "ThreePointA", "ThreePointP", "FT", "FTA",
               "FTP", "ORB", "DRB", "TRB", "AST",
               "STL", "BLK", "TOV", "PF", "PTS", "Blank")
# Extract one team's basic box-score footer from an open game page and return
# it as a one-row data frame whose row name is the full team name.
scrape.team.totals <- function(game.link, team.abbr) {
  team <- game.link %>%
    html_nodes(paste("#all_box_", team.abbr, "_basic h2", sep = "")) %>%
    html_text()
  stats <- game.link %>%
    html_nodes(paste("#box_", team.abbr, "_basic tfoot .right", sep = "")) %>%
    html_text()
  row <- stats %>% as.data.frame()
  names(row) <- c(team)
  row <- data.frame(t(row))          # transpose: one row, stats as columns
  colnames(row) <- stat.cols
  subset(row, select = -c(Blank))
}
for (i in 1:104) {  # 104 October games; bound must match rows of teamsOctober.csv
  game.link <- follow_link(session, css = paste("tr:nth-child(", i, ") .center a"))
  main.table <- rbind(main.table,
                      scrape.team.totals(game.link, team.names$Team1[[i]]),
                      scrape.team.totals(game.link, team.names$Team2[[i]]))
  # NOTE(review): dropped the unused `session.return <- html_session(url)`
  # assignment -- it only issued a redundant request each iteration.
}
#November Games
# Scrape the "Team Totals" box-score footer for both teams in every November
# game and append them (one row per team per game) to the global `main.table`.
url <- 'https://www.basketball-reference.com/leagues/NBA_2018_games-november.html'
session <- html_session(url)
team.names <- read.csv("Data/teamsNovember.csv")
# Footer column names as rendered on basketball-reference; the trailing
# "Blank" cell is dropped after renaming.
stat.cols <- c("Minutes", "FG", "FGA", "FGP", "ThreePoint",
               "ThreePointA", "ThreePointP", "FT", "FTA",
               "FTP", "ORB", "DRB", "TRB", "AST",
               "STL", "BLK", "TOV", "PF", "PTS", "Blank")
# Extract one team's basic box-score footer from an open game page and return
# it as a one-row data frame whose row name is the full team name.
scrape.team.totals <- function(game.link, team.abbr) {
  team <- game.link %>%
    html_nodes(paste("#all_box_", team.abbr, "_basic h2", sep = "")) %>%
    html_text()
  stats <- game.link %>%
    html_nodes(paste("#box_", team.abbr, "_basic tfoot .right", sep = "")) %>%
    html_text()
  row <- stats %>% as.data.frame()
  names(row) <- c(team)
  row <- data.frame(t(row))          # transpose: one row, stats as columns
  colnames(row) <- stat.cols
  subset(row, select = -c(Blank))
}
for (i in 1:213) {  # 213 November games; bound must match rows of teamsNovember.csv
  game.link <- follow_link(session, css = paste("tr:nth-child(", i, ") .center a"))
  main.table <- rbind(main.table,
                      scrape.team.totals(game.link, team.names$Team1[[i]]),
                      scrape.team.totals(game.link, team.names$Team2[[i]]))
  # NOTE(review): dropped the unused `session.return <- html_session(url)`
  # assignment -- it only issued a redundant request each iteration.
}
#December Games
# Scrape the "Team Totals" box-score footer for both teams in every December
# game and append them (one row per team per game) to the global `main.table`.
url <- 'https://www.basketball-reference.com/leagues/NBA_2018_games-december.html'
session <- html_session(url)
team.names <- read.csv("Data/teamsDecember.csv")
# Footer column names as rendered on basketball-reference; the trailing
# "Blank" cell is dropped after renaming.
stat.cols <- c("Minutes", "FG", "FGA", "FGP", "ThreePoint",
               "ThreePointA", "ThreePointP", "FT", "FTA",
               "FTP", "ORB", "DRB", "TRB", "AST",
               "STL", "BLK", "TOV", "PF", "PTS", "Blank")
# Extract one team's basic box-score footer from an open game page and return
# it as a one-row data frame whose row name is the full team name.
scrape.team.totals <- function(game.link, team.abbr) {
  team <- game.link %>%
    html_nodes(paste("#all_box_", team.abbr, "_basic h2", sep = "")) %>%
    html_text()
  stats <- game.link %>%
    html_nodes(paste("#box_", team.abbr, "_basic tfoot .right", sep = "")) %>%
    html_text()
  row <- stats %>% as.data.frame()
  names(row) <- c(team)
  row <- data.frame(t(row))          # transpose: one row, stats as columns
  colnames(row) <- stat.cols
  subset(row, select = -c(Blank))
}
for (i in 1:227) {  # 227 December games; bound must match rows of teamsDecember.csv
  game.link <- follow_link(session, css = paste("tr:nth-child(", i, ") .center a"))
  main.table <- rbind(main.table,
                      scrape.team.totals(game.link, team.names$Team1[[i]]),
                      scrape.team.totals(game.link, team.names$Team2[[i]]))
  # NOTE(review): dropped the unused `session.return <- html_session(url)`
  # assignment -- it only issued a redundant request each iteration.
}
# January games: same scraping pattern as the other months — follow each
# box-score link from the schedule page and append both teams' totals.
url <- 'https://www.basketball-reference.com/leagues/NBA_2018_games-january.html'
session <- html_session(url)
team.names <- read.csv("Data/teamsJanuary.csv")

# Column names of the box-score totals row; "Blank" is a scraping artifact.
stat.cols <- c("Minutes", "FG", "FGA", "FGP", "ThreePoint",
               "ThreePointA", "ThreePointP", "FT", "FTA",
               "FTP", "ORB", "DRB", "TRB", "AST",
               "STL", "BLK", "TOV", "PF", "PTS", "Blank")

for (i in seq_len(216)) {
  # Follow the box-score link found in the i-th schedule table row.
  game.link <- follow_link(session, css = paste("tr:nth-child(", i, ") .center a"))

  # First team: heading text plus the totals (tfoot) cells of its basic box.
  team1.name <- team.names$Team1[[i]]
  team1 <- game.link %>%
    html_nodes(paste0("#all_box_", team1.name, "_basic h2")) %>%
    html_text()
  table1 <- game.link %>%
    html_nodes(paste0("#box_", team1.name, "_basic tfoot .right")) %>%
    html_text()
  table1.df <- as.data.frame(table1)
  names(table1.df) <- c(team1)

  # Second team: same extraction.
  team2.name <- team.names$Team2[[i]]
  team2 <- game.link %>%
    html_nodes(paste0("#all_box_", team2.name, "_basic h2")) %>%
    html_text()
  table2 <- game.link %>%
    html_nodes(paste0("#box_", team2.name, "_basic tfoot .right")) %>%
    html_text()
  table2.df <- as.data.frame(table2)
  names(table2.df) <- c(team2)

  # One row per team, named stat columns, "Blank" removed.
  table1.df <- data.frame(t(table1.df))
  colnames(table1.df) <- stat.cols
  table1.df <- subset(table1.df, select = -c(Blank))
  table2.df <- data.frame(t(table2.df))
  colnames(table2.df) <- stat.cols
  table2.df <- subset(table2.df, select = -c(Blank))

  main.table <- rbind(main.table, table1.df)
  main.table <- rbind(main.table, table2.df)

  # NOTE(review): unused re-fetch of the schedule page; kept to preserve
  # the script's exact behavior.
  session.return <- html_session(url)
}
# February games: same scraping pattern as the other months.
url <- 'https://www.basketball-reference.com/leagues/NBA_2018_games-february.html'
session <- html_session(url)
team.names <- read.csv("Data/teamsFebruary.csv")

# Column names of the box-score totals row; "Blank" is a scraping artifact.
stat.cols <- c("Minutes", "FG", "FGA", "FGP", "ThreePoint",
               "ThreePointA", "ThreePointP", "FT", "FTA",
               "FTP", "ORB", "DRB", "TRB", "AST",
               "STL", "BLK", "TOV", "PF", "PTS", "Blank")

for (i in seq_len(160)) {
  # Follow the box-score link found in the i-th schedule table row.
  game.link <- follow_link(session, css = paste("tr:nth-child(", i, ") .center a"))

  # First team: heading text plus the totals (tfoot) cells of its basic box.
  team1.name <- team.names$Team1[[i]]
  team1 <- game.link %>%
    html_nodes(paste0("#all_box_", team1.name, "_basic h2")) %>%
    html_text()
  table1 <- game.link %>%
    html_nodes(paste0("#box_", team1.name, "_basic tfoot .right")) %>%
    html_text()
  table1.df <- as.data.frame(table1)
  names(table1.df) <- c(team1)

  # Second team: same extraction.
  team2.name <- team.names$Team2[[i]]
  team2 <- game.link %>%
    html_nodes(paste0("#all_box_", team2.name, "_basic h2")) %>%
    html_text()
  table2 <- game.link %>%
    html_nodes(paste0("#box_", team2.name, "_basic tfoot .right")) %>%
    html_text()
  table2.df <- as.data.frame(table2)
  names(table2.df) <- c(team2)

  # One row per team, named stat columns, "Blank" removed.
  table1.df <- data.frame(t(table1.df))
  colnames(table1.df) <- stat.cols
  table1.df <- subset(table1.df, select = -c(Blank))
  table2.df <- data.frame(t(table2.df))
  colnames(table2.df) <- stat.cols
  table2.df <- subset(table2.df, select = -c(Blank))

  main.table <- rbind(main.table, table1.df)
  main.table <- rbind(main.table, table2.df)

  # NOTE(review): unused re-fetch of the schedule page; kept to preserve
  # the script's exact behavior.
  session.return <- html_session(url)
}
# March games: same scraping pattern as the other months.
## Note: not all march games are in. Goes up to 222 (only the first 83
## rows are scraped here).
url <- 'https://www.basketball-reference.com/leagues/NBA_2018_games-march.html'
session <- html_session(url)
team.names <- read.csv("Data/teamsMarch.csv")

# Column names of the box-score totals row; "Blank" is a scraping artifact.
stat.cols <- c("Minutes", "FG", "FGA", "FGP", "ThreePoint",
               "ThreePointA", "ThreePointP", "FT", "FTA",
               "FTP", "ORB", "DRB", "TRB", "AST",
               "STL", "BLK", "TOV", "PF", "PTS", "Blank")

for (i in seq_len(83)) {
  # Follow the box-score link found in the i-th schedule table row.
  game.link <- follow_link(session, css = paste("tr:nth-child(", i, ") .center a"))

  # First team: heading text plus the totals (tfoot) cells of its basic box.
  team1.name <- team.names$Team1[[i]]
  team1 <- game.link %>%
    html_nodes(paste0("#all_box_", team1.name, "_basic h2")) %>%
    html_text()
  table1 <- game.link %>%
    html_nodes(paste0("#box_", team1.name, "_basic tfoot .right")) %>%
    html_text()
  table1.df <- as.data.frame(table1)
  names(table1.df) <- c(team1)

  # Second team: same extraction.
  team2.name <- team.names$Team2[[i]]
  team2 <- game.link %>%
    html_nodes(paste0("#all_box_", team2.name, "_basic h2")) %>%
    html_text()
  table2 <- game.link %>%
    html_nodes(paste0("#box_", team2.name, "_basic tfoot .right")) %>%
    html_text()
  table2.df <- as.data.frame(table2)
  names(table2.df) <- c(team2)

  # One row per team, named stat columns, "Blank" removed.
  table1.df <- data.frame(t(table1.df))
  colnames(table1.df) <- stat.cols
  table1.df <- subset(table1.df, select = -c(Blank))
  table2.df <- data.frame(t(table2.df))
  colnames(table2.df) <- stat.cols
  table2.df <- subset(table2.df, select = -c(Blank))

  main.table <- rbind(main.table, table1.df)
  main.table <- rbind(main.table, table2.df)

  # NOTE(review): unused re-fetch of the schedule page; kept to preserve
  # the script's exact behavior.
  session.return <- html_session(url)
}
#All data is in. Clean table and make into csv.
# Drop the Minutes column from the combined per-team table, then persist it.
# NOTE(review): write.csv() includes row names by default — confirm
# downstream readers of Data/GameData.csv expect that leading column.
main.table = subset(main.table, select=-c(Minutes))
write.csv(main.table, "Data/GameData.csv")
|
\name{pdsoft.cv}
\alias{pdsoft.cv}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Tuning parameter selection and computation for the positive definite and sparse covariance matrix estimator
}
\description{
Computes and selects the tuning parameter for
the sparse and positive definite covariance matrix estimator proposed by Rothman (2012).
}
\usage{
pdsoft.cv(x, lam.vec = NULL, standard = TRUE,
init = c("diag", "soft", "dense"), tau = 1e-04,
nsplits = 10, n.tr = NULL, tolin = 1e-08, tolout = 1e-08,
maxitin = 10000, maxitout = 1000, quiet = TRUE)
}
\arguments{
\item{x}{
A data matrix with \eqn{n} rows and \eqn{p} columns. The rows are assumed to be a realization of \eqn{n}
independent copies of a \eqn{p}-variate random vector.
}
\item{lam.vec}{
An optional vector of candidate lasso-type penalty tuning parameter values.
The default for \code{standard=TRUE} is \code{seq(from=0, to=1, by=0.05)}
and the default for \code{standard=FALSE} is \code{seq(from=0, to=m, length.out=20)},
where \code{m} is the maximum magnitude of the off-diagonal entries of the sample covariance matrix computed from \code{x}. Both of these default choices
are far from excellent and are time consuming, particularly for values close to zero.
The user should consider refining this set by increasing its resolution in a narrower range.
}
\item{standard}{
Logical: \code{standard=TRUE} first computes the observed sample correlation matrix from \code{x}, then
computes the sparse correlation matrix estimate, and finally rescales to return the sparse covariance
matrix estimate. The strongly recommended default is \code{standard=TRUE}.
}
\item{init}{
The type of initialization used for the estimate computed at the maximum element in \code{lam.vec}. Subsequent
initializations use the final iterates for \code{sigma} and \code{omega} at the previous value in \code{lam.vec}.
The default option \code{init="diag"} uses
diagonal starting values. The second option
\code{init="soft"} uses a positive definite version of the soft thresholded
covariance or correlation estimate, depending on \code{standard}. The third option \code{init="dense"}
uses the closed-form solution when \code{lam=0}.
}
\item{tau}{
The logarithmic barrier parameter. The default is \code{tau=1e-4}, which works well when \code{standard=TRUE}
with the default choices for the convergence tolerances.
}
\item{nsplits}{
The number of random splits to use for the tuning parameter selection.
}
\item{n.tr}{
Optional number of cases to use in the training set. The default is the nearest
integer to \eqn{n(1-1/\log(n))}. The value must be in \eqn{\{3, \ldots, n-2\}}.
}
\item{tolin}{
Convergence tolerance for the inner loop of the algorithm that solves the lasso regression.
}
\item{tolout}{
Convergence tolerance for the outer loop of the algorithm.
}
\item{maxitin}{
Maximum number of inner-loop iterations allowed
}
\item{maxitout}{
Maximum number of outer-loop iterations allowed
}
\item{quiet}{
Logical: \code{quiet=TRUE} suppresses the printing of progress updates.
}
}
\details{
See Rothman (2012) for the objective function and more information.
}
\value{
A list with
\item{sigma}{covariance estimate at the selected tuning parameter}
\item{omega}{inverse covariance estimate at the selected tuning parameter}
\item{best.lam}{the selected value of the tuning parameter}
\item{cv.err}{a vector of the validation errors, one for each element in \code{lam.vec}}
\item{lam.vec}{the vector of candidate tuning parameter values}
\item{n.tr}{the number of cases used for the training set}
}
\references{
Rothman, A. J. (2012). Positive definite estimators of large covariance matrices. Biometrika 99(3): 733-740
}
\author{
Adam J. Rothman
}
\note{
It is always the case that \code{omega} is positive definite. If \code{tolin} and \code{tolout} are too large,
or \code{maxitin} and \code{maxitout} are too small, then \code{sigma} may be indefinite.
}
\seealso{
\code{\link{pdsoft}}
}
\examples{
set.seed(1)
n=50
p=20
true.cov=diag(p)
true.cov[cbind(1:(p-1), 2:p)]=0.4
true.cov[cbind(2:p, 1:(p-1))]=0.4
eo=eigen(true.cov, symmetric=TRUE)
z=matrix(rnorm(n*p), nrow=n, ncol=p)
x=z\%*\% tcrossprod(eo$vectors*rep(eo$values^(0.5), each=p), eo$vectors)
output=pdsoft.cv(x=x)
plot(output$lam.vec, output$cv.err)
output$best.lam
output$sigma
}
| /man/pdsoft.cv.Rd | no_license | cran/PDSCE | R | false | false | 4,344 | rd | \name{pdsoft.cv}
\alias{pdsoft.cv}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Tuning parameter selection and computation for the positive definite and sparse covariance matrix estimator
}
\description{
Computes and selects the tuning parameter for
the sparse and positive definite covariance matrix estimator proposed by Rothman (2012).
}
\usage{
pdsoft.cv(x, lam.vec = NULL, standard = TRUE,
init = c("diag", "soft", "dense"), tau = 1e-04,
nsplits = 10, n.tr = NULL, tolin = 1e-08, tolout = 1e-08,
maxitin = 10000, maxitout = 1000, quiet = TRUE)
}
\arguments{
\item{x}{
A data matrix with \eqn{n} rows and \eqn{p} columns. The rows are assumed to be a realization of \eqn{n}
independent copies of a \eqn{p}-variate random vector.
}
\item{lam.vec}{
An optional vector of candidate lasso-type penalty tuning parameter values.
The default for \code{standard=TRUE} is \code{seq(from=0, to=1, by=0.05)}
and the default for \code{standard=FALSE} is \code{seq(from=0, to=m, length.out=20)},
where \code{m} is the maximum magnitude of the off-diagonal entries of the sample covariance matrix computed from \code{x}. Both of these default choices
are far from excellent and are time consuming, particularly for values close to zero.
The user should consider refining this set by increasing its resolution in a narrower range.
}
\item{standard}{
Logical: \code{standard=TRUE} first computes the observed sample correlation matrix from \code{x}, then
computes the sparse correlation matrix estimate, and finally rescales to return the sparse covariance
matrix estimate. The strongly recommended default is \code{standard=TRUE}.
}
\item{init}{
The type of initialization used for the estimate computed at the maximum element in \code{lam.vec}. Subsequent
initializations use the final iterates for \code{sigma} and \code{omega} at the previous value in \code{lam.vec}.
The default option \code{init="diag"} uses
diagonal starting values. The second option
\code{init="soft"} uses a positive definite version of the soft thresholded
covariance or correlation estimate, depending on \code{standard}. The third option \code{init="dense"}
uses the closed-form solution when \code{lam=0}.
}
\item{tau}{
The logarithmic barrier parameter. The default is \code{tau=1e-4}, which works well when \code{standard=TRUE}
with the default choices for the convergence tolerances.
}
\item{nsplits}{
The number of random splits to use for the tuning parameter selection.
}
\item{n.tr}{
Optional number of cases to use in the training set. The default is the nearest
integer to \eqn{n(1-1/\log(n))}. The value must be in \eqn{\{3, \ldots, n-2\}}.
}
\item{tolin}{
Convergence tolerance for the inner loop of the algorithm that solves the lasso regression.
}
\item{tolout}{
Convergence tolerance for the outer loop of the algorithm.
}
\item{maxitin}{
Maximum number of inner-loop iterations allowed
}
\item{maxitout}{
Maximum number of outer-loop iterations allowed
}
\item{quiet}{
Logical: \code{quiet=TRUE} suppresses the printing of progress updates.
}
}
\details{
See Rothman (2012) for the objective function and more information.
}
\value{
A list with
\item{sigma}{covariance estimate at the selected tuning parameter}
\item{omega}{inverse covariance estimate at the selected tuning parameter}
\item{best.lam}{the selected value of the tuning parameter}
\item{cv.err}{a vector of the validation errors, one for each element in \code{lam.vec}}
\item{lam.vec}{the vector of candidate tuning parameter values}
\item{n.tr}{the number of cases used for the training set}
}
\references{
Rothman, A. J. (2012). Positive definite estimators of large covariance matrices. Biometrika 99(3): 733-740
}
\author{
Adam J. Rothman
}
\note{
It is always the case that \code{omega} is positive definite. If \code{tolin} and \code{tolout} are too large,
or \code{maxitin} and \code{maxitout} are too small, then \code{sigma} may be indefinite.
}
\seealso{
\code{\link{pdsoft}}
}
\examples{
set.seed(1)
n=50
p=20
true.cov=diag(p)
true.cov[cbind(1:(p-1), 2:p)]=0.4
true.cov[cbind(2:p, 1:(p-1))]=0.4
eo=eigen(true.cov, symmetric=TRUE)
z=matrix(rnorm(n*p), nrow=n, ncol=p)
x=z\%*\% tcrossprod(eo$vectors*rep(eo$values^(0.5), each=p), eo$vectors)
output=pdsoft.cv(x=x)
plot(output$lam.vec, output$cv.err)
output$best.lam
output$sigma
}
|
#################################################
############      gamlss.dist       #############
############   Distribution tests   #############
# Zero inflated beta negative binomial: ZIBNB() #
#################################################

## Context (kept for compatibility with older testthat runners)
testthat::context("ZIBNB 2")

## Fix the RNG seed so the empirical moments below are reproducible
set.seed(619)

## Family object under test
fam <- ZIBNB()

## Parameter values: each parameter offset by i from its baseline
i <- 0.5
n <- 1e5
mu <- 1 + i
sigma <- 0.1 + i
nu <- 1 + i
tau <- 0.1 + i

## Draw a large sample and compute its empirical moments
rvec <- rZIBNB(n, mu, sigma, nu, tau)
ex_emp <- mean(rvec)
vx_emp <- var(rvec)

## Theoretical moments from the family definition
ex_theo <- fam$mean(mu, sigma, nu, tau)
vx_theo <- fam$variance(mu, sigma, nu, tau)

## Bare expectations at the top level are deprecated in testthat 3e,
## so wrap them in a test_that() block. Tolerance 0.02 matches the
## original check for this seed/sample size.
test_that("ZIBNB empirical moments match the theoretical moments", {
  expect_true(abs(ex_emp - ex_theo) < 0.02)
  expect_true(abs(vx_emp - vx_theo) < 0.02)
})
| /tests/testthat/test_01_ZIBNB.R | no_license | JENScoding/gamlss.dist | R | false | false | 774 | r | #################################################
############      gamlss.dist       #############
############   Distribution tests   #############
# Zero inflated beta negative binomial: ZIBNB() #
#################################################
## Context
testthat::context("ZIBNB 2")
## Seed: fixed so the empirical moments below are reproducible
set.seed(619)
## Family object under test
fam <- ZIBNB()
## Random Values: each parameter offset by i, sample of size n
i <- 0.5
n <- 1e5
mu <- 1 + i
sigma <- 0.1 + i
nu <- 1 + i
tau <- 0.1 + i
rvec <- rZIBNB(n, mu, sigma, nu, tau)
## Empirical Moments of the random sample
ex_emp <- mean(rvec)
vx_emp <- var(rvec)
## Theoretical moments from the family definition
ex_theo <- fam$mean(mu, sigma, nu, tau)
vx_theo <- fam$variance(mu, sigma, nu, tau)
## Test here if they are about the same (tolerance 0.02 matches this
## seed/sample size).
## NOTE(review): bare expectations outside test_that() are deprecated in
## testthat 3e; consider wrapping these in a test_that() block.
expect_true(abs(ex_emp - ex_theo) < 0.02)
expect_true(abs(vx_emp - vx_theo) < 0.02)
|
# plot4(): reads household_power_consumption.txt from the working
# directory, restricts it to the two days starting 2007-02-01, and writes
# a 2x2 panel of time-series plots to plot4.png.
# NOTE(review): header=T uses the reassignable shortcut; TRUE is safer.
# NOTE(review): attach()/detach() puts the subset on the search path; a
# with() block would avoid masking surprises. Kept as-is here.
plot4 <- function() {
  filename <- "household_power_consumption.txt"
  # Read data; '?' marks missing values. nrows=2100000 is presumably an
  # upper bound on the file's row count — TODO confirm.
  d <- read.table(file=filename, header=T, sep=";", na.strings="?",
                  comment.char="", nrows=2100000,
                  colClasses=c("character", "character", "numeric", "numeric", "numeric",
                               "numeric", "numeric", "numeric", "numeric"))
  # Convert Date and Time columns: Time becomes a full timestamp
  # (date + time-of-day), Date a Date object used for filtering below.
  d$Time <- strptime(paste(d$Date, d$Time, sep=" "),
                     format="%d/%m/%Y %T", tz="GMT")
  d$Date <- as.Date(d$Date, format="%d/%m/%Y")
  # Open the PNG device for the output file
  png(filename="plot4.png", width=480, height=480, units="px")
  # Generate a 2x2 array for plots (filled row-wise)
  par(mfrow=c(2, 2))
  # Include just the required two days (2007-02-01 and 2007-02-02)
  dates <- seq.Date(from=as.Date("2007/02/01"), length.out=2, by="day")
  attach(subset(d, subset=Date %in% dates))
  # Panel 1: Global_active_power over time
  plot(Time, Global_active_power, type="l", xlab=NA, ylab="Global Active Power")
  # Panel 2: Voltage over time
  plot(Time, Voltage, type="l", xlab="datetime", ylab="Voltage")
  # Panel 3: the three sub-metering series overlaid
  plot(Time, Sub_metering_1, type="l", xlab=NA, ylab="Energy sub metering")
  lines(Time, Sub_metering_2, type="l", col="red")
  lines(Time, Sub_metering_3, type="l", col="blue")
  legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
         bty="n", lty=1, col=c("black", "red", "blue"), cex=0.95)
  # Panel 4: Global_reactive_power over time (comment previously said Voltage)
  plot(Time, Global_reactive_power, type="l", xlab="datetime",
       ylab="Global Reactive Power")
  detach()
  # Close the device, flushing the file to disk
  dev.off()
} | /plot4.R | no_license | andrabuca/ExData_Plotting1 | R | false | false | 1,671 | r | plot4 <- function() {
filename <- "household_power_consumption.txt"
# Read data
d <- read.table(file=filename, header=T, sep=";", na.strings="?",
comment.char="", nrows=2100000,
colClasses=c("character", "character", "numeric", "numeric", "numeric",
"numeric", "numeric", "numeric", "numeric"))
# Convert Date and Time columns
d$Time <- strptime(paste(d$Date, d$Time, sep=" "),
format="%d/%m/%Y %T", tz="GMT")
d$Date <- as.Date(d$Date, format="%d/%m/%Y")
# Create plot framework
png(filename="plot4.png", width=480, height=480, units="px")
# Generate a 2x2 array for plots
par(mfrow=c(2, 2))
# Include just the required two days
dates <- seq.Date(from=as.Date("2007/02/01"), length.out=2, by="day")
attach(subset(d, subset=Date %in% dates))
# Plot Global_active_power variable
plot(Time, Global_active_power, type="l", xlab=NA, ylab="Global Active Power")
# Plot Voltage variable
plot(Time, Voltage, type="l", xlab="datetime", ylab="Voltage")
# Multivariate plot of Sub_metering_1, Sub_metering_2, Sub_metering_3
plot(Time, Sub_metering_1, type="l", xlab=NA, ylab="Energy sub metering")
lines(Time, Sub_metering_2, type="l", col="red")
lines(Time, Sub_metering_3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
bty="n", lty=1, col=c("black", "red", "blue"), cex=0.95)
# Plot of Voltage variable
plot(Time, Global_reactive_power, type="l", xlab="datetime",
ylab="Global Reactive Power")
detach()
# Save the file
dev.off()
} |
\name{stimuli}
\alias{stimuli}
\title{ Stimulus extraction function }
\description{
\code{stimuli} is a convenience function to extract the stimulus parameters from an \code{aldmck}, \code{blackbox}, or \code{blackbt} object.
}
\usage{
stimuli(object)
}
\arguments{
\item{object}{ an \code{aldmck}, \code{blackbox}, or \code{blackbt} output object. }
}
\value{
The stimuli of the estimated output, which can also be recovered as \code{object$stimuli}.
Please refer to the documentation of \code{aldmck}, \code{blackbox}, or \code{blackbox_transpose}
for specifics.
}
\author{
Keith Poole \email{ktpoole@uga.edu}
Howard Rosenthal \email{hr31@nyu.edu}
Jeffrey Lewis \email{jblewis@ucla.edu}
James Lo \email{lojames@usc.edu}
Royce Carroll \email{rcarroll@rice.edu}
}
\seealso{
'\link{aldmck}', '\link{blackbox}', '\link{blackbox_transpose}'.
}
\examples{
data(Issues1980)
Issues1980[Issues1980[,"abortion1"]==7,"abortion1"] <- 8 #missing recode
Issues1980[Issues1980[,"abortion2"]==7,"abortion2"] <- 8 #missing recode
### This command conducts estimates, which we instead load using data()
# Issues1980_bb <- blackbox(Issues1980,missing=c(0,8,9),verbose=FALSE,dims=3,minscale=8)
data(Issues1980_bb)
stimuli(Issues1980_bb)
}
\keyword{ multivariate }
| /man/stimuli.Rd | no_license | cran/basicspace | R | false | false | 1,324 | rd | \name{stimuli}
\alias{stimuli}
\title{ Stimulus extraction function }
\description{
\code{stimuli} is a convenience function to extract the stimulus parameters from an \code{aldmck}, \code{blackbox}, or \code{blackbt} object.
}
\usage{
stimuli(object)
}
\arguments{
\item{object}{ an \code{aldmck}, \code{blackbox}, or \code{blackbt} output object. }
}
\value{
The stimuli of the estimated output, which can also be recovered as \code{object$stimuli}.
Please refer to the documentation of \code{aldmck}, \code{blackbox}, or \code{blackbox_transpose}
for specifics.
}
\author{
Keith Poole \email{ktpoole@uga.edu}
Howard Rosenthal \email{hr31@nyu.edu}
Jeffrey Lewis \email{jblewis@ucla.edu}
James Lo \email{lojames@usc.edu}
Royce Carroll \email{rcarroll@rice.edu}
}
\seealso{
'\link{aldmck}', '\link{blackbox}', '\link{blackbox_transpose}'.
}
\examples{
data(Issues1980)
Issues1980[Issues1980[,"abortion1"]==7,"abortion1"] <- 8 #missing recode
Issues1980[Issues1980[,"abortion2"]==7,"abortion2"] <- 8 #missing recode
### This command conducts estimates, which we instead load using data()
# Issues1980_bb <- blackbox(Issues1980,missing=c(0,8,9),verbose=FALSE,dims=3,minscale=8)
data(Issues1980_bb)
stimuli(Issues1980_bb)
}
\keyword{ multivariate }
|
### =========================================================================
### Some low-level S4 classes and utilities
### -------------------------------------------------------------------------
###
### Not really a S4-related utility but I don't have a better place to put
### this at the moment.
### Strip the "AsIs" wrapper (added by I()) from an object's class
### attribute, leaving any remaining classes untouched.
drop_AsIs <- function(x)
{
    keep <- class(x) != "AsIs"
    class(x) <- class(x)[keep]
    x
}
### Allow coercion of any object to "AsIs" by wrapping it with I().
setAs("ANY", "AsIs", function(from) I(from))
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Some convenient union classes
###
### Union class for slots/arguments that accept a character vector or NULL.
setClassUnion("character_OR_NULL", c("character", "NULL"))
### WARNING: The behavior of is.vector(), is( , "vector"), is.list(), and
### is( ,"list") makes no sense:
### 1. is.vector(matrix()) is FALSE but is(matrix(), "vector") is TRUE.
### 2. is.list(data.frame()) is TRUE but is(data.frame(), "list") is FALSE.
### 3. is(data.frame(), "list") is FALSE but extends("data.frame", "list")
### is TRUE.
### 4. is.vector(data.frame()) is FALSE but is.list(data.frame()) and
### is.vector(list()) are both TRUE. In other words: a data frame is a
### list and a list is a vector but a data frame is not a vector.
### 5. I'm sure there is more but you get it!
### Building our software on top of such a mess won't give us anything good.
### For example, it's not too surprising that the union class we define below
### is broken:
### 6. is(data.frame(), "vector_OR_factor") is TRUE even though
### is(data.frame(), "vector") and is(data.frame(), "factor") are both
### FALSE.
### Results above obtained with R-3.1.2 and R-3.2.0.
### TODO: Be brave and report this craziness to the R bug tracker.
### Union of "vector" and "factor" (see the WARNING above for the known
### inconsistencies of is()/extends() around this union).
setClassUnion("vector_OR_factor", c("vector", "factor"))
### We define the coercion method below as a workaround to the following
### bug in R:
###
### setClass("A", representation(stuff="numeric"))
### setMethod("as.vector", "A", function(x, mode="any") x@stuff)
###
### a <- new("A", stuff=3:-5)
### > as.vector(a)
### [1] 3 2 1 0 -1 -2 -3 -4 -5
### > as(a, "vector")
### Error in as.vector(from) :
### no method for coercing this S4 class to a vector
### > selectMethod("coerce", c("A", "vector"))
### Method Definition:
###
### function (from, to, strict = TRUE)
### {
### value <- as.vector(from)
### if (strict)
### attributes(value) <- NULL
### value
### }
### <environment: namespace:methods>
###
### Signatures:
### from to
### target "A" "vector"
### defined "ANY" "vector"
### > setAs("ANY", "vector", function(from) as.vector(from))
### > as(a, "vector")
### [1] 3 2 1 0 -1 -2 -3 -4 -5
###
### ML: The problem is that the default coercion method is defined
### in the methods namespace, which does not see the as.vector()
### generic we define. Solution in this case would probably be to
### make as.vector a dispatching primitive like as.character(), but
### the "mode" argument makes things complicated.
### Fallback coercion to "vector" that dispatches through the as.vector()
### generic (works around the default method defined in the methods
### namespace; see the long explanation above).
setAs("ANY", "vector", function(from) as.vector(from))
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Coercion utilities
###
### Build and return a coercion function targeting 'class'. For
### vector-like targets the base as.<class>() coercer is used; otherwise
### methods::as(). The returned closure restores names that the coercion
### dropped (e.g. as.character() on a named numeric).
coercerToClass <- function(class) {
    coerce_one <- if (extends(class, "vector")) {
        get(paste0("as.", class))
    } else {
        function(from) as(from, class)
    }
    function(from) {
        ans <- coerce_one(from)
        if (is.null(names(ans)) && !is.null(names(from)))
            names(ans) <- names(from)
        ans
    }
}
### A version of coerce() that tries to do a better job at coercing to an
### S3 class. Dispatch on the 2nd argument only!
### The 'to' argument is used purely as a template: only class(to) matters.
setGeneric("coerce2", signature="to",
    function(from, to) standardGeneric("coerce2")
)

### TODO: Should probably use coercerToClass() internally (but coercerToClass()
### would first need to be improved).
### Strategy: (1) no-op if 'from' already is a 'to'; (2) for data.frame
### targets use as.data.frame(); (3) otherwise prefer an S3 as.<class>()
### coercer when one exists, falling back to methods::as(). A length check
### guards against lossy coercions, and names are restored when possible.
setMethod("coerce2", "ANY",
    function(from, to)
    {
        to_class <- class(to)
        if (is(from, to_class))
            return(from)
        if (is.data.frame(to)) {
            ans <- as.data.frame(from, check.names=FALSE,
                                       stringsAsFactors=FALSE)
        } else {
            ## Look for an S3-style coercer (e.g. as.integer) first.
            S3coerceFUN <- try(match.fun(paste0("as.", to_class)),
                               silent=TRUE)
            if (!inherits(S3coerceFUN, "try-error")) {
                ans <- S3coerceFUN(from)
            } else {
                ans <- as(from, to_class, strict=FALSE)
            }
        }
        ## wmsg() is a message-wrapping helper defined elsewhere in this
        ## package.
        if (length(ans) != length(from))
            stop(wmsg("coercion of ", class(from), " object to ", to_class,
                      " didn't preserve its length"))
        ## Try to restore the names if they were lost (e.g. by as.integer())
        ## or altered (e.g. by as.data.frame(), which will alter names equal
        ## to the empty string even if called with 'check.names=FALSE').
        if (!identical(names(ans), names(from))) {
            tmp <- try(`names<-`(ans, value=names(from)) , silent=TRUE)
            if (!inherits(tmp, "try-error"))
                ans <- tmp
        }
        ans
    }
)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### setValidity2(), new2()
###
### Give more control over when object validation should happen.
###
### Package-private environment holding the two validity-control flags.
.validity_options <- new.env(hash=TRUE, parent=emptyenv())
assign("debug", FALSE, envir=.validity_options)
assign("disabled", FALSE, envir=.validity_options)

### Get/set the validity-debugging flag. Called with no argument it
### returns the current flag; otherwise the argument is coerced with
### isTRUE(), stored, and the stored value is returned.
debugValidity <- function(debug)
{
    if (missing(debug))
        return(get("debug", envir=.validity_options))
    flag <- isTRUE(debug)
    assign("debug", flag, envir=.validity_options)
    flag
}

### Get/set the global switch that disables all validity checks installed
### via setValidity2(). Same calling convention as debugValidity().
disableValidity <- function(disabled)
{
    if (missing(disabled))
        return(get("disabled", envir=.validity_options))
    flag <- isTRUE(disabled)
    assign("disabled", flag, envir=.validity_options)
    flag
}
### Like setValidity() but the installed validity method additionally
### honors the global switches above: it short-circuits to TRUE when
### validity checking is disabled, and prints enter/leave traces when
### debugging is on.
setValidity2 <- function(Class, valid.func, where=topenv(parent.frame()))
{
    setValidity(Class,
        function(object)
        {
            ## Global kill switch (see disableValidity()).
            if (disableValidity())
                return(TRUE)
            if (debugValidity()) {
                whoami <- paste("validity method for", Class, "object")
                cat("[debugValidity] Entering ", whoami, "\n", sep="")
                on.exit(cat("[debugValidity] Leaving ", whoami, "\n", sep=""))
            }
            problems <- valid.func(object)
            ## 'valid.func' returns TRUE or an empty vector for a valid
            ## object, otherwise a character vector of problems.
            if (isTRUE(problems) || length(problems) == 0L)
                return(TRUE)
            problems
        },
        where=where
    )
}
### Like new() but with control over validation: 'check=FALSE' skips the
### validity method(s) while the object is constructed.
new2 <- function(..., check=TRUE)
{
    ## isTRUEorFALSE() is a scalar-logical check defined elsewhere in this
    ## package.
    if (!isTRUEorFALSE(check))
        stop("'check' must be TRUE or FALSE")
    ## Temporarily flip the global validity switch, restoring the previous
    ## setting on exit (even if new() errors).
    old_val <- disableValidity()
    on.exit(disableValidity(old_val))
    disableValidity(!check)
    new(...)
}
### Raise an error listing all reported problems, one per line; no-op
### when 'problems' is NULL.
stopIfProblems <- function(problems)
{
    if (!is.null(problems))
        stop(paste(problems, collapse="\n  "))
}
### 'signatures' must be a list of character vectors. To use when many methods
### share the same implementation.
### Registers 'definition' as the method for generic 'f' once per signature.
setMethods <- function(f, signatures=list(), definition,
                       where=topenv(parent.frame()), ...)
{
    for (signature in signatures)
        setMethod(f, signature=signature, definition, where=where, ...)
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### setReplaceAs()
###
### Supplying a "coerce<-" method to the 'replace' argument of setAs() is
### optional but not supplying a "coerce" method (thru the 'def' argument).
### However there are legitimate situations where we want to define a
### "coerce<-" method only. setReplaceAs() can be used for that.
###
### Same interface as setAs() (but no 'replace' argument).
setReplaceAs <- function(from, to, def, where=topenv(parent.frame()))
{
    ## Code below taken from setAs() and slightly adapted.
    args <- formalArgs(def)
    if (identical(args, c("from", "to", "value"))) {
        method <- def
    } else {
        if (length(args) != 2L)
            ## Single format string (previously split in two, which dropped
            ## the "arguments, got %d" part from the message).
            stop(gettextf("the method definition must be a function of 2 arguments, got %d",
                          length(args)), domain=NA)
        ## 'def' is a function of 2 arguments: rebuild it as a function of
        ## (from, to, value) as expected by the "coerce<-" generic.
        def <- body(def)
        if (!identical(args, c("from", "value"))) {
            ll <- list(quote(from), quote(value))
            names(ll) <- args
            def <- substituteDirect(def, ll)
            ## 'collapse' (not 'sep') is needed to join the deparsed lines;
            ## the format string was also previously split in two.
            warning(gettextf("argument names in method definition changed to agree with 'coerce<-' generic:\n%s",
                             paste(deparse(def), collapse="\n    ")), domain=NA)
        }
        method <- eval(function(from, to, value) NULL)
        functionBody(method, envir=.GlobalEnv) <- def
    }
    setMethod("coerce<-", c(from, to), method, where=where)
}
### We also provide 2 canonical "coerce<-" methods that can be used when the
### "from class" is a subclass of the "to class". They do what the methods
### automatically generated by the methods package are expected to do except
### that the latter are broken. See
### https://bugs.r-project.org/bugzilla/show_bug.cgi?id=16421
### for the bug report.
### Naive/straight-forward implementation (easy to understand so it explains
### the semantic of canonical "coerce<-").
### Canonical "coerce<-" for when the "from class" is a subclass of the
### "to class": copy every slot declared by 'to' from 'value' into 'from'
### and return the updated 'from'.
canonical_replace_as <- function(from, to, value)
{
    for (slot_name in slotNames(to)) {
        slot(from, slot_name) <- slot(value, slot_name)
    }
    from
}
### Does the same as canonical_replace_as() but tries to generate only one
### copy of 'from' instead of one copy each time one of its slots is modified.
canonical_replace_as_2 <- function(from, to, value)
{
## The first replacement-form assignment duplicates 'from' once; every
## later slot is written through a direct `slot<-` call on that private
## copy, avoiding one duplication per slot. check=FALSE skips the
## per-assignment validity/type check in both forms.
firstTime <- TRUE
for (what in slotNames(to)) {
v <- slot(value, what)
if (firstTime) {
slot(from, what, FALSE) <- v
firstTime <- FALSE
} else {
## Direct call: modifies the already-duplicated 'from' in place.
`slot<-`(from, what, FALSE, v)
}
}
from
}
### Usage (assuming B is a subclass of A):
###
### setReplaceAs("B", "A", canonical_replace_as_2)
###
### Note that this is used in the VariantAnnotation package.
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Manipulating the prototype of an S4 class.
###
### Gets or sets the default value of the given slot of the given class by
### reading or altering the prototype of the class. setDefaultSlotValue() is
### typically used in the .onLoad() hook of a package when the DLL of the
### package needs to be loaded *before* the default value of a slot can be
### computed.
### Return the default value of slot 'slotname' as recorded in the
### prototype of class 'classname'.
getDefaultSlotValue <- function(classname, slotname, where=.GlobalEnv)
{
    classdef <- getClass(classname, where=where)
    proto_attrs <- attributes(classdef@prototype)
    if (!(slotname %in% names(proto_attrs)))
        stop("prototype for class \"", classname, "\" ",
             "has no \"", slotname, "\" attribute")
    proto_attrs[[slotname]]
}
### Set the default value of slot 'slotname' by altering the prototype of
### class 'classname', then re-register the class definition. Typically used
### from a package's .onLoad() when the DLL must be loaded before the default
### can be computed (see comments above).
setDefaultSlotValue <- function(classname, slotname, value, where=.GlobalEnv)
{
classdef <- getClass(classname, where=where)
if (!(slotname %in% names(attributes(classdef@prototype))))
stop("prototype for class \"", classname, "\" ",
"has no \"", slotname, "\" attribute")
attr(classdef@prototype, slotname) <- value
assignClassDef(classname, classdef, where=where)
## Re-compute the complete definition of the class. methods::setValidity()
## does that after calling assignClassDef() so we do it too.
resetClass(classname, classdef, where=where)
}
### Replace the whole prototype of class 'classname' with the attributes of
### 'object' (an instance of that exact class), then re-register and
### re-compute the class definition.
setPrototypeFromObject <- function(classname, object, where=.GlobalEnv)
{
    classdef <- getClass(classname, where=where)
    ## BUGFIX: class(object) can have length > 1, which makes the original
    ## vector comparison an invalid 'if' condition (an error on R >= 4.2).
    ## Compare only the most derived class.
    if (class(object)[[1L]] != classname)
        stop("'object' must be a ", classname, " instance")
    object_attribs <- attributes(object)
    object_attribs$class <- NULL
    ## Sanity check: 'object' must carry exactly the attributes (slots)
    ## that the current prototype carries, in the same order.
    stopifnot(identical(names(object_attribs),
                        names(attributes(classdef@prototype))))
    attributes(classdef@prototype) <- object_attribs
    assignClassDef(classname, classdef, where=where)
    ## Re-compute the complete definition of the class. methods::setValidity()
    ## does that after calling assignClassDef() so we do it too.
    resetClass(classname, classdef, where=where)
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### allEqualS4: just a hack that automatically digs down
### deeply nested objects to detect differences.
###
### Recursive worker: compare 'x' and 'y' with all.equal(); when they differ
### and both are S4 objects of the same class, recurse into their attributes
### (slots) and stack the per-slot results as rows of a data.frame.
.allEqualS4 <- function(x, y) {
eq <- all.equal(x, y)
## NOTE(review): class(x) == class(y) assumes single-element class vectors;
## confirm no multi-class S4 objects reach this comparison.
canCompareS4 <- !isTRUE(eq) && isS4(x) && isS4(y) && class(x) == class(y)
if (canCompareS4) {
child.diffs <- mapply(.allEqualS4, attributes(x), attributes(y),
SIMPLIFY=FALSE)
## The "class" attribute is equal by construction here; drop it.
child.diffs$class <- NULL
## Normalize each child result to a 1-column data.frame (list column
## "comparison") so all results can be combined with rbind().
dfs <- mapply(function(d, nm) {
if (!is.data.frame(d)) {
data.frame(comparison = I(list(d)))
} else d
}, child.diffs, names(child.diffs), SIMPLIFY=FALSE)
do.call(rbind, dfs)
} else {
## Leaf case: TRUE, or only the first all.equal() difference message.
eq[1]
}
}
### Front end: return the named per-slot comparisons produced by .allEqualS4(),
### keeping only entries that are not TRUE (i.e. the actual differences).
### NOTE(review): assumes x and y differ so .allEqualS4() returned a
### data.frame; when they are equal, eq$comparison is NULL -- confirm callers.
allEqualS4 <- function(x, y) {
eq <- .allEqualS4(x, y)
setNames(eq$comparison, rownames(eq))[sapply(eq$comparison, Negate(isTRUE))]
}
| /R/S4-utils.R | no_license | gong-yuan/S4Vectors | R | false | false | 12,915 | r | ### =========================================================================
### Some low-level S4 classes and utilities
### -------------------------------------------------------------------------
###
### Not really a S4-related utility but I don't have a better place to put
### this at the moment.
### Strip the "AsIs" class tag (added by I()) from 'x', leaving the rest of
### its class vector intact.
drop_AsIs <- function(x)
{
    current <- class(x)
    class(x) <- current[current != "AsIs"]
    x
}
### Route as(x, "AsIs") through I() so the coercion machinery can wrap
### arbitrary objects.
setAs("ANY", "AsIs", function(from) I(from))
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Some convenient union classes
###
### Union class for slots/arguments that accept a character vector or NULL.
setClassUnion("character_OR_NULL", c("character", "NULL"))
### WARNING: The behavior of is.vector(), is( , "vector"), is.list(), and
### is( ,"list") makes no sense:
### 1. is.vector(matrix()) is FALSE but is(matrix(), "vector") is TRUE.
### 2. is.list(data.frame()) is TRUE but is(data.frame(), "list") is FALSE.
### 3. is(data.frame(), "list") is FALSE but extends("data.frame", "list")
### is TRUE.
### 4. is.vector(data.frame()) is FALSE but is.list(data.frame()) and
### is.vector(list()) are both TRUE. In other words: a data frame is a
### list and a list is a vector but a data frame is not a vector.
### 5. I'm sure there is more but you get it!
### Building our software on top of such a mess won't give us anything good.
### For example, it's not too surprising that the union class we define below
### is broken:
### 6. is(data.frame(), "vector_OR_factor") is TRUE even though
### is(data.frame(), "vector") and is(data.frame(), "factor") are both
### FALSE.
### Results above obtained with R-3.1.2 and R-3.2.0.
### TODO: Be brave and report this craziness to the R bug tracker.
### See the WARNING above: this union inherits the is()/extends()
### inconsistencies for basic types (e.g. point 6).
setClassUnion("vector_OR_factor", c("vector", "factor"))
### We define the coercion method below as a workaround to the following
### bug in R:
###
### setClass("A", representation(stuff="numeric"))
### setMethod("as.vector", "A", function(x, mode="any") x@stuff)
###
### a <- new("A", stuff=3:-5)
### > as.vector(a)
### [1] 3 2 1 0 -1 -2 -3 -4 -5
### > as(a, "vector")
### Error in as.vector(from) :
### no method for coercing this S4 class to a vector
### > selectMethod("coerce", c("A", "vector"))
### Method Definition:
###
### function (from, to, strict = TRUE)
### {
### value <- as.vector(from)
### if (strict)
### attributes(value) <- NULL
### value
### }
### <environment: namespace:methods>
###
### Signatures:
### from to
### target "A" "vector"
### defined "ANY" "vector"
### > setAs("ANY", "vector", function(from) as.vector(from))
### > as(a, "vector")
### [1] 3 2 1 0 -1 -2 -3 -4 -5
###
### ML: The problem is that the default coercion method is defined
### in the methods namespace, which does not see the as.vector()
### generic we define. Solution in this case would probably be to
### make as.vector a dispatching primitive like as.character(), but
### the "mode" argument makes things complicated.
### Workaround for the dispatch problem described above: make as(x, "vector")
### go through the as.vector() generic visible from this namespace.
setAs("ANY", "vector", function(from) as.vector(from))
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Coercion utilities
###
### Build a one-argument coercer targeting 'class'. Vector classes reuse the
### matching as.<class>() function; anything else falls back to methods::as().
### Names dropped by the coercion are restored from the input.
coercerToClass <- function(class) {
    .as <- if (extends(class, "vector"))
        get(paste0("as.", class))
    else
        function(from) as(from, class)
    function(from) {
        result <- .as(from)
        if (is.null(names(result)) && !is.null(names(from)))
            names(result) <- names(from)
        result
    }
}
### A version of coerce() that tries to do a better job at coercing to an
### S3 class. Dispatch on the 2nd argument only!
### Note that 'to' is a *template object* of the target class, not a class
### name, so methods can inspect it (see the "ANY" method below).
setGeneric("coerce2", signature="to",
function(from, to) standardGeneric("coerce2")
)
### TODO: Should probably use coercerToClass() internally (but coercerToClass()
### would first need to be improved).
### Default coerce2() method: prefer the S3 coercion path (as.<class>() or
### as.data.frame()) and fall back to methods::as(); the result must keep the
### input's length, and lost/altered names are restored when possible.
setMethod("coerce2", "ANY",
function(from, to)
{
to_class <- class(to)
## Already the target class: nothing to do.
if (is(from, to_class))
return(from)
if (is.data.frame(to)) {
ans <- as.data.frame(from, check.names=FALSE,
stringsAsFactors=FALSE)
} else {
## Try an S3-style as.<class>() first; fall back to as() when the
## function does not exist.
S3coerceFUN <- try(match.fun(paste0("as.", to_class)),
silent=TRUE)
if (!inherits(S3coerceFUN, "try-error")) {
ans <- S3coerceFUN(from)
} else {
ans <- as(from, to_class, strict=FALSE)
}
}
## Length preservation is part of the coerce2() contract.
if (length(ans) != length(from))
stop(wmsg("coercion of ", class(from), " object to ", to_class,
" didn't preserve its length"))
## Try to restore the names if they were lost (e.g. by as.integer())
## or altered (e.g. by as.data.frame(), which will alter names equal
## to the empty string even if called with 'check.names=FALSE').
if (!identical(names(ans), names(from))) {
tmp <- try(`names<-`(ans, value=names(from)) , silent=TRUE)
if (!inherits(tmp, "try-error"))
ans <- tmp
}
ans
}
)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### setValidity2(), new2()
###
### Give more contol over when object validation should happen.
###
### Package-global mutable switches controlling S4 validity checking
### (read/written by debugValidity() and disableValidity() below).
.validity_options <- new.env(hash=TRUE, parent=emptyenv())
assign("debug", FALSE, envir=.validity_options)
assign("disabled", FALSE, envir=.validity_options)
### Getter/setter for the global "debug validity" flag: with no argument the
### current flag is returned; otherwise the value is coerced to a strict
### TRUE/FALSE, stored, and returned.
debugValidity <- function(debug)
{
    if (missing(debug))
        return(get("debug", envir=.validity_options))
    flag <- isTRUE(debug)
    assign("debug", flag, envir=.validity_options)
    flag
}
### Getter/setter for the global "disable validity" flag (same protocol as
### debugValidity()): no argument reads the flag; otherwise the value is
### coerced to a strict TRUE/FALSE, stored, and returned.
disableValidity <- function(disabled)
{
    if (missing(disabled))
        return(get("disabled", envir=.validity_options))
    flag <- isTRUE(disabled)
    assign("disabled", flag, envir=.validity_options)
    flag
}
### Drop-in replacement for methods::setValidity() that wraps 'valid.func'
### so validation honors the package-global disableValidity()/debugValidity()
### switches, and so a TRUE or zero-length result always maps to TRUE.
setValidity2 <- function(Class, valid.func, where=topenv(parent.frame()))
{
setValidity(Class,
function(object)
{
## Global kill-switch: skip all validation while disabled (see new2()).
if (disableValidity())
return(TRUE)
## Optional tracing: announce entry and (via on.exit) exit of the method.
if (debugValidity()) {
whoami <- paste("validity method for", Class, "object")
cat("[debugValidity] Entering ", whoami, "\n", sep="")
on.exit(cat("[debugValidity] Leaving ", whoami, "\n", sep=""))
}
## 'valid.func' returns TRUE or a character vector of problems; normalize
## "no problems" (TRUE or zero-length result) to TRUE for setValidity().
problems <- valid.func(object)
if (isTRUE(problems) || length(problems) == 0L)
return(TRUE)
problems
},
where=where
)
}
### Like methods::new(), but with a 'check' switch: when check=FALSE the
### object is constructed with validity checking temporarily disabled
### (useful when slots will be filled in afterwards).
new2 <- function(..., check=TRUE)
{
if (!isTRUEorFALSE(check))
stop("'check' must be TRUE or FALSE")
## Flip the global validity switch for the duration of the new() call only;
## on.exit() guarantees the previous value is restored even on error.
old_val <- disableValidity()
on.exit(disableValidity(old_val))
disableValidity(!check)
new(...)
}
### Raise one error listing every accumulated validity problem, or do
### nothing at all when 'problems' is NULL.
stopIfProblems <- function(problems)
{
    if (is.null(problems))
        return(invisible(NULL))
    stop(paste(problems, collapse="\n  "))
}
### 'signatures' must be a list of character vectors. To use when many methods
### share the same implementation: the single 'definition' is registered once
### per signature via methods::setMethod().
setMethods <- function(f, signatures=list(), definition,
                       where=topenv(parent.frame()), ...)
{
    for (sig in signatures) {
        setMethod(f, signature=sig, definition, where=where, ...)
    }
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### setReplaceAs()
###
### Supplying a "coerce<-" method to the 'replace' argument of setAs() is
### optional but not supplying a "coerce" method (thru the 'def' argument).
### However there are legitimate situations where we want to define a
### "coerce<-" method only. setReplaceAs() can be used for that.
###
### Same interface as setAs() (but no 'replace' argument).
### Register a "coerce<-" method only (same interface as setAs() but without
### the 'replace' argument). 'def' may be a function of (from, to, value) or
### (from, value); other 2-argument shapes are renamed with a warning.
setReplaceAs <- function(from, to, def, where=topenv(parent.frame()))
{
    ## Code below taken from setAs() and slightly adapted.
    args <- formalArgs(def)
    if (identical(args, c("from", "to", "value"))) {
        method <- def
    } else {
        if (length(args) != 2L)
            ## BUGFIX: gettextf() takes a single format string. The message
            ## used to be split across two arguments, so sprintf() silently
            ## dropped "arguments, got %d" and the count was never reported.
            stop(gettextf("the method definition must be a function of 2 arguments, got %d",
                          length(args)), domain=NA)
        def <- body(def)
        if (!identical(args, c("from", "value"))) {
            ## Rewrite the body so its argument names agree with the generic.
            ll <- list(quote(from), quote(value))
            names(ll) <- args
            def <- substituteDirect(def, ll)
            ## BUGFIX: same gettextf() issue as above, and deparse() returns a
            ## character vector so the lines must be joined with 'collapse='
            ## ('sep=' is a no-op on a single vector).
            warning(gettextf("argument names in method definition changed to agree with 'coerce<-' generic:\n%s",
                             paste(deparse(def), collapse="\n  ")), domain=NA)
        }
        method <- eval(function(from, to, value) NULL)
        functionBody(method, envir=.GlobalEnv) <- def
    }
    setMethod("coerce<-", c(from, to), method, where=where)
}
### We also provide 2 canonical "coerce<-" methods that can be used when the
### "from class" is a subclass of the "to class". They do what the methods
### automatically generated by the methods package are expected to do except
### that the latter are broken. See
### https://bugs.r-project.org/bugzilla/show_bug.cgi?id=16421
### for the bug report.
### Naive/straight-forward implementation (easy to understand so it explains
### the semantic of canonical "coerce<-").
### Canonical "coerce<-" for when the "from class" is a subclass of the
### "to class": copy every slot declared by 'to' from 'value' into 'from'
### and return the updated 'from'.
canonical_replace_as <- function(from, to, value)
{
    for (slot_name in slotNames(to)) {
        slot(from, slot_name) <- slot(value, slot_name)
    }
    from
}
### Does the same as canonical_replace_as() but tries to generate only one
### copy of 'from' instead of one copy each time one of its slots is modified.
canonical_replace_as_2 <- function(from, to, value)
{
## The first replacement-form assignment duplicates 'from' once; every
## later slot is written through a direct `slot<-` call on that private
## copy, avoiding one duplication per slot. check=FALSE skips the
## per-assignment validity/type check in both forms.
firstTime <- TRUE
for (what in slotNames(to)) {
v <- slot(value, what)
if (firstTime) {
slot(from, what, FALSE) <- v
firstTime <- FALSE
} else {
## Direct call: modifies the already-duplicated 'from' in place.
`slot<-`(from, what, FALSE, v)
}
}
from
}
### Usage (assuming B is a subclass of A):
###
### setReplaceAs("B", "A", canonical_replace_as_2)
###
### Note that this is used in the VariantAnnotation package.
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Manipulating the prototype of an S4 class.
###
### Gets or sets the default value of the given slot of the given class by
### reading or altering the prototype of the class. setDefaultSlotValue() is
### typically used in the .onLoad() hook of a package when the DLL of the
### package needs to be loaded *before* the default value of a slot can be
### computed.
### Return the default value of slot 'slotname' as recorded in the
### prototype of class 'classname'.
getDefaultSlotValue <- function(classname, slotname, where=.GlobalEnv)
{
    classdef <- getClass(classname, where=where)
    proto_attrs <- attributes(classdef@prototype)
    if (!(slotname %in% names(proto_attrs)))
        stop("prototype for class \"", classname, "\" ",
             "has no \"", slotname, "\" attribute")
    proto_attrs[[slotname]]
}
### Set the default value of slot 'slotname' by altering the prototype of
### class 'classname', then re-register the class definition. Typically used
### from a package's .onLoad() when the DLL must be loaded before the default
### can be computed (see comments above).
setDefaultSlotValue <- function(classname, slotname, value, where=.GlobalEnv)
{
classdef <- getClass(classname, where=where)
if (!(slotname %in% names(attributes(classdef@prototype))))
stop("prototype for class \"", classname, "\" ",
"has no \"", slotname, "\" attribute")
attr(classdef@prototype, slotname) <- value
assignClassDef(classname, classdef, where=where)
## Re-compute the complete definition of the class. methods::setValidity()
## does that after calling assignClassDef() so we do it too.
resetClass(classname, classdef, where=where)
}
### Replace the whole prototype of class 'classname' with the attributes of
### 'object' (an instance of that exact class), then re-register and
### re-compute the class definition.
setPrototypeFromObject <- function(classname, object, where=.GlobalEnv)
{
    classdef <- getClass(classname, where=where)
    ## BUGFIX: class(object) can have length > 1, which makes the original
    ## vector comparison an invalid 'if' condition (an error on R >= 4.2).
    ## Compare only the most derived class.
    if (class(object)[[1L]] != classname)
        stop("'object' must be a ", classname, " instance")
    object_attribs <- attributes(object)
    object_attribs$class <- NULL
    ## Sanity check: 'object' must carry exactly the attributes (slots)
    ## that the current prototype carries, in the same order.
    stopifnot(identical(names(object_attribs),
                        names(attributes(classdef@prototype))))
    attributes(classdef@prototype) <- object_attribs
    assignClassDef(classname, classdef, where=where)
    ## Re-compute the complete definition of the class. methods::setValidity()
    ## does that after calling assignClassDef() so we do it too.
    resetClass(classname, classdef, where=where)
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### allEqualS4: just a hack that automatically digs down
### deeply nested objects to detect differences.
###
### Recursive worker: compare 'x' and 'y' with all.equal(); when they differ
### and both are S4 objects of the same class, recurse into their attributes
### (slots) and stack the per-slot results as rows of a data.frame.
.allEqualS4 <- function(x, y) {
eq <- all.equal(x, y)
## NOTE(review): class(x) == class(y) assumes single-element class vectors;
## confirm no multi-class S4 objects reach this comparison.
canCompareS4 <- !isTRUE(eq) && isS4(x) && isS4(y) && class(x) == class(y)
if (canCompareS4) {
child.diffs <- mapply(.allEqualS4, attributes(x), attributes(y),
SIMPLIFY=FALSE)
## The "class" attribute is equal by construction here; drop it.
child.diffs$class <- NULL
## Normalize each child result to a 1-column data.frame (list column
## "comparison") so all results can be combined with rbind().
dfs <- mapply(function(d, nm) {
if (!is.data.frame(d)) {
data.frame(comparison = I(list(d)))
} else d
}, child.diffs, names(child.diffs), SIMPLIFY=FALSE)
do.call(rbind, dfs)
} else {
## Leaf case: TRUE, or only the first all.equal() difference message.
eq[1]
}
}
### Front end: return the named per-slot comparisons produced by .allEqualS4(),
### keeping only entries that are not TRUE (i.e. the actual differences).
### NOTE(review): assumes x and y differ so .allEqualS4() returned a
### data.frame; when they are equal, eq$comparison is NULL -- confirm callers.
allEqualS4 <- function(x, y) {
eq <- .allEqualS4(x, y)
setNames(eq$comparison, rownames(eq))[sapply(eq$comparison, Negate(isTRUE))]
}
|
## Simulation study (scenario 14, pattern "CA", n = 50,000, rare outcomes):
## for each of n.sim replicates, simulate a confounder U and two competing
## multinomial subtypes under control/exposure, then estimate survivor-average
## (principal stratum) and naive causal contrasts (risk differences, odds
## ratios, and logistic/multinomial approximations). Saves the whole image.
## NOTE(review): clears the global environment on source, and depends on the
## non-CRAN helper package "Daniel" for CatIndex() and CalcOR() -- confirm.
rm(list = ls())
library(Daniel)
library(dplyr)
library(nnet)
## 95% Wald CIs for the exposure coefficient (column 2) of each outcome level
## in a fitted nnet::multinom model; row 1 = level 1, row 2 = level 2.
CalcCImultinom <- function(fit)
{
s <- summary(fit)
coef <- s$coefficients
ses <- s$standard.errors
ci.1 <- coef[1,2] + c(-1, 1)*1.96*ses[1, 2]
ci.2 <- coef[2,2] + c(-1, 1)*1.96*ses[2, 2]
return(rbind(ci.1,ci.2))
}
#key
# A, B,C,D,E,F - betaE[2] = 1.25, 1.5, 1.75, 2, 2.25, 2.5
# A,B,C, D, E,F - betaU = 2,3,4,5,6,7
## Scenario parameters for pattern "CA" per the key above.
patt <- "CA"
beta0 <- c(-6, -5)
betaE <- c(log(2.5), log(1.75))
betaU <- c(log(2), log(2.5))
sigmaU <- 1
n.sample <- 50000
n.sim <- 1000
## Per-replicate result holders. NOTE(review): matrix(nr=, nc=) relies on
## partial argument matching for nrow/ncol.
AllY <- matrix(nr = n.sim, nc = 3)
sace.diff1 <- sace.diff2 <- ace.diff1 <- ace.diff2 <-
sace.or1 <- sace.or2 <- ace.or1 <- ace.or2 <-
or.approx1 <- or.approx2 <- or.approx.true1 <- or.approx.true2 <-
pop.never.s1 <- pop.never.s2 <- vector(length = n.sim)
ci1 <- ci2 <- matrix(nr = n.sim, nc = 2)
for (j in 1:n.sim)
{
CatIndex(j)
# Simulate genetic score
U <- rnorm(n.sample, 0, sd = sigmaU)
#### Calcualte probabilites for each subtype with and without the exposure ####
e1E0 <- exp(beta0[1] + betaU[1]*U)
e1E1 <- exp(beta0[1] + betaE[1] + betaU[1]*U)
e2E0 <- exp(beta0[2] + betaU[2]*U)
e2E1 <- exp(beta0[2] + betaE[2] + betaU[2]*U)
## Multinomial-logit probabilities under control (E0) and exposure (E1);
## columns are subtype 1, subtype 2 and "no event".
prE0Y1 <- e1E0/(1 + e1E0 + e2E0)
prE0Y2 <- e2E0/(1 + e1E0 + e2E0)
prE1Y1 <- e1E1/(1 + e1E1 + e2E1)
prE1Y2 <- e2E1/(1 + e1E1 + e2E1)
probsE0 <- cbind(prE0Y1, prE0Y2, 1 - prE0Y1 - prE0Y2)
probsE1 <- cbind(prE1Y1, prE1Y2, 1 - prE1Y1 - prE1Y2)
# Simulate subtypes #
## Draw both potential outcomes per subject, then reveal one via X (1:1).
Yctrl <- Ytrt <- vector(length = n.sample)
X <- rbinom(n = n.sample, 1, 0.5)
for (i in 1:n.sample)
{
Yctrl[i] <- sample(c(1,2,0), 1, replace = T, prob = probsE0[i, ])
Ytrt[i] <- sample(c(1,2,0), 1, replace = T, prob = probsE1[i, ])
}
Y <- (1-X)*Yctrl + X*Ytrt
AllY[j, ] <- table(Y)
## Indicator versions of the two subtype outcomes under each arm.
Y1ctrl <- Yctrl==1
Y1trt <- Ytrt==1
Y2ctrl <- Yctrl==2
Y2trt <- Ytrt==2
pop.never.s1[j] <- mean(Y1ctrl==0 & Y1trt==0)
pop.never.s2[j] <- mean(Y2ctrl==0 & Y2trt==0)
# estimate causal parameters
## Survivor-average (principal stratum) and naive arm-specific risk diffs.
sace.diff1[j] <- mean((Y1trt - Y1ctrl)[Y2ctrl==0 & Y2trt==0])
sace.diff2[j]<- mean((Y2trt - Y2ctrl)[Y1ctrl==0 & Y1trt==0])
ace.diff1[j] <- mean((Y1trt[Y2trt==0 & X==1]) - mean(Y1ctrl[Y2ctrl==0 & X==0]))
ace.diff2[j] <- mean((Y2trt[Y1trt==0 & X==1]) - mean(Y2ctrl[Y1ctrl==0 & X==0]))
# Ypo <- c(Yctrl, Ytrt)
# Upo <- rep(U,2)
# Xpo <- rep(x = c(0,1), each = n.sample)
# fit.full.po <- multinom(Ypo~ Xpo + Upo)
# fit.po <- multinom(Ypo~ Xpo)
fit <- multinom(Y~ X)
cis <- CalcCImultinom(fit)
ci1[j, ] <- cis[1, ]
ci2[j, ] <- cis[2, ]
## Case-exclusion subsets for subtype-specific 2x2 odds ratios.
Y1only <- Y[Y<2]
X1only <- X[Y<2]
U1only <-U[Y<2]
Y2only <- Y[Y!=1]
X2only <- X[Y!=1]
U2only <-U[Y!=1]
Y2only[Y2only>0] <- 1
vec.for.or.1only <- c(sum((1 - Y1only) * (1 - X1only)) , sum(Y1only * (1 - X1only)),
sum((1 - Y1only) * X1only), sum(Y1only*X1only))
vec.for.or.2only <- c(sum((1 - Y2only) * (1 - X2only)) , sum(Y2only * (1 - X2only)),
sum((1 - Y2only) * X2only), sum(Y2only*X2only))
ace.or1[j] <- CalcOR(vec.for.or.1only)
ace.or2[j] <- CalcOR(vec.for.or.2only)
## Same odds ratios restricted to principal-stratum "survivors".
## NOTE(review): the subtype-2 filter mixes Ytrt!=1 with Y1ctrl!=1; since
## Y1ctrl is logical this equals Yctrl!=1, but the asymmetry with the
## subtype-1 filter (Yctrl < 2) looks like a typo for Yctrl -- confirm.
Y1only.sace <- Y[Ytrt <2 & Yctrl < 2]
X1only.sace <- X[Ytrt <2 & Yctrl < 2]
U1only.sace <-U[Ytrt <2 & Yctrl < 2]
Y2only.sace <- Y[Ytrt!=1 & Y1ctrl!=1]
X2only.sace <- X[Ytrt!=1 & Y1ctrl!=1]
U2only.sace <-U[Ytrt!=1 & Y1ctrl!=1]
Y2only.sace[Y2only.sace>0] <- 1
vec.for.or.sace1 <- c(sum((1 - Y1only.sace) * (1 - X1only.sace)) , sum(Y1only.sace * (1 - X1only.sace)),
sum((1 - Y1only.sace) * X1only.sace), sum(Y1only.sace*X1only.sace))
vec.for.or.sace2 <- c(sum((1 - Y2only.sace) * (1 - X2only.sace)) , sum(Y2only.sace * (1 - X2only.sace)),
sum((1 - Y2only.sace) * X2only.sace), sum(Y2only.sace*X2only.sace))
sace.or1[j] <- CalcOR(vec.for.or.sace1)
sace.or2[j] <- CalcOR(vec.for.or.sace2)
## Logistic approximations (crude and U-adjusted) to the subtype ORs.
Y1 <- Y==1
Y2 <- Y==2
fit.logistic.Y1 <- glm(Y1 ~ X, family = "binomial")
fit.logistic.true.Y1 <- glm(Y1 ~ X + U, family = "binomial")
fit.logistic.Y2 <- glm(Y2 ~ X, family = "binomial")
fit.logistic.true.Y2 <- glm(Y2 ~ X + U, family = "binomial")
or.approx1[j] <- exp(coef(fit.logistic.Y1)[2])
or.approx.true1[j] <- exp(coef(fit.logistic.true.Y1)[2])
or.approx2[j] <- exp(coef(fit.logistic.Y2)[2])
or.approx.true2[j] <- exp(coef(fit.logistic.true.Y2)[2])
}
save.image(paste0("CMPEn50krareScen14",patt,".RData"))
| /Simulations/Scripts/R/Rare/Scenario 14/CMPEn50KrareScen14CA.R | no_license | yadevi/CausalMPE | R | false | false | 4,221 | r | rm(list = ls())
library(Daniel)
library(dplyr)
library(nnet)
CalcCImultinom <- function(fit)
{
s <- summary(fit)
coef <- s$coefficients
ses <- s$standard.errors
ci.1 <- coef[1,2] + c(-1, 1)*1.96*ses[1, 2]
ci.2 <- coef[2,2] + c(-1, 1)*1.96*ses[2, 2]
return(rbind(ci.1,ci.2))
}
#key
# A, B,C,D,E,F - betaE[2] = 1.25, 1.5, 1.75, 2, 2.25, 2.5
# A,B,C, D, E,F - betaU = 2,3,4,5,6,7
patt <- "CA"
beta0 <- c(-6, -5)
betaE <- c(log(2.5), log(1.75))
betaU <- c(log(2), log(2.5))
sigmaU <- 1
n.sample <- 50000
n.sim <- 1000
AllY <- matrix(nr = n.sim, nc = 3)
sace.diff1 <- sace.diff2 <- ace.diff1 <- ace.diff2 <-
sace.or1 <- sace.or2 <- ace.or1 <- ace.or2 <-
or.approx1 <- or.approx2 <- or.approx.true1 <- or.approx.true2 <-
pop.never.s1 <- pop.never.s2 <- vector(length = n.sim)
ci1 <- ci2 <- matrix(nr = n.sim, nc = 2)
for (j in 1:n.sim)
{
CatIndex(j)
# Simulate genetic score
U <- rnorm(n.sample, 0, sd = sigmaU)
#### Calcualte probabilites for each subtype with and without the exposure ####
e1E0 <- exp(beta0[1] + betaU[1]*U)
e1E1 <- exp(beta0[1] + betaE[1] + betaU[1]*U)
e2E0 <- exp(beta0[2] + betaU[2]*U)
e2E1 <- exp(beta0[2] + betaE[2] + betaU[2]*U)
prE0Y1 <- e1E0/(1 + e1E0 + e2E0)
prE0Y2 <- e2E0/(1 + e1E0 + e2E0)
prE1Y1 <- e1E1/(1 + e1E1 + e2E1)
prE1Y2 <- e2E1/(1 + e1E1 + e2E1)
probsE0 <- cbind(prE0Y1, prE0Y2, 1 - prE0Y1 - prE0Y2)
probsE1 <- cbind(prE1Y1, prE1Y2, 1 - prE1Y1 - prE1Y2)
# Simulate subtypes #
Yctrl <- Ytrt <- vector(length = n.sample)
X <- rbinom(n = n.sample, 1, 0.5)
for (i in 1:n.sample)
{
Yctrl[i] <- sample(c(1,2,0), 1, replace = T, prob = probsE0[i, ])
Ytrt[i] <- sample(c(1,2,0), 1, replace = T, prob = probsE1[i, ])
}
Y <- (1-X)*Yctrl + X*Ytrt
AllY[j, ] <- table(Y)
Y1ctrl <- Yctrl==1
Y1trt <- Ytrt==1
Y2ctrl <- Yctrl==2
Y2trt <- Ytrt==2
pop.never.s1[j] <- mean(Y1ctrl==0 & Y1trt==0)
pop.never.s2[j] <- mean(Y2ctrl==0 & Y2trt==0)
# estimate causal parameters
sace.diff1[j] <- mean((Y1trt - Y1ctrl)[Y2ctrl==0 & Y2trt==0])
sace.diff2[j]<- mean((Y2trt - Y2ctrl)[Y1ctrl==0 & Y1trt==0])
ace.diff1[j] <- mean((Y1trt[Y2trt==0 & X==1]) - mean(Y1ctrl[Y2ctrl==0 & X==0]))
ace.diff2[j] <- mean((Y2trt[Y1trt==0 & X==1]) - mean(Y2ctrl[Y1ctrl==0 & X==0]))
# Ypo <- c(Yctrl, Ytrt)
# Upo <- rep(U,2)
# Xpo <- rep(x = c(0,1), each = n.sample)
# fit.full.po <- multinom(Ypo~ Xpo + Upo)
# fit.po <- multinom(Ypo~ Xpo)
fit <- multinom(Y~ X)
cis <- CalcCImultinom(fit)
ci1[j, ] <- cis[1, ]
ci2[j, ] <- cis[2, ]
Y1only <- Y[Y<2]
X1only <- X[Y<2]
U1only <-U[Y<2]
Y2only <- Y[Y!=1]
X2only <- X[Y!=1]
U2only <-U[Y!=1]
Y2only[Y2only>0] <- 1
vec.for.or.1only <- c(sum((1 - Y1only) * (1 - X1only)) , sum(Y1only * (1 - X1only)),
sum((1 - Y1only) * X1only), sum(Y1only*X1only))
vec.for.or.2only <- c(sum((1 - Y2only) * (1 - X2only)) , sum(Y2only * (1 - X2only)),
sum((1 - Y2only) * X2only), sum(Y2only*X2only))
ace.or1[j] <- CalcOR(vec.for.or.1only)
ace.or2[j] <- CalcOR(vec.for.or.2only)
Y1only.sace <- Y[Ytrt <2 & Yctrl < 2]
X1only.sace <- X[Ytrt <2 & Yctrl < 2]
U1only.sace <-U[Ytrt <2 & Yctrl < 2]
Y2only.sace <- Y[Ytrt!=1 & Y1ctrl!=1]
X2only.sace <- X[Ytrt!=1 & Y1ctrl!=1]
U2only.sace <-U[Ytrt!=1 & Y1ctrl!=1]
Y2only.sace[Y2only.sace>0] <- 1
vec.for.or.sace1 <- c(sum((1 - Y1only.sace) * (1 - X1only.sace)) , sum(Y1only.sace * (1 - X1only.sace)),
sum((1 - Y1only.sace) * X1only.sace), sum(Y1only.sace*X1only.sace))
vec.for.or.sace2 <- c(sum((1 - Y2only.sace) * (1 - X2only.sace)) , sum(Y2only.sace * (1 - X2only.sace)),
sum((1 - Y2only.sace) * X2only.sace), sum(Y2only.sace*X2only.sace))
sace.or1[j] <- CalcOR(vec.for.or.sace1)
sace.or2[j] <- CalcOR(vec.for.or.sace2)
Y1 <- Y==1
Y2 <- Y==2
fit.logistic.Y1 <- glm(Y1 ~ X, family = "binomial")
fit.logistic.true.Y1 <- glm(Y1 ~ X + U, family = "binomial")
fit.logistic.Y2 <- glm(Y2 ~ X, family = "binomial")
fit.logistic.true.Y2 <- glm(Y2 ~ X + U, family = "binomial")
or.approx1[j] <- exp(coef(fit.logistic.Y1)[2])
or.approx.true1[j] <- exp(coef(fit.logistic.true.Y1)[2])
or.approx2[j] <- exp(coef(fit.logistic.Y2)[2])
or.approx.true2[j] <- exp(coef(fit.logistic.true.Y2)[2])
}
save.image(paste0("CMPEn50krareScen14",patt,".RData"))
|
## Load the microtiter-plate parameter table from data-raw/. Column types are
## pinned ('iiidddddd': 3 integer + 6 double columns) so readr never guesses.
library(readr)
mtp_params_data <- read_tsv('data-raw/plate-params.tsv', col_types = 'iiidddddd')
# no longer use these data for computing graphical elements
# usethis::use_data(mtp_params_data, internal = TRUE, overwrite = TRUE)
| /data-raw/save-plate-params.R | no_license | npjc/mtpview1 | R | false | false | 229 | r | library(readr)
mtp_params_data <- read_tsv('data-raw/plate-params.tsv', col_types = 'iiidddddd')
# no longer use these data for computing graphical elements
# usethis::use_data(mtp_params_data, internal = TRUE, overwrite = TRUE)
|
## The working dir is set to default, so adjust the path below to where the
## raw data file is located.
plot1 <- function(){
    ## Plot 1: histogram of Global Active Power for 2007-02-01 and 2007-02-02.
    ## The UCI file is ';'-separated with missing values coded as "?".
    data <- read.csv("~/myR/data/household_power_consumption.txt",sep=";", stringsAsFactors = FALSE)
    ## BUGFIX: the original piped through dplyr::tbl_df() without loading
    ## dplyr; plain data.frame subsetting does the same job with no extra dep.
    data <- data[data$Date %in% c("1/2/2007", "2/2/2007"), ]
    ## Drop missing measurements ("?") before the numeric conversion.
    gap <- data$Global_active_power
    gap <- as.numeric(gap[gap != "?"])
    hist(gap, main = "Global Active Power", col = "red", xlab = "Global Active Power (kilowatts)")
    dev.copy(png, "plot1.png")
    ## BUGFIX: dev.copy() only opens the PNG device; without dev.off() the
    ## device stays open and plot1.png is never finalized on disk.
    dev.off()
}
| /plot1.R | no_license | TerryDuan/ExData_Plotting1 | R | false | false | 550 | r | ##the wording dir is set to default, so need redirect to where raw data located
plot1 <- function(){
data <- read.csv("~/myR/data/household_power_consumption.txt",sep=";", stringsAsFactors = FALSE)
data2<- tbl_df(data)
data3<-data2[data2["Date"] == "1/2/2007" | data2["Date"] == "2/2/2007",]
data3<-data3[data3["Global_active_power"] != "?",]
gap<-data3["Global_active_power"]
gap2 <- as.numeric(unlist(gap))
hist(gap2, main = "Global Active Power", col = "red", xlab = "Global Active Power (kilowatts)")
dev.copy(png, "plot1.png")
}
|
testlist <- list(G = numeric(0), Rn = numeric(0), atmp = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), ra = numeric(0), relh = 1.26179021194602e-307, rs = numeric(0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161))
result <- do.call(meteor:::ET0_PenmanMonteith,testlist)
str(result) | /meteor/inst/testfiles/ET0_PenmanMonteith/AFL_ET0_PenmanMonteith/ET0_PenmanMonteith_valgrind_files/1615841244-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 890 | r | testlist <- list(G = numeric(0), Rn = numeric(0), atmp = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), ra = numeric(0), relh = 1.26179021194602e-307, rs = numeric(0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161))
result <- do.call(meteor:::ET0_PenmanMonteith,testlist)
str(result) |
########################
####### Quiz Two #######
########################
## Exploratory analysis of the "organics" marketing data set: association of
## predictors with TargetBuy/TargetAmt, then a Box-Cox transform of PromSpend.
## Depends on third-party packages caret and fBasics (loaded where used).
# import organics data #
## Treat ".", "NA", "" and "?" all as missing on read.
organics<-read.csv("organics.csv", header=TRUE, na.strings=c(".", "NA", "", "?"))
# fix measurement levels
## ID, DemCluster and TargetBuy are codes, not numbers.
organics$ID<-as.factor(organics$ID)
organics$DemCluster<-as.factor(organics$DemCluster)
organics$TargetBuy<-as.factor(organics$TargetBuy)
# part 1a #
# Stacked bar plot #
counts = table(organics$TargetBuy, organics$DemGender)
barplot(counts, col=c("darkblue","red"), legend.text = TRUE,
main = "DemGender by TargetBuy",
xlab="DemGender", ylab="Freq")
# Or, generate mosaic plot via Rattle
# part 1b #
by(organics$PromTime, organics$TargetBuy, summary)
# part 1c #
by(organics$PromClass, organics$TargetBuy, summary)
# part 2a #
## Chi-square screen of the categorical predictors against TargetBuy
## (excluding the ID and the target itself).
index.cat<-sapply(organics, is.factor)
index.cat[c("ID","TargetBuy")]<-FALSE
chi2pvalues<- sapply(organics[index.cat],
function(x) chisq.test(x, organics$TargetBuy)$p.value)
sort(chi2pvalues)
chisq.test(organics$DemGender, organics$TargetBuy)$statistic
# part 2b #
## ROC-based variable importance for all predictors.
library(caret)
rocValues<- filterVarImp(x = organics[!(names(organics) %in% "TargetBuy")], y = organics$TargetBuy)
rocValues[order(-rocValues$X1),]
# part 2c #
## Absolute Pearson correlations of numeric predictors with TargetAmt.
indx <- sapply(organics, is.numeric)
indx["TargetAmt"]<-FALSE
corrValues <- sapply(organics[indx],
function(x) abs(cor(x, organics$TargetAmt, use = "complete.obs")))
sort(corrValues, decreasing = TRUE)
cor(organics$DemAffl, organics$TargetAmt, use = "complete.obs")
cor(organics$DemAge, organics$TargetAmt, use = "complete.obs")
# part 3a #
## Box-Cox transform of PromSpend; $bc shows the estimated lambda.
library(caret)
TransformParams <- preProcess(organics["PromSpend"], method=c("BoxCox"))
TransformParams$bc
organics.xf<-predict(TransformParams, organics)
# part 3b #
## Before/after histograms of PromSpend side by side.
par(mfrow=c(1,2))
hist(organics$PromSpend)
hist(organics.xf$PromSpend)
par(mfrow=c(1,1))
# part 3c #
library(fBasics)
basicStats(organics$PromSpend)
basicStats(organics.xf$PromSpend)
| /quiz2.R | no_license | amykelly36/MAT8480 | R | false | false | 2,028 | r | ########################
####### Quiz Two #######
########################
# import organics data #
organics<-read.csv("organics.csv", header=TRUE, na.strings=c(".", "NA", "", "?"))
# fix measurement levels
organics$ID<-as.factor(organics$ID)
organics$DemCluster<-as.factor(organics$DemCluster)
organics$TargetBuy<-as.factor(organics$TargetBuy)
# part 1a #
# Stacked bar plot #
counts = table(organics$TargetBuy, organics$DemGender)
barplot(counts, col=c("darkblue","red"), legend.text = TRUE,
main = "DemGender by TargetBuy",
xlab="DemGender", ylab="Freq")
# Or, generate mosaic plot via Rattle
# part 1b #
by(organics$PromTime, organics$TargetBuy, summary)
# part 1c #
by(organics$PromClass, organics$TargetBuy, summary)
# part 2a #
index.cat<-sapply(organics, is.factor)
index.cat[c("ID","TargetBuy")]<-FALSE
chi2pvalues<- sapply(organics[index.cat],
function(x) chisq.test(x, organics$TargetBuy)$p.value)
sort(chi2pvalues)
chisq.test(organics$DemGender, organics$TargetBuy)$statistic
# part 2b #
library(caret)
rocValues<- filterVarImp(x = organics[!(names(organics) %in% "TargetBuy")], y = organics$TargetBuy)
rocValues[order(-rocValues$X1),]
# part 2c #
indx <- sapply(organics, is.numeric)
indx["TargetAmt"]<-FALSE
corrValues <- sapply(organics[indx],
function(x) abs(cor(x, organics$TargetAmt, use = "complete.obs")))
sort(corrValues, decreasing = TRUE)
cor(organics$DemAffl, organics$TargetAmt, use = "complete.obs")
cor(organics$DemAge, organics$TargetAmt, use = "complete.obs")
# part 3a #
library(caret)
TransformParams <- preProcess(organics["PromSpend"], method=c("BoxCox"))
TransformParams$bc
organics.xf<-predict(TransformParams, organics)
# part 3b #
par(mfrow=c(1,2))
hist(organics$PromSpend)
hist(organics.xf$PromSpend)
par(mfrow=c(1,1))
# part 3c #
library(fBasics)
basicStats(organics$PromSpend)
basicStats(organics.xf$PromSpend)
|
## Plotly time-series chart of terrorism data (daily number killed and number
## of attacks) with a range selector + range slider, published to plot.ly.
## SECURITY(review): plot.ly username and API key are hard-coded below and
## committed to source control -- rotate the key and move credentials to
## environment variables outside the script.
library(plotly)
library(quantmod)
library(dplyr)
## NOTE(review): setwd() to an absolute user-specific path makes the script
## non-portable; consider project-relative paths.
setwd("C:/Users/Aeint Thet Ngon/Documents/Georgetown University/Data Viz/Takehome")
Sys.setenv("plotly_username"="aeint31")
Sys.setenv("plotly_api_key"="64YagDOY29TsffYihgol")
## NOTE(review): 'ds' is written before it is defined anywhere in this file;
## this line only works when 'ds' is left over in the workspace from an
## earlier session -- confirm intended workflow.
write.csv(ds, "ds1_plotly.csv")
#ds <- read.csv("plotly_rangesliderdata.csv")
ds <- read.csv("ds1_plotly.csv")
## Drop row-number artifact columns produced by repeated write.csv round-trips.
ds$X.1 <- NULL
ds$X.2 <- NULL
ds$X <- NULL
ds$totalwound <- NULL
## Two line traces (killed, attack count) sharing the date axis, with
## 3mo/6mo/1yr/YTD/all range-selector buttons and a date range slider.
p <- plot_ly(ds, x = ~date) %>%
add_lines(y = ~totalkilled, name = "Number of people killed") %>%
# add_lines(y = ~totalwound, name = "Number of people wounded") %>%
add_lines(y = ~n, name = "Number number of attacks") %>%
layout(
title = "Number of wounded and killed",
xaxis = list(
rangeselector = list(
buttons = list(
list(
count = 3,
label = "3 mo",
step = "month",
stepmode = "backward"),
list(
count = 6,
label = "6 mo",
step = "month",
stepmode = "backward"),
list(
count = 1,
label = "1 yr",
step = "year",
stepmode = "backward"),
list(
count = 1,
label = "YTD",
step = "year",
stepmode = "todate"),
list(step = "all"))),
rangeslider = list(type = "date")),
yaxis = list(title = "Count"))
## Publish the figure to the plot.ly account configured above.
api_create(p, filename = "rangeslider_killedandtotalattacks")
| /plotly_rangeslider.R | no_license | aeintngon/TerrorismViz | R | false | false | 1,511 | r | library(plotly)
## Duplicate copy of the range-slider script above; library(plotly) for this
## copy sits on the preceding (corrupted) line of the dump.
library(quantmod)
library(dplyr)
setwd("C:/Users/Aeint Thet Ngon/Documents/Georgetown University/Data Viz/Takehome")
# SECURITY(review): plotly credentials are hardcoded and committed to source.
# Rotate this API key and read the credentials from environment variables.
Sys.setenv("plotly_username"="aeint31")
Sys.setenv("plotly_api_key"="64YagDOY29TsffYihgol")
# NOTE(review): 'ds' is written out before it is defined anywhere in this
# script -- presumably it exists in the workspace from an earlier step; confirm.
write.csv(ds, "ds1_plotly.csv")
#ds <- read.csv("plotly_rangesliderdata.csv")
ds <- read.csv("ds1_plotly.csv")
# Remove helper columns (X, X.1, X.2 -- likely row-index artifacts of the CSV
# round trip) and the unused totalwound series
ds$X.1 <- NULL
ds$X.2 <- NULL
ds$X <- NULL
ds$totalwound <- NULL
# One add_lines() call per plotted series; the wounded series stays disabled
p <- plot_ly(ds, x = ~date) %>%
add_lines(y = ~totalkilled, name = "Number of people killed") %>%
# add_lines(y = ~totalwound, name = "Number of people wounded") %>%
add_lines(y = ~n, name = "Number number of attacks") %>%
layout(
title = "Number of wounded and killed",
xaxis = list(
# Quick-zoom buttons: 3 months, 6 months, 1 year back, year-to-date, all data
rangeselector = list(
buttons = list(
list(
count = 3,
label = "3 mo",
step = "month",
stepmode = "backward"),
list(
count = 6,
label = "6 mo",
step = "month",
stepmode = "backward"),
list(
count = 1,
label = "1 yr",
step = "year",
stepmode = "backward"),
list(
count = 1,
label = "YTD",
step = "year",
stepmode = "todate"),
list(step = "all"))),
rangeslider = list(type = "date")),
yaxis = list(title = "Count"))
# Publish the figure to the configured plotly account
api_create(p, filename = "rangeslider_killedandtotalattacks")
|
## Auto formatting page numbers ----
#' Build the RTF field code that renders the current page number
#'
#' @param properties RTF text-property codes (font/size/style) placed at the
#'   start of the field group; defaults to none.
#'
#' @return A single string containing the RTF PAGE field.
#' @noRd
page_num <- function(properties='') {
  # TODO: Add style and font support
  paste0("{", properties,
         "\\field\\flddirty{\\*\\fldinst{ PAGE \\\\* MERGEFORMAT }}}")
}
#' Build the RTF field code that renders the total page count
#'
#' @param properties RTF text-property codes (font/size/style) placed at the
#'   start of the field group; defaults to none.
#'
#' @return A single string containing the RTF NUMPAGES field.
#' @noRd
page_total <- function(properties='') {
  paste0("{", properties, "\\field{\\*\\fldinst{ NUMPAGES}}}")
}
#' Add page number information
#'
#' Adds current and total page number. First %s is current page, second %s is
#' total pages.
#'
#' @param format Format for string replacement; each "%s" is replaced, in
#'   order, by the PAGE and NUMPAGES field codes
#' @param properties Properties for displaying page number information
#'
#' @return String of RTF encoding that displays the current and total pages.
#' @noRd
add_page_num <- function(format="Page %s of %s", properties='') {
# Make sure there's only a replacement for current and total pages.
# NOTE(review): gregexpr() returns -1 when no "%s" is present, so token_ct
# then holds -1 rather than being empty -- the length check still passes.
token_ct <- unlist(gregexpr("\\%s", format))
assert_that(length(token_ct) <= 2,
msg = "Too many replacement strings - limited to 2 for current page and total pages.")
# Split out the tokens of the string, apply brackets, and bring them back together
chunks <- unlist(strsplit(format, "%s"))
# Build the string to be formatted and incorporate the properties from the line
fmt_str <- paste(paste0("{", rep(properties, length(chunks)), chunks, "}"), collapse="%s")
# If the last replacement token was found at the second to last character, then it was not maintained
# with the string split, so add it back on
if (token_ct[length(token_ct)] == nchar(format) - 1) fmt_str <- paste(fmt_str, "%s", sep="")
# Format in the rendered PAGE / NUMPAGES field codes
page_str <- sprintf(fmt_str, page_num(properties), page_total(properties))
page_str
}
#' Font table
#'
#' Builds the RTF \\fonttbl group. "Times" is always entry \\f0; the
#' document's own fonts follow, de-duplicated, in order.
#'
#' @param doc RTF document
#'
#' @return String of RTF encoding with font information
#' @noRd
font_table_string <- function(doc){
  fonts <- unique(c("Times", font(doc)))
  # RTF font indices are zero-based
  entries <- sprintf(" {\\f%d %s;}", seq_along(fonts) - 1L, fonts)
  paste("{\\fonttbl", paste(entries, collapse = "\n"), "}", sep = "\n")
}
## Color Table ----
# Not investing in this at the moment, so write out a default blank table
#' Create document color table
#'
#' Color support is not currently implemented; this always emits an empty
#' RTF color table group.
#'
#' @param doc RTF document (currently unused; kept for call-site symmetry
#'   with the other *_string generators)
#'
#' @return String of RTF encoding with color table information
#' @noRd
color_table_string <- function(doc){
  # paste() around a single literal was a no-op; return the constant directly.
  '{\\colortbl;;}\n'
}
#' Generate document properties string
#'
#' Emits paper size, orientation, margins, header/footer heights and default
#' font size as a single RTF control-word run. All inch values are converted
#' to twips (1 inch = 1440 twips).
#'
#' @param doc RTF document
#'
#' @return String encoding with document property information
#' @noRd
doc_properties_string <- function(doc){
# Get margins and convert to twips
mrgs <- sapply(margins(doc), function(x) x*1440)
# Make margin string
mrg_str <- sprintf("\\margl%s\\margr%s\\margt%s\\margb%s\n", mrgs['left'], mrgs['right'], mrgs['top'], mrgs['bottom'])
# Height and width string
ps <- pagesize(doc)
# Header and footer heights
hf_ht <- sprintf("\\headery%s\\footery%s", header_height(doc) * 1440, footer_height(doc) * 1440)
# Get orientation string
if (orientation(doc) == 'landscape') {
ortn <- '\\lndscpsxn\n'
# If the orientation is landscape, reverse the height and width, effectively flipping 90 degrees
ht_wd <- sprintf('\\paperw%s\\paperh%s', ps['width'] * 1440, ps['height'] * 1440)
} else{
ortn <- ''
# For portrait, use the values as they were entered
# NOTE(review): portrait puts pagesize 'height' into \paperw and 'width'
# into \paperh, the mirror image of the landscape branch -- this only works
# if pagesize() stores landscape-oriented defaults; confirm before changing.
ht_wd <- sprintf('\\paperw%s\\paperh%s', ps['height'] * 1440, ps['width'] * 1440)
}
# Font size (RTF \fs units are half-points)
fs <- sprintf("\\fs%s\n", font_size(doc)*2)
# Other information
other <- '\\widowctrl\\ftnbj\\fet0\\sectd\\linex0\n'
paste(ht_wd, other, ortn, mrg_str, hf_ht, fs, sep='')
}
## Header and footer string generation ----
#' Create a single line of RTF header/footnote information
#'
#' Renders one title/footnote line: resolves its font, font size, bold,
#' italic and alignment (falling back to document defaults), then formats
#' the line's text chunks with those properties.
#'
#' @param line A single title/footnote to write
#' @param doc RTF document
#'
#' @return String of RTF encoding for title/footnotes
#' @noRd
hf_line_string <- function(line, doc=NULL) {
# Placeholders
ft <- '\\f1' # Font (comes from a list, but using \f0 doesn't seem to work)
fs <- sprintf("\\fs%s", font_size(doc) * 2) # Font size - no way to set universal document font size, just defaults to 12
# so use the documents set size
bd <- '' # Bold (On or off - default off)
it <- '' # Italic (On or off - default off)
al <- '\\ql\n' # Alignment (Defaults to left \ql - left aligned)
tabs <- '' # Overwritten if split alignment
# Read the font information
# If font is overridden generate the string
if (!is.na(font(line))) {
# In huxtable they subtract one because the font list is 0 based, but instead of
# storing an unused font in the font attribute of the document, I'm just writing out an used font
# to the font table in the RTF document and matching the index as if it were 1 based.
ft <- sprintf("\\f%s", match(font(line), font(doc)))
}
# If font size is overridden generate the string (\fs units are half-points)
if (!is.na(font_size(line))) {
fs <- sprintf("\\fs%s", font_size(line)*2)
}
# Styling
# use the bold string if on
if (bold(line)) bd <- "\\b"
# Use the italics string if on
if (italic(line)) it <- "\\i"
# Concatenate all of the text level properties
properties <- paste(ft, fs, bd, it, ' ', sep='')
# Alignment
if (align(line) == 'center') al <- '\\qc\n'
else if (align(line) == 'right') al <- '\\qr\n'
# Split will align left and designate tab locations, where the right most is flush right
else if (align(line) == 'split') {
al <- "\\ql\\tx7245\\tqr\\tx12960\n"
tabs <- '\\pmartabqr \n'
}
txt_string <- sapply(line$text, format_text_string, properties = properties, USE.NAMES=FALSE)
# Patch: a split-aligned line yields two text chunks; join them around the
# right-aligned tab stop so the second chunk lands flush right
if (length(txt_string) > 1) {
txt_string <- paste(txt_string[1], tabs, txt_string[2], sep='')
}
paste(al, txt_string, sep='')
}
#' Assemble the RTF page header or footer block for a document
#'
#' @param doc RTF document
#' @param type Either 'titles' (page header, which additionally carries the
#'   table column headers) or 'footnotes' (page footer)
#'
#' @return String of RTF encoding for the requested header/footer block
#' @noRd
hf_string <- function(doc, type=NULL) {
  # Render each title/footnote line to RTF, in display order
  rendered <- sapply(order_lines(doc[[type]]), hf_line_string, doc=doc)
  # Join the individual lines with paragraph breaks
  content <- paste(rendered, collapse="\n\\par")
  # Opening control word differs between page header and page footer
  if (type == 'titles') command <- '\\header\n'
  else if (type == 'footnotes') command <- '\\footer\n'
  if (type == "titles") {
    # The page header also carries the table's column headers
    paste0('{', command, content, '\n\\par\n', get_column_headers(doc), '\n}')
  } else {
    paste0('{', command, content, '\\par\n}')
  }
}
#' Create the header string
#'
#' Thin wrapper around hf_string() selecting the 'titles' block
#' (document titles plus the table column headers).
#'
#' @param doc RTF document
#'
#' @return String RTF encoding with the header information
#' @noRd
header_string <- function(doc) {
hf_string(doc, type='titles')
}
#' Create the footer string
#'
#' Thin wrapper around hf_string() selecting the 'footnotes' block.
#'
#' @param doc RTF document
#'
#' @return String RTF encoding with the footer information
#' @noRd
footer_string <- function(doc) {
hf_string(doc, type='footnotes')
}
#' Write RTF document
#'
#' Writes the RTF document to a specified file.
#'
#' @param doc The RTF document to be written.
#' @param file A character string naming a file open for writing.
#'
#' @return File is written to the file provided by sinking the console output.
#' No output is returned to the R environment.
#'
#' @examples
#' ## Create and write RTF document
#' ht <- huxtable::huxtable(
#' column1 = 1:5,
#' column2 = letters[1:5]
#' )
#' rtf <- rtf_doc(ht)
#'
#' write_rtf(rtf, file=tempfile()) # writes a table with no header/footnotes to a temporary file
#'
#' @seealso \url{http://www.biblioscape.com/rtf15_spec.htm},
#' \url{http://latex2rtf.sourceforge.net/rtfspec_7.html#rtfspec_tabledef}
#'
#' @importFrom assertthat is.writeable
#' @export
write_rtf <- function(doc, file=NULL) {
# Make sure the file parameter was provided
assert_that(!is.null(file), msg="File cannot be NULL Please specify a valid file path")
force(file)
# Write to the specified file. sink() redirects every cat() below into the
# file; the tryCatch finally clause guarantees the sink is released even if
# one of the string generators errors.
sink(file)
tryCatch({
# RTF Header line
cat("{\\rtf1\\ansi\\deff1\n")
# Font table
cat(font_table_string(doc))
# Color table
cat(color_table_string(doc))
cat("\n\n\n")
# Document properties
cat(doc_properties_string(doc))
cat("\n\n\n")
# Titles
cat(header_string(doc))
cat("\n")
# Footnotes
cat(footer_string(doc))
# Table content
cat(get_table_body(doc))
cat("\n}")
},
error = function(err) {stop(paste(err))},
finally = {sink()}
)
}
| /R/rtf-code-generators.R | permissive | elisa-young/pharmaRTF | R | false | false | 8,965 | r | ## Auto formatting page numbers ----
#' Build the RTF field code that renders the current page number
#'
#' @param properties RTF text-property codes (font/size/style) placed at the
#'   start of the field group; defaults to none.
#'
#' @return A single string containing the RTF PAGE field.
#' @noRd
page_num <- function(properties='') {
  # TODO: Add style and font support
  paste0("{", properties,
         "\\field\\flddirty{\\*\\fldinst{ PAGE \\\\* MERGEFORMAT }}}")
}
#' Build the RTF field code that renders the total page count
#'
#' @param properties RTF text-property codes (font/size/style) placed at the
#'   start of the field group; defaults to none.
#'
#' @return A single string containing the RTF NUMPAGES field.
#' @noRd
page_total <- function(properties='') {
  paste0("{", properties, "\\field{\\*\\fldinst{ NUMPAGES}}}")
}
#' Add page number information
#'
#' Adds current and total page number. First %s is current page, second %s is
#' total pages.
#'
#' @param format Format for string replacement; each "%s" is replaced, in
#'   order, by the PAGE and NUMPAGES field codes
#' @param properties Properties for displaying page number information
#'
#' @return String of RTF encoding that displays the current and total pages.
#' @noRd
add_page_num <- function(format="Page %s of %s", properties='') {
# Make sure there's only a replacement for current and total pages.
# NOTE(review): gregexpr() returns -1 when no "%s" is present, so token_ct
# then holds -1 rather than being empty -- the length check still passes.
token_ct <- unlist(gregexpr("\\%s", format))
assert_that(length(token_ct) <= 2,
msg = "Too many replacement strings - limited to 2 for current page and total pages.")
# Split out the tokens of the string, apply brackets, and bring them back together
chunks <- unlist(strsplit(format, "%s"))
# Build the string to be formatted and incorporate the properties from the line
fmt_str <- paste(paste0("{", rep(properties, length(chunks)), chunks, "}"), collapse="%s")
# If the last replacement token was found at the second to last character, then it was not maintained
# with the string split, so add it back on
if (token_ct[length(token_ct)] == nchar(format) - 1) fmt_str <- paste(fmt_str, "%s", sep="")
# Format in the rendered PAGE / NUMPAGES field codes
page_str <- sprintf(fmt_str, page_num(properties), page_total(properties))
page_str
}
#' Font table
#'
#' Builds the RTF \\fonttbl group. "Times" is always entry \\f0; the
#' document's own fonts follow, de-duplicated, in order.
#'
#' @param doc RTF document
#'
#' @return String of RTF encoding with font information
#' @noRd
font_table_string <- function(doc){
  fonts <- unique(c("Times", font(doc)))
  # RTF font indices are zero-based
  entries <- sprintf(" {\\f%d %s;}", seq_along(fonts) - 1L, fonts)
  paste("{\\fonttbl", paste(entries, collapse = "\n"), "}", sep = "\n")
}
## Color Table ----
# Not investing in this at the moment, so write out a default blank table
#' Create document color table
#'
#' Color support is not currently implemented; this always emits an empty
#' RTF color table group.
#'
#' @param doc RTF document (currently unused; kept for call-site symmetry
#'   with the other *_string generators)
#'
#' @return String of RTF encoding with color table information
#' @noRd
color_table_string <- function(doc){
  # paste() around a single literal was a no-op; return the constant directly.
  '{\\colortbl;;}\n'
}
#' Generate document properties string
#'
#' Emits paper size, orientation, margins, header/footer heights and default
#' font size as a single RTF control-word run. All inch values are converted
#' to twips (1 inch = 1440 twips).
#'
#' @param doc RTF document
#'
#' @return String encoding with document property information
#' @noRd
doc_properties_string <- function(doc){
# Get margins and convert to twips
mrgs <- sapply(margins(doc), function(x) x*1440)
# Make margin string
mrg_str <- sprintf("\\margl%s\\margr%s\\margt%s\\margb%s\n", mrgs['left'], mrgs['right'], mrgs['top'], mrgs['bottom'])
# Height and width string
ps <- pagesize(doc)
# Header and footer heights
hf_ht <- sprintf("\\headery%s\\footery%s", header_height(doc) * 1440, footer_height(doc) * 1440)
# Get orientation string
if (orientation(doc) == 'landscape') {
ortn <- '\\lndscpsxn\n'
# If the orientation is landscape, reverse the height and width, effectively flipping 90 degrees
ht_wd <- sprintf('\\paperw%s\\paperh%s', ps['width'] * 1440, ps['height'] * 1440)
} else{
ortn <- ''
# For portrait, use the values as they were entered
# NOTE(review): portrait puts pagesize 'height' into \paperw and 'width'
# into \paperh, the mirror image of the landscape branch -- this only works
# if pagesize() stores landscape-oriented defaults; confirm before changing.
ht_wd <- sprintf('\\paperw%s\\paperh%s', ps['height'] * 1440, ps['width'] * 1440)
}
# Font size (RTF \fs units are half-points)
fs <- sprintf("\\fs%s\n", font_size(doc)*2)
# Other information
other <- '\\widowctrl\\ftnbj\\fet0\\sectd\\linex0\n'
paste(ht_wd, other, ortn, mrg_str, hf_ht, fs, sep='')
}
## Header and footer string generation ----
#' Create a single line of RTF header/footnote information
#'
#' Renders one title/footnote line: resolves its font, font size, bold,
#' italic and alignment (falling back to document defaults), then formats
#' the line's text chunks with those properties.
#'
#' @param line A single title/footnote to write
#' @param doc RTF document
#'
#' @return String of RTF encoding for title/footnotes
#' @noRd
hf_line_string <- function(line, doc=NULL) {
# Placeholders
ft <- '\\f1' # Font (comes from a list, but using \f0 doesn't seem to work)
fs <- sprintf("\\fs%s", font_size(doc) * 2) # Font size - no way to set universal document font size, just defaults to 12
# so use the documents set size
bd <- '' # Bold (On or off - default off)
it <- '' # Italic (On or off - default off)
al <- '\\ql\n' # Alignment (Defaults to left \ql - left aligned)
tabs <- '' # Overwritten if split alignment
# Read the font information
# If font is overridden generate the string
if (!is.na(font(line))) {
# In huxtable they subtract one because the font list is 0 based, but instead of
# storing an unused font in the font attribute of the document, I'm just writing out an used font
# to the font table in the RTF document and matching the index as if it were 1 based.
ft <- sprintf("\\f%s", match(font(line), font(doc)))
}
# If font size is overridden generate the string (\fs units are half-points)
if (!is.na(font_size(line))) {
fs <- sprintf("\\fs%s", font_size(line)*2)
}
# Styling
# use the bold string if on
if (bold(line)) bd <- "\\b"
# Use the italics string if on
if (italic(line)) it <- "\\i"
# Concatenate all of the text level properties
properties <- paste(ft, fs, bd, it, ' ', sep='')
# Alignment
if (align(line) == 'center') al <- '\\qc\n'
else if (align(line) == 'right') al <- '\\qr\n'
# Split will align left and designate tab locations, where the right most is flush right
else if (align(line) == 'split') {
al <- "\\ql\\tx7245\\tqr\\tx12960\n"
tabs <- '\\pmartabqr \n'
}
txt_string <- sapply(line$text, format_text_string, properties = properties, USE.NAMES=FALSE)
# Patch: a split-aligned line yields two text chunks; join them around the
# right-aligned tab stop so the second chunk lands flush right
if (length(txt_string) > 1) {
txt_string <- paste(txt_string[1], tabs, txt_string[2], sep='')
}
paste(al, txt_string, sep='')
}
#' Assemble the RTF page header or footer block for a document
#'
#' @param doc RTF document
#' @param type Either 'titles' (page header, which additionally carries the
#'   table column headers) or 'footnotes' (page footer)
#'
#' @return String of RTF encoding for the requested header/footer block
#' @noRd
hf_string <- function(doc, type=NULL) {
  # Render each title/footnote line to RTF, in display order
  rendered <- sapply(order_lines(doc[[type]]), hf_line_string, doc=doc)
  # Join the individual lines with paragraph breaks
  content <- paste(rendered, collapse="\n\\par")
  # Opening control word differs between page header and page footer
  if (type == 'titles') command <- '\\header\n'
  else if (type == 'footnotes') command <- '\\footer\n'
  if (type == "titles") {
    # The page header also carries the table's column headers
    paste0('{', command, content, '\n\\par\n', get_column_headers(doc), '\n}')
  } else {
    paste0('{', command, content, '\\par\n}')
  }
}
#' Create the header string
#'
#' Thin wrapper around hf_string() selecting the 'titles' block
#' (document titles plus the table column headers).
#'
#' @param doc RTF document
#'
#' @return String RTF encoding with the header information
#' @noRd
header_string <- function(doc) {
hf_string(doc, type='titles')
}
#' Create the footer string
#'
#' Thin wrapper around hf_string() selecting the 'footnotes' block.
#'
#' @param doc RTF document
#'
#' @return String RTF encoding with the footer information
#' @noRd
footer_string <- function(doc) {
hf_string(doc, type='footnotes')
}
#' Write RTF document
#'
#' Writes the RTF document to a specified file.
#'
#' @param doc The RTF document to be written.
#' @param file A character string naming a file open for writing.
#'
#' @return File is written to the file provided by sinking the console output.
#' No output is returned to the R environment.
#'
#' @examples
#' ## Create and write RTF document
#' ht <- huxtable::huxtable(
#' column1 = 1:5,
#' column2 = letters[1:5]
#' )
#' rtf <- rtf_doc(ht)
#'
#' write_rtf(rtf, file=tempfile()) # writes a table with no header/footnotes to a temporary file
#'
#' @seealso \url{http://www.biblioscape.com/rtf15_spec.htm},
#' \url{http://latex2rtf.sourceforge.net/rtfspec_7.html#rtfspec_tabledef}
#'
#' @importFrom assertthat is.writeable
#' @export
write_rtf <- function(doc, file=NULL) {
# Make sure the file parameter was provided
assert_that(!is.null(file), msg="File cannot be NULL Please specify a valid file path")
force(file)
# Write to the specified file. sink() redirects every cat() below into the
# file; the tryCatch finally clause guarantees the sink is released even if
# one of the string generators errors.
sink(file)
tryCatch({
# RTF Header line
cat("{\\rtf1\\ansi\\deff1\n")
# Font table
cat(font_table_string(doc))
# Color table
cat(color_table_string(doc))
cat("\n\n\n")
# Document properties
cat(doc_properties_string(doc))
cat("\n\n\n")
# Titles
cat(header_string(doc))
cat("\n")
# Footnotes
cat(footer_string(doc))
# Table content
cat(get_table_body(doc))
cat("\n}")
},
error = function(err) {stop(paste(err))},
finally = {sink()}
)
}
|
# AFL-generated regression input for IntervalSurgeon:::rcpp_pile
# (fuzzer output; the integer values are arbitrary, not meaningful intervals).
testlist <- list(ends = c(-1125300777L, 765849512L, -1760774663L, 791623263L, 1358782356L, -128659642L, -14914341L, 1092032927L, 1837701012L, 1632068659L), pts = c(1758370433L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), starts = c(17301504L, 0L, 738263040L, 682962941L, 1612840977L, 150997320L, 747898999L, -1195392662L, 2024571419L, 808515032L, 1373469055L, -282236997L, -207881465L, -237801926L, -168118689L, -1090227888L, 235129118L, 949454105L, 1651285440L, -1119277667L, -1328604284L), members = NULL, total_members = 0L)
# Invoke the internal C++ entry point with the fuzzed argument list.
result <- do.call(IntervalSurgeon:::rcpp_pile,testlist)
str(result) | /IntervalSurgeon/inst/testfiles/rcpp_pile/AFL_rcpp_pile/rcpp_pile_valgrind_files/1609860007-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 729 | r | testlist <- list(ends = c(-1125300777L, 765849512L, -1760774663L, 791623263L, 1358782356L, -128659642L, -14914341L, 1092032927L, 1837701012L, 1632068659L), pts = c(1758370433L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), starts = c(17301504L, 0L, 738263040L, 682962941L, 1612840977L, 150997320L, 747898999L, -1195392662L, 2024571419L, 808515032L, 1373469055L, -282236997L, -207881465L, -237801926L, -168118689L, -1090227888L, 235129118L, 949454105L, 1651285440L, -1119277667L, -1328604284L), members = NULL, total_members = 0L)
result <- do.call(IntervalSurgeon:::rcpp_pile,testlist)
str(result) |
# Apr/12/2016 anova.mpt() now works with stats::print.anova()
#
# Mar/18/2016 better fix using eval(..., as.list(spec$par)); this renders more
# expressions fittable via EM including base functions
#
# Aug/27/2015 BUG FIX: mpt(..., method = "EM") could fail when a symbol in the
# model was also used as an object name in the work space; fixed
# by using evalq() instead of eval(); problem with evalq() is that
# it hides base functions like sqrt()
#
# Mar/11/2015 add multinomial constant to logLik
#
# Sep/10/2014 new infrastructure, mptspec(), mpt(..., method = "BFGS")
#
# Dec/15/2013 simplify extraction of EM constants (a, b, c)
#
# Jan/24/2013 BUG FIX: typo in vcov.mpt(), hence wrong standard errors
# (reported by Rainer Alexandrowicz and Bartosz Gula)
## Fit MPT model via maximum likelihood (BFGS or EM)
## 'spec' is an "mptspec" object; 'data' may be a data frame (frequencies in
## 'freqvar'), a matrix of frequencies, or a named frequency vector.
## Returns an object of class "mpt".
mpt <- function(spec, data, start = NULL, method = c("BFGS", "EM"),
treeid = "treeid", freqvar = "freq",
optimargs =
if(method == "BFGS") list(control =
list(reltol = .Machine$double.eps^(1/1.2), maxit = 1000))
else list())
{
# NOTE(review): class(spec) == "mptspec" fails if spec ever carries
# additional classes; inherits(spec, "mptspec") would be the robust check.
stopifnot(class(spec) == "mptspec")
## Either, 'data' is a dataframe
if(is.data.frame(data)) {
y <- data[, freqvar]
tid <- if(length(treeid) == length(y)) factor(treeid)
else if(length(treeid) == 1 && treeid %in% names(data))
factor(data[, treeid])
else if(length(names(spec$prob)) == length(y)) # read from spec
factor(gsub("^(.+)\\..*", "\\1", names(spec$prob)))
else rep(1, length(y))
data <- matrix(y, nrow=1L)
## Or a matrix/vector of frequencies
} else {
## sanity checking and reordering of data
if(is.null(dim(data))) data <- matrix(data, nrow=1L,
dimnames=list(NULL, names(data)))
if(!is.null(dnam <- colnames(data)) & !is.null(snam <- names(spec$prob))){
if(!all(snam == dnam)) warning("variable names do not match")
# if(!all(snam %in% dnam)) {
# warning("variable names do not match")
# } else {
# data <- data[, snam, drop = FALSE]
# }
}
## tree id: explicit per column, derived from spec names, or from colnames
tid <- if(length(treeid) == NCOL(data)) factor(treeid)
else if(length(names(spec$prob)) == NCOL(data))
factor(gsub("^(.+)\\..*", "\\1", names(spec$prob)))
else if(!is.null(colnames(data)))
factor(gsub("^(.+)\\..*", "\\1", colnames(data))) # before 1st dot
else rep(1, NCOL(data))
}
if(NCOL(data) != length(spec$prob))
stop("number of response categories and model equations do not match")
## for fitting only sums are needed
y <- colSums(data)
method <- match.arg(method)
## determine number of parameters and starting values
if(is.null(start)) {
start <- spec$par[is.na(spec$par)] # FIX ME: is.na still necessary?
start[] <- if (method == "EM") 0.5 else 0 # completely ad hoc
} else {
## do sanity checking of starting values/names/etc.
if(is.null(names(start))) names(start) <- names(spec$par[is.na(spec$par)])
if (method == "BFGS") start <- qlogis(start) # logit transform
}
if (method == "BFGS") {
## set up log-likelihood and gradient; BFGS optimizes on the logit scale,
## so parameters are mapped through plogis() before par2prob()
nll <- function(par) -sum(y * log(spec$par2prob(plogis(par))))
grad <- function(par) {
yp <- drop(y/spec$par2prob(plogis(par)))
dp <- spec$par2deriv(plogis(par))$deriv
-drop(dp %*% yp) * dlogis(par) # FIX ME: dlogis(par) optional? Ask Z.
}
optArgs <- list(par=start, fn=nll, gr=grad, method="BFGS")
optArgs <- c(optArgs, as.list(optimargs))
opt <- do.call(optim, optArgs)
# opt <- optim(start, nll, gr = grad, method = "BFGS",
# control = list(reltol = .Machine$double.eps^(1/1.2), maxit = 1000))
coef <- opt$par
loglik <- -opt$value
pcat <- spec$par2prob(plogis(coef))
aa <- bb <- cc <- NULL
# } else if (method == "BFGS") {
# opt <- optim(start, nll, gr = grad, method = "BFGS",
# control = list(reltol = .Machine$double.eps^(1/1.2), maxit = 1000))
# coef <- plogis(opt$par)
# vc <- solve(H(coef))
# loglik <- -sum(log(spec$par2prob(coef)) * y)
} else { # EM
## Get constants for EM algorithm:
## aa[i,j,s] = exponent of parameter s in branch i of category j,
## bb[i,j,s] = exponent of (1 - parameter s), cc[i,j] = product of the
## fixed numeric factors of that branch.
terms <- sapply(lapply(spec$prob, as.character), strsplit, "\\+") # "+"
terms <- lapply(terms, function(x) gsub("[[:space:]]", "", x))
aa <- bb <- array(NA, c(max(sapply(terms, length)), # max paths to categ
length(terms), # n categories
length(start))) # n pars
cc <- matrix(1, dim(aa)[1], dim(aa)[2])
for(j in 1:dim(aa)[2]){
for(i in 1:sapply(terms, length)[j]){
pterms <- strsplit(terms[[j]][i], "\\*")[[1]]
cc[i, j] <- prod(sapply(parse(text=pterms), eval, as.list(spec$par)),
na.rm=TRUE)
for(s in seq_along(start)){
tname <- names(start)[s]
aa[i, j, s] <- sum(grepl(paste0("^", tname, "$"), pterms))
powix <- grepl(paste0("^", tname, "\\^[0-9]+"), pterms)
aa[i, j, s] <- sum(aa[i, j, s],
as.numeric(gsub(paste0("^", tname, "\\^([0-9]+)"), "\\1",
pterms)[powix]))
## Brackets () are optional
bb[i, j, s] <- sum(grepl(paste0("^\\(?1-", tname, "\\)?$"), pterms))
powix <- grepl(paste0("^\\(1-", tname, "\\)\\^[0-9]+"), pterms)
bb[i, j, s] <- sum(bb[i, j, s],
as.numeric(gsub(paste0("^\\(1-", tname, "\\)\\^([0-9]+)"), "\\1",
pterms)[powix]))
}
}
}
dimnames(aa)[[3]] <- dimnames(bb)[[3]] <- as.list(names(start))
## Call mptEM
optArgs <- list(theta=start, data=y, a=aa, b=bb, c=cc)
optArgs <- c(optArgs, as.list(optimargs))
opt <- do.call(mptEM, optArgs)
# opt <- mptEM(start, y, aa, bb, cc, ...)
coef <- opt$theta
loglik <- opt$loglik
pcat <- opt$pcat
}
## category labels: from spec, from data, or "treeid.index"
snam <- if(!is.null(names(spec$prob))) names(spec$prob)
else if(!is.null(colnames(data))) colnames(data)
else paste(tid, unlist(lapply(rle(as.character(tid))$lengths,
seq_len)), sep=".")
ncat <- table(tid)
nobs <- sum(ncat - 1) # free response categories across trees
ntrees <- length(ncat)
# n <- setNames(tapply(y, tid, sum)[as.character(tid)], snam)
nbytree <- tapply(y, tid, sum)
n <- setNames(nbytree[as.character(tid)], snam)
fitted <- n*pcat
## likelihood-ratio goodness of fit against the saturated model
G2 <- 2*sum(y*log(y/fitted), na.rm=TRUE)
df <- nobs - length(coef)
gof <- c(G2=G2, df=df, pval = pchisq(G2, df, lower.tail=FALSE))
rval <- list(
coefficients = coef,
# add the per-tree multinomial constant to the kernel log-likelihood
loglik = sum(lfactorial(nbytree)) - sum(lfactorial(y)) + loglik,
nobs = nobs, # nrow(data),
# df = length(start),
fitted = fitted,
goodness.of.fit = gof,
ntrees = ntrees,
n = n,
y = setNames(y, snam),
pcat = setNames(pcat, snam),
treeid = tid,
a = aa, b = bb, c = cc,
spec = spec,
method = method,
optim = opt
)
class(rval) <- "mpt"
return(rval)
}
## EM algorithm for MPT models.
## theta: start values (probability scale); data: category frequencies;
## a/b/c: branch constants as built by mpt() -- theta exponents,
## (1 - theta) exponents, and fixed numeric factors, respectively.
mptEM <- function(theta, data, a, b, c, maxit = 1000, tolerance = 1e-8,
stepsize = 1, verbose = FALSE){
nbranch <- dim(a)[1]
pbranch <- matrix(NA_real_, nbranch, length(data))
loglik0 <- -Inf
theta1 <- theta
iter <- 1
while(iter < maxit){
if(verbose) print(c(iter, loglik0))
## E step: branch probability = c * prod(theta^a * (1 - theta)^b)
for(i in seq_len(nbranch))
for(j in seq_along(data))
pbranch[i, j] <- c[i,j] * prod(theta^a[i,j,] * (1 - theta)^b[i,j,])
pcat <- colSums(pbranch, na.rm=TRUE)
loglik1 <- sum(data*log(pcat))
if(loglik1 - loglik0 < tolerance) break # stop if converged
loglik0 <- loglik1
# expected branch counts given current parameters
m <- t(data*t(pbranch)/pcat)
## M step
for(s in seq_along(theta))
theta1[s] <-
sum(a[,,s]*m, na.rm=TRUE)/sum((a[,,s] + b[,,s])*m, na.rm=TRUE)
# stepsize = 1 takes the full EM update; smaller values damp the step
theta <- theta - stepsize*(theta - theta1)
iter <- iter + 1
}
if(iter >= maxit) warning("iteration maximum has been exceeded")
out <- list(theta=theta, loglik=loglik1, pcat=pcat, pbranch=pbranch,
iter=iter)
out
}
## Extract MPT parameter estimates.
## EM fits store estimates on the probability scale, BFGS fits on the logit
## scale; this method converts to whichever scale the caller requests.
coef.mpt <- function(object, logit = FALSE, ...){
  est <- object$coefficients
  stored_as_logit <- object$method != "EM"  # BFGS optimizes logit(theta)
  if (logit) {
    out <- if (stored_as_logit) est else qlogis(est)
    names(out) <- paste0("logit(", names(est), ")")
    out
  } else {
    if (stored_as_logit) plogis(est) else est
  }
}
## Covariance matrix (or Fisher information) for MPT parameter estimates.
## With logit = TRUE the result is transformed to the logit scale via the
## delta method.
vcov.mpt <- function(object, logit = FALSE, what = c("vcov", "fisher"),
...){
what <- match.arg(what)
coef <- coef(object, logit=logit)
# Negative Hessian (information) on probability scale.
# Which one is correct? Should be very similar. Stick with I.obs.
# %*% is slightly faster than sum( * )
H <- function(par, y = object$y, spec = object$spec,
type = c("observed", "estimated", "expected")){
pp <- spec$par2prob(par)
yp <- drop(y/pp)
dp <- spec$par2deriv(par)$deriv
d2p <- spec$par2deriv(par)$deriv2
npar <- length(par)
H <- matrix(NA, npar, npar)
# fill the upper triangle only; mirrored below
for (i in seq_len(npar))
for (j in i:npar)
switch(EXPR = match.arg(type),
observed =
H[i, j] <- yp %*% (dp[i, ]*dp[j, ]/pp - d2p[i, j, ]),
# H[i, j] <- sum(yp * (dp[i, ]*dp[j, ]/pp - d2p[i, j, ]))
estimated =
H[i, j] <- yp %*% (dp[i, ]*dp[j, ]/pp),
# H[i, j] <- sum(y*dp[i, ]*dp[j, ]/pp^2)
# H[i, j] <- sum(yp*dp[i, ]*dp[j, ]/pp)
# H[i, j] <- sum(yp/pp * dp[i, ]*dp[j, ])
# Only correct for single-tree models; for joint MPT models,
# calculate per-tree info and add them.
expected =
H[i, j] <- sum(y)*sum(dp[i, ]*dp[j, ]/pp)
)
H[lower.tri(H)] <- t(H)[lower.tri(H)]
dimnames(H) <- list(names(par), names(par))
H
}
if (logit) { # delta method
# Note that plogis(x)*(1 - plogis(x)) == dlogis(x)
# Automatic dimnames for tcrossprod(...)
# Conjecture: the smaller the matrix the more reliable its inverse
# dhessian <- diag(1/(plogis(coef) * (1 - plogis(coef))), length(coef))
# dhessian %*% solve(H(plogis(coef))) %*% dhessian
# solve(tcrossprod(dlogis(coef)) * H(plogis(coef))) # possibly singular
if (what == "vcov") 1/tcrossprod(dlogis(coef)) * solve(H(plogis(coef)))
else tcrossprod(dlogis(coef)) * H(plogis(coef))
} else {
if (what == "vcov") solve(H(coef))
else H(coef)
}
}
## Based on stats::confint.default: Wald confidence intervals for MPT
## parameters, by default on the logit scale (where normality holds better).
confint.mpt <- function(object, parm, level = 0.95, logit = TRUE, ...)
{
  est <- coef(object, logit = logit)
  if (missing(parm)) {
    parm <- names(est)
  } else if (is.numeric(parm)) {
    parm <- names(est)[parm]
  }
  alpha <- (1 - level) / 2
  probs <- c(alpha, 1 - alpha)
  labels <- paste(format(100 * probs, trim = TRUE, scientific = FALSE,
                         digits = 3), "%")
  z <- qnorm(probs)
  se <- sqrt(diag(vcov(object, logit = logit)))[parm]
  out <- array(NA, dim = c(length(parm), 2L), dimnames = list(parm, labels))
  out[] <- est[parm] + se %o% z
  out
}
# logLik.mpt <- function(object, ...)
# structure(object$loglik, df = object$df, class = "logLik")
## Print method for mpt objects: parameter estimates plus the G2
## likelihood-ratio goodness-of-fit test. Returns x invisibly.
print.mpt <- function(x, digits = max(3, getOption("digits") - 3),
logit=FALSE, ...){
cat("\nMultinomial processing tree (MPT) models\n\n")
cat("Parameter estimates:\n")
print.default(format(coef(x, logit=logit), digits=digits), print.gap=2,
quote = FALSE)
# goodness.of.fit is c(G2, df, pval) as stored by mpt()
G2 <- x$goodness.of.fit[1]
df <- x$goodness.of.fit[2]
pval <- x$goodness.of.fit[3]
cat("\nGoodness of fit (2 log likelihood ratio):\n")
cat("\tG2(", df, ") = ", format(G2, digits=digits), ", p = ",
format(pval, digits=digits), "\n", sep="")
cat("\n")
invisible(x)
}
## Likelihood-ratio comparison of two or more nested mpt fits; works with
## stats::print.anova via the "anova" class and "heading" attribute.
anova.mpt <- function (object, ..., test = c("Chisq", "none")){
## Adapted from MASS::anova.polr and stats::anova.glmlist
test <- match.arg(test)
dots <- list(...)
if (length(dots) == 0)
stop('anova is not implemented for a single "mpt" object')
mlist <- list(object, ...)
nmodels <- length(mlist)
# recover the model names from the call for the table heading
names(mlist) <- c(deparse(substitute(object)),
as.character(substitute(...[]))[2:nmodels])
if (any(!sapply(mlist, inherits, "mpt")))
stop('not all objects are of class "mpt"')
ns <- sapply(mlist, function(x) length(x$fitted))
if (any(ns != ns[1]))
stop("models were not all fitted to the same size of dataset")
# residual df and deviance (G2) per model; successive differences give
# the chi-square tests
dfs <- sapply(mlist, function(x) x$goodness.of.fit["df"])
lls <- sapply(mlist, function(x) x$goodness.of.fit["G2"])
df <- c(NA, -diff(dfs))
x2 <- c(NA, -diff(lls))
pr <- c(NA, pchisq(x2[-1], df[-1], lower.tail = FALSE))
out <- data.frame(Resid.df = dfs, Deviance = lls, Df = df, Chisq = x2,
Prob = pr)
dimnames(out) <- list(1:nmodels, c("Resid. Df", "Resid. Dev", "Df",
"Deviance", "Pr(>Chi)"))
if (test == "none") out <- out[, -ncol(out)]
structure(out,
heading = c("Analysis of Deviance Table\n",
paste0("Model ", format(1L:nmodels), ": ",
names(mlist), collapse = "\n")),
class = c("anova", "data.frame"))
}
## Log-likelihood for mpt objects
## Returns the fitted log-likelihood as a classed "logLik" object carrying
## the number of estimated parameters ("df") and the number of observations
## ("nobs") as attributes, so that AIC()/BIC() work out of the box.
logLik.mpt <- function(object, ...) {
  if (length(list(...)) > 0)
    warning("extra arguments discarded")
  structure(object$loglik,
            df = length(object$coefficients),
            nobs = object$nobs,
            class = "logLik")
}
## Number of observations
## Extractor used by information criteria (paired with logLik.mpt); simply
## returns the `nobs` component stored on the fitted object by mpt().
nobs.mpt <- function(object, ...) {
  object$nobs
}
## Residuals for mpt models
## Deviance (default) or Pearson residuals for the fitted category counts.
##
## object: a fitted "mpt" model.
## type: "deviance" or "pearson".
##
## Fix: the original accessed `object$goodness`, which only resolved to
## `goodness.of.fit` through R's partial matching of `$` on lists; the full
## component name is used here so the lookup is explicit and robust.
residuals.mpt <- function(object, type=c("deviance", "pearson"), ...){
  ## Per-category deviance contribution; y and mu are probabilities, wt the
  ## per-tree sample size.  ifelse() guards against log(0) when y == 0.
  dev.resids <- function(y, mu, wt)
    2 * wt * (y * log(ifelse(y == 0, 1, y/mu)) - (y - mu))
  type <- match.arg(type)
  wts <- object$n
  y <- object$y / wts          # observed category proportions
  mu <- object$pcat            # fitted category probabilities
  res <- switch(type,
    deviance = if(object$goodness.of.fit['df'] > 0){
      d.res <- sqrt(pmax(dev.resids(y, mu, wts), 0))
      ifelse(y > mu, d.res, -d.res)  # attach the sign of the raw residual
    }
    else rep.int(0, length(mu)),     # saturated model: all residuals zero
    pearson = (y - mu) * sqrt(wts)/sqrt(mu)
  )
  if(!is.null(object$na.action)) res <- naresid(object$na.action, res)
  res
}
## Diagnostic plot for mpt models
## Plots deviance residuals against the fitted category probabilities with a
## horizontal reference line at zero and a lowess smooth (panel.smooth).
## If showNames is TRUE, points are labelled with their category names
## instead of being drawn as plain symbols.
plot.mpt <- function(x, showNames = TRUE,
xlab="Predicted response probabilities",
ylab="Deviance residuals", ...){
xres <- resid(x)
mu <- x$pcat
## type = "n" sets up the axes only; points/labels are added below
plot(mu, xres, xlab = xlab, ylab = ylab, type="n", ...)
abline(h = 0, lty = 2)
if(showNames){
text(mu, xres, names(xres), cex=0.8)
panel.smooth(mu, xres, cex=0)
}else{
panel.smooth(mu, xres)
}
}
# ## Covariance matrix for MPT model parameters
# vcov.mpt <- function(object, ..., what = c("vcov", "fisher")){
# a <- object$a
# b <- object$b
# y <- object$y
# pcat <- object$pcat
# pbranch <- object$pbranch
# theta <- coef(object)
#
# ## as(Theta), bs(Theta)
# as.t <- bs.t <- numeric(length(theta))
# for(s in seq_along(theta)){
# for(j in seq_along(pcat)){
# as.t[s] <- as.t[s] + y[j]*sum(a[,j,s]*pbranch[,j]/pcat[j], na.rm=TRUE)
# bs.t[s] <- bs.t[s] + y[j]*sum(b[,j,s]*pbranch[,j]/pcat[j], na.rm=TRUE)
# }
# }
#
# ## d as(Theta)/d t, d bs(Theta)/d t
# das.t <- dbs.t <- matrix(0, length(theta), length(theta))
# for(s in seq_along(theta)){
# for(r in seq_along(theta)){
# for(j in seq_along(pcat)){
# das.t[s, r] <- das.t[s, r] + y[j] * (
# sum(a[,j,s] * pbranch[,j] *
# sum((a[,j,r]/theta[r] - b[,j,r]/(1 - theta[r])) * pbranch[,j],
# na.rm = TRUE) /
# pcat[j]^2, na.rm = TRUE) -
# sum(a[,j,s] *
# (a[,j,r]/theta[r] - b[,j,r]/(1 - theta[r])) * pbranch[,j] / pcat[j],
# na.rm = TRUE)
# )
#
# dbs.t[s, r] <- dbs.t[s, r] + y[j] * (
# sum(b[,j,s] * pbranch[,j] *
# sum((a[,j,r]/theta[r] - b[,j,r]/(1 - theta[r])) * pbranch[,j],
# na.rm = TRUE) /
# pcat[j]^2, na.rm = TRUE) -
# sum(b[,j,s] *
# (a[,j,r]/theta[r] - b[,j,r]/(1 - theta[r])) * pbranch[,j] / pcat[j],
# na.rm = TRUE)
# )
# }
# }
# }
#
# ## I(Theta)
# info.t <- das.t/theta - dbs.t/(1 - theta) +
# diag(as.t/theta^2 + bs.t/(1 - theta)^2)
# dimnames(info.t) <- list(names(theta), names(theta))
# what <- match.arg(what)
# if (what == "vcov") solve(info.t) else info.t
# }
## Summary method for "mpt" objects.
## Builds a coefficient table holding estimates on both the probability and
## logit scales, logit-scale standard errors, and Wald z tests, and collects
## AIC, the G2 goodness-of-fit vector, and the Pearson X2 statistic.
summary.mpt <- function(object, ...){
x <- object
coef <- coef(x, logit=TRUE)
pcoef <- coef(x, logit=FALSE)
## Catch vcov error, so there are at least some NA's in the summary
s.err <- tryCatch(sqrt(diag(vcov(x, logit=TRUE))),
error = function(e) rep(NA, length(coef)))
## Wald test on the logit scale
tvalue <- coef / s.err
pvalue <- 2 * pnorm(-abs(tvalue))
dn <- c("Estimate", "Logit Estim.", "Std. Error")
coef.table <- cbind(pcoef, coef, s.err, tvalue, pvalue)
dimnames(coef.table) <- list(names(pcoef), c(dn, "z value", "Pr(>|z|)"))
aic <- AIC(x)
ans <- list(ntrees=x$ntrees, coefficients=coef.table, aic=aic,
gof=x$goodness.of.fit, X2=sum(resid(x, "pearson")^2))
class(ans) <- "summary.mpt"
return(ans)
}
## Print method for "summary.mpt" objects.
## cs.ind = 2:3 tells printCoefmat() which columns are coefficient-scale
## quantities (the logit estimate and its standard error).
## Note: x$coef reaches the `coefficients` component via partial matching.
print.summary.mpt <- function(x, digits = max(3, getOption("digits") - 3),
cs.ind = 2:3, ...){
cat("\nCoefficients:\n")
printCoefmat(x$coef, digits=digits, cs.ind=cs.ind, ...)
# cat("\nGoodness of fit:\n")
cat("\nLikelihood ratio G2:", format(x$gof[1], digits=digits), "on",
x$gof[2], "df,", "p-value:", format(x$gof[3], digits=digits), "\n")
cat("Pearson X2: ", format(x$X2, digits=digits), ", ",
"AIC: ", format(x$aic, digits=max(4, digits + 1)), sep="")
cat("\n")
cat("Number of trees:", x$ntrees, "\n")
invisible(x)
}
## Simulate responses from mpt model
## Draws one multinomial sample per tree using the fitted category
## probabilities, with each tree's total fixed at its observed total.
## nsim and seed are accepted for compatibility with the simulate() generic
## but are not used by this method.
## NOTE(review): object$n[tid == i] is a vector of (identical) per-tree
## totals, while rmultinom's `size` is documented as a single integer —
## confirm this passes the intended scalar.
simulate.mpt <- function(object, nsim, seed, pool = TRUE, ...){
if(pool){
tid <- object$treeid
freq <- unlist( lapply(unique(tid),
function(i) rmultinom(1, object$n[tid == i], object$pcat[tid == i])) )
names(freq) <- tid
}else{
stop("individual response simulation not yet implemented")
}
## Re-label the pooled draws with the original category names
setNames(freq, names(object$fitted))
}
## Model deviance: the likelihood-ratio statistic, returned as the named
## "G2" element of the goodness-of-fit vector.
deviance.mpt <- function(object, ...) {
  gof <- object$goodness.of.fit
  gof["G2"]
}
## Predictions from a fitted mpt model.
## type = "prob" returns the fitted category probabilities; type = "freq"
## returns the fitted frequencies, rescaled to new per-tree totals when
## `newdata` (a vector of frequencies) is supplied.
predict.mpt <- function(object, newdata = NULL, type = c("freq", "prob"),
                        ...) {
  type <- match.arg(type)
  if (type == "prob") return(object$pcat)
  if (is.null(newdata)) return(fitted(object))
  stopifnot(length(newdata) == length(object$pcat))
  tid <- object$treeid
  ## New total per tree, replicated across that tree's categories.
  totals <- tapply(newdata, tid, sum)[as.character(tid)]
  object$pcat * setNames(totals, names(object$pcat))
}
| /mpt/R/mpt.R | no_license | ingted/R-Examples | R | false | false | 18,784 | r | # Apr/12/2016 anova.mpt() now works with stats::print.anova()
#
# Mar/18/2016 better fix using eval(..., as.list(spec$par)); this renders more
# expressions fittable via EM including base functions
#
# Aug/27/2015 BUG FIX: mpt(..., method = "EM") could fail when a symbol in the
# model was also used as an object name in the work space; fixed
# by using evalq() instead of eval(); problem with evalq() is that
# it hides base functions like sqrt()
#
# Mar/11/2015 add multinomial constant to logLik
#
# Sep/10/2014 new infrastructure, mptspec(), mpt(..., method = "BFGS")
#
# Dec/15/2013 simplify extraction of EM constants (a, b, c)
#
# Jan/24/2013 BUG FIX: typo in vcov.mpt(), hence wrong standard errors
# (reported by Rainer Alexandrowicz and Bartosz Gula)
## Fit MPT model via maximum likelihood (BFGS or EM)
## Fit a multinomial processing tree (MPT) model by maximum likelihood.
##
## spec:    an "mptspec" object defining the category probabilities.
## data:    a data frame holding a frequency variable (see freqvar), or a
##          vector/matrix of category frequencies.
## start:   optional (named) starting values for the free parameters.
## method:  "BFGS" maximizes on the logit scale via optim(); "EM" uses the
##          expectation-maximization updates in mptEM().
## treeid:  tree membership, given per category or as a column name in data.
## freqvar: name of the frequency column when `data` is a data frame.
## optimargs: extra arguments passed on to optim() or mptEM().
##
## Returns an object of class "mpt".
mpt <- function(spec, data, start = NULL, method = c("BFGS", "EM"),
treeid = "treeid", freqvar = "freq",
optimargs =
if(method == "BFGS") list(control =
list(reltol = .Machine$double.eps^(1/1.2), maxit = 1000))
else list())
{
stopifnot(class(spec) == "mptspec")
## Either, 'data' is a dataframe
if(is.data.frame(data)) {
y <- data[, freqvar]
## Tree ids: from the treeid argument, a data column, or the prefix of the
## spec's category names (the part before the first dot).
tid <- if(length(treeid) == length(y)) factor(treeid)
else if(length(treeid) == 1 && treeid %in% names(data))
factor(data[, treeid])
else if(length(names(spec$prob)) == length(y)) # read from spec
factor(gsub("^(.+)\\..*", "\\1", names(spec$prob)))
else rep(1, length(y))
data <- matrix(y, nrow=1L)
## Or a matrix/vector of frequencies
} else {
## sanity checking and reordering of data
if(is.null(dim(data))) data <- matrix(data, nrow=1L,
dimnames=list(NULL, names(data)))
if(!is.null(dnam <- colnames(data)) & !is.null(snam <- names(spec$prob))){
if(!all(snam == dnam)) warning("variable names do not match")
# if(!all(snam %in% dnam)) {
# warning("variable names do not match")
# } else {
# data <- data[, snam, drop = FALSE]
# }
}
tid <- if(length(treeid) == NCOL(data)) factor(treeid)
else if(length(names(spec$prob)) == NCOL(data))
factor(gsub("^(.+)\\..*", "\\1", names(spec$prob)))
else if(!is.null(colnames(data)))
factor(gsub("^(.+)\\..*", "\\1", colnames(data))) # before 1st dot
else rep(1, NCOL(data))
}
if(NCOL(data) != length(spec$prob))
stop("number of response categories and model equations do not match")
## for fitting only sums are needed
y <- colSums(data)
method <- match.arg(method)
## determine number of parameters and starting values
if(is.null(start)) {
start <- spec$par[is.na(spec$par)] # FIX ME: is.na still necessary?
start[] <- if (method == "EM") 0.5 else 0 # completely ad hoc
} else {
## do sanity checking of starting values/names/etc.
if(is.null(names(start))) names(start) <- names(spec$par[is.na(spec$par)])
if (method == "BFGS") start <- qlogis(start) # logit transform
}
if (method == "BFGS") {
## set up log-likelihood and gradient
## Optimization runs on the logit scale; plogis() maps back to (0, 1).
nll <- function(par) -sum(y * log(spec$par2prob(plogis(par))))
grad <- function(par) {
yp <- drop(y/spec$par2prob(plogis(par)))
dp <- spec$par2deriv(plogis(par))$deriv
-drop(dp %*% yp) * dlogis(par) # FIX ME: dlogis(par) optional? Ask Z.
}
optArgs <- list(par=start, fn=nll, gr=grad, method="BFGS")
optArgs <- c(optArgs, as.list(optimargs))
opt <- do.call(optim, optArgs)
# opt <- optim(start, nll, gr = grad, method = "BFGS",
# control = list(reltol = .Machine$double.eps^(1/1.2), maxit = 1000))
coef <- opt$par
loglik <- -opt$value
pcat <- spec$par2prob(plogis(coef))
aa <- bb <- cc <- NULL
# } else if (method == "BFGS") {
# opt <- optim(start, nll, gr = grad, method = "BFGS",
# control = list(reltol = .Machine$double.eps^(1/1.2), maxit = 1000))
# coef <- plogis(opt$par)
# vc <- solve(H(coef))
# loglik <- -sum(log(spec$par2prob(coef)) * y)
} else { # EM
## Get constants for EM algorithm
## Parse each category's probability expression into its "+"-separated
## branches, then each branch into "*"-separated terms, extracting the
## exponents of theta (aa), of (1 - theta) (bb), and the constant (cc).
terms <- sapply(lapply(spec$prob, as.character), strsplit, "\\+") # "+"
terms <- lapply(terms, function(x) gsub("[[:space:]]", "", x))
aa <- bb <- array(NA, c(max(sapply(terms, length)), # max paths to categ
length(terms), # n categories
length(start))) # n pars
cc <- matrix(1, dim(aa)[1], dim(aa)[2])
for(j in 1:dim(aa)[2]){
for(i in 1:sapply(terms, length)[j]){
pterms <- strsplit(terms[[j]][i], "\\*")[[1]]
cc[i, j] <- prod(sapply(parse(text=pterms), eval, as.list(spec$par)),
na.rm=TRUE)
for(s in seq_along(start)){
tname <- names(start)[s]
aa[i, j, s] <- sum(grepl(paste0("^", tname, "$"), pterms))
powix <- grepl(paste0("^", tname, "\\^[0-9]+"), pterms)
aa[i, j, s] <- sum(aa[i, j, s],
as.numeric(gsub(paste0("^", tname, "\\^([0-9]+)"), "\\1",
pterms)[powix]))
## Brackets () are optional
bb[i, j, s] <- sum(grepl(paste0("^\\(?1-", tname, "\\)?$"), pterms))
powix <- grepl(paste0("^\\(1-", tname, "\\)\\^[0-9]+"), pterms)
bb[i, j, s] <- sum(bb[i, j, s],
as.numeric(gsub(paste0("^\\(1-", tname, "\\)\\^([0-9]+)"), "\\1",
pterms)[powix]))
}
}
}
dimnames(aa)[[3]] <- dimnames(bb)[[3]] <- as.list(names(start))
## Call mptEM
optArgs <- list(theta=start, data=y, a=aa, b=bb, c=cc)
optArgs <- c(optArgs, as.list(optimargs))
opt <- do.call(mptEM, optArgs)
# opt <- mptEM(start, y, aa, bb, cc, ...)
coef <- opt$theta
loglik <- opt$loglik
pcat <- opt$pcat
}
## Category names: from the spec, the data, or synthesized as treeid.index
snam <- if(!is.null(names(spec$prob))) names(spec$prob)
else if(!is.null(colnames(data))) colnames(data)
else paste(tid, unlist(lapply(rle(as.character(tid))$lengths,
seq_len)), sep=".")
ncat <- table(tid)
nobs <- sum(ncat - 1)
ntrees <- length(ncat)
# n <- setNames(tapply(y, tid, sum)[as.character(tid)], snam)
nbytree <- tapply(y, tid, sum)
n <- setNames(nbytree[as.character(tid)], snam)
fitted <- n*pcat
G2 <- 2*sum(y*log(y/fitted), na.rm=TRUE)
df <- nobs - length(coef)
gof <- c(G2=G2, df=df, pval = pchisq(G2, df, lower.tail=FALSE))
rval <- list(
coefficients = coef,
## loglik includes the multinomial constant
loglik = sum(lfactorial(nbytree)) - sum(lfactorial(y)) + loglik,
nobs = nobs, # nrow(data),
# df = length(start),
fitted = fitted,
goodness.of.fit = gof,
ntrees = ntrees,
n = n,
y = setNames(y, snam),
pcat = setNames(pcat, snam),
treeid = tid,
a = aa, b = bb, c = cc,
spec = spec,
method = method,
optim = opt
)
class(rval) <- "mpt"
return(rval)
}
## EM algorithm
## Expectation-maximization updates for MPT parameter estimation.
## theta: named starting values; data: observed category frequencies;
## a, b: arrays of exponents of theta and (1 - theta), indexed
## branch x category x parameter; c: constant factor of each branch.
## Iterates until the log-likelihood improves by less than `tolerance`
## or `maxit` iterations are reached.
mptEM <- function(theta, data, a, b, c, maxit = 1000, tolerance = 1e-8,
stepsize = 1, verbose = FALSE){
nbranch <- dim(a)[1]
pbranch <- matrix(NA_real_, nbranch, length(data))
loglik0 <- -Inf
theta1 <- theta
iter <- 1
while(iter < maxit){
if(verbose) print(c(iter, loglik0))
## E step
## Branch probabilities given the current theta, then the category
## probabilities as their per-column sums.
for(i in seq_len(nbranch))
for(j in seq_along(data))
pbranch[i, j] <- c[i,j] * prod(theta^a[i,j,] * (1 - theta)^b[i,j,])
pcat <- colSums(pbranch, na.rm=TRUE)
loglik1 <- sum(data*log(pcat))
if(loglik1 - loglik0 < tolerance) break # stop if converged
loglik0 <- loglik1
## Expected branch counts given the data
m <- t(data*t(pbranch)/pcat)
## M step
for(s in seq_along(theta))
theta1[s] <-
sum(a[,,s]*m, na.rm=TRUE)/sum((a[,,s] + b[,,s])*m, na.rm=TRUE)
## stepsize = 1 gives the plain EM update; smaller values damp it
theta <- theta - stepsize*(theta - theta1)
iter <- iter + 1
}
if(iter >= maxit) warning("iteration maximum has been exceeded")
out <- list(theta=theta, loglik=loglik1, pcat=pcat, pbranch=pbranch,
iter=iter)
out
}
## Coefficient extractor for "mpt" objects.
## EM fits store estimates on the probability scale, BFGS fits on the logit
## scale; this method converts as needed so that `logit = FALSE` always
## yields probabilities and `logit = TRUE` always yields logits (with names
## wrapped as "logit(...)").
coef.mpt <- function(object, logit = FALSE, ...) {
  est <- object$coefficients
  em <- object$method == "EM"
  if (logit) {
    out <- if (em) qlogis(est) else est
    setNames(out, paste0("logit(", names(est), ")"))
  } else {
    if (em) est else plogis(est)
  }
}
## Covariance matrix (or Fisher information) for MPT parameter estimates.
## what = "vcov" returns the inverse of the observed information; "fisher"
## returns the information matrix itself.  With logit = TRUE the result is
## transformed to the logit scale via the delta method.
vcov.mpt <- function(object, logit = FALSE, what = c("vcov", "fisher"),
...){
what <- match.arg(what)
coef <- coef(object, logit=logit)
# Negative Hessian (information) on probability scale.
# Which one is correct? Should be very similar. Stick with I.obs.
# %*% is slightly faster than sum( * )
H <- function(par, y = object$y, spec = object$spec,
type = c("observed", "estimated", "expected")){
pp <- spec$par2prob(par)
yp <- drop(y/pp)
dp <- spec$par2deriv(par)$deriv
d2p <- spec$par2deriv(par)$deriv2
npar <- length(par)
H <- matrix(NA, npar, npar)
## Fill the upper triangle, then mirror it (the matrix is symmetric)
for (i in seq_len(npar))
for (j in i:npar)
switch(EXPR = match.arg(type),
observed =
H[i, j] <- yp %*% (dp[i, ]*dp[j, ]/pp - d2p[i, j, ]),
# H[i, j] <- sum(yp * (dp[i, ]*dp[j, ]/pp - d2p[i, j, ]))
estimated =
H[i, j] <- yp %*% (dp[i, ]*dp[j, ]/pp),
# H[i, j] <- sum(y*dp[i, ]*dp[j, ]/pp^2)
# H[i, j] <- sum(yp*dp[i, ]*dp[j, ]/pp)
# H[i, j] <- sum(yp/pp * dp[i, ]*dp[j, ])
# Only correct for single-tree models; for joint MPT models,
# calculate per-tree info and add them.
expected =
H[i, j] <- sum(y)*sum(dp[i, ]*dp[j, ]/pp)
)
H[lower.tri(H)] <- t(H)[lower.tri(H)]
dimnames(H) <- list(names(par), names(par))
H
}
if (logit) { # delta method
# Note that plogis(x)*(1 - plogis(x)) == dlogis(x)
# Automatic dimnames for tcrossprod(...)
# Conjecture: the smaller the matrix the more reliable its inverse
# dhessian <- diag(1/(plogis(coef) * (1 - plogis(coef))), length(coef))
# dhessian %*% solve(H(plogis(coef))) %*% dhessian
# solve(tcrossprod(dlogis(coef)) * H(plogis(coef))) # possibly singular
if (what == "vcov") 1/tcrossprod(dlogis(coef)) * solve(H(plogis(coef)))
else tcrossprod(dlogis(coef)) * H(plogis(coef))
} else {
if (what == "vcov") solve(H(coef))
else H(coef)
}
}
## Based on stats::confint.default
## Wald confidence intervals for mpt parameters, by default on the logit
## scale (where the normal approximation is usually better).
## parm may select parameters by name or position; level is the coverage.
confint.mpt <- function(object, parm, level = 0.95, logit = TRUE, ...)
{
cf <- coef(object, logit=logit)
pnames <- names(cf)
if (missing(parm))
parm <- pnames
else if (is.numeric(parm))
parm <- pnames[parm]
## Lower and upper tail probabilities, e.g. c(0.025, 0.975) for level 0.95
a <- (1 - level)/2
a <- c(a, 1 - a)
pct <- paste(format(100*a, trim=TRUE, scientific=FALSE, digits=3), "%")
fac <- qnorm(a)
ci <- array(NA, dim = c(length(parm), 2L), dimnames = list(parm, pct))
ses <- sqrt(diag(vcov(object, logit=logit)))[parm]
## estimate +/- z * se, via outer product over the two tail quantiles
ci[] <- cf[parm] + ses %o% fac
ci
}
# logLik.mpt <- function(object, ...)
# structure(object$loglik, df = object$df, class = "logLik")
## Print method for "mpt" objects: parameter estimates (optionally on the
## logit scale) followed by the G2 goodness-of-fit statistic.
## Returns `x` invisibly, as is conventional for print methods.
print.mpt <- function(x, digits = max(3, getOption("digits") - 3),
                      logit = FALSE, ...) {
  cat("\nMultinomial processing tree (MPT) models\n\n")
  cat("Parameter estimates:\n")
  print.default(format(coef(x, logit = logit), digits = digits),
                print.gap = 2, quote = FALSE)
  gof <- x$goodness.of.fit          # c(G2, df, pval)
  cat("\nGoodness of fit (2 log likelihood ratio):\n")
  cat("\tG2(", gof[2], ") = ", format(gof[1], digits = digits), ", p = ",
      format(gof[3], digits = digits), "\n", sep = "")
  cat("\n")
  invisible(x)
}
## Analysis-of-deviance table comparing two or more (nested) "mpt" models.
## Returns a data frame of class c("anova", "data.frame") so it is printed
## by stats::print.anova().
anova.mpt <- function (object, ..., test = c("Chisq", "none")){
## Adapted from MASS::anova.polr and stats::anova.glmlist
test <- match.arg(test)
dots <- list(...)
if (length(dots) == 0)
stop('anova is not implemented for a single "mpt" object')
mlist <- list(object, ...)
nmodels <- length(mlist)
## Recover the expressions used for the model arguments for the heading
names(mlist) <- c(deparse(substitute(object)),
as.character(substitute(...[]))[2:nmodels])
if (any(!sapply(mlist, inherits, "mpt")))
stop('not all objects are of class "mpt"')
ns <- sapply(mlist, function(x) length(x$fitted))
if (any(ns != ns[1]))
stop("models were not all fitted to the same size of dataset")
dfs <- sapply(mlist, function(x) x$goodness.of.fit["df"])
lls <- sapply(mlist, function(x) x$goodness.of.fit["G2"])
## Successive differences give the df and deviance of each comparison;
## the first model has no reference, hence the leading NA.
df <- c(NA, -diff(dfs))
x2 <- c(NA, -diff(lls))
pr <- c(NA, pchisq(x2[-1], df[-1], lower.tail = FALSE))
out <- data.frame(Resid.df = dfs, Deviance = lls, Df = df, Chisq = x2,
Prob = pr)
dimnames(out) <- list(1:nmodels, c("Resid. Df", "Resid. Dev", "Df",
"Deviance", "Pr(>Chi)"))
if (test == "none") out <- out[, -ncol(out)]
structure(out,
heading = c("Analysis of Deviance Table\n",
paste0("Model ", format(1L:nmodels), ": ",
names(mlist), collapse = "\n")),
class = c("anova", "data.frame"))
}
## Log-likelihood for mpt objects
## Returns a classed "logLik" object carrying the number of estimated
## parameters ("df") and observations ("nobs") so that AIC()/BIC() work.
logLik.mpt <- function(object, ...){
if(length(list(...)))
warning("extra arguments discarded")
p <- length(object$coefficients)
val <- object$loglik
attr(val, "df") <- p
attr(val, "nobs") <- object$nobs
class(val) <- "logLik"
val
}
## Number of observations
## Extractor used by information criteria (paired with logLik.mpt).
nobs.mpt <- function(object, ...) object$nobs
## Residuals for mpt models
## Deviance (default) or Pearson residuals for the fitted category counts.
## NOTE(review): `object$goodness` resolves to `goodness.of.fit` only via
## R's partial matching of `$` on lists — consider spelling it out.
residuals.mpt <- function(object, type=c("deviance", "pearson"), ...){
## Per-category deviance contribution; y and mu are probabilities, wt the
## per-tree sample size.  ifelse() guards against log(0) when y == 0.
dev.resids <- function(y, mu, wt)
2 * wt * (y * log(ifelse(y == 0, 1, y/mu)) - (y - mu))
type <- match.arg(type)
wts <- object$n
y <- object$y / wts
mu <- object$pcat
res <- switch(type,
deviance = if(object$goodness['df'] > 0){
d.res <- sqrt(pmax(dev.resids(y, mu, wts), 0))
ifelse(y > mu, d.res, -d.res) # sign
}
else rep.int(0, length(mu)),
pearson = (y - mu) * sqrt(wts)/sqrt(mu)
)
if(!is.null(object$na.action)) res <- naresid(object$na.action, res)
res
}
## Diagnostic plot for mpt models
## Plots deviance residuals against fitted category probabilities, with a
## zero reference line and a lowess smooth; if showNames is TRUE the points
## are labelled with their category names.
plot.mpt <- function(x, showNames = TRUE,
xlab="Predicted response probabilities",
ylab="Deviance residuals", ...){
xres <- resid(x)
mu <- x$pcat
## type = "n" sets up the axes only; points/labels are added below
plot(mu, xres, xlab = xlab, ylab = ylab, type="n", ...)
abline(h = 0, lty = 2)
if(showNames){
text(mu, xres, names(xres), cex=0.8)
panel.smooth(mu, xres, cex=0)
}else{
panel.smooth(mu, xres)
}
}
# ## Covariance matrix for MPT model parameters
# vcov.mpt <- function(object, ..., what = c("vcov", "fisher")){
# a <- object$a
# b <- object$b
# y <- object$y
# pcat <- object$pcat
# pbranch <- object$pbranch
# theta <- coef(object)
#
# ## as(Theta), bs(Theta)
# as.t <- bs.t <- numeric(length(theta))
# for(s in seq_along(theta)){
# for(j in seq_along(pcat)){
# as.t[s] <- as.t[s] + y[j]*sum(a[,j,s]*pbranch[,j]/pcat[j], na.rm=TRUE)
# bs.t[s] <- bs.t[s] + y[j]*sum(b[,j,s]*pbranch[,j]/pcat[j], na.rm=TRUE)
# }
# }
#
# ## d as(Theta)/d t, d bs(Theta)/d t
# das.t <- dbs.t <- matrix(0, length(theta), length(theta))
# for(s in seq_along(theta)){
# for(r in seq_along(theta)){
# for(j in seq_along(pcat)){
# das.t[s, r] <- das.t[s, r] + y[j] * (
# sum(a[,j,s] * pbranch[,j] *
# sum((a[,j,r]/theta[r] - b[,j,r]/(1 - theta[r])) * pbranch[,j],
# na.rm = TRUE) /
# pcat[j]^2, na.rm = TRUE) -
# sum(a[,j,s] *
# (a[,j,r]/theta[r] - b[,j,r]/(1 - theta[r])) * pbranch[,j] / pcat[j],
# na.rm = TRUE)
# )
#
# dbs.t[s, r] <- dbs.t[s, r] + y[j] * (
# sum(b[,j,s] * pbranch[,j] *
# sum((a[,j,r]/theta[r] - b[,j,r]/(1 - theta[r])) * pbranch[,j],
# na.rm = TRUE) /
# pcat[j]^2, na.rm = TRUE) -
# sum(b[,j,s] *
# (a[,j,r]/theta[r] - b[,j,r]/(1 - theta[r])) * pbranch[,j] / pcat[j],
# na.rm = TRUE)
# )
# }
# }
# }
#
# ## I(Theta)
# info.t <- das.t/theta - dbs.t/(1 - theta) +
# diag(as.t/theta^2 + bs.t/(1 - theta)^2)
# dimnames(info.t) <- list(names(theta), names(theta))
# what <- match.arg(what)
# if (what == "vcov") solve(info.t) else info.t
# }
## Summary method for "mpt" objects.
## Builds a coefficient table holding estimates on the probability and logit
## scales, logit-scale standard errors, and Wald z tests, and collects AIC,
## the G2 goodness-of-fit vector, and the Pearson X2 statistic.
summary.mpt <- function(object, ...){
  logit_est <- coef(object, logit = TRUE)
  prob_est <- coef(object, logit = FALSE)
  ## If vcov() fails, fall back to NA standard errors so the summary still
  ## prints something useful.
  se <- tryCatch(sqrt(diag(vcov(object, logit = TRUE))),
                 error = function(e) rep(NA, length(logit_est)))
  z <- logit_est / se
  p <- 2 * pnorm(-abs(z))
  tab <- cbind(prob_est, logit_est, se, z, p)
  dimnames(tab) <- list(names(prob_est),
                        c("Estimate", "Logit Estim.", "Std. Error",
                          "z value", "Pr(>|z|)"))
  structure(list(ntrees = object$ntrees,
                 coefficients = tab,
                 aic = AIC(object),
                 gof = object$goodness.of.fit,
                 X2 = sum(resid(object, "pearson")^2)),
            class = "summary.mpt")
}
## Print method for "summary.mpt" objects.
## cs.ind = 2:3 tells printCoefmat() which columns are coefficient-scale
## quantities (the logit estimate and its standard error).
## Note: x$coef reaches the `coefficients` component via partial matching.
print.summary.mpt <- function(x, digits = max(3, getOption("digits") - 3),
cs.ind = 2:3, ...){
cat("\nCoefficients:\n")
printCoefmat(x$coef, digits=digits, cs.ind=cs.ind, ...)
# cat("\nGoodness of fit:\n")
cat("\nLikelihood ratio G2:", format(x$gof[1], digits=digits), "on",
x$gof[2], "df,", "p-value:", format(x$gof[3], digits=digits), "\n")
cat("Pearson X2: ", format(x$X2, digits=digits), ", ",
"AIC: ", format(x$aic, digits=max(4, digits + 1)), sep="")
cat("\n")
cat("Number of trees:", x$ntrees, "\n")
invisible(x)
}
## Simulate responses from mpt model
## Draws one multinomial sample per tree from the fitted category
## probabilities, each tree's total fixed at its observed total.
## nsim and seed are accepted for compatibility with the simulate() generic
## but are not used by this method.
## NOTE(review): object$n[tid == i] is a vector of (identical) per-tree
## totals while rmultinom's `size` is documented as a single integer —
## confirm this passes the intended scalar.
simulate.mpt <- function(object, nsim, seed, pool = TRUE, ...){
if(pool){
tid <- object$treeid
freq <- unlist( lapply(unique(tid),
function(i) rmultinom(1, object$n[tid == i], object$pcat[tid == i])) )
names(freq) <- tid
}else{
stop("individual response simulation not yet implemented")
}
## Re-label the pooled draws with the original category names
setNames(freq, names(object$fitted))
}
## Model deviance: the likelihood-ratio statistic G2 (as a named value).
deviance.mpt <- function(object, ...) object$goodness.of.fit["G2"]
## Predictions from a fitted mpt model.
## type = "prob" returns fitted category probabilities; type = "freq"
## returns fitted frequencies, rescaled to new per-tree totals when
## `newdata` (a vector of frequencies) is supplied.
## NOTE(review): with newdata = NULL, fitted(object) dispatches to
## stats::fitted.default, which reads object$fitted.values while mpt objects
## store `fitted` — confirm this branch returns the intended values.
predict.mpt <- function(object, newdata = NULL, type = c("freq", "prob"),
...){
type <- match.arg(type)
if(type == "prob") object$pcat
else
if(is.null(newdata)) fitted(object)
else {
stopifnot(length(newdata) == length(object$pcat))
tid <- object$treeid
## New total per tree, replicated across that tree's categories
object$pcat * setNames(tapply(newdata, tid, sum)[as.character(tid)],
names(object$pcat))
}
}
|
## TidyTuesday 2020-07-07: load the coffee ratings dataset straight from the
## rfordatascience/tidytuesday repository (requires an internet connection).
library(tidyverse)
coffee_ratings <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-07-07/coffee_ratings.csv')
| /coffee/coffee.R | no_license | rhi-batstone/tidy_tuesday | R | false | false | 165 | r | library(tidyverse)
coffee_ratings <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-07-07/coffee_ratings.csv')
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/auxiliary.R
\name{samplecovs}
\alias{samplecovs}
\title{Generate Sample Covariances of 2 groups}
\usage{
samplecovs(ncopy, size)
}
\arguments{
\item{ncopy}{the total number of sample covariances to be generated.}
\item{size}{dimension \eqn{p}.}
}
\value{
a \eqn{(p\times p\times ncopy)} array of strictly positive definite sample covariances.
}
\description{
For visualization purposes, \code{samplecovs} generates a 3d array
of stacked sample covariances where, along the 3rd dimension, the first half
are sample covariances of samples generated independently from a
normal distribution with identity covariance, and the latter half
consists of sample covariances from a dense random population covariance.
}
\examples{
## generate total of 20 samples covariances of size 5-by-5.
samples <- samplecovs(20,5)
}
| /CovTools/man/samplecovs.Rd | no_license | akhikolla/TestedPackages-NoIssues | R | false | true | 884 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/auxiliary.R
\name{samplecovs}
\alias{samplecovs}
\title{Generate Sample Covariances of 2 groups}
\usage{
samplecovs(ncopy, size)
}
\arguments{
\item{ncopy}{the total number of sample covariances to be generated.}
\item{size}{dimension \eqn{p}.}
}
\value{
a \eqn{(p\times p\times ncopy)} array of strictly positive definite sample covariances.
}
\description{
For visualization purpose, \code{samplecovs} generates a 3d array
of stacked sample covariances where - in 3rd dimension, the first half
are sample covariances of samples generated independently from
normal distribution with identity covariance, where the latter half
consists of samples covariances from dense random population covariance.
}
\examples{
## generate total of 20 samples covariances of size 5-by-5.
samples <- samplecovs(20,5)
}
|
/地图可视化/ggmap包.R | no_license | LC-R-learing/sharing-data-DataScientists | R | false | false | 2,335 | r | ||
## Poll-aggregation exercises for the 2014 Brazilian presidential runoff.
## Loads the official results and the second-round polls, then converts poll
## percentages to proportions and indexes each poll by day and by company.
## NOTE(review): setwd() hard-codes a machine-specific path.
setwd('~/Documentos/academic/agregador_exercicios/')
library(rstan)
library(tidyverse)
library(lubridate)
# Actual results for Brazilian elections
resultados <- read.csv('Resultados - Presidente.csv', stringsAsFactors = F)
# Poll results for 2014 - Presidential elections (2º round)
polls <- read.csv('polls_2014_Presidential_2.csv', stringsAsFactors = F)
#polls <- read.csv('PollingData - 2014-T2-BRASIL-BR-Presidente.csv', stringsAsFactors = F, sep=';')
#polls <- select(polls, Data, Instituto, Dilma_PT = Dilma..PT., Aecio_PSDB = Aecio..PSDB.,
# Br.Nulo.Nenhum, NS.NR, n_entrevistas = Entrevistas )
#polls <- gather(polls, candidate, percentage, Dilma_PT, Aecio_PSDB, Br.Nulo.Nenhum, NS.NR)
#write.csv(polls, "polls_2014_Presidential_2.csv", row.names=F)
# wrangling data
election_day <- ymd("2014-10-26")
start_2round_day <- ymd("2014-10-5")
## t = 1 on the first day of the second round; id_instituto is an integer
## code for each polling company (alphabetical factor order).
polls <- polls %>%
mutate(election_day = election_day,
Data = ymd(Data),
percentage = percentage %>% str_replace(',', '.') %>% as.numeric %>% `/`(., 100)
) %>%
filter(Data >= start_2round_day) %>%
mutate(t = as.integer(Data - start_2round_day) + 1,
id_instituto = Instituto %>% as.factor %>% as.integer)
##################################################################
# E-1
# Running a model without likelihood
##################################################################
## Prior-only random walk for the latent vote intention mu (no data model):
## mu[1] is centered at 0.5 and each day drifts with sd 0.0025.
agg_model <- '
data {
int<lower=1> n_days; // number of days
}
parameters {
real<lower=0, upper=1> mu[n_days]; // underlying state of vote intention
}
model {
// state model
mu[1] ~ normal(0.5, 0.0025);
for (i in 2:n_days)
mu[i] ~ normal(mu[i - 1], 0.0025);
}
'
## Single chain, 2500 iterations; n_days spans the second-round period.
model_ex1 <- stan(model_code = agg_model, data = list(n_days = max(polls$t)), chains = 1, iter = 2500)
extract_summary <- function(model, first_day) {
  ## Summarise posterior draws of mu: the median and the 5/10/90/95
  ## percentiles per day, plus a day index `t` and a calendar date `days`.
  q <- function(p) apply(model$mu, 2, function(draws) quantile(draws, p))
  n_days <- dim(model$mu)[2]
  tibble(
    median = apply(model$mu, 2, median),
    p10 = q(0.1),
    p90 = q(0.90),
    p05 = q(0.05),
    p95 = q(0.95),
    t = seq_len(n_days),
    days = first_day + seq_len(n_days)
  )
}
# Checking convergence
traceplot(model_ex1)
model_ex1_data <- rstan::extract(model_ex1)
# Plotting data
## Posterior median of mu over time with a 90% credible ribbon.
model_ex1_data %>%
extract_summary(., start_2round_day) %>%
ggplot() +
geom_line(aes(x = days, y = median )) +
geom_ribbon(aes(x = days, ymin = p05, ymax = p95), alpha = 0.2) +
theme_bw() + labs(y = "percentage")
##################################################################
# E-2
# Running a model with likelihood
##################################################################
create_datalist <- function(df, candidate_name) {
  ## Assemble the Stan data list for one candidate's polls: the length of
  ## the campaign window, and per-poll values, days, sample sizes, and
  ## company ids.
  cand <- dplyr::filter(df, candidate == candidate_name)
  list(
    n_days = max(cand$t),
    y_n = nrow(cand),
    y_values = cand$percentage,
    y_days = cand$t,
    n_sample = cand$n_entrevistas,
    id_company = cand$id_instituto
  )
}
## E-2 model: the same daily random walk for mu, now with a likelihood —
## each poll is normal around mu on its day with the binomial sampling sd
## implied by its sample size.
agg_model2 <- '
data {
int<lower=1> n_days; // number of days
int<lower=1> y_n; // number of polls
real y_values[y_n]; // actual values in polls
int<lower=0> y_days[y_n]; // the number of days since starting election each poll was taken
real n_sample[y_n]; // sample size for each poll
// int<lower=0> id_company; // id for research companies
}
parameters {
real<lower=0, upper=1> mu[n_days]; // underlying state of vote intention
}
model {
mu[1] ~ uniform(0, 1);
for (i in 2:n_days)
mu[i] ~ normal(mu[i - 1], 0.0025);
for(x in 1:y_n) // likelihood
// y_values[x] ~ normal(mu[y_days[x]], 0.01 );
y_values[x] ~ normal(mu[y_days[x]], sqrt(y_values[x]*(1-y_values[x])/n_sample[x]) );
}
'
## Fit the model separately for each candidate.
ex2_input_data_dilma <- create_datalist(polls, "Dilma_PT")
model_ex2_dilma <- stan(model_code = agg_model2, data = ex2_input_data_dilma, chains = 1, iter = 2500)
ex2_input_data_aecio <- create_datalist(polls, "Aecio_PSDB")
model_ex2_aecio <- stan(model_code = agg_model2, data = ex2_input_data_aecio, chains = 1, iter = 2500)
# Checking convergence
traceplot(model_ex2_dilma)
traceplot(model_ex2_aecio)
model_ex2_data_dilma <- rstan::extract(model_ex2_dilma)
model_ex2_data_aecio <- rstan::extract(model_ex2_aecio)
# Merging data
## Join posterior day summaries back onto each candidate's poll rows.
model_ex2_data_dilma <- model_ex2_data_dilma %>%
extract_summary(., start_2round_day) %>%
inner_join(filter(polls, candidate == "Dilma_PT"))
model_ex2_data_aecio <- model_ex2_data_aecio %>%
extract_summary(., start_2round_day) %>%
inner_join(filter(polls, candidate == "Aecio_PSDB"))
## Posterior trajectories with 90% ribbons, overlaid with the raw polls
## (point shape indicates the polling company).
bind_rows(model_ex2_data_dilma, model_ex2_data_aecio) %>%
ggplot() +
geom_line(aes(x = days, y = median, colour = candidate )) +
geom_ribbon(aes(x = days, ymin = p05, ymax = p95, fill = candidate), alpha = 0.2) +
geom_point(aes(x = days, y = percentage, shape = Instituto)) +
theme_bw() + labs(y = "percentage") +
scale_fill_manual(values=c("#0000ff", "#ff0000")) +
scale_colour_manual(values=c("#0000ff", "#ff0000"))
##################################################################
# E-3
# Giving weight to each company
##################################################################
## Look up the integer ids assigned to the two largest polling companies
## (used by the `id_company[x] < 3` cutoff in agg_model3 below).
polls %>%
filter(Instituto == "Ibope") %>%
select(id_instituto) %>%
slice(1)
polls %>%
filter(Instituto == "Datafolha") %>%
select(id_instituto) %>%
slice(1)
# Datafolha -> id = 1
# Ibope -> id = 2
## E-3 model: like E-2, but polls from companies other than Datafolha/Ibope
## (ids 1 and 2) get a doubled measurement sd, i.e. half the weight.
agg_model3 <- '
data {
int<lower=1> n_days; // number of days
int<lower=1> y_n; // number of polls
real y_values[y_n]; // actual values in polls
int<lower=0> y_days[y_n]; // the number of days since starting election each poll was taken
real n_sample[y_n]; // sample size for each poll
int<lower=0> id_company[y_n]; // id for research companies
}
parameters {
real<lower=0, upper=1> mu[n_days]; // underlying state of vote intention
}
model {
mu[1] ~ uniform(0, 1);
for (i in 2:n_days)
mu[i] ~ normal(mu[i - 1], 0.0025);
for(x in 1:y_n) {
if (id_company[x] < 3) {
y_values[x] ~ normal(mu[y_days[x]], sqrt(y_values[x]*(1-y_values[x])/n_sample[x]) );
} else {
y_values[x] ~ normal(mu[y_days[x]], 2*sqrt(y_values[x]*(1-y_values[x])/n_sample[x]) );
}
}
}'
ex3_input_data_dilma <- create_datalist(polls, "Dilma_PT")
model_ex3_dilma <- stan(model_code = agg_model3, data = ex3_input_data_dilma, chains = 1, iter = 2500)
ex3_input_data_aecio <- create_datalist(polls, "Aecio_PSDB")
model_ex3_aecio <- stan(model_code = agg_model3, data = ex3_input_data_aecio, chains = 1, iter = 2500)
# Checking convergence
traceplot(model_ex3_dilma)
traceplot(model_ex3_aecio)
model_ex3_data_dilma <- rstan::extract(model_ex3_dilma)
model_ex3_data_aecio <- rstan::extract(model_ex3_aecio)
# Merging data
## Join posterior day summaries back onto each candidate's poll rows.
model_ex3_data_dilma <- model_ex3_data_dilma %>%
extract_summary(., start_2round_day) %>%
inner_join(filter(polls, candidate == "Dilma_PT"))
model_ex3_data_aecio <- model_ex3_data_aecio %>%
extract_summary(., start_2round_day) %>%
inner_join(filter(polls, candidate == "Aecio_PSDB"))
## Posterior trajectories with 90% ribbons, overlaid with the raw polls.
bind_rows(model_ex3_data_dilma, model_ex3_data_aecio) %>%
ggplot() +
geom_line(aes(x = Data, y = median, colour = candidate )) +
geom_ribbon(aes(x = Data, ymin = p05, ymax = p95, fill = candidate), alpha = 0.2) +
geom_point(aes(x = Data, y = percentage, shape = Instituto)) +
theme_bw() + labs(y = "percentage") +
scale_fill_manual(values=c("#0000ff", "#ff0000")) +
scale_colour_manual(values=c("#0000ff", "#ff0000"))
##################################################################
# E-4
# Calculating house effects
##################################################################
## Stan data list for the house-effects model (E-4).
## Extends create_datalist() — which it now reuses instead of duplicating —
## with the number of polling companies and the official election result
## used to anchor the final day of the latent series.
create_datalist4 <- function(df, candidate_name, actual_result) {
  base <- create_datalist(df, candidate_name)
  c(base,
    list(n_companies = length(unique(base$id_company)),
         actual_result = actual_result))
}
## E-4 model: adds a per-company bias term gamma ("house effect") to the
## poll likelihood and anchors the final day's mu at the official result.
agg_model4 <- '
data {
int<lower=1> n_days; // number of days
int<lower=1> y_n; // number of polls
real y_values[y_n]; // actual values in polls
int<lower=0> y_days[y_n]; // the number of days since starting election each poll was taken
real n_sample[y_n]; // sample size for each poll
int<lower=0> id_company[y_n]; // id for research companies
int<lower=1> n_companies; // number of companies
real actual_result;
}
parameters {
real<lower=0, upper=1> mu[n_days]; // underlying state of vote intention
real gamma[n_companies];
}
model {
mu[1] ~ uniform(0, 1);
for (i in 2:(n_days-1))
mu[i] ~ normal(mu[i - 1], 0.0025);
mu[n_days] ~ normal(actual_result, 0.0025 );
gamma ~ normal(0, 0.02);
for(x in 1:y_n) {
y_values[x] ~ normal(mu[y_days[x]] + gamma[id_company[x]],
sqrt(y_values[x]*(1-y_values[x])/n_sample[x]) );
}
}'
## actual_result values are the official second-round vote shares.
ex4_input_data_dilma <- create_datalist4(polls, "Dilma_PT", 0.48366024)
model_ex4_dilma <- stan(model_code = agg_model4, data = ex4_input_data_dilma, chains = 1, iter = 2500)
ex4_input_data_aecio <- create_datalist4(polls, "Aecio_PSDB", 0.45293976)
model_ex4_aecio <- stan(model_code = agg_model4, data = ex4_input_data_aecio, chains = 1, iter = 2500)
# Checking convergence
traceplot(model_ex4_dilma)
traceplot(model_ex4_aecio)
model_ex4_data_dilma <- rstan::extract(model_ex4_dilma)
model_ex4_data_aecio <- rstan::extract(model_ex4_aecio)
# Merging data
## Join posterior day summaries back onto each candidate's poll rows.
model_ex4_data_dilma <- model_ex4_data_dilma %>%
extract_summary(., start_2round_day) %>%
inner_join(filter(polls, candidate == "Dilma_PT"))
model_ex4_data_aecio <- model_ex4_data_aecio %>%
extract_summary(., start_2round_day) %>%
inner_join(filter(polls, candidate == "Aecio_PSDB"))
## Posterior trajectories with 90% ribbons, overlaid with the raw polls.
bind_rows(model_ex4_data_dilma, model_ex4_data_aecio) %>%
ggplot() +
geom_line(aes(x = Data, y = median, colour = candidate )) +
geom_ribbon(aes(x = Data, ymin = p05, ymax = p95, fill = candidate), alpha = 0.2) +
geom_point(aes(x = Data, y = percentage, shape = Instituto)) +
theme_bw() + labs(y = "percentage") +
scale_fill_manual(values=c("#0000ff", "#ff0000")) +
scale_colour_manual(values=c("#0000ff", "#ff0000"))
# Analyzing house effects
## Summarise the posterior draws of the per-company bias terms (gamma):
## median and 5/10/90/95 percentiles for each company, keyed by its
## integer id_instituto.
extract_house_effects <- function(model) {
  draws <- model$gamma
  pct <- function(p) apply(draws, 2, function(g) quantile(g, p))
  tibble(
    median = apply(draws, 2, median),
    p10 = pct(0.1),
    p90 = pct(0.90),
    p05 = pct(0.05),
    p95 = pct(0.95),
    id_instituto = seq_len(dim(draws)[2])
  )
}
## Attach company names to the house-effect summaries for the Dilma model.
gamma_dilma <- rstan::extract(model_ex4_dilma) %>%
extract_house_effects %>%
inner_join( polls %>%
distinct(id_instituto, Instituto)
) %>%
mutate(candidate = "Dilma_PT")
# Convergence for gamma
traceplot(model_ex4_dilma, "gamma")
# House effects (Companies)
## Point estimate and 90% interval of each company's bias term.
gamma_dilma %>%
ggplot(aes(x = Instituto, y = median)) +
geom_pointrange(aes(ymin = p05, ymax = p95)) +
theme_bw() +
labs(y = "House Effect") +
coord_flip()
| /analise_agregador.R | no_license | gijocr/agregador_exercicios | R | false | false | 11,542 | r | setwd('~/Documentos/academic/agregador_exercicios/')
library(rstan)
library(tidyverse)
library(lubridate)
# Actual results for Brazilian elections
resultados <- read.csv('Resultados - Presidente.csv', stringsAsFactors = F)
# Poll results for 2014 - Presidential elections (2º round)
polls <- read.csv('polls_2014_Presidential_2.csv', stringsAsFactors = F)
#polls <- read.csv('PollingData - 2014-T2-BRASIL-BR-Presidente.csv', stringsAsFactors = F, sep=';')
#polls <- select(polls, Data, Instituto, Dilma_PT = Dilma..PT., Aecio_PSDB = Aecio..PSDB.,
# Br.Nulo.Nenhum, NS.NR, n_entrevistas = Entrevistas )
#polls <- gather(polls, candidate, percentage, Dilma_PT, Aecio_PSDB, Br.Nulo.Nenhum, NS.NR)
#write.csv(polls, "polls_2014_Presidential_2.csv", row.names=F)
# wrangling data
# Key dates: election day and the start of the second-round window.
election_day <- ymd("2014-10-26")
start_2round_day <- ymd("2014-10-5")
# Clean the polls: parse dates, turn "xx,x"-style percentage strings into
# fractions in [0, 1], keep only second-round polls, then derive
# t (day index, t = 1 on start_2round_day) and a numeric company id.
polls <- polls %>%
mutate(election_day = election_day,
Data = ymd(Data),
percentage = percentage %>% str_replace(',', '.') %>% as.numeric %>% `/`(., 100)
) %>%
filter(Data >= start_2round_day) %>%
mutate(t = as.integer(Data - start_2round_day) + 1,
id_instituto = Instituto %>% as.factor %>% as.integer)
##################################################################
# E-1
# Running a model without likelihood
##################################################################
# Prior-only Stan model: a Gaussian random walk on the latent vote
# intention mu, with no polling data attached.
agg_model <- '
data {
int<lower=1> n_days; // number of days
}
parameters {
real<lower=0, upper=1> mu[n_days]; // underlying state of vote intention
}
model {
// state model
mu[1] ~ normal(0.5, 0.0025);
for (i in 2:n_days)
mu[i] ~ normal(mu[i - 1], 0.0025);
}
'
model_ex1 <- stan(model_code = agg_model, data = list(n_days = max(polls$t)), chains = 1, iter = 2500)
extract_summary <- function(model, first_day) {
  # Summarise the posterior of the latent vote-intention series.
  #
  # model: list of posterior draws as returned by rstan::extract();
  #        model$mu is an iterations x days matrix.
  # first_day: Date marking day 1 of the campaign window.
  # Returns a tibble with one row per day: posterior median, 10/90 and
  # 5/95 percent quantiles, the day index t and a calendar date.
  n_days <- ncol(model$mu)
  mu_q <- function(p) apply(model$mu, 2, quantile, probs = p)
  tibble(
    median = apply(model$mu, 2, median),
    p10 = mu_q(0.10),
    p90 = mu_q(0.90),
    p05 = mu_q(0.05),
    p95 = mu_q(0.95),
    t = seq_len(n_days),
    # NOTE(review): polls$t is defined as (Data - start) + 1, so the date
    # for index t is first_day + t - 1; `first_day + t` reproduces the
    # original behaviour but looks shifted by one day -- confirm intent.
    days = first_day + seq_len(n_days)
  )
}
# Checking convergence
traceplot(model_ex1)
model_ex1_data <- rstan::extract(model_ex1)
# Plotting data
# Prior-only fit: median line with a 90% band (p05-p95) over the window.
model_ex1_data %>%
extract_summary(., start_2round_day) %>%
ggplot() +
geom_line(aes(x = days, y = median )) +
geom_ribbon(aes(x = days, ymin = p05, ymax = p95), alpha = 0.2) +
theme_bw() + labs(y = "percentage")
##################################################################
# E-2
# Running a model with likelihood
##################################################################
create_datalist <- function(df, candidate_name) {
  # Assemble the data list consumed by the Stan aggregation models for a
  # single candidate: number of days, the poll values, their day indices,
  # sample sizes, and the polling-company ids.
  cand <- dplyr::filter(df, candidate == candidate_name)
  list(
    n_days = max(cand$t),
    y_n = nrow(cand),
    y_values = cand$percentage,
    y_days = cand$t,
    n_sample = cand$n_entrevistas,
    id_company = cand$id_instituto
  )
}
# Exercise-2 Stan model: the E-1 random walk plus a likelihood tying each
# poll to the latent state on its day; the measurement error uses the
# binomial approximation sqrt(p * (1 - p) / n_sample).
agg_model2 <- '
data {
int<lower=1> n_days; // number of days
int<lower=1> y_n; // number of polls
real y_values[y_n]; // actual values in polls
int<lower=0> y_days[y_n]; // the number of days since starting election each poll was taken
real n_sample[y_n]; // sample size for each poll
// int<lower=0> id_company; // id for research companies
}
parameters {
real<lower=0, upper=1> mu[n_days]; // underlying state of vote intention
}
model {
mu[1] ~ uniform(0, 1);
for (i in 2:n_days)
mu[i] ~ normal(mu[i - 1], 0.0025);
for(x in 1:y_n) // likelihood
// y_values[x] ~ normal(mu[y_days[x]], 0.01 );
y_values[x] ~ normal(mu[y_days[x]], sqrt(y_values[x]*(1-y_values[x])/n_sample[x]) );
}
'
# Fit the E-2 model separately for each candidate.
ex2_input_data_dilma <- create_datalist(polls, "Dilma_PT")
model_ex2_dilma <- stan(model_code = agg_model2, data = ex2_input_data_dilma, chains = 1, iter = 2500)
ex2_input_data_aecio <- create_datalist(polls, "Aecio_PSDB")
model_ex2_aecio <- stan(model_code = agg_model2, data = ex2_input_data_aecio, chains = 1, iter = 2500)
# Checking convergence
traceplot(model_ex2_dilma)
traceplot(model_ex2_aecio)
model_ex2_data_dilma <- rstan::extract(model_ex2_dilma)
model_ex2_data_aecio <- rstan::extract(model_ex2_aecio)
# Merging data
# Daily posterior summaries joined back to the raw polls per candidate.
model_ex2_data_dilma <- model_ex2_data_dilma %>%
extract_summary(., start_2round_day) %>%
inner_join(filter(polls, candidate == "Dilma_PT"))
model_ex2_data_aecio <- model_ex2_data_aecio %>%
extract_summary(., start_2round_day) %>%
inner_join(filter(polls, candidate == "Aecio_PSDB"))
# Median line, 90% ribbon and observed polls for both candidates.
bind_rows(model_ex2_data_dilma, model_ex2_data_aecio) %>%
ggplot() +
geom_line(aes(x = days, y = median, colour = candidate )) +
geom_ribbon(aes(x = days, ymin = p05, ymax = p95, fill = candidate), alpha = 0.2) +
geom_point(aes(x = days, y = percentage, shape = Instituto)) +
theme_bw() + labs(y = "percentage") +
scale_fill_manual(values=c("#0000ff", "#ff0000")) +
scale_colour_manual(values=c("#0000ff", "#ff0000"))
##################################################################
# E-3
# Giving weight to each company
##################################################################
# Look up the numeric ids that were assigned to the two big pollsters.
polls %>%
filter(Instituto == "Ibope") %>%
select(id_instituto) %>%
slice(1)
polls %>%
filter(Instituto == "Datafolha") %>%
select(id_instituto) %>%
slice(1)
# Datafolha -> id = 1
# Ibope -> id = 2
# Exercise-3 Stan model: like E-2, but polls from companies with id >= 3
# (everyone except Datafolha and Ibope, per the ids noted above) get a
# doubled standard error, down-weighting the smaller pollsters.
agg_model3 <- '
data {
int<lower=1> n_days; // number of days
int<lower=1> y_n; // number of polls
real y_values[y_n]; // actual values in polls
int<lower=0> y_days[y_n]; // the number of days since starting election each poll was taken
real n_sample[y_n]; // sample size for each poll
int<lower=0> id_company[y_n]; // id for research companies
}
parameters {
real<lower=0, upper=1> mu[n_days]; // underlying state of vote intention
}
model {
mu[1] ~ uniform(0, 1);
for (i in 2:n_days)
mu[i] ~ normal(mu[i - 1], 0.0025);
for(x in 1:y_n) {
if (id_company[x] < 3) {
y_values[x] ~ normal(mu[y_days[x]], sqrt(y_values[x]*(1-y_values[x])/n_sample[x]) );
} else {
y_values[x] ~ normal(mu[y_days[x]], 2*sqrt(y_values[x]*(1-y_values[x])/n_sample[x]) );
}
}
}'
# Fit the E-3 (weighted) model separately for each candidate.
ex3_input_data_dilma <- create_datalist(polls, "Dilma_PT")
model_ex3_dilma <- stan(model_code = agg_model3, data = ex3_input_data_dilma, chains = 1, iter = 2500)
ex3_input_data_aecio <- create_datalist(polls, "Aecio_PSDB")
model_ex3_aecio <- stan(model_code = agg_model3, data = ex3_input_data_aecio, chains = 1, iter = 2500)
# Checking convergence
traceplot(model_ex3_dilma)
traceplot(model_ex3_aecio)
model_ex3_data_dilma <- rstan::extract(model_ex3_dilma)
model_ex3_data_aecio <- rstan::extract(model_ex3_aecio)
# Merging data
model_ex3_data_dilma <- model_ex3_data_dilma %>%
extract_summary(., start_2round_day) %>%
inner_join(filter(polls, candidate == "Dilma_PT"))
model_ex3_data_aecio <- model_ex3_data_aecio %>%
extract_summary(., start_2round_day) %>%
inner_join(filter(polls, candidate == "Aecio_PSDB"))
# Note: this plot uses the poll date (Data) on the x axis, unlike the
# E-2 plot which used the model day index (days).
bind_rows(model_ex3_data_dilma, model_ex3_data_aecio) %>%
ggplot() +
geom_line(aes(x = Data, y = median, colour = candidate )) +
geom_ribbon(aes(x = Data, ymin = p05, ymax = p95, fill = candidate), alpha = 0.2) +
geom_point(aes(x = Data, y = percentage, shape = Instituto)) +
theme_bw() + labs(y = "percentage") +
scale_fill_manual(values=c("#0000ff", "#ff0000")) +
scale_colour_manual(values=c("#0000ff", "#ff0000"))
##################################################################
# E-4
# Calculating house effects
##################################################################
create_datalist4 <- function(df, candidate_name, actual_result) {
  # Same data list as create_datalist(), extended with the number of
  # polling companies and the official election result (used by the E-4
  # model to anchor the final day and estimate house effects).
  #
  # Delegates to create_datalist() so the two builders cannot drift
  # apart; field order and values are identical to the original.
  dat <- create_datalist(df, candidate_name)
  dat$n_companies <- length(unique(dat$id_company))
  dat$actual_result <- actual_result
  dat
}
# Exercise-4 Stan model: random walk whose final day is anchored to the
# official election result, plus a per-company bias term gamma (the
# "house effect") added to each poll's mean.
agg_model4 <- '
data {
int<lower=1> n_days; // number of days
int<lower=1> y_n; // number of polls
real y_values[y_n]; // actual values in polls
int<lower=0> y_days[y_n]; // the number of days since starting election each poll was taken
real n_sample[y_n]; // sample size for each poll
int<lower=0> id_company[y_n]; // id for research companies
int<lower=1> n_companies; // number of companies
real actual_result;
}
parameters {
real<lower=0, upper=1> mu[n_days]; // underlying state of vote intention
real gamma[n_companies];
}
model {
mu[1] ~ uniform(0, 1);
for (i in 2:(n_days-1))
mu[i] ~ normal(mu[i - 1], 0.0025);
mu[n_days] ~ normal(actual_result, 0.0025 );
gamma ~ normal(0, 0.02);
for(x in 1:y_n) {
y_values[x] ~ normal(mu[y_days[x]] + gamma[id_company[x]],
sqrt(y_values[x]*(1-y_values[x])/n_sample[x]) );
}
}'
# Fit E-4 per candidate, anchoring on the official second-round results.
ex4_input_data_dilma <- create_datalist4(polls, "Dilma_PT", 0.48366024)
model_ex4_dilma <- stan(model_code = agg_model4, data = ex4_input_data_dilma, chains = 1, iter = 2500)
ex4_input_data_aecio <- create_datalist4(polls, "Aecio_PSDB", 0.45293976)
model_ex4_aecio <- stan(model_code = agg_model4, data = ex4_input_data_aecio, chains = 1, iter = 2500)
# Checking convergence
traceplot(model_ex4_dilma)
traceplot(model_ex4_aecio)
model_ex4_data_dilma <- rstan::extract(model_ex4_dilma)
model_ex4_data_aecio <- rstan::extract(model_ex4_aecio)
# Merging data
# Daily posterior summaries joined back to the raw polls per candidate.
model_ex4_data_dilma <- model_ex4_data_dilma %>%
extract_summary(., start_2round_day) %>%
inner_join(filter(polls, candidate == "Dilma_PT"))
model_ex4_data_aecio <- model_ex4_data_aecio %>%
extract_summary(., start_2round_day) %>%
inner_join(filter(polls, candidate == "Aecio_PSDB"))
# Median line, 90% ribbon and observed polls for both candidates.
bind_rows(model_ex4_data_dilma, model_ex4_data_aecio) %>%
ggplot() +
geom_line(aes(x = Data, y = median, colour = candidate )) +
geom_ribbon(aes(x = Data, ymin = p05, ymax = p95, fill = candidate), alpha = 0.2) +
geom_point(aes(x = Data, y = percentage, shape = Instituto)) +
theme_bw() + labs(y = "percentage") +
scale_fill_manual(values=c("#0000ff", "#ff0000")) +
scale_colour_manual(values=c("#0000ff", "#ff0000"))
# Analyzing house effects
extract_house_effects <- function(model) {
  # Summarise the posterior of the house-effect parameters.
  #
  # model: list of posterior draws as returned by rstan::extract();
  #        model$gamma is an iterations x companies matrix.
  # Returns a tibble with one row per polling company: posterior median,
  # the 10/90 and 5/95 percent quantiles, and the numeric company id used
  # to join back onto `polls`.
  gamma_q <- function(p) apply(model$gamma, 2, quantile, probs = p)
  tibble(
    median = apply(model$gamma, 2, median),
    p10 = gamma_q(0.10),
    p90 = gamma_q(0.90),
    p05 = gamma_q(0.05),
    p95 = gamma_q(0.95),
    # seq_len() is safe even for a degenerate zero-column matrix,
    # unlike 1:dim(...)[2].
    id_instituto = seq_len(ncol(model$gamma))
  )
}
# Summarise Dilma's house effects and attach readable company names via
# the distinct (id_instituto, Instituto) lookup table.
gamma_dilma <- rstan::extract(model_ex4_dilma) %>%
extract_house_effects %>%
inner_join( polls %>%
distinct(id_instituto, Instituto)
) %>%
mutate(candidate = "Dilma_PT")
# Convergence for gamma
traceplot(model_ex4_dilma, "gamma")
# House effects (Companies)
# One point estimate with a 90% interval per company; coord_flip() makes
# the company names read horizontally.
gamma_dilma %>%
ggplot(aes(x = Instituto, y = median)) +
geom_pointrange(aes(ymin = p05, ymax = p95)) +
theme_bw() +
labs(y = "House Effect") +
coord_flip()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggcorrplot.R
\name{ggcorrplot}
\alias{ggcorrplot}
\alias{cor_pmat}
\title{Visualization of a correlation matrix using ggplot2}
\usage{
ggcorrplot(
corr,
method = c("square", "circle"),
type = c("full", "lower", "upper"),
ggtheme = ggplot2::theme_minimal,
title = "",
show.legend = TRUE,
legend.title = "Corr",
show.diag = NULL,
colors = c("blue", "white", "red"),
outline.color = "gray",
hc.order = FALSE,
hc.method = "complete",
lab = FALSE,
lab_col = "black",
lab_size = 4,
p.mat = NULL,
sig.level = 0.05,
insig = c("pch", "blank"),
pch = 4,
pch.col = "black",
pch.cex = 5,
tl.cex = 12,
tl.col = "black",
tl.srt = 45,
digits = 2,
as.is = FALSE
)
cor_pmat(x, ...)
}
\arguments{
\item{corr}{the correlation matrix to visualize}
\item{method}{character, the visualization method of correlation matrix to be
used. Allowed values are "square" (default), "circle".}
\item{type}{character, "full" (default), "lower" or "upper" display.}
\item{ggtheme}{ggplot2 function or theme object. Default value is
`theme_minimal`. Allowed values are the official ggplot2 themes including
theme_gray, theme_bw, theme_minimal, theme_classic, theme_void, .... Theme
objects are also allowed (e.g., `theme_classic()`).}
\item{title}{character, title of the graph.}
\item{show.legend}{logical, if TRUE the legend is displayed.}
\item{legend.title}{a character string for the legend title. lower
triangular, upper triangular or full matrix.}
\item{show.diag}{NULL or logical, whether display the correlation
coefficients on the principal diagonal. If \code{NULL}, the default is to
show diagonal correlation for \code{type = "full"} and to remove it when
\code{type} is one of "upper" or "lower".}
\item{colors}{a vector of 3 colors for low, mid and high correlation values.}
\item{outline.color}{the outline color of square or circle. Default value is
"gray".}
\item{hc.order}{logical value. If TRUE, correlation matrix will be hc.ordered
using hclust function.}
\item{hc.method}{the agglomeration method to be used in hclust (see ?hclust).}
\item{lab}{logical value. If TRUE, add correlation coefficient on the plot.}
\item{lab_col, lab_size}{size and color to be used for the correlation
coefficient labels. used when lab = TRUE.}
\item{p.mat}{matrix of p-value. If NULL, arguments sig.level, insig, pch,
pch.col, pch.cex is invalid.}
\item{sig.level}{significant level, if the p-value in p-mat is bigger than
sig.level, then the corresponding correlation coefficient is regarded as
insignificant.}
\item{insig}{character, specialized insignificant correlation coefficients,
"pch" (default), "blank". If "blank", wipe away the corresponding glyphs;
if "pch", add characters (see pch for details) on corresponding glyphs.}
\item{pch}{add character on the glyphs of insignificant correlation
coefficients (only valid when insig is "pch"). Default value is 4.}
\item{pch.col, pch.cex}{the color and the cex (size) of pch (only valid when
insig is "pch").}
\item{tl.cex, tl.col, tl.srt}{the size, the color and the string rotation of
text label (variable names).}
\item{digits}{Decides the number of decimal digits to be displayed (Default:
`2`).}
\item{as.is}{A logical passed to \code{\link[reshape2]{melt.array}}. If
\code{TRUE}, dimnames will be left as strings instead of being converted
using \code{\link[utils]{type.convert}}.}
\item{x}{numeric matrix or data frame}
\item{...}{other arguments to be passed to the function cor.test.}
}
\value{
\itemize{ \item ggcorrplot(): Returns a ggplot2 \item cor_pmat():
Returns a matrix containing the p-values of correlations }
}
\description{
\itemize{ \item ggcorrplot(): A graphical display of a
correlation matrix using ggplot2. \item cor_pmat(): Compute a correlation
matrix p-values. }
}
\examples{
# Compute a correlation matrix
data(mtcars)
corr <- round(cor(mtcars), 1)
corr
# Compute a matrix of correlation p-values
p.mat <- cor_pmat(mtcars)
p.mat
# Visualize the correlation matrix
# --------------------------------
# method = "square" or "circle"
ggcorrplot(corr)
ggcorrplot(corr, method = "circle")
# Reordering the correlation matrix
# --------------------------------
# using hierarchical clustering
ggcorrplot(corr, hc.order = TRUE, outline.color = "white")
# Types of correlogram layout
# --------------------------------
# Get the lower triangle
ggcorrplot(corr,
hc.order = TRUE, type = "lower",
outline.color = "white"
)
# Get the upper triangle
ggcorrplot(corr,
hc.order = TRUE, type = "upper",
outline.color = "white"
)
# Change colors and theme
# --------------------------------
# Argument colors
ggcorrplot(corr,
hc.order = TRUE, type = "lower",
outline.color = "white",
ggtheme = ggplot2::theme_gray,
colors = c("#6D9EC1", "white", "#E46726")
)
# Add correlation coefficients
# --------------------------------
# argument lab = TRUE
ggcorrplot(corr,
hc.order = TRUE, type = "lower",
lab = TRUE,
ggtheme = ggplot2::theme_dark(),
)
# Add correlation significance level
# --------------------------------
# Argument p.mat
# Barring the no significant coefficient
ggcorrplot(corr,
hc.order = TRUE,
type = "lower", p.mat = p.mat
)
# Leave blank on no significant coefficient
ggcorrplot(corr,
p.mat = p.mat, hc.order = TRUE,
type = "lower", insig = "blank"
)
# Changing number of digits for correlation coefficient
# --------------------------------
ggcorrplot(cor(mtcars),
type = "lower",
insig = "blank",
lab = TRUE,
digits = 3
)
}
| /man/ggcorrplot.Rd | no_license | mdedonno1337/ggcorrplot | R | false | true | 5,584 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggcorrplot.R
\name{ggcorrplot}
\alias{ggcorrplot}
\alias{cor_pmat}
\title{Visualization of a correlation matrix using ggplot2}
\usage{
ggcorrplot(
corr,
method = c("square", "circle"),
type = c("full", "lower", "upper"),
ggtheme = ggplot2::theme_minimal,
title = "",
show.legend = TRUE,
legend.title = "Corr",
show.diag = NULL,
colors = c("blue", "white", "red"),
outline.color = "gray",
hc.order = FALSE,
hc.method = "complete",
lab = FALSE,
lab_col = "black",
lab_size = 4,
p.mat = NULL,
sig.level = 0.05,
insig = c("pch", "blank"),
pch = 4,
pch.col = "black",
pch.cex = 5,
tl.cex = 12,
tl.col = "black",
tl.srt = 45,
digits = 2,
as.is = FALSE
)
cor_pmat(x, ...)
}
\arguments{
\item{corr}{the correlation matrix to visualize}
\item{method}{character, the visualization method of correlation matrix to be
used. Allowed values are "square" (default), "circle".}
\item{type}{character, "full" (default), "lower" or "upper" display.}
\item{ggtheme}{ggplot2 function or theme object. Default value is
`theme_minimal`. Allowed values are the official ggplot2 themes including
theme_gray, theme_bw, theme_minimal, theme_classic, theme_void, .... Theme
objects are also allowed (e.g., `theme_classic()`).}
\item{title}{character, title of the graph.}
\item{show.legend}{logical, if TRUE the legend is displayed.}
\item{legend.title}{a character string for the legend title. lower
triangular, upper triangular or full matrix.}
\item{show.diag}{NULL or logical, whether display the correlation
coefficients on the principal diagonal. If \code{NULL}, the default is to
show diagonal correlation for \code{type = "full"} and to remove it when
\code{type} is one of "upper" or "lower".}
\item{colors}{a vector of 3 colors for low, mid and high correlation values.}
\item{outline.color}{the outline color of square or circle. Default value is
"gray".}
\item{hc.order}{logical value. If TRUE, correlation matrix will be hc.ordered
using hclust function.}
\item{hc.method}{the agglomeration method to be used in hclust (see ?hclust).}
\item{lab}{logical value. If TRUE, add correlation coefficient on the plot.}
\item{lab_col, lab_size}{size and color to be used for the correlation
coefficient labels. used when lab = TRUE.}
\item{p.mat}{matrix of p-value. If NULL, arguments sig.level, insig, pch,
pch.col, pch.cex is invalid.}
\item{sig.level}{significant level, if the p-value in p-mat is bigger than
sig.level, then the corresponding correlation coefficient is regarded as
insignificant.}
\item{insig}{character, specialized insignificant correlation coefficients,
"pch" (default), "blank". If "blank", wipe away the corresponding glyphs;
if "pch", add characters (see pch for details) on corresponding glyphs.}
\item{pch}{add character on the glyphs of insignificant correlation
coefficients (only valid when insig is "pch"). Default value is 4.}
\item{pch.col, pch.cex}{the color and the cex (size) of pch (only valid when
insig is "pch").}
\item{tl.cex, tl.col, tl.srt}{the size, the color and the string rotation of
text label (variable names).}
\item{digits}{Decides the number of decimal digits to be displayed (Default:
`2`).}
\item{as.is}{A logical passed to \code{\link[reshape2]{melt.array}}. If
\code{TRUE}, dimnames will be left as strings instead of being converted
using \code{\link[utils]{type.convert}}.}
\item{x}{numeric matrix or data frame}
\item{...}{other arguments to be passed to the function cor.test.}
}
\value{
\itemize{ \item ggcorrplot(): Returns a ggplot2 \item cor_pmat():
Returns a matrix containing the p-values of correlations }
}
\description{
\itemize{ \item ggcorrplot(): A graphical display of a
correlation matrix using ggplot2. \item cor_pmat(): Compute a correlation
matrix p-values. }
}
\examples{
# Compute a correlation matrix
data(mtcars)
corr <- round(cor(mtcars), 1)
corr
# Compute a matrix of correlation p-values
p.mat <- cor_pmat(mtcars)
p.mat
# Visualize the correlation matrix
# --------------------------------
# method = "square" or "circle"
ggcorrplot(corr)
ggcorrplot(corr, method = "circle")
# Reordering the correlation matrix
# --------------------------------
# using hierarchical clustering
ggcorrplot(corr, hc.order = TRUE, outline.color = "white")
# Types of correlogram layout
# --------------------------------
# Get the lower triangle
ggcorrplot(corr,
hc.order = TRUE, type = "lower",
outline.color = "white"
)
# Get the upper triangle
ggcorrplot(corr,
hc.order = TRUE, type = "upper",
outline.color = "white"
)
# Change colors and theme
# --------------------------------
# Argument colors
ggcorrplot(corr,
hc.order = TRUE, type = "lower",
outline.color = "white",
ggtheme = ggplot2::theme_gray,
colors = c("#6D9EC1", "white", "#E46726")
)
# Add correlation coefficients
# --------------------------------
# argument lab = TRUE
ggcorrplot(corr,
hc.order = TRUE, type = "lower",
lab = TRUE,
ggtheme = ggplot2::theme_dark(),
)
# Add correlation significance level
# --------------------------------
# Argument p.mat
# Barring the no significant coefficient
ggcorrplot(corr,
hc.order = TRUE,
type = "lower", p.mat = p.mat
)
# Leave blank on no significant coefficient
ggcorrplot(corr,
p.mat = p.mat, hc.order = TRUE,
type = "lower", insig = "blank"
)
# Changing number of digits for correlation coefficient
# --------------------------------
ggcorrplot(cor(mtcars),
type = "lower",
insig = "blank",
lab = TRUE,
digits = 3
)
}
|
## domain: build a habitat-suitability map from a multi-layer raster
## object ("kasc") and a set of species locations ("pts").
## - kasc: object of class "kasc"; all layers must be numeric.
## - pts: two-column matrix/data.frame of point coordinates.
## - type: "value" returns the raw per-pixel value computed by the C
##   routine; "potential" returns a 1/NA mask of pixels above `thresh`.
## - thresh: cutoff used when type == "potential".
## Returns a single raster layer of class "asc".
"domain" <-
function(kasc, pts, type=c("value", "potential"),
thresh=0.95)
{
## Verifications
if (!inherits(kasc, "kasc"))
stop("should be an object of class \"kasc\"")
if (ncol(pts)!=2)
stop("pts should have 2 columns")
## The computation only handles quantitative layers, so reject factors.
typ<-""
for (i in 1:length(kasc)) {
if (is.factor(kasc[[i]])) {
typ[i] <- "factor"
}
else {
typ[i] <- "numeric"
}
}
if (!all(typ=="numeric"))
stop("All variables in kasc should be of mode numeric")
type<-match.arg(type)
## Preparation of the data to be passed to the C function "fctdomain"
## 1. spatial join of the points
ptsmod<-as.matrix(join.kasc(pts, kasc))
## 2. deletes the missing values
kasct<-kasc2df(kasc)
kascmod<-as.matrix(kasct$tab)
if (any(is.na(kascmod)))
stop("the same area should be provided for all variables")
## 3. Computation of the range of environmental variables
## (per-variable range passed to the C code alongside the data)
rg<-apply(kascmod, 2, function(x) range(x)[2] - range(x)[1])
## Call to the C function
## t() + column-major flattening makes each row's values contiguous;
## element [[7]] of the returned list is the output buffer, one value
## per non-missing pixel.
toto<-.C("fctdomain", as.double(t(kascmod)), as.double(t(ptsmod)),
as.double(rg), as.integer(nrow(ptsmod)),
as.integer(nrow(kascmod)), as.integer(ncol(ptsmod)),
double(nrow(kascmod)), PACKAGE="adehabitat")[[7]]
## Transfo of the output vector into a map (equivalent to df2kasc)
## Pad with NA for the pixels dropped by kasc2df, then restore the
## original pixel order using the saved index vector kasct$index.
N <- nrow(kasc)
indw <- c(1:N)
n1 <- length(toto)
compl <- rep(NA, N - n1)
output <- c(toto, compl)
indcompl <- indw[is.na(match(indw, kasct$index))]
indtot <- c(kasct$index, indcompl)
output <- output[sort(indtot, index.return = TRUE)$ix]
output<-matrix(output, attr(kasc,"ncol"))
## Should the value or the potential habitat be exported in the output ?
if (type!="value") {
output[output<=thresh]<-NA
output[output>thresh]<-1
}
## Output
## Carry over the georeferencing attributes expected of an "asc" object.
attr(output, "xll") <- attr(kasc, "xll")
attr(output, "yll") <- attr(kasc, "yll")
attr(output, "cellsize") <- attr(kasc, "cellsize")
attr(output, "type") <- "numeric"
class(output)<-"asc"
return(output)
}
| /R/domain.r | no_license | ClementCalenge/adehabitat | R | false | false | 2,211 | r | "domain" <-
function(kasc, pts, type=c("value", "potential"),
thresh=0.95)
{
## Build a habitat-suitability map ("asc" layer) from a multi-layer
## numeric raster ("kasc") and species locations ("pts").
## type = "value" keeps the raw per-pixel value; "potential" returns a
## 1/NA mask of pixels above `thresh`.
## Verifications
if (!inherits(kasc, "kasc"))
stop("should be an object of class \"kasc\"")
if (ncol(pts)!=2)
stop("pts should have 2 columns")
## The computation only handles quantitative layers, so reject factors.
typ<-""
for (i in 1:length(kasc)) {
if (is.factor(kasc[[i]])) {
typ[i] <- "factor"
}
else {
typ[i] <- "numeric"
}
}
if (!all(typ=="numeric"))
stop("All variables in kasc should be of mode numeric")
type<-match.arg(type)
## Preparation of the data to be passed to the C function "fctdomain"
## 1. spatial join of the points
ptsmod<-as.matrix(join.kasc(pts, kasc))
## 2. deletes the missing values
kasct<-kasc2df(kasc)
kascmod<-as.matrix(kasct$tab)
if (any(is.na(kascmod)))
stop("the same area should be provided for all variables")
## 3. Computation of the range of environmental variables
## (per-variable range passed to the C code alongside the data)
rg<-apply(kascmod, 2, function(x) range(x)[2] - range(x)[1])
## Call to the C function
## element [[7]] of the returned list is the output buffer, one value
## per non-missing pixel.
toto<-.C("fctdomain", as.double(t(kascmod)), as.double(t(ptsmod)),
as.double(rg), as.integer(nrow(ptsmod)),
as.integer(nrow(kascmod)), as.integer(ncol(ptsmod)),
double(nrow(kascmod)), PACKAGE="adehabitat")[[7]]
## Transfo of the output vector into a map (equivalent to df2kasc)
## Pad with NA for the pixels dropped by kasc2df, then restore the
## original pixel order using the saved index vector kasct$index.
N <- nrow(kasc)
indw <- c(1:N)
n1 <- length(toto)
compl <- rep(NA, N - n1)
output <- c(toto, compl)
indcompl <- indw[is.na(match(indw, kasct$index))]
indtot <- c(kasct$index, indcompl)
output <- output[sort(indtot, index.return = TRUE)$ix]
output<-matrix(output, attr(kasc,"ncol"))
## Should the value or the potential habitat be exported in the output ?
if (type!="value") {
output[output<=thresh]<-NA
output[output>thresh]<-1
}
## Output
## Carry over the georeferencing attributes expected of an "asc" object.
attr(output, "xll") <- attr(kasc, "xll")
attr(output, "yll") <- attr(kasc, "yll")
attr(output, "cellsize") <- attr(kasc, "cellsize")
attr(output, "type") <- "numeric"
class(output)<-"asc"
return(output)
}
|
library(qqman)
library(ggplot2)
#qq plot bayes factors with multiple window sizes:
# Render a 3-panel Manhattan-plot figure to a TIFF file, combining
# several genome-scan statistics: an LFMM panel (currently commented
# out), a per-SNP Fst panel, and XtX panels for each window size in
# `wins`. inpath_* are paths to RDS snptables; outpref prefixes the
# output file name. *_thresh / *_winthresh values, when not NA, are
# drawn as horizontal significance lines. Panels are lettered A, B, ...
# in their top-left margins. Side effects only (opens/closes a device);
# no return value.
manhatplot_fst_lfmm_xtx_bf <- function(inpath_fst,inpath_xtx,inpath_bf,inpref_lfmm,insuf_lfmm,outpref,npops,wins,zs,bfs,fsts,
xtx_thresh = NA, bf_thresh = NA, lfmm_thresh = NA, fst_thresh = NA,
xtx_hits = NA, bf_hits = NA, lfmm_hits = NA, fst_hits = NA,
xtx_winthresh=NA,bf_winthresh=NA,lfmm_winthresh=NA,fst_winthresh=NA, lfmmwins = NA) {
#here, inpath is the path to a snptable with bf values, and outpref is the prefix for all plots
#npops is the number of populations (used for computing degrees of freedom)
#wins is a vector of the sliding window sizes to be used for plotting
# Open a high-resolution, LZW-compressed TIFF device with a 3x1 layout.
tifoutpath = paste(outpref,"_multistat_forpaper_agnostic.tif",sep="")
tiff(tifoutpath,
width=2*4*600, height=3*2*600, res=600, compression="lzw")
par(mfrow=c(3,1))
par(mar=c(5.1,4.1,4.1,2.1))
# Panel-letter bookkeeping and shared text/point sizes.
all_labels = LETTERS
labels_index = 1
tlin=2
label_cex = 1.5
point_cex=0.5
print("done1")
######
#lfmm:
#if (is.na(lfmmwins)) {lfmmwins = wins}
#for (myz in zs){
# inpath = paste(inpref_lfmm,myz,insuf_lfmm,sep="")
# data <- readRDS(inpath)
# for (mywin in wins){
# if (mywin == 1) {
# mycolname = "adjusted.p.values"
# tempthresh = lfmm_thresh
# } else {
# mycolname = paste("lfmm_pcombo_win",mywin,sep="")
# tempthresh = lfmm_winthresh
# }
# mydat <- data[,mycolname]
# mychrom <- data$sort_variable
# mypos <- data$POS
# #theorybfs <- seq(1/length(mydat),1-(1/length(mydat)),length.out=length(mydat))
# manhatdat <- data.frame(BP=mypos,P=mydat,CHR=mychrom)
# manhatdatlog <- data.frame(BP=mypos,P=-log10(mydat),CHR=mychrom)
#
# manhattan(manhatdatlog,logp=FALSE,xlab="Contig",ylab="BF",main="11-population LFMM p-value (censored, logged)",ylim=c(min(manhatdatlog$P),max(manhatdatlog$P)),
# suggestiveline=FALSE,genomewideline=tempthresh)
# mtext(all_labels[labels_index],3, line=tlin, adj=0, cex=label_cex)
# labels_index = labels_index + 1
#
# }
#}
print("done2")
#######
#fst:
# Fst panel: one plot per statistic in `fsts`, unwindowed only
# (the inner loop iterates over the single value 1).
data <- readRDS(inpath_fst)
for (myfst in fsts){
for (mywin in 1){
if (mywin == 1) {
mycolname = paste("fst_",myfst,sep="")
tempthresh = fst_thresh
} else {
mycolname = paste("fst_",myfst,"_win",mywin,sep="")
tempthresh = fst_winthresh
}
mydat <- data[,mycolname]
mychrom <- data$chromnum
mypos <- data$POS
# NOTE(review): theorybfs is computed here but never used below.
mylambda = 1/mean(mydat,na.rm=TRUE)
theorybfs <- qexp(seq(1/length(mydat),1-(1/length(mydat)),length.out=length(mydat)),rate=mylambda)
manhatdat <- data.frame(BP=mypos,P=mydat,CHR=mychrom)
# Drop incomplete rows and re-code contig ids to consecutive integers
# (manhattan() is given a numeric CHR column).
manhatdat <- manhatdat[complete.cases(manhatdat),]
manhatdat$CHR <- factor(manhatdat$CHR)
llevels <- length(levels(manhatdat$CHR))
levels(manhatdat$CHR) <- 1:llevels
manhatdat$CHR <- as.numeric(manhatdat$CHR)
# NOTE(review): manhatdatlog (log10 with non-positive values floored at
# 1e-9) is built but not plotted in this section.
manhatdatlog <- data.frame(BP=mypos,P=log10(sapply(mydat,FUN = function(x){
if (!is.na(x)) {
if (x <= 0) {1e-9} else {x}
} else {x}
})),CHR=mychrom)
manhattan(manhatdat,logp=FALSE,xlab="Contig",ylab=expression("F"["ST"]),main=expression("11-population F"["ST"]),
suggestiveline=FALSE,genomewideline=tempthresh,cex=point_cex)
# Panel letter in the top-left margin, then advance to the next letter.
mtext(all_labels[labels_index],3, line=tlin, adj=0, cex=label_cex)
labels_index = labels_index + 1
}
}
print("done4")
#######
#xtx:
# XtX panels: one plot per window size; mywin == 1 is the raw per-SNP
# statistic, anything else uses the precomputed windowed column.
data <- readRDS(inpath_xtx)
for (mywin in wins){
if (mywin == 1) {
tempthresh=xtx_thresh
mycolname = "xtx"
myylab=expression("X"^"T"*"X")
mymain=expression("11-population X"^"T"*"X")
} else {
tempthresh=xtx_winthresh
mycolname = paste("xtx_win",mywin,sep="")
myylab=expression("25-SNP X"^"T"*"X")
mymain=expression("11-population X"^"T"*"X, 25-SNP average")
}
mydat <- data[,mycolname]
mychrom <- data$chromnum
mypos <- data$chrompos
# NOTE(review): theorybfs is computed here but never used below.
theorybfs <- seq(1/length(mydat),1-(1/length(mydat)),length.out=length(mydat))
manhatdat <- data.frame(BP=mypos,P=mydat,CHR=mychrom)
#manhatdatlog <- data.frame(BP=mypos,P=log10(mydat),CHR=mychrom)
# Draw the significance line only when a raw-XtX threshold was supplied.
if (!is.na(xtx_thresh)) {
manhattan(manhatdat,logp=FALSE,xlab="Contig",ylab=myylab,main=mymain,ylim=c(min(manhatdat$P),max(manhatdat$P)),
suggestiveline=FALSE,genomewideline=tempthresh,cex=point_cex)
} else {
manhattan(manhatdat,logp=FALSE,xlab="Contig",ylab=myylab,main=mymain,ylim=c(min(manhatdat$P),max(manhatdat$P)),
suggestiveline=FALSE,genomewideline=FALSE,cex=point_cex)
}
mtext(all_labels[labels_index],3, line=tlin, adj=0, cex=label_cex)
labels_index = labels_index + 1
}
print("done5")
######
# Close the TIFF device so the file is flushed to disk.
dev.off()
}
# Input tables: LFMM z-score path is split into a prefix/suffix so the
# latent-factor count (myzs) can be interpolated between them.
myinpref_lfmm <- "/home/jbaldwin/new_home/wild_shrimp_data/for_shrimp_paper_1/bwa_alignments/snp_tables/deduped/new_downsampled/full_fst_dsbig_fused/bayenv_5repavg/v4_full_data_fixedinput_splitout/lfmm_multiwin/dsbig_snp_freqmat_fused_cens.txt.K9.s"
myinsuf_lfmm <- ".9.zscoreavg.withchroms.withhead.sorted.multiwin_1000.RDS"
myin_fst <- "/home/jbaldwin/new_home/wild_shrimp_data/for_shrimp_paper_1/bwa_alignments/snp_tables/deduped/new_downsampled/full_fst_dsbig_fused/python_fst/fst_multiwin/only-PASS-Q30-SNPS-cov_v2_ds_7_11_v1_fused_sorted_fst_mean_multiwin_snpnames_chromnums.RDS"
myin_xtx <- "dsbig_fused_xtx_and_bf_5combo_withchroms_2sorted_cens_withhead_uniq_multiwin_plusxtx.RDS"
myin_bf <- myin_xtx
myoutpref <- "manhat_plot_allstats_withlabels_withlines_5avg"
# Analysis settings: population count, window sizes, and which
# statistic variants to plot.
mynpops <- 11
mywins <- c(1,25)
mylfmmwins <- c(1,1000)
myzs <- c(8)
mybfs <- c(23)
myfsts <- c("mean")
# Column offset used to index the BF-threshold tables below.
#mybfs_conv1 <- (mybfs - 2) / 3 + 1
mybfs_conv1 <- mybfs - 1
# Per-SNP significance thresholds read from precomputed tables.
myxtx_thresh <- read.table("xtx_thresh_11pop.txt")[1,1]
mybf_thresh <- read.table("bf_thresh_11pop.txt")[1,mybfs_conv1]
mylfmm_thresh <- read.table("lfmm_thresh_11pop.txt")[1,((myzs-1)*3)+1]
myfst_thresh <- read.table("gene_association/fst_significance_data/fst_mean_sigthresh_fst_nowin.txt")[1,1]
# Windowed thresholds; the XtX window threshold is hard-coded to 25
# instead of being read from its table (the read is commented out).
#myxtx_thresh_win <- read.table("xtx_thresh_11pop_25win.txt")[1,1]
myxtx_thresh_win <- 25
mybf_thresh_win <- read.table("bf_thresh_11pop_25win.txt")[1,mybfs_conv1]
mylfmm_thresh_win <- read.table("lfmm_thresh_11pop_100win.txt")[1,((myzs-1)*3)+1]
myfst_thresh_win <- read.table("gene_association/fst_significance_data/fst_mean_sigthresh_fst.txt")[1,1]
# Produce the figure.
manhatplot_fst_lfmm_xtx_bf(myin_fst,myin_xtx,myin_bf,myinpref_lfmm,myinsuf_lfmm,
myoutpref,mynpops,mywins,
myzs,mybfs,myfsts,
xtx_thresh=myxtx_thresh,bf_thresh=mybf_thresh,lfmm_thresh=mylfmm_thresh,fst_thresh=myfst_thresh,
xtx_winthresh=myxtx_thresh_win,bf_winthresh=mybf_thresh_win,lfmm_winthresh=mylfmm_thresh_win,fst_winthresh=myfst_thresh_win,
lfmmwins=mylfmmwins)
# a<- rnorm(1000,0,1)
# b<-rnorm(1000,0,2)
# c<-rnorm(1000,0,3)
# d<-rnorm(1000,0,4)
# #quartz(w=6,h=8)
# par(mfrow=c(2,2))
# #par(mai=c(1,0,1,1))
# par(mar=c(3,2,4,1))
# #par(plt=c(1.1,1.1,1.1,1.1))
# tlin=2
# hist(a)
# mtext("A",3, line=tlin, adj=0, cex=2)
# hist(b)
# mtext("B",3, line=tlin, adj=0, cex=2)
# hist(c)
# mtext("C",3, line=tlin, adj=0, cex=2)
# hist(d)
# mtext("D",3, line=tlin, adj=0, cex=2)
| /Baldwin-Brown_2017_Scripts/main_scripts/downstream_analysis_dir/manhat_plot_all_stats_for_paper_withlabels_withlines_agnostic.R | no_license | jgbaldwinbrown/jgbutils | R | false | false | 7,414 | r | library(qqman)
library(ggplot2)
#qq plot bayes factors with multiple window sizes:
# Draw a multi-panel (3x1) Manhattan-style figure of population-genetic
# statistics (FST and XtX panels active; the LFMM panel is commented out) to a
# single lettered TIFF named "<outpref>_multistat_forpaper_agnostic.tif".
# NOTE(review): inpath_bf, inpref_lfmm, insuf_lfmm, npops, zs, bfs and the
# *_hits arguments are accepted but not used by the active code below.
manhatplot_fst_lfmm_xtx_bf <- function(inpath_fst,inpath_xtx,inpath_bf,inpref_lfmm,insuf_lfmm,outpref,npops,wins,zs,bfs,fsts,
xtx_thresh = NA, bf_thresh = NA, lfmm_thresh = NA, fst_thresh = NA,
xtx_hits = NA, bf_hits = NA, lfmm_hits = NA, fst_hits = NA,
xtx_winthresh=NA,bf_winthresh=NA,lfmm_winthresh=NA,fst_winthresh=NA, lfmmwins = NA) {
#here, inpath is the path to a snptable with bf values, and outpref is the prefix for all plots
#npops is the number of populations (used for computing degrees of freedom)
#wins is a vector of the sliding window sizes to be used for plotting
tifoutpath = paste(outpref,"_multistat_forpaper_agnostic.tif",sep="")
# Open the TIFF device for the whole figure; closed by dev.off() at the end.
tiff(tifoutpath,
width=2*4*600, height=3*2*600, res=600, compression="lzw")
par(mfrow=c(3,1))
par(mar=c(5.1,4.1,4.1,2.1))
# Panels are tagged A, B, C, ... in the order they are drawn.
all_labels = LETTERS
labels_index = 1
tlin=2
label_cex = 1.5
point_cex=0.5
print("done1")
######
#lfmm:
#if (is.na(lfmmwins)) {lfmmwins = wins}
#for (myz in zs){
# inpath = paste(inpref_lfmm,myz,insuf_lfmm,sep="")
# data <- readRDS(inpath)
# for (mywin in wins){
# if (mywin == 1) {
# mycolname = "adjusted.p.values"
# tempthresh = lfmm_thresh
# } else {
# mycolname = paste("lfmm_pcombo_win",mywin,sep="")
# tempthresh = lfmm_winthresh
# }
# mydat <- data[,mycolname]
# mychrom <- data$sort_variable
# mypos <- data$POS
# #theorybfs <- seq(1/length(mydat),1-(1/length(mydat)),length.out=length(mydat))
# manhatdat <- data.frame(BP=mypos,P=mydat,CHR=mychrom)
# manhatdatlog <- data.frame(BP=mypos,P=-log10(mydat),CHR=mychrom)
#
# manhattan(manhatdatlog,logp=FALSE,xlab="Contig",ylab="BF",main="11-population LFMM p-value (censored, logged)",ylim=c(min(manhatdatlog$P),max(manhatdatlog$P)),
# suggestiveline=FALSE,genomewideline=tempthresh)
# mtext(all_labels[labels_index],3, line=tlin, adj=0, cex=label_cex)
# labels_index = labels_index + 1
#
# }
#}
print("done2")
#######
#fst:
# FST panel: one plot per statistic in `fsts`, single-SNP resolution only.
# NOTE(review): the inner loop iterates over the literal 1, not `wins`, so the
# windowed FST branch below is currently unreachable -- confirm intended.
data <- readRDS(inpath_fst)
for (myfst in fsts){
for (mywin in 1){
if (mywin == 1) {
mycolname = paste("fst_",myfst,sep="")
tempthresh = fst_thresh
} else {
mycolname = paste("fst_",myfst,"_win",mywin,sep="")
tempthresh = fst_winthresh
}
mydat <- data[,mycolname]
mychrom <- data$chromnum
mypos <- data$POS
# Exponential quantiles fit to the observed mean (theorybfs is computed but
# not used by the plotting calls below).
mylambda = 1/mean(mydat,na.rm=TRUE)
theorybfs <- qexp(seq(1/length(mydat),1-(1/length(mydat)),length.out=length(mydat)),rate=mylambda)
manhatdat <- data.frame(BP=mypos,P=mydat,CHR=mychrom)
manhatdat <- manhatdat[complete.cases(manhatdat),]
# Recode chromosome labels to consecutive integers, as manhattan() expects.
manhatdat$CHR <- factor(manhatdat$CHR)
llevels <- length(levels(manhatdat$CHR))
levels(manhatdat$CHR) <- 1:llevels
manhatdat$CHR <- as.numeric(manhatdat$CHR)
# Log-scale copy with non-positive values floored at 1e-9 (unused below).
manhatdatlog <- data.frame(BP=mypos,P=log10(sapply(mydat,FUN = function(x){
if (!is.na(x)) {
if (x <= 0) {1e-9} else {x}
} else {x}
})),CHR=mychrom)
manhattan(manhatdat,logp=FALSE,xlab="Contig",ylab=expression("F"["ST"]),main=expression("11-population F"["ST"]),
suggestiveline=FALSE,genomewideline=tempthresh,cex=point_cex)
mtext(all_labels[labels_index],3, line=tlin, adj=0, cex=label_cex)
labels_index = labels_index + 1
}
}
print("done4")
#######
#xtx:
# XtX panels: one plot per window size (single-SNP and 25-SNP average).
data <- readRDS(inpath_xtx)
for (mywin in wins){
if (mywin == 1) {
tempthresh=xtx_thresh
mycolname = "xtx"
myylab=expression("X"^"T"*"X")
mymain=expression("11-population X"^"T"*"X")
} else {
tempthresh=xtx_winthresh
mycolname = paste("xtx_win",mywin,sep="")
myylab=expression("25-SNP X"^"T"*"X")
mymain=expression("11-population X"^"T"*"X, 25-SNP average")
}
mydat <- data[,mycolname]
mychrom <- data$chromnum
mypos <- data$chrompos
theorybfs <- seq(1/length(mydat),1-(1/length(mydat)),length.out=length(mydat))
manhatdat <- data.frame(BP=mypos,P=mydat,CHR=mychrom)
#manhatdatlog <- data.frame(BP=mypos,P=log10(mydat),CHR=mychrom)
# Draw the significance line only when a threshold was supplied.
if (!is.na(xtx_thresh)) {
manhattan(manhatdat,logp=FALSE,xlab="Contig",ylab=myylab,main=mymain,ylim=c(min(manhatdat$P),max(manhatdat$P)),
suggestiveline=FALSE,genomewideline=tempthresh,cex=point_cex)
} else {
manhattan(manhatdat,logp=FALSE,xlab="Contig",ylab=myylab,main=mymain,ylim=c(min(manhatdat$P),max(manhatdat$P)),
suggestiveline=FALSE,genomewideline=FALSE,cex=point_cex)
}
mtext(all_labels[labels_index],3, line=tlin, adj=0, cex=label_cex)
labels_index = labels_index + 1
}
print("done5")
######
dev.off()
}
# LFMM input file name is assembled as paste(prefix, z, suffix).
myinpref_lfmm <- "/home/jbaldwin/new_home/wild_shrimp_data/for_shrimp_paper_1/bwa_alignments/snp_tables/deduped/new_downsampled/full_fst_dsbig_fused/bayenv_5repavg/v4_full_data_fixedinput_splitout/lfmm_multiwin/dsbig_snp_freqmat_fused_cens.txt.K9.s"
myinsuf_lfmm <- ".9.zscoreavg.withchroms.withhead.sorted.multiwin_1000.RDS"
myin_fst <- "/home/jbaldwin/new_home/wild_shrimp_data/for_shrimp_paper_1/bwa_alignments/snp_tables/deduped/new_downsampled/full_fst_dsbig_fused/python_fst/fst_multiwin/only-PASS-Q30-SNPS-cov_v2_ds_7_11_v1_fused_sorted_fst_mean_multiwin_snpnames_chromnums.RDS"
# Input RDS with combined XtX and Bayes-factor statistics (BF shares the file).
myin_xtx <- "dsbig_fused_xtx_and_bf_5combo_withchroms_2sorted_cens_withhead_uniq_multiwin_plusxtx.RDS"
myin_bf <- myin_xtx
myoutpref <- "manhat_plot_allstats_withlabels_withlines_5avg"
mynpops <- 11
# Window sizes: single-SNP (1) and 25-SNP averages; LFMM uses 1 and 1000.
mywins <- c(1,25)
mylfmmwins <- c(1,1000)
myzs <- c(8)
mybfs <- c(23)
myfsts <- c("mean")
#mybfs_conv1 <- (mybfs - 2) / 3 + 1
# Column index of the chosen BF statistic in the threshold tables.
mybfs_conv1 <- mybfs - 1
# Single-SNP significance thresholds read from precomputed tables.
myxtx_thresh <- read.table("xtx_thresh_11pop.txt")[1,1]
mybf_thresh <- read.table("bf_thresh_11pop.txt")[1,mybfs_conv1]
mylfmm_thresh <- read.table("lfmm_thresh_11pop.txt")[1,((myzs-1)*3)+1]
myfst_thresh <- read.table("gene_association/fst_significance_data/fst_mean_sigthresh_fst_nowin.txt")[1,1]
#myxtx_thresh_win <- read.table("xtx_thresh_11pop_25win.txt")[1,1]
# Windowed XtX threshold is hard-coded rather than read from a table.
myxtx_thresh_win <- 25
mybf_thresh_win <- read.table("bf_thresh_11pop_25win.txt")[1,mybfs_conv1]
mylfmm_thresh_win <- read.table("lfmm_thresh_11pop_100win.txt")[1,((myzs-1)*3)+1]
myfst_thresh_win <- read.table("gene_association/fst_significance_data/fst_mean_sigthresh_fst.txt")[1,1]
# Build the combined multi-panel Manhattan plot defined above.
manhatplot_fst_lfmm_xtx_bf(myin_fst,myin_xtx,myin_bf,myinpref_lfmm,myinsuf_lfmm,
myoutpref,mynpops,mywins,
myzs,mybfs,myfsts,
xtx_thresh=myxtx_thresh,bf_thresh=mybf_thresh,lfmm_thresh=mylfmm_thresh,fst_thresh=myfst_thresh,
xtx_winthresh=myxtx_thresh_win,bf_winthresh=mybf_thresh_win,lfmm_winthresh=mylfmm_thresh_win,fst_winthresh=myfst_thresh_win,
lfmmwins=mylfmmwins)
# a<- rnorm(1000,0,1)
# b<-rnorm(1000,0,2)
# c<-rnorm(1000,0,3)
# d<-rnorm(1000,0,4)
# #quartz(w=6,h=8)
# par(mfrow=c(2,2))
# #par(mai=c(1,0,1,1))
# par(mar=c(3,2,4,1))
# #par(plt=c(1.1,1.1,1.1,1.1))
# tlin=2
# hist(a)
# mtext("A",3, line=tlin, adj=0, cex=2)
# hist(b)
# mtext("B",3, line=tlin, adj=0, cex=2)
# hist(c)
# mtext("C",3, line=tlin, adj=0, cex=2)
# hist(d)
# mtext("D",3, line=tlin, adj=0, cex=2)
|
#' Begin constructing a ggghost cache
#'
#' The data and initial \code{ggplot()} call are stored as a list (call) with
#' attribute (data).
#'
#' @details The data must be passed into the \code{ggplot} call directly.
#' Passing this in via a magrittr pipe remains as a future improvement. The
#' newly created \code{ggghost} object is a list of length 1 containing the
#' \code{ggplot} call, with attribute \code{data}; another list, containing
#' the \code{data_name} and \code{data} itself.
#'
#' @param lhs LHS of call
#' @param rhs RHS of call
#'
#' @return Assigns the \code{ggghost} structure to the \code{lhs} symbol.
#'
#' @export
#' @examples
#' ## create a ggghost object
#' tmpdata <- data.frame(x = 1:100, y = rnorm(100))
#'
#' z %g<% ggplot(tmpdata, aes(x,y))
`%g<%` <- function(lhs, rhs) {
# Custom assignment operator: captures the RHS ggplot() call unevaluated and
# binds a new ggghost object (call + snapshot of its data) to the LHS symbol.
match <- match.call()
match_lhs <- match[[2]]
match_rhs <- match[[3]]
parent <- parent.frame()
# A ggghost object is a one-element list holding the unevaluated ggplot call.
new_obj <- structure(list(as.call(match_rhs)), class = c("ggghost", "gg"))
# Rewrite the captured "ggplot(...)" text into an "identify_data(...)" call and
# evaluate it to recover the name of the data argument (see identify_data()).
data_name <- eval(parse(text = sub("ggplot[^(]*", "identify_data", deparse(summary(new_obj)[[1]]))))
# Snapshot the data itself from the caller's environment, keyed by its name.
attr(new_obj, "data") <- list(data_name = data_name,
data = get(data_name, envir = parent))
# Bind the finished ggghost object to the LHS symbol in the caller's frame.
assign(as.character(match_lhs), new_obj, envir = parent)
return(invisible(NULL))
}
#' Identify the data passed to ggplot
#'
#' Duplicate arguments to ggplot2::ggplot with the intent that the \code{data}
#' argument can be captured and identified.
#'
#' @inheritParams ggplot2::ggplot
#'
#' @return Name of the \code{data.frame} passed to \code{ggplot}
#'
#' @keywords internal
# Mirror of ggplot2::ggplot()'s signature used purely to capture which
# expression was supplied as `data`; no argument is ever evaluated.
identify_data <- function(data, mapping = ggplot2::aes(), ..., environment = parent.frame()) {
  this_call <- match.call()
  captured <- this_call[["data"]]
  if (is.null(captured)) {
    stop("could not identify data from call.")
  }
  as.character(captured)
}
#' Reports whether x is a ggghost object
#'
#' @param x An object to test
#'
#' @return logical; \code{TRUE} if \code{x} inherits class \code{ggghost}
#' @export
is.ggghost <- function(x) {
  # TRUE when x carries the "ggghost" S3 class.
  inherits(x, "ggghost")
}
#' Add a New ggplot Component to a ggghost Object
#'
#' This operator allows you to add objects to a ggghost object in the style of @hrbrmstr.
#'
#' @param e1 An object of class \code{ggghost}
#' @param e2 A component to add to \code{e1}
#'
#' @return Appends the \code{e2} call to the \code{ggghost} structure
#' @rdname plus-ggghost
#'
#' @importFrom ggplot2 is.theme is.ggplot %+%
#' @export
#'
#' @examples
#' ## create a ggghost object
#' tmpdata <- data.frame(x = 1:100, y = rnorm(100))
#'
#' z %g<% ggplot(tmpdata, aes(x,y))
#' z <- z + geom_point(col = "steelblue")
#' z <- z + theme_bw()
#' z <- z + labs(title = "My cool ggplot")
#' z <- z + labs(x = "x axis", y = "y axis")
#' z <- z + geom_smooth()
"+.gg" <- function(e1, e2) {
if (is.ggghost(e1)) {
new_obj <- structure(append(e1, match.call()[[3]]), class = c("ggghost", "gg"))
attr(new_obj, "data") <- attr(e1, "data")
if (!is.null(attr(e1, "suppdata"))) {
attr(new_obj, "suppdata") <- attr(e1, "suppdata")
}
return(new_obj)
} else {
return(e1 %+% e2)
}
}
#' Remove a call from a ggghost object
#'
#' Calls can be removed from the \code{ggghost} object via regex matching of the
#' function name. All matching calls will be removed based on the match to the
#' string up to the first bracket, so any arguments are irrelevant.
#'
#' For example, subtracting \code{geom_line()} will remove all calls matching
#' \code{geom_line} regardless of their arguments.
#'
#' `labs()` has been identified as a special case, as it requires an argument in
#' order to be recognised as a valid function. Thus, trying to remove it with an
#' empty argument will fail. That said, the argument doesn't need to match, so
#' it can be populated with a dummy string or anything that evaluates in scope.
#' See examples.
#'
#' @param e1 An object of class \code{ggghost}
#' @param e2 A component to remove from \code{e1}
#'
#' @return A \code{ggghost} structure with calls matching \code{e2} removed,
#' otherwise the same as \code{e1}
#'
#' @rdname minus-ggghost
#' @export
#'
#' @examples
#' ## create a ggghost object
#' tmpdata <- data.frame(x = 1:100, y = rnorm(100))
#'
#' z %g<% ggplot(tmpdata, aes(x,y))
#' z <- z + geom_point(col = "steelblue")
#' z <- z + theme_bw()
#' z <- z + labs(title = "My cool ggplot")
#' z <- z + labs(x = "x axis", y = "y axis")
#' z <- z + geom_smooth()
#'
#' ## remove the geom_smooth
#' z - geom_smooth()
#'
#' ## remove the labels
#' ## NOTE: argument must be present and able to be
#' ## evaluated in scope
#' z - labs(TRUE) # works
#' z - labs(title) # works because of title(), but removes all labs()
"-.gg" <- function(e1, e2) {
if (ggplot2::is.theme(e1)) stop("not implemented for ggplot2 themes")
else if (ggplot2::is.ggplot(e1)) stop("not implemented for ggplot2 plots")
else if (is.ggghost(e1)) {
call_to_remove <- match.call()[[3]]
if (!any(grepl(sub("\\(.*$", "", call_to_remove)[1], as.character(summary(e1, combine = TRUE))))) {
warning("ggghostbuster: can't find that call in the call list", call. = FALSE)
return(e1)
} else if (sub("\\(.*$", "", call_to_remove)[1] == "ggplot") {
warning("ggghostbuster: can't remove the ggplot call itself", call. = FALSE)
return(e1)
}
new_obj <- structure(unclass(e1)[-grep(sub("\\(.*$", "", call_to_remove)[1], unclass(e1))], class = c("ggghost", "gg"))
attr(new_obj, "data") <- attr(e1, "data")
if (!is.null(attr(e1, "suppdata"))) {
attr(new_obj, "suppdata") <- attr(e1, "suppdata")
}
return(new_obj)
}
}
#' Collect ggghost calls and produce the ggplot output
#'
#' @param x A ggghost object to be made into a ggplot grob
#' @param ... Not used, provided for \code{print.default} generic consistency.
#'
#' @return The ggplot plot data (invisibly). Used for the side-effect of producing a ggplot plot.
#' @export
print.ggghost <- function(x, ...) {
# Restore the captured data (and supplementary data) into this frame so the
# reconstructed ggplot call can find it by name.
recover_data(x, supp = TRUE)
# Concatenate the stored calls with " + " and evaluate as one ggplot pipeline.
plotdata <- eval(parse(text = paste(x, collapse = " + ")))
print(plotdata)
return(invisible(plotdata))
}
#' List the calls contained in a ggghost object
#'
#' Summarises a ggghost object by presenting the contained calls in the order
#' they were added. Optionally concatenates these into a single ggplot call.
#'
#' @details The data is also included in ggghost objects. If this is also
#' desired in the output, use \code{str}. See example.
#'
#' @param object A ggghost object to present
#' @param ... Mainly provided for \code{summary.default} generic consistency.
#' When \code{combine} is passed as an argument (arbitrary value) the list of
#' calls is concatenated into a single string as one might write the ggplot
#' call.
#'
#' @return Either a list of ggplot calls or a string of such concatenated with " + "
#' @export
#'
#' @examples
#' ## present the ggghost object as a list
#' tmpdata <- data.frame(x = 1:100, y = rnorm(100))
#'
#' z %g<% ggplot(tmpdata, aes(x,y))
#' z <- z + geom_point(col = "steelblue")
#' summary(z)
#'
#' ## present the ggghost object as a string
#' summary(z, combine = TRUE) # Note, value of 'combine' is arbitrary
#'
#' ## to inspect the data structure also captured, use str()
#' str(z)
# Present a ggghost object either as its list of calls (default) or, when any
# argument named "combine" is supplied, as one " + "-joined ggplot string.
summary.ggghost <- function(object, ...) {
  passed_args <- eval(substitute(alist(...)))
  if ("combine" %in% names(passed_args)) {
    paste(object, collapse = " + ")
  } else {
    utils::head(object, n = length(object))
  }
}
#' Extract a subset of a ggghost object
#'
#' Alternative to subtracting calls using `-.gg`, this method allows one to
#' select the desired components of the available calls and have those
#' evaluated.
#'
#' @param x A ggghost object to subset
#' @param ... A logical expression indicating which elements to select.
#' Typically a vector of list numbers, but potentially a vector of logicals or
#' logical expressions.
#'
#' @return Another ggghost object containing only the calls selected.
#' @export
#'
#' @examples
#' ## create a ggghost object
#' tmpdata <- data.frame(x = 1:100, y = rnorm(100))
#'
#' z %g<% ggplot(tmpdata, aes(x,y))
#' z <- z + geom_point(col = "steelblue")
#' z <- z + theme_bw()
#' z <- z + labs(title = "My cool ggplot")
#' z <- z + labs(x = "x axis", y = "y axis")
#' z <- z + geom_smooth()
#'
#' ## remove the labels and theme
#' subset(z, c(1,2,6))
#' ## or
#' subset(z, c(TRUE,TRUE,FALSE,FALSE,FALSE,TRUE))
# Keep only the selected calls of a ggghost object, preserving its class and
# its captured data (and supplementary data) attributes.
subset.ggghost <- function(x, ...) {
  kept_calls <- unclass(x)[...]
  result <- structure(kept_calls, class = c("ggghost", "gg"))
  attr(result, "data") <- attr(x, "data")
  supp <- attr(x, "suppdata")
  if (!is.null(supp)) {
    attr(result, "suppdata") <- supp
  }
  result
}
#' Bring a ggplot to life (re-animate)
#'
#' Creates an animation showing the stepwise process of building up a ggplot.
#' Successively adds calls from a ggghost object and then combines these into an
#' animated GIF.
#'
#' @param object A ggghost object to animate
#' @param gifname Output filename to save the .gif to (not including any path,
#' will be saved to current directory)
#' @param interval A positive number to set the time interval of the animation
#' (unit in seconds); see \code{animation::ani.options}
#' @param ani.width width of image frames (unit in px); see
#' \code{animation::ani.options}
#' @param ani.height height of image frames (unit in px); see
#' \code{animation::ani.options}
#'
#' @return \code{TRUE} if it gets that far
#'
#' @importFrom animation ani.options saveGIF
#' @export
#' @rdname reanimate
#'
#' @examples
#' \dontrun{
#' ## create an animation showing the process of building up a plot
#' reanimate(z, "mycoolplot.gif")
#' }
reanimate <- function(object, gifname = "ggghost.gif", interval = 1, ani.width = 600, ani.height = 600) {
# An animation needs at least two frames (the ggplot call plus one layer).
stopifnot(length(object) > 1)
animation::ani.options(interval = interval, ani.width = ani.width, ani.height = ani.height)
animation::saveGIF({
# Make the captured data available so the stored calls evaluate.
recover_data(object, supp = TRUE)
# First frame: the bare ggplot() call.
ggtmp <- object[[1]]
print(eval(ggtmp))
# Each subsequent frame adds one more stored call to the plot.
for (i in 2:length(object)) {
ggtmp <- eval(ggtmp) + eval(object[[i]])
print(ggtmp)
}
}, movie.name = gifname)
return(invisible(TRUE))
}
#' @export
#' @rdname reanimate
lazarus <- reanimate
#' Recover data Stored in a ggghost object
#'
#' The data used to generate a plot is an essential requirement for a
#' reproducible graphic. This is somewhat available from a ggplot \code{grob}
#' (in raw form) but it is not easily accessible, and isn't named the same way
#' as the original call.
#'
#' This function retrieves the data from the ggghost object as it was when it
#' was originally called.
#'
#' If supplementary data has also been attached using \code{\link{supp_data}}
#' then this will also be recovered (if requested).
#'
#' When used interactively, a warning will be produced if the data to be
#' extracted exists in the workspace but is not identical to the captured version.
#'
#' @param x A ggghost object from which to extract the data.
#' @param supp (logical) Should the supplementary data be extracted also?
#'
#' @return A \code{data.frame} of the original data, named as it was when used
#' in \code{ggplot(data)}
#' @export
recover_data <- function(x, supp = TRUE) {
## create a local copy of the data
y <- yname <- attr(x, "data")$data_name
assign(y, attr(x, "data")$data, envir = environment())
## if the data exists in the calling frame, but has changed since
## being saved to the ggghost object, produce a warning (but do it anyway)
parent <- parent.frame()
optout_data <- ""
if (exists(y, where = parent)) {
if (!identical(get(y, envir = environment()), get(y, envir = parent))) {
warning(paste0("Potentially overwriting object ", yname, " in working space, but object has changed"), call. = FALSE, immediate. = TRUE)
## this should really be ggghost::in_the_shell as per @hrbrmstr's suggestion
# Interactive sessions get a chance to veto the overwrite.
if (interactive()) {
optout_data <- readline("Press 'n' to opt out of overwriting ")
}
}
}
# Restore the captured data into the caller's frame unless the user opted out.
if (optout_data != "n") assign(yname, attr(x, "data")$data, envir = parent)
if (supp) {
# Same overwrite-check-and-restore sequence for supplementary data, if any.
optout_supp_data <- ""
supp_list <- supp_data(x)
if (length(supp_list) > 0) {
if (exists(supp_list[[1]], where = parent)) {
if (!identical(supp_list[[2]], get(supp_list[[1]], envir = parent))) {
warning(paste0("Potentially overwriting object ", supp_list[[1]], " in working space, but object has changed"), call. = FALSE, immediate. = TRUE)
if (interactive()) {
optout_supp_data <- readline("Press 'n' to opt out of overwriting ")
}
}
}
if (optout_supp_data != "n") assign(supp_list[[1]], supp_list[[2]], envir = parent)
}
}
return(invisible(NULL))
}
#' Inspect the supplementary data attached to a ggghost object
#'
#' @param x A ggghost object
#'
#' @return A list with two elements: the name of the supplementary data, and the
#' supplementary data itself
#'
#' @export
supp_data <- function(x) {
  # Supplementary data lives in the "suppdata" attribute; NULL when absent.
  attr(x, "suppdata")
}
#' Attach supplementary data to a ggghost object
#'
#' @param x A ggghost object to which the supplementary data should be
#' attached
#' @param value Supplementary data to attach to the ggghost object, probably
#' used as an additional data input to a \code{scale_*} or \code{geom_*} call
#'
#' @return The original object with \code{suppdata} attribute
#'
#' @export
"supp_data<-" <- function(x, value) {
if (is.ggghost(x)) {
if (length(attr(x, "suppdata")) > 0) {
warning("ggghostbuster: can't assign more than one supplementary data set to a ggghost object.", call. = FALSE)
return(x)
}
attr(x, "suppdata") <- list(supp_data_name = as.character(substitute(value)),
supp_data = value)
} else {
stop("attempt to attach supplementary data to a non-ggghost object")
}
return(x)
} | /R/ghost.R | no_license | cran/ggghost | R | false | false | 14,681 | r | #' Begin constructing a ggghost cache
#'
#' The data and initial \code{ggpot()} call are stored as a list (call) with
#' attribute (data).
#'
#' @details The data must be passed into the \code{ggplot} call directly.
#' Passing this in via a magrittr pipe remains as a future improvement. The
#' newly created \code{ggghost} object is a list of length 1 containing the
#' \code{ggplot} call, with attribute \code{data}; another list, containing
#' the \code{data_name} and \code{data} itself.
#'
#' @param lhs LHS of call
#' @param rhs RHS of call
#'
#' @return Assigns the \code{ggghost} structure to the \code{lhs} symbol.
#'
#' @export
#' @examples
#' ## create a ggghost object
#' tmpdata <- data.frame(x = 1:100, y = rnorm(100))
#'
#' z %g<% ggplot(tmpdata, aes(x,y))
`%g<%` <- function(lhs, rhs) {
# Custom assignment operator: captures the RHS ggplot() call unevaluated and
# binds a new ggghost object (call + snapshot of its data) to the LHS symbol.
match <- match.call()
match_lhs <- match[[2]]
match_rhs <- match[[3]]
parent <- parent.frame()
# A ggghost object is a one-element list holding the unevaluated ggplot call.
new_obj <- structure(list(as.call(match_rhs)), class = c("ggghost", "gg"))
# Rewrite the captured "ggplot(...)" text into an "identify_data(...)" call and
# evaluate it to recover the name of the data argument (see identify_data()).
data_name <- eval(parse(text = sub("ggplot[^(]*", "identify_data", deparse(summary(new_obj)[[1]]))))
# Snapshot the data itself from the caller's environment, keyed by its name.
attr(new_obj, "data") <- list(data_name = data_name,
data = get(data_name, envir = parent))
# Bind the finished ggghost object to the LHS symbol in the caller's frame.
assign(as.character(match_lhs), new_obj, envir = parent)
return(invisible(NULL))
}
#' Identify the data passed to ggplot
#'
#' Duplicate arguments to ggplot2::ggplot with the intent that the \code{data}
#' argument can be captured and identified.
#'
#' @inheritParams ggplot2::ggplot
#'
#' @return Name of the \code{data.frame} passed to \code{ggplot}
#'
#' @keywords internal
# Mirror of ggplot2::ggplot()'s signature used purely to capture which
# expression was supplied as `data`; no argument is ever evaluated.
identify_data <- function(data, mapping = ggplot2::aes(), ..., environment = parent.frame()) {
  this_call <- match.call()
  captured <- this_call[["data"]]
  if (is.null(captured)) {
    stop("could not identify data from call.")
  }
  as.character(captured)
}
#' Reports whether x is a ggghost object
#'
#' @param x An object to test
#'
#' @return logical; \code{TRUE} if \code{x} inherits class \code{ggghost}
#' @export
is.ggghost <- function(x) {
  # TRUE when x carries the "ggghost" S3 class.
  inherits(x, "ggghost")
}
#' Add a New ggplot Component to a ggghost Object
#'
#' This operator allows you to add objects to a ggghost object in the style of @hrbrmstr.
#'
#' @param e1 An object of class \code{ggghost}
#' @param e2 A component to add to \code{e1}
#'
#' @return Appends the \code{e2} call to the \code{ggghost} structure
#' @rdname plus-ggghost
#'
#' @importFrom ggplot2 is.theme is.ggplot %+%
#' @export
#'
#' @examples
#' ## create a ggghost object
#' tmpdata <- data.frame(x = 1:100, y = rnorm(100))
#'
#' z %g<% ggplot(tmpdata, aes(x,y))
#' z <- z + geom_point(col = "steelblue")
#' z <- z + theme_bw()
#' z <- z + labs(title = "My cool ggplot")
#' z <- z + labs(x = "x axis", y = "y axis")
#' z <- z + geom_smooth()
"+.gg" <- function(e1, e2) {
if (is.ggghost(e1)) {
new_obj <- structure(append(e1, match.call()[[3]]), class = c("ggghost", "gg"))
attr(new_obj, "data") <- attr(e1, "data")
if (!is.null(attr(e1, "suppdata"))) {
attr(new_obj, "suppdata") <- attr(e1, "suppdata")
}
return(new_obj)
} else {
return(e1 %+% e2)
}
}
#' Remove a call from a ggghost object
#'
#' Calls can be removed from the \code{ggghost} object via regex matching of the
#' function name. All matching calls will be removed based on the match to the
#' string up to the first bracket, so any arguments are irrelevant.
#'
#' For example, subtracting \code{geom_line()} will remove all calls matching
#' \code{geom_line} regardless of their arguments.
#'
#' `labs()` has been identified as a special case, as it requires an argument in
#' order to be recognised as a valid function. Thus, trying to remove it with an
#' empty argument will fail. That said, the argument doesn't need to match, so
#' it can be populated with a dummy string or anything that evaluates in scope.
#' See examples.
#'
#' @param e1 An object of class \code{ggghost}
#' @param e2 A component to remove from \code{e1}
#'
#' @return A \code{ggghost} structure with calls matching \code{e2} removed,
#' otherwise the same as \code{e1}
#'
#' @rdname minus-ggghost
#' @export
#'
#' @examples
#' ## create a ggghost object
#' tmpdata <- data.frame(x = 1:100, y = rnorm(100))
#'
#' z %g<% ggplot(tmpdata, aes(x,y))
#' z <- z + geom_point(col = "steelblue")
#' z <- z + theme_bw()
#' z <- z + labs(title = "My cool ggplot")
#' z <- z + labs(x = "x axis", y = "y axis")
#' z <- z + geom_smooth()
#'
#' ## remove the geom_smooth
#' z - geom_smooth()
#'
#' ## remove the labels
#' ## NOTE: argument must be present and able to be
#' ## evaluated in scope
#' z - labs(TRUE) # works
#' z - labs(title) # works because of title(), but removes all labs()
"-.gg" <- function(e1, e2) {
if (ggplot2::is.theme(e1)) stop("not implemented for ggplot2 themes")
else if (ggplot2::is.ggplot(e1)) stop("not implemented for ggplot2 plots")
else if (is.ggghost(e1)) {
call_to_remove <- match.call()[[3]]
if (!any(grepl(sub("\\(.*$", "", call_to_remove)[1], as.character(summary(e1, combine = TRUE))))) {
warning("ggghostbuster: can't find that call in the call list", call. = FALSE)
return(e1)
} else if (sub("\\(.*$", "", call_to_remove)[1] == "ggplot") {
warning("ggghostbuster: can't remove the ggplot call itself", call. = FALSE)
return(e1)
}
new_obj <- structure(unclass(e1)[-grep(sub("\\(.*$", "", call_to_remove)[1], unclass(e1))], class = c("ggghost", "gg"))
attr(new_obj, "data") <- attr(e1, "data")
if (!is.null(attr(e1, "suppdata"))) {
attr(new_obj, "suppdata") <- attr(e1, "suppdata")
}
return(new_obj)
}
}
#' Collect ggghost calls and produce the ggplot output
#'
#' @param x A ggghost object to be made into a ggplot grob
#' @param ... Not used, provided for \code{print.default} generic consistency.
#'
#' @return The ggplot plot data (invisibly). Used for the side-effect of producing a ggplot plot.
#' @export
print.ggghost <- function(x, ...) {
# Restore the captured data (and supplementary data) into this frame so the
# reconstructed ggplot call can find it by name.
recover_data(x, supp = TRUE)
# Concatenate the stored calls with " + " and evaluate as one ggplot pipeline.
plotdata <- eval(parse(text = paste(x, collapse = " + ")))
print(plotdata)
return(invisible(plotdata))
}
#' List the calls contained in a ggghost object
#'
#' Summarises a ggghost object by presenting the contained calls in the order
#' they were added. Optionally concatenates these into a single ggplot call.
#'
#' @details The data is also included in ggghost objects. If this is also
#' desired in the output, use \code{str}. See example.
#'
#' @param object A ggghost object to present
#' @param ... Mainly provided for \code{summary.default} generic consistency.
#' When \code{combine} is passed as an argument (arbitrary value) the list of
#' calls is concatenated into a single string as one might write the ggplot
#' call.
#'
#' @return Either a list of ggplot calls or a string of such concatenated with " + "
#' @export
#'
#' @examples
#' ## present the ggghost object as a list
#' tmpdata <- data.frame(x = 1:100, y = rnorm(100))
#'
#' z %g<% ggplot(tmpdata, aes(x,y))
#' z <- z + geom_point(col = "steelblue")
#' summary(z)
#'
#' ## present the ggghost object as a string
#' summary(z, combine = TRUE) # Note, value of 'combine' is arbitrary
#'
#' ## to inspect the data structure also captured, use str()
#' str(z)
# Present a ggghost object either as its list of calls (default) or, when any
# argument named "combine" is supplied, as one " + "-joined ggplot string.
summary.ggghost <- function(object, ...) {
  passed_args <- eval(substitute(alist(...)))
  if ("combine" %in% names(passed_args)) {
    paste(object, collapse = " + ")
  } else {
    utils::head(object, n = length(object))
  }
}
#' Extract a subset of a ggghost object
#'
#' Alternative to subtracting calls using `-.gg`, this method allows one to
#' select the desired components of the available calls and have those
#' evaluated.
#'
#' @param x A ggghost object to subset
#' @param ... A logical expression indicating which elements to select.
#' Typically a vector of list numbers, but potentially a vector of logicals or
#' logical expressions.
#'
#' @return Another ggghost object containing only the calls selected.
#' @export
#'
#' @examples
#' ## create a ggghost object
#' tmpdata <- data.frame(x = 1:100, y = rnorm(100))
#'
#' z %g<% ggplot(tmpdata, aes(x,y))
#' z <- z + geom_point(col = "steelblue")
#' z <- z + theme_bw()
#' z <- z + labs(title = "My cool ggplot")
#' z <- z + labs(x = "x axis", y = "y axis")
#' z <- z + geom_smooth()
#'
#' ## remove the labels and theme
#' subset(z, c(1,2,6))
#' ## or
#' subset(z, c(TRUE,TRUE,FALSE,FALSE,FALSE,TRUE))
# Keep only the selected calls of a ggghost object, preserving its class and
# its captured data (and supplementary data) attributes.
subset.ggghost <- function(x, ...) {
  kept_calls <- unclass(x)[...]
  result <- structure(kept_calls, class = c("ggghost", "gg"))
  attr(result, "data") <- attr(x, "data")
  supp <- attr(x, "suppdata")
  if (!is.null(supp)) {
    attr(result, "suppdata") <- supp
  }
  result
}
#' Bring a ggplot to life (re-animate)
#'
#' Creates an animation showing the stepwise process of building up a ggplot.
#' Successively adds calls from a ggghost object and then combines these into an
#' animated GIF.
#'
#' @param object A ggghost object to animate
#' @param gifname Output filename to save the .gif to (not including any path,
#' will be saved to current directory)
#' @param interval A positive number to set the time interval of the animation
#' (unit in seconds); see \code{animation::ani.options}
#' @param ani.width width of image frames (unit in px); see
#' \code{animation::ani.options}
#' @param ani.height height of image frames (unit in px); see
#' \code{animation::ani.options}
#'
#' @return \code{TRUE} if it gets that far
#'
#' @importFrom animation ani.options saveGIF
#' @export
#' @rdname reanimate
#'
#' @examples
#' \dontrun{
#' ## create an animation showing the process of building up a plot
#' reanimate(z, "mycoolplot.gif")
#' }
reanimate <- function(object, gifname = "ggghost.gif", interval = 1, ani.width = 600, ani.height = 600) {
# An animation needs at least two frames (the ggplot call plus one layer).
stopifnot(length(object) > 1)
animation::ani.options(interval = interval, ani.width = ani.width, ani.height = ani.height)
animation::saveGIF({
# Make the captured data available so the stored calls evaluate.
recover_data(object, supp = TRUE)
# First frame: the bare ggplot() call.
ggtmp <- object[[1]]
print(eval(ggtmp))
# Each subsequent frame adds one more stored call to the plot.
for (i in 2:length(object)) {
ggtmp <- eval(ggtmp) + eval(object[[i]])
print(ggtmp)
}
}, movie.name = gifname)
return(invisible(TRUE))
}
#' @export
#' @rdname reanimate
lazarus <- reanimate
#' Recover data Stored in a ggghost object
#'
#' The data used to generate a plot is an essential requirement for a
#' reproducible graphic. This is somewhat available from a ggplot \code{grob}
#' (in raw form) but it is not easily accessible, and isn't named the same way
#' as the original call.
#'
#' This function retrieves the data from the ggghost object as it was when it
#' was originally called.
#'
#' If supplementary data has also been attached using \code{\link{supp_data}}
#' then this will also be recovered (if requested).
#'
#' When used interactively, a warning will be produced if the data to be
#' extracted exists in the workspace but is not identical to the captured version.
#'
#' @param x A ggghost object from which to extract the data.
#' @param supp (logical) Should the supplementary data be extracted also?
#'
#' @return A \code{data.frame} of the original data, named as it was when used
#' in \code{ggplot(data)}
#' @export
recover_data <- function(x, supp = TRUE) {
## create a local copy of the data
y <- yname <- attr(x, "data")$data_name
assign(y, attr(x, "data")$data, envir = environment())
## if the data exists in the calling frame, but has changed since
## being saved to the ggghost object, produce a warning (but do it anyway)
parent <- parent.frame()
optout_data <- ""
if (exists(y, where = parent)) {
if (!identical(get(y, envir = environment()), get(y, envir = parent))) {
warning(paste0("Potentially overwriting object ", yname, " in working space, but object has changed"), call. = FALSE, immediate. = TRUE)
## this should really be ggghost::in_the_shell as per @hrbrmstr's suggestion
# Interactive sessions get a chance to veto the overwrite.
if (interactive()) {
optout_data <- readline("Press 'n' to opt out of overwriting ")
}
}
}
# Restore the captured data into the caller's frame unless the user opted out.
if (optout_data != "n") assign(yname, attr(x, "data")$data, envir = parent)
if (supp) {
# Same overwrite-check-and-restore sequence for supplementary data, if any.
optout_supp_data <- ""
supp_list <- supp_data(x)
if (length(supp_list) > 0) {
if (exists(supp_list[[1]], where = parent)) {
if (!identical(supp_list[[2]], get(supp_list[[1]], envir = parent))) {
warning(paste0("Potentially overwriting object ", supp_list[[1]], " in working space, but object has changed"), call. = FALSE, immediate. = TRUE)
if (interactive()) {
optout_supp_data <- readline("Press 'n' to opt out of overwriting ")
}
}
}
if (optout_supp_data != "n") assign(supp_list[[1]], supp_list[[2]], envir = parent)
}
}
return(invisible(NULL))
}
#' Inspect the supplementary data attached to a ggghost object
#'
#' @param x A ggghost object
#'
#' @return A list with two elements: the name of the supplementary data, and the
#' supplementary data itself
#'
#' @export
supp_data <- function(x) {
  # Supplementary data lives in the "suppdata" attribute; NULL when absent.
  attr(x, "suppdata")
}
#' Attach supplementary data to a ggghost object
#'
#' @param x A ggghost object to which the supplementary data should be
#' attached
#' @param value Supplementary data to attach to the ggghost object, probably
#' used as an additional data input to a \code{scale_*} or \code{geom_*} call
#'
#' @return The original object with \code{suppdata} attribute
#'
#' @export
"supp_data<-" <- function(x, value) {
if (is.ggghost(x)) {
if (length(attr(x, "suppdata")) > 0) {
warning("ggghostbuster: can't assign more than one supplementary data set to a ggghost object.", call. = FALSE)
return(x)
}
attr(x, "suppdata") <- list(supp_data_name = as.character(substitute(value)),
supp_data = value)
} else {
stop("attempt to attach supplementary data to a non-ggghost object")
}
return(x)
} |
# Plot 2: Global Active Power over time as a line chart, saved to plot2.png.
plot2 <- function () {
#import data into table (only the data we need!)
# Reads a fixed 2880-row slice starting after row 66637; assumes the file
# layout (and the rows of interest) is unchanged -- TODO confirm offsets.
pwr_cns <- read.table("household_power_consumption.txt",header=FALSE,
sep=";",colClasses="character",na.strings="?",
nrow=2880,skip=66637)
#due to skips on import, we need to name the columns
# The header row is re-read separately because skip= bypassed it above.
clnms <- readLines("household_power_consumption.txt",n=1)
clnms <- strsplit(clnms,split=";")
clnms <- unlist(clnms)
colnames(pwr_cns) <- clnms
#create datetime object for plotting
datetime <- paste(pwr_cns$Date,pwr_cns$Time)
datetime <- strptime(datetime,"%d/%m/%Y %H:%M:%S")
#time to reclass the columns
# Columns 3-9 were read as character (na.strings handling); convert to numeric.
for (i in 3:9) {pwr_cns[,i]<-as.numeric(pwr_cns[,i])}
#let the plotting begin
# Draw an empty frame first, then add the line layer.
png(file="plot2.png",480,480)
plot(datetime,pwr_cns$Global_active_power,xlab="",
ylab="Global Active Power (kilowatts)",type="n")
lines(datetime,pwr_cns$Global_active_power)
dev.off()
} | /plot2.R | no_license | timsturges/ExData_Plotting1 | R | false | false | 1,077 | r | plot2 <- function () {
#import data into table (only the data we need!)
pwr_cns <- read.table("household_power_consumption.txt",header=FALSE,
sep=";",colClasses="character",na.strings="?",
nrow=2880,skip=66637)
#due to skips on import, we need to name the columns
clnms <- readLines("household_power_consumption.txt",n=1)
clnms <- strsplit(clnms,split=";")
clnms <- unlist(clnms)
colnames(pwr_cns) <- clnms
#create datetime object for plotting
datetime <- paste(pwr_cns$Date,pwr_cns$Time)
datetime <- strptime(datetime,"%d/%m/%Y %H:%M:%S")
#time to reclass the columns
for (i in 3:9) {pwr_cns[,i]<-as.numeric(pwr_cns[,i])}
#let the plotting begin
png(file="plot2.png",480,480)
plot(datetime,pwr_cns$Global_active_power,xlab="",
ylab="Global Active Power (kilowatts)",type="n")
lines(datetime,pwr_cns$Global_active_power)
dev.off()
} |
# Fetch soil moisture data in order to create example data
# Hydrologic Response Units = HRUs
library(data.table)
#library(sbtools)
### Example modeled data from Blodgett's code
# Get soil moisture csv
# Downloaded from the following link because using sbtools was taking wayyyyyyyyyy
# too long. Took over an hour and a half and then I gave up.
# https://www.sciencebase.gov/catalog/item/5a4ea3bee4b0d05ee8c6647b
# IF science base download would work, I would use this code:
# authenticate_sb()
# nhm_prms_sbid <- "5a4ea3bee4b0d05ee8c6647b"
# nhm_prms_sbitem <- item_get(nhm_prms_sbid)
# nhm_prms_files <- item_list_files(nhm_prms_sbitem)
# soilm_i <- grep("nhru_sroff", nhm_prms_files$fname)
# soilm_data_fn <- nhm_prms_files$url[soilm_i]
# URL: https://www.sciencebase.gov/catalog/item/5a4ea3bee4b0d05ee8c6647b
# Download and unzip: nhru_sroff.zip
# Then save the nhru_sroff.csv file to the `cache/` folder.
# The line below will be used as the file path unless sbtools method works.
soilm_data_fn <- "cache/nhru_soil_moist_tot.csv"
# Read in the initial soil moisture data using fread since there are 109,952 columns
soilm_data <- fread(soilm_data_fn, header=TRUE)
# Filter out to get just the last week of data to use as the actual values for the example data
# := modifies soilm_data in place (data.table reference semantics)
soilm_data[, Date := as.Date(Date)]
max_date <- max(soilm_data$Date) # get the max date in the dataset to use for filtering
# NOTE(review): "<= 7" keeps 8 calendar days (max_date plus the 7 before it);
# confirm whether an exact 7-day window was intended.
soilm_data_1wk <- soilm_data[max_date - Date <= 7]
# Saving the intermediate version
saveRDS(soilm_data_1wk, "cache/nhru_soilm_1wk.rds")
| /rscripts/create_soilmoisture_example_data/1_fetch_soilmoisture_data.R | no_license | abriggs-usgs/wbeep-sandboxr | R | false | false | 1,535 | r | # Fetch soil moisture data in order to create example data
# Hydrologic Response Units = HRUs
library(data.table)
#library(sbtools)
### Example modeled data from Blodgett's code
# Get soil moisture csv
# Downloaded from the following link because using sbtools was taking wayyyyyyyyyy
# too long. Took over an hour and a half and then I gave up.
# https://www.sciencebase.gov/catalog/item/5a4ea3bee4b0d05ee8c6647b
# IF science base download would work, I would use this code:
# authenticate_sb()
# nhm_prms_sbid <- "5a4ea3bee4b0d05ee8c6647b"
# nhm_prms_sbitem <- item_get(nhm_prms_sbid)
# nhm_prms_files <- item_list_files(nhm_prms_sbitem)
# soilm_i <- grep("nhru_sroff", nhm_prms_files$fname)
# soilm_data_fn <- nhm_prms_files$url[soilm_i]
# URL: https://www.sciencebase.gov/catalog/item/5a4ea3bee4b0d05ee8c6647b
# Download and unzip: nhru_sroff.zip
# Then save the nhru_sroff.csv file to the `cache/` folder.
# The line below will be used as the file path unless sbtools method works.
soilm_data_fn <- "cache/nhru_soil_moist_tot.csv"
# Read in the initial soil moisture data using fread since there are 109,952 columns
soilm_data <- fread(soilm_data_fn, header=TRUE)
# Filter out to get just the last week of data to use as the actual values for the example data
soilm_data[, Date := as.Date(Date)]
max_date <- max(soilm_data$Date) # get the max date in the dataset to use for filtering
soilm_data_1wk <- soilm_data[max_date - Date <= 7]
# Saving the intermediate version
saveRDS(soilm_data_1wk, "cache/nhru_soilm_1wk.rds")
|
library(tidyverse)
# Read the watershed-total GAP sheet (sheet 2) from the shared network drive.
# The sheet has 120 columns: 5 leading identifier columns (text) followed by
# 115 metric columns (numeric). rep() replaces the original hand-written
# 120-entry col_types vector, which was error-prone to count and maintain.
Gap <- readxl::read_excel("//INHS-Bison/ResearchData/Groups/Kaskaskia_CREP/Analysis/GL_AQ_GAP_Data_2010/VIEW_WATERSHED_TOTAL_minus_ny.xlsx",
sheet = 2, col_types = c(rep("text", 5), rep("numeric", 115)))
# Keep only the Kaskaskia planning unit and normalise the three key column
# names in a single pipeline (rename() preserves column order, so the result
# is identical to renaming via colnames()<-).
Gap_group_WT <- Gap %>%
filter(PU_CODE == "kasky") %>%
rename(PU_Code = PU_CODE, Gap_Code = GAP_CODE, PU_Gap_Code = PU_GAP)
# --- Regroup land-use proportion columns (WT_LU*) into broader classes ------
Gap_group_WT$WT_Urban <- Gap_group_WT$WT_LU11P + Gap_group_WT$WT_LU12P + Gap_group_WT$WT_LU13P + Gap_group_WT$WT_LU14P
Gap_group_WT$WT_Agriculture <- Gap_group_WT$WT_LU21P + Gap_group_WT$WT_LU22P + Gap_group_WT$WT_LU23P
Gap_group_WT$WT_Grassland <- Gap_group_WT$WT_LU30P
#Forested Total = Forested_wetland + Forested_upland (Forest + All wooded wetland including shrubland)
Gap_group_WT$WT_Forest_total <- Gap_group_WT$WT_LU41P + Gap_group_WT$WT_LU42P + Gap_group_WT$WT_LU43P + Gap_group_WT$WT_LU61P + Gap_group_WT$WT_LU610P + Gap_group_WT$WT_LU611P + Gap_group_WT$WT_LU612P + Gap_group_WT$WT_LU613P
Gap_group_WT$WT_Forested_upland <- Gap_group_WT$WT_LU41P + Gap_group_WT$WT_LU42P + Gap_group_WT$WT_LU43P
# Forested_wetland is all wooded wetland including WT_LU610P (Wooded Wetland, shrubland)
Gap_group_WT$WT_Forested_wetland <- Gap_group_WT$WT_LU61P + Gap_group_WT$WT_LU610P + Gap_group_WT$WT_LU611P + Gap_group_WT$WT_LU612P + Gap_group_WT$WT_LU613P
Gap_group_WT$WT_Inland_water <- Gap_group_WT$WT_LU50P
# Open wet in SSA is calculated as inland water/open water + non-wooded wetland (Wetland-emergent herbaceous)
# In GAP we use Open Water + Wetland, non-wooded
Gap_group_WT$WT_Open_wet <- Gap_group_WT$WT_LU50P + Gap_group_WT$WT_LU62P
Gap_group_WT$WT_Wetland_total <- Gap_group_WT$WT_LU61P + Gap_group_WT$WT_LU610P + Gap_group_WT$WT_LU611P + Gap_group_WT$WT_LU612P + Gap_group_WT$WT_LU613P + Gap_group_WT$WT_LU62P
Gap_group_WT$WT_Barren <- Gap_group_WT$WT_LU70P
# --- Regroup surficial geology proportion columns (WT_QG*) ------------------
# NOTE(review): several QG codes (e.g. WT_QG14P, WT_QG22P) feed more than one
# derived class below, so the derived classes are not mutually exclusive --
# confirm this overlap is intended.
# Not in the SSA layers: G2 Alluvium (medium)- QG23P, all 0 for kasky
Gap_group_WT$WT_Alluvium_fluvial <- Gap_group_WT$WT_QG12P + Gap_group_WT$WT_QG19P + Gap_group_WT$WT_QG23P
Gap_group_WT$WT_Attenuated_drift <- Gap_group_WT$WT_QG14P + Gap_group_WT$WT_QG22P
Gap_group_WT$WT_Bedrock <- Gap_group_WT$WT_QG99P
Gap_group_WT$WT_Broken_rocky <- Gap_group_WT$WT_QG11P + Gap_group_WT$WT_QG14P + Gap_group_WT$WT_QG22P
# Coarse from SSA does not include Outwash (coarse) A3- WT_QG1P & Attenuated drift (coarse) J3- WT_QG14P
# Attenuated drift (coarse) is all 0 for kasky
# Not in the SSA layers: Colluvium (coarse) F3- WT_QG32P and Stagnation moraine (coarse) I3- WT_QG13P all 0 for kasky
#### Remove WT_QG1P if you want identical values to Yong/Brians Group Data
Gap_group_WT$WT_Coarse <- Gap_group_WT$WT_QG1P + Gap_group_WT$WT_QG2P + Gap_group_WT$WT_QG5P + Gap_group_WT$WT_QG8P + Gap_group_WT$WT_QG10P + Gap_group_WT$WT_QG13P +
Gap_group_WT$WT_QG14P +Gap_group_WT$WT_QG17P + Gap_group_WT$WT_QG19P + Gap_group_WT$WT_QG29P + Gap_group_WT$WT_QG32P
Gap_group_WT$WT_Coarse_moraine <- Gap_group_WT$WT_QG5P + Gap_group_WT$WT_QG8P + Gap_group_WT$WT_QG13P
Gap_group_WT$WT_Colluvium <- Gap_group_WT$WT_QG11P
Gap_group_WT$WT_Dune <- Gap_group_WT$WT_QG29P
Gap_group_WT$WT_Fine_moraine <- Gap_group_WT$WT_QG3P + Gap_group_WT$WT_QG6P + Gap_group_WT$WT_QG30P
# Fine from SSA does not include Lacustrine clay and silt (fine) E1- WT_QG9P
#### Remove WT_QG9P if you want identical values to Yong/Brians Group Data
Gap_group_WT$WT_Fines <- Gap_group_WT$WT_QG3P +Gap_group_WT$WT_QG6P + Gap_group_WT$WT_QG9P + Gap_group_WT$WT_QG24P + Gap_group_WT$WT_QG30P
Gap_group_WT$WT_Icecontact <- Gap_group_WT$WT_QG2P
# Not in the SSA layers: E4 Lacustrine (peat and muck)- QG31P, all 0 for kasky
Gap_group_WT$WT_Lacustrine <- Gap_group_WT$WT_QG9P + Gap_group_WT$WT_QG10P + Gap_group_WT$WT_QG31P
Gap_group_WT$WT_Loess <- Gap_group_WT$WT_QG24P
# Medium from SSA does not include Attenuated-drift (medium) J2- WT_QG22P
# Attenuated drift (medium) is all 0 for kasky
# Not in the SSA layers: G2 Alluvium (medium)- QG23P, all 0 for kasky
Gap_group_WT$WT_Medium <- Gap_group_WT$WT_QG4P + Gap_group_WT$WT_QG7P + Gap_group_WT$WT_QG16P + Gap_group_WT$WT_QG20P + Gap_group_WT$WT_QG22P + Gap_group_WT$WT_QG23P
Gap_group_WT$WT_Medium_moraine <- Gap_group_WT$WT_QG4P + Gap_group_WT$WT_QG7P + Gap_group_WT$WT_QG20P
Gap_group_WT$WT_Outwash <- Gap_group_WT$WT_QG1P
Gap_group_WT$WT_Outwash_icecontact <- Gap_group_WT$WT_QG1P + Gap_group_WT$WT_QG2P
# Not in the SSA layers: E4 Lacustrine (peat and muck)- QG31P, all 0 for kasky
Gap_group_WT$WT_Peat_muck <- Gap_group_WT$WT_QG18P + Gap_group_WT$WT_QG31P
Gap_group_WT$WT_Rocky <- Gap_group_WT$WT_QG11P + Gap_group_WT$WT_QG14P + Gap_group_WT$WT_QG22P + Gap_group_WT$WT_QG99P
# Regroup Bed Rock Depth
Gap_group_WT$WT_BR50 <- Gap_group_WT$WT_BD201P
Gap_group_WT$WT_BR100 <- Gap_group_WT$WT_BD202P
Gap_group_WT$WT_BR200 <- Gap_group_WT$WT_BD203P
Gap_group_WT$WT_BR400 <- Gap_group_WT$WT_BD204P
Gap_group_WT$WT_BRG100 <- Gap_group_WT$WT_BD203P + Gap_group_WT$WT_BD204P + Gap_group_WT$WT_BD205P + Gap_group_WT$WT_BD206P + Gap_group_WT$WT_BD207P + Gap_group_WT$WT_BD208P
# Regroup Bed Rock Type
Gap_group_WT$WT_BR_sandstone <- Gap_group_WT$WT_BR1P
Gap_group_WT$WT_BR_shale <- Gap_group_WT$WT_BR2P
Gap_group_WT$WT_BR_carbonate <- Gap_group_WT$WT_BR3P
Gap_group_WT$WT_BR_metamorphic <- Gap_group_WT$WT_BR41P
Gap_group_WT$WT_BR_igneous <- Gap_group_WT$WT_BR42P
Gap_group_WT$WT_BR_unknown <- Gap_group_WT$WT_BR5P
Gap_group_WT$WT_BR_water <- Gap_group_WT$WT_BR6P
# --- Keep only the identifier, climate, and derived columns, then export ----
Gap_group_WT <- Gap_group_WT %>%
select(PU_Gap_Code, PU_Code, Gap_Code,
WT_GDD, WT_JUL_MNX, WT_PREC,
WT_TOTAL_SQME, WT_PERMX, WT_SLOPE, WT_DARCYX, WT_LENGTH,
WT_BR_sandstone, WT_BR_shale, WT_BR_carbonate, WT_BR_metamorphic, WT_BR_igneous, WT_BR_unknown, WT_BR_water,
WT_BR50, WT_BR100, WT_BR200, WT_BR400, WT_BRG100,
WT_Urban, WT_Agriculture, WT_Grassland, WT_Forest_total, WT_Forested_upland, WT_Forested_wetland, WT_Inland_water, WT_Open_wet, WT_Wetland_total, WT_Barren,
WT_Alluvium_fluvial, WT_Attenuated_drift, WT_Bedrock, WT_Broken_rocky, WT_Coarse, WT_Coarse_moraine, WT_Colluvium, WT_Dune, WT_Fine_moraine, WT_Fines,
WT_Icecontact, WT_Lacustrine, WT_Loess, WT_Medium, WT_Medium_moraine, WT_Outwash, WT_Outwash_icecontact, WT_Peat_muck, WT_Rocky)
write.csv(Gap_group_WT, file = "//INHS-Bison/ResearchData/Groups/Kaskaskia_CREP/Analysis/Fish/Data/kasky_landuse_geology_WATERSHED_TOTAL.csv", row.names = F)
| /landuse_geology_metrics/WT_landuse_geology_metric_tibble.R | permissive | lehostert/Fish_Analysis | R | false | false | 8,408 | r | library(tidyverse)
Gap <- readxl::read_excel("//INHS-Bison/ResearchData/Groups/Kaskaskia_CREP/Analysis/GL_AQ_GAP_Data_2010/VIEW_WATERSHED_TOTAL_minus_ny.xlsx",
sheet = 2, col_types = c("text","text","text","text", "text",
"numeric","numeric","numeric", "numeric", "numeric","numeric","numeric","numeric","numeric", "numeric",
"numeric","numeric","numeric", "numeric", "numeric","numeric","numeric","numeric","numeric", "numeric",
"numeric","numeric","numeric", "numeric", "numeric","numeric","numeric","numeric","numeric", "numeric",
"numeric","numeric","numeric", "numeric", "numeric","numeric","numeric","numeric","numeric", "numeric",
"numeric","numeric","numeric", "numeric", "numeric","numeric","numeric","numeric","numeric", "numeric",
"numeric","numeric","numeric", "numeric", "numeric","numeric","numeric","numeric","numeric", "numeric",
"numeric","numeric","numeric", "numeric", "numeric","numeric","numeric","numeric","numeric", "numeric",
"numeric","numeric","numeric", "numeric", "numeric","numeric","numeric","numeric","numeric", "numeric",
"numeric","numeric","numeric", "numeric", "numeric","numeric","numeric","numeric","numeric", "numeric",
"numeric","numeric","numeric", "numeric", "numeric","numeric","numeric","numeric","numeric", "numeric",
"numeric","numeric","numeric", "numeric", "numeric","numeric","numeric","numeric","numeric", "numeric",
"numeric","numeric","numeric", "numeric", "numeric"
))
Gap_group_WT <- Gap %>%
filter(PU_CODE == 'kasky')
colnames(Gap_group_WT)[colnames(Gap_group_WT)=="PU_CODE"] <- "PU_Code"
colnames(Gap_group_WT)[colnames(Gap_group_WT)=="GAP_CODE"] <- "Gap_Code"
colnames(Gap_group_WT)[colnames(Gap_group_WT)=="PU_GAP"] <- "PU_Gap_Code"
Gap_group_WT$WT_Urban <- Gap_group_WT$WT_LU11P + Gap_group_WT$WT_LU12P + Gap_group_WT$WT_LU13P + Gap_group_WT$WT_LU14P
Gap_group_WT$WT_Agriculture <- Gap_group_WT$WT_LU21P + Gap_group_WT$WT_LU22P + Gap_group_WT$WT_LU23P
Gap_group_WT$WT_Grassland <- Gap_group_WT$WT_LU30P
#Forested Total = Forested_wetland + Forested_upland (Forest + All wooded wetland including shrubland)
Gap_group_WT$WT_Forest_total <- Gap_group_WT$WT_LU41P + Gap_group_WT$WT_LU42P + Gap_group_WT$WT_LU43P + Gap_group_WT$WT_LU61P + Gap_group_WT$WT_LU610P + Gap_group_WT$WT_LU611P + Gap_group_WT$WT_LU612P + Gap_group_WT$WT_LU613P
Gap_group_WT$WT_Forested_upland <- Gap_group_WT$WT_LU41P + Gap_group_WT$WT_LU42P + Gap_group_WT$WT_LU43P
# Forested_wetland is all wooded wetland including WT_LU610P (Wooded Wetland, shrubland)
Gap_group_WT$WT_Forested_wetland <- Gap_group_WT$WT_LU61P + Gap_group_WT$WT_LU610P + Gap_group_WT$WT_LU611P + Gap_group_WT$WT_LU612P + Gap_group_WT$WT_LU613P
Gap_group_WT$WT_Inland_water <- Gap_group_WT$WT_LU50P
# Open wet in SSA is calculated as inland water/open water + non-wooded wetland (Wetland-emergent herbaceous)
# In GAP we use Open Water + Wetland, non-wooded
Gap_group_WT$WT_Open_wet <- Gap_group_WT$WT_LU50P + Gap_group_WT$WT_LU62P
Gap_group_WT$WT_Wetland_total <- Gap_group_WT$WT_LU61P + Gap_group_WT$WT_LU610P + Gap_group_WT$WT_LU611P + Gap_group_WT$WT_LU612P + Gap_group_WT$WT_LU613P + Gap_group_WT$WT_LU62P
Gap_group_WT$WT_Barren <- Gap_group_WT$WT_LU70P
# Not in the SSA layers: G2 Alluvium (medium)- QG23P, all 0 for kasky
Gap_group_WT$WT_Alluvium_fluvial <- Gap_group_WT$WT_QG12P + Gap_group_WT$WT_QG19P + Gap_group_WT$WT_QG23P
Gap_group_WT$WT_Attenuated_drift <- Gap_group_WT$WT_QG14P + Gap_group_WT$WT_QG22P
Gap_group_WT$WT_Bedrock <- Gap_group_WT$WT_QG99P
Gap_group_WT$WT_Broken_rocky <- Gap_group_WT$WT_QG11P + Gap_group_WT$WT_QG14P + Gap_group_WT$WT_QG22P
# Coarse from SSA does not include Outwash (coarse) A3- WT_QG1P & Attenuated drift (coarse) J3- WT_QG14P
# Attenuated drift (coarse) is all 0 for kasky
# Not in the SSA layers: Colluvium (coarse) F3- WT_QG32P and Stagnation moraine (coarse) I3- WT_QG13P all 0 for kasky
#### Remove WT_QG1P if you want identical values to Yong/Brians Group Data
Gap_group_WT$WT_Coarse <- Gap_group_WT$WT_QG1P + Gap_group_WT$WT_QG2P + Gap_group_WT$WT_QG5P + Gap_group_WT$WT_QG8P + Gap_group_WT$WT_QG10P + Gap_group_WT$WT_QG13P +
Gap_group_WT$WT_QG14P +Gap_group_WT$WT_QG17P + Gap_group_WT$WT_QG19P + Gap_group_WT$WT_QG29P + Gap_group_WT$WT_QG32P
Gap_group_WT$WT_Coarse_moraine <- Gap_group_WT$WT_QG5P + Gap_group_WT$WT_QG8P + Gap_group_WT$WT_QG13P
Gap_group_WT$WT_Colluvium <- Gap_group_WT$WT_QG11P
Gap_group_WT$WT_Dune <- Gap_group_WT$WT_QG29P
Gap_group_WT$WT_Fine_moraine <- Gap_group_WT$WT_QG3P + Gap_group_WT$WT_QG6P + Gap_group_WT$WT_QG30P
# Fine from SSA does not include Lacustrine clay and silt (fine) E1- WT_QG9P
#### Remove WT_QG9P if you want identical values to Yong/Brians Group Data
Gap_group_WT$WT_Fines <- Gap_group_WT$WT_QG3P +Gap_group_WT$WT_QG6P + Gap_group_WT$WT_QG9P + Gap_group_WT$WT_QG24P + Gap_group_WT$WT_QG30P
Gap_group_WT$WT_Icecontact <- Gap_group_WT$WT_QG2P
# Not in the SSA layers: E4 Lacustrine (peat and muck)- QG31P, all 0 for kasky
Gap_group_WT$WT_Lacustrine <- Gap_group_WT$WT_QG9P + Gap_group_WT$WT_QG10P + Gap_group_WT$WT_QG31P
Gap_group_WT$WT_Loess <- Gap_group_WT$WT_QG24P
# Medium from SSA does not include Attenuated-drift (medium) J2- WT_QG22P
# Attenuated drift (medium) is all 0 for kasky
# Not in the SSA layers: G2 Alluvium (medium)- QG23P, all 0 for kasky
Gap_group_WT$WT_Medium <- Gap_group_WT$WT_QG4P + Gap_group_WT$WT_QG7P + Gap_group_WT$WT_QG16P + Gap_group_WT$WT_QG20P + Gap_group_WT$WT_QG22P + Gap_group_WT$WT_QG23P
Gap_group_WT$WT_Medium_moraine <- Gap_group_WT$WT_QG4P + Gap_group_WT$WT_QG7P + Gap_group_WT$WT_QG20P
Gap_group_WT$WT_Outwash <- Gap_group_WT$WT_QG1P
Gap_group_WT$WT_Outwash_icecontact <- Gap_group_WT$WT_QG1P + Gap_group_WT$WT_QG2P
# Not in the SSA layers: E4 Lacustrine (peat and muck)- QG31P, all 0 for kasky
Gap_group_WT$WT_Peat_muck <- Gap_group_WT$WT_QG18P + Gap_group_WT$WT_QG31P
Gap_group_WT$WT_Rocky <- Gap_group_WT$WT_QG11P + Gap_group_WT$WT_QG14P + Gap_group_WT$WT_QG22P + Gap_group_WT$WT_QG99P
# Regroup Bed Rock Depth
Gap_group_WT$WT_BR50 <- Gap_group_WT$WT_BD201P
Gap_group_WT$WT_BR100 <- Gap_group_WT$WT_BD202P
Gap_group_WT$WT_BR200 <- Gap_group_WT$WT_BD203P
Gap_group_WT$WT_BR400 <- Gap_group_WT$WT_BD204P
Gap_group_WT$WT_BRG100 <- Gap_group_WT$WT_BD203P + Gap_group_WT$WT_BD204P + Gap_group_WT$WT_BD205P + Gap_group_WT$WT_BD206P + Gap_group_WT$WT_BD207P + Gap_group_WT$WT_BD208P
# Regroup Bed Rock Type
Gap_group_WT$WT_BR_sandstone <- Gap_group_WT$WT_BR1P
Gap_group_WT$WT_BR_shale <- Gap_group_WT$WT_BR2P
Gap_group_WT$WT_BR_carbonate <- Gap_group_WT$WT_BR3P
Gap_group_WT$WT_BR_metamorphic <- Gap_group_WT$WT_BR41P
Gap_group_WT$WT_BR_igneous <- Gap_group_WT$WT_BR42P
Gap_group_WT$WT_BR_unknown <- Gap_group_WT$WT_BR5P
Gap_group_WT$WT_BR_water <- Gap_group_WT$WT_BR6P
Gap_group_WT <- Gap_group_WT %>%
select(PU_Gap_Code, PU_Code, Gap_Code,
WT_GDD, WT_JUL_MNX, WT_PREC,
WT_TOTAL_SQME, WT_PERMX, WT_SLOPE, WT_DARCYX, WT_LENGTH,
WT_BR_sandstone, WT_BR_shale, WT_BR_carbonate, WT_BR_metamorphic, WT_BR_igneous, WT_BR_unknown, WT_BR_water,
WT_BR50, WT_BR100, WT_BR200, WT_BR400, WT_BRG100,
WT_Urban, WT_Agriculture, WT_Grassland, WT_Forest_total, WT_Forested_upland, WT_Forested_wetland, WT_Inland_water, WT_Open_wet, WT_Wetland_total, WT_Barren,
WT_Alluvium_fluvial, WT_Attenuated_drift, WT_Bedrock, WT_Broken_rocky, WT_Coarse, WT_Coarse_moraine, WT_Colluvium, WT_Dune, WT_Fine_moraine, WT_Fines,
WT_Icecontact, WT_Lacustrine, WT_Loess, WT_Medium, WT_Medium_moraine, WT_Outwash, WT_Outwash_icecontact, WT_Peat_muck, WT_Rocky)
write.csv(Gap_group_WT, file = "//INHS-Bison/ResearchData/Groups/Kaskaskia_CREP/Analysis/Fish/Data/kasky_landuse_geology_WATERSHED_TOTAL.csv", row.names = F)
|
# formatting utility functions
#' Convert a factor (or anything coercible) to numeric via its labels
#'
#' Going through as.character() first ensures a factor is converted by its
#' printed labels rather than its internal integer codes. Rewritten with
#' plain base-R calls: the original used the magrittr pipe (%>%), which this
#' file never attaches, so the function failed when sourced standalone.
as.Num = function(x){
as.numeric(as.character(x))
}
#' Wrap each element of a character vector in double quotes
#'
#' Vectorised over x; elements are formatted as "<x>".
add_quotes = function(x){
sprintf('"%s"', x)
}
#' making plate well index
#' Output: named vector (Well --> location); column-wise location
#' Well IDs are generated column-major (A1..H1, A2..H2, ..., A12..H12),
#' exactly matching the original hand-written listing.
well96_index = function(){
wells = paste0(rep(LETTERS[1:8], times = 12), rep(1:12, each = 8))
setNames(seq_along(wells), wells)
}
#' making plate well index
#' Output: named vector (Well --> location); column-wise location
#' Well IDs are generated column-major (A1..P1, A2..P2, ..., A24..P24),
#' exactly matching the original hand-written listing.
well384_index = function(){
wells = paste0(rep(LETTERS[1:16], times = 24), rep(1:24, each = 16))
setNames(seq_along(wells), wells)
}
#' Convert well IDs to column-major plate positions
#'
#' @param x Character vector of well IDs (e.g. "A1", "H12")
#' @param plate_type Either '96-well' (default) or '384-well'; any other
#'   value is an error
#' @return Numeric vector of plate positions; NA for IDs not on the plate
#' Fixes the "recoginize" typo in the error message; switch() replaces the
#' if/else chain (the trailing unnamed stop() is switch's default branch).
well2index = function(x, plate_type='96-well'){
idx = switch(plate_type,
'96-well' = well96_index(),
'384-well' = well384_index(),
stop('Do not recognize plate type'))
return(as.numeric(idx[x]))
}
#' Convert column-major plate positions back to well IDs
#'
#' @param x Numeric well position(s) on the plate
#' @param plate_type Either '96-well' (default) or '384-well'; any other
#'   value is an error
#' @return Character vector of well IDs (e.g. "A1")
#' Fixes the "recoginize" typo in the error message; setNames() replaces the
#' manual two-step reverse-lookup construction.
index2well = function(x, plate_type='96-well'){
idx = switch(plate_type,
'96-well' = well96_index(),
'384-well' = well384_index(),
stop('Do not recognize plate type'))
# reverse lookup: position -> well ID
idx2 = setNames(names(idx), idx)
return(as.character(idx2[x]))
}
| /utils/format.R | permissive | nick-youngblut/MicrobeMeter | R | false | false | 4,755 | r | # formatting utility functions
#' convert factor to numeric
as.Num = function(x){
x %>% as.character %>% as.numeric
}
#' adding quotes to string
add_quotes = function(x){
paste0('"', x, '"')
}
#' making plate well index
#' Output: named vector (Well --> location); column-wise location
well96_index = function(){
wells = c('A1', 'B1', 'C1', 'D1', 'E1', 'F1', 'G1', 'H1',
'A2', 'B2', 'C2', 'D2', 'E2', 'F2', 'G2', 'H2',
'A3', 'B3', 'C3', 'D3', 'E3', 'F3', 'G3', 'H3',
'A4', 'B4', 'C4', 'D4', 'E4', 'F4', 'G4', 'H4',
'A5', 'B5', 'C5', 'D5', 'E5', 'F5', 'G5', 'H5',
'A6', 'B6', 'C6', 'D6', 'E6', 'F6', 'G6', 'H6',
'A7', 'B7', 'C7', 'D7', 'E7', 'F7', 'G7', 'H7',
'A8', 'B8', 'C8', 'D8', 'E8', 'F8', 'G8', 'H8',
'A9', 'B9', 'C9', 'D9', 'E9', 'F9', 'G9', 'H9',
'A10', 'B10', 'C10', 'D10', 'E10', 'F10', 'G10', 'H10',
'A11', 'B11', 'C11', 'D11', 'E11', 'F11', 'G11', 'H11',
'A12', 'B12', 'C12', 'D12', 'E12', 'F12', 'G12', 'H12')
idx = 1:length(wells)
names(idx) = wells
return(idx)
}
#' making plate well index
#' Output: named vector (Well --> location); column-wise location
well384_index = function(){
wells = c('A1', 'B1', 'C1', 'D1', 'E1', 'F1', 'G1', 'H1', 'I1', 'J1', 'K1', 'L1', 'M1', 'N1', 'O1', 'P1',
'A2', 'B2', 'C2', 'D2', 'E2', 'F2', 'G2', 'H2', 'I2', 'J2', 'K2', 'L2', 'M2', 'N2', 'O2', 'P2',
'A3', 'B3', 'C3', 'D3', 'E3', 'F3', 'G3', 'H3', 'I3', 'J3', 'K3', 'L3', 'M3', 'N3', 'O3', 'P3',
'A4', 'B4', 'C4', 'D4', 'E4', 'F4', 'G4', 'H4', 'I4', 'J4', 'K4', 'L4', 'M4', 'N4', 'O4', 'P4',
'A5', 'B5', 'C5', 'D5', 'E5', 'F5', 'G5', 'H5', 'I5', 'J5', 'K5', 'L5', 'M5', 'N5', 'O5', 'P5',
'A6', 'B6', 'C6', 'D6', 'E6', 'F6', 'G6', 'H6', 'I6', 'J6', 'K6', 'L6', 'M6', 'N6', 'O6', 'P6',
'A7', 'B7', 'C7', 'D7', 'E7', 'F7', 'G7', 'H7', 'I7', 'J7', 'K7', 'L7', 'M7', 'N7', 'O7', 'P7',
'A8', 'B8', 'C8', 'D8', 'E8', 'F8', 'G8', 'H8', 'I8', 'J8', 'K8', 'L8', 'M8', 'N8', 'O8', 'P8',
'A9', 'B9', 'C9', 'D9', 'E9', 'F9', 'G9', 'H9', 'I9', 'J9', 'K9', 'L9', 'M9', 'N9', 'O9', 'P9',
'A10', 'B10', 'C10', 'D10', 'E10', 'F10', 'G10', 'H10', 'I10', 'J10', 'K10', 'L10', 'M10', 'N10', 'O10', 'P10',
'A11', 'B11', 'C11', 'D11', 'E11', 'F11', 'G11', 'H11', 'I11', 'J11', 'K11', 'L11', 'M11', 'N11', 'O11', 'P11',
'A12', 'B12', 'C12', 'D12', 'E12', 'F12', 'G12', 'H12', 'I12', 'J12', 'K12', 'L12', 'M12', 'N12', 'O12', 'P12',
'A13', 'B13', 'C13', 'D13', 'E13', 'F13', 'G13', 'H13', 'I13', 'J13', 'K13', 'L13', 'M13', 'N13', 'O13', 'P13',
'A14', 'B14', 'C14', 'D14', 'E14', 'F14', 'G14', 'H14', 'I14', 'J14', 'K14', 'L14', 'M14', 'N14', 'O14', 'P14',
'A15', 'B15', 'C15', 'D15', 'E15', 'F15', 'G15', 'H15', 'I15', 'J15', 'K15', 'L15', 'M15', 'N15', 'O15', 'P15',
'A16', 'B16', 'C16', 'D16', 'E16', 'F16', 'G16', 'H16', 'I16', 'J16', 'K16', 'L16', 'M16', 'N16', 'O16', 'P16',
'A17', 'B17', 'C17', 'D17', 'E17', 'F17', 'G17', 'H17', 'I17', 'J17', 'K17', 'L17', 'M17', 'N17', 'O17', 'P17',
'A18', 'B18', 'C18', 'D18', 'E18', 'F18', 'G18', 'H18', 'I18', 'J18', 'K18', 'L18', 'M18', 'N18', 'O18', 'P18',
'A19', 'B19', 'C19', 'D19', 'E19', 'F19', 'G19', 'H19', 'I19', 'J19', 'K19', 'L19', 'M19', 'N19', 'O19', 'P19',
'A20', 'B20', 'C20', 'D20', 'E20', 'F20', 'G20', 'H20', 'I20', 'J20', 'K20', 'L20', 'M20', 'N20', 'O20', 'P20',
'A21', 'B21', 'C21', 'D21', 'E21', 'F21', 'G21', 'H21', 'I21', 'J21', 'K21', 'L21', 'M21', 'N21', 'O21', 'P21',
'A22', 'B22', 'C22', 'D22', 'E22', 'F22', 'G22', 'H22', 'I22', 'J22', 'K22', 'L22', 'M22', 'N22', 'O22', 'P22',
'A23', 'B23', 'C23', 'D23', 'E23', 'F23', 'G23', 'H23', 'I23', 'J23', 'K23', 'L23', 'M23', 'N23', 'O23', 'P23',
'A24', 'B24', 'C24', 'D24', 'E24', 'F24', 'G24', 'H24', 'I24', 'J24', 'K24', 'L24', 'M24', 'N24', 'O24', 'P24')
idx = 1:length(wells)
names(idx) = wells
return(idx)
}
#' converting wellID to column (assuming 96-well)
#' input: character vector of wellIDs
well2index = function(x, plate_type='96-well'){
if(plate_type == '96-well'){
idx = well96_index()
} else
if(plate_type == '384-well'){
idx = well384_index()
} else {
stop('Do not recoginize plate type')
}
return(as.numeric(idx[x]))
}
#' Convert numeric plate indices to wellIDs
#'
#' Inverse of well2index(): maps numeric well positions back to wellIDs
#' (e.g. 1 -> "A1"), using the lookup tables built by well96_index() /
#' well384_index() (defined elsewhere in this file).
#'
#' @param x Numeric vector of well indices.
#' @param plate_type Either '96-well' (default) or '384-well'.
#' @return Character vector of wellIDs; NA for out-of-range indices.
index2well <- function(x, plate_type = '96-well') {
  # Pick the named index vector for the requested plate format
  if (plate_type == '96-well') {
    idx <- well96_index()
  } else if (plate_type == '384-well') {
    idx <- well384_index()
  } else {
    stop('Do not recognize plate type')
  }
  # Invert the name -> index map into an index -> name map; since the
  # index values are 1..n in order, numeric subsetting below is consistent
  idx2 <- names(idx)
  names(idx2) <- idx
  return(as.character(idx2[x]))
}
|
# Generated example for matvec() from the third-party 'rTensor' package
# (extracted from man/matvec-methods.Rd); requires rTensor to be installed.
library(rTensor)
### Name: matvec-methods
### Title: Tensor Matvec Unfolding
### Aliases: matvec matvec,Tensor-method matvec-methods
### ** Examples
# Build a random 2x3x4 tensor, then compute its matvec unfolding
tnsr <- rand_tensor(c(2,3,4))
matT1<- matvec(tnsr)
| /data/genthat_extracted_code/rTensor/examples/matvec-methods.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 207 | r | library(rTensor)
### Name: matvec-methods
### Title: Tensor Matvec Unfolding
### Aliases: matvec matvec,Tensor-method matvec-methods
### ** Examples
tnsr <- rand_tensor(c(2,3,4))
matT1<- matvec(tnsr)
|
/R/wordcloud/wordcloud.R | no_license | teresa-huangt/BNU_Project | R | false | false | 2,764 | r | ||
\name{sim.table}
\alias{sim.table}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Table of simulations for the health economic model
}
\description{
Using the input in the form of MCMC simulations and after having run the health
economic model, produces a summary table of the simulations from the cost-effectiveness
analysis
}
\usage{
sim.table(he, wtp = 25000)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{he}{
A \code{bcea} object containing the results of the Bayesian modelling and the economic
evaluation.
}
\item{wtp}{
The value of the willingness to pay threshold to be used in the summary table.
}
}
\value{
Produces the following elements:
\item{Table}{A table with the simulations from the economic model}
\item{names.cols}{A vector of labels to be associated with each column of the table}
\item{wtp}{The selected value of the willingness to pay}
\item{ind.table}{The index associated with the selected value of the willingness to pay
threshold in the grid used to run the analysis}
}
\references{
Baio, G., Dawid, A. P. (2011). Probabilistic Sensitivity Analysis in Health Economics.
Statistical Methods in Medical Research doi:10.1177/0962280211419832.
Baio G. (2012). Bayesian Methods in Health Economics. CRC/Chapman Hall, London
}
\author{
Gianluca Baio
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{bcea}}
}
\examples{
# See Baio G., Dawid A.P. (2011) for a detailed description of the
# Bayesian model and economic problem
#
# Load the processed results of the MCMC simulation model
data(Vaccine)
#
# Runs the health economic evaluation using BCEA
m <- bcea(e=e,c=c, # defines the variables of
# effectiveness and cost
ref=2, # selects the 2nd row of (e,c)
# as containing the reference intervention
interventions=treats, # defines the labels to be associated
# with each intervention
Kmax=50000 # maximum value possible for the willingness
# to pay threshold; implies that k is chosen
# in a grid from the interval (0,Kmax)
)
#
# Now can save the simulation exercise in an object using sim.table()
st <- sim.table(m, # uses the results of the economic evaluation
# (a "bcea" object)
wtp=25000 # selects the particular value for k
)
#
# The table can be explored. For example, checking the
# element 'Table' of the object 'st'
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{Health economic evaluation} % __ONLY ONE__ keyword per line
| /man/sim.table.Rd | no_license | PAREXEL-PAC/BCEA | R | false | false | 2,765 | rd | \name{sim.table}
\alias{sim.table}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Table of simulations for the health economic model
}
\description{
Using the input in the form of MCMC simulations and after having run the health
economic model, produces a summary table of the simulations from the cost-effectiveness
analysis
}
\usage{
sim.table(he, wtp = 25000)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{he}{
A \code{bcea} object containing the results of the Bayesian modelling and the economic
evaluation.
}
\item{wtp}{
The value of the willingness to pay threshold to be used in the summary table.
}
}
\value{
Produces the following elements:
\item{Table}{A table with the simulations from the economic model}
\item{names.cols}{A vector of labels to be associated with each column of the table}
\item{wtp}{The selected value of the willingness to pay}
\item{ind.table}{The index associated with the selected value of the willingness to pay
threshold in the grid used to run the analysis}
}
\references{
Baio, G., Dawid, A. P. (2011). Probabilistic Sensitivity Analysis in Health Economics.
Statistical Methods in Medical Research doi:10.1177/0962280211419832.
Baio G. (2012). Bayesian Methods in Health Economics. CRC/Chapman Hall, London
}
\author{
Gianluca Baio
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{bcea}}
}
\examples{
# See Baio G., Dawid A.P. (2011) for a detailed description of the
# Bayesian model and economic problem
#
# Load the processed results of the MCMC simulation model
data(Vaccine)
#
# Runs the health economic evaluation using BCEA
m <- bcea(e=e,c=c, # defines the variables of
# effectiveness and cost
ref=2, # selects the 2nd row of (e,c)
# as containing the reference intervention
interventions=treats, # defines the labels to be associated
# with each intervention
Kmax=50000 # maximum value possible for the willingness
# to pay threshold; implies that k is chosen
# in a grid from the interval (0,Kmax)
)
#
# Now can save the simulation exercise in an object using sim.table()
st <- sim.table(m, # uses the results of the economic evaluation
# (a "bcea" object)
wtp=25000 # selects the particular value for k
)
#
# The table can be explored. For example, checking the
# element 'Table' of the object 'st'
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{Health economic evaluation} % __ONLY ONE__ keyword per line
|
# Load Data
# Read the problem-set CSV (no header): column 1 is the binary outcome,
# columns 2-4 are covariates.
Data <- read.csv("~/Desktop/Data_PS2a_S15.csv", header=FALSE, sep=",") # Import Data
y <- Data[[1]]   # response
x1 <- Data[[2]]  # presumably an intercept/constant column -- TODO confirm; the GLM below omits it
x2 <- Data[[3]]
x3 <- Data[[4]]
x <- cbind(x1, x2, x3) # create x matrix (used by the commented-out MH sampler below)
# Load packages
# library("mvtnorm", lib.loc="~/R/win-library/3.2")
# library("mnormt", lib.loc="~/R/win-library/3.2")
# library("coda", lib.loc="~/R/win-library/3.2")
# GLM ---------------------------------------------------------------------
# GLM for beta coefficients, SE's and Hessian
# Logistic regression of y on x2 and x3; its MLE, standard errors and
# covariance feed the (commented-out) Metropolis-Hastings proposal density.
logitY <- glm(y~ x2 + x3, family = "binomial", data = Data)
summary(logitY)
beta <- logitY$coefficients # Beta MLE
SE <- summary(logitY)$coefficients[,2] # Standard errors
Hessian <- summary(logitY)$cov.scaled # estimated covariance matrix (inverse of the observed information)
# # Proposal Density Variables ----------------------------------------------
#
# # Define variables to use for Proposal Density in MH Algorithm
# # We will center around maximum likelihood with large tails, so a T distribution
# location1 = unname(beta) # Center it around Beta MLE
# scale1 = unname(Hessian)*30
# scale2 = unname(SE)
# df = 3 # Fat tails for the T distribution
#
# # Prior -------------------------------------------------------------------
#
# # Mean 0
# mu_prior <- t(c(0,0,0))
# # Variance - Covariance matrix
# sigma_prior = diag(3)*10
# # Number of Iterations
# n = 10000
#
#
#
# # Initial Values ----------------------------------------------------------
#
# plotx <- seq(1,9000,1)
# zeros <- rep(0, times = n)
# beta_c <- matrix(c(zeros, zeros, zeros), nrow = 3)
# beta_p <- matrix(c(zeros, zeros, zeros), nrow = 3)
#
# # Logistic pdf
# d = 2*y - 1
#
#
# # Metropolis Hastings -----------------------------------------------------
#
# # Acceptance Variable
# acceptn = zeros
#
# # Procedure
# i <- 1
# while (i<n) {
# beta_p[,i] = location1 + scale2*rmvt(1, sigma = scale1*2, df = df) # Generate proposal beta from T distribution beginning at maximum likelihood
# logit_p = prod(plogis(d*(x%*%beta_p[,i]))) # f(y|B*), plogis is the logistic distribution
# logit_c = prod(plogis(d*(x%*%beta_c[,i]))) # f(y|B)
# prior_p = dmvnorm(beta_p[,i], mean = mu_prior, sigma = sigma_prior) # prior for proposal pi(B*)
# prior_c = dmvnorm(beta_c[,i], mean = mu_prior, sigma = sigma_prior) # prior for current pi(B)
# proposal_p = dmt(x = beta_p[,i], mean = beta, S = scale1, df = df) #q(B*|y)
# proposal_c = dmt(x = beta_c[,i], mean = beta, S = scale1, df = df) # q(B|y)
#
# a_ratio = (logit_p*prior_p*proposal_c)/(logit_c*prior_c*proposal_p) # ratio for acceptance
# a_prob = min(1,a_ratio) # probability of acceptance
# accept = rbinom(1, size = 1, prob = a_prob) #random binomial draw for acceptance
#
# if (accept == 1){
# beta_c[,i+1] = beta_p[,i]
# acceptn[i] = 1
# } else {
# beta_c[,i+1] = beta_c[,i]
# acceptn[i] = 0
# }
# i = i+1
#
# }
#
# accept_burn = acceptn[1001:n]
# totalM <- sum(accept_burn)
# a_rate <- totalM/9000 # Acceptance rate
# print(a_rate)
#
#
# # Posterior Mean and SD ----------------------------------------------------------
# beta_px1 <- beta_p[1,1001:n] # burn the first thousand
# beta_px2 <- beta_p[2,1001:n]
# beta_px3 <- beta_p[3,1001:n]
#
# print(mean(beta_px1))
# print(sd(beta_px1))
# print(mean(beta_px2))
# print(sd(beta_px2))
# print(mean(beta_px3))
# print(sd(beta_px3))
#
# # Plots -------------------------------------------------------------------
# pdf(file = "plot1.pdf")
# hist(beta_px1, breaks = 1000, freq = FALSE, main = "Beta 1", xlab = "Red line = True Beta")
# abline(v = beta[1], col="red")
# dev.off()
#
# pdf(file = "plot2.pdf")
# hist(beta_px2, breaks = 1000, freq = FALSE, main = "Beta 2", xlab = "Red line = True Beta")
# abline(v = beta[2], col="red")
# dev.off()
#
# pdf(file = "plot3.pdf")
# hist(beta_px3, breaks = 1000, freq = FALSE, main = "Beta 3", xlab = "Red line = True Beta")
# abline(v = beta[3], col="red")
# dev.off()
#
#
# # Marginal Effect ---------------------------------------------------------
#
# betaPost <- mean(beta_px2)
# xvector <- betaPost * x2
# PiME <- 1/(1+exp(xvector))
# MEffect <- betaPost*PiME*(1-PiME)
#
# MEffect_mean <- mean(MEffect)
# MEffect_SD <- sd(MEffect)
# print(MEffect_mean)
# print(MEffect_SD)
#
# pdf(file = "plot4.pdf")
# hist(MEffect, breaks = 1000, freq = FALSE, main = "Marginal Effect Mean", xlab = "Red line = Mean")
# abline(v = MEffect_mean, col="red")
# dev.off()
#
# pdf(file = "plot5.pdf")
# plot(plotx,beta_c[1,],"l")
# dev.off()
# pdf(file = "plot6.pdf")
# plot(plotx,beta_c[2,],"l")
# dev.off()
# pdf(file = "plot7.pdf")
# plot(plotx,beta_c[3,],"l")
# dev.off() | /R/Old/problem1.R | no_license | dillon4287/CodeProjects | R | false | false | 4,616 | r | # Load Data
# Read the problem-set CSV (no header): column 1 is the binary outcome,
# columns 2-4 are covariates.
Data <- read.csv("~/Desktop/Data_PS2a_S15.csv", header=FALSE, sep=",") # Import Data
y <- Data[[1]]   # response
x1 <- Data[[2]]  # presumably an intercept/constant column -- TODO confirm; the GLM below omits it
x2 <- Data[[3]]
x3 <- Data[[4]]
x <- cbind(x1, x2, x3) # create x matrix (used by the commented-out MH sampler below)
# Load packages
# library("mvtnorm", lib.loc="~/R/win-library/3.2")
# library("mnormt", lib.loc="~/R/win-library/3.2")
# library("coda", lib.loc="~/R/win-library/3.2")
# GLM ---------------------------------------------------------------------
# GLM for beta coefficients, SE's and Hessian
# Logistic regression of y on x2 and x3; its MLE, standard errors and
# covariance feed the (commented-out) Metropolis-Hastings proposal density.
logitY <- glm(y~ x2 + x3, family = "binomial", data = Data)
summary(logitY)
beta <- logitY$coefficients # Beta MLE
SE <- summary(logitY)$coefficients[,2] # Standard errors
Hessian <- summary(logitY)$cov.scaled # estimated covariance matrix (inverse of the observed information)
# # Proposal Density Variables ----------------------------------------------
#
# # Define variables to use for Proposal Density in MH Algorithm
# # We will center around maximum likelihood with large tails, so a T distribution
# location1 = unname(beta) # Center it around Beta MLE
# scale1 = unname(Hessian)*30
# scale2 = unname(SE)
# df = 3 # Fat tails for the T distribution
#
# # Prior -------------------------------------------------------------------
#
# # Mean 0
# mu_prior <- t(c(0,0,0))
# # Variance - Covariance matrix
# sigma_prior = diag(3)*10
# # Number of Iterations
# n = 10000
#
#
#
# # Initial Values ----------------------------------------------------------
#
# plotx <- seq(1,9000,1)
# zeros <- rep(0, times = n)
# beta_c <- matrix(c(zeros, zeros, zeros), nrow = 3)
# beta_p <- matrix(c(zeros, zeros, zeros), nrow = 3)
#
# # Logistic pdf
# d = 2*y - 1
#
#
# # Metropolis Hastings -----------------------------------------------------
#
# # Acceptance Variable
# acceptn = zeros
#
# # Procedure
# i <- 1
# while (i<n) {
# beta_p[,i] = location1 + scale2*rmvt(1, sigma = scale1*2, df = df) # Generate proposal beta from T distribution beginning at maximum likelihood
# logit_p = prod(plogis(d*(x%*%beta_p[,i]))) # f(y|B*), plogis is the logistic distribution
# logit_c = prod(plogis(d*(x%*%beta_c[,i]))) # f(y|B)
# prior_p = dmvnorm(beta_p[,i], mean = mu_prior, sigma = sigma_prior) # prior for proposal pi(B*)
# prior_c = dmvnorm(beta_c[,i], mean = mu_prior, sigma = sigma_prior) # prior for current pi(B)
# proposal_p = dmt(x = beta_p[,i], mean = beta, S = scale1, df = df) #q(B*|y)
# proposal_c = dmt(x = beta_c[,i], mean = beta, S = scale1, df = df) # q(B|y)
#
# a_ratio = (logit_p*prior_p*proposal_c)/(logit_c*prior_c*proposal_p) # ratio for acceptance
# a_prob = min(1,a_ratio) # probability of acceptance
# accept = rbinom(1, size = 1, prob = a_prob) #random binomial draw for acceptance
#
# if (accept == 1){
# beta_c[,i+1] = beta_p[,i]
# acceptn[i] = 1
# } else {
# beta_c[,i+1] = beta_c[,i]
# acceptn[i] = 0
# }
# i = i+1
#
# }
#
# accept_burn = acceptn[1001:n]
# totalM <- sum(accept_burn)
# a_rate <- totalM/9000 # Acceptance rate
# print(a_rate)
#
#
# # Posterior Mean and SD ----------------------------------------------------------
# beta_px1 <- beta_p[1,1001:n] # burn the first thousand
# beta_px2 <- beta_p[2,1001:n]
# beta_px3 <- beta_p[3,1001:n]
#
# print(mean(beta_px1))
# print(sd(beta_px1))
# print(mean(beta_px2))
# print(sd(beta_px2))
# print(mean(beta_px3))
# print(sd(beta_px3))
#
# # Plots -------------------------------------------------------------------
# pdf(file = "plot1.pdf")
# hist(beta_px1, breaks = 1000, freq = FALSE, main = "Beta 1", xlab = "Red line = True Beta")
# abline(v = beta[1], col="red")
# dev.off()
#
# pdf(file = "plot2.pdf")
# hist(beta_px2, breaks = 1000, freq = FALSE, main = "Beta 2", xlab = "Red line = True Beta")
# abline(v = beta[2], col="red")
# dev.off()
#
# pdf(file = "plot3.pdf")
# hist(beta_px3, breaks = 1000, freq = FALSE, main = "Beta 3", xlab = "Red line = True Beta")
# abline(v = beta[3], col="red")
# dev.off()
#
#
# # Marginal Effect ---------------------------------------------------------
#
# betaPost <- mean(beta_px2)
# xvector <- betaPost * x2
# PiME <- 1/(1+exp(xvector))
# MEffect <- betaPost*PiME*(1-PiME)
#
# MEffect_mean <- mean(MEffect)
# MEffect_SD <- sd(MEffect)
# print(MEffect_mean)
# print(MEffect_SD)
#
# pdf(file = "plot4.pdf")
# hist(MEffect, breaks = 1000, freq = FALSE, main = "Marginal Effect Mean", xlab = "Red line = Mean")
# abline(v = MEffect_mean, col="red")
# dev.off()
#
# pdf(file = "plot5.pdf")
# plot(plotx,beta_c[1,],"l")
# dev.off()
# pdf(file = "plot6.pdf")
# plot(plotx,beta_c[2,],"l")
# dev.off()
# pdf(file = "plot7.pdf")
# plot(plotx,beta_c[3,],"l")
# dev.off() |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.