blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
68fa8e6293d198e8b0f46a59eedca9788571d459
|
fa5eb1a6e94be9be5d1bc19d1807c6ed2983b2d0
|
/libbase/R/attach.local.R
|
cf6e9f401e94281790b008b5bf596a1abddcd32b
|
[] |
no_license
|
bereginyas/rlib
|
57c8a4f3548b34ba9a69dd3774ab127cbd4632be
|
f511254f1ed46f5a7d43eea7884cf31ef2cda9ca
|
refs/heads/master
| 2022-01-07T12:54:04.819043
| 2019-05-25T05:34:24
| 2019-05-25T05:34:24
| 67,099,725
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 860
|
r
|
attach.local.R
|
## Attach the named fields of a list-like object into an environment
## without touching the search path (unlike base::attach()).
##
## Args:
##   object:        list-like object whose elements are copied.
##   fields:        character vector of element names to attach; defaults
##                  to all *named* elements of 'object'.
##   excludeFields: names to skip; checked inside the loop (not via
##                  setdiff) so duplicated names in 'fields' survive.
##   overwrite:     if FALSE, variables already present in 'envir' are
##                  left untouched.
##   envir:         target environment (default: the caller's frame).
##   ...:           ignored; accepted for call compatibility.
##
## Returns: invisibly, the character vector of field names actually
## attached, in attachment order.
attach.local <- function(object, fields=NULL, excludeFields=NULL, overwrite=TRUE, envir=parent.frame(), ...) {
  if (is.null(fields)) {
    fields <- names(object)
    # Don't try to attach non-named elements
    fields <- setdiff(fields, "")
  }
  # Note: we cannot do 'fields <- setdiff(fields, excludeFields)', because
  # that will also remove duplicates!
  attachedFields <- character(0L)
  for (field in fields) {
    if (field %in% excludeFields)
      next
    if (overwrite || !exists(field, envir=envir, inherits=FALSE)) {
      assign(field, object[[field]], envir=envir)
      # Remove the field so that a 2nd field of the same name can
      # be attached (and overwrite the first one)
      object[[field]] <- NULL
      attachedFields <- c(attachedFields, field)
    }
  }
  invisible(attachedFields)
}
|
4740a9aa2f9ecfa99c77b4cbdf74586cc08fc23c
|
d0b5ca282def5cda68c9adead9ba4db72acadb62
|
/CanadaCOVID19/R/plot_Ca.R
|
8b14b43a2b405ee0b4b52d4ab050118296bf79e3
|
[] |
no_license
|
YujieWang95/package
|
ed1deeed110f22145927affa252d18c1b297f7dd
|
d6fed1c2f8fed06ad5f115f9b05474f75819a499
|
refs/heads/main
| 2023-05-03T03:39:31.824917
| 2021-05-10T19:42:55
| 2021-05-10T19:42:55
| 342,692,634
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,812
|
r
|
plot_Ca.R
|
##' @description Show the new cases/new deaths/cumulative cases from the day the first case occurred to a chosen day in Canada.
##'
##' @title New cases/New deaths/Cumulative cases by day in Canada
##' @name plot_Ca
##' @author Yujie & Jiahui
##' @usage plot_Ca(stopdate,type)
##' @param stopdate End date of the plot as a "yyyy-mm-dd" string, e.g. "2020-06-21". Double quotes are required.
##' @param type One of "New", "Deaths", "Cumulative" (case sensitive). Double quotes are required.
##' @return A ggplot line chart of the chosen statistic from the day of the first recorded case up to \code{stopdate}. An unrecognised \code{type} raises an error (previously it silently returned \code{NULL}).
##' @import ggplot2
##' @export
##' @examples
##' plot_Ca("2020-06-12","New")
##' plot_Ca("2020-09-12","Cumulative")
##' plot_Ca("2020-11-12","Deaths")
#utils::globalVariables(c("prname","numtoday", "numconf", "numdeaths"))
plot_Ca <- function(stopdate, type){
  # Validate 'type' before doing any network work; map it to the column
  # of the Health Canada data set and the y-axis label.
  spec <- switch(type,
                 "New"        = list(col = "numtoday",  lab = "Number of New Cases"),
                 "Cumulative" = list(col = "numconf",   lab = "Number of Cumulative Cases"),
                 "Deaths"     = list(col = "numdeaths", lab = "Number of Deaths"),
                 stop("'type' must be one of \"New\", \"Cumulative\" or \"Deaths\"", call. = FALSE))
  # Download the live national data set (network access required).
  full <- utils::read.csv("https://health-infobase.canada.ca/src/data/covidLive/covid19-download.csv", header=TRUE, sep=",", as.is=TRUE, stringsAsFactors = FALSE)
  Canada <- subset(full, prname == "Canada")
  Canada[is.na(Canada)] <- 0
  Canada$date <- lubridate::ymd(Canada$date)
  # Keep rows up to the requested stop date -- previously this conversion
  # and subset were duplicated in each of the three type branches.
  stopdate <- lubridate::ymd(stopdate)
  sub_pro_ca <- subset(Canada, date <= stopdate)
  ggplot(sub_pro_ca, aes(x = date, y = .data[[spec$col]])) +
    geom_point(na.rm = TRUE) +
    geom_line() +
    ylab(spec$lab)
}
|
4be77388d9980bff944be28594ac5fbdada4135e
|
9d4e9bf93975e5f48eb10d4f4949b6e1ff302e40
|
/train/0a-quick_pipeline.R
|
7a73d404848c9496bbce17f173e18dbc341d48f7
|
[] |
no_license
|
kota7/kgschart-r
|
d8ccc87e39f43a078e88e2f013563b941c95c7f5
|
d97f9012b7d7e84a4543123e127ec3f73cc7febd
|
refs/heads/master
| 2022-09-16T01:04:42.168707
| 2022-08-13T02:44:29
| 2022-08-13T02:44:29
| 89,450,572
| 0
| 0
| null | 2017-07-02T13:29:16
| 2017-04-26T07:19:49
|
HTML
|
UTF-8
|
R
| false
| false
| 2,695
|
r
|
0a-quick_pipeline.R
|
# quick implementation of pipeline-like functionality
# Pipeline step that reshapes each 3-d sample array into a 2-d matrix:
# dimensions 2 and 3 of data$x are collapsed into one feature axis.
# Returns its own environment, exposing $fit and $transform like every
# other step in this file.
Flatten <- function(...)
{
  # Nothing to learn from the data; present only to satisfy the
  # common step interface.
  fit <- function(data) {}

  transform <- function(data)
  {
    shape <- dim(data$x)
    dim(data$x) <- c(shape[1], shape[2] * shape[3])
    data
  }

  self <- environment()
  self
}
# Pipeline step performing principal component analysis.
#
# n:   number of principal components to keep.
# ...: forwarded to stats::prcomp() on the first fit (e.g. center, scale.).
#
# The returned environment exposes:
#   $fit(data)       - fits prcomp on data$x once (later calls leave the
#                      model itself untouched) and truncates the rotation
#                      matrix to the first n components.
#   $transform(data) - projects data$x onto the kept components.
PCA <- function(n, ...)
{
  model <- NULL
  fit <- function(data)
  {
    # set retx=FALSE, to avoid keeping the rotated data inside
    if (is.null(model)) model <<- stats::prcomp(data$x, retx=FALSE, ...)
    # To save storage, cut the rotation matrix down to the first n
    # components.  drop=FALSE keeps it a matrix even when n == 1;
    # without it the result degrades to a plain vector and a second
    # fit() call fails with "incorrect number of dimensions".
    model$rotation <<- model$rotation[, seq_len(n), drop = FALSE]
  }
  transform <- function(data)
  {
    #data$x <- predict(model, data$x)[, 1:n]
    data$x <- scale(data$x, model$center, model$scale) %*% model$rotation
    data
  }
  self <- environment()
  self
}
# Pipeline step wrapping a deepnet neural network (multi-layer perceptron).
#
# hidden: hidden-layer sizes, passed through to deepnet::nn.train().
# output: output-unit type for deepnet::nn.train(); when 'softmax', the
#         class labels seen at the first fit are remembered so that
#         prediction() can map the argmax column back to a label.
# ...   : forwarded to deepnet::nn.train() (e.g. numepochs, learningrate).
#
# Returns its own environment; mutable state ('model', 'labels') lives in
# the closure and is updated with <<-.
MLP <- function(hidden, output, ...)
{
model <- NULL
labels <- NULL
# Train the network on data$x / data$y.  The first call trains from
# scratch; later calls pass the stored weights as initB/initW, which
# (per deepnet::nn.train) continues training from them -- i.e.
# incremental fitting.
fit <- function(data)
{
data$y <- format_y(data$y)
if (is.null(model)) {
model <<- deepnet::nn.train(data$x, data$y,
hidden=hidden, output=output, ...)
if (output=='softmax') labels <<- colnames(data$y)
} else {
model <<- deepnet::nn.train(data$x, data$y,
initB=model$B, initW=model$W,
hidden=hidden, output=output, ...)
}
# remove unnecessary big attributes, to make the object size small
# (NOTE(review): assumes nn.predict does not need post/pre/e/vW/vB --
# confirm against the deepnet version in use)
model$post <<- NULL
model$pre <<- NULL
model$e <<- NULL
model$vW <<- NULL
model$vB <<- NULL
}
# Convert y into the one-hot matrix layout deepnet expects.
format_y <- function(y)
{
# convert y into one-hot format, if it is not a matrix
# already matrix, return as is
if (is.matrix(y)) return(y)
# currently a vector, convert to a factor
# use the predefined labels if any (keeps column order stable across
# incremental fits)
if (is.vector(y)) {
if (is.null(labels)) {
y <- factor(y)
} else {
y <- factor(y, levels=labels)
}
}
nnet::class.ind(y)
}
# Predict for new rows x: the raw network output, or for softmax output
# the label of the highest-scoring column per row.
prediction <- function(x, ...)
{
p <- deepnet::nn.predict(model, x)
if (output=='softmax') labels[max.col(p)] else p
}
self <- environment()
self
}
# Minimal pipeline: chains the given step objects (environments exposing
# $fit/$transform, plus $prediction on the final step, which is treated
# as the model).
#
# ...: step objects, applied in the order given.
Pipeline <- function(...)
{
  steps <- list(...)
  n_steps <- length(steps)  # loop-invariant, hoisted out of the loops

  # Fit every step in order, feeding each one the data as transformed by
  # its predecessors.
  #
  # x, y : training inputs/targets, wrapped into list(x=, y=).
  # incr : when TRUE the final step's fit() is skipped.
  #        NOTE(review): this looks inverted for "incremental" training
  #        (one would expect the *model* to be refit incrementally and
  #        the already-fitted preprocessors to be skipped, which their
  #        is.null(model) guards would make a no-op anyway) -- confirm
  #        the intended semantics with the call sites before changing it.
  fit <- function(x, y, incr=FALSE)
  {
    data <- list(x=x, y=y)
    for (i in seq_along(steps))
    {
      # '||' (scalar, short-circuit) instead of '|': both operands are
      # length-1 here.
      if (!incr || i != n_steps) steps[[i]]$fit(data)
      if (i != n_steps) data <- steps[[i]]$transform(data)
    }
  }

  # Run x through every transform and return the final step's prediction.
  prediction <- function(x, ...)
  {
    data <- list(x=x, y=NULL)
    for (i in seq_along(steps))
    {
      if (i != n_steps) {
        data <- steps[[i]]$transform(data)
      } else {
        return(steps[[i]]$prediction(data$x, ...))
      }
    }
  }

  self <- environment()
  self
}
|
d93d56a144e83027027cef51778adeaa275d4045
|
a5394f0a48914e5278f7172648af749e9cd60005
|
/R/ens.rpart.R
|
cfa1f56e414ab0989c9751f39376debd915f6d3a
|
[] |
no_license
|
cran/ensemble
|
ff086efb0d873ad397f84bd2d0e10c80ddb9d25d
|
f7af10dfd3df1fa0acae260bca2c4aa9d3139039
|
refs/heads/master
| 2020-05-30T18:02:42.316016
| 2000-12-07T00:00:00
| 2000-12-07T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,857
|
r
|
ens.rpart.R
|
# ens.rpart: a modified copy of rpart() used by the 'ensemble' package to
# grow a single classification/regression tree.  It builds the model
# frame from the formula, dispatches to the method-specific rpart
# initialiser, calls rpart's compiled routines ("s_to_rp", "s_to_rp2",
# "rplabel") to fit the tree and retrieve its structure, and assembles a
# standard object of class "rpart".
#
# NOTE(review): depends on rpart internals (na.rpart, rpart.matrix,
# rpart.control, the rpart.<method> init functions and the C entry
# points); only valid against the matching rpart version.
ens.rpart <-
function (formula, data = NULL, weights, na.action = na.rpart,
method, model = FALSE, x = FALSE, y = TRUE, parms, control,
...)
{
call <- match.call()
# Build the model frame, unless a pre-built frame was passed in through
# 'model' (an rpart convention: model may be a data.frame).
if (is.data.frame(model)) {
m <- model
model <- FALSE
}
else {
m <- match.call(expand = FALSE)
m$model <- m$method <- m$control <- NULL
m$x <- m$y <- m$parms <- m$... <- NULL
m$na.action <- na.action
m[[1]] <- as.name("model.frame.default")
m <- eval(m, sys.frame(sys.parent()))
}
Terms <- attr(m, "terms")
if (any(attr(Terms, "order") > 1))
stop("Trees cannot handle interaction terms")
Y <- model.extract(m, "response")
# Choose the fitting method from the response type when not supplied.
if (missing(method)) {
if (is.factor(Y))
method <- "class"
else if (is.Surv(Y))
method <- "exp"
else if (is.matrix(Y))
method <- "poisson"
else method <- "anova"
}
# Normalise 'method' via partial matching; "exp" shares the Poisson
# C code, hence method.int 4 is folded into 2 below.
method.int <- pmatch(method, c("anova", "poisson", "class",
"exp"))
if (is.na(method.int))
stop("Invalid method")
method <- c("anova", "poisson", "class", "exp")[method.int]
if (method.int == 4)
method.int <- 2
# Default to unit case weights when none were given.
wt <- model.extract(m, "weights")
if (length(wt) == 0)
wt <- rep(1, nrow(m))
offset <- attr(Terms, "offset")
# Call the method-specific initialiser rpart.<method>(Y, offset, parms,
# wt); the empty argument below is deliberate (parms missing).
if (missing(parms))
init <- (get(paste("rpart", method, sep = ".")))(Y, offset,
, wt)
else init <- (get(paste("rpart", method, sep = ".")))(Y,
offset, parms, wt)
Y <- init$y
# Numeric design matrix; 'cats' holds the number of levels per
# categorical column (0 for continuous ones).
X <- rpart.matrix(m)
nobs <- nrow(X)
xlevels <- attr(X, "column.levels")
cats <- rep(0, ncol(X))
if (!is.null(xlevels)) {
cats[as.numeric(names(xlevels))] <- unlist(lapply(xlevels,
length))
}
# Merge user-supplied 'control' entries over the rpart.control(...)
# defaults built from '...'.
controls <- rpart.control(...)
if (!missing(control))
controls[names(control)] <- control
# Cross-validation groups: none, random k-fold, or user-supplied
# per-observation group labels.
xval <- controls$xval
if (is.null(xval) || (length(xval) == 1 && xval == 0)) {
xgroups <- 0
xval <- 0
}
else if (length(xval) == 1) {
xgroups <- sample(rep(1:xval, length = nobs), nobs, replace = FALSE)
}
else if (length(xval) == nobs) {
xgroups <- xval
xval <- length(unique(xgroups))
}
else stop("Invalid value for xval")
# Flag ordered-factor columns: the C code treats them as continuous,
# and their splits are converted back to categorical form further down.
tfun <- function(x) {
if (is.matrix(x))
rep(is.ordered(x), ncol(x))
else is.ordered(x)
}
isord <- unlist(lapply(m[attr(Terms, "term.labels")], tfun))
# First C pass: grows the tree and reports its sizes back through the
# named components (n = nodes, nvarx = splits, method = cp-table rows).
rpfit <- .C("s_to_rp", n = as.integer(nobs), nvarx = as.integer(ncol(X)),
ncat = as.integer(cats * !isord), method = as.integer(method.int),
as.double(unlist(controls)), parms = as.double(init$parms),
as.integer(xval), as.integer(xgroups), as.double(t(init$y)),
as.double(X), as.integer(!is.finite(X)), error = character(1),
wt = as.double(wt), NAOK = TRUE)
# n == -1 is the C routine's failure sentinel.
if (rpfit$n == -1) {
return(NA)
}
nodes <- rpfit$n
nsplit <- rpfit$nvarx
numcp <- rpfit$method
ncat <- rpfit$ncat[1]
numresp <- init$numresp
if (nsplit == 0)
stop("No splits found")
# cp table has 5 columns with cross-validation, 3 without.
cpcol <- if (xval > 0)
5
else 3
if (ncat == 0)
catmat <- 0
else catmat <- matrix(integer(1), ncat, max(cats))
# Second C pass: copies the fitted tree (cp table, split and node
# matrices) into R storage sized from the first pass.
rp <- .C("s_to_rp2", as.integer(nobs), as.integer(nsplit),
as.integer(nodes), as.integer(ncat), as.integer(cats *
!isord), as.integer(max(cats)), as.integer(xval),
which = integer(nobs), cptable = matrix(double(numcp *
cpcol), nrow = cpcol), dsplit = matrix(double(1),
nsplit, 3), isplit = matrix(integer(1), nsplit, 3),
csplit = catmat, dnode = matrix(double(1), nodes, 3 +
numresp), inode = matrix(integer(1), nodes, 6))
tname <- c("<leaf>", dimnames(X)[[2]])
if (cpcol == 3)
temp <- c("CP", "nsplit", "rel error")
else temp <- c("CP", "nsplit", "rel error", "xerror", "xstd")
dimnames(rp$cptable) <- list(temp, 1:numcp)
# One row per split, labelled by the splitting variable.
splits <- matrix(c(rp$isplit[, 2:3], rp$dsplit), ncol = 5,
dimnames = list(tname[rp$isplit[, 1] + 1], c("count",
"ncat", "improve", "index", "adj")))
index <- rp$inode[, 2]
# Re-express splits on ordered factors as categorical splits: one row
# of left/right direction codes per ordered split, appended to csplit.
nadd <- sum(isord[rp$isplit[, 1]])
if (nadd > 0) {
newc <- matrix(integer(1), nadd, max(cats))
cvar <- rp$isplit[, 1]
indx <- isord[cvar]
cdir <- splits[indx, 2]
ccut <- floor(splits[indx, 4])
splits[indx, 2] <- cats[cvar[indx]]
splits[indx, 4] <- ncat + 1:nadd
for (i in 1:nadd) {
newc[i, 1:(cats[(cvar[indx])[i]])] <- -1 * as.integer(cdir[i])
newc[i, 1:ccut[i]] <- as.integer(cdir[i])
}
if (ncat == 0)
catmat <- newc
else catmat <- rbind(rp$csplit, newc)
ncat <- ncat + nadd
}
else catmat <- rp$csplit
# Ask the C code for printable left/right split labels per node
# (elements 6:7 of the returned list are cutleft/cutright).
rplab <- .C("rplabel", as.integer(nodes), as.integer(index),
splits[index, c(2, 4)], as.integer(ncat), as.integer(catmat),
cutleft = character(nodes), cutright = character(nodes))[6:7]
# Leaves carry index 0; map them to row 1 only so indexing stays valid,
# and give them variable 0 (i.e. "<leaf>").
temp <- ifelse(index == 0, 1, index)
svar <- ifelse(index == 0, 0, rp$isplit[temp, 1])
# Assemble the per-node 'frame' exactly as rpart does.
frame <- data.frame(row.names = rp$inode[, 1], var = factor(svar,
0:ncol(X), tname), n = rp$inode[, 5], wt = rp$dnode[,
3], dev = rp$dnode[, 1], yval = rp$dnode[, 4], complexity = rp$dnode[,
2], ncompete = pmax(0, rp$inode[, 3] - 1), nsurrogate = rp$inode[,
4])
frame$splits <- matrix(unlist(rplab), ncol = 2, dimnames = list(NULL,
c("cutleft", "cutright")))
# Classification only: prior-adjusted class probabilities (yprob) and
# per-class node counts (yval2), columns named by class level.
if (method == "class") {
numclass <- init$numresp - 1
temp <- rp$dnode[, -(1:4)] %*% diag(init$parms[1:numclass] *
sum(init$counts)/init$counts)
frame$yprob <- matrix(temp/c(temp %*% rep(1, numclass)),
ncol = numclass, dimnames = list(NULL, init$ylevels))
frame$yval2 <- matrix(rp$dnode[, -(1:4)], ncol = numclass,
dimnames = list(NULL, init$ylevels))
}
else if (method == "poisson" | method == "exp")
frame$yval2 <- rp$dnode[, 5]
# Build the returned "rpart" object; optional members follow the
# model/x/y arguments.
ans <- list(frame = frame, where = structure(rp$which, names = row.names(m)),
call = call, terms = Terms, cptable = t(rp$cptable),
splits = splits, method = method, parms = init$parms,
control = controls)
# csplit is stored shifted by +2 (C codes -1/0/1 -> R codes 1/2/3).
if (ncat > 0)
ans$csplit <- catmat + 2
if (model) {
ans$model <- m
if (missing(y))
y <- FALSE
}
if (y)
ans$y <- Y
if (x) {
ans$x <- X
ans$wt <- wt
}
ans$control <- controls
if (!is.null(xlevels))
attr(ans, "xlevels") <- xlevels
if (method == "class")
attr(ans, "ylevels") <- init$ylevels
# Propagate any NA action recorded on the model frame.
na.action <- attr(m, "na.action")
if (length(na.action))
ans$na.action <- na.action
class(ans) <- c("rpart")
ans
}
|
0b281fe4ec1fc4a9b38f6359b6a7c6312fbca33b
|
e216da99e347be74e0d2ef0021c50db3c7d5de50
|
/R/mouseHumanConversion.R
|
4169a5a11cc128be4ced752477ca2f15c333c6c7
|
[] |
no_license
|
vincent-van-hoef/myFunctions
|
e6afc3b997d7593a24a4841e671a084bc2897474
|
0299622df24169a1881a38a5dc799e645fab6926
|
refs/heads/master
| 2021-05-05T00:38:46.894937
| 2018-07-25T11:41:45
| 2018-07-25T11:41:45
| 119,551,523
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 553
|
r
|
mouseHumanConversion.R
|
#' Convert mouse gene symbols to human gene symbols
#'
#' Queries Ensembl through biomaRt and maps MGI (mouse) symbols to their
#' HGNC (human) counterparts.
#'
#' @param x A character vector of mouse (MGI) gene symbols.
#' @return A character vector of unique human (HGNC) gene symbols.
#' @export
convertMouseGeneList <- function(x){
  ensembl_human <- useMart("ensembl", dataset = "hsapiens_gene_ensembl")
  ensembl_mouse <- useMart("ensembl", dataset = "mmusculus_gene_ensembl")
  mapping <- getLDS(attributes = c("mgi_symbol"), filters = "mgi_symbol", values = x , mart = ensembl_mouse, attributesL = c("hgnc_symbol"), martL = ensembl_human, uniqueRows=TRUE)
  # Second column holds the linked human symbols; deduplicate and return.
  unique(mapping[, 2])
}
|
749c08c7a2e4f01f47942d90f31753323eb94999
|
d8a28f2f5a8d532da596a433aa75348187befa76
|
/functions/func_preprocess.R
|
b36a171ed19338d05684ebfa290c7e969f594ef8
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
nreinicke/EVIPro-Fleet-Gen
|
9340e5139024a7e1997ec5c55e89df90a946126d
|
3892d9eefeaa57801ff3daa12b18fa2383d74220
|
refs/heads/master
| 2023-01-10T15:45:36.795840
| 2020-07-10T19:20:13
| 2020-07-10T19:20:13
| 283,615,589
| 1
| 0
|
NOASSERTION
| 2020-07-29T22:34:45
| 2020-07-29T22:34:44
| null |
UTF-8
|
R
| false
| false
| 2,575
|
r
|
func_preprocess.R
|
# Author: Schatz Energy Research Center
# Original Version: Micah Wright
# Date: March 01, 2019
# Description: Processes and saves the evi sessions and time series load profile for all ambient temperatures.
# Version History
# 2019-09-12 Max: minor edits, run on SUV data
# 2020-05-04 Jerome: turn into formal function for use in .Rmd in order to formalize and document all data generation steps
# load necessary packages
library(future.apply)
library(data.table)
# source functions
source("functions/func_loadEVIPro.R") # loads EVIPro data and stores in a single data table
source("functions/func_calcBaseEVILoad.R") # pre-calculates load profile for all unique_vid in evi_raw
# Define function
#
# Processes raw NREL EVIPro data for each ambient temperature (in
# parallel via future_lapply) and saves, per temperature:
#   - the raw charging-session data table   -> outputdir_eviraw/<temp>.rds
#   - the derived time-series load profiles -> outputdir_loadprofile/<temp>.rds
preprocess_NREL_data <- function(temp_list, # vector of character strings of ambient temperature values (i.e. c("20C","30C","35C"))
                                 inputdir_evipro, # character string of directory containing files provided by NREL
                                 inputdir_chts, # character string of file path to chts_dvmt.csv file provided by NREL
                                 outputdir_eviraw, # character string of directory to save raw evi .rds files
                                 outputdir_loadprofile, # character string of directory to save raw load profile .rds files
                                 vmt_bin_size, # integer of vmt bin size for load_EVIPro() function
                                 loadprofile_timestep) { # float of time step in decimal hours for calcBaseEVILoad() function
  # Create the output directories once, up front -- previously this was
  # re-checked inside every parallel worker iteration.
  if (!dir.exists(outputdir_eviraw)) {
    dir.create(outputdir_eviraw, recursive = TRUE)
  }
  if (!dir.exists(outputdir_loadprofile)) {
    dir.create(outputdir_loadprofile, recursive = TRUE)
  }
  # Parallelize across temperatures.  seq_along() is safe for an empty
  # temp_list, unlike the original seq(1:length(temp_list)).
  future_lapply(seq_along(temp_list), function(i) {
    # Load charging session data into a data table.
    evi_raw <- load_EVIPro(inputdir_evipro,
                           temp_list[i],
                           inputdir_chts,
                           vmt_bin_size)
    # Save charging session data table.
    saveRDS(evi_raw, paste0(outputdir_eviraw,
                            temp_list[i],
                            ".rds"))
    # Create and save the load profiles for this temperature.
    evi_load_profiles <- calcBaseEVILoad(evi_raw, loadprofile_timestep)
    saveRDS(evi_load_profiles, paste0(outputdir_loadprofile,
                                      temp_list[i],
                                      ".rds"))
  })
}
|
9525362f5ecfc818f87022c2139a8e9f3a9cb880
|
8f9f5362319ba7c15cc92d7a2acd9988cc8607f1
|
/R/vis.R
|
4bd762f5fc864654f4c8f222393ebd676c41a725
|
[
"MIT"
] |
permissive
|
mc30/disnap
|
2afc2cc7b7289fa50beb3041d59a4c13850b2fa3
|
66433b6f62cae28a923b81f4c27a9a50ae06d0ae
|
refs/heads/master
| 2020-05-19T04:03:13.219263
| 2020-01-16T10:07:27
| 2020-01-16T10:07:27
| 184,816,176
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,094
|
r
|
vis.R
|
###############################################
# Visualisation
###############################################
#' @title Plot PPP objects
#' @description Plot all PPP objects from the provided list, side by side
#' in a single row.
#'
#' @param lst Named list of PPP objects; names are used as plot titles.
#'
#' @return Invisibly returns \code{NULL}; called for its plotting side
#' effect.
#'
#' @author Mikhail Churakov
#'
#' @export
plotPPPlist <- function(lst) {
  old.par <- par(mfrow = c(1, length(lst)))
  # Restore the graphics state even if one of the plot() calls errors.
  on.exit(par(old.par), add = TRUE)
  for (i in seq_along(lst)) {
    plot(lst[[i]], main = names(lst)[i])
  }
  invisible(NULL)
}
#' @title Plot density of PPP objects
#' @description Plots density of all PPP objects from the provided list:
#' one row with the overall density of each pattern, and one row with the
#' density of its positively marked points only.
#'
#' @param lst Named list of marked PPP objects.
#'
#' @return Invisibly returns \code{NULL}; called for its plotting side
#' effect.
#'
#' @author Mikhail Churakov
#'
#' @export
densityPPPlist <- function(lst) {
  # require(spatstat)
  old.par <- par(mfrow = c(2, length(lst)))
  # Restore the graphics state even if a plot() call errors.
  on.exit(par(old.par), add = TRUE)
  for (i in seq_along(lst)) {
    plot(density(lst[[i]]), main = "All tested herds")
  }
  for (i in seq_along(lst)) {
    pitem <- lst[[i]]
    plot(density(pitem[which(pitem$marks > 0)]), main = paste0("Positive herds in: ", names(lst)[i])) # TODO: correct for factors
  }
  invisible(NULL)
}
|
c11324e1ac83667b9fc56d0ac7c5616ac1736a1d
|
fd9f494cd9746cf6f4fefbe57e6eee8fb9f38a13
|
/inst/examples/Discrim_Install.r
|
6e1d107e48070291fb0c2edbc0018e2844f89b44
|
[] |
no_license
|
PingYangChen/DiscrimOD
|
30936fa300b90f4f5aec65190584cac9fa921fae
|
fbc04af687d80fa8b304dcf7550a579ac846794f
|
refs/heads/master
| 2022-02-05T20:24:10.014705
| 2022-01-23T06:06:50
| 2022-01-23T06:06:50
| 92,010,316
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 696
|
r
|
Discrim_Install.r
|
# File Name: Discrim_Install.r
# Description:
#   To use the **DiscrimOD** package, one needs the R packages 'devtools',
#   'Rcpp' and 'RcppArmadillo' installed in advance.  This script installs
#   any of those that are missing, then installs **DiscrimOD** itself from
#   GitHub.  Please copy the code below and run it in R.
# ----------------------------------------------------------------------------
# Install required R packages, skipping the ones already present so the
# script is idempotent and safe to re-run.
pkgRequired <- c("devtools", "Rcpp", "RcppArmadillo")
pkgMissing <- setdiff(pkgRequired, rownames(installed.packages()))
if (length(pkgMissing) > 0) install.packages(pkgMissing)
# Install the **DiscrimOD** package from GitHub
devtools::install_github("PingYangChen/DiscrimOD")
# Try if it is installed successfully
library(DiscrimOD)
?DiscrimOD
|
a2e8cc4641b960389e91cb9dde50329844be5069
|
4b711eacdc4b76b14c3ebc08cc582ed9a4e5ce84
|
/AndroidPrediction/Analysis.R
|
35c9de02d8160b7762ac849887acd3ed9203470a
|
[] |
no_license
|
FRSB/AndroidDataCollection
|
d0d6172974ea9e469e41ee1772af9dd8c86a0e6e
|
40296d906b22d8b3bf66a78f1b23a161427918fa
|
refs/heads/master
| 2021-08-05T10:25:54.236822
| 2021-02-04T07:52:54
| 2021-02-04T07:52:54
| 8,315,037
| 2
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,370
|
r
|
Analysis.R
|
# Analysis.R: evaluates Markov-chain models (orders 1-3, optionally a
# hidden Markov model) for predicting the next cell tower a phone will
# connect to, using data collected by the AndroidDataCollection app.
# Produces two PDFs: cell_locations.pdf and prediction_accuracy.pdf.
#
# NOTE(review): rm(list = ls()) and the relative .libPaths("lib") make
# this script unsafe to source from other code; acceptable only as a
# standalone analysis entry point.
rm(list = ls())
# install required libraries and packages
.libPaths("lib") #remove this line to use your personal library instead
#install.packages("hash")
#install.packages("HMM")
# load required libraries and packages
library(hash)
library(HMM)
# project-local scripts providing the model, transformation, evaluation
# and location helpers used below
source("MarkovChains.R")
source("DataTransformation.R")
source("Evaluation.R")
source("Locations.R")
# generate dummy data set (note: 'cells' is overwritten by the real data
# read below; this block is kept only for offline experimentation)
cellIds = c(1,1,1,1,1,1,1,2,4,1,3,4,1,2,4,1,4,1,2,3,1,2,3,4,1,2,3,4,1,3,4,1,2,4,1,4,1,2,3,1,2,3,4,1,2,3,4,1,3,4,1,2,4,1,4,1,2,3,1,2,3,4,1,2,3,4,1,3,4,1,2,4,1,4,1,2,3,1,2,3,4,1,2,3,4,1,3,4,1,2,4,1,4,1,2,3,1,2,3,4,1,2,3,4,1,3,4,1,2,4,1,4,1,2,3,1,2,3,4,1,2,3,4,1,3,4,1,2,4,1,4,1,2,3,1,2,3,4)
cells = rep("cell", length(cellIds))
cells = paste(cells, cellIds)
# read real data (semicolon-separated export of the collected cell IDs)
data = read.csv("pp_sb_data.csv", sep=";")
cells = data$cellid
# plot cell locations (estimateCellLocations comes from Locations.R)
cellLocations = estimateCellLocations(data)
pdf("cell_locations.pdf",title="Cell Locations",width=12)
plot(y=cellLocations$latitude, x=cellLocations$longitude,ylab="Latitude",xlab="Longitude", type="p", pch=16, xaxt="n")
# draw the background rectangle over the full plot region, then redraw
# the points on top of it
rect(par("usr")[1], par("usr")[3], par("usr")[2], par("usr")[4], col = "lemonchiffon1")
points(y=cellLocations$latitude, x=cellLocations$longitude, pch=16)
axis(1, seq(10,13,0.2))
par(xaxp = c(10,13,15))
grid(col="gray35")
dev.off()
# data transformation (helpers from DataTransformation.R): clean the cell
# sequence, encode cells as integer states, then build sliding windows
cells = removeNA(cells)
cells = removeDuplicateConsecutiveStates(cells)
#cells = removeInfrequentCells(cells)
cellData = encodeCells(cells)
cellIds = cellData[[1]]
windowedCellIds = applyWindow(cellIds)
# apply growing window evaluation: cumulative prediction accuracy per
# model order (a1..a3); the HMM variant (a4) is disabled
a1=applyGrowingWindowValidation(data=windowedCellIds, inferencer=FirstOrderMarkovChain.inferTransitionTensor, predictor=FirstOrderMarkovChain.predictStates, evaluator=calculateAccuracy, warmUp=1)
a2=applyGrowingWindowValidation(data=windowedCellIds, inferencer=SecondOrderMarkovChain.inferTransitionTensor, predictor=SecondOrderMarkovChain.predictStates, evaluator=calculateAccuracy, warmUp=1)
# NOTE(review): windowedCellIds[1:nrow(windowedCellIds)] appears to be a
# no-op subset of the full data -- confirm whether a row limit was meant
a3=applyGrowingWindowValidation(data=windowedCellIds[1:nrow(windowedCellIds)], inferencer=ThirdOrderMarkovChain.inferTransitionTensor, predictor=ThirdOrderMarkovChain.predictStates, evaluator=calculateAccuracy, warmUp=1)
#a4=applyGrowingWindowValidation(data=windowedCellIds, inferencer=HiddenMarkovModel.infer, predictor=HiddenMarkovModel.predictStates, evaluator=calculateAccuracy, warmUp=1)
# plot labels are German: x = "number of observations",
# y = "cumulative prediction accuracy", legend = "MC of order 1/2/3"
pdf("prediction_accuracy.pdf", title="Prediction Accuracy", width=9)
plot(a1, type="l", col="blue", xlab="Anzahl Beobachtungen", ylab="Kumulierte Vorhersagegenauigkeit", ylim=c(0,0.5), lwd=2)
lines(a2, col="darkmagenta", lwd=2)
lines(a3, col="darkgreen", lwd=2)
legend(440, 0.1, c("MK 1. Ordnung", "MK 2. Ordnung", "MK 3. Ordnung"), lty=c(1,1), lwd=c(2,2), col=c("blue", "darkmagenta", "darkgreen"))
dev.off()
# retained scratch work: single-shot inference/prediction on the data
# infer dummy data
#numStates = length(unique(c(windowedCellIds$tNext,windowedCellIds[1,])))
#t1 = FirstOrderMarkovChain.inferTransitionTensor(windowedCellIds, numStates)
#t2 = SecondOrderMarkovChain.inferTransitionTensor(windowedCellIds, numStates)
#t3 = ThirdOrderMarkovChain.inferTransitionTensor(windowedCellIds, numStates)
#tHmm = HiddenMarkovModel.infer(windowedCellIds, numStates)
# apply models on dummy data
#p1 = FirstOrderMarkovChain.predictStates(t1,windowedCellIds)
#p2 = SecondOrderMarkovChain.predictStates(t2,windowedCellIds)
#p3 = ThirdOrderMarkovChain.predictStates(t3,windowedCellIds)
#pHmm = HiddenMarkovModel.predictStates(tHmm,windowedCellIds)
# count number of right predictions
#calculateAccuracy(p1)
#calculateAccuracy(p2)
#calculateAccuracy(p3)
#calculateAccuracy(pHmm)
# apply cross validation
#applyNFoldCrossValidation(n=10, method="random", data=windowedCellIds, inferencer=FirstOrderMarkovChain.inferTransitionTensor, predictor=FirstOrderMarkovChain.predictStates, evaluator=calculateAccuracy)
#applyNFoldCrossValidation(n=10, method="random", data=windowedCellIds, inferencer=SecondOrderMarkovChain.inferTransitionTensor, predictor=SecondOrderMarkovChain.predictStates, evaluator=calculateAccuracy)
#applyNFoldCrossValidation(n=10, method="random", data=windowedCellIds, inferencer=ThirdOrderMarkovChain.inferTransitionTensor, predictor=ThirdOrderMarkovChain.predictStates, evaluator=calculateAccuracy)
#applyNFoldCrossValidation(n=10, method="random", data=windowedCellIds, inferencer=HiddenMarkovModel.infer, predictor=HiddenMarkovModel.predictStates, evaluator=calculateAccuracy)
|
55a5cef162b6d7c823df6d16ef6aad9998c514b6
|
c55835fb6a8930fed3483260d63ac86078058986
|
/preamble.r
|
222e564be80e2a43b61b4f2be217411395b23b38
|
[] |
no_license
|
ppreshant/R_chip_analysis
|
bc4244fbf7ababd78673387812f591448103f250
|
d4098b49b586cfb4ca6cee2dbf73145ac5a19fb4
|
refs/heads/master
| 2021-01-19T05:27:07.458828
| 2016-10-07T21:33:22
| 2016-10-07T21:33:22
| 62,277,637
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,207
|
r
|
preamble.r
|
# preamble.r: shared setup for the protein-array comparison analysis.
# Sets the working directory, locates the .gpr scan files for one kinase,
# loads plotting libraries, sources the analysis scripts, and builds the
# checklists / merged duplicate-array data used by the other scripts.
#
# NOTE(review): the hard-coded absolute paths (E:/...) tie this script to
# one machine; adjust them before running elsewhere.
setwd("E:/R files_array comparision") # change it to the dropbox location of the R files_array comparision folder
# give the path of the input files (it should be the same unless changed)
folo <- 'results/Kinase/F_late_2dayexp' # for Fgfr2 arrays
# folo <- 'results/Kinase/J_6dayexp' # for Jak2 arrays
# reads the list of all 12 files in the folder (Control, WT, 4 mutants all in duplicate = 6 * 2 = 12)
fl <- list.files(folo, pattern = '*.gpr$', recursive= FALSE, include.dirs = TRUE)
# defining path for output of figures etc.
oufl <- 'results/latest plots/min of 4 spots/'
# loading ggplot2 and cowplot - to install the packages use install.packages("ggplot2") and install.packages("cowplot")
library(ggplot2)
library(cowplot)
# sourcing the scripts being used (they define make_checklist, all_min4,
# make_sctr, graphitall, ... used below)
source('E:/R files_array comparision/readarray.r') # for the radioactive assay -
source('E:/R files_array comparision/check_hits_g.r')
source('E:/R files_array comparision/make_scatterplots.r')
# source('E:/R files_array comparision/simple_read.r') # for pilot 4,5 arrays - not tested to work
# Fl <- lapply(fl,make_checklist)
# fc <- make_sctr(6,3,Fl)
# C <- make_checklist(c)
# J <- make_checklist(j)
# jc <- make_sctr(1,2,list(J,C))
Flf <- lapply(fl,make_checklist) # Reads all files in the folder, makes a checklist column for each file- for known hit(from Heng's paper), known Protein protein interaction, etc.
Fl2f <- all_min4() # Takes all 12 files in the folder and Merges data from the 2 duplicate arrays retaining the minimum of the 2 values for each spot
# draws all the required scatterplots
# a <- graphitall(Fl2,'F')
# --------------------IGNORE BELOW THIS LINE-------------------------------------------
# retained scratch work: earlier input-file choices and plotting snippets
# f <- 'F_100181_pmt-950_5um.gpr'
# con <- 'C_100182_pmt-950_5um.gpr'
# g <- 'gst/F_pilot1_gst.gpr'
# c <- "pilot4/C_pilot4_9049232.gpr"
# f <- "pilot4/F_pilot4_9049233.gpr"
# c <- 'pilot5/C_pilot5.gpr'
# j <- 'pilot5/J_pilot5.gpr'
# F <- hits(f)
# c <- 'C_F1.gpr'
# f <- 'F2.gpr'
#
# folo <- 'C:/Dropbox (Zhu Lab)/prashant_zhu lab/results/Huprot scans/Kinase/J'
# fl <- list.files('results/F/w photoshop', pattern = '*.gpr$', recursive= FALSE, include.dirs = TRUE)
#fl <- list.files('results/pilot3', pattern = '*.gpr$', recursive= F, include.dirs = TRUE)
# f2 <- 'F_100185_pilot2.gpr'
# c2 <- 'C_100183_pilot2.gpr'
# source('E:/R files_array comparision/simpler.r')
# Fl2 <- all_merg()
# # Al <- rbind(cbind(x[[1]], Kinase = 'Control'),cbind(x[[2]], Kinase = 'Fgfr2'),cbind(x[[3]], Kinase = 'F1'),cbind(x[[4]], Kinase = 'F2'),cbind(x[[5]], Kinase = 'F3'),cbind(x[[6]], Kinase = 'F4'))
# # p <- ggplot(data = Alf, aes(B22.Median, fill = Kinase, alpha = .5)) + geom_density() + theme_classic() + ggtitle('Full background density')
# Flr2 <- all_rmcntrl(Fl2)
# # plot on multiple rows - for sig spots - int comparision
# # plt <- ggplot(data = Als, aes(Name, `Kinase Intensity`, fill = Kinase)) + geom_bar(stat = 'identity', position = 'dodge') + ggtitle('Significant spots : Intensity comparision') + scale_fill_brewer(palette = 'Dark2') + theme(axis.text = element_text(angle = 90)) + facet_wrap(~dum, scales = 'free')
# # > plt + theme(strip.background = element_blank(), strip.text.x = element_blank())
|
a36d784487b79a124f4ad5064ed85ea2bd5e560e
|
3618885dabc16828de4fa1175fb0dd17a153a060
|
/day013.R
|
c0655ea204f821acf0049075478f0e18d8c007b9
|
[] |
no_license
|
nhoteling/AdventOfCode2020
|
9044f41a603422a9ff655d8ee38bb45003de7e0c
|
68f1ee65bc14ed921f2927751c7581ecffa82104
|
refs/heads/main
| 2023-02-17T02:55:23.079038
| 2021-01-13T20:07:51
| 2021-01-13T20:07:51
| 319,969,533
| 0
| 0
| null | 2021-01-13T20:07:52
| 2020-12-09T13:45:33
|
HTML
|
UTF-8
|
R
| false
| false
| 967
|
r
|
day013.R
|
# Advent of Code 2020, day 13, part 1:
# find the first bus (id == period) departing at or after timestamp `tm`,
# and print id * waiting time.
library(stringr)
fdata <- readLines("data/day013.txt")
tm <- as.integer(fdata[1])                 # earliest possible departure time
ids0 <- unlist(str_split(fdata[2],pattern=","))
ids1 <- as.integer(ids0[ ids0 != "x" ])    # drop out-of-service ("x") entries
len <- length(ids1)
# For each bus, list its departure times strictly after `tm`
d <- lapply(seq_len(len), function(i) {
  v <- seq.int(from=0,to=tm+ids1[i],by=ids1[i])
  df <- data.frame(id=ids1[i], tm= v[ v>tm ])
})
df <- do.call(rbind,d)
bus_id <- df$id[ df$tm == min(df$tm) ]     # bus with the earliest departure
tm_wait <- (min(df$tm)-tm)
print(paste(bus_id,"*",tm_wait,"=",bus_id*tm_wait))
#
# Part 2 (from SelbyDavid.com)
#
buses <- as.integer(ids0)                  # keep NAs so positions encode offsets
# Solve one step of a sieve-based simultaneous-congruence search: scan the
# arithmetic progression a1, a1 + n1, a1 + 2*n1, ... and return the first
# member congruent to a2 modulo n2. `maxit` bounds the scan length; if no
# member matches within the bound, the first candidate is returned
# (which.max of an all-FALSE vector is 1).
sieve <- function(a1, a2, n1, n2, maxit = 1e5) {
  candidates <- a1 + n1 * seq.int(0, maxit)
  first_hit <- which.max(candidates %% n2 == a2 %% n2)
  candidates[first_hit]
}
# Part 2: find the earliest timestamp t such that the bus at position k of
# the input departs at t + k, for every in-service bus. Solved by folding
# one congruence at a time with sieve() above (successive sieving / CRT);
# assumes the bus ids are pairwise coprime — TODO confirm for general input.
find_timetable <- function(buses) {
  # Required residues: bus at position k must depart at t + k, i.e. t == -k (mod id)
  offsets <- -(seq_along(buses) - 1)[!is.na(buses)] #a — residues
  buses <- buses[!is.na(buses)] #n — moduli
  x <- x <- offsets[1]
  # Fold in one bus at a time; the step grows to the product of the moduli
  # handled so far. NOTE(review): 2:length(buses) misbehaves if only one
  # in-service bus is present (2:1 counts down) — fine for real puzzle input.
  for (i in 2:length(buses))
    x <- sieve(x, offsets[i], prod(head(buses, i-1)), buses[i])
  x
}
print(paste(format(find_timetable(buses), sci=FALSE)))
|
7dd5aa475598153e114cefe86bab630d064f41a2
|
f0c51db62ce23cbafec52f447cee4a8837023be8
|
/Solution to Medium test by Xing.R
|
7aa8fbe91a3a287ecafdc563b4ed869d3cdca319
|
[] |
no_license
|
XingXiong/gsoc2017
|
e665ba2db25bc8f1a14e24f25568605af47916a3
|
c58aa1ddd81db527be1a4c38a06ac697e1863950
|
refs/heads/master
| 2021-01-22T23:58:26.996677
| 2017-03-31T05:26:10
| 2017-03-31T05:26:10
| 85,685,068
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,658
|
r
|
Solution to Medium test by Xing.R
|
###############
# Almost the same as easy test2,I get ".SN li" from the website structure.
#
# Scrape a funet.fi Lepidoptera species-list page, split each listed name
# into genus / species / subspecies / author / year by regex surgery, and
# write two CSVs (components table, and the full scientific names).
# NOTE(review): several patterns use [A-z], which also matches the ASCII
# punctuation between 'Z' and 'a' — presumably [A-Za-z] was intended; verify.
crawl_species <- function(validurl,outputfilename1,outputfilename2){
  require(rvest)
  require(stringr)
  url1 <- validurl
  # One HTML node per listed name
  species <- url1 %>%
    read_html()%>%
    html_nodes(".SN li")
  # Strip markup and anything after the first semicolon
  species <- gsub("<i>|</i>|<li>","",species)
  # NOTE(review): this gsub is a no-op as written — presumably meant to decode
  # an HTML entity into "&"; verify against the original source.
  species <- gsub("&","&",species)
  species <- gsub("\\;.*","",species)
  # This step get the full scientific name.
  full_scientific_name <- species
  full_scientific_name <- gsub("=","",species)
  full_scientific_name <- sub("^\\s","",full_scientific_name)
  # Following steps are fetching genus,author,year,species,subspecies in order.
  # Each str_extract_all() returns a list; converted to character further down.
  genus <- str_extract_all(full_scientific_name,"^[[:blank:]]?[A-z]+")
  rest1 <- gsub("^[[:blank:]]?[A-z]+","",full_scientific_name)
  author <- str_extract_all(rest1,"[A-Z]{1}.?\\s?&?\\s?[A-z]+.?(.*?)?,")
  year <- str_extract_all(full_scientific_name,"\\s?[0-9]+")
  rest2 <- gsub("[A-Z]{1}.?\\s?&?\\s?[A-z]+.?(.*?)?,","",rest1)
  rest3 <- gsub("[?[0-9]+]?","",rest2)
  species <- str_extract_all(rest3,"^\\s?[A-z]+")
  subspecies <- gsub("^\\s?[A-z]+","",rest3)
  a <- length(full_scientific_name)
  # Pre-fill the output frame; every cell is overwritten in the loop below
  final <- data.frame(genus = c(rep(0,a)),species = c(rep(0,a)),subspecies = c(rep(0,a)),author = c(rep(0,a)),year = c(rep(0,a)))
  genus <- as.character(genus)
  species <- as.character(species)
  species <- gsub("\\s*","",species)
  subspecies <-as.character(subspecies)
  subspecies <- gsub("^\\s+|\\s+$","",subspecies)
  subspecies1 <- str_extract(subspecies,"^[A-z]+\\s?")
  restofspecies <- gsub("^[A-z]+\\s?"," ",subspecies)
  author <- paste(restofspecies,author)
  author <- as.character(author)
  author <- gsub("^\\s*|,","",author)
  year <- as.character(year)
  # "character(0)" marks fields the regexes failed to extract; blank them out.
  # NOTE(review): the genus else-branch assigns a string containing a literal
  # newline (the quote spans two lines) — the other branches use " ";
  # probably unintended, preserved as-is.
  for (i in 1:a){if (genus[i] != "character(0)"){final[i,1] = genus[i]} else{final[i,1] = "
"}
  if (species[i] != "character(0)"){final[i,2] = species[i]} else{species[i] = " "
  final[i,2] = " "}
  if (! is.na(subspecies1[i])){final[i,3] = subspecies1[i]} else{final[i,3] = " "}
  if (author[i] != "character(0)"){final[i,4] = author[i]} else{final[i,4] = " "}
  if (year[i] != "character(0)"){final[i,5] = year[i]} else{final[i,5] = " "}}
  write.csv(final,outputfilename1,row.names = FALSE)
  write.csv(full_scientific_name,outputfilename2,row.names = FALSE)
  species
}
crawl_species("http://ftp.funet.fi/pub/sci/bio/life/insecta/lepidoptera/ditrysia/papilionoidea/papilionidae/papilioninae/lamproptera/","allcomponents.csv","full sciname.csv")
|
7925c2655ada98dfb5698f5768332e28de581a97
|
08c16d791ad8250be127f90dc62b8cab8942ef86
|
/R/factors.R
|
941bc318431aaf54a4aed367838805196ef1e2a8
|
[
"MIT"
] |
permissive
|
mrdwab/mathrrr
|
a4b2327eb34d41af6e8be88477ea076e5b1e2f3c
|
aee28db6761a737ac869780506dc8400668face1
|
refs/heads/master
| 2022-11-21T04:28:33.611664
| 2020-07-17T23:52:09
| 2020-07-17T23:52:09
| 278,999,650
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,369
|
r
|
factors.R
|
#' All Factors of a Number
#'
#' `factors_of` returns every positive divisor of the supplied number,
#' in increasing order.
#'
#' @param x The number whose divisors are wanted.
#'
#' @return An integer vector of the divisors of `x`.
#' @examples
#' factors_of(8)
#' @export
factors_of <- function(x) {
  candidates <- seq_len(x)
  candidates[x %% candidates == 0]
}
NULL
#' Common Factors of Multiple Numbers
#'
#' `common_factors` finds the factors shared by every number supplied, or,
#' with `greatest = TRUE`, just their greatest common factor.
#'
#' @param \dots The numbers whose shared factors are wanted.
#' @param greatest Logical. Should only the greatest common factor be
#'   returned? Defaults to `FALSE`.
#'
#' @return A numeric vector (a single number when `greatest = TRUE`).
#' @examples
#' common_factors(18, 48)
#' common_factors(25, 50, 100, greatest = TRUE)
#' @export
common_factors <- function(..., greatest = FALSE) {
  inputs <- unlist(list(...), use.names = FALSE)
  # Intersect the divisor sets of all inputs
  factor_sets <- lapply(inputs, factors_of)
  shared <- Reduce(intersect, factor_sets)
  if (isTRUE(greatest)) {
    max(shared)
  } else {
    shared
  }
}
NULL
#' Prime Factors of a Number
#'
#' `prime_factors` computes the prime factorisation of a positive integer
#' greater than 1 by trial division, with a shortcut for values found in the
#' package's `primes` table.
#'
#' @param x The number to factorise.
#' @param unique Logical. If `TRUE` (the default) return each prime factor
#'   once; if `FALSE` return the full factorisation, so that
#'   `prod(prime_factors(x, unique = FALSE)) == x`.
#'
#' @return A numeric vector of prime factors.
#'
#' @note Returns `NULL` for a value of 1, and generates an error for
#' values less than 1.
#'
#' @examples
#' prime_factors(100, unique = FALSE)
#' prime_factors(100)
#' @export
prime_factors <- function(x, unique = TRUE) {
  if (x < 1) stop("
This function is to be used on positive integers greater than 1")
  if (x %in% primes) {
    # Known prime: it is its own (only) prime factor
    found <- x
  } else {
    # Trial division: collect divisors until their product rebuilds x
    found <- c()
    trial <- 2
    remaining <- x
    while (prod(found) != x) {
      if (!remaining %% trial) {
        found <- c(found, trial)
        remaining <- remaining / trial
        trial <- 1  # restart the scan from 2 (incremented just below)
      }
      trial <- trial + 1
    }
  }
  if (isTRUE(unique)) unique(found) else found
}
NULL
#' Least Common Multiple of a Set of Numbers
#'
#' The `least_common_multiple` function takes a set of numbers and
#' calculates their least common multiple using the prime factorization
#' method.
#'
#' @param \dots The numbers for which you want the least common multiple.
#'
#' @return A single integer value representing the least common multiple
#' of the set of inputs.
#'
#' @note The absolute values of the input is used in calculating the
#' least common multiple.
#'
#' @examples
#' least_common_multiple(4, 7, 11)
#' @export
least_common_multiple <- function(...) {
  L <- list(...)
  # Work with the sorted, de-duplicated absolute values of all inputs
  l <- sort(abs(unique(unlist(L, use.names = FALSE))))
  if (!.isInteger(l)) stop("
This function is only defined to be used on integer values")
  if (any(l == 0)) {
    # Any zero makes the LCM zero
    0
  } else if (identical(l, 1)) {
    # NOTE(review): identical() is type-strict — an integer 1L input would
    # not match the double 1 here; verify this is intended.
    1
  } else {
    l <- l[l != 1]
    # If the largest value is divisible by all the others it is the LCM
    if (all(!max(l) %% l)) {
      max(l)
    } else {
      # Prime-factorisation method: per prime, keep the highest power seen
      # across all inputs (rle gives prime/multiplicity pairs; ave keeps the
      # max multiplicity per prime), then multiply the prime powers.
      out <- lapply(l, prime_factors, unique = FALSE)
      out <- unique(do.call(rbind, lapply(
        out, function(y) data.frame(unclass(rle(y))))))
      out <- out[as.logical(with(
        out, ave(lengths, values, FUN = function(x) x == max(x)))), ]
      prod(do.call("^", rev(out)))
    }
  }
}
NULL
|
74544470b23afe4942a5f97973cb6337723f1fa2
|
248db17ce191339720d3651a5eae817e03af789e
|
/R/get_package_and_url_names.R
|
8d085a918d6eab818f7b2eafa168f53d7d850bb4
|
[] |
no_license
|
rzhao621/SDS230
|
bf4b872c99ed7eefbf67d87040953525220284bc
|
b12be38bd0ce6c24f543526076c1968eb8ce506f
|
refs/heads/master
| 2023-07-16T23:42:21.482823
| 2021-09-02T12:42:32
| 2021-09-02T12:42:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,034
|
r
|
get_package_and_url_names.R
|
#' @import httr
# Package-level constants used by the accessor helpers below.
# should set this to the name of the package
package_name <- "SDS230"
# should set this to the github user name of the repository owner
github_user_name <- "emeyers"
#' Get the package name and version number
#'
#' Returns the name of the package and the version number. This is useful to
#' check that one is using the most recent version of the package in case the
#' package is updated in the middle of the semester.
#'
#' @examples
#' # Download the first homework file
#' get_version()
#'
#' @export
get_version <- function() {
  pkg <- get_package_name()
  paste0(pkg, ": version ", utils::packageVersion(pkg))
}
### Helper functions used throughout the package ---------------------------
# Accessor for the package name constant defined at the top of this file
get_package_name <- function() {
  package_name
}
# Accessor for the GitHub user name constant defined at the top of this file
get_github_user_name <- function() {
  github_user_name
}
# Base raw.githubusercontent.com URL under which class materials are hosted
get_base_url <- function() {
  base_path <- paste0("https://raw.githubusercontent.com/",
                      github_user_name, "/", package_name, "/master/ClassMaterial/")
  base_path
}
|
487e4cc715da2c56b3ad363633c988c2f0cf9312
|
a7dd545bd4529bce3364fbd8078e26ad86499aea
|
/by-member-pressure-contours/1998_assimilation/adjusted_contours.R
|
2042440541a1968c47584152133463fddcd8f903
|
[] |
no_license
|
philip-brohan/weather.case.studies
|
335d15e4bc183f0139b56d411cc016f98b7be0b0
|
2139647d51156e1cd7f227b088fdc66ea079920e
|
refs/heads/master
| 2021-04-18T20:21:05.017207
| 2018-06-19T16:36:45
| 2018-06-19T16:36:45
| 42,944,588
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,158
|
r
|
adjusted_contours.R
|
#!/usr/common/graphics/R/R-3.1.0/bin/R --no-save
# Modern period
# Configuration for rendering per-member MSLP-anomaly contour frames of the
# 1998 20CR (v3.5.1) assimilation case study.
library(GSDF.TWCR)
library(GSDF.WeatherMap)
library(parallel)
library(lubridate)
# Start date and length (days) of the period to render
Year<-1998
Month<-1
Day<-1
Hour<-0
d.total<-30 # Number of days to be rendered
version<-'3.5.1'
members<-seq(1,56)  # ensemble members to draw
GSDF.cache.dir<-sprintf("%s/GSDF.cache",Sys.getenv('SCRATCH'))
if(!file.exists(GSDF.cache.dir)) dir.create(GSDF.cache.dir,recursive=TRUE)
# NOTE(review): the trailing `version` argument is unused by this format
# string (modern R warns about it).
Imagedir<-sprintf("%s/images/1998-assimilation-adjusted",Sys.getenv('SCRATCH'),version)
if(!file.exists(Imagedir)) dir.create(Imagedir,recursive=TRUE)
# NOTE(review): chron() is presumably attached by one of the GSDF packages —
# confirm, or add library(chron).
c.date<-chron(dates=sprintf("%04d/%02d/%02d",Year,Month,Day),
              times=sprintf("%02d:00:00",Hour),
              format=c(dates='y/m/d',times='h:m:s'))
# Map styling options
Options<-WeatherMap.set.option(NULL)
Options<-WeatherMap.set.option(Options,'show.mslp',T)
Options<-WeatherMap.set.option(Options,'show.ice',F)
Options<-WeatherMap.set.option(Options,'show.obs',T)
Options<-WeatherMap.set.option(Options,'show.fog',F)
Options<-WeatherMap.set.option(Options,'show.precipitation',F)
Options<-WeatherMap.set.option(Options,'temperature.range',6)
# Rotated-pole view window (16:9 aspect)
range<-45
aspect<-16/9
Options<-WeatherMap.set.option(Options,'lat.min',range*-1)
Options<-WeatherMap.set.option(Options,'lat.max',range)
Options<-WeatherMap.set.option(Options,'lon.min',range*aspect*-1)
Options<-WeatherMap.set.option(Options,'lon.max',range*aspect)
Options<-WeatherMap.set.option(Options,'pole.lon',185)
Options<-WeatherMap.set.option(Options,'pole.lat',15)
Options$obs.size<- 0.25
land<-WeatherMap.get.land(Options)
Options$mslp.lwd<-1
Options$mslp.base=0 # Base value for anomalies
Options$mslp.range=50000 # Anomaly for max contour
Options$mslp.crange=3000 # Anomaly for max contour colour
Options$mslp.step=1000 # Smaller -more contours
Options$mslp.tpscale=350 # Smaller -contours less transparent
# Overrides mslp options options: non-uniform levels, denser near zero
contour.levels<-seq(-300,300,30)
contour.levels<-abs(contour.levels)**1.5*sign(contour.levels)
# Estimate a first-guess ensemble by scaling the analysis
# ensemble to have the first-guess mean and spread:
# cell by cell, each member's anomaly from the ensemble mean is rescaled by
# fg.spread/ens.sd and recentred on fg.mean.
inflate.ensemble<-function(ensemble,fg.mean,fg.spread) {
  ens.mean<-GSDF.reduce.1d(ensemble,'ensemble',mean)
  ens.sd<-GSDF.reduce.1d(ensemble,'ensemble',sd)
  # Put the first-guess fields on the ensemble grid before combining
  fg.mean<-GSDF.regrid.2d(fg.mean,ens.mean)
  fg.spread<-GSDF.regrid.2d(fg.spread,ens.mean)
  for(i in seq(1,length(ensemble$dimensions[[1]]$values))) {
    for(j in seq(1,length(ensemble$dimensions[[2]]$values))) {
      ensemble$data[i,j,,1]<-(ensemble$data[i,j,,1]-ens.mean$data[i,j,1])*
                             fg.spread$data[i,j,1]/ens.sd$data[i,j,1]+
                             fg.mean$data[i,j,1]
    }
  }
  return(ensemble)
}
# Special functions for getting data
# Ensemble at the start of the 6-hour forecast step containing `hour`
get.forecast.step.start<-function(year,month,day,hour) {
  hour<-as.integer(hour)
  hour<-hour-hour%%6    # round down to the previous analysis hour
  e<-TWCR.get.members.slice.at.hour('prmsl',year,month,day,
                                    hour,version=version)
  return(e)
}
# Ensemble at the end of the step: the next analysis, re-inflated to the
# first-guess mean and spread (see inflate.ensemble above)
get.forecast.step.end<-function(year,month,day,hour) {
  hour<-as.integer(hour)
  hour<-hour-hour%%6+6  # round up to the next analysis hour
  if(hour>23) {         # roll over into the next day
    ymd<-ymd(sprintf("%04d-%02d-%02d",year,month,day))+days(1)
    year<-year(ymd)
    month<-month(ymd)
    day<-day(ymd)
    hour<-hour-24
  }
  e<-TWCR.get.members.slice.at.hour('prmsl',year,month,day,
                                    hour,version=version)
  fg.m<-TWCR.get.slice.at.hour('prmsl',year,month,day,hour,
                               type='fg.mean',version=version)
  fg.s<-TWCR.get.slice.at.hour('prmsl',year,month,day,hour,
                               type='fg.spread',version=version)
  e<-inflate.ensemble(e,fg.m,fg.s)
  return(e)
}
# Linear interpolation in time within the 6-hour forecast step
get.forecast.step.interpolated<-function(year,month,day,hour) {
  e1<-get.forecast.step.start(year,month,day,hour)
  e2<-get.forecast.step.end(year,month,day,hour)
  stage<-(hour%%6)/6    # fraction of the way through the step
  e1$data[]<-e2$data*stage+e1$data*(1-stage)
  return(e1)
}
# Assimilation step at analysis `hour`: from the inflated first guess
# (end of the previous forecast step) to the new analysis
get.assimilation.step.start<-function(year,month,day,hour) {
  return(get.forecast.step.end(year,month,day,hour-1))
}
get.assimilation.step.end<-function(year,month,day,hour) {
  return(get.forecast.step.start(year,month,day,hour+1))
}
# Linear interpolation through the assimilation step; stage in [0, 1]
get.assimilation.step.interpolated<-function(year,month,day,hour,stage) {
  e1<-get.assimilation.step.start(year,month,day,hour)
  e2<-get.assimilation.step.end(year,month,day,hour)
  e1$data[]<-e2$data*stage+e1$data*(1-stage)
  return(e1)
}
# Map a contour-bin index (0..length(contour.levels)) onto the wind palette,
# clamped to (0.001, 0.999) so the palette subscript stays in range.
obs.get.colour<-function(mp) {
  value<-max(0.001,min(0.999,mp/length(contour.levels)))
  return(Options$wind.palette[ceiling(value*length(Options$wind.palette))])
}
# Plot the observation points, coloured by which contour bin each obs'
# pressure anomaly (obs$SLP) falls into; out-of-range values get the
# end-of-palette colours.
Draw.obs.pressure<-function(obs,Options) {
  # Below the lowest contour level
  min.pressure<-min(contour.levels)
  w<-which(obs$SLP<min.pressure)
  if(length(w)>0) {
    Options$obs.colour<-obs.get.colour(0)
    WeatherMap.draw.obs(obs[w,],Options)
  }
  # One draw call per contour bin
  for(mp in seq(2,length(contour.levels))) {
    w<-which(obs$SLP<contour.levels[mp] & obs$SLP>=contour.levels[mp-1])
    if(length(w)>0) {
      Options$obs.colour<-obs.get.colour(mp-1)
      WeatherMap.draw.obs(obs[w,],Options)
    }
  }
  # Above the colour range
  max.pressure<-Options$mslp.base+Options$mslp.crange
  w<-which(obs$SLP>max.pressure)
  if(length(w)>0) {
    Options$obs.colour<-obs.get.colour(length(contour.levels))
    WeatherMap.draw.obs(obs[w,],Options)
  }
}
# Draw pressure-anomaly contours for one field on the current grid viewport.
#   mslp    - GSDF field of MSLP anomalies
#   Options - WeatherMap options list (pole rotation, palette, mslp.* styling)
#   colour  - RGBA vector; only the alpha component is used, to fade members
Draw.pressure<-function(mslp,Options,colour=c(0,0,0,1)) {
  M<-GSDF.WeatherMap:::WeatherMap.rotate.pole(mslp,Options)
  lats<-M$dimensions[[GSDF.find.dimension(M,'lat')]]$values
  longs<-M$dimensions[[GSDF.find.dimension(M,'lon')]]$values
  # Need particular data format for contourLines: ascending lats/longs and
  # longitudes in [-180, 180] - regrid onto a compliant copy if not.
  if(lats[2]<lats[1] || longs[2]<longs[1] || max(longs)> 180 ) {
    if(lats[2]<lats[1]) lats<-rev(lats)
    if(longs[2]<longs[1]) longs<-rev(longs)
    longs[longs>180]<-longs[longs>180]-360
    longs<-sort(longs)
    M2<-M
    M2$dimensions[[GSDF.find.dimension(M,'lat')]]$values<-lats
    M2$dimensions[[GSDF.find.dimension(M,'lon')]]$values<-longs
    M<-GSDF.regrid.2d(M,M2)
  }
  z<-matrix(data=M$data,nrow=length(longs),ncol=length(lats))
  #contour.levels<-seq(Options$mslp.base-Options$mslp.range,
  #                    Options$mslp.base+Options$mslp.range,
  #                    Options$mslp.step)
  lines<-contourLines(longs,lats,z,
                      levels=contour.levels)
  # BUG FIX: contourLines() returns a (possibly empty) list, so the previous
  # test `!is.na(lines) && length(lines)>0` applied && to a logical vector -
  # an error in R >= 4.3 (and logical(0) errored on empty results in older
  # R too). length() alone is the correct emptiness test.
  if(length(lines)>0) {
    for(i in seq(1,length(lines))) {
      # Transparency and colour scale with the anomaly magnitude
      tp<-min(1,(abs(lines[[i]]$level-Options$mslp.base)/
                 Options$mslp.tpscale))
      lt<-1
      lwd<-1
      value<-min(0.999,(abs(lines[[i]]$level-Options$mslp.base)/
                        Options$mslp.crange))
      value<-value/2+0.5
      rgb<-col2rgb(Options$wind.palette[ceiling(value*length(Options$wind.palette))])/255
      gp<-gpar(col=rgb(rgb[1],rgb[2],rgb[3],tp*colour[4]),
               lwd=Options$mslp.lwd*lwd,lty=lt)
      if(lines[[i]]$level<=Options$mslp.base) {
        # Negative anomalies map onto the lower half of the palette
        lt<-1
        lwd<-1
        value<-min(0.999,(abs(lines[[i]]$level-Options$mslp.base)/
                          Options$mslp.crange))*-1
        value<-value/2+0.5
        rgb<-col2rgb(Options$wind.palette[ceiling(value*length(Options$wind.palette))])/255
        gp<-gpar(col=rgb(rgb[1],rgb[2],rgb[3],tp*colour[4]),
                 lwd=Options$mslp.lwd*lwd,lty=lt)
      }
      grid.xspline(x=unit(lines[[i]]$x,'native'),
                   y=unit(lines[[i]]$y,'native'),
                   shape=1,
                   gp=gp)
    }
  }
}
# Render one frame of the forecast phase at fractional hour `hour`:
# land background, then one pressure-anomaly contour set per ensemble
# member (anomaly relative to the v3.4.1 'normal' climatology).
# Skips frames already rendered to Imagedir.
plot.forecast.hour<-function(year,month,day,hour) {
  image.name<-sprintf("%04d-%02d-%02d:%02d:%02d.99.png",year,month,day,as.integer(hour),
                      as.integer(hour%%1*60))
  ifile.name<-sprintf("%s/%s",Imagedir,image.name)
  if(file.exists(ifile.name) && file.info(ifile.name)$size>0) return()
  png(ifile.name,
      width=1080*WeatherMap.aspect(Options),
      height=1080,
      bg=Options$sea.colour,
      pointsize=24,
      type='cairo')
  Options$label<-sprintf("%04d-%02d-%02d:%02d:%02d",year,month,day,as.integer(hour),
                         as.integer(hour%%1*60))
  pushViewport(dataViewport(c(Options$lon.min,Options$lon.max),
                            c(Options$lat.min,Options$lat.max),
                            extension=0))
  WeatherMap.draw.land(land,Options)
  #obs<-TWCR.get.obs(year,month,day,hour,version=version)
  #w<-which(obs$Longitude>180)
  #obs$Longitude[w]<-obs$Longitude[w]-360
  #WeatherMap.draw.obs(obs,Options)
  prmsl.normal<-TWCR.get.slice.at.hour('prmsl',year,month,day,hour,version='3.4.1',
                                       type='normal')
  e<-get.forecast.step.interpolated(year,month,day,
                                    hour)
  # Regrid the climatology to the ensemble grid (via the first member)
  m<-GSDF.select.from.1d(e,'ensemble',1)
  prmsl.normal<-GSDF.regrid.2d(prmsl.normal,m)
  # Draw every member as a half-transparent anomaly contour set
  for(vn in seq_along(members)) {
    m<-GSDF.select.from.1d(e,'ensemble',vn)
    m$data[]<-as.vector(m$data)-as.vector(prmsl.normal$data)
    Draw.pressure(m,Options,colour=c(0,0,0,0.5))
  }
  WeatherMap.draw.label(Options)
  gc()
  dev.off()
}
# Render one frame of the assimilation phase at analysis hour `hour`,
# a fraction `stage` (0..1) of the way from the inflated first guess to the
# new analysis; also draws the assimilated observations coloured by their
# first-guess pressure-difference anomaly.
plot.assimilation.stage<-function(year,month,day,hour,stage) {
  image.name<-sprintf("%04d-%02d-%02d:%02d:%02d.%02d.png",year,month,day,as.integer(hour),
                      as.integer(hour%%1*60),as.integer(stage*100)+1)
  ifile.name<-sprintf("%s/%s",Imagedir,image.name)
  #if(file.exists(ifile.name) && file.info(ifile.name)$size>0) return()
  png(ifile.name,
      width=1080*WeatherMap.aspect(Options),
      height=1080,
      bg=Options$sea.colour,
      pointsize=24,
      type='cairo')
  Options$label<-sprintf("%04d-%02d-%02d:%02d:%02d",year,month,day,as.integer(hour),
                         as.integer(hour%%1*60))
  pushViewport(dataViewport(c(Options$lon.min,Options$lon.max),
                            c(Options$lat.min,Options$lat.max),
                            extension=0))
  WeatherMap.draw.land(land,Options)
  prmsl.normal<-TWCR.get.slice.at.hour('prmsl',year,month,day,hour,version='3.4.1',
                                       type='normal')
  e<-get.assimilation.step.interpolated(year,month,day,
                                        hour,stage)
  # Regrid the climatology to the ensemble grid (via the first member)
  m<-GSDF.select.from.1d(e,'ensemble',1)
  prmsl.normal<-GSDF.regrid.2d(prmsl.normal,m)
  obs<-TWCR.get.obs(year,month,day,hour,version=version,range=0.15)
  w<-which(obs$Longitude>180)
  obs$Longitude[w]<-obs$Longitude[w]-360
  # Convert each obs' first-guess pressure difference (hPa) into an anomaly
  # on the same scale as the contours (Pa, relative to the climatology)
  fg.m<-TWCR.get.slice.at.hour('prmsl',year,month,day,hour,
                               type='fg.mean',version=version)
  obs$SLP<-obs$Mean.first.guess.pressure.difference*100+
           GSDF.interpolate.ll(fg.m,obs$Latitude,obs$Longitude)-
           GSDF.interpolate.ll(prmsl.normal,obs$Latitude,obs$Longitude)
  Draw.obs.pressure(obs,Options)
  # Draw every member as a half-transparent anomaly contour set
  for(vn in seq_along(members)) {
    m<-GSDF.select.from.1d(e,'ensemble',vn)
    m$data[]<-as.vector(m$data)-as.vector(prmsl.normal$data)
    Draw.pressure(m,Options,colour=c(0,0,0,0.5))
  }
  WeatherMap.draw.label(Options)
  gc()
  dev.off()
}
# Driver: for each day of the period, render the four assimilation sequences
# (one per analysis hour) and the per-minute forecast frames, fanned out to
# worker processes with mcparallel and periodically collected to bound the
# number of simultaneous jobs.
for(day.count in seq(0,d.total)) {
  dte<-ymd(sprintf("%04d-%02d-%02d",Year,Month,Day))+days(day.count)
  year<-year(dte)
  month<-month(dte)
  day<-day(dte)
  for(hour in c(0,6,12,18)) {
    for(stage in seq(0.05,0.95,0.1)) {
      mcparallel(plot.assimilation.stage(year,month,day,hour,stage))
      #plot.assimilation.stage(year,month,day,hour,stage)
      #q('no')
    }
    if(hour==6 || hour==18) mccollect(wait=TRUE)
  }
  mccollect(wait=TRUE)
  for(hour in seq(0,23)) {
    for(minute in c(5,15,25,35,45,55)) {
      mcparallel(plot.forecast.hour(year,month,day,hour+minute/60))
      #plot.forecast.hour(year,month,day,hour+minute/60)
    }
    if(hour%%4==3) mccollect(wait=TRUE)
  }
  mccollect(wait=TRUE)
}
|
b23c90351300bf364c98b5ef1a88c95ca30d8cf8
|
e7e4643a435d8f77f22dd229fd1bd9c298cde75e
|
/man/integrateFunction.Rd
|
f82f9a423ae0d971753c1a601fd21e867ddc9e9a
|
[] |
no_license
|
califano-lab/MOMA
|
74c5c1bb3b8c99352bb4b94912e3749022161816
|
852825b00474055b076b3564698c6d02fe8fdeb0
|
refs/heads/master
| 2023-04-06T13:14:06.110621
| 2020-06-04T20:54:19
| 2020-06-04T20:54:19
| 145,617,655
| 3
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 642
|
rd
|
integrateFunction.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cluster.r
\name{integrateFunction}
\alias{integrateFunction}
\title{Numerical integration of functions}
\usage{
integrateFunction(f, xmin, xmax, steps = 100, ...)
}
\arguments{
\item{f}{Function of 1 variable (first argument)}
\item{xmin}{Number indicating the min x value}
\item{xmax}{Number indicating the max x value}
\item{steps}{Integer indicating the number of steps to evaluate}
\item{...}{Additional arguments for \code{f}}
}
\value{
Number
}
\description{
Integrates numerically a function over a range using the trapezoid method
}
\keyword{internal}
|
6d0aa7d352906a8bf3e951e38e43e5bd70464337
|
aac51389396f601727bf2dbcbbb15829f1726026
|
/HW2.R
|
4d5b41584aa3cbbbaf2097e80860a6417acfec1f
|
[] |
no_license
|
yadevi/UMich_HS_650
|
a463d7e3b38a13d6176ecd5b062cfb349129d75b
|
ee1bad288a6d1ecafe92ece65b673fe9bab9d298
|
refs/heads/master
| 2021-06-23T09:43:45.030738
| 2017-08-23T17:02:49
| 2017-08-23T17:02:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,527
|
r
|
HW2.R
|
# HS650 HW2: data loading/summaries, bivariate relations, imputation, surface
# plot, SMOTE rebalancing, and an ozone summary report.
# NOTE(review): `header = T` below — prefer the non-reassignable TRUE.
library(rvest)
library(gmodels)
library(ggplot2)
library(reshape2)
library(plotly)
library(MASS)
library(unbalanced)
library(GGally)
library(mi)
library(betareg)
# Q1
# Load the following two datasets, generate summary statistics for all variables, plot some of the features (e.g., histograms,
# box plots, density plots, etc.) of some variables, and save the data locally as CSV files
# ALS case-study Training data
alsdata = read.csv('C:/Users/Xylthx/Desktop/Semester 8/HS650/HW2/ALS_TrainingData_2223.csv', header = T)
summary(alsdata)
hist(alsdata$Age_mean, main = 'Histogram of mean age', xlab = 'Mean age')
boxplot(alsdata[,3:5], main = 'Boxplot for Albumin')
plot(density(alsdata$Albumin_median), main = 'Density plot for median albumin', xlab = 'Median albumin')
write.csv(alsdata, file = 'C:/Users/Xylthx/Desktop/Semester 8/HS650/HW2/ALS_TrainingData_forHW.csv')
# SOCR Knee Pain Data (scraped from the SOCR wiki)
wiki_url = read_html("http://wiki.socr.umich.edu/index.php/SOCR_Data_KneePainData_041409")
html_nodes(wiki_url, "#content")
socrdata = html_table(html_nodes(wiki_url, "table")[[2]])
socrdata = as.data.frame(socrdata)
summary(socrdata)
hist(socrdata$x, main = 'Histogram of x', xlab = 'x')
boxplot(socrdata[,1:2], main = 'Boxplot for x and Y')
plot(density(socrdata$x), main = 'Density plot for x', xlab = 'x')
write.csv(socrdata, file = 'C:/Users/Xylthx/Desktop/Semester 8/HS650/HW2/SOCR_data_forHW.csv')
# Q2
# Use ALS case-study data and long-format SOCR Parkinsons Disease data(extract rows with Time=0)
# to explore some bivariate relations (e.g. bivariate plot, correlation, table, crosstable etc.)
wiki_url = read_html("http://wiki.socr.umich.edu/index.php/SOCR_Data_PD_BiomedBigMetadata")
html_nodes(wiki_url, "#content")
pd_data = html_table(html_nodes(wiki_url, "table")[[1]])
pd_data = data.frame(pd_data)
pd_0 = subset(pd_data, pd_data$Time == 0)   # baseline rows only
plot(x = alsdata$Age_mean, y = alsdata$Albumin_median,
     main = 'Mean age vs median albumin', xlab = 'Mean age', ylab = 'Median albumin')
cor(alsdata$Age_mean, alsdata$Albumin_median)
table(pd_0$Sex, pd_0$Dx)
CrossTable(pd_0$Sex, pd_0$Dx)
# Use 07_UMich_AnnArbor_MI_TempPrecipitation_HistData_1900_2015 data to show the relations between
# temperature and time. [Hint: use geom_line and geom_bar]
aadata = read.csv('C:/Users/Xylthx/Desktop/Semester 8/HS650/HW2/07_UMich_AnnArbor_MI_TempPrecipitation_HistData_1900_2015.csv',
                  header = T)
ind = seq(1, 111, 5)        # sample every 5th year to declutter the plot
aadata1 = aadata[ind,]
aanew = melt(aadata1, id.vars = 'Year')   # wide -> long (one row per year/month)
colnames(aanew) = c('Year', 'Month', 'Temperature')
aanew$Month = as.factor(aanew$Month)
aanew$Temperature = as.numeric(aanew$Temperature)
plot1 = ggplot(aanew, aes(Year, Temperature, group = Month, color = Month)) + geom_line()
plot1
bar = ggplot(aanew, aes(x = Year, y = Temperature, fill = Month)) + geom_col() + facet_grid(. ~ Month) +
  scale_y_continuous(breaks = seq(10, 80, 10))
bar
# Q3
# Introduce (artificially) some missing data, impute the missing values and examine the differences between the original,
# incomplete and imputed data in statistics.
n = 1000
m = 5
data = matrix(data = rnorm(5000, 10, 1), 1000, 5)
miss = sample(1:5000, 500)   # knock out 10% of cells at random
data[miss] = NA
data = as.data.frame(data)
summary(data)
mdf = missing_data.frame(data)
show(mdf)
image(mdf)
imputations = mi(data, n.iter=5, n.chains=3, verbose=TRUE)
hist(imputations)
# Q4
# Generate a surface plot for the SOCR Knee Pain Data illustrating the 2D distribution of locations of the patient
# reported knee pain (use plotly and kernel density estimation).
socrdata$View = as.factor(socrdata$View)
kernal_density = with(socrdata, MASS::kde2d(x, Y, n = 50))
with(kernal_density, plot_ly(x=x, y=y, z=z, type="surface"))
# Q5
# Rebalance Parkinson's Disease data(extract rows with Time=0) according to disease(SWEED OR PD) and health (HC) using
# synthetic minority oversampling (SMOTE) to ensure approximately equal cohort sizes. (Notice: need to set 1 as the minority
# class.)
pd_0$Dx = as.factor(pd_0$Dx)
summary(pd_0$Dx)
# Binarize Dx based on disease or not
pd_0$Dx = ifelse(pd_0$Dx == 'HC', 'Control', 'Patient')
pd_0$Dx = as.factor(pd_0$Dx)
summary(pd_0$Dx)
pd_0$Control = ifelse(pd_0$Dx == 'Control', 1, 0)   # 1 = minority (Control) class
pd_0$Control = as.factor(pd_0$Control)
summary(pd_0$Control)
pd_0$Sex = as.factor(pd_0$Sex)
summary(pd_0)
# Balancing cases
input = pd_0[ , -which(names(pd_0) %in% c("Cases", "Dx", "Control", 'Time'))]
output = pd_0$Control
data.1 = ubBalance(X = input, Y = output, type="ubSMOTE", percOver=250, percUnder=150, verbose=TRUE)
table(data.1$Y)
balancedData<-cbind(data.1$X, data.1$Y)
colnames(balancedData) <- c(colnames(input), "Control")
qqplot(input[, 5], balancedData [, 5])
# Q6
# Use the California Ozone Data to generate a summary report. Make sure include: summary for every variable,
# structure of data, proper data type convert(if needed), discuss the tendency of the ozone average concentration
# in terms of year's average for each location, explore the differences of the ozone concentration for area, explore
# the change of ozone concentration as seasons.
wiki_url = read_html('http://wiki.socr.umich.edu/index.php/SOCR_Data_121608_OzoneData')
html_node(wiki_url, '#content')
ozone_data = html_table(html_nodes(wiki_url, "table")[[1]])
summary(ozone_data)
ozone_data$VARIABLE = as.factor(ozone_data$VARIABLE)
ozone_data$LOCATION = as.factor(ozone_data$LOCATION)
ozone_data$COMP_SITES = as.factor(ozone_data$COMP_SITES)
# Ozone, year vs annual concentration in different locations
ozone.plot1 = ggplot(ozone_data, aes(YEAR, ANNUAL, group = LOCATION, color = LOCATION)) + geom_line()
ozone.plot1
# We can see that the annual ozone concentration generally increases from 1980 to 2010.
# Ozone annual concentraion vs latitude, longitude, and elevation, in year 2010.
ozone_data06 = subset(ozone_data, YEAR == '2006')
ggplot(ozone_data06, aes(LATITUDE, LONGITUDE, color = ANNUAL)) + geom_point()
# Ozone concentration for location 2008
ozone_data08 = subset(ozone_data, LOCATION == '2008')
ozone_data08 = ozone_data08[, 4:16]                  # keep YEAR + 12 monthly columns
ozone_data08 = melt(ozone_data08, id.vars = 'YEAR')  # wide -> long
colnames(ozone_data08) = c('YEAR', 'MONTH', 'CONC')
ozone.plot2 = ggplot(ozone_data08, aes(MONTH, CONC, group = YEAR, color = YEAR)) + geom_point()
ozone.plot2
# We can see that generally the ozone concentration is low in winter.
|
fc9d2c0600d9b2f0282475862f98a90de681f5cc
|
20e0483ed898440420db9d234e2b85be29284376
|
/R/outs_tp.R
|
befb724f1e126e843850f203868ecfd5672895ba
|
[] |
no_license
|
cmaerzluft/TexasHoldEm
|
52e2cbe118a78cf8b7ac92b0c7991d823325d1ef
|
e8cb1a57fe70de394f41b466fb5d3ca4c48cde46
|
refs/heads/master
| 2021-06-18T11:49:36.022764
| 2021-06-11T06:44:52
| 2021-06-11T06:44:52
| 150,686,994
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,887
|
r
|
outs_tp.R
|
#' Outs to make a Two Pair
#'
#' Counts the remaining cards ("outs") that would improve the current hand to
#' a Two Pair, based on the duplicate structure from `help_make_duplicates()`.
#'
#' @param cards Cards from hand, including pocket cards and all community cards
#' @param stage What part of deal was just performed, "turn" or "flop"?
#' @param output IN PROGRESS. Should function return number of outs ("counts") or the cards themselves ("cards")
#'
#' @return Either counts of cards that can make a Two Pair or a vector of cards that complete a Two Pair. If a data.frame
#'   is input, the output is the same length as number of rows.
#' @export
outs_tp <- function(cards, stage, output = "counts") {
  # Count duplicate cards
  # (bests$countN / bests$cardN are the multiplicity and rank of the N-th
  # best duplicate group — TODO confirm against help_make_duplicates())
  bests <- help_make_duplicates(cards)
  # Count up the outs
  # NOTE(review): if_else() is dplyr's vectorised version; assumes dplyr is
  # attached — could be namespaced as dplyr::if_else for safety.
  if (stage == "turn") {
    tp_outs <- if_else(
      # A Two Pair (3 * number of cards on board that are better than worst pair)
      bests$count1 == 2 & bests$count2 == 2 & bests$count3 < 2,
      ((bests$card3 > bests$card2) + (bests$card4 > bests$card2))*3L, if_else(
        # A Pair (3* all remaining non-paired cards)
        bests$count1 == 2 & bests$count2 < 2, 4L*3L,
        # All other hands are better or not possible
        0L
      ))
  } else {
    tp_outs <- if_else(
      # A Two Pair ((14 - small pair - 1)*4 - (kicker > small pair))
      bests$count1 == 2 & bests$count2 == 2,
      as.integer(14 - bests$card2 - 1)*4L - (bests$card3 > bests$card2)*1L, if_else(
        # A Pair ((all remaining non-paired cards) - the 3 individual cards we already have)
        bests$count1 == 2 & bests$count2 < 2, 12L*4L - 3L, if_else(
          # A High Card
          bests$count1 == 1, 52L - 5L,
          # Two Pair or better become better hands than Three of a Kind, or not possible
          0L
        )))
  }
  # Return Results
  # If we want to return more stuff
  # Could potentially return actual card outs (e.g. 304 and 307) in order to combined with other hand outs to get a
  # total outs list/count
  return(tp_outs)
}
|
c72c875cd736855dd06bfecdd78daf3ac36f1af5
|
817267ad6ee388294faf0c42a0b06aa3af6551b2
|
/portfoliotheory.R
|
6e4254c45c74a6b7991b256028d26ab80027023b
|
[] |
no_license
|
tgwisner/MS_thesis_R_code
|
d7c293bd81fdd160313fecbc82134f5686268e02
|
91dc5917779b3549e8caf9bb2a0e6135459a2949
|
refs/heads/master
| 2021-01-10T02:28:09.846862
| 2016-03-24T22:23:32
| 2016-03-24T22:23:32
| 54,678,107
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 521
|
r
|
portfoliotheory.R
|
# Mean-variance portfolio analysis of three Vanguard funds: plot the
# efficient frontier, the minimum-variance portfolio, and a portfolio cloud.
library(stockPortfolio)
# Funds (change to indices?)
# vbmfx - US bond
# vtiax - int'l stock
# vtsmx - US stock
# Daily returns restricted to the overlapping date range of all three funds
returns <- getReturns(c("VBMFX", "VTIAX", "VTSMX"), freq = "day", get = "overlapOnly")
model <- stockModel(returns, freq="day")
mvp <- optimalPort(model, Rf=0)   # optimal portfolio at zero risk-free rate
portPossCurve(model, riskRange = 6, add=FALSE, xlim = c(.0005, .012), main = "Efficient Frontier and MVP")
points(mvp, addNames=TRUE)
portCloud(model, riskRange = 10, N=10000, subSamp=2000, add=TRUE, pch=".", col="black")
# Capital-allocation line; slope presumably taken from a prior optimalPort
# run — TODO confirm.
abline(a=0, b=.1250448)
|
a4d9263782f67ffe059095994c8ac62358460b03
|
f245521e63b59e37470070092b7d1d38a87b2e48
|
/plotCover.r
|
cc03a442bd8e99c01faa8b960739de06c6b69a5c
|
[] |
no_license
|
douglask3/UKESM-land-eval
|
3c10d10eba32bcef1e7b2a057db3b22fdf2fd621
|
aad3f6902e516590be02585ad926bfe1cf5770bf
|
refs/heads/master
| 2021-08-17T06:32:10.736606
| 2021-07-14T12:57:13
| 2021-07-14T12:57:13
| 242,747,830
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,502
|
r
|
plotCover.r
|
# Setup for plotting UKESM vegetation-cover maps against IGBP/CCI/VCF obs.
# cfg.r presumably provides `jobs`, `regions`, `region_names`, raster helpers
# (sum.raster, FUN.raster, layer.apply, ...) and plotStandardMap — TODO confirm.
source("cfg.r")
library("plotrix")
library(maps)
graphics.off()
# Observation datasets: display name -> filename tag
obs_files = c(IGBP = "igbp", CCI = "cci", VCF = "VCF")
# Cover types: display name -> variable tag ("bares" is inverted to total cover)
vars = c(Tree = "tree", Wood = "wood", Shrub = "shrub", Herb = "herb", Grass = "grass",
         "Total\nveg. cover" = "bares")
limits = seq(0, 0.9, 0.1)*100   # colour-scale breaks, percent cover
cols = c('#ffffe5','#f7fcb9','#d9f0a3','#addd8e','#78c679','#41ab5d','#238443','#006837','#004529')
files = list.files("outputs/", full.names = TRUE)
# Manhattan-metric (MM) comparison score between two cover fields a and b:
# twice the area-weighted mean absolute difference, globally and (optionally)
# for each of 28 regions. Relies on globals `regions` (region raster) and
# `region_names` from cfg.r.
MM <- function(a, b, byRegions = TRUE) {
  print("running MM")
  # Single-layer fields are expanded to (cover, 1 - cover) pairs so the
  # metric accounts for both the class and its complement
  if (nlayers(a) == 1) {
    a = addLayer(a, 1-a)
    b = addLayer(b, 1-b)
  }
  # Cell-area weights, masked to cells with both region and data coverage
  ar = raster::area(a)
  ar[is.na(regions)] = NaN
  ar[is.na(a[[1]])] = NaN
  # Score for one region (NaN means the whole domain)
  MMi <- function(region) {
    if (!is.na(region)) ar[regions!=region] = 0
    score = 2*sum.raster(abs(a-b) * ar, na.rm = TRUE)/ sum.raster(ar, na.rm = T)
    return(score)
  }
  if (byRegions) {
    scores = sapply(c(NaN, 1:28), MMi)
    names(scores) = c("Global", region_names)
  }
  else scores = MMi(NaN)
  return(scores)
}
# Field-wide median of a raster, via the FUN.raster helper (from cfg.r)
median.raster <- function(...)
  FUN.raster(median, ...)
# Null-model benchmark: MM score between a field and a constant field equal
# to its spatial average (FUN = mean.raster by default, or median.raster).
compmn <- function(r, FUN = mean.raster) {
  # Replace every non-NA cell of one layer with the field-wide average
  averageize <- function(rl) {
    mn = rl
    mn[!is.na(rl)] = FUN(rl, na.rm = TRUE)
    return(mn)
  }
  if (nlayers(r)==1) mn = averageize(r) else mn = layer.apply(r, averageize)
  return(MM(r, mn))
}
# Null-model benchmark: MM score between a field and spatially re-shuffled
# copies of itself; returns per-region mean and sd over 10 shuffles.
# NOTE(review): uses the global RNG without set.seed, so results vary by run.
compRR <- function(r) {
  mask = which(!is.na(r[[1]][]))   # cells eligible for shuffling
  Mod <- function(...) {
    # Permute the non-NA cells of one layer
    randomize <- function(rl) {
      index = sample(mask, length(mask), replace = FALSE)
      rr = rl
      rr[mask] = rl[index]
      return(rr)
    }
    if (nlayers(r) ==1) rr = randomize(r) else rr = layer.apply(r, randomize)
    score = MM(r, rr)
    return(score)
  }
  scores = sapply(1:10, Mod)
  # Columns: mean and sd of the score across the 10 shuffles
  return(cbind(apply(scores, 1, mean), apply(scores, 1, sd)))
}
# Area-weighted mean and sd of a cover field, globally and for each of the
# 28 regions (globals `regions` and `region_names` from cfg.r).
areaWmean <- function(r) {
  # Area weights, zeroed where data or region coverage is missing
  ar = raster::area(r)
  ar[is.na(r)] = 0.0
  ar[is.na(regions)] = 0.0
  r[is.na(regions)] = NaN
  # One region's statistic; NaN = whole domain.
  # std = TRUE returns the (unweighted) field sd instead of the weighted mean.
  fRegion <- function(region, std = FALSE) {
    if (!is.na(region)) {
      test = regions != region
      r[test] = NaN
      ar[test] = 0.0
    }
    if (std) out = FUN.raster(sd, r, na.rm = TRUE)
    else out = sum.raster(r * ar, na.rm = TRUE)/sum.raster(ar, na.rm = TRUE)
    return(out)
  }
  # Evaluate globally plus per region, with names attached
  fRegions <- function(...) {
    out = sapply(c(NaN, 1:28), fRegion, ...)
    names(out) = c("Global", region_names)
    return(out)
  }
  mns = fRegions()
  sds = fRegions(std = TRUE)
  out = cbind(mns, sds)
  colnames(out) = c("cover mean", "cover sd")
  return(out)
}
# Plot one row of the vegetation-cover figure for variable `var` (row label
# `vname`): one map per observation product, then the UKESM ensemble mean with
# sd stippling; also compute MM benchmark tables against each observation.
# Relies on file-level globals: files, vars, obs_files, jobs, cols, limits.
# Returns list(observations, simulations, benchmark results).
plotVariable <- function(var, vname) {
    files = files[grepl(var, files)]
    # Open (and optionally plot) one dataset; returns NULL when no file
    # matches `nm`, keeping the panel layout aligned via an empty plot.
    openFile <- function(nm, nmtext, plotMe = TRUE) {
        # Column title on the top row; variable name on the first column.
        addText <- function() {
            if (var == vars[[1]]) mtext(nmtext, side = 3)
            if (nm == obs_files[1]) {
                mtext(vname, side = 2, padj = 0.5)
                #if (var == vars[length(vars)])
            }
        }
        file = files[grepl(nm, files)]
        if (length(file) == 0) {
            if (plotMe) {
                plot.new()
                addText()
            }
            return(NULL)
        }
        dat = brick(file)
        # Collapse multi-layer bricks to their across-layer mean.
        if (nlayers(dat) == 1) dat = dat[[1]] else dat = mean(dat)
        dat = raster::crop(dat, c(-180, 180, -60, 90))
        # VCF appears to be stored in percent -- rescale to a 0-1 fraction
        # (TODO confirm against the VCF source files).
        if (nm == "VCF" && var != "bares") dat = dat/100
        # "bares" is plotted as its complement.
        if (var == "bares") dat = 1 - dat
        if (plotMe) {
            plotStandardMap(dat*100, cols = cols, limits = limits)
            addText()
        }
        return(dat)
    }
    obss = mapply(openFile, obs_files, names(obs_files))
    obss = obss[!sapply(obss, is.null)]
    # Simulations are loaded but not plotted individually; drop missing runs
    # from both `jobs` and `sims` so they stay aligned.
    sims = lapply(jobs, openFile, plotMe = FALSE)
    test = !sapply(sims, is.null)
    jobs = jobs[test]
    sims = sims[test]
    sims_mean = mean(layer.apply(sims, function(i) i))
    sims_sd = sd.raster( (layer.apply(sims, function(i) i)))
    plotStandardMap(sims_mean*100, e = sims_sd, cols = cols, limits = limits,
                    ePatternRes = 22, ePatternThick = 0.38, limits_error = c(0.01, 0.05))
    if (var == vars[[1]]) mtext(side = 3, 'UKESM')
    # Benchmark every simulation against one observation product. Writes two
    # CSVs (full table and ensemble summary) as a side effect and returns, per
    # row, how many simulation MM scores each null model beats.
    benchmarkObs <- function(obs, name) {
        # Area-weighted mean/sd per simulation, side by side (2 cols per job).
        tab = lapply(sims, areaWmean)
        tab = do.call(cbind, tab)
        colnames(tab) = paste(rep(jobs, each = 2), '-', colnames(tab))
        tab_mns = cbind(areaWmean(obs), tab)
        colnames(tab_mns)[1:2] = c("Obs - cover mean", "Obs - sd")
        # MM score of each simulation against the observations.
        tab_ben = sapply(sims, MM, obs)
        colnames(tab_ben) = paste(jobs, '- MM')
        # Null models: flat-median, flat-mean and randomly-resampled fields.
        tab_null = cbind(compmn(obs, median.raster), compmn(obs), compRR(obs))
        colnames(tab_null) = c('median', 'mean', 'randomly-resampled mean',
                               'randomly-resampled sd')
        # Ensemble summary: mean and sd across simulations.
        tabi <- function(tab) cbind(apply(tab, 1, mean), apply(tab, 1, sd))
        tab_ens = cbind(tab_mns[,1],
                        tabi(tab_mns[,seq(3, ncol(tab_mns)-1, by = 2)]),
                        tabi(tab_ben))
        colnames(tab_ens) = c("Obs - cover",
                              "UKESM - cover mean", "UKESM - cover sd",
                              "UKESM - MM mean", "UKESM - MM sd")
        tab = cbind(tab_mns, tab_ben, tab_null)
        fname = paste0("docs/", var, '-', name, "-MM-full.csv")
        tab = round(tab, 3)
        write.csv(tab, file = fname)
        tab_ens = cbind(tab_ens, tab_null)
        fname = paste0("docs/", var, '-', name, "-MM-summ.csv")
        tab_ens = round(tab_ens, 3)
        write.csv(tab_ens, file = fname)
        #ens_range = cbind(tab_ens[, 4] - tab_ens[, 5], tab_ens[, 4] + tab_ens[, 5])
        # Null-model score set: the two compmn scores plus the resampled
        # mean +/- sd band (columns 6-9 of the summary table).
        nulls = cbind(tab_ens[,6:7], tab_ens[,8] - tab_ens[,9], tab_ens[,8] + tab_ens[,9])
        index = 1:ncol(nulls)
        # For each row, count simulations whose MM score beats each null.
        nbeats = apply(cbind(nulls, tab_ben), 1,
                       function(i) sapply(i[-index], function(j) sum(j< i[index])))
        return(nbeats)
    }
    bench = mapply(benchmarkObs, obss, names(obss), SIMPLIFY = FALSE)
    #name = paste0("docs/", var, "-MM-full.csv")
    #tab = round(tab, 3)
    #write.csv(tab, file = fname)
    #colnames(tabi) = c("UKESM-mean", "UKESM-sd")
    #tabi = cbind(tabi, tab[, (ncol(tab)-3):ncol(tab)])
    #fname = paste0("docs/", var, "-MM-summ.csv")
    #tabi = round(tabi, 3)
    #write.csv(tabi, file = fname)
    return(list(obss, sims, bench))
}
# Figure: grid of vegetation-cover maps -- one row per variable, one column
# per dataset plus the UKESM ensemble, with a shared legend strip (panel 25).
png("figs/vegDist.png", height = 3 * 183/4, width = 183, units = 'mm', res = 450)
layout(rbind(t(matrix(1:24, nrow = 4)), 25), heights = c(1, 1, 1, 1, 1, 1, 0.3))
par(mar = rep(0, 4), oma = rep(1.5, 4))
out = mapply(plotVariable, vars, names(vars))
StandardLegend(cols, limits, out[[1]][[1]][[1]], extend_max = FALSE,
               maxLab = 100, add = FALSE)
dev.off()

# Keep the benchmark results (third element returned by plotVariable for each
# variable) and save them with the variable definitions for later analysis.
scores = out[3,]
save(scores, vars, file = 'outputs/bench/cover.Rd')
# Bug fix: removed a leftover browser() debugging call that halted the
# script at this point.
# Compare multi-layer "item" stacks (selected cover components from the
# per-variable results in the global `out`) between every model run and one
# observation product. Writes full and summary MM tables to docs/ and returns
# the score matrix.
itemComparison <- function(obs_name) {
    # Which item indices to stack depends on whether this observation product
    # is available for the second variable -- TODO confirm intent vs `vars`.
    if (any(names(out[[2]][[1]]) == obs_name)) index = c(1, 3, 5, 6) else index = c(1, 4, 6)
    # Stack the selected observed items into one multi-layer raster.
    makeItemObs <- function(i) out[[i]][[1]][[which(names(out[[i]][[1]]) == obs_name)]]
    obsi = layer.apply(index, makeItemObs)
    # Same stacking for model run number `mno`.
    makeItemsMod <- function(mno) {
        makeItemMod <- function(i) out[[i]][[2]][[mno]]
        layer.apply(index, makeItemMod)
    }
    modsi = lapply(seq_along(out[[1]][[2]]), makeItemsMod)
    # MM score of every model against the observations, plus ensemble
    # statistics and the standard null models.
    scores = sapply(modsi, MM, obsi)
    scores = cbind(scores, mean(scores), sd(scores),
                   compmn(obsi, median.raster), compmn(obsi), compRR(obsi))
    colnames(scores) = c(jobs, 'UKESM-mean', 'UKESM-sd',
                         'median', 'mean', 'randomly-resampled mean', 'randomly-resampled sd')
    tab = round(scores, 3)
    # Bug fix: removed a leftover browser() call that paused execution here
    # before the CSVs were written.
    tabFull = tab[, c(1:(ncol(tab) - 6), (ncol(tab) - 3):ncol(tab))]
    write.csv(tabFull, file = paste0("docs/items-", obs_name, "-MM-full.csv"))
    tabSumm = tab[, (ncol(tab) - 5):ncol(tab)]
    write.csv(tabSumm, file = paste0("docs/items-", obs_name, "-MM-summ.csv"))
    return(scores)
}
items_scores = sapply(names(obs_files), itemComparison)
|
df9a7042171b77792494eb2ad857867a7cce3c67
|
dd1fa9020beb9b0205a5d05e0026ccae1556d14b
|
/itwill/R-script/chap18_ClusteringAnalysis.R
|
3a3c40783ef5c4ede2088f1ab89bcacb8b05e671
|
[] |
no_license
|
kimjieun6307/itwill
|
5a10250b6c13e6be41290e37320b15681af9ad9a
|
71e427bccd82af9f19a2a032f3a08ff3e1f5911d
|
refs/heads/master
| 2022-11-13T11:55:12.502959
| 2020-07-15T08:14:21
| 2020-07-15T08:14:21
| 267,373,834
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,674
|
r
|
chap18_ClusteringAnalysis.R
|
# chap18_ClusteringAnalysis(1)
###################################################
# 군집분석(Clustering)
###################################################
# 고객DB -> 알고리즘 -> 군집
# 알고리즘을 통해서(패턴으로) 근거리 모형으로 군집형성 - 규칙(rule)
# 변수에 의해서 그룹핑되다.
# 변수 적용 : 상품카테고리, 구매금액, 총거래금액
# 유사성 거리에 의한 유사객체를 묶어준다.
# 거리를 측정하여 집단의 이질성과 동질성을 평가하고, 이를 통해서
# 군집을 형성한다..
# 유사성 거리 : 유클리드 거리
# y변수가 없는 데이터 마이닝 기법
# 예) 몸, 키 관점에서 묶음 -> 3개 군집 <- 3개 군집의 특징 요약
# 주요 알고리즘 : hierarchical, k-means
# 그룹화를 통한 예측(그룹 특성 차이 분석-고객집단 이해)
# 1. 유클리드 거리
# 유클리드 거리(Euclidean distance)는 두 점 사이의 거리를 계산하는
# 방법으로 이 거리를 이용하여 유클리드 공간을 정의한다.
# (1) Create a matrix; byrow = TRUE fills by rows: (1 2 3) / (4 5 6) / (7 8 9).
# Fixes: spelled out `byrow = TRUE` instead of relying on partial argument
# matching (`by=T`) and the reassignable shorthand `T`.
x <- matrix(1:9, nrow = 3, byrow = TRUE)
# (2) Euclidean distances between the rows of a matrix.
# Usage: dist(x, method = "euclidean") -> x: numeric matrix or data frame.
# Renamed the result to `d` so it no longer shadows stats::dist.
d <- dist(x, method = "euclidean")  # "euclidean" is the default, so method may be omitted
d
#           1         2
# 2  5.196152
# 3 10.392305  5.196152
# <interpretation> distance(row1, row2) = 5.196152, distance(row1, row3) =
#                  10.392305, distance(row2, row3) = 5.196152
# (3) Euclidean distance computed by hand: square root of the sum of squared
# differences between corresponding elements (uses ^ instead of the
# undocumented ** alias).
sqrt(sum((x[1, ] - x[2, ])^2))  # 5.196152 ---> matches dist(x)
sqrt(sum((x[1, ] - x[3, ])^2))  # 10.39231
#------------------------------------------------------------------
# 2. 계층적 군집분석(탐색적 분석)
# - 계층적 군집분석(Hierarchical Clustering)
# - 거리가 가장 가까운 대상부터 결합하여 나무모양의
# 계층구조를 상향식(Bottom-up)으로 만들어가면서 군집을 형성
# (1) 군집분석(Clustering)분석을 위한 패키지 설치
install.packages("cluster") # hclust() : 계층적 클러스터 함수 제공
library(cluster) # 일반적으로 3~10개 그룹핑이 적정
# (2) 데이터 셋 생성
r <- runif(15, min = 1, max = 50)
x <- matrix(r, nrow=5, by=T)
x
# [,1] [,2] [,3]
# [1,] 25.09302 10.82797 19.40195
# [2,] 37.68731 12.79780 13.10693
# [3,] 16.66040 29.51943 14.36246
# [4,] 28.87503 5.69022 20.89039
# [5,] 19.72782 13.85944 25.81579
# (3) matrix 대상 유클리드 거리 생성 함수
dist <- dist(x, method="euclidean") # method 생략가능
dist
# 1 2 3 4
# 2 14.217014
# 3 21.115784 26.894610
# 4 6.550991 13.738857 27.561609
# 5 8.894525 22.026923 19.642381 13.216169
#<해석> 1번째 관측치 기준으로 가장 가까운 관측치는 4번째(6,550991)이다.
mean(x[1,]) # 18.44098
mean(x[4,]) # 18.48521
#<해석> 평균이 유사함.
# (4) 유클리드 거리 matrix를 이용한 클러스터링
hc <- hclust(dist) # 클러스터링 적용
hc
# Call:
# hclust(d = dist)
# Cluster method : complete
# Distance : euclidean
# Number of objects: 5 ---> 5개 객체 사용
help(hclust)
plot(hc) # 클러스터 플로팅(Dendrogram) -> 1과2 군집(클러스터) 형성
#@@1 (덴드로 그램)-유클리드 거리를 기반으로 한다.
#<실습> 중1학년 신체검사 결과 군집분석
#---------------------------------------------
body <- read.csv("c:/ITWILL/2_Rwork/Part-IV/bodycheck.csv")
names(body)
# [1] "번호" "악력" "신장" "체중" "안경유무"
str(body) #'data.frame': 15 obs. of 5 variables:
idist <- dist(body)
idist
#@@2
hc <- hclust(idist)
plot(hc, hang=-1) # 음수값 제외
#@@3
# 3개 그룹 선정, 선 색 지정
rect.hclust(hc, k=3, border="red") # 3개 그룹 선정, 선 색 지정
#@@4
# 각 그룹별 서브셋 만들기
g1<- subset(body, 번호==15| 번호==1| 번호==4| 번호==8 | 번호==10)
g2<- subset(body, 번호==11| 번호==3| 번호==5| 번호==6 | 번호==14)
g3<- subset(body, 번호==2| 번호==9| 번호==7| 번호==12 | 번호==13)
# 각 그룹별 특징
summary(g1)
# 번호 악력 신장 체중 안경유무
# Min. : 1.0 Min. :23.0 Min. :142.0 Min. :32.0 Min. :1
# 1st Qu.: 4.0 1st Qu.:25.0 1st Qu.:146.0 1st Qu.:34.0 1st Qu.:1
# Median : 8.0 Median :25.0 Median :152.0 Median :38.0 Median :1
# Mean : 7.6 Mean :25.6 Mean :149.8 Mean :36.6 Mean :1
# 3rd Qu.:10.0 3rd Qu.:27.0 3rd Qu.:153.0 3rd Qu.:39.0 3rd Qu.:1
# Max. :15.0 Max. :28.0 Max. :156.0 Max. :40.0 Max. :1
# <해석>-----신장 : 142~156, 안경유무(1)
summary(g2) # 신장 : 155~168, 안경유무(1, 2)
summary(g3) # 신장 : 154~169, 안경유무(2)
# ★군집수는 분석가가 정해서 각 군집의 특징을 해석하면 됨★
#--------------------------------------------------------------------------
# 3. 계층형 군집분석에 그룹수 지정
# iris의 계층형군집결과에 그룹수를 지정하여 그룹수 만큼
# 잘라서 iris의 1번째(Sepal.Length)와 3번째(Petal.Length) 변수를
# 대상으로 클러스터별 변수의 평균 구하기
# 1) 유클리드 거리 계산
str(iris) # 'data.frame': 150 obs. of 5 variables:
idist<- dist(iris[1:4]) # dist(iris[, -5]) ---# 계산식 수행하려면 연속형 변수로 되어 있어야 함.(범주형 제외)
# 2) 계층형 군집분석(클러스터링)
hc <- hclust(idist)
hc
# Cluster method : complete
# Distance : euclidean
# Number of objects: 1
plot(hc, hang=-1)
rect.hclust(hc, k=3, border="red") # 3개 그룹수
#@@5
# 3) 그룹수 만들기 : cutree()함수 -> 지정된 그룹수 만큼 자르기
# 형식) cutree(계층형군집결과, k=그룹수) -> 그룹수 만큼 자름
ghc<- cutree(hc, k=3) # stats 패키지 제공
ghc # 150개(그룹을 의미하는 숫자(1~3) 출력)
# [1] 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
# [24] 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
# [47] 1 1 1 1 2 2 2 3 2 3 2 3 2 3 3 3 3 2 3 2 3 3 2
# [70] 3 2 3 2 2 2 2 2 2 2 3 3 3 3 2 3 2 2 2 3 3 3 2
# [93] 3 3 3 3 3 2 3 3 2 2 2 2 2 2 3 2 2 2 2 2 2 2 2
# [116] 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2
# [139] 2 2 2 2 2 2 2 2 2 2 2 2
# 4) iris에서 ghc 컬럼 추가
iris$ghc <- ghc
table(iris$ghc) # ghc 빈도수
# 1 2 3
# 50 72 28
head(iris,60) # ghc 칼럼 확인
#@@6
# 5) 그룹별 요약통계량 구하기
g1 <- subset(iris, ghc==1)
summary(g1[1:4])
# Sepal.Length Mean :5.006 / Petal.Length Mean :1.462
g2 <- subset(iris, ghc==2)
summary(g2[1:4])
# Sepal.Length Mean :6.546 / Petal.Length Mean :5.274
g3 <- subset(iris, ghc==3)
summary(g3[1:4])
# Sepal.Length Mean :5.532 / Petal.Length Mean :3.961
#------------------------------------------------------------------
# 4. 비계층적 군집분석(확인적 분석)
# - 군집 수를 알고 있는 경우 이용하는 군집분석 방법
# 군집분석 종류 : 계층적 군집분석(탐색적), 비계층적 군집분석(확인적)
# 1) data set 준비
library(ggplot2)
data(diamonds)
nrow(diamonds) # [1] 53940
View(diamonds)
str(diamonds) # Classes ‘tbl_df’, ‘tbl’ and 'data.frame': 53940 obs. of 10 variables:
t <- sample(nrow(diamonds),1000) # 1000개 샘플링
test <- diamonds[t, ] # 1000개 표본 추출
dim(test) # [1] 1000 10
head(test) # 검정 데이터
mydia <- test[c("price","carat", "depth", "table")] # 4개 칼럼만 선정
head(mydia)
# 2) 계층적 군집분석(탐색적 분석)
result <- hclust(dist(mydia), method="average") # 평균거리 이용
result
# Cluster method : average
# Distance : euclidean
# Number of objects: 1000
# [작성] 군집 방법(Cluster method)
# method = "complete" : 완전결합기준(최대거리 이용) <- default(생략 시)
# method = "single" : 단순결합기준(최소거리 이용)
# method = "average" : 평균결합기준(평균거리 이용)
plot(result, hang=-1) # hang : -1 이하 값 제거
#@@7
# 3) 비계층적 군집분석(확인적 분석) - kmeans()함수 이용
# - 확인적 군집분석 : 군집의 수를 알고 있는 경우
result2 <- kmeans(mydia, 3)
result2
# K-means clustering with 3 clusters of sizes 307, 115, 578 - 클러스터별 군집수
# Cluster means: 클러스터별 칼럼의 평균
names(result2) # cluster 칼럼 확인
#@@8
result2$cluster # 각 케이스에 대한 소속 군집수(1,2,3)
table(result2$cluster)
# 1 2 3
# 307 115 578
result2$centers # 각 군집별 중앙값(Cluster means)
# price carat depth table
# 1 5269.003 1.0771661 61.93094 57.89088
# 2 11989.887 1.5917391 61.68522 57.87826
# 3 1407.891 0.4834256 61.73010 57.05709
# 4) 원형데이터에 군집수 추가
mydia$cluster <- result2$cluster
head(mydia) # cluster 칼럼 확인
# price carat depth table cluster
# <int> <dbl> <dbl> <dbl> <int>
# 1 8214 1.51 62.8 59 1
# 2 5189 1.08 62.6 57 1
# 3 4654 1.12 61.6 58 1
# 4 5546 1 60.5 60 1
# 5 764 0.4 61.5 56 3
# 6 4836 1.01 63.7 60 1
# 5) 변수 간의 상관성 보기
plot(mydia[,-5]) # cluster 제외
#@@9
cor(mydia[,-5], method="pearson") # 상관계수 보기
# price carat depth table
# price 1.0000000 0.9212910 0.0275522 0.1581566
# carat 0.9212910 1.0000000 0.0641868 0.2074748
# depth 0.0275522 0.0641868 1.0000000 -0.3128470
# table 0.1581566 0.2074748 -0.3128470 1.0000000
#<해석> 상관관계가 가장 높음 price와 carat을 x축과 y축으로 해서 산점도 시각화
# 반응변수 : price <- 설명변수 : carat(양의 영향) > table(양의 영향) > depth(음의 영향)
library(corrgram) # 상관성 시각화
corrgram(mydia[,-5]) # 색상 적용 - 동일 색상으로 그룹화 표시
corrgram(mydia[,-5], upper.panel=panel.conf) # 수치(상관계수) 추가(위쪽)
# 6) 비계층적 군집시각화 --- 상관관계가 가장 높음 price(y축)와 carat(x축)으로 산점도 시각화
plot(mydia$carat, mydia$price)
plot(mydia$carat, mydia$price, col=mydia$cluster)
# mydia$cluster 변수로 색상 지정(1,2,3)
#<해석>군집이 제대로 만들어 졌는지 시각화로 확인
#@@10
# 중심점 표시 추가
result2$centers # Cluster means 값을 갖는 컬럼
# 각 그룹의 중심점에 포인트 추가
points(result2$centers[,c("carat", "price")], col=c(3,1,2), pch=8, cex=5)
# names(result2) -> centers 칼럼 확인
# col : color, pch : 중심점 문자, cex : 중심점 문자 크기
# pch(plotting character), cex(character expansion)
#@@11
####################################################
## 군집수 결정방법
####################################################
install.packages("NbClust")
library(NbClust)
data("iris")
iris_mat <- as.matrix(iris[-5])
dim(iris_mat)
?NbClust
#NbClust(data = NULL, diss = NULL, distance = "euclidean", min.nc = 2, max.nc = 15, method = NULL, index = "all", alphaBeale = 0.1)
nc <- NbClust(iris_mat, distance = "euclidean", min.nc = 2, max.nc = 15, method = "complete")
nc
#* According to the majority rule, the best number of clusters is 3
#@@14
names(nc)
# [1] "All.index" "All.CriticalValues"
# [3] "Best.nc" "Best.partition"
table(nc$Best.nc[1,])
# 0 1 2 3 4 6 15 ---> 클러스트
# 2 1 2 13 5 1 2
#<해석> 가장 높은 빈도수(13) -> 3개 클러스트가 가장 최적이다.
|
ccf47650fc1cb96834a9b64c445efc68a3f33f1c
|
a1d5615adff3d432c9d22f450d67d1340e81bec4
|
/R/headlines.R
|
c40dccf68193ea0174a4690d5d2875e1fc8a7387
|
[] |
no_license
|
phebepalmer/textclassificationexamples
|
922b0faeb513c114ee507edb4d722111bf121b3e
|
7965fb2fa26e5aec98773c7b0374abb5558e5bf5
|
refs/heads/master
| 2022-12-05T18:27:47.196889
| 2020-08-20T16:29:20
| 2020-08-20T16:29:20
| 281,805,698
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,135
|
r
|
headlines.R
|
#' Headlines
#'
#' This data comes from Chakraborty et al., which combines headlines from
#' a variety of news and clickbait sources. Many of these headlines contain
#' subject matter inappropriate for classroom use. Given the volume of headlines
#' containing such language (especially for clickbait == TRUE), this filtering
#' might not catch all problematic headlines. User discretion is advised.
#'
#' @docType data
#' @format A data frame with 22949 rows and 3 variables:
#' \describe{
#'   \item{title}{String}
#'   \item{clickbait}{Boolean}
#'   \item{ids}{Integer}
#' }
#' @source \url{https://github.com/bhargaviparanjape/clickbait}
"headlines"
#' Headlines Train
#'
#' This dataset is a random sample of approximately 80\% of the observations
#' in "headlines".
#'
#' @docType data
#' @format A data frame with 18360 rows and 3 variables:
#' \describe{
#' \item{title}{String}
#' \item{clickbait}{Boolean}
#' \item{ids}{Integer}
#' }
#' @source \url{https://github.com/bhargaviparanjape/clickbait}
"headlines_train"
#' Headlines Test
#'
#' This dataset is a random sample of the remaining 20\% of the observations
#' in "headlines" not found in the training set.
#'
#' @docType data
#' @format A data frame with 4589 rows and 3 variables:
#' \describe{
#' \item{title}{String}
#' \item{clickbait}{Boolean}
#' \item{ids}{Integer}
#' }
#' @source \url{https://github.com/bhargaviparanjape/clickbait}
"headlines_test"
#' Sample Headlines
#'
#' This dataset is a random sample of 1000 clickbait article headlines and
#' 1000 news article headlines from "headlines". This was created for use in
#' a guided activity using a data access platform called CODAP. If students
#' are interested in exploring how attributes differ across clickbait and news
#' headlines through use of 2 X 2 count tables, using this set will allow for
#' easier interpretation.
#'
#' @docType data
#' @format A data frame with 2000 rows and 3 variables:
#' \describe{
#' \item{title}{String}
#' \item{clickbait}{Boolean}
#' \item{ids}{Integer}
#' }
#' @source \url{https://github.com/bhargaviparanjape/clickbait}
"sample_headlines"
|
9dae839eaa871f186aa1a4de28626d1789e8b1c4
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/CNVassoc/R/mixture.R
|
7f0bf2ea903899bf24cb6680c2f9d2377db4ff76
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,265
|
r
|
mixture.R
|
# Classify probe intensities into copy-number classes by fitting a finite
# mixture model (via the project-level mix()), optionally reserving the
# extremes below threshold.0 / above threshold.k as fixed pseudo-classes.
# Returns an integer class assignment per intensity with fitted means, sds
# and related metadata attached as attributes.
mixture <-
function (intensities, num.class, mix.method, threshold.0, threshold.k,
    mu.ini, sigma.ini, pi.ini, var.equal)
{
    # Mixture engine selector: 1 = mixdist, 2 = mclust, 3 = EMmixt.
    method <- charmatch(mix.method, c("mixdist", "mclust", "EMmixt"))
    miss.threshold0 <- missing(threshold.0)
    miss.thresholdk <- missing(threshold.k)
    # A usable lower threshold carves off one class, so the mixture fits one
    # class fewer; otherwise the threshold is disabled (-Inf).
    if (!miss.threshold0 && threshold.0 > min(intensities))
        num.class <- num.class - 1
    else {
        if (!miss.threshold0 && threshold.0 <= min(intensities))
            warning("threshold.0 ignored because it's smaller than intensity minimum value")
        threshold.0 <- -Inf
    }
    # Same logic for the upper threshold (disabled value: Inf).
    if (!miss.thresholdk && threshold.k < max(intensities))
        num.class <- num.class - 1
    else {
        if (!miss.thresholdk && threshold.k >= max(intensities))
            warning("threshold.k ignored because it's bigger than intensity maximum value")
        threshold.k <- Inf
    }
    # Only intensities strictly between the thresholds are fitted.
    yy <- intensities[intensities > threshold.0 & intensities < threshold.k]
    if (max(num.class) < 2) {
        # Degenerate case: fewer than two free classes -> classify purely by
        # the threshold bins, with hard (0/1) class probabilities.
        cc <- unique(c(-Inf, threshold.0, threshold.k, Inf))
        out <- as.integer(cut(intensities, cc))
        attr(out, "mixture") <- NULL
        attr(out, "means") <- mean(yy)
        attr(out, "sds") <- 0
        attr(out, "probabilities") <- sapply(seq(along=unique(out)), function(j) ifelse(out==j,1,0))
        num.class <- 1
    }
    else {
        # Fit the mixture and translate it to per-intensity classes.
        res <- mix(yy, method, num.class, mu.ini, sigma.ini, pi.ini, var.equal)
        num.class <- res$G
        out <- classCNV(intensities, res, threshold.0, threshold.k)
        attr(out, "mixture") <- res
        attr(out, "means") <- c(res$parameter$mu)
        attr(out, "sds") <- c(res$parameter$sigma)
    }
    attr(out, "meanRatio") <- intensities
    attr(out, "num.copies") <- c(1:num.class)
    # Thresholded extremes are appended as zero-sd pseudo-classes:
    # copy number 0 below threshold.0, num.class + 1 above threshold.k.
    if (!miss.threshold0) {
        attr(out, "means") <- c(min(intensities), attr(out, "means"))
        attr(out, "sds") <- c(0, attr(out, "sds"))
        attr(out, "num.copies") <- c(0, attr(out, "num.copies"))
    }
    if (!miss.thresholdk) {
        attr(out, "means") <- c(attr(out, "means"), max(intensities))
        attr(out, "sds") <- c(attr(out, "sds"), 0)
        attr(out, "num.copies") <- c(attr(out, "num.copies"), num.class + 1)
    }
    out
}
|
9d87d7d03291d64b2851fabf6dbb34194dbb6e9e
|
607fb8ae7ca5550de01bbb86304e6b4cf0e24152
|
/Tem que ver isso ai.R
|
89886ebf48773a0aeecb91a7ad105b452facc225
|
[] |
no_license
|
rodolphoBernabel/Curiosidades-eleitorais-2016
|
301090b55fd61f38b6b600edefa109f7db303bf2
|
fead587bc1b152ed5a7365198bb6af1e8e157fd2
|
refs/heads/master
| 2021-01-24T11:39:50.834161
| 2016-10-14T18:17:29
| 2016-10-14T18:17:29
| 70,204,761
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 1,182
|
r
|
Tem que ver isso ai.R
|
################################
# Curiosidades Eleitorais 2016 #
# Rodolpho Bernabel #
# 2016-10-06 #
################################
# Clear the workspace.
# NOTE(review): rm(list = ls()) wipes the interactive session; kept for
# compatibility with the original script, but consider removing it.
rm(list = ls())

# Read the file (replace the path with your own directory).
despesas_candidatos_2016_brasil <- read.csv(
  "C:/Users/Rodolpho/Downloads/despesas_candidatos_2016_brasil.txt",
  sep = ";"
)

# Work on a copy of the data.
df <- despesas_candidatos_2016_brasil

# Data munging: the source uses a comma as the decimal separator, so swap
# "," for "." and convert the expense value to numeric.
# (Replaces the original attach()/cbind()/rm() dance, which polluted the
# search path and risked name masking, with a direct column assignment.)
df$Valor.despesa.numeric <- as.numeric(gsub(",", ".", df$Valor.despesa))

# Check the results.
head(df$Valor.despesa.numeric)
head(df$Valor.despesa)
summary(df$Valor.despesa.numeric)

# Eliane Galdino
caso.1 <- df[which(df$Valor.despesa.numeric >= 7675000.8), ]
caso.1

# Maurin Ribeiro
caso.2 <- df[which(df$Valor.despesa.numeric == 2175000), ]
caso.2

# Joel Tomazi
caso.3 <- df[which(df$Valor.despesa.numeric == 4400000), ]
caso.3

# Carlos Duarte
caso.4 <- df[which(df$Valor.despesa.numeric == 1200000.41), ]
caso.4
|
edf9b957fad52d6342d94facebff5d44c58b0226
|
3ad58e81afb376d43220fc138a088bc47c63e8ff
|
/tests/testthat/test-openfair.R
|
9c15e600bd8b3d6db37cf49e6280d20d5404896f
|
[
"MIT"
] |
permissive
|
redzstyle/evaluator
|
11fe5425631c482592c697d0ca676f416e57419e
|
e81a72ca6536395b58fda9cc2a21d6cb78723740
|
refs/heads/master
| 2020-04-27T09:28:14.889581
| 2019-02-05T16:59:11
| 2019-02-05T16:59:11
| 125,245,691
| 0
| 0
|
NOASSERTION
| 2019-02-10T22:06:23
| 2018-03-14T16:58:20
|
R
|
UTF-8
|
R
| false
| false
| 9,385
|
r
|
test-openfair.R
|
test_that("Sample TEF", {
set.seed(1234)
tef <- sample_tef(params = list(n=10, 1, 10, 100))
expect_is(tef, "list")
# ensure that the list has the required elements
expect_equal(names(tef), c("type", "samples", "details"))
# ensure that the samples matches the number requested
expect_equal(length(tef$samples), 10)
# ensure that TEF values are returned as integers
expect_is(tef$samples, "integer")
# ensure that values of samples is correct
expect_equal(unlist(tef$samples),
c(7, 30, 2, 34, 36, 13, 14, 14, 9, 15))
})
context("Sample DIFF")
test_that("Sample DIFF", {
set.seed(1234)
dat <- sample_diff(params = list(n=10, 50, 70, 75, 3))
expect_is(dat, "list")
# ensure that the list has the required elements
expect_equal(names(dat), c("type", "samples", "details"))
# ensure that the samples matches the number requested
expect_equal(length(dat$samples), 10)
# ensure that values of samples is correct
expect_equal(signif(unlist(dat$samples), digits = 4),
signif(c(72.5519454551502, 65.1852603020272, 59.1564180836877,
74.5816023178688, 64.1192226440207, 63.561355776164,
70.1284833577168, 69.9960887031119, 70.0802721600923,
71.4683219144408), digits = 4))
})
test_that("Multi control diff works", {
  # NOTE(review): this test builds its fixture but contains no expect_*()
  # calls, so it can never fail. TODO: add expectations exercising
  # multi-control difficulty sampling against diff_estimates.
  diff_estimates <- data.frame(l = c(1, 2), ml = c(10, 15), h = c(20, 100),
                               conf = c(1, 3))
})
context("Sample TC")
test_that("Sample TC", {
set.seed(1234)
tc <- sample_tc(params = list(n=10, 50, 75, 100, 4))
expect_is(tc, "list")
# ensure that the list has the required elements
expect_equal(names(tc), c("type", "samples", "details"))
# ensure that the samples matches the number requested
expect_equal(length(tc$samples), 10)
# ensure that values of samples is correct
expect_equal(signif(unlist(tc$samples), digits = 4),
signif(c(61.7026564773373, 78.188740471894, 87.0623477417219,
53.1987199785052, 79.9184628308895, 80.7889924652588,
68.4387021948896, 68.7541469869603, 68.554057026653,
64.9764652390671), digits = 4))
})
context("Select Loss Opportunities")
test_that("Mean Difficulty Exceedance works when there are zero losses", {
threat_strengths <- c(0.2, 0.3, 0.4)
diff_strengths <- c(0.3, 0.4, 0.5)
dat <- select_loss_opportunities(threat_strengths, diff_strengths)
expect_equal(dat$details$mean_diff_exceedance, 0.1)
})
context("Sample VULN")
test_that("Sample VULN works with binom", {
set.seed(1234)
dat <- sample_vuln(params = list(n=10, 1, .5))
expect_is(dat, "list")
# ensure that the list has the required elements
expect_equal(names(dat), c("type", "samples", "details"))
# ensure that the samples matches the number requested
expect_equal(length(dat$samples), 10)
# ensure that values of samples is correct
expect_equal(sum(dat$samples), 7)
})
test_that("Sample VULN works with TC and DIFF", {
set.seed(1234)
tc <- sample_tc(params = list(n=10, 50, 70, 85, 2))$samples
diff <- sample_diff(params = list(n=10, 50, 70, 85, 2))$samples
dat <- sample_vuln(func = "evaluator::select_loss_opportunities", params = list(tc = tc, diff = diff))
expect_is(dat, "list")
# ensure that the list has the required elements
expect_equivalent(names(dat), c("type", "samples", "details"))
# ensure that the samples matches the number requested
expect_equivalent(length(dat$samples), 10)
# ensure that values of samples is correct
expect_equivalent(sum(dat$samples), 5)
# ensure that mean_tc_exceedance is set correctly
expect_equivalent(floor(dat$details$mean_tc_exceedance), 7)
# ensure that mean_diff_exceedance is set correctly
expect_equivalent(floor(dat$details$mean_diff_exceedance), 8)
})
test_that("TC and DIFF exceedance handles NA threat events", {
set.seed(1234)
tc <- c(NA)
diff <- sample_diff(params = list(n=2, 50, 70, 85, 2))$samples
dat <- sample_vuln(func = "evaluator::select_loss_opportunities", params = list(tc = tc, diff = diff))
expect_is(dat, "list")
# ensure that mean_tc_exceedance is set correctly
expect_equivalent(dat$details$mean_tc_exceedance, NA)
# ensure that mean_diff_exceedance is set correctly
expect_equivalent(dat$details$mean_diff_exceedance, NA)
})
context("Sample LM")
test_that("Sample LM", {
set.seed(1234)
lm <- sample_lm(params = list(n=10, min=1*10^4, mode=5*10^4, max=1*10^7, shape=3))
expect_is(lm, "list")
# ensure that the list has the required elements
expect_equal(names(lm), c("type", "samples", "details"))
# ensure that the samples matches the number requested
expect_equal(length(lm$samples), 10)
# ensure that values of samples is correct
expect_equal(signif(unlist(lm$samples), digits = 4),
signif(c(332422.727880636, 2831751.79415706, 35602.2608120876,
3349352.73654269, 3632631.71769846, 927503.010814968,
966756.805719722, 941718.366417413, 569057.598433507,
1069488.76293628), digits = 4))
})
test_that("Non-standard distributions work as expected", {
set.seed(1234)
lm <- sample_lm(func = "EnvStats::rlnormTrunc", list(n = 10, meanlog = 1, sdlog = 2, min = 1, max = 2))
expect_is(lm, "list")
# ensure that the list has the required elements
expect_equal(names(lm), c("type", "samples", "details"))
# ensure that the samples matches the number requested
expect_equal(length(lm$samples), 10)
# ensure that values of samples is correct
expect_equal(signif(unlist(lm$samples), digits = 4),
signif(c(1.087017, 1.552746, 1.539039, 1.553887, 1.823434,
1.571874, 1.007058, 1.184094, 1.599599, 1.442124),
digits = 4))
})
context("Sample LEF")
test_that("Sample LEF works with composition function", {
set.seed(1234)
tef <- sample_tef(params = list(n=10, 1, 10, 20))
vuln <- sample_vuln(params = list(n=10, 1, .6))
dat <- sample_lef(func = "evaluator::compare_tef_vuln",
params = list(tef = tef$samples, vuln = vuln$samples))
expect_is(dat, "list")
# ensure that the list has the required elements
expect_equal(names(dat), c("type", "samples", "details"))
# ensure that the samples matches the number requested
expect_equal(length(dat$samples), 10)
# ensure that LEF samples are always integers
expect_is(dat$samples, "integer")
# ensure that values of samples is correct
expect_equal(dat$samples, c(5, 11, 15, 2, 12, 0, 8, 0, 0, 6))
})
context("Standard simulation model")
test_that("Default simulation model returns expected results", {
sim <- openfair_tef_tc_diff_lm(list(
tef_params=list(list(func = "mc2d::rpert", min = 1, mode = 10, max=100, shape=4)),
tc_params=list(list(func = "mc2d::rpert", min = 1, mode = 10, max =75, shape=100)),
lm_params=list(list(func = "mc2d::rpert", min = 1, mode = 100, max = 10000, shape=54)),
diff_params=list(list(list(func = "mc2d::rpert", min = 1, mode = 10, max = 50, shape = 4)))),
n = 100)
expect_s3_class(sim, "tbl_df")
expect_equal(nrow(sim), 100)
expect_equal(length(sim), 12)
expect_equal(sum(sim$threat_events), 2287)
expect_equal(sum(sim$loss_events), 786)
})
context("Main simulation")
test_that("Full wrapped scenario works as expected", {
scenario <-structure(list(scenario_id = "1", scenario = "Inadequate human resources are available to execute the informaton security strategic security plan.",
tcomm = "Organizational Leadership", domain_id = "ORG",
controls = "1, 5, 7, 32, 14, 15, 16",
diff_params = list(list(list(func = "mc2d::rpert", min = 70L, mode = 85L, max = 98L, shape = 4L),
list(func = "mc2d::rpert", min = 50L, mode = 70L, max = 84L, shape = 4L),
list(func = "mc2d::rpert", min = 0L, mode = 10L, max = 30L, shape = 4L),
list(func = "mc2d::rpert", min = 50L, mode = 70L, max = 84L, shape = 4L),
list(func = "mc2d::rpert", min = 20L, mode = 30L, max = 50L, shape = 4L),
list(func = "mc2d::rpert", min = 20L, mode = 30L, max = 50L, shape = 4L),
list(func = "mc2d::rpert", min = 50L, mode = 70L, max = 84L, shape = 4L))),
tef_params = list(list(func = "mc2d::rpert",min = 10L, mode = 24, max = 52L, shape = 4L)),
tc_params = list(list(func = "mc2d::rpert", min = 33L, mode = 50, max = 60L, shape = 3L)),
lm_params = list(list(func = "mc2d::rpert", min = 10000L, mode = 20000, max = 500000L, shape = 4L))), row.names = c(NA, -1L),
class = c("tbl_df", "tbl", "data.frame"))
results <- evaluate_promise(run_simulations(scenario, 100L))
expect_s3_class(results$result, "tbl_df")
expect_equal(nrow(results$result), 100)
expect_equal(length(results$result), 13)
expect_equal(sum(results$result$threat_events), 2686)
#$expect_equal(sum(results$result$loss_events), 764)
expect_equal(sum(results$result$loss_events), 772)
})
|
ee28ef661ca6c7d02f0a3d84c559bef29ba5cc7e
|
6dfb737d4f74bff5392f26ffc101aa84d0695d3a
|
/inst/shiny/server.R
|
fa1f6174d9f847685f0374b8b331c13550f27833
|
[] |
no_license
|
cran/weightr
|
e66651ef7d65e554bb6782b4ce5e96c7fcdba910
|
af3c9bc0df2ff17ce2dbdc8e0a412ec479ea94b7
|
refs/heads/master
| 2021-01-01T05:03:10.816984
| 2019-07-06T17:00:10
| 2019-07-06T17:00:10
| 56,994,714
| 0
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 24,197
|
r
|
server.R
|
library("shiny")
library("foreign")
library("gridExtra")
library("ggplot2")
library("plotly")
source("weightfunction.R")
# Server logic for a weight-function meta-analysis Shiny app. All data come
# from the file uploaded via input$file1; the UI selectors below are built
# reactively from that file's column names.
shinyServer(function(input, output, session) {
# Reactive: the uploaded data set, or NULL until a file is chosen.
# Read settings (header/sep/quote) come from matching UI inputs.
filedata <- reactive({
inFile <- input$file1
if (is.null(inFile))
return(NULL)
return(read.table(inFile$datapath, header=input$header, sep=input$sep, quote=input$quote,fill=TRUE))
})
# Preview table of the raw uploaded data.
output$contents <- renderTable({
validate(
need(input$file1 !="NULL", "Please upload a data file."))
filedata()
})
# Dropdown: number of significant digits used when formatting results.
output$selectdigits <- renderUI({
selectInput(inputId = "digits", label="Select the number of significant digits to report.", choices=c(1,2,3,4,5,6,7,8,9,10),selected=c(4),multiple=FALSE)
})
# Dropdown: which column holds the effect sizes.
output$selectdigits
output$selecteffects <- renderUI({
#if(!is.null(filedata())){
colNames <- colnames(filedata())
selectInput(inputId = "effects", label="Select the variable containing your effect sizes.", choices=colNames,multiple=FALSE)#}
#else{
# return()
#}
})
# Dropdown: sampling-variance column (effect-size column excluded).
output$selectvariances <- renderUI({
#if(!is.null(filedata())){
colNames <- colnames(filedata())
colNames2 <- colNames[colNames != input$effects] #WORKS
selectInput(inputId = "variances", label="Select the variable containing your sampling variances.", choices=colNames2,multiple=FALSE)#}
# else{
# return()
# }
})
# Dropdown: user-supplied p-value column (only used when input$selectp is TRUE).
output$thesearemyp <- renderUI({
colNames <- colnames(filedata())
colNames2 <- colNames[colNames != input$effects]
selectInput(inputId = "thesearemyp", label="Select that column.", choices=colNames2,selected=0,multiple=FALSE)
})
# Dropdown: optional moderator columns (effects and variances excluded).
output$selectmods <- renderUI({
#if(!is.null(filedata())){
colNames <- colnames(filedata())
colNames2 <- colNames[colNames != input$effects]
colNames3 <- colNames2[colNames2 != input$variances]
selectInput(inputId = "moderators", label="Select any moderator variables to include.", choices=colNames3,multiple=TRUE)#}
#else{
# return()
#}
})
# p-value cutpoints for the selection (weight) model; free-typed values allowed.
output$selectsteps <- renderUI({
#if(!is.null(filedata())){
selectizeInput(inputId = "steps", label="Select at least one p-value cutpoint to include in your model. To include a cutpoint not provided, type it in and press enter.", choices=c(0.001,
0.005,
0.010,
0.020,
0.025,
0.050,
0.100,
0.200,
0.250,
0.300,
0.350,
0.500,
0.600,
0.650,
0.700,
0.750,
0.800,
0.900,
0.950,
0.975,
0.980,
0.990,
0.995,
0.999),
multiple=TRUE,
selected=c(0.025), options=list(create=TRUE,openOnFocus=TRUE))#}
# else{
# return()
#}
})
# One numeric weight input per p-value interval (Vevea & Woods fixed weights).
# NOTE(review): the i == 1 branch's numericInput is discarded (it is not the
# last expression), and for i == 1 `steps[i - 1]` indexes with 0, giving an
# empty label prefix on the returned input — confirm this is intended.
output$presetweights <- renderUI({
steps <- c(sort(input$steps),1.00)
lapply(1:length(steps), function(i) {
if(i == 1){
numericInput(paste("weight", i), paste('<', steps[1]), value = 1, width = '25%')
}
numericInput(paste("weight", i), paste(steps[i - 1], "<", steps[i]), value = 1)
})
})
# Reactive: unadjusted (no-selection) model fit, intercept-only case.
# NOTE(review): the positional `600, 600` arguments to weightfunction()
# appear to be placeholder step values — confirm against weightfunction.R.
unadjustweightnomods <- reactive({
validate(need(input$file1 !="NULL", "Please upload a data file."),
need(input$effects != 0, "Please enter the column numbers of the data file containing your effect sizes and variances."))
if(length(input$moderators) == 0){
effect <- filedata()[,input$effects]
v <- filedata()[,input$variances]
if(input$selectp==TRUE){
p <- filedata()[,input$thesearemyp]
}
if(input$selectp==FALSE){
# One-tailed p-values derived from the z statistic effect/sqrt(v).
p <- 1-pnorm(effect/sqrt(v))
}
unadnomods <- weightfunction(effect=effect, v=v,npred=0, 600, 600, p=p)
unadnomods
}
})
# Reactive: unadjusted model fit with moderators. The design matrix XX is
# built via model.matrix so factor moderators expand into dummy columns.
unadjustweightmods <- reactive({
if(length(input$moderators) > 0){
npred <- length(input$moderators)
effect <- filedata()[,input$effects]
v <- filedata()[,input$variances]
number <- length(effect)
# XX <- matrix(nrow=number,ncol=(npred+1))
# XX[,1] <- rep(1,number)
# for(i in 2:(npred+1)){ XX[,i] <- filedata()[,input$moderators[i - 1]] }
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Xx <- matrix(nrow=number,ncol=npred)
# modnames <- rep(0, npred)
# for(i in 1:npred)
# {
# Xx[,i] <- filedata()[,input$moderators[i]]
# modnames[i] <- noquote(paste(c("Xx[,",i,"]","+","Xx[,",i + 1,"]"),collapse=" "))
# }
# XX <- model.matrix(~modnames)
Xx <- matrix(nrow=number,ncol=npred)
Xx <- as.data.frame(Xx)
for(i in 1:npred)
{
Xx[,i] <- filedata()[,input$moderators[i]]
colnames(Xx)[i] <- input$moderators[i]
}
XX <- model.matrix(~., Xx)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# npred is re-derived from XX because factors may add dummy columns.
npred <- dim(XX)[2] - 1
prednames <- colnames(XX)
if(input$selectp==TRUE){
p <- filedata()[,input$thesearemyp]
}
if(input$selectp==FALSE){
p <- 1-pnorm(effect/sqrt(v))
}
unadmods <- weightfunction(effect=effect,
v=v,
npred, steps=600, XX=XX, prednames=prednames, p=p)
unadmods
}
})
# Reactive: adjusted (selection) model, intercept-only. When input$woods is
# TRUE the user-specified fixed weights are used (Vevea & Woods 2005).
# NOTE(review): eval(parse()) reads the dynamically named "weight i" inputs;
# input[[paste("weight", i)]] would be the safer equivalent — confirm.
adjustweightnomods <- reactive({
if(length(input$moderators) == 0){
effect <- filedata()[,input$effects]
v <- filedata()[,input$variances]
steps <- c(as.numeric(sort(input$steps)),1.00)
if(input$selectp==TRUE){
p <- filedata()[,input$thesearemyp]
}
if(input$selectp==FALSE){
p <- 1-pnorm(effect/sqrt(v))
}
if(input$woods){
weights <- rep(0, length(steps))
for(i in 1:length(steps)){
weights[i] <- eval(parse(text=paste("input$'weight ", i, "'", sep="")))
}
adnomods <- weightfunction(effect=effect, v=v, npred=0, steps=steps, 600, weights=weights, p=p)
}
else{
adnomods <- weightfunction(effect=effect, v=v, npred=0, steps=steps, 600, p=p)
}
adnomods
# format(adnomods, digits=input$digits)
}
})
# Reactive: adjusted (selection) model with moderators; same XX construction
# as unadjustweightmods above.
adjustweightmods <- reactive({
if(length(input$moderators) > 0){
effect <- filedata()[,input$effects]
v <- filedata()[,input$variances]
npred <- length(input$moderators)
steps <- c(as.numeric(sort(input$steps)),1.00)
number <- length(effect)
if(input$selectp==TRUE){
p <- filedata()[,input$thesearemyp]
}
if(input$selectp==FALSE){
p <- 1-pnorm(effect/sqrt(v))
}
# XX <- matrix(nrow=number,ncol=(npred+1))
# XX[,1] <- rep(1,number)
# for(i in 2:(npred+1)){ XX[,i] <- filedata()[,input$moderators[i - 1]] }
Xx <- matrix(nrow=number,ncol=npred)
Xx <- as.data.frame(Xx)
for(i in 1:npred)
{
Xx[,i] <- filedata()[,input$moderators[i]]
colnames(Xx)[i] <- input$moderators[i]
}
XX <- model.matrix(~.,Xx)
npred <- dim(XX)[2] - 1
prednames <- colnames(XX)
if(input$woods){
weights <- rep(0, length(steps))
for(i in 1:length(steps)){
weights[i] <- eval(parse(text=paste("input$'weight ", i, "'", sep="")))
}
admods <- weightfunction(effect=effect, v=v, npred, steps=steps, XX=XX, prednames, weights=weights, p=p)
}
else{
admods <- weightfunction(effect=effect, v=v, npred, steps=steps, XX=XX, prednames, p=p)
}
# format(admods, digits=input$digits)
admods
}
})
# NOTE(review): this render only validates that a file was uploaded and
# returns nothing — it renders an empty table. Confirm whether it should
# display the effect-size column instead.
output$effects <- renderTable({
validate(need(input$file1 !="NULL", "Please upload a data file."))
})
makedefaultPlot <- function(effect, v){
range_effect <- max(effect) - min(effect)
range_v <- max(sqrt(v)) - min(sqrt(v))
lowbound_effect <- min(effect) - 0.05*range_effect
upbound_effect <- max(effect) + 0.05*range_effect
lowbound_v <- min(sqrt(v)) - 0.05*range_v
upbound_v <- max(sqrt(v)) + 0.05*range_v
plot(sqrt(v),effect, xlim=c(lowbound_v,upbound_v), ylim=c(lowbound_effect, upbound_effect), xlab="Standard Error", ylab="Effect Size")
}
# Draw the flipped funnel plot: standard error (y) against effect size (x),
# with both axis limits padded by 2.5% of the corresponding variable's range.
makeotherPlot <- function(effect, v){
  se <- sqrt(v)
  pad_effect <- 0.025 * (max(effect) - min(effect))
  pad_se <- 0.025 * (max(se) - min(se))
  plot(effect, se,
       xlim = c(min(effect) - pad_effect, max(effect) + pad_effect),
       ylim = c(min(se) - pad_se, max(se) + pad_se),
       xlab = "Effect Size", ylab = "Standard Error")
}
# Main funnel plot. Options: flip axes (input$flip), overlay significance
# contour lines (input$contour), and draw reference lines at the unadjusted
# (red) and adjusted (blue) model estimates (input$estimates/estimates2).
# The [2,2] cell of each model's result table is read as the mean estimate —
# presumably the intercept row; confirm against weightfunction.R's layout.
output$funnelplot <- renderPlot({
validate(need(input$file1 !="NULL", "Please upload a data file."),
need(input$effects !=0, "Please enter the column numbers of the data file containing your effect sizes and variances."))
effect <- filedata()[,input$effects]
v <- filedata()[,input$variances]
# Convert cutpoints to upper-tail probabilities for the contour lines.
steps <- 1 - as.numeric(sort(input$steps))
range_v <- max(sqrt(v)) - min(sqrt(v))
lowbound_v <- min(sqrt(v)) - 0.05*range_v
upbound_v <- max(sqrt(v)) + 0.05*range_v
if(input$flip == FALSE){
if(input$interact == FALSE){
print(makedefaultPlot(effect, v))
if(input$contour == FALSE){
if(input$estimates == TRUE){
abline(h=unadjustweightnomods()[2,2], col="red")
}
if(input$estimates == TRUE && length(input$moderators) > 0){
abline(h=unadjustweightmods()[2,2], col="red")
}
if(input$estimates2 == TRUE){
abline(h=adjustweightnomods()[2,2], col="blue")
}
if(input$estimates2 == TRUE && length(input$moderators) > 0){
abline(h=adjustweightmods()[2,2], col="blue")
}
}
else{
# Contour lines: effect = +/- z(step) * SE, centered at zero.
testv <- seq(lowbound_v, upbound_v, 0.01)
for(i in 1:length(steps)){
lines(testv, 0 + -qnorm(steps[i])*testv)
lines(testv, 0 - -qnorm(steps[i])*testv)
# lines(testv, qnorm(steps[i], 0, testv))
}
if(input$estimates == TRUE){
abline(h=unadjustweightnomods()[2,2], col="red")
}
if(input$estimates == TRUE && length(input$moderators) > 0){
abline(h=unadjustweightmods()[2,2], col="red")
}
if(input$estimates2 == TRUE){
abline(h=adjustweightnomods()[2,2], col="blue")
}
if(input$estimates2 == TRUE && length(input$moderators) > 0){
abline(h=adjustweightmods()[2,2], col="blue")
}
}
}
}
else {
# Flipped orientation: same logic with vertical reference lines.
print(makeotherPlot(effect, v))
if(input$contour == FALSE){
if(input$estimates == TRUE){
abline(v=unadjustweightnomods()[2,2], col="red")
}
if(input$estimates == TRUE && length(input$moderators) > 0){
abline(v=unadjustweightmods()[2,2], col="red")
}
if(input$estimates2 == TRUE){
abline(v=adjustweightnomods()[2,2], col="blue")
}
if(input$estimates2 == TRUE && length(input$moderators) > 0){
abline(v=adjustweightmods()[2,2], col="blue")
}
}
else{
testv <- seq(lowbound_v, upbound_v, 0.01)
for(i in 1:length(steps)){
lines((0 + -qnorm(steps[i])*testv), testv)
lines((0 - -qnorm(steps[i])*testv), testv)
# lines(qnorm(steps[i], 0, testv), testv)
}
##### NOTE to self -- I never added lines at moderators.
### Possibly should do?
if(input$estimates == TRUE){
abline(v=unadjustweightnomods()[2,2], col="red")
}
if(input$estimates == TRUE && length(input$moderators) > 0){
abline(v=unadjustweightmods()[2,2], col="red")
}
if(input$estimates2 == TRUE){
abline(v=adjustweightnomods()[2,2], col="blue")
}
if(input$estimates2 == TRUE && length(input$moderators) > 0){
abline(v=adjustweightmods()[2,2], col="blue")
}
}
}
}
)
# Second funnel-plot renderer backing the interactive ("plotly") tab.
# NOTE(review): despite the output id, this uses base renderPlot, not
# plotly::renderPlotly; it duplicates output$funnelplot's non-interactive
# logic without the moderator reference lines — confirm intended.
output$plotly <- renderPlot({
validate(need(input$file1 !="NULL", "Please upload a data file."),
need(input$effects !=0, "Please enter the column numbers of the data file containing your effect sizes and variances."))
effect <- filedata()[,input$effects]
v <- filedata()[,input$variances]
steps <- 1 - as.numeric(sort(input$steps))
if(input$flip==FALSE){
range_effect <- max(effect) - min(effect)
range_v <- max(sqrt(v)) - min(sqrt(v))
lowbound_effect <- min(effect) - 0.05*range_effect
upbound_effect <- max(effect) + 0.05*range_effect
lowbound_v <- min(sqrt(v)) - 0.05*range_v
upbound_v <- max(sqrt(v)) + 0.05*range_v
plot(sqrt(v),effect, xlim=c(lowbound_v,upbound_v), ylim=c(lowbound_effect, upbound_effect), xlab="Standard Error", ylab="Effect Size")
if(input$contour==TRUE){
testv <- seq(lowbound_v, upbound_v, 0.01)
for(i in 1:length(steps)){
lines(testv, 0 + -qnorm(steps[i])*testv)
lines(testv, 0 - -qnorm(steps[i])*testv)
}
}
if(input$estimates==TRUE){
abline(h=unadjustweightnomods()[2,2], col="red")
}
if(input$estimates2==TRUE){
abline(h=adjustweightnomods()[2,2], col="blue")
}
}
else{
range_effect <- max(effect) - min(effect)
range_v <- max(sqrt(v)) - min(sqrt(v))
lowbound_effect <- min(effect) - 0.025*range_effect
upbound_effect <- max(effect) + 0.025*range_effect
lowbound_v <- min(sqrt(v)) - 0.025*range_v
upbound_v <- max(sqrt(v)) + 0.025*range_v
plot(effect,sqrt(v), xlim=c(lowbound_effect,upbound_effect), ylim=c(lowbound_v, upbound_v), xlab="Effect Size", ylab="Standard Error")
if(input$contour==TRUE){
testv <- seq(lowbound_v, upbound_v, 0.01)
for(i in 1:length(steps)){
lines((0 + -qnorm(steps[i])*testv), testv)
lines((0 - -qnorm(steps[i])*testv), testv)
}
}
if(input$estimates==TRUE){
abline(v=unadjustweightnomods()[2,2], col="red")
}
if(input$estimates2==TRUE){
abline(v=adjustweightnomods()[2,2], col="blue")
}
}
############################ ADD OPTIONS HERE FOR OTHER CHECKBOXES
######### LINES AT ESTIMATES, CONTOUR LINES
})
# Textual readout of mouse interactions with the funnel plot: click and
# double-click coordinates, a p-value computed from the hover position
# (treating x/y as a z statistic — assumes the flipped orientation where
# x is the effect and y the SE; confirm), and the brush rectangle.
output$info <- renderText({
xy_str <- function(e) {
if(is.null(e)) return("NULL\n")
paste0("x=", round(e$x, 1), " y=", round(e$y, 1), "\n")
}
xy_range_str <- function(e) {
if(is.null(e)) return("NULL\n")
paste0("xmin=", round(e$xmin, 1), " xmax=", round(e$xmax, 1),
" ymin=", round(e$ymin, 1), " ymax=", round(e$ymax, 1))
}
pval_str <- function(e) {
if(is.null(e)) return("NULL\n")
paste0("pval=", round((1 - abs(pnorm(e$x/e$y))), digits=2),"\n")
}
paste0(
"click: ", xy_str(input$plot_click),
"dblclick: ", xy_str(input$plot_dblclick),
"hover: ", pval_str(input$plot_hover),
"brush: ", xy_range_str(input$plot_brush)
)
})
# Density plot of expected vs. selection-adjusted effect-size densities.
# Only available for intercept-only models (no moderators).
output$density <- renderPlot({
validate(need(input$file1 !="NULL", "Please upload a data file."),need(input$effects !=0, "Please enter the column numbers of the data file containing your effect sizes and variances."),need(length(input$moderators) == 0, "Please remove the moderators from your model to view this plot. The plot does not incorporate moderators."))
effect <- filedata()[,input$effects]
v <- filedata()[,input$variances]
steps <- c(as.numeric(sort(input$steps)),1.00)
print(makeDensityPlot(effect, v, steps))
})
######### DENSITY PLOTS HERE TO NEXT LINE #############
# Draw the expected (unadjusted, dashed) marginal density of sample effect
# sizes and the selection-adjusted density implied by the estimated weights.
# Both curves are normalized to unit area via a Riemann sum on a 0.01 grid.
# Uses the intercept-only model fits; variance components and means are read
# from cells [1,2] and [2,2] of the fit tables.
makeDensityPlot <- function(effect, v, steps){
######## I THINK this works .... triple and quadruple check the damn thing!
#Identifying appropriate values
vc1 <- unadjustweightnomods()[1,2]
mu1 <- unadjustweightnomods()[2,2]
vc2 <- adjustweightnomods()[1,2]
mu2 <- adjustweightnomods()[2,2]
weights <- adjustweightnomods()[3:(length(steps)+1),2]
cuts <- steps
x_low_lim <- min(effect) - 2
x_up_lim <- max(effect) + 2
# print(c(vc1, mu1, vc2, mu2, weights, cuts, x_low_lim, x_up_lim))
xfull <- seq(x_low_lim,x_up_lim,.01)
########### Trying the harmonic mean from Hoaglin as the average
## conditional variance, rather than the median. The harmonic mean
## actually appears to do a worse job. Weird.
# The median sampling variance stands in for the "typical" study variance.
vi <- median(v)
# w <- 1/v
# s_squared <- (length(effect)-1)/( sum( w ) - sum( w^2 / sum( w ) ) )
# vi <- s_squared
###########
# Unadjusted marginal density: Normal(mu1, vi + vc1).
fx <- ( 1/(sqrt(2*pi*(vi + vc1))) ) * exp( -1/2*( (xfull - mu1)^2 / (vi + vc1) ) )
yfull <- fx
A0 <- sum(rep(.01,length(xfull))*yfull)
# fx2 <- ( 1/(sqrt(2*pi*(vi + vc2))) ) * exp( -1/2*( (xfull - mu2)^2 / (vi + vc2) ) )
# NOTE(review): fx2 deliberately reuses mu1/vc1 (the unadjusted parameters);
# the adjusted mu2/vc2 version is the commented line above — confirm.
fx2 <- ( 1/(sqrt(2*pi*(vi + vc1))) ) * exp( -1/2*( (xfull - mu1)^2 / (vi + vc1) ) )
# Interval boundaries on the effect-size scale implied by the p cutpoints,
# then weight each segment of the density by its estimated selection weight.
testlist <- -1 * qnorm(steps, 0, sqrt(vi + vc2))
testxfull <- findInterval(xfull,sort(testlist))
xlist <- split(xfull, testxfull)
ylist <- split(fx2, testxfull)
weights2 <- rev(c(1, weights))
testyfull <- mapply("*", ylist, weights2)
A1 <- sum(rep(.01,length(unlist(xlist)))*unlist(testyfull))
#Creating the plot
plot(c(x_low_lim,x_up_lim), c(0,(max(as.numeric(unlist(testyfull))/A1)+0.10)), type='n', xlab='Sample Effect Size',
ylab='Density',axes=FALSE,lwd=2,font.lab=2,main='Expected and Adjusted Densities')
box(lwd=2)
axis(side=2,font=2)
axis(side=1,font=2)
abline(c(0,0),lwd=2)
#Drawing unadjusted density
# lines(xfull,yfull,lty=2,lwd=2)
lines(xfull,yfull/A0,lty=2,lwd=2)
# lines(as.numeric(unlist(xlist)), as.numeric(unlist(testyfull)))
lines(as.numeric(unlist(xlist)), as.numeric(unlist(testyfull))/A1)
# NOTE(review): leftover debug print — safe to remove.
print("TEST")
}
######################################################
# Wrapper that sizes the funnel plot from the width/height UI controls.
output$funnelplot2 <- renderUI({
plotOutput("funnelplot", width=paste0(input$width, "%"), height=input$height)
})
# Download the current funnel plot as a date-stamped PDF.
output$downloadfunnelplot <- downloadHandler(
filename = function(){
paste('funnelplot', Sys.Date(), '.pdf', sep='')
},
content = function(FILE=NULL) {
effect <- filedata()[,input$effects]
v <- filedata()[,input$variances]
pdf(file=FILE)
if(input$flip == FALSE){
print(makedefaultPlot(effect,v))}
else{
print(makeotherPlot(effect,v))
}
dev.off()
}
)
# Table of the unadjusted model fit (with or without moderators).
output$unadjustedweightfunction <- renderTable({
if(length(input$moderators) == 0){
format(unadjustweightnomods(), digits=input$digits)
}
else{
format(unadjustweightmods(), digits=input$digits)
}
})
# Static help icons served from ./www.
output$questionmark <- renderImage({
list(src = './www/questionmark.png',width=17,height=17, alt = "Question_Mark")
}, deleteFile=FALSE)
output$questionmark2 <- renderImage({
list(src = './www/questionmark.png',width=17,height=17, alt = "Question_Mark")
}, deleteFile=FALSE)
# Table of the adjusted (selection) model fit.
# NOTE(review): the per-interval `pvalues` tallies computed in both branches
# are never used before formatting the fit — confirm whether they were meant
# to be displayed or can be removed.
output$adjustedweightfunction <- renderTable({
if(length(input$moderators) == 0){
validate(need(input$file1 !="NULL", "Please upload a data file."),
need(input$effects !=0, "Please enter the column numbers of the data file containing your effect sizes and variances."),
need(input$steps !=0, "Please select at least one p-value cutpoint to include in your model."))
# Bin p-values by the selected cutpoints (labels are the upper bounds).
intervaltally <- function(p, steps) {
p1 <- cut(p, breaks=c(-Inf,steps), labels=steps)
return(p1) }
effect <- filedata()[,input$effects]
v <- filedata()[,input$variances]
steps <- c(as.numeric(sort(input$steps)),1.00)
if(input$effects != "NULL") {
if(input$selectp==TRUE){
p <- filedata()[,input$thesearemyp]
}
if(input$selectp==FALSE){
p <- 1-pnorm(effect/sqrt(v))
}
pvalues <- as.numeric(table(intervaltally(p, steps)))
}
format(adjustweightnomods(), digits=input$digits)
# adjustweightnomods()
}
else{
intervaltally <- function(p, steps) {
p1 <- cut(p, breaks=c(-Inf,steps), labels=steps)
return(p1) }
effect <- filedata()[,input$effects]
v <- filedata()[,input$variances]
steps <- c(as.numeric(sort(input$steps)),1.00)
if(input$effects != "NULL") {
if(input$selectp==TRUE){
p <- filedata()[,input$thesearemyp]
}
if(input$selectp==FALSE){
p <- 1-pnorm(effect/sqrt(v))
}
pvalues <- as.numeric(table(intervaltally(p, steps)))
}
format(adjustweightmods(), digits=input$digits)
}
})
# Likelihood-ratio test comparing adjusted vs. unadjusted models via the
# external likelihoodfunct(); disabled for fixed-weight (Vevea & Woods) fits.
output$likelihoodratio <- renderTable({
validate(need(input$file1 !="NULL", "Please upload a data file."),
need(input$effects !=0, "Please enter the column numbers of the data file containing your effect sizes and variances."),
need(input$steps !=0, "Please select at least one p-value cutpoint to include in your model."),
need(input$woods==FALSE, "This is not valid under the Vevea and Woods (2005) model."))
if(length(input$moderators) == 0){
effect <- filedata()[,input$effects]
v <- filedata()[,input$variances]
steps <- c(as.numeric(sort(input$steps)),1.00)
if(input$selectp==TRUE){
p <- filedata()[,input$thesearemyp]
}
if(input$selectp==FALSE){
p <- 1-pnorm(effect/sqrt(v))
}
format(likelihoodfunct(effect=effect, v=v, npred=0, steps=steps, 600,p=p),digits=input$digits)
}
else{
effect <- filedata()[,input$effects]
v <- filedata()[,input$variances]
steps <- c(as.numeric(sort(input$steps)),1.00)
npred <- length(input$moderators)
if(input$selectp==TRUE){
p <- filedata()[,input$thesearemyp]
}
if(input$selectp==FALSE){
p <- 1-pnorm(effect/sqrt(v))
}
number <- length(effect)
# Design matrix built directly from moderator columns (intercept + raw
# columns). NOTE(review): unlike the fit reactives, this does not use
# model.matrix, so factor moderators are not dummy-coded here — confirm.
XX <- matrix(nrow=number,ncol=(npred+1))
XX[,1] <- rep(1,number)
for(i in 2:(npred+1)){ XX[,i] <- filedata()[,input$moderators[i - 1]] }
format(likelihoodfunct(effect=effect, v=v, npred=npred, steps=steps, XX,p=p),digits=input$digits) }
})
# Table of effect counts per p-value interval, via the external sampletable().
output$samplesizes <- renderTable({
validate(need(input$file1 !="NULL", "Please upload a data file."),
need(input$effects !=0, "Please enter the column numbers of the data file containing your effect sizes and variances."),
need(input$steps !=0,"Please select at least one p-value cutpoint to include in your model."))
effect <- filedata()[,input$effects]
v <- filedata()[,input$variances]
intervaltally <- function(p, steps) {
p1 <- cut(p, breaks=c(-Inf,steps), labels=steps)
return(p1) }
steps <- c(as.numeric(sort(input$steps)),1.00)
if(input$selectp){
p <- filedata()[,input$thesearemyp]
}
else{
p <- 1-pnorm(effect/sqrt(v))
}
pvalues <- as.numeric(table(intervaltally(p, steps)))
format(sampletable(p=p, pvalues=pvalues, steps=steps), digits=input$digits)
})
#toggleModal(session, "samplesizes", toggle="toggle")
# One-cell table reporting k, the total number of effect sizes.
output$numberofeffects <- renderTable({
validate(need(input$file1 !="NULL", "Please upload a data file."),
need(input$effects !=0, "Please enter the column numbers of the data file containing your effect sizes and variances."),
need(input$steps !=0,"Please select at least one p-value cutpoint to include in your model."))
effect <- filedata()[,input$effects]
results <- matrix(nrow=1,ncol=1)
results[1,1] <- length(effect)
resultsb <- data.frame(results, row.names=c("k"))
colnames(resultsb) <- c("Total Number of Effects")
format(resultsb, digits=input$digits)
})
})
|
e4360e5f786f320a40c16c791ec474342cd78bed
|
36f9d3d21e7bf2b897c01b8c80ccda2a7b9e1c65
|
/cachematrix.R
|
99152820d11a44072a9e58f4557862511da3efad
|
[] |
no_license
|
plagi/ProgrammingAssignment2
|
09bd255df708a78d830c6a983dd7b464f4a6ebea
|
ff96de2c4320f2b2a3820d2a83e8f8a7a653e547
|
refs/heads/master
| 2021-01-14T11:20:53.372462
| 2014-05-15T19:27:11
| 2014-05-15T19:27:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,022
|
r
|
cachematrix.R
|
## Creates a special "matrix" object that can cache its inverse.
## Returns a list of accessor closures over `x` (the matrix) and `im`
## (its cached inverse, or NULL when not yet computed):
##   set        -- replace the matrix and invalidate the cached inverse
##   get        -- return the matrix
##   setinverse -- store a computed inverse in the cache
##   getinverse -- return the cached inverse (NULL if not cached)
makeCacheMatrix <- function(x = matrix()) {
  im <- NULL
  set <- function(y) {
    x <<- y
    # Invalidate the cache: the old inverse no longer matches the new
    # matrix. (Bug fix: the original assigned to `inverseMatrix`, a name
    # that does not exist in this closure, so the stale inverse stayed
    # cached in `im` after set().)
    im <<- NULL
  }
  get <- function() x
  setinverse <- function(inverseMatrix) im <<- inverseMatrix
  getinverse <- function() im
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## Computes the inverse of the special "matrix" produced by makeCacheMatrix,
## reusing a previously cached inverse when one is available (emitting a
## message in that case) and caching a freshly computed one otherwise.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached matrix")
    return(cached)
  }
  inverse <- solve(x$get())
  x$setinverse(inverse)
  inverse
}
|
26de36118549eb8ccdc0e9335c8aa196befea3aa
|
5caa953c51a26a6bd1be24ddd78c8b6fb04eeaf1
|
/main/server.R
|
c11b23ac2edb85a359116b77a27221b153d110ea
|
[] |
no_license
|
tintinthong/hugo
|
adb121eaa1321d712d7dd1f1e45d9aed124f86db
|
6e7fc5d7f99cc6932f6c67f506fec729e0cfa525
|
refs/heads/master
| 2020-05-24T15:49:40.340583
| 2019-05-18T10:04:16
| 2019-05-18T10:04:16
| 187,340,902
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,983
|
r
|
server.R
|
#....:....1....:....2....:....3....:....4....:....5....:....6....:....7....:....8
# Shiny server function: demo plots plus a distribution sampler driven by
# input$dist and input$n. Several pieces look unfinished — see NOTE(review)
# comments below.
function(input, output, session) {
#session
dataMat<-"here"
#sourced for each session
#source('all_sessions.R', local = TRUE)
output$plotJT <- renderPlot({
# Render a barplot
# NOTE(review): `data` is not defined in this scope (only `dataMat`, a
# string, is); this render will error at runtime — confirm intent.
hist(data,
main="just something",
ylab="Shipment Value (US$)",
xlab="Year")
})
# Static placeholder labels.
output$text1 <- renderText({paste("NextWord")}) #, input$var
output$text2 <- renderText({paste("Word for scale")}) #, input$var
# Reactive expression to generate the requested distribution ----
# This is called whenever the inputs change. The output functions
# defined below then use the value computed from this expression
d <- reactive({
dist <- switch(input$dist,
norm = rnorm,
unif = runif,
lnorm = rlnorm,
exp = rexp,
rnorm)
dist(input$n)
})
# Generate a plot of the data ----
# Also uses the inputs to build the plot label. Note that the
# dependencies on the inputs and the data reactive expression are
# both tracked, and all expressions are called in the sequence
# implied by the dependency graph.
output$plot <- renderPlot({
dist <- input$dist
n <- input$n
hist(d(),
main = paste("r", dist, "(", n, ")", sep = ""),
col = "#75AADB", border = "white")
})
# Generate a summary of the data ----
output$summary <- renderPrint({
summary(d())
})
# for (Si in 1:Ss) {
# for (Mi in 1:Ms) {
# NOTE(review): this observer only copies the input into a local variable
# that is immediately discarded; the real assignment is commented out.
observeEvent(input$sliderInput, {
sliderInput <- input$sliderInput;
# SM[Si,Mi] <- vSM <- match(str(input$dynamic), btx)
})
output$dynamic_value <- renderPrint({
str(input$dynamic)
})
# NOTE(review): `vSM` is never defined (its assignment above is commented
# out), so this render will error; also a renderText is nested inside a
# renderPrint, which is almost certainly unintended — confirm.
output$numeric_value <- renderPrint({
str(vSM)
output$selected_var<- renderText({
"You have selected this"
})
})
}
|
203559af8f371c068ea3538791583db4f9b3c6b5
|
187414dcb264fb49d82507a099fd5fdca6e55e38
|
/R/pkg/inst/worker/worker.R
|
7fc4680bad10e5b2f6b0a4272483712957bf3d17
|
[
"BSD-3-Clause",
"CC0-1.0",
"CDDL-1.1",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"EPL-2.0",
"CDDL-1.0",
"MIT",
"LGPL-2.0-or-later",
"Python-2.0",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-free-unknown",
"EPL-1.0",
"Classpath-exception-2.0",
"GCC-exception-3.1",
"CC-BY-SA-3.0",
"LGPL-2.1-only",
"LicenseRef-scancode-unicode",
"CPL-1.0",
"LicenseRef-scancode-other-permissive",
"GPL-2.0-only",
"CC-PDDC",
"NAIST-2003",
"LicenseRef-scancode-other-copyleft"
] |
permissive
|
apache/spark
|
8aeba2d80465a262acc95781ede105a5b5886f6d
|
60d8fc49bec5dae1b8cf39a0670cb640b430f520
|
refs/heads/master
| 2023-09-04T04:33:36.058199
| 2023-09-04T03:48:52
| 2023-09-04T03:48:52
| 17,165,658
| 39,983
| 32,449
|
Apache-2.0
| 2023-09-14T19:46:24
| 2014-02-25T08:00:08
|
Scala
|
UTF-8
|
R
| false
| false
| 10,355
|
r
|
worker.R
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Worker class
# Current system time expressed as (fractional) seconds since the epoch.
currentTimeSecs <- function() {
  now <- Sys.time()
  as.double(now)
}
# Elapsed (wall-clock) seconds since this R process started, taken from the
# "elapsed" slot of proc.time() (index 3, name preserved).
elapsedSecs <- function() {
  times <- proc.time()
  times["elapsed"]
}
# Apply the user-supplied function to one partition of input data.
#
# mode: 0 = RDD mode, 1 = dapply mode, 2 = gapply mode. In the data.frame
# modes (mode > 0), "row"-deserialized input is first assembled into a
# data.frame named by colNames, and the result is split back into one-row
# pieces when it must leave in "row" form.
compute <- function(mode, partition, serializer, deserializer, key,
                    colNames, computeFunc, inputData) {
  # RDD mode: the function receives the partition index and the raw data.
  if (mode <= 0) {
    return(computeFunc(partition, inputData))
  }
  if (deserializer == "row") {
    # Assemble the list of rows into a data.frame. rbind's optional
    # stringsAsFactors argument only exists from R 3.2.4 on, so the global
    # option is toggled here and restored afterwards.
    previousOpt <- getOption("stringsAsFactors")
    options(stringsAsFactors = FALSE)
    hasRaw <- "raw" %in% sapply(inputData[[1]], class)
    if (hasRaw) {
      # Binary columns need the dedicated rbind helper.
      inputData <- SparkR:::rbindRaws(inputData)
    } else {
      inputData <- do.call(rbind.data.frame, inputData)
    }
    options(stringsAsFactors = previousOpt)
    names(inputData) <- colNames
  } else {
    # Otherwise the input must already be a deserialized data.frame.
    stopifnot(deserializer == "byte" || deserializer == "arrow")
    stopifnot(is.data.frame(inputData))
  }
  # gapply (mode 2) passes the grouping key along; dapply (mode 1) does not.
  output <- if (mode == 2) {
    computeFunc(key, inputData)
  } else {
    computeFunc(inputData)
  }
  if (serializer == "row") {
    # Break the result data.frame back into a list of single-row pieces.
    output <- split(output, seq(nrow(output)))
  } else {
    stopifnot(serializer == "byte" || serializer == "arrow")
  }
  output
}
# Write a computed result to the output connection in the wire format the
# driver expects for the given serializer. Any serializer value other than
# byte/row/arrow falls through to line-by-line string output.
outputResult <- function(serializer, output, outputCon) {
  switch(serializer,
         byte = SparkR:::writeRawSerialize(outputCon, output),
         row = SparkR:::writeRowSerialize(outputCon, output),
         arrow = SparkR:::writeSerializeInArrow(outputCon, output),
         # default: write lines one-by-one with flag
         lapply(output, function(line) SparkR:::writeString(outputCon, line)))
}
# Constants
specialLengths <- list(END_OF_STREAM = 0L, TIMING_DATA = -1L)
# Timing R process boot
bootTime <- currentTimeSecs()
bootElap <- elapsedSecs()
rLibDir <- Sys.getenv("SPARKR_RLIBDIR")
connectionTimeout <- as.integer(Sys.getenv("SPARKR_BACKEND_CONNECTION_TIMEOUT", "6000"))
# SPARKR_RLIBDIR may list several library paths, comma-separated.
dirs <- strsplit(rLibDir, ",")[[1]]
# Set libPaths to include SparkR package as loadNamespace needs this
# TODO: Figure out if we can avoid this by not loading any objects that require
# SparkR namespace
.libPaths(c(dirs, .libPaths()))
suppressPackageStartupMessages(library(SparkR))
# Open authenticated input and output sockets to the JVM; the read order
# below mirrors the write order on the Scala side, so it must not change.
port <- as.integer(Sys.getenv("SPARKR_WORKER_PORT"))
inputCon <- socketConnection(
port = port, blocking = TRUE, open = "wb", timeout = connectionTimeout)
SparkR:::doServerAuth(inputCon, Sys.getenv("SPARKR_WORKER_SECRET"))
outputCon <- socketConnection(
port = port, blocking = TRUE, open = "wb", timeout = connectionTimeout)
SparkR:::doServerAuth(outputCon, Sys.getenv("SPARKR_WORKER_SECRET"))
# read the index of the current partition inside the RDD
partition <- SparkR:::readInt(inputCon)
deserializer <- SparkR:::readString(inputCon)
serializer <- SparkR:::readString(inputCon)
# Include packages as required
packageNames <- unserialize(SparkR:::readRaw(inputCon))
for (pkg in packageNames) {
suppressPackageStartupMessages(library(as.character(pkg), character.only = TRUE))
}
# read function dependencies
funcLen <- SparkR:::readInt(inputCon)
computeFunc <- unserialize(SparkR:::readRawLen(inputCon, funcLen))
env <- environment(computeFunc)
parent.env(env) <- .GlobalEnv # Attach under global environment.
# Timing init envs for computing
initElap <- elapsedSecs()
# Read and set broadcast variables
numBroadcastVars <- SparkR:::readInt(inputCon)
if (numBroadcastVars > 0) {
# NOTE(review): seq(1:numBroadcastVars) is equivalent to 1:numBroadcastVars
# here (the count is > 0); seq_len(numBroadcastVars) would be the idiom.
for (bcast in seq(1:numBroadcastVars)) {
bcastId <- SparkR:::readInt(inputCon)
value <- unserialize(SparkR:::readRaw(inputCon))
SparkR:::setBroadcastValue(bcastId, value)
}
}
# Timing broadcast
broadcastElap <- elapsedSecs()
# Initial input timing
inputElap <- broadcastElap
# If -1: read as normal RDD; if >= 0, treat as pairwise RDD and treat the int
# as number of partitions to create.
numPartitions <- SparkR:::readInt(inputCon)
# 0 - RDD mode, 1 - dapply mode, 2 - gapply mode
mode <- SparkR:::readInt(inputCon)
if (mode > 0) {
colNames <- SparkR:::readObject(inputCon)
}
# Non-zero when the partition actually has data to process.
isEmpty <- SparkR:::readInt(inputCon)
computeInputElapsDiff <- 0
outputComputeElapsDiff <- 0
if (isEmpty != 0) {
if (numPartitions == -1) {
# Normal (non-pairwise) path: deserialize, compute, and serialize output.
if (deserializer == "byte") {
# Now read as many characters as described in funcLen
data <- SparkR:::readDeserialize(inputCon)
} else if (deserializer == "string") {
data <- as.list(readLines(inputCon))
} else if (deserializer == "row" && mode == 2) {
dataWithKeys <- SparkR:::readMultipleObjectsWithKeys(inputCon)
keys <- dataWithKeys$keys
data <- dataWithKeys$data
} else if (deserializer == "row") {
data <- SparkR:::readMultipleObjects(inputCon)
} else if (deserializer == "arrow" && mode == 2) {
dataWithKeys <- SparkR:::readDeserializeWithKeysInArrow(inputCon)
keys <- dataWithKeys$keys
data <- dataWithKeys$data
} else if (deserializer == "arrow" && mode == 1) {
data <- SparkR:::readDeserializeInArrow(inputCon)
# See https://stat.ethz.ch/pipermail/r-help/2010-September/252046.html
# rbind.fill might be an alternative to make it faster if plyr is installed.
# Also, note that, 'dapply' applies a function to each partition.
data <- do.call("rbind", data)
}
# Timing reading input data for execution
inputElap <- elapsedSecs()
if (mode > 0) {
if (mode == 1) {
output <- compute(mode, partition, serializer, deserializer, NULL,
colNames, computeFunc, data)
} else {
# gapply mode
# Each group is computed separately; non-arrow results are streamed out
# immediately, arrow results are collected and written in one batch.
outputs <- list()
for (i in seq_len(length(data))) {
# Timing reading input data for execution
computeStart <- elapsedSecs()
output <- compute(mode, partition, serializer, deserializer, keys[[i]],
colNames, computeFunc, data[[i]])
computeElap <- elapsedSecs()
if (serializer == "arrow") {
outputs[[length(outputs) + 1L]] <- output
} else {
outputResult(serializer, output, outputCon)
outputComputeElapsDiff <- outputComputeElapsDiff + (elapsedSecs() - computeElap)
}
computeInputElapsDiff <- computeInputElapsDiff + (computeElap - computeStart)
}
if (serializer == "arrow") {
# See https://stat.ethz.ch/pipermail/r-help/2010-September/252046.html
# rbind.fill might be an alternative to make it faster if plyr is installed.
outputStart <- elapsedSecs()
combined <- do.call("rbind", outputs)
SparkR:::writeSerializeInArrow(outputCon, combined)
outputComputeElapsDiff <- elapsedSecs() - outputStart
}
}
} else {
output <- compute(mode, partition, serializer, deserializer, NULL,
colNames, computeFunc, data)
}
if (mode != 2) {
# Not a gapply mode
computeElap <- elapsedSecs()
outputResult(serializer, output, outputCon)
outputElap <- elapsedSecs()
computeInputElapsDiff <- computeElap - inputElap
outputComputeElapsDiff <- outputElap - computeElap
}
} else {
# Pairwise path: computeFunc is used as a hash function to shuffle tuples
# into numPartitions buckets.
if (deserializer == "byte") {
# Now read as many characters as described in funcLen
data <- SparkR:::readDeserialize(inputCon)
} else if (deserializer == "string") {
data <- readLines(inputCon)
} else if (deserializer == "row") {
data <- SparkR:::readMultipleObjects(inputCon)
}
# Timing reading input data for execution
inputElap <- elapsedSecs()
res <- new.env()
# Step 1: hash the data to an environment
hashTupleToEnvir <- function(tuple) {
# NOTE: execFunction is the hash function here
hashVal <- computeFunc(tuple[[1]])
bucket <- as.character(hashVal %% numPartitions)
acc <- res[[bucket]]
# Create a new accumulator
if (is.null(acc)) {
acc <- SparkR:::initAccumulator()
}
SparkR:::addItemToAccumulator(acc, tuple)
res[[bucket]] <- acc
}
invisible(lapply(data, hashTupleToEnvir))
# Timing computing
computeElap <- elapsedSecs()
# Step 2: write out all of the environment as key-value pairs.
for (name in ls(res)) {
SparkR:::writeInt(outputCon, 2L)
SparkR:::writeInt(outputCon, as.integer(name))
# Truncate the accumulator list to the number of elements we have
length(res[[name]]$data) <- res[[name]]$counter
SparkR:::writeRawSerialize(outputCon, res[[name]]$data)
}
# Timing output
outputElap <- elapsedSecs()
computeInputElapsDiff <- computeElap - inputElap
outputComputeElapsDiff <- outputElap - computeElap
}
}
# Report timing
# The TIMING_DATA sentinel tells the JVM that five timing doubles follow;
# the write order matches the reader on the Scala side.
SparkR:::writeInt(outputCon, specialLengths$TIMING_DATA)
SparkR:::writeDouble(outputCon, bootTime)
SparkR:::writeDouble(outputCon, initElap - bootElap) # init
SparkR:::writeDouble(outputCon, broadcastElap - initElap) # broadcast
SparkR:::writeDouble(outputCon, inputElap - broadcastElap) # input
SparkR:::writeDouble(outputCon, computeInputElapsDiff) # compute
SparkR:::writeDouble(outputCon, outputComputeElapsDiff) # output
# End of output
SparkR:::writeInt(outputCon, specialLengths$END_OF_STREAM)
close(outputCon)
close(inputCon)
|
b1143c4c5e36f9c6bedac09eba732656fc60d8ca
|
53851868e25801999033fe8d7c3150b73e7dde65
|
/R/aegean/XTentModel.r
|
14aa21f3d2dd2bc233568c20692dc64e632532c8
|
[] |
no_license
|
xuzhikethinker/PRG
|
bb7e75d27f9da7611d3c26f10bb083ec69025487
|
25b971f6e65ef13f80d3a56732e4bb6d4502bb55
|
refs/heads/master
| 2016-09-06T02:27:18.042949
| 2013-03-27T18:17:53
| 2013-03-27T18:17:53
| 9,262,600
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,204
|
r
|
XTentModel.r
|
# Produces XTENT models: critical-angle dendrograms for every combination of
# clustering method (1, 2, 4) and distance measure (1, 2), in that order.
inputDir <- "input/"
rootName <- "aegean39S1L3a"
typeName <- "_v1_3e-1.0j0.0m0.5k1.0l4.5b1.2D100.0MC_r4_"
methodNumber <- 1
numberRows <- 39
distanceMeasure <- 1
mainTitleOn <- TRUE
source("criticalAngleDendrogram.r")
useAnglesOn <- FALSE
# Same call sequence as writing each combination out by hand:
# (method 1, 2, 4) x (distance 1, 2).
for (methodNumber in c(1, 2, 4)) {
  for (distanceMeasure in 1:2) {
    criticalAngleDendrogram(inputDir, rootName, typeName, methodNumber,
                            numberRows = 39, distanceMeasure, useAnglesOn,
                            mainTitleOn = TRUE)
  }
}
|
b03c09756c86406840d02792e3798222a43bded5
|
6d790f6448781672395a339fa7ed4bb6890ffb1e
|
/R/KF-interfaces.R
|
95e798471c4216c9f65e764ff27561a6152d1880
|
[] |
no_license
|
cran/KFKSDS
|
fac6899e084a7348f0c32e4ac9a8b1208a052d36
|
764548b72421436a4e617acfeeed1451e6cfb024
|
refs/heads/master
| 2016-09-05T13:47:26.383362
| 2015-01-28T00:00:00
| 2015-01-28T00:00:00
| 17,680,127
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,291
|
r
|
KF-interfaces.R
|
# Run the Kalman filter on series `y` under state-space model `ss` using one
# of several backend implementations, returning a list that always contains
# `mloglik` (the negative log-likelihood) plus backend-specific elements.
#
# y          - the observed time series.
# ss         - state-space specification (Z, T, R, Q, H, V, a0, P0, ...).
# KF.version - which backend to use; first choice is the default.
# KF.args    - extra backend arguments; validated by make.KF.args() unless
#              check.args is FALSE.
# debug      - currently unused in this function body.
KalmanFilter <- function(y, ss,
KF.version = c("KFKSDS", "StructTS", "KFAS", "FKF", "dlm", "dse"),
KF.args = list(), check.args = TRUE, debug = FALSE)
{
KF.version <- match.arg(KF.version)[1]
a <- if (check.args) {
make.KF.args(ss, KF.version, KF.args)
} else KF.args
# NOTE(review): P0cov is read from the raw KF.args, not from the checked
# `a` — confirm whether that is intentional.
P0cov <- if (is.null(KF.args$P0cov)) FALSE else KF.args$P0cov
if (P0cov)
ss$P0[] <- ss$P0[1]
switch(KF.version,
"KFKSDS" =
{
res <- list(mloglik =
KF.C(y, ss, convergence = a$convergence, t0 = a$t0))
},
"StructTS" =
{
# stats::KalmanRun returns values/residuals/states; the first element
# is converted here to the negative log-likelihood convention.
mod <- list(Z = ss$Z, a = ss$a0, P = ss$P0, T = ss$T,
V = ss$Q, h = ss$H, Pn = ss$P0)
res <- stats::KalmanRun(y, mod, -1, TRUE)
res[[1]][1] <- 2 * res[[1]][1] - log(res[[1]][2])
names(res) <- c("values", "residuals", "states")
res$mloglik <- 0.5 * sum(res[[1]]) #0.5 * sum(res[[1]]) / length(y)
res[[1]] <- NULL
# Keep only the state columns matching the disturbance variances in V.
id <- match(diag(ss$V), diag(ss$Q))
res[[2]] <- res[[2]][,id]
attributes(res[[1]]) <- attributes(y)
res[[2]] <- ts(res[[2]])
tsp(res[[2]]) <- tsp(y)
},
"KFAS" =
{
#require("KFAS")
#mkfas <- KFAS::SSModel(y = y, Z = ss$Z, H = ss$H, T = ss$T,
# R = ss$R, Q = ss$V, a1 = a$a0, P1 = ss$P0, P1inf = a$P1inf,
# distribution = "Gaussian", transform = "none", tolF = a$tolF)
#ss <- char2numeric(m0, FALSE)
#do not use "KFAS::" inside KFASS::SSmodel
tmp <- KFAS::SSMcustom(ss$Z, ss$T, ss$R, ss$V, a$a0, ss$P0, a$P1inf, 1, length(y))
mkfas <- KFAS::SSModel(y ~ -1 + tmp, H = ss$H,
data = data.frame(y = y), distribution = "gaussian", tol = a$tolF)
res <- KFAS::KFS(mkfas, smoothing = "none", simplify = TRUE)
res$mloglik <- -res$logLik
},
"FKF" =
{
#require("FKF")
res <- FKF::fkf(a0 = ss$a0, P0 = ss$P0,
dt = matrix(0, nrow = ncol(ss$T)), ct = a$ct,
Tt = ss$T, Zt = ss$Z, HHt = ss$Q, GGt = as.matrix(ss$H),
yt = rbind(y), check.input = a$check.input)
res$mloglik <- -res$logLik
},
#"sspir" =
#{
# require("sspir")
#
# yaux <- ts(matrix(y))
# tsp(yaux) <- tsp(y)
# msspir <- sspir::SS(y = yaux,
# Fmat = function(tt,x,phi) { return(matrix(phi$Z)) },
# Gmat = function(tt,x,phi) { return(phi$T) },
# Vmat = function(tt,x,phi) { return(matrix(phi$H)) },
# Wmat = function(tt,x,phi) { return(phi$Q) },
# m0 = rbind(ss$a0), C0 = ss$P0, phi = ss)
#
# res <- sspir::kfilter(msspir)
# res$mloglik <- drop(-res$loglik)
#},
"dlm" =
{
#require("dlm")
mdlm <- list(m0 = ss$a0, C0 = ss$P0,
FF = rbind(ss$Z), V = matrix(ss$H), GG = ss$T, W = ss$Q)
res1 <- dlm::dlmFilter(y = as.vector(y), mod = mdlm,
debug = FALSE, simplify = TRUE)
res2 <- dlm::dlmLL(y = as.vector(y), mod = mdlm, debug = FALSE)
res <- c(res1, mloglik = res2)
},
"dse" =
{
#require("dse")
mdse <- dse::SS(F. = ss$T, G = NULL, H = ss$Z, K = NULL, Q = ss$Q,
R = matrix(ss$H), z0 = rbind(ss$a0), P0 = ss$P0, rootP0 = NULL,
constants = NULL, description = NULL, names = NULL,
input.names = NULL, output.names = NULL)
##NOTE requires loading "dse"
TSdata <- dse::TSdata
res <- dse::l(mdse, TSdata(output = y), result = "like")
res <- list(mloglik = -res)
}
)
res
}
# Build the complete option list for a Kalman filter backend: merge the
# user-supplied KF.args over backend-specific defaults and warn about
# entries the chosen backend does not understand.
#
# Args:
#   ss:         state-space model list; only ss$a0 and ss$P0 are read here.
#   KF.version: backend name (see KalmanFilter()).
#   KF.args:    user-supplied options to merge over the defaults.
# Returns: a complete named list of options for the chosen backend.
make.KF.args <- function(ss, KF.version, KF.args = list())
{
  #NOTE only ss$a0 and ss$P0 are used in argument 'ss'
  #keep the entire list 'ss' as argument for possible extensions

  # Merge list1 (user options) into list2 (defaults); entries of list1 with
  # no counterpart in list2 are dropped with a warning.
  check.KF.args <- function(list1, list2, label)
  {
    nms1 <- names(list1)
    nms2 <- names(list2)
    notinlist2 <- !(nms1 %in% nms2)
    wno <- which(notinlist2)
    if (any(notinlist2))
      warning("the following elements in argument 'KF.args'\n",
        "are omitted with 'KF.version = ", label, "': ",
        paste(nms1[wno], collapse = ", "), ".", call. = FALSE)
    if (length(list1) > 0)
    {
      if (any(notinlist2)) {
        # keep only the recognised user options
        list2[nms1[which(!notinlist2)]] <- list1[-wno]
      } else
        list2[nms1] <- list1
    }
    list2
  }

  # KF.version <- match.arg(KF.version,
  #   eval(formals(KFKSDS::KalmanFilter)$KF.version))

  # P0cov = TRUE: flatten P0 to a constant matrix before deriving defaults
  # (affects the KFAS branch below, which inspects ss$P0)
  P0cov <- if (is.null(KF.args$P0cov)) FALSE else KF.args$P0cov
  if (P0cov && !is.null(ss))
    ss$P0[] <- ss$P0[1]

  #NOTE 'inf' is currently not used
  ldef <- list(P0cov = FALSE, inf = 99999)
  ldef <- # list of default parameters for each interface
  switch(KF.version,
    "KFKSDS" =
    {
      c(ldef, list(t0 = 1, convergence = c(0, 9999))) #sUP = 0
    },
    "stats" = { ldef },
    "StructTS" = { ldef },
    "KFAS" =
    {
      # With a zero P0, start from a diffuse initialisation (a0 = 0,
      # identity P1inf); otherwise no diffuse components
      a0 <- ss$a0
      P1inf <- matrix(0, nrow(ss$P0), ncol(ss$P0))
      if (all(ss$P0 == 0))
      {
        a0[] <- 0
        diag(P1inf) <- 1
      }
      c(ldef, list(#yt = y, Zt = ss$Z, Tt = ss$T, Rt = ss$R,
        #Ht = ss$H, Qt = ss$V, a1 = ss$a0, P1 = ss$P0,
        a0 = a0, P1inf = P1inf, tolF = 1e-08))
    },
    "FKF" =
    {
      c(ldef, list(
        ct = matrix(0),
        check.input = TRUE))
    },
    "sspir" = { ldef },
    "dlm" = { ldef },
    "dse" = { ldef }
  )
  args <- check.KF.args(KF.args, ldef, KF.version)
  args
}
|
6d9e13a6bef81201d101aea0c738211a77a1a3c1
|
eeae86290d4b0bcc11ce82dc8b03cc75613de1ce
|
/fig1/F1_src_LoadReadCounts.R
|
86f82e1d4ec8de44e7fbac7fe23984953ad45184
|
[] |
no_license
|
hjanime/TT-seq_mESC_pluripotency
|
07b438a7a4e3d1cecd3da9758b4329c1392dec0a
|
654e4176570cbd79e5ad6cfef3f3b2cfa67e2cf9
|
refs/heads/master
| 2023-08-02T11:25:25.136642
| 2021-10-03T21:08:37
| 2021-10-03T21:08:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,837
|
r
|
F1_src_LoadReadCounts.R
|
#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#
# read in kallisto tx counts on gencode.vM17.annotation and spike-in RNAs
# and save reads counts for normalization
#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#
# load kallisto counts, mm10
filenames <- sort(list.files('../data/kallisto_output', full.names = TRUE)) # count with combined reference GENCODE vM20 and ncRNA annotation
# BUG FIX: was gsub(".*/", "\\2", filenames) -- "\\2" is an invalid
# backreference (the pattern has no capture group).  The intent is the
# per-sample directory basename.
sampleNewName <- basename(filenames)
count_table <- SummarizedExperiment::readKallisto(paste0(filenames, "/abundance.tsv"),
                                                  as = 'matrix', what = "tpm")
colnames(count_table) <- sampleNewName
# split to GENCODE transcripts, annotated TUs, spike-in RNAs
txRPK <- count_table[grepl("^ENS", rownames(count_table)), ] %>%
  keepOneTx(rowname_gene_id = TRUE, is_gene_sum = FALSE)
tuRPK <- count_table[!grepl("^chrS|^ENS", rownames(count_table)), ]
tuRPK <- tuRPK[rowSums(tuRPK) > 0, ]
spRPK <- count_table[grep("^chrS", rownames(count_table)), ]
saveRDS(txRPK, "../data/txRPK_SL_2i.RData") # raw tpm without normalization
saveRDS(tuRPK, "../data/tuRPK_SL_2i.RData") # mm10 TU annotation kallisto tpm
saveRDS(spRPK, "../data/spRPK_SL_2i.RData")
# matrix for spike-in normalization: split by fraction and strip the
# "FRNA_"/"LRNA_" prefixes so replicate columns line up across fractions.
sp_F_mat <- spRPK[, grepl("FRNA", colnames(spRPK))]
# BUG FIX: was gsub("(FRNA_)*", "\\2", ...) -- invalid backreference; simply
# strip the fraction prefix.
colnames(sp_F_mat) <- sub("FRNA_", "", colnames(sp_F_mat))
FRNA.sizefactor <- SizeFactorCal(sp_F_mat)
sp_L_mat <- spRPK[, grepl("LRNA", colnames(spRPK))]
colnames(sp_L_mat) <- sub("LRNA_", "", colnames(sp_L_mat))
LRNA.sizefactor <- SizeFactorCal(sp_L_mat[1:4, ]) # use only labeled spike-ins
saveRDS(FRNA.sizefactor, "../data/FRNA.sizefactor.RData")
saveRDS(LRNA.sizefactor, "../data/LRNA.sizefactor.RData")
if (F) { # spike-in size factors for read count normalization (disabled)
  # Alternative size-factor computation from estimated counts (instead of tpm).
  count_table <- SummarizedExperiment::readKallisto(paste0(filenames, "/abundance.tsv"),
    as = 'matrix', what = "est_counts")
  colnames(count_table) <- sampleNewName
  # norm to RPK: divide counts by effective length, scale to per-kilobase
  eff_lengths <- SummarizedExperiment::readKallisto(paste0(filenames[1], "/abundance.tsv"),
    as = 'matrix', what = "eff_length")
  count_table <- count_table / c(eff_lengths) * 1e3
  spRPK <- count_table[grep("^chrS", rownames(count_table)), ]
  sp_F_mat <- spRPK[, grepl("FRNA", colnames(spRPK))]
  # NOTE(review): "\\2" is an invalid backreference (pattern has only one
  # group); the intent appears to be stripping the "FRNA_" prefix -- verify.
  colnames(sp_F_mat) <- gsub("(FRNA_)*", "\\2", colnames(sp_F_mat))
  FRNA.sizefactor <- SizeFactorCal(sp_F_mat)
  sp_L_mat <- spRPK[, grepl("LRNA",colnames(spRPK))]
  colnames(sp_L_mat) <- gsub("(LRNA_)*", "\\2", colnames(sp_L_mat))
  LRNA.sizefactor <- SizeFactorCal(sp_L_mat[1:4, ]) # use only labeled spike-ins
  saveRDS(FRNA.sizefactor, "../data/FRNA.sizefactor.RC.RData")
  saveRDS(LRNA.sizefactor, "../data/LRNA.sizefactor.RC.RData")
}
## spike-ins table, for labeled rate estimation
spikein_lens <- c("chrS2" = 1.023, "chrS4" = 1.033, "chrS5" = 1.042,
  "chrS8" = 1.124, "chrS9" = 1.061, "chrS12" = 1.124)
# convert RPK to abundance by multiplying spikein lengths, since spikeins were mixed by weight
# Build a long table of normalized spike-in abundances per sample, with the
# known mixing weight (W) and labeled fraction (R) of each spike-in.
SampleSpCounts <- NULL
for(i in seq_len(ncol(sp_F_mat))){
  SampleSpCounts <- rbind(SampleSpCounts,
    data.frame(FRNA = sp_F_mat[, i] / FRNA.sizefactor[i] * spikein_lens,
      LRNA = sp_L_mat[, i] / LRNA.sizefactor[i] * spikein_lens,
      Sample = colnames(sp_L_mat)[i],
      SpikeIns = rownames(sp_F_mat),
      W = c(1, 0.1, 1, 0.1, 1, 0.1),   # relative mixing weight per spike-in
      R = c(1, 1, 0.1, 0.1, 0, 0) ))   # labeled fraction per spike-in
}
saveRDS(SampleSpCounts, "../data/SampleSpikeCounts.RData")
# normalise tx tpm with spike-in RNA size factor, for parameter estimation
tx_F_mat <- txRPK[, grep("FRNA", colnames(txRPK))]
colnames(tx_F_mat) <- gsub("FRNA_(.*)","\\1", colnames(tx_F_mat))
tx_F_mat <- sweep(tx_F_mat, 2, FRNA.sizefactor, '/') # divide spike-in size factors
tx_L_mat <- txRPK[, grep("LRNA", colnames(txRPK))]
colnames(tx_L_mat) <- gsub("LRNA_(.*)","\\1", colnames(tx_L_mat))
tx_L_mat <- sweep(tx_L_mat, 2, LRNA.sizefactor, '/')
saveRDS(tx_L_mat, "../data/tx_L_mat.RData")
saveRDS(tx_F_mat, "../data/tx_F_mat.RData")
# normalise non-coding TU tpm with spike-in RNA size factor
tu_F_mat <- tuRPK[, grep("FRNA", colnames(tuRPK))]
colnames(tu_F_mat) <- gsub("FRNA_(.*)","\\1", colnames(tu_F_mat))
tu_F_mat <- sweep(tu_F_mat, 2, FRNA.sizefactor, '/') # divide spike-in size factors
tu_L_mat <- tuRPK[, grep("LRNA", colnames(tuRPK))]
colnames(tu_L_mat) <- gsub("LRNA_(.*)","\\1", colnames(tu_L_mat))
tu_L_mat <- sweep(tu_L_mat, 2, LRNA.sizefactor, '/')
saveRDS(tu_F_mat, "../fig1/data/tu_FRNA_RPK_norm.RData")
saveRDS(tu_L_mat, "../fig1/data/tu_LRNA_RPK_norm.RData")
# process mm9 tx counts ------------------------------------------------------------------------------------------
# Disabled (if (F)) legacy branch: mm9 read-count processing and DESeq2
# differential expression of 2i/mTORi marker genes.
if (F) {
  # count with combined reference GENCODE vM20 and ncRNA annotation
  filenames <- sort(list.files('../data/kallisto_output_mm9/', full.names = T))
  # NOTE(review): "\\2" is an invalid backreference here (no capture groups);
  # intent appears to be basename(filenames) -- verify before re-enabling.
  sampleNewName <- gsub(".*/", "\\2", filenames)
  count_table <- SummarizedExperiment::readKallisto(paste0(filenames, "/abundance.tsv"),
    as = 'matrix', what = "est_counts")
  colnames(count_table) <- sampleNewName
  txRC <- count_table[grepl("^ENS", rownames(count_table)), ] %>%
    keepOneTx(rowname_gene_id = T, is_gene_sum = T)
  txRC <- txRC[rowSums(txRC) > 0, ]
  # save read count
  saveRDS(txRC, "../data/txRC_SL_2i_mm9.RData") # raw count without normalization
  # matrix for spike-in normalization
  # NOTE(review): `spRC` is never defined in this block (only txRC above);
  # this branch would fail if re-enabled -- likely meant the "^chrS" subset.
  sp_F_mat <- spRC[, grepl("FRNA", colnames(spRC))]
  colnames(sp_F_mat) <- gsub("(FRNA_)*", "\\2", colnames(sp_F_mat))
  FRNA.sizefactor <- SizeFactorCal(sp_F_mat)
  sp_L_mat <- spRC[, grepl("LRNA",colnames(spRC))]
  colnames(sp_L_mat) <- gsub("(LRNA_)*", "\\2", colnames(sp_L_mat))
  LRNA.sizefactor <- SizeFactorCal(sp_L_mat[1:4, ]) # use only labeled spike-ins
  saveRDS(list("FRNA.sizefactor" = FRNA.sizefactor,
    "LRNA.sizefactor" = LRNA.sizefactor),
    "../fig1/data/sizefactor.list.mm9.RData")
  # normalise with spike-ins size factors
  tx_F_mat <- txRC[, grepl("FRNA", colnames(txRC))]
  colnames(tx_F_mat) <- gsub("FRNA_(.*)","\\1", colnames(tx_F_mat))
  tx_F_mat <- sweep(tx_F_mat, 2, FRNA.sizefactor, '/') # divide spike-in size factors
  tx_L_mat <- txRC[, grepl("LRNA", colnames(txRC))]
  colnames(tx_L_mat) <- gsub("LRNA_(.*)","\\1", colnames(tx_L_mat))
  tx_L_mat <- sweep(tx_L_mat, 2, LRNA.sizefactor, '/')
  # saveRDS(list("tx_F_mat" = tx_F_mat, "tx_L_mat" = tx_L_mat,
  #   "tu_F_mat" = tu_F_mat, "tu_L_mat" = tu_L_mat),
  #   "../fig1/data/tx_tu_RPK_norm.mm9.RData")
  # DESeq2 differential expression, condition parsed from column names
  library(DESeq2)
  dds_FRNA <- DESeqDataSetFromMatrix(round(as.matrix(tx_F_mat)),
    colData = data.frame(condition = gsub("(.*)_rep.", "\\1", colnames(tx_F_mat)) ),
    design = ~ condition)
  dds_LRNA <- DESeqDataSetFromMatrix(round(as.matrix(tx_L_mat)),
    colData = data.frame(condition = gsub("(.*)_rep.", "\\1", colnames(tx_F_mat)) ),
    design = ~ condition)
  dds_FRNA <- DESeq(dds_FRNA)
  dds_LRNA <- DESeq(dds_LRNA)
  # contrasts of each treatment against serum/LIF (SL)
  res_FRNA_2i <- results(dds_FRNA, contrast = c("condition", "2i_2d", "SL"))
  res_FRNA_mTORi <- results(dds_FRNA, contrast = c("condition", "mTORi_1d", "SL"))
  res_FRNA_SL2i <- results(dds_FRNA, contrast = c("condition", "SL2i_2d", "SL"))
  res_LRNA_2i <- results(dds_LRNA, contrast = c("condition", "2i_2d", "SL"))
  res_LRNA_mTORi <- results(dds_LRNA, contrast = c("condition", "mTORi_1d", "SL"))
  res_LRNA_SL2i <- results(dds_LRNA, contrast = c("condition", "SL2i_2d", "SL"))
  # gene id -> symbol lookup
  res <- biomaRt::select(EnsDb.Mmusculus.v79::EnsDb.Mmusculus.v79,
    keys = rownames(res_FRNA_2i),
    keytype = "GENEID",
    columns = "GENENAME")
  # Bulut et al. 2016 2i marker genes are specific to their own measurements
  # "Spi1", "Prdm16", "Bmp7", "Sp100", "Dazl", "Trpm1", "Crxos"
  marker_gene_2i <- c("Myc", "Zic3", "Nanog", "Utf1", "Dnmt3l", "Etv4", "Id1", "Lefty1",
    "Tfcp2l1", "Fgf10", "Cdh2", "Lefty2", "Zic1", "Neurog2", "Sox1", "Sox17")
  marker_gene_id_2i <- res$GENEID[match(marker_gene_2i, res$SYMBOL)]
  marker_gene_mTORi <- c("Nphs1", "Hbp1", "Kirrel2", "Platr7", "Lefty1", "Txnip",
    "Pdcd4", "Myrf", "Zfp652", "Aplp1", "Pim3", "Meg3", "Lefty2", "Grhl2")
  # long table of marker-gene log2 fold changes for the scatter plot below
  dat_log2FC <- data.frame(log2FC_2i = c(res_FRNA_2i[marker_gene_id_2i, "log2FoldChange"],
      res_LRNA_2i[marker_gene_id_2i, "log2FoldChange"]),
    log2FC_SL2i = c(res_FRNA_SL2i[marker_gene_id_2i, "log2FoldChange"],
      res_LRNA_SL2i[marker_gene_id_2i, "log2FoldChange"]),
    Type = c(rep("FRNA", each = length(marker_gene_2i)),
      rep("LRNA", each = length(marker_gene_2i))),
    gene_name = rep(marker_gene_2i, 2),
    gene_label = c(marker_gene_2i, rep("", length(marker_gene_2i))))
  ggplot(dat_log2FC, aes(x = log2FC_2i, y = log2FC_SL2i,
      shape = Type, label = gene_label,
      color = gene_name, group = gene_name)) +
    geom_abline(intercept = 0, slope = 1, color = add.alpha("grey50", 0.5)) +
    geom_vline(xintercept = 0, color = add.alpha("grey50", 0.5)) +
    geom_hline(yintercept = 0, color = add.alpha("grey50", 0.5)) +
    geom_line(color = "grey50", lty = 2, size = 0.5) +
    geom_point(size = 4) +
    scale_shape_manual(values = c(1, 4)) +
    ggtitle("DESeq2 of 2i marker genes (n=16)") +
    xlab("log2FC 2i 2d") + ylab("log2FC SL2i 2d") +
    geom_text(size = 4, hjust = -0.2, vjust = 0.7, check_overlap = T) +
    guides(color = FALSE) +
    theme_setting
  ggsave(filename = "FigS2_dot_plot_2i_marker_gene_2i_vs_SL2i.png",
    path = "../figS2/figs", width = 5, height = 4, device = "png")
  # genome-wide log2FC comparisons, 2i vs SL2i, per fraction
  g1 <- plot_scatter(dat = data.frame(x = res_FRNA_2i[names(gene.gr), "log2FoldChange"],
      y = res_FRNA_SL2i[names(gene.gr), "log2FoldChange"]),
    .xlab = "2i 2d", .ylab = "SL2i 2d",
    xlim = c(-5, 5), ylim = c(-5, 5)) + ggtitle("FRNA log2FoldChange")
  g2 <- plot_scatter(dat = data.frame(x = res_LRNA_2i[names(gene.gr), "log2FoldChange"],
      y = res_LRNA_SL2i[names(gene.gr), "log2FoldChange"]),
    .xlab = "2i 2d", .ylab = "SL2i 2d",
    xlim = c(-5, 5), ylim = c(-5, 5)) + ggtitle("LRNA log2FoldChange")
  ggsave(plot = grid.arrange(g1, g2, nrow = 1),
    filename = "FigS2_dot_plot_gene_log2FC_2i_vs_SL2i.png",
    path = "../figS2/figs", width = 6, height = 3, device = "png")
}
# Disabled (if (F)) branch: compare this study's TT-seq RPK against external
# mESC TT-seq datasets via per-gene scatter plots.
if (F) { # external mESC TT-seq data
  # RPK = counts over gene intervals / gene width * 1e3, for each dataset
  TT_gene_RPK <- .countBam(bam_files = list.files("/mnt/0E471D453D8EE463/GEO_nascent_RNA_mm9/2018_Jan_TTseq_TX1072_1/bam",
      "*bam$", full.names = T),
    intervals = gene.gr,
    stranded = T, paired.end = "ignore") / width(gene.gr) * 1e3
  WT26_gene_RPK <- .countBam(bam_files = "/mnt/0E471D453D8EE463/TT_seq_data/161226_H33KO_ATRX_DAXX_KO/bam_mm9/TTSEQ-WT26.Aligned.sortedByCoord.out.bam",
    intervals = gene.gr,
    stranded = T, paired.end = "ignore") / width(gene.gr) * 1e3
  H33WT_gene_RPK <- .countBam(bam_files = "/mnt/0E471D453D8EE463/TT_seq_data/161226_H33KO_ATRX_DAXX_KO/bam_mm9/TTSEQ-H33WT.Aligned.sortedByCoord.out.bam",
    intervals = gene.gr,
    stranded = T, paired.end = "ignore") / width(gene.gr) * 1e3
  TT_E14_SL_RPK <- .countBam(bam_files = "/mnt/0E471D453D8EE463/TT_seq_data/bam_sl_2i_mm9/LRNA_SL_rep1.Aligned.sortedByCoord.out.bam",
    intervals = gene.gr,
    stranded = T, paired.end = "ignore") / width(gene.gr) * 1e3
  # joint log2 table, keeping finite rows above a -5 log2-RPK floor
  dat_TT <- data.frame(Jan_TTseq = TT_gene_RPK[, 1],
      E14_SL = TT_E14_SL_RPK[, 1],
      WT26 = WT26_gene_RPK[, 1],
      H33WT = H33WT_gene_RPK[, 1]) %>%
    log2() %>%
    as.data.frame() %>%
    dplyr::filter(complete.cases(.) & is.finite(rowSums(.)) & Jan_TTseq > (-5) & E14_SL > (-5))
  # three density-colored scatter plots with Spearman correlation annotations
  g1 <- ggplot((dat_TT), aes(x = E14_SL, y = Jan_TTseq,
      color = get_dens(E14_SL, Jan_TTseq))) +
    geom_point(cex = 0.2) +
    annotate("text", x = Inf, y = Inf,
      hjust = 1.2, vjust = 1.2,
      label = paste0(" r = ", round(cor(dat_TT, method = "spearman")[2, 1], 3),
        "\nn = ", nrow(dat_TT))) +
    xlab("log2 Mean RPK E14 SL") + ylab("log2 RPK Jan et al. 0h") +
    ggtitle("TT-seq WT mESC SL") +
    scale_color_viridis_c(option = "C", direction = -1, begin = 0.05, end = 0.9) +
    theme_setting +
    theme(legend.position = "none")
  g2 <- ggplot(dat_TT, aes(x = E14_SL, y = WT26,
      color = get_dens(E14_SL, WT26))) +
    geom_point(cex = 0.2) +
    annotate("text", x = Inf, y = Inf,
      hjust = 1.2, vjust = 1.2,
      label = paste0(" r = ", round(cor(dat_TT, method = "spearman")[2, 3], 3),
        "\nn = ", nrow(dat_TT))) +
    xlab("log2 Mean RPK E14 SL") + ylab("log2 RPK WT26") +
    ggtitle("TT-seq WT mESC SL") +
    scale_color_viridis_c(option = "C", direction = -1, begin = 0.05, end = 0.9) +
    theme_setting +
    theme(legend.position = "none")
  g3 <- ggplot(dat_TT, aes(x = E14_SL, y = H33WT,
      color = get_dens(E14_SL, H33WT))) +
    geom_point(cex = 0.2) +
    annotate("text", x = Inf, y = Inf,
      hjust = 1.2, vjust = 1.2,
      label = paste0(" r = ", round(cor(dat_TT, method = "spearman")[2, 4], 3),
        "\nn = ", nrow(dat_TT))) +
    xlab("log2 Mean RPK E14 SL") + ylab("log2 RPK H33WT") +
    ggtitle("TT-seq WT mESC SL") +
    scale_color_viridis_c(option = "C", direction = -1, begin = 0.05, end = 0.9) +
    theme_setting +
    theme(legend.position = "none")
  ggsave(grid.arrange(g1, g2, g3, nrow = 1),
    filename = "../figS1/figs/FigS1_Scatter_mESC_TTseq_comparison.png",
    width = 12, height = 4)
}
# ------------------------------------------------------------------------------------------
# Average technical replicates: collapse the columns of `tx_mat` that share
# the same column name into their row-wise mean.
#
# Args:
#   tx_mat: numeric matrix; duplicated column names mark replicate samples.
# Returns: a matrix with one column per unique column name (in first-seen
#   order); groups with a single column are passed through unchanged.
meanSampleCounts <- function(tx_mat)
{
  groups <- unique(colnames(tx_mat))
  # Compute each group's row-wise mean once, then bind -- avoids the
  # quadratic cbind-in-a-loop growth of the previous implementation.
  cols <- lapply(groups, function(nm) {
    rowMeans(tx_mat[, colnames(tx_mat) == nm, drop = FALSE])
  })
  out <- do.call(cbind, cols)
  colnames(out) <- groups
  return(out)
}
# ---------------------------------------------------------------------------- #
# PCA plots of public data
# Disabled (if (F)) branch: compare this study's log2 fold changes with
# published SL-vs-2i RNA-seq datasets via PCA of pluripotency marker genes.
if (F) {
  # load kallisto counts, SL vs 2i public data
  filenames <- sort(list.files('../data/kallisto_output_SL_2i/', full.names = T)) # count with combined reference GENCODE vM20 and ncRNA annotation
  # NOTE(review): "\\2" backreference is invalid for ".*/" (no groups);
  # the subsequent gsubs strip assay/strain decorations from sample names.
  sampleNewName <- gsub(".*/", "\\2", filenames) %>%
    gsub("201._", "", .) %>%
    gsub("RNAseq_|RNASeq_|RNA-Seq_|_RNA-seq|_RNA-Seq", "", .) %>%
    gsub("mES_WT_|E14.|ESC", "", .)
  count_table_SL2i <- SummarizedExperiment::readKallisto(paste0(filenames, "/abundance.tsv"),
    as = 'matrix', what = "est_counts")
  colnames(count_table_SL2i) <- sampleNewName
  txRC_SL2i <- count_table_SL2i[grepl("^ENS", rownames(count_table_SL2i)), ] %>%
    keepOneTx(rowname_gene_id = T, is_gene_sum = T)
  # average replicate columns (suffix "_<n>" or "_rep<n>") per public sample
  txRC_SL2i <- sapply(unique(gsub("_.$|_rep.", "", sampleNewName)),
    function(x) {
      tmp <- txRC_SL2i[, grep(x, colnames(txRC_SL2i))]
      if (!is.null(dim(tmp))) {
        rowMeans(tmp)
      } else {
        tmp
      }
    })
  # log2 ratios of each condition against its study's SL control;
  # NOTE(review): column positions (1, 3, 4, ...) are tied to the sorted
  # sample order above -- fragile if the input folder changes.
  txRC_SL2i_LFC <- data.frame("Galonska_2i_24h" = txRC_SL2i[, 1] / txRC_SL2i[, 3],
    "Galonska_2i_3d" = txRC_SL2i[, 2] / txRC_SL2i[, 3],
    "Bulut_2i" = txRC_SL2i[, 4] / txRC_SL2i[, 6],
    "Bulut_mTORi" = txRC_SL2i[, 5] / txRC_SL2i[, 6],
    "Bulut_v6.5_2i" = txRC_SL2i[, 7] / txRC_SL2i[, 9],
    "Bulut_v6.5_mTORi" = txRC_SL2i[, 8] / txRC_SL2i[, 9],
    "Finley_2i" = txRC_SL2i[, 10] / txRC_SL2i[, 11],
    "Joshi_2i" = txRC_SL2i[, 12] / txRC_SL2i[, 13],
    "Marks_2i" = txRC_SL2i[, 14] / txRC_SL2i[, 15]) %>% log2()
  # NOTE(review): object is named *_FRNA but the filter keeps LRNA columns --
  # confirm which fraction was intended.
  txRC_FRNA <- readRDS("../data/txRC_SL_2i_mm9.RData")
  txRC_FRNA <- txRC_FRNA[, grepl("LRNA", colnames(txRC_FRNA))]
  txRC_FRNA <- sapply(unique(gsub("_rep.", "", colnames(txRC_FRNA))),
    function(x) {
      tmp <- txRC_FRNA[, grep(x, colnames(txRC_FRNA))]
      if (!is.null(dim(tmp))) {
        rowMeans(tmp)
      } else {
        tmp
      }
    })
  txRC_FRNA_LFC <- data.frame("2i_2d" = txRC_FRNA[,1] / txRC_FRNA[,5],
    # "2i_7d" = txRC_FRNA[,2] / txRC_FRNA[,5], # no replicate
    "SL2i_2d" = txRC_FRNA[,6] / txRC_FRNA[,5],
    "mTORi_1d" = txRC_FRNA[,3] / txRC_FRNA[,5],
    "mTORi_2d" = txRC_FRNA[,4] / txRC_FRNA[,5])
  # merge this study's and the public log2FCs on shared genes, finite rows only
  gene.ov <- intersect.Vector(rownames(txRC_FRNA_LFC), rownames(txRC_SL2i_LFC))
  txRC_all_LFC <- cbind(txRC_FRNA_LFC[gene.ov, ],
    txRC_SL2i_LFC[gene.ov, ])
  txRC_all_LFC <- txRC_all_LFC[is.finite(rowSums(txRC_all_LFC)) & !is.na(rowSums(txRC_all_LFC)), ]
  pca_all_LFC <- prcomp(t(txRC_all_LFC[intersect.Vector(res$GENEID[res$SYMBOL %in% unlist(pluripotent_markers)], # a list from "FigS2_RNA_turnover_comparison.R"
    rownames(txRC_all_LFC)), ]))
  # Plot the first two PCs with convex hulls around the 2i and mTORi groups.
  plot_pca <- function(pca, sample_names) {
    pca$sdev <- pca$sdev^2       # squared sdev = variance per component
    pc_var <- pca$sdev[1:2] / sum(pca$sdev) * 100
    pc <- pca$x[, 1:2] %>% as.data.frame()
    pc$sample_names <- sample_names
    pc$Sample <- gsub("_.*", "", sample_names)
    pc$Group <- c("2i", "mTORi")[grepl("2i", sample_names) + 1]
    hull <- pc %>%
      group_by(Group) %>%
      slice(chull(PC1, PC2))
    ggplot(pc, aes(x = PC1, y = PC2, color = Group, group = Group)) +
      geom_point(size = 4) +
      ggrepel::geom_text_repel(aes(label = sample_names), size = 4) +
      geom_polygon(data = hull, alpha = 0 ) +
      # ggforce::geom_mark_ellipse() +
      # stat_ellipse(aes(x = PC1, y = PC2, group = Group), type = "norm") +
      # scale_color_manual(values = colors_20[c(2, 16, 20, 7, 13, 10)]) +
      xlim(range(pc$PC1) * 1.5 ) +
      xlab(paste0("PC1 ", round(pc_var[1]), "% variance")) +
      ylab(paste0("PC2 ", round(pc_var[2]), "% variance")) +
      theme_setting +
      theme(legend.position = "none")
  }
  plot_pca(pca = pca_all_LFC, sample_names = gsub("^X", "", colnames(txRC_all_LFC))) +
    ggtitle("Pluripotent genes log2FC (n=36)")
  ggsave(filename = "FigS1_PCA_2i_mTORi_log2FC_public_data_comparison.png",
    path = "../figS1/figs",
    device = "png", width = 5, height = 5)
}
|
15aacca9334ccb8b230421432c04766c30f8b159
|
b9aba21008fb49a8b4ffb4f04bdf66a4cafaef9d
|
/SF Salries Code/SVM.R
|
fd612e3ebc6c6a9112329f0218ff129773fb2257
|
[] |
no_license
|
wztaylor/Final-Project-DATA-495
|
66cb7d03d69804d0205eb0e1ad7494fd7267e94d
|
7c8799c2a18be62507a7d07353039c6b87021eaf
|
refs/heads/master
| 2021-01-09T20:47:03.842013
| 2016-06-02T23:12:52
| 2016-06-02T23:12:52
| 60,303,503
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 688
|
r
|
SVM.R
|
########################################
# SVM #
########################################
# Fit a support-vector machine on a random 50/50 train/test split of
# `completedata` (defined elsewhere in the project) and report prediction
# quality for the `label` column.
library(e1071)
library(rpart)
# random 50/50 train/test split
index <- sample(1:nrow(completedata),round(0.50*nrow(completedata)))
train <- completedata[index,]
test <- completedata[-index,]
## svm fit and prediction
svm.model <- svm(label ~ ., data = train, cost = 100, gamma = 1)
svm.pred <- predict(svm.model, test)
#Playing around with plotting
library(kernlab)
model.ksvm = ksvm(label~.,data = train, type="C-svc")
plot(model.ksvm, data=test)
# Accuracy
library(caret)
library(e1071)  # NOTE(review): e1071 is already loaded above; redundant
postResample(svm.pred, test$label)   # accuracy / kappa
posPredValue(svm.pred,test$label)    # positive predictive value
negPredValue(svm.pred,test$label)    # negative predictive value
|
e24bdf5ec2cbb3f556f3c84c26a5399ac21d0ded
|
03ac171669a60b57e772941dfeb6be966e916808
|
/R/single_a_id.R
|
5fb6145a379d274ebaa2c27c3b315c54e8a21aee
|
[
"MIT"
] |
permissive
|
michaeldumelle/DumelleEtAl2021CopepodSentinel
|
8a37d4a260f6bacc3ec704d394e56ac9df2281bd
|
2da0ba403f5eabcf78836891ee4047578d7ee0f4
|
refs/heads/main
| 2023-08-23T10:47:24.998662
| 2021-10-04T23:43:44
| 2021-10-04T23:43:44
| 339,199,938
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,317
|
r
|
single_a_id.R
|
## Compute sentinel station statistics with an auxiliary stations
##
## `single_a_id` computes statistics for a single sentinel station
## corresponding to a single auxiliary stations
##
## @param s_id A character vector of unique sentinel station identifiers. If
## this variable is not specified, the \code{sentinel()} function defaults to
## \code{s_id} equaling all unique values in \code{id}.
## @param a_id A character vector of unique auxiliary station identifiers. If
## this variable is not specified, the \code{sentinel()} function defaults to
## \code{a_id} equaling all unique values in \code{id}.
## @param id A character vector matching the name of the variable in \code{data}
## containing station identifiers
## @param group A character vector matching the name of the variable in \code{data}
## containing grouping information for which to pair sentinel and auxiliary
## stations.
## @param value A character vector matching the name of the variable in \code{data}
## containing the response variable for which statistics are computed.
## @param data A data.frame, tibble, sp, or sf object containing the \code{id},
## \code{group}, and \code{value} variables.
## @param n_min An integer specifying the minimum number of matching \code{group}
## variables in each sentinel-auxiliary pair required to compute relevant
## statistics
## @param type A character vector indicating the type of statistic desired.
## @param output A character vector indicating the desired output. \code{output}
## can have three values: "overall", "individual", and "dataset". If "overall
## is in \code{output}, overall summary statistics are returned for each sentinel
## station. If "individual" is in \code{output}, statistics are returned for each
## sentinel-auxiliary station pair. If "dataset" is in \code{output}, a data.frame
## is returned with all data used to compute relevant statistics. Defaults to
## "overall".
## @param ... Additional arguments provided to sentinel that correspond to the
## value of the \code{type} argument.
##
## @return A list containing sublists for the corresponding `output` specified
## @export
##
## @examples
single_a_id <- function(s_id, a_id, id, group, value, data, n_min, type, ...){
  # Dispatch to the S3 method `sentinel.<type>` by building a dummy object
  # whose class is the requested statistic type.  UseMethod() re-invokes the
  # selected method with this call's arguments and does NOT return here, so
  # the method's own return value is what callers receive.
  output <- UseMethod("sentinel", structure(list(), class = type))
  # NOTE(review): unreachable -- UseMethod() transfers control above, so this
  # invisible() wrapper never executes.
  invisible(output)
}
|
7514959e8f030ad1cccebb3f4cbf289b85319bde
|
cc81dbbfa460d0d474c7e6618f19158c7bacbe3d
|
/params_pb_se.R
|
6575a0088fc134a5d42b5929459f922d6d1c204d
|
[] |
no_license
|
raimund-buehler/shinyapp
|
72be622de701d0e1f50794a7f691263b150401e5
|
c9667a18d10b9aba996c950aa514fd406513851a
|
refs/heads/master
| 2023-02-11T07:06:08.022763
| 2021-01-11T10:24:07
| 2021-01-11T10:24:07
| 278,086,703
| 0
| 1
| null | 2020-09-08T07:22:10
| 2020-07-08T12:44:04
|
R
|
UTF-8
|
R
| false
| false
| 326
|
r
|
params_pb_se.R
|
# set parameter for sterne & egger regression
# Collect the Sterne & Egger publication-bias regression parameters for
# reporting.  `input` (shiny inputs) and `SEres` (the regression result)
# are reactive objects defined elsewhere in the app.
if(input$go_SE > 0){
  # the SE analysis has been triggered at least once: use its z/p values
  params_pb_se <- list(
    pb_se_zval = SEres$res$zval,
    pb_se_pval = SEres$res$pval,
    thres_se_pval = input$SE_p   # user-chosen significance threshold
  )
} else {
  # analysis not yet run: NA placeholders keep downstream code uniform
  params_pb_se <- list(
    pb_se_zval = NA,
    pb_se_pval = NA,
    thres_se_pval = NA
  )
}
# value of this snippet (its last expression) is the parameter list
params_pb_se
|
a44b0dcf38b7bf7bcda4e7546f5f0a072b47ba13
|
aafa44abd35bd74bd3a5203e3d5ffcc7ef2775b5
|
/inst/extdata/junk/quasiTpm.R
|
3123d47aa8f87471d1470613e0cae58347eb2718
|
[] |
no_license
|
arcolombo/rToolKit
|
df1d53c9d1384217fbc9b4af4cd7fd14ff566aca
|
ad9dfcf44c044947fc442b0690bd23f26641f8a2
|
refs/heads/master
| 2021-03-24T12:34:39.090176
| 2018-02-17T06:32:52
| 2018-02-17T06:32:52
| 66,577,496
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,286
|
r
|
quasiTpm.R
|
#' @title quasi normalization of TPM by bundling
#' @description resulting quasi-normalization of TPM of a bundle numbers should now have variances which behave similarly to those of counts. this may be good enough to use the bundle wise TPM quanties for batch normalization and other shenanigans, then multiply through by the original proportions in each sample to retrieve the corrected per-transcript values to do unsupervised work. this might also address aspects of single cell noise.
#' @param kexp a stage level kexp
#' @param ensg an EnsgID (defaults to the global \code{ensgID} for backward
#'   compatibility with existing call sites)
#' @param sample a sample column (defaults to the global \code{sampleID})
#' @export
#' @return normalized tpm (single numeric value)
quasiTpm<-function(kexp,ensg=ensgID,sample=sampleID){
  # BUG FIX: the helpers were previously called with the globals
  # ensgID/sampleID instead of the `ensg`/`sample` arguments, silently
  # ignoring any caller-supplied values.
  sample_bundleDF <- .findBundle(kexp, ensg = ensg, sample = sample)
  ## total mass of the bundle: sum of est_counts / effective length
  total_mass <- sum(sample_bundleDF$est_counts / sample_bundleDF$eff_len)
  ## median effective length of the bundle across samples
  L_g <- .medEffectiveLength(kexp, ensg = ensg)
  ##S is the set of all expression features in a bundle
  ## s_i: per-transcript count proportions summed over the bundle
  s_i <- sum(.scaleSampleFactor(kexp, ensg = ensg, sample = sample))
  ###FIX ME:: s_i needs to be summed across txs , s_i returns a vector for each tanscript in bundle, need aggregated data?
  # return the quasi-normalized tpm explicitly (was previously the value of
  # an assignment, returned invisibly)
  (L_g / s_i) / total_mass
} ##main
# Build a per-transcript data.frame (the "bundle") for one gene in one sample
# column of a kexp object.  Columns: transcript id, effective length,
# estimated counts, gene id, tpm, and the sample name.
# `rowRanges`/`assays`/`counts`/`tpm` are accessors provided by the kexp's
# package (SummarizedExperiment-style) -- assumed available in the namespace.
.findBundle<-function(kexp,ensg=NULL,sample=NULL){
  stopifnot(is.null(ensg)==FALSE)
  # transcripts annotated to this gene
  txs<-rowRanges(kexp)[which(rowRanges(kexp)$gene_id==as.character(ensg))]
  tx_effective_length<-assays(kexp)$eff_length[txs$tx_id,sample]
  sample_name<-colnames(kexp)[sample]
  estCounts<-counts(kexp)[txs$tx_id,sample]
  tpm<-tpm(kexp)[txs$tx_id,sample]
  df<-data.frame(txs=txs$tx_id,
    eff_len=tx_effective_length,
    est_counts=estCounts,
    ensg=txs$gene_id,
    tpm=tpm,
    sample=sample_name,
    stringsAsFactors=FALSE)
  return(df)
}
# Median effective length, over all samples, of the transcripts belonging to
# gene `ensg` in the kexp object.
.medEffectiveLength <- function(kexp, ensg = NULL) {
  gene_txs <- rowRanges(kexp)[which(rowRanges(kexp)$gene_id == as.character(ensg))]
  eff_lens <- assays(kexp)$eff_length[gene_txs$tx_id, ]
  median(eff_lens)
}
# Per-transcript fraction of a gene's total counts in one sample (s_i).
#
# Args:
#   kexp:   kexp object with counts()/rowRanges() accessors.
#   ensg:   gene (bundle) identifier.
#   sample: sample column index or name.
# Returns: numeric vector with one proportion per transcript of the gene
#   (sums to 1 when the gene has nonzero counts in that sample).
.scaleSampleFactor<-function(kexp,ensg=NULL,sample=NULL){
  txs <- rowRanges(kexp)[which(rowRanges(kexp)$gene_id == as.character(ensg))]
  # BUG FIX: previously indexed with the undefined global `sampleID` instead
  # of the `sample` argument, so the argument was silently ignored.
  sample_total_count <- sum(counts(kexp)[txs$tx_id, sample])
  tx_count <- counts(kexp)[txs$tx_id, sample]
  s_i <- tx_count / sample_total_count
  return(s_i)
}
|
9ba71f999a3de6f54b8d16a3f5b7f9cea2026fca
|
0fbc58702c39addfa7949391d92533922dcf9d49
|
/inst/examples/song-words-hclust.R
|
7f03886091a475469205585d16e848f92fa26035
|
[] |
no_license
|
yihui/MSG
|
d3d353514464f962a0d987efd8cf32ed50ac901a
|
8693859ef41139a43e32aeec33ab2af700037f82
|
refs/heads/master
| 2021-11-29T08:12:02.820072
| 2021-08-15T17:14:36
| 2021-08-15T17:14:36
| 1,333,662
| 30
| 12
| null | 2021-08-15T17:14:37
| 2011-02-06T05:42:53
|
R
|
UTF-8
|
R
| false
| false
| 312
|
r
|
song-words-hclust.R
|
# Hierarchical clustering dendrogram of Song-dynasty ci (lyric) authors
load(system.file("extdata", "SongWords.rda", package = "MSG"))
SongCorr = cor(SongWords) # correlation matrix of word usage ("lyric style")
song.hc = hclust(as.dist(1 - SongCorr)) # distance = 1 - correlation
par(mar = c(0.5, 4, .2, 0.1))
plot(song.hc, main = "", cex = .8, ylab = "高度") # ylab is "height" (kept in Chinese for the plot)
rect.hclust(song.hc, k = 4, border = "red") # highlight 4 clusters
|
50d1e6c5c83fb79be0135da85219a92a592276e8
|
6bb000638105bea5d968913d53a2bdd750ad0fba
|
/src/data/clean/mobility/safegraph/weekly-patterns/process_weekly_patterns.R
|
02dc4e6c144744ddb47dad69acd3b4934bf5dc88
|
[] |
no_license
|
alecmacmillen/covid-research
|
3db91ec3f59ae76f23c563deedfc1a80f422bb99
|
cefa45c53fff4241402c8bd74f8135257e33118f
|
refs/heads/master
| 2022-10-17T19:17:09.934590
| 2020-06-13T05:26:30
| 2020-06-13T05:26:30
| 266,205,353
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,896
|
r
|
process_weekly_patterns.R
|
###################################################
# process_weekly_patterns.R
# Script to replicate data build for county-level POI and visits
# As described in Allcott et al (2020) Appendix A.1.1, step 3
# Merge county name from POI data build
###################################################
rm(list = ls()) # NOTE(review): clearing the global env in a script is discouraged; kept for parity with the original workflow
library(tidyverse)
library(stringr)
library(data.table)
# patterns <- data.table::fread("data/raw/mobility/safegraph/weekly-patterns/main-file/2020-01-27-weekly-patterns.csv.gz")
# POI -> county lookup, used by the process.* helpers below
safegraph.counties <- data.table::fread("data/interim/geographic/safegraph/poi_counties.csv.gz")
# Create list of files to process
in.root.dir <- "data/raw/mobility/safegraph/weekly-patterns/main-file/"
out.root.dir <- "data/interim/mobility/safegraph/weekly-patterns/main-file/"
# BUG FIX: was list.files(root.dir, ...), but `root.dir` is never defined
all.files <- list.files(in.root.dir, recursive=TRUE)
n <- length(all.files)
# NOTE(review): process.weekly()/process.daily() are defined *below* this
# loop; in a straight top-to-bottom run of the script the definitions must be
# evaluated before this loop executes.
# BUG FIX: the progress counter `i` was never initialised
i <- 1
# Loop through each main file and process
for (path in all.files) {
  print(paste0("Processing file ", i, " of ", n))
  in.file.path <- paste0(in.root.dir, path)
  out.file.path <- paste0(out.root.dir, "processed_", path)
  print(paste0(" File path: ", in.file.path))
  df <- data.table::fread(in.file.path)
  print(" Producing weekly aggregates...")
  weekly <- process.weekly(df)
  write_csv(weekly[[1]], paste0(out.root.dir, "processed_weekly_", path))
  write_csv(weekly[[2]], paste0(out.root.dir, "processed_weekly_naics_", path))
  rm(weekly)
  gc()
  print(" Producing daily aggregates...")
  daily <- process.daily(df)
  write_csv(daily[[1]], paste0(out.root.dir, "processed_daily_", path))
  write_csv(daily[[2]], paste0(out.root.dir, "processed_daily_naics_", path))
  rm(daily)
  # BUG FIX: `df` was previously rm()'d before process.daily(df) was called
  rm(df)
  gc()
  print(paste0("Current memory size: ", memory.size())) # NOTE: Windows-only
  i <- i + 1
}
### HELPER FUNCTIONS FOR PROCESSING RAW FILES
# Aggregate one raw SafeGraph weekly-patterns table to county-week level.
#
# Args:
#   patterns: data.frame/data.table of raw weekly-patterns rows.
# Returns: a list of two data frames:
#   [[1]] county x week visit/visitor totals,
#   [[2]] county x 2-digit-NAICS x week totals.
# Depends on the global `safegraph.counties` POI->county lookup.
process.weekly <- function(patterns) {
  # BUG FIX: the pipeline previously started from the undefined object
  # `short`; it must start from the `patterns` argument.
  weekly <- patterns %>%
    select(safegraph_place_id, date_range_start, visits_by_day,
           raw_visit_counts, raw_visitor_counts, poi_cbg, visitor_home_cbgs) %>%
    mutate(date_week_start = as.Date(str_sub(date_range_start, start=1L, end=10L), "%Y-%m-%d")) %>%
    left_join(select(safegraph.counties, safegraph_place_id, naics_code, state, county), by=c("safegraph_place_id"))
  visitor.cbg.weekly <- weekly %>%
    mutate(visitor_cbgs = gsub("[{}]", "", gsub('\"', '', visitor_home_cbgs))) %>%
    # Keep only count of visitors from same CBG as the POI
    separate(visitor_cbgs, into = c("vc1"), sep = ",") %>%
    # Keep only the first CBG and count from that CBG. If there are visitors from the same CBG
    # as the POI in question, it is stored first
    separate(vc1, into = c("visitor_home_cbg", "count")) %>%
    # Only keep counts if the visitor home CBG matches the POI CBG
    # Calculate visitors from same CBG as POI and different CBG
    mutate(count = ifelse(is.na(count), 0, as.integer(count)),
           raw_visitor_counts_same_cbg = ifelse(visitor_home_cbg==poi_cbg, count, 0),
           raw_visitor_counts_same_cbg = pmin(raw_visitor_counts, raw_visitor_counts_same_cbg),
           raw_visitor_counts_diff_cbg = raw_visitor_counts - raw_visitor_counts_same_cbg)
  # county x week totals
  weekly.agg <- visitor.cbg.weekly %>%
    group_by(state, county, date_week_start) %>%
    summarize(total_visits = sum(raw_visit_counts, na.rm=TRUE),
              total_visitors = sum(raw_visitor_counts, na.rm=TRUE),
              total_visitors_same_cbg = sum(raw_visitor_counts_same_cbg, na.rm=TRUE),
              total_visitors_diff_cbg = sum(raw_visitor_counts_diff_cbg, na.rm=TRUE)) %>%
    ungroup() %>%
    arrange(state, county, date_week_start)
  # county x 2-digit-NAICS x week totals
  weekly.naics <- visitor.cbg.weekly %>%
    mutate(naics = str_sub(naics_code, start=1L, end=2L)) %>%
    group_by(state, county, naics, date_week_start) %>%
    summarize(total_visits = sum(raw_visit_counts, na.rm=TRUE),
              total_visitors = sum(raw_visitor_counts, na.rm=TRUE),
              total_visitors_same_cbg = sum(raw_visitor_counts_same_cbg, na.rm=TRUE),
              total_visitors_diff_cbg = sum(raw_visitor_counts_diff_cbg, na.rm=TRUE)) %>%
    ungroup() %>%
    arrange(state, county, naics, date_week_start)
  return.list <- list(weekly.agg, weekly.naics)
  return.list
}
# Aggregate one raw SafeGraph weekly-patterns file to county-day level.
#
# Args:
#   patterns: data frame / data.table of raw weekly-patterns rows; must
#     contain safegraph_place_id, date_range_start, and visits_by_day (a
#     "[d1,d2,...,d7]"-style string of 7 daily visit counts).
#
# Depends on `safegraph.counties` (POI -> NAICS/state/county lookup)
# defined elsewhere in the script.
#
# Returns: list of two data frames:
#   [[1]] county x day totals, [[2]] county x 2-digit-NAICS x day totals.
process.daily <- function(patterns) {
  daily <- patterns %>%
    select(safegraph_place_id, date_range_start, visits_by_day) %>%
    # Strip the surrounding "[" and "]" from the visits-by-day string.
    mutate(visits_by_day = str_sub(visits_by_day, start=2L, end=-2L)) %>%
    # Split the 7 comma-separated daily counts into columns, then go long.
    separate(visits_by_day, into=c("d1", "d2", "d3", "d4", "d5", "d6", "d7"), sep=",") %>%
    gather(key="day", value="visits", d1:d7) %>%
    arrange(safegraph_place_id) %>%
    group_by(safegraph_place_id) %>%
    # `count` numbers the days within each place, so day k maps to
    # week_start_date + (k - 1) in the mutate below.
    mutate(visits = as.integer(visits),
           count = row_number()) %>%
    ungroup() %>%
    mutate(week_start_date = as.Date(str_sub(date_range_start, start=1L, end=10L), "%Y-%m-%d"),
           date = week_start_date + count - 1) %>%
    select(safegraph_place_id, date, visits) %>%
    left_join(select(safegraph.counties, safegraph_place_id, naics_code, state, county), by=c("safegraph_place_id"))
  # County x day totals.
  daily.agg <- daily %>%
    group_by(state, county, date) %>%
    summarize(total_visits = sum(visits, na.rm=TRUE)) %>%
    ungroup() %>%
    arrange(state, county, date)
  # County x 2-digit NAICS sector x day totals.
  daily.naics <- daily %>%
    mutate(naics = str_sub(naics_code, start=1L, end=2L)) %>%
    group_by(state, county, naics, date) %>%
    summarize(total_visits = sum(visits, na.rm=TRUE)) %>%
    ungroup() %>%
    arrange(state, county, naics, date)
  return.list <- list(daily.agg, daily.naics)
  return.list
}
### Read weekly patterns dfs back in and process
interim.root.dir <- "data/interim/mobility/safegraph/weekly-patterns/main-file/"
interim.files <- list.files(interim.root.dir, pattern="^processed_weekly_2020")
# Idiomatic replacement for the original manual `i <- i + 1` index loop:
# read each interim file directly into one list element.
weekly.dfs <- lapply(interim.files, function(path) {
  read_csv(paste0(interim.root.dir, path))
})
countyfips <- read_csv("data/raw/geographic/counties/countyfips.csv.gz")
# Generate alternate dependent variable specifications
all.weekly <- bind_rows(weekly.dfs) %>%
  mutate(log_total_visits = log(1 + total_visits),
         log_total_visitors = log(1 + total_visitors),
         share_visitors_same_cbg = total_visitors_same_cbg / total_visitors,
         share_visitors_diff_cbg = total_visitors_diff_cbg / total_visitors) %>%
  select(state, county, date_week_start,
         total_visits, log_total_visits,
         total_visitors, log_total_visitors,
         total_visitors_same_cbg, share_visitors_same_cbg,
         total_visitors_diff_cbg, share_visitors_diff_cbg) %>%
  rename(week_start_date = date_week_start)
# Attach county FIPS codes for merging with other county-level data.
all.weekly <- all.weekly %>%
  left_join(countyfips, by=c("state", "county")) %>%
  select(state, county, county_fips, everything())
# write_csv(all.weekly, "data/interim/mobility/safegraph/weekly-patterns/processed_weekly_visits.csv.gz")
|
9540b5ad95c11c404719ffda7ca98320e3c8ac2c
|
6f1c32b7c1686a2f618e0fee812e96f0e1b054a3
|
/R/ipak.R
|
86671a4ba609bbb2f143dd34940f047cbef22c7e
|
[] |
no_license
|
xtmgah/Tmisc
|
f1033c3cfdb10bf5c6d957c6ffd1c888d3047249
|
305a123f90bacb0139b56ecc9d61ebee14dcf7cf
|
refs/heads/master
| 2021-01-20T17:07:02.385416
| 2015-04-20T19:58:58
| 2015-04-20T19:58:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 312
|
r
|
ipak.R
|
#' Install packages
#'
#' Shortcut to \code{\link{install.packages}}.
#'
#' @author Stephen Turner
#' @keywords keywords
#'
#' @param ... Arguments to be passed to \code{install.packages}
#'
#' @export
#'
#' @examples
#' \dontrun{
#' ipak("Tmisc")
#' }
# Note: the original @examples block showed `lsp(Tmisc, pattern="un")`,
# an example for a different function (copy-paste error); fixed above.
ipak <- function(...) install.packages(...)
|
afa509cac365ecefda5fbf33eb41c1bbd4b307b6
|
bc4816180d9f01c5215fee4fd8742c5ccf14add1
|
/plot1.R
|
bc530630640b40bda7d0866631fbed476af3ebd5
|
[] |
no_license
|
singhs32/ExData_Plotting1
|
46f36a83fe0060047957475dcd4964233eff62f9
|
5f0f0e78a473212e1c71f9aa717433c1bfa045ca
|
refs/heads/master
| 2020-12-25T03:40:24.047673
| 2014-08-09T23:06:12
| 2014-08-09T23:06:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 712
|
r
|
plot1.R
|
plot1 <- function() {
  ## Read the full household power consumption data set; "?" marks missing
  ## values, and column classes are fixed up front to speed up parsing.
  power <- read.table(
    "./household_power_consumption.txt",
    header = TRUE,
    sep = ";",
    na.strings = c("NA", "?"),
    colClasses = c("character", "character", "numeric", "numeric", "numeric",
                   "numeric", "numeric", "numeric", "numeric")
  )
  ## Keep only the two days of interest (1-2 February 2007).
  feb.days <- subset(
    power,
    as.Date(power$Date, "%d/%m/%Y") %in% as.Date(c("2007-02-01", "2007-02-02"))
  )
  ## Draw the histogram on the active device...
  with(feb.days, hist(Global_active_power, col = "Red",
                      xlab = "Global Active Power (kilowatts)",
                      main = "Global Active Power"))
  ## ...then copy it to a 480x480 PNG file and close that device.
  dev.copy(png, "plot1.png", width = 480, height = 480)
  dev.off()
}
|
14aa27a0f164f3720b66c1d3508892519aa3e63d
|
35902ea94d808a4cf80050b3eba004d3fc2eef9d
|
/scripts/posterior_predictives_out_sample.R
|
502bc0c6a5c9c17b0e9bb3fabcf81c38a76e6aec
|
[
"CC0-1.0"
] |
permissive
|
Kucharssim/WALD-EM
|
79f2099b3490bba479f4a6700d524cbd71e607d4
|
9a34d02ba2f565b0291099b495f2683a0b0562b7
|
refs/heads/master
| 2023-01-06T05:08:52.399454
| 2020-11-10T10:55:43
| 2020-11-10T10:55:43
| 248,290,398
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 21,606
|
r
|
posterior_predictives_out_sample.R
|
# ---- Setup: libraries, plotting theme, data, and fitted Stan model ----
library(tidyverse)
library(rstan)
library(here)
library(tidybayes)
library(patchwork)
library(imager)
library(ggforce)
# Consistent ggplot theme for every figure produced in this script.
ggplot2::theme_set(ggplot2::theme_classic(base_size = 14))
ggplot2::theme_update(axis.ticks.length = ggplot2::unit(6, "pt"),
                      axis.text = ggplot2::element_text(size = 15),
                      axis.title = ggplot2::element_text(size = 18))
# load data and fitted model
load(here::here("data", "cleaned_data.Rdata"))
load(here::here("saves", "fit_model.Rdata"))
# load(here::here("saves", "stan_data.Rdata"))
# Posterior summary table (means etc.) for all parameters of `fit`; used
# later to draw the exploitation and central-bias illustration panels.
summary_pars <- summary(fit)$summary
# expose stan functions
#source(here::here("R", "expose_helpers_stan.R"))
source(here::here("R", "colours.R"))
source(here::here("R", "load_image.R"))
# create list from data to pass to Stan
# Out-of-sample check: keep only held-out (non-training) fixations.
df_sub <- subset(df, !train)
# Row index used later to match posterior-predictive draws back to fixations.
df_sub <- dplyr::mutate(df_sub, obs = seq_len(nrow(df_sub)))
stan_data <- list(
  N_obs = nrow(df_sub),
  order = df_sub$order,
  x = df_sub$x,
  y = df_sub$y,
  duration = df_sub$duration,
  N_obj = nrow(objects),
  obj_center_x = objects$x,
  obj_center_y = objects$y,
  obj_width = objects$width,
  obj_height = objects$height,
  N_ppt = dplyr::n_distinct(df_sub$id_ppt),
  id_ppt = df_sub$id_ppt,
  N_img = dplyr::n_distinct(df_sub$id_img),
  id_img = df_sub$id_img,
  obj_index_from = objects_in_images$from,
  obj_index_to = objects_in_images$to,
  N_obj_in_img = objects_in_images$n,
  log_lik_saliency = df_sub$log_lik_saliency,
  max_neighbors = ncol(saliency_log),
  N_neighbors = df_sub$n_neighbors,
  mean_sq_distances = mean_sq_distances[!df$train,,drop=FALSE],
  saliency_log = saliency_log[!df$train,,drop=FALSE],
  N_pix = max(saliency_normalized$idx),
  half_width_pixel = 0.5 * 800 / max(saliency_normalized$row),
  # FIX: both pixel-centre coordinate vectors are now taken from image 1.
  # The original took the y centres from `id_img == 2` while x came from
  # `id_img == 1` — an apparent typo; it only changes results if the pixel
  # grids differ between images.
  saliency_center_x = saliency_normalized$x[saliency_normalized$id_img == 1],
  saliency_center_y = saliency_normalized$y[saliency_normalized$id_img == 1],
  # One normalized saliency vector per image, in image-id order.
  saliency = lapply(unique(df_sub$id_img), function(id) {
    subset(saliency_normalized, subset = id_img == id, select = "value_normalized", drop = TRUE)
  })
)
# The commented block below is the one-off code that generated the saved
# posterior predictives loaded afterwards; kept for provenance.
# gqs_model <- rstan::stan_model(here::here("stan", "gqs_objects_central_distance_saliency.stan"))
#
# mcmc <- as.data.frame(fit)
# #mcmc <- mcmc[, c(1:360, 363:457)]
# mcmc <- mcmc %>% dplyr::select(sigma_center, sigma_distance, scale_obj,
#                                dplyr::starts_with("weights"),
#                                dplyr::starts_with("z_weights_obj"),
#                                dplyr::starts_with("log_weights"),
#                                dplyr::starts_with("alpha"),
#                                dplyr::starts_with("sigma_attention"))
# mcmc <- mcmc %>% dplyr::sample_n(size = 40) # generate 100 predictives for every data point
#
# posterior_predictives <- rstan::gqs(gqs_model, data = stan_data, draws = mcmc)
#
# rm(fit, mcmc, stan_data, saliency_log) # unload memory a little
# save(posterior_predictives, file = here::here("saves", "posterior_predictives_out_sample.Rdata"))
load(here::here("saves", "posterior_predictives_out_sample.Rdata"))
# One row per posterior draw, one column per generated quantity.
mcmc_pred <- as.data.frame(posterior_predictives)
# Duration checks ----
# Posterior-predictive check for fixation durations: reshape the predicted
# durations (one column per observation) into long format, one row per
# posterior draw x observation.
duration_rep <- mcmc_pred %>%
  dplyr::select(dplyr::starts_with("duration"))
duration_rep$iter <- 1:nrow(duration_rep)
duration_rep <- tidyr::pivot_longer(duration_rep, cols = dplyr::starts_with("duration"), names_prefix = "duration_rep",
                                    names_to = "obs", values_to = "duration")
# there is a long tail of the predictions spanning to about 15 sec
# but the proportion of the predictions that exceed max of the data is relatively small
perc_pred_below_data <- mean(duration_rep$duration < max(df_sub$duration))
# Histogram of observed durations with predictive densities overlaid
# (one thin line per posterior draw, thick line for the pooled draws).
p1 <- ggplot2::ggplot(df_sub, ggplot2::aes(x = duration, y = ..density..)) +
  # plot histogram of data
  ggplot2::geom_histogram(col = cols_custom$dark_teal, fill = cols_custom$light_teal, bins = 50) +
  ggplot2::geom_rug(mapping = ggplot2::aes(x = duration),
                    inherit.aes = FALSE, alpha = 0.05, length = ggplot2::unit(4, "pt"), sides = "b") +
  # plot density of predictions
  ggplot2::geom_density(data = duration_rep, mapping = ggplot2::aes(x = duration, group = iter),
                        col = cols_custom$mid_trans, alpha = 0.5) +
  ggplot2::geom_density(data = duration_rep, mapping = ggplot2::aes(x = duration),
                        col = cols_custom$dark, size = 1) +
  ggplot2::xlab("Fixation duration (sec)") +
  ggplot2::ylab("Density") +
  ggplot2::scale_x_continuous(expand = ggplot2::expansion(mult = c(0.05, 0.1), add = c(0, 0)), limits = c(0, max(df_sub$duration))) +
  ggplot2::scale_y_continuous(expand = ggplot2::expansion(mult = c(0, 0.1), add = c(0, 0)))
# Same comparison as empirical CDFs.
p2 <- ggplot2::ggplot(df_sub, ggplot2::aes(x = duration)) +
  # plot exdf of data
  ggplot2::stat_ecdf(col = cols_custom$dark_teal, size = 1, n = 100) +
  ggplot2::geom_rug(mapping = ggplot2::aes(x = duration),
                    inherit.aes = FALSE, outside = FALSE, alpha = 0.05, length = ggplot2::unit(4, "pt"), sides = "b") +
  # plot exdf of predictions
  ggplot2::stat_ecdf(data = duration_rep, mapping = ggplot2::aes(x = duration, group = iter),
                     col = cols_custom$mid_trans, alpha = 0.5) +
  ggplot2::stat_ecdf(data = duration_rep, mapping = ggplot2::aes(x = duration),
                     col = cols_custom$dark, size = 1) +
  ggplot2::xlab("Fixation duration (sec)") +
  ggplot2::ylab("Cumulative probability") +
  ggplot2::scale_x_continuous(expand = ggplot2::expansion(mult = c(0.05, 0.1), add = c(0, 0)), limits = c(0, max(df_sub$duration))) +
  ggplot2::scale_y_continuous(expand = ggplot2::expansion(mult = c(0, 0), add = c(0, 0)))
# Combine side by side (patchwork) and save.
p1_2 <- p1 + p2
p1_2
ggplot2::ggsave(filename = "fixation_durations.jpg", path = here::here("figures", "fit_model", "out_sample"),
                plot = p1_2, width = 8, height = 4)
# X and Y coordinates checks ----
# Reshape predicted x coordinates to long format (one row per draw x fixation).
x_rep <- mcmc_pred %>%
  dplyr::select(dplyr::starts_with("x"))
x_rep$iter <- 1:nrow(x_rep)
x_rep <- tidyr::pivot_longer(x_rep, cols = dplyr::starts_with("x"), names_prefix = "x_rep",
                             names_to = "obs", values_to = "x")
# Same for predicted y coordinates.
y_rep <- mcmc_pred %>%
  dplyr::select(dplyr::starts_with("y"))
y_rep$iter <- 1:nrow(y_rep)
y_rep <- tidyr::pivot_longer(y_rep, cols = dplyr::starts_with("y"), names_prefix = "y_rep",
                             names_to = "obs", values_to = "y")
# Join x and y draws (natural join on the shared iter/obs columns).
xy_rep <- dplyr::left_join(x_rep, y_rep)
# After stripping the "x_rep"/"y_rep" prefix the obs label still carries the
# Stan-style brackets, e.g. "[12]"; remove them and convert to an integer.
xy_rep$obs <- gsub("\\[", "", xy_rep$obs)
xy_rep$obs <- gsub("\\]", "", xy_rep$obs)
xy_rep$obs <- as.integer(xy_rep$obs)
rm(x_rep, y_rep)
# Attach participant and image ids so draws can be grouped per stimulus.
xy_rep <- dplyr::full_join(xy_rep, dplyr::select(df_sub, obs, id_ppt, id_img))
# One composite figure per held-out image: the stimulus, observed vs
# predicted fixations, and the model components (objects, saliency,
# exploitation, central bias).
pb <- dplyr::progress_estimated(n = dplyr::n_distinct(df_sub$id_img))
for(img in unique(df_sub$id_img)){
  image_name <- paste0(image_key$image[image_key$id_img == img], ".jpg")
  image <- load_image(image_name)
  image <- as.data.frame(image, wide = "c") %>% dplyr::mutate(rgb.val = rgb(c.1, c.2, c.3))
  # plot image
  # (the y axis is reversed in every panel: eye-tracking coordinates have y
  # growing downwards — see the note in the saccade-angle section below)
  pp_1 <- ggplot2::ggplot(image, ggplot2::aes(x = x, y = y)) +
    ggplot2::geom_raster(ggplot2::aes(fill = rgb.val)) +
    ggplot2::scale_fill_identity() +
    ggplot2::scale_y_continuous(trans = scales::reverse_trans(), limits = c(600, 0), expand = c(0, 0)) +
    ggplot2::scale_x_continuous(limits = c(0, 800), expand = c(0, 0)) +
    ggplot2::theme_void() +
    ggplot2::coord_fixed() +
    ggplot2::ggtitle("Stimulus")
  # plot observed fixations
  pp_2 <- ggplot2::ggplot(subset(df_sub, id_img == img), ggplot2::aes(x = x, y = y)) +
    ggplot2::geom_point(alpha = 0.5, shape = 19, col = cols_custom$dark_teal, fill = cols_custom$light_teal) +
    ggplot2::scale_y_continuous(trans = scales::reverse_trans(), limits = c(600, 0), expand = c(0, 0)) +
    ggplot2::scale_x_continuous(limits = c(0, 800), expand = c(0, 0)) +
    ggplot2::theme_void() +
    ggplot2::coord_fixed() +
    ggplot2::ggtitle("Observed fixations")
  # plot predicted fixations
  pp_3 <- ggplot2::ggplot(subset(xy_rep, id_img == img), ggplot2::aes(x = x, y = y)) +
    #ggplot2::stat_density_2d(aes(fill = ..density..), geom = "raster", contour = FALSE) +
    #ggplot2::stat_density_2d(aes(fill = ..level..), geom = "polygon", col = cols_custom$dark) +
    ggplot2::scale_fill_gradient(low = cols_custom$light, high = cols_custom$dark) +
    ggplot2::geom_point(alpha = 0.05, shape = 19, col = cols_custom$dark_highlight, fill = cols_custom$light) +
    ggplot2::scale_y_continuous(trans = scales::reverse_trans(), limits = c(600, 0), expand = c(0, 0)) +
    ggplot2::scale_x_continuous(limits = c(0, 800), expand = c(0, 0)) +
    ggplot2::theme_void() +
    ggplot2::theme(legend.position = "none") +
    ggplot2::coord_fixed() +
    ggplot2::ggtitle("Predicted fixations")
  # plot objects on the scene
  pp_4 <- ggplot2::ggplot(subset(objects, id_img == img), ggplot2::aes(x = x, y = y)) +
    ggplot2::geom_point(shape = 13) +
    ggforce::geom_ellipse(ggplot2::aes(x0 = x, y0 = y, a = width/2, b = height/2, angle = 0)) +
    ggplot2::scale_y_continuous(trans = scales::reverse_trans(), limits = c(600, 0), expand = c(0, 0)) +
    ggplot2::scale_x_continuous(limits = c(0, 800), expand = c(0, 0)) +
    ggplot2::theme_void() +
    ggplot2::coord_fixed() +
    ggplot2::ggtitle("Objects")
  # plot saliency
  pp_5 <- ggplot2::ggplot(subset(saliency_normalized, id_img == img), ggplot2::aes(x = x-0.5, y = y-0.5, fill = value)) +
    ggplot2::geom_raster() +
    ggplot2::scale_fill_gradient(low = "black", high = "white") +
    ggplot2::scale_y_continuous(trans = scales::reverse_trans(), limits = c(600, 0), expand = c(0, 0)) +
    ggplot2::scale_x_continuous(limits = c(0, 800), expand = c(0, 0)) +
    ggplot2::theme_void() +
    ggplot2::theme(legend.position = "none") +
    ggplot2::coord_fixed() +
    ggplot2::ggtitle("Saliency")
  # plot exploitation
  # Illustration with two mock fixation clusters; the spread uses the
  # posterior mean of sigma_distance from summary_pars.
  xp <- c(200, 600)
  yp <- c(400, 200)
  dat.distance <- tidyr::expand_grid(x = seq(0, 800, by = 5), y = seq(0, 600, by = 5))
  dat.distance$z1 <- dnorm(dat.distance$x, xp[1], summary_pars["sigma_distance", "mean"]) * dnorm(dat.distance$y, yp[1], summary_pars["sigma_distance", "mean"])
  dat.distance$z2 <- dnorm(dat.distance$x, xp[2], summary_pars["sigma_distance", "mean"]) * dnorm(dat.distance$y, yp[2], summary_pars["sigma_distance", "mean"])
  dat.distance$z <- dat.distance$z1 + dat.distance$z2
  dat.mock <- data.frame(x=c(rnorm(5, xp[1], summary_pars["sigma_distance", "mean"]), rnorm(3, xp[2], summary_pars["sigma_distance", "mean"])),
                         y=c(rnorm(5, yp[1], summary_pars["sigma_distance", "mean"]), rnorm(3, yp[2], summary_pars["sigma_distance", "mean"])),
                         f=1:8)
  pp_6 <- ggplot2::ggplot(dat.distance, ggplot2::aes(x = x, y = y)) +
    ggplot2::geom_raster(ggplot2::aes(fill = z)) +
    ggplot2::geom_path(data = dat.mock, ggplot2::aes(x = x, y = y, col = f)) +
    ggplot2::geom_point(data = dat.mock, ggplot2::aes(x = x, y = y), col = cols_custom$dark_teal) +
    ggplot2::scale_fill_gradient(low = cols_custom$light_trans, high = cols_custom$dark_highlight) +
    ggplot2::scale_color_gradient(low = cols_custom$dark_teal, high = cols_custom$mid_teal) +
    ggplot2::scale_y_continuous(trans = scales::reverse_trans(), limits = c(600, 0), expand = c(0, 0)) +
    ggplot2::scale_x_continuous(limits = c(0, 800), expand = c(0, 0)) +
    ggplot2::theme_void() +
    ggplot2::theme(legend.position = "none") +
    ggplot2::coord_fixed() +
    ggplot2::ggtitle("Exploitation")
  # plot central bias
  # Gaussian centred on the screen midpoint (400, 300), rescaled to max 1,
  # using the posterior mean of sigma_center.
  dat.central <- tidyr::expand_grid(x = seq(0, 800, by = 5), y = seq(0, 600, by = 5))
  dat.central$z <- dnorm(dat.central$x, 400, summary_pars["sigma_center", "mean"]) * dnorm(dat.central$y, 300, summary_pars["sigma_center", "mean"])
  dat.central$z <- dat.central$z / max(dat.central$z)
  pp_7 <- ggplot2::ggplot(dat.central, ggplot2::aes(x = x, y = y, fill = z)) +
    ggplot2::geom_raster() +
    ggplot2::scale_fill_gradient(low = cols_custom$light_trans, high = cols_custom$dark_highlight) +
    ggplot2::scale_y_continuous(trans = scales::reverse_trans(), limits = c(601, 0), expand = c(0, 0)) +
    ggplot2::scale_x_continuous(limits = c(0, 801), expand = c(0, 0)) +
    ggplot2::theme_void() +
    ggplot2::theme(legend.position = "none") +
    ggplot2::coord_fixed() +
    ggplot2::ggtitle("Central bias")
  # stitch it together
  pp_fac <- pp_4 + pp_5 + pp_6 + pp_7 + patchwork::plot_layout(ncol = 2)
  pp <- pp_1 + pp_fac + pp_2 + pp_3 + patchwork::plot_layout(ncol = 2)
  # save
  ggplot2::ggsave(image_name, pp, path = here::here("figures/fit_model/out_sample/xy"),
                  width = 20, height = 16, units = "cm")
  pb$tick()$print()
}
# Saccade amplitude check ----
# calculate amplitudes in data: Euclidean distance between each fixation and
# the next one within each participant x image trial
amplitude_dat <- plyr::ddply(.data = df_sub, .variables = c("id_ppt", "id_img"), .fun = function(d){
  .prev <- 1:(nrow(d)-1)
  .next <- 2:nrow(d)
  new_d <- data.frame(id_ppt = d$id_ppt[.prev], id_img = d$id_img[.prev],
                      distance = sqrt( (d$x[.prev] - d$x[.next])^2 + (d$y[.prev] - d$y[.next])^2 )
  )
})
# calculate amplitudes (distances of predictions for the next fixation from the observed fixation)
# xy_rep <- subset(xy_rep, (iter %% 100) == 0)
# NOTE(review): xy_rep was already full_join-ed with these same df_sub
# columns earlier in the script, so this join is redundant (but harmless).
xy_rep <- dplyr::full_join(xy_rep, dplyr::select(df_sub, obs, id_ppt, id_img))
amplitude_pred <- plyr::ddply(.data = df_sub, .variables = c("id_ppt", "id_img", "obs"), .fun = function(d){
  # `d` is a single observed fixation; compare it against all predictive
  # draws for the *next* fixation (obs + 1) in the same trial.
  ppt_d <- d$id_ppt[1]
  img_d <- d$id_img[1]
  obs_d <- d$obs
  x_d <- d$x
  y_d <- d$y
  pred <- subset(xy_rep, obs == obs_d + 1 & id_ppt == ppt_d & id_img == img_d)
  n_row <- nrow(pred)
  if(n_row == 0){
    # Last fixation of a trial has no successor: contribute nothing.
    return(data.frame(id_ppt=integer(), id_img=integer(), distance=numeric()))
  } else{
    out <- data.frame(
      id_ppt = rep(ppt_d[1], n_row),
      id_img = rep(img_d[1], n_row),
      distance = sqrt( (pred$x - x_d)^2 + (pred$y - y_d)^2 )
    )
    return(out)
  }
}, .progress = "text")
# NOTE(review): `iter` is assigned here but not referenced by the plots
# below in this section.
amplitude_pred$iter <- 1:nrow(amplitude_pred)
# Pooled amplitude check: observed histogram / ECDF vs predictive density.
p1 <- ggplot2::ggplot(amplitude_dat, ggplot2::aes(x = distance, y = ..density..)) +
  # plot histogram of data
  ggplot2::geom_histogram(col = cols_custom$dark_teal, fill = cols_custom$light_teal, bins = 50) +
  ggplot2::geom_rug(mapping = ggplot2::aes(x = distance),
                    inherit.aes = FALSE, alpha = 0.05, length = ggplot2::unit(4, "pt"), sides = "b") +
  # plot density of predictions
  ggplot2::geom_density(data = amplitude_pred, mapping = ggplot2::aes(x = distance),
                        col = cols_custom$dark, size = 1.5) +
  ggplot2::xlab("Distance (pixels)") +
  ggplot2::ylab("Density") +
  ggplot2::scale_x_continuous(expand = ggplot2::expansion(mult = c(0.05, 0.1), add = c(0, 0))) +
  ggplot2::scale_y_continuous(expand = ggplot2::expansion(mult = c(0, 0.1), add = c(0, 0)))
p2 <- ggplot2::ggplot(amplitude_dat, ggplot2::aes(x = distance)) +
  # plot exdf of data
  ggplot2::stat_ecdf(col = cols_custom$dark_teal, size = 1, n = 100) +
  ggplot2::geom_rug(mapping = ggplot2::aes(x = distance),
                    inherit.aes = FALSE, outside = FALSE, alpha = 0.05, length = ggplot2::unit(4, "pt"), sides = "b") +
  # plot exdf of predictions
  ggplot2::stat_ecdf(data = amplitude_pred, mapping = ggplot2::aes(x = distance),
                     col = cols_custom$dark, size = 1.5) +
  ggplot2::xlab("Distance (pixels)") +
  ggplot2::ylab("Cumulative probability") +
  ggplot2::scale_x_continuous(expand = ggplot2::expansion(mult = c(0.05, 0.1), add = c(0, 0))) +
  ggplot2::scale_y_continuous(expand = ggplot2::expansion(mult = c(0, 0), add = c(0, 0)))
# Combine and save the pooled amplitude figure.
p1_2 <- p1 + p2
p1_2
ggplot2::ggsave(filename = "amplitude.jpg", path = here::here("figures", "fit_model", "out_sample"),
                plot = p1_2, width = 8, height = 4)
# Same amplitude check as above, but one figure per held-out image.
pb <- dplyr::progress_estimated(n = dplyr::n_distinct(df_sub$id_img))
for(img in unique(df_sub$id_img)){
  image_name <- paste0(image_key$image[image_key$id_img == img], ".jpg")
  amplitude_dat_sub <- subset(amplitude_dat, id_img == img)
  amplitude_pred_sub <- subset(amplitude_pred, id_img == img)
  p1 <- ggplot2::ggplot(amplitude_dat_sub, ggplot2::aes(x = distance, y = ..density..)) +
    # plot histogram of data
    ggplot2::geom_histogram(col = cols_custom$dark_teal, fill = cols_custom$light_teal, bins = 30) +
    ggplot2::geom_rug(mapping = ggplot2::aes(x = distance),
                      inherit.aes = FALSE, alpha = 0.05, length = ggplot2::unit(4, "pt"), sides = "b") +
    # plot density of predictions
    ggplot2::geom_density(data = amplitude_pred_sub, mapping = ggplot2::aes(x = distance),
                          col = cols_custom$dark, size = 1.5) +
    ggplot2::xlab("Distance (pixels)") +
    ggplot2::ylab("Density") +
    ggplot2::scale_x_continuous(expand = ggplot2::expansion(mult = c(0.05, 0.1), add = c(0, 0))) +
    ggplot2::scale_y_continuous(expand = ggplot2::expansion(mult = c(0, 0.1), add = c(0, 0)))
  p2 <- ggplot2::ggplot(amplitude_dat_sub, ggplot2::aes(x = distance)) +
    # plot exdf of data
    ggplot2::stat_ecdf(col = cols_custom$dark_teal, size = 1, n = 100) +
    ggplot2::geom_rug(mapping = ggplot2::aes(x = distance),
                      inherit.aes = FALSE, outside = FALSE, alpha = 0.05, length = ggplot2::unit(4, "pt"), sides = "b") +
    # plot exdf of predictions
    ggplot2::stat_ecdf(data = amplitude_pred_sub, mapping = ggplot2::aes(x = distance),
                       col = cols_custom$dark, size = 1.5) +
    ggplot2::xlab("Distance (pixels)") +
    ggplot2::ylab("Cumulative probability") +
    ggplot2::scale_x_continuous(expand = ggplot2::expansion(mult = c(0.05, 0.1), add = c(0, 0))) +
    ggplot2::scale_y_continuous(expand = ggplot2::expansion(mult = c(0, 0), add = c(0, 0)))
  p1_2 <- p1 + p2
  p1_2
  ggplot2::ggsave(filename = image_name, plot = p1_2, path = here::here("figures", "fit_model", "out_sample", "amplitude"),
                  width = 8, height = 5)
  pb$tick()$print()
}
# Saccade angle check ----
# atan2 convention used below:
# atan2: 0 pi - right
#        0.5 pi - up
#        1 pi - left
#        -0.5 pi - down
# Observed saccade angles, per participant x image trial.
angle_dat <- plyr::ddply(.data = df_sub, .variables = c("id_ppt", "id_img"), .fun = function(d){
  .prev <- 1:(nrow(d)-1)
  .next <- 2:nrow(d)
  x <- d$x[.next] - d$x[.prev]
  y <- d$y[.next] - d$y[.prev]
  # calculate angles
  # do not forget: y axis is flipped in eye-tracking data, that's why we reverse the y components of the saccade vector
  new_d <- data.frame(id_ppt = d$id_ppt[.prev], id_img = d$id_img[.prev],
                      angle = atan2(-y, x)
  )
  return(new_d)
})
# Predicted saccade angles: from each observed fixation to every predictive
# draw of the next fixation (obs + 1) in the same trial.
angle_pred <- plyr::ddply(.data = df_sub, .variables = c("id_ppt", "id_img", "obs"), .fun = function(d){
  ppt_d <- d$id_ppt[1]
  img_d <- d$id_img[1]
  obs_d <- d$obs
  x_d <- d$x
  y_d <- d$y
  pred <- subset(xy_rep, obs == obs_d + 1 & id_ppt == ppt_d & id_img == img_d)
  n_row <- nrow(pred)
  if(n_row == 0){
    # Last fixation of a trial has no successor: contribute nothing.
    return(data.frame(id_ppt=integer(), id_img=integer(), angle=numeric()))
  } else{
    # calculate angles
    # do not forget: y axis is flipped in eye-tracking data, that's why we reverse the y components of the saccade vector
    x <- pred$x - x_d
    y <- pred$y - y_d
    out <- data.frame(
      id_ppt = rep(ppt_d[1], n_row),
      id_img = rep(img_d[1], n_row),
      angle = atan2(-y, x)
    )
    return(out)
  }
}, .progress = "text")
# Polar histogram of observed (teal) vs predicted (dark) saccade directions.
p1 <- ggplot2::ggplot(angle_dat, ggplot2::aes(x = angle, y = ..density..)) +
  ggplot2::geom_histogram(col = cols_custom$dark_teal, fill = cols_custom$mid_teal, alpha = 0.8, bins = 32) +
  ggplot2::geom_histogram(data=angle_pred, col = cols_custom$dark, fill = cols_custom$mid, alpha = 0.8, bins = 32) +
  ggplot2::coord_polar(start = 0.5*pi, direction = -1) +
  ggplot2::scale_x_continuous(limits = c(-pi, pi),
                              breaks = seq(-0.5, 1, by = 0.5)*pi,
                              labels = c("down", "right", "up", "left")) +
  ggplot2::scale_y_continuous(expand = c(0, 0.025)) +
  ggplot2::theme_void() +
  ggplot2::theme(axis.text.x = ggplot2::element_text(size = 15))
ggplot2::ggsave(filename = "angle.jpg", plot = p1, path = here::here("figures", "fit_model", "out_sample"),
                width = 5, height = 5)
# Same polar check, one figure per held-out image.
pb <- dplyr::progress_estimated(n = dplyr::n_distinct(df_sub$id_img))
for(img in unique(df_sub$id_img)){
  image_name <- paste0(image_key$image[image_key$id_img == img], ".jpg")
  angle_dat_sub <- subset(angle_dat, id_img == img)
  angle_pred_sub <- subset(angle_pred, id_img == img)
  p1 <- ggplot2::ggplot(angle_dat_sub, ggplot2::aes(x = angle, y = ..density..)) +
    ggplot2::geom_histogram(col = cols_custom$dark_teal, fill = cols_custom$mid_teal, alpha = 0.8, bins = 32) +
    ggplot2::geom_histogram(data=angle_pred_sub, col = cols_custom$dark, fill = cols_custom$mid, alpha = 0.8, bins = 32) +
    ggplot2::coord_polar(start = 0.5*pi, direction = -1) +
    ggplot2::scale_x_continuous(limits = c(-pi, pi),
                                breaks = seq(-0.5, 1, by = 0.5)*pi,
                                labels = c("down", "right", "up", "left")) +
    ggplot2::scale_y_continuous(expand = c(0, 0.025)) +
    ggplot2::theme_void() +
    ggplot2::theme(axis.text.x = ggplot2::element_text(size = 15))
  ggplot2::ggsave(filename = image_name, plot = p1, path = here::here("figures", "fit_model", "out_sample", "angle"),
                  width = 5, height = 5)
  pb$tick()$print()
}
|
04c001257acdd9713a4fccf6f4d5fd2a85627ec9
|
e7bb2f29beacbc7d61b22f4a85dc5699ed76c5fb
|
/R/forest_lake.R
|
bf216a6d700e30bfb4fa7a354795465decd91b3a
|
[] |
no_license
|
jmzobitz/degreeDay
|
789ab92f42032799e25e7f22d7ba9b10be102f89
|
f72110fc10b528eebff8cec8cdbcbb4846e2c8bd
|
refs/heads/master
| 2020-04-10T03:07:03.957284
| 2019-01-24T19:06:36
| 2019-01-24T19:06:36
| 160,733,747
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 636
|
r
|
forest_lake.R
|
#' Measured minimum and maximum daily temperature from Forest Lake Weather Station
#'
#'
#' \itemize{
#'   \item year. Year of measurement
#'   \item day. Day of the year
#'   \item min_temp. Minimum temperature (degrees Celsius)
#'   \item max_temp. Maximum temperature (degrees Celsius)
#' }
#'
#' @docType data
#' @keywords datasets
#' @name forest_lake
#' @usage data(forest_lake)
#' @format A data frame with 365 rows and 4 variables
#' @source This data has been provided by the Utah Climate Center.
#'
#' Station ID USC00212881
#' Station Name FOREST LAKE 5NE
#' Latitude 45.3397
#' Longitude -92.9125
#' Elevation 280.4 m
# NULL is the roxygen2 convention for documenting a dataset: the block above
# is attached to `forest_lake` via @name, and no R object is defined here.
NULL
|
14ab6799773b5898c89b56ff5b6190f6b88a0281
|
501684023d91f6de5617d1b13b2e3367dc919473
|
/gToolbox/man/dds_heatmap_rld_vst.Rd
|
de529253884a76f89810b0ea3e8beac41f8df7f1
|
[] |
no_license
|
aqibrahimbt/BioMarkerAnalysis
|
8c55d4d1085bb370e84def48ab47e63aa781f708
|
f37edf645e5e8abdd991ddbbb088b41775b18812
|
refs/heads/master
| 2022-01-08T23:34:57.680090
| 2018-08-22T10:30:07
| 2018-08-22T10:30:07
| 147,839,783
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 699
|
rd
|
dds_heatmap_rld_vst.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/de_analysis.R
\name{dds_heatmap_rld_vst}
\alias{dds_heatmap_rld_vst}
\title{Generates heatmap for the top {n} genes from Regularized Log transform
of the count data and outputs to JSON file}
\usage{
dds_heatmap_rld_vst(dds, subset, datasets, outdir, method = "raw")
}
\arguments{
\item{dds}{- dds object}
\item{subset}{- number of genes required}
\item{datasets}{- datasets used for the analysis}
\item{outdir}{- output directory}
\item{method}{- name of data transformation method}
}
\description{
Generates heatmap for the top {n} genes from Regularized Log transform
of the count data and outputs to JSON file
}
|
a78fcd6f03733b0491b0f01774dce96f41621bbf
|
c334867663e1786211cc398daa051a722478844b
|
/Sorting.R
|
c853891c70a243804c7d4b94183764fcedd5f4f8
|
[] |
no_license
|
Dinesh5191/Practice
|
ae01fb86a3fbac710a0d5baa9f284a82e9c7d12f
|
661313e6e519015d89d92b4fdb65d72184b36f9b
|
refs/heads/main
| 2023-03-09T04:10:41.506974
| 2021-02-23T07:06:27
| 2021-02-23T07:06:27
| 339,984,329
| 0
| 0
| null | 2021-02-23T07:06:28
| 2021-02-18T08:32:18
|
R
|
UTF-8
|
R
| false
| false
| 3,567
|
r
|
Sorting.R
|
# Practice / scratch script exploring vector creation, naming, coercion,
# sorting, ordering, and ranking (mostly with the dslabs `murders` data).
# NOTE(review): this is exploratory code; several lines below error at
# runtime and are flagged inline rather than removed.
# We may create vectors of class numeric or character with the concatenate function
codes <- c(380, 124, 818)
country <- c("italy", "canada", "egypt")
codes
codes <- c(italy = 380, canada = 124, egypt = 818)
codes <- c("italy" = 380, "canada" = 124, "egypt" = 818)
codes
class(codes)
codes <- c(380, 124, 818)
country <- c("france","canada","egypt")
names(codes) <- country
codes
range(0,10)
seq(0,10)
# NOTE(review): assigning a character string into a numeric vector coerces
# the whole vector to character.
codes[1]<-("india=590")
codes
# NOTE(review): `$` cannot be applied to an atomic vector — this line errors.
x<-codes$france
codes[c(1,3)]
codes[1:2]
codes[c("canada")]
codes[c("egypt","italy")]
# Mixing numeric and character coerces everything to character.
x <- c(1, "canada", 3)
x
x<-1:5
y<-as.character(x)
y
as.numeric(y)
temp <- c("Beijing"=35, "Lagos"=88, "Paris"=42, "Rio de Janeiro"=84, "San Juan"=81, "Toronto"=30)
temp
city <- c("Beijing", "Lagos", "Paris", "Rio de Janeiro", "San Juan", "Toronto")
city
names(temp) <- city
temp[1:3]
temp[c(3,5)]
x <- 12:73
x
length(x)
seq(1, 100, 2)
length(seq(6, 55, 4/7))
seq(6, 55, 4/7)
# NOTE(review): `murders` is not loaded until library(dslabs)/data(murders)
# below — in a fresh session this line errors.
pop <- murders$population
a <- seq(1, 10, length.out = 100)
class(a)
a
class(3L)
# Coercing a vector containing "a" to numeric yields NA with a warning.
x <- c(1, 3, 5,"a")
x <- as.numeric(x)
library(dslabs)
data(murders)
sort(murders$total)
x <- c(31, 4, 15, 92, 65)
x
sort(x) # puts elements in order
index <- order(x) # returns index that will put x in order
x[index] # rearranging by this index puts elements in order
order(x)
murders$state[1:10]
murders$abb[1:10]
index <- order(murders$total)
murders$abb[index] # order abbreviations by total murders
max(murders$total) # highest number of total murders
i_max <- which.max(murders$total) # index with highest number of murders
murders$state[i_max] # state name with highest number of total murders
x <- c(31, 4, 15, 92, 65)
x
rank(x) # returns ranks (smallest to largest)
pop <- murders$population
pop
pop <- sort(pop)
pop
pop[1]
ord <- order(pop)
min(ord)
pop[ord]
order(ord)
order(pop)
ord
ord <- order(pop)
ord
pop <- murders$population
ord <- order(pop)
ord
i<-which.min(murders$population)
# NOTE(review): the column is `state`, not `states` — this yields NULL.
states <- murders$states
# NOTE(review): `states` is a vector, not a function — `states(i)` errors;
# same for `i(states)` on the next line. Subsetting would be states[i].
states(i)
i(states)
i <- murders$states
i
library(dslabs)
data(murders)
i<-which.min(murders$population)
i
state <- murders$state
state[i]
states <- murders$state
ranks <- rank(murders$population)
my_df <- data.frame(states,ranks)
my_df
ind <- order(murders$population)
ind
states[ind]
my_df <- data.frame(states[ind],ranks[ind])
my_df
my_df <- data.frame(states = states[ind], rank = ranks[ind])
my_df
my_df <- data.frame(states = states[ind], ranks = ranks[ind])
library(dslabs)
data(na_example)
str(na_example)
# mean over a vector containing NA is NA unless na.rm = TRUE.
mean(na_example)
ind <- is.na(na_example)
sum(ind)
na_example[!ind]
x <- c(1, 2, 3)
ind <- c(FALSE, TRUE, FALSE)
mean(na_example)
mean(na_example[!ind])
# The name of the state with the maximum population is found by doing the following
murders$state[which.max(murders$population)]
# how to obtain the murder rate
murder_rate <- murders$total / murders$population * 100000
murders$state[which.max(murder_rate)]
# ordering the states by murder rate, in decreasing order
murders$state[order(murder_rate, decreasing=TRUE)]
# Fahrenheit -> Celsius conversion.
temp <- c(35, 88, 42, 84, 81, 30)
temp <- 5/9 * (temp-32)
temp
city <- c("Beijing", "Lagos", "Paris", "Rio de Janeiro", "San Juan", "Toronto")
city_temps <- data.frame(name = city, temperature = temp)
city_temps
x <- seq(1, 100)
sum(x)
sum(1/x^2)
murder_rate <- murders$total / murders$population * 100000
mean(murder_rate)
x <- c(2, 43, 27, 96, 18)
rank(x)
order(x)
min(x)
which.min(x)
max(x)
which.max(x)
name <- c("Mandi", "Amy", "Nicole", "Olivia")
distance <- c(0.8, 3.1, 2.8, 4.0)
time <- c(10, 30, 40, 50)
time <- time/60
speed <- distance/time
speed
time
speed <- distance/time
speed
# NOTE(review): `time` was just overwritten as a numeric vector, but
# `time(4)` calls the stats::time function on the value 4 — this is not the
# intended minutes-to-hours conversion (that would be 4/60 or time[4]).
time(4)/60
|
1bf21310c4c92b8c7dd646b384c6c8a31dddebea
|
f2ed007678fb657948af026f26c87d268d4afc70
|
/R/distancesDistribution.R
|
c4ff87b17c00f13a624a9bef42279cf8203ded8e
|
[] |
no_license
|
cfyy/Distances
|
12998e04e0204563d3fc0bdf90cf851b46b9ada5
|
56a3f1f3f0833f0285410f4f6a28bb7bd1f801fc
|
refs/heads/master
| 2022-04-03T05:15:13.345382
| 2020-02-03T14:52:46
| 2020-02-03T14:52:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 942
|
r
|
distancesDistribution.R
|
distancesDistribution <- function(data) {
  #require("dbt.classifiers")
  #require("dbt.pareto")
  # author: Raphael Päbst
  warning('Not verified, may not work properly. Please check results')
  # Cap the rows used for the pairwise-distance computation; larger inputs
  # are reduced to a subsample of at most `sample.cap` rows via splitSample.
  sample.cap <- 1000
  n.points <- nrow(data)
  sampled <- if (n.points > sample.cap) {
    splitSample(data, (sample.cap / n.points) * 100)[[1]]
  } else {
    data
  }
  # All pairwise Euclidean distances, flattened to a plain vector.
  dist.vec <- as.vector(dist(sampled, method = "euclidean"))
  # Pareto radius and the cumulative / pointwise density estimates based on it.
  radius <- ParetoRadius(dist.vec)
  cumulative <- PCE(dist.vec, radius)
  pde <- DataVisualizations::ParetoDensityEstimation(dist.vec, radius)
  list(
    cumulativeKernels = cumulative[[1]],
    cumulativeDistanceDensity = cumulative[[2]],
    kernels = pde[[1]],
    pdeDistance = pde[[2]],
    distances = dist.vec
  )
}
|
9c9799f6e41b7a0820dd1539db319691fe613984
|
e28711ce5ece5984dfd14c934938e3fce1468306
|
/man/rSemiCov.Rd
|
fcbe0ef83c7d5f36a0592c8fdb8b2a7c6eb15665
|
[] |
no_license
|
jonathancornelissen/highfrequency
|
7387098c0998f2fb719cad4acff6b25bea781720
|
967dc40e0f7688f1e4d89ca5244ad1ac2b7810d4
|
refs/heads/master
| 2022-12-15T23:19:42.120107
| 2022-12-05T21:01:34
| 2022-12-05T21:01:34
| 7,306,202
| 125
| 62
| null | 2022-12-05T21:01:35
| 2012-12-24T11:15:54
|
R
|
UTF-8
|
R
| false
| true
| 3,883
|
rd
|
rSemiCov.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/realizedMeasures.R
\name{rSemiCov}
\alias{rSemiCov}
\title{Realized semicovariance}
\usage{
rSemiCov(
rData,
cor = FALSE,
alignBy = NULL,
alignPeriod = NULL,
makeReturns = FALSE
)
}
\arguments{
\item{rData}{an \code{xts} or \code{data.table} object containing returns or prices, possibly for multiple assets over multiple days.}
\item{cor}{boolean, in case it is \code{TRUE}, and the input data is multivariate, the correlation is returned instead of the covariance matrix. \code{FALSE} by default.}
\item{alignBy}{character, indicating the time scale in which \code{alignPeriod} is expressed.
Possible values are: \code{"ticks"}, \code{"secs"}, \code{"seconds"}, \code{"mins"}, \code{"minutes"}, \code{"hours"}}
\item{alignPeriod}{positive numeric, indicating the number of periods to aggregate over. For example, to aggregate
based on a 5-minute frequency, set \code{alignPeriod = 5} and \code{alignBy = "minutes"}.}
\item{makeReturns}{boolean, should be \code{TRUE} when \code{rData} contains prices instead of returns. \code{FALSE} by default.}
}
\value{
In case the data consists of one day a list of five \eqn{N \times N} matrices are returned. These matrices are named \code{mixed}, \code{positive}, \code{negative}, \code{concordant}, and \code{rCov}.
The latter matrix corresponds to the realized covariance estimator and is thus named like the function \code{\link{rCov}}.
In case the data spans more than one day, the list for each day will be put into another list named according to the date of the estimates.
}
\description{
Calculate the Realized Semicovariances (rSemiCov).
Let \eqn{ r_{t,i} } be an intraday \eqn{M x N} return matrix and \eqn{i=1,...,M}
the number of intraday returns. Then, let \eqn{r_{t,i}^{+} = max(r_{t,i},0)} and \eqn{r_{t,i}^{-} = min(r_{t,i},0)}.
Then, the realized semicovariance is given by the following three matrices:
\deqn{
\mbox{pos}_t =\sum_{i=1}^{M} r^{+}_{t,i} r^{+'}_{t,i}
}
\deqn{
\mbox{neg}_t =\sum_{i=1}^{M} r^{-}_{t,i} r^{-'}_{t,i}
}
\deqn{
\mbox{mixed}_t =\sum_{i=1}^{M} (r^{+}_{t,i} r^{-'}_{t,i} + r^{-}_{t,i} r^{+'}_{t,i})
}
The mixed covariance matrix will have 0 on the diagonal.
From these three matrices, the realized covariance can be constructed as \eqn{pos + neg + mixed}.
The concordant semicovariance matrix is \eqn{pos + neg}.
The off-diagonals of the concordant matrix is always positive, while for the mixed matrix, it is always negative.
}
\details{
In the case that cor is \code{TRUE}, the mixed matrix will be an \eqn{N \times N} matrix filled with NA as mapping the mixed covariance matrix into correlation space is impossible due to the 0-diagonal.
}
\examples{
# Realized semi-variance/semi-covariance for prices aligned
# at 5 minutes.
# Univariate:
rSVar = rSemiCov(rData = sampleTData[, list(DT, PRICE)], alignBy = "minutes",
alignPeriod = 5, makeReturns = TRUE)
rSVar
\dontrun{
library("xts")
# Multivariate multi day:
rSC <- rSemiCov(sampleOneMinuteData, makeReturns = TRUE) # rSC is a list of lists
# We extract the covariance between stock 1 and stock 2 for all three covariances.
mixed <- sapply(rSC, function(x) x[["mixed"]][1,2])
neg <- sapply(rSC, function(x) x[["negative"]][1,2])
pos <- sapply(rSC, function(x) x[["positive"]][1,2])
covariances <- xts(cbind(mixed, neg, pos), as.Date(names(rSC)))
colnames(covariances) <- c("mixed", "neg", "pos")
# We make a quick plot of the different covariances
plot(covariances)
addLegend(lty = 1) # Add legend so we can distinguish the series.
}
}
\references{
Bollerslev, T., Li, J., Patton, A. J., and Quaedvlieg, R. (2020). Realized semicovariances. \emph{Econometrica}, 88, 1515-1551.
}
\seealso{
\code{\link{ICov}} for a list of implemented estimators of the integrated covariance.
}
\author{
Emil Sjoerup.
}
\keyword{volatility}
|
991e60a6eb5533ef82095161a985b42c49165545
|
67c2633786ebaf36b649b0c07f7544e09cf9a924
|
/man/mechanismStability-class.Rd
|
9461d1691bcf0f0c29708411271a251310fc92ec
|
[] |
no_license
|
privacytoolsproject/PSI-Library
|
6343cb34cf28a8736807e2bc95990d2c7bbe3756
|
adaa32e941dc2832b0a719886d863e29f81808ec
|
refs/heads/develop
| 2021-03-27T10:22:03.874212
| 2020-02-12T19:02:20
| 2020-02-12T19:02:20
| 82,702,513
| 6
| 6
| null | 2020-02-12T19:02:21
| 2017-02-21T16:33:22
|
R
|
UTF-8
|
R
| false
| true
| 273
|
rd
|
mechanismStability-class.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mechanism-stability.R
\docType{class}
\name{mechanismStability-class}
\alias{mechanismStability-class}
\alias{mechanismStability}
\title{Stability mechanism}
\description{
Stability mechanism
}
|
28665107fea2f089df84b7b4b4e2eb1f35f52fb5
|
d85c04b9fe18a217ccfdb4b1a8ffe50db783676e
|
/shp2raster_function.R
|
ab44df26fa4007285534a96bc7d1e73cdccd6349
|
[
"MIT"
] |
permissive
|
hmorzaria/Biodiversity
|
bc76024521fdcdeb8bb0d75bdc619997df102a6f
|
1169a2312f27f07716177715bf10a237ee723516
|
refs/heads/master
| 2021-01-10T10:06:23.677131
| 2016-01-21T23:16:14
| 2016-01-21T23:16:14
| 49,678,635
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,069
|
r
|
shp2raster_function.R
|
##https://amywhiteheadresearch.wordpress.com/2014/05/01/shp2raster/
# Rasterize a polygon shapefile onto a template raster.
#
# Args:
#   shp:         SpatialPolygons* object to rasterize.
#   mask.raster: template raster providing extent, resolution and background.
#   label:       layer name and base name of the GTiff written to disk.
#   value:       cell value assigned where the shapefile covers the raster.
#   transform:   set TRUE when the polygon is not in the same coordinate
#                system as the output raster; proj.from/proj.to then give the
#                source and target projections.
#   map:         plot the resulting raster?
# Returns: the merged raster (also written to <label>.tif in the working dir).
shp2raster <- function(shp, mask.raster, label, value, transform = FALSE, proj.from = NA,
                       proj.to = NA, map = TRUE) {
  # BUG FIX: require(raster, rgdal) passed "rgdal" as the lib.loc argument and
  # never loaded rgdal; each package must be loaded separately.
  require(raster)
  require(rgdal)
  # use transform==TRUE if the polygon is not in the same coordinate system as
  # the output raster
  if (isTRUE(transform)) {
    proj4string(shp) <- proj.from
    shp <- spTransform(shp, proj.to)
  }
  # convert the shapefile to a raster based on a standardised background raster
  r <- rasterize(shp, mask.raster)
  # set the cells associated with the shapefile to the specified value
  r[!is.na(r)] <- value
  # merge the new raster with the mask raster and export to the working
  # directory as a tif file
  r <- mask(merge(r, mask.raster), mask.raster, filename = label, format = "GTiff",
            overwrite = TRUE)
  # plot map of new raster
  if (isTRUE(map)) {
    plot(r, main = label, axes = FALSE, box = FALSE)
  }
  names(r) <- label
  return(r)
}
|
d6484d7c08e993703d8a8486db38d014bf6e0d7e
|
4180e1de7f766fd0065f33559481ff730470598a
|
/analyze/analyze.r
|
9a03d0f5a173b16473c4497830140351a113e8e5
|
[] |
no_license
|
rhema/chrome-study-tracker
|
77bcdeb24f6f1a64f8c66410953870d886fbd02b
|
b23509e33575b10b0bcb59ccfde42bd198885468
|
refs/heads/master
| 2020-05-20T06:16:07.318084
| 2012-12-20T16:03:06
| 2012-12-20T16:03:06
| 6,184,455
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,705
|
r
|
analyze.r
|
#check first and second
#check difference between first and second or difference between two results
#setwd("/Users/administrator/code/js/chrome-study-tracker/analyze")
# NOTE(review): hard-coded setwd() makes this script machine-specific; prefer
# running it from the project directory.
setwd("/Users/rhema/Documents/work/gitwork/chrome-study-tracker/analyze")
study_data<-read.csv('second.csv',header=TRUE)
# Names of the metric columns analysed below. cbind() of strings yields a
# 1-row character matrix; length() and metrics[i] below still work element-wise.
metrics<-cbind('collected','total_time','pdf_time','paper_page_time','start_page_time','collecting_time','transitional_page_time','depth_mean','depth_max', 'collected_depth','page_impression','keyword_variety','collection_novelty')
# Two-way between-subjects ANOVA (dataset x method) for each metric, printing
# the metric name and its table of cell means. Relies on the script globals
# `study_data` and `metrics` defined above.
between_subjects_tests <- function()
{
  # seq_along() is safe even for an empty `metrics` (1:length(...) is not).
  for (i in seq_along(metrics))
  {
    print(metrics[i])
    aov.result <- aov(study_data[,metrics[i]] ~ study_data[,'dataset']*study_data[,'method'])
    # print(summary(aov.result))
    print(model.tables(aov.result, "means"))
  }
}
# Repeated-measures (within-subjects) ANOVA per metric, with subject as the
# error stratum; prints the ANOVA summary and the table of means.
# NOTE(review): this function is re-defined twice further down this file; the
# last definition (whose analysis lines are all commented out) is the one that
# survives when the whole script is sourced — confirm which version is meant.
within_subjects_tests <- function()
{
for(i in 1:length(metrics))
{
print("start")
print(metrics[i])
aov.result <- aov(study_data[,metrics[i]] ~ study_data[,'method']+Error(study_data[,'subject']/study_data[,'method']))
print(summary(aov.result))
print(model.tables(aov.result, "means"))
# print("end")
# print("")
print("end")
}
}
# Variant of within_subjects_tests.
# NOTE(review): all printing in the loop body is commented out, so this
# function fits each per-metric ANOVA and then discards it without reporting
# anything — confirm whether this is intentional.
within_subjects_tests2 <- function()
{
for(i in 1:length(metrics))
{
#print("start")
#print(metrics[i])
aov.result <- aov(study_data[,metrics[i]] ~ study_data[,'method']+Error(study_data[,'subject']/study_data[,'method']))
#print(summary(aov.result))
#print(model.tables(aov.result, "means"))
# print("end")
# print("")
# print("end")
}
}
# NOTE(review): this re-definition overwrites the full within_subjects_tests
# defined earlier in the file. Its body only prints "Start..." once per
# metric; every analysis line is commented out. Confirm the earlier, complete
# version was not the one meant to be kept.
within_subjects_tests <- function()
{
for(i in 1:length(metrics))
{
print("Start...")
# print(metrics[i])
# aov.result <- aov(study_data[,metrics[i]] ~ study_data[,'method']+Error(study_data[,'subject']/study_data[,'method']))
# print(summary(aov.result))
# print(model.tables(aov.result, "means"))
# print("end")
# print("")
}
}
###by hands
# cor.test(study_data[,'collected'], study_data[,'depth_max']) shows positive correlation between the number collected and the max depth....
#
#
#found thus fars...
#########################ice web
#'collected' , 10.714 12.000
#'total_time', 21.817 21.201
#'pdf_time', 0.8495 0.7620
#'paper_page_time', 0.789 16.237 ***
#'start_page_time', 18.735 2.476 ***
#'collecting_time', 0.8842 1.2541 (p 0.1070)
#'transitional_page_time', 0.0306 0.9057 *
#'depth_mean', , 2.1740 2.4872
#'depth_max' 4.143 5.286
#'collected_depth', 2.2479 1.9810 (p .4393)
#'page_impression' 37.57 34.64
####################
##tested separating grad from non grad students, found no difference
|
499ab5b8679d46432dd14f519cb980e2c1eea139
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/lmomco/examples/is.gev.Rd.R
|
7d37ad383bda359331615e8c42753d0916191622
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 356
|
r
|
is.gev.Rd.R
|
library(lmomco)
### Name: is.gev
### Title: Is a Distribution Parameter Object Typed as Generalized Extreme
###   Value
### Aliases: is.gev
### Keywords: utility (distribution, type check) Distribution: Generalized
###   Extreme Value
### ** Examples
# Fit GEV parameters to a small sample via L-moments, then compute the median
# quantile only if the parameter object really is of GEV type.
para <- pargev(lmoms(c(123,34,4,654,37,78)))
# is.gev() already returns a logical; comparing it with `== TRUE` is redundant.
if(is.gev(para)) {
  Q <- quagev(0.5,para)
}
|
58ecbf8272d7031f52179f24bf9387b16fcb8b07
|
7311333635a0711c86a84b2badd571c5a0bd42b6
|
/man/entropy.Rd
|
bec81761d1957e0e12e93bacb075eaf0c32b623a
|
[] |
no_license
|
cran/infotheo
|
ef9d11f5a605df1d584b93cdaafd92e4797250ee
|
a0a3450b5ed66f49fadb98a0509baeb7e78f167d
|
refs/heads/master
| 2022-04-29T07:31:02.112855
| 2022-04-08T10:00:24
| 2022-04-08T10:00:24
| 17,696,780
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,841
|
rd
|
entropy.Rd
|
\name{entropy}
\alias{entropy}
\title{entropy computation}
\usage{entropy(X, method="emp")}
\arguments{
\item{X}{data.frame denoting a random vector where columns contain variables/features and rows contain outcomes/samples.}
\item{method}{The name of the entropy estimator. The package implements four estimators :
"emp", "mm", "shrink", "sg" (default:"emp") - see details.
These estimators require discrete data values - see \code{\link{discretize}}.}
}
\value{ \code{entropy} returns the entropy of the data in nats.}
\description{
\code{entropy} takes the dataset as input and computes the
entropy according
to the entropy estimator \code{method}.
}
\details{
\itemize{
\item "emp" : This estimator computes the entropy of the empirical probability distribution.
\item "mm" : This is the Miller-Madow asymptotic bias corrected empirical estimator.
\item "shrink" : This is a shrinkage estimate of the entropy of a Dirichlet probability distribution.
\item "sg" : This is the Schurmann-Grassberger estimate of the entropy of a Dirichlet probability distribution.
}
}
\author{
Patrick E. Meyer
}
\references{
Meyer, P. E. (2008). Information-Theoretic Variable Selection and Network Inference from Microarray Data. PhD thesis of the Universite Libre de Bruxelles.
J. Beirlant, E. J. Dudewicz, L. Györfi, and E. van der Meulen (1997). Nonparametric
entropy estimation: An overview. International Journal of Mathematical and Statistical Sciences, 6, 17--39.
Hausser J. (2006). Improving entropy estimation and the inference of genetic regulatory networks.
Master thesis of the National Institute of Applied Sciences of Lyon.
}
\seealso{\code{\link{condentropy}}, \code{\link{mutinformation}}, \code{\link{natstobits}}}
\examples{
data(USArrests)
H <- entropy(discretize(USArrests),method="shrink")
}
\keyword{misc}
|
0f217d9ee57a0b9713ef4b8c274eaefccb57d80d
|
37a1b0e96b6a224b1df0a6c680ca02b34dcb581b
|
/R/summary_tibble.R
|
27d5d44bc4ee8cdac4eaa76577e5e4b18fe5497e
|
[] |
no_license
|
HuntsmanCancerInstitute/hciR
|
a75fcb4a4afb674477bcd9d26bb03a511228f78a
|
418a81899a31c4def7f9a7aef315f872a1a56700
|
refs/heads/master
| 2023-08-18T08:03:25.771541
| 2023-08-08T18:19:02
| 2023-08-08T18:19:02
| 67,628,344
| 9
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,585
|
r
|
summary_tibble.R
|
#' Summarize tibble columns
#'
#' Summarize tibbles with many columns like TCGA metadata. Summaries for large tibbles
#' with many rows will be slow and not recommended.
#'
#' @param x a tibble or data.frame
#' @param y a vector (argument to \code{top3})
#'
#' @return A tibble with column names, types, number of unique values and NAs,
#' minimum, maximum and top three values.
#'
#' @author Chris Stubben
#'
#' @examples
#' summary_tibble(mtcars)
#' #drop_empty_columns( tibble( a=1:5, b=NA, c="x", d="") )
#' @export
summary_tibble <- function(x){
   tibble::tibble(
      column = colnames(x),
      class = vapply(x, tibble::type_sum, character(1)), # code from glimpse
      unique = sapply(x, function(y) length( stats::na.omit( unique(y)))),
      NAs = sapply(x, function(y) sum(is.na(y) ) ),
      ### suppressWarnings to avoid : min(c(NA, NA), na.rm=TRUE)
      # NOTE(review): apply() coerces the whole frame to a character matrix, so
      # for mixed-type frames min/max are lexicographic on padded strings —
      # trimws() strips that padding. Confirm this is the intended behavior.
      min = trimws(suppressWarnings( apply(x, 2, min, na.rm=TRUE ))),
      max = trimws(suppressWarnings( apply(x, 2, max, na.rm=TRUE ))),
      ## will be slow with many rows...
      top3 = sapply(x, top3)
   )
}
#' @describeIn summary_tibble Top three values
#' @export
top3 <- function(y){
  # Tally the values, most frequent first, and keep at most the top three.
  counts <- head(sort(table(y), decreasing = TRUE), 3)
  # Format as "value (count)" pairs joined by commas.
  paste(paste0(names(counts), " (", counts, ")"), collapse = ", ")
}
#' @describeIn summary_tibble Drop empty columns
#'
#' A column is "empty" when every entry is NA or the empty string. A message
#' lists the dropped columns.
#' @export
drop_empty_columns <- function(x){
  # vapply over columns keeps each column's own type; the original apply()
  # coerced the whole data frame to a character matrix first.
  n1 <- vapply(x, function(y) all(is.na(y) | y == ""), logical(1))
  if(sum(n1) > 0){
    message("Dropping ", sum(n1), " columns: ", paste( colnames(x)[n1], collapse=", ") )
    # drop = FALSE keeps the result a data frame even when one column remains
    # (tibbles never drop, but base data frames would collapse to a vector).
    x <- x[, !n1, drop = FALSE]
  }
  x
}
|
1d804ab821c38103e607e12f2b856cceb23c173c
|
ec3947959f93dd1d5112080db031c70fbd2cc127
|
/R/bfastTemplate.R
|
6ef4433d124e1b162bfbfb358e3a0708f5e10848
|
[] |
no_license
|
npp97-field/gimms_iran
|
77e7c104665d75464367246630d9bb110d801f27
|
de17f54d1991801173e14763f6d90565b33af11e
|
refs/heads/master
| 2020-03-27T07:10:01.810316
| 2015-12-09T07:51:40
| 2015-12-09T07:51:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 444
|
r
|
bfastTemplate.R
|
# Build an NA-filled raster template covering Iran, derived from the first
# GIMMS NDVI file found under "data/".
# Returns: a RasterLayer cropped to Iran's extent with every cell set to NA.
bfastTemplate <- function() {
  ## gimms raw data
  gimms_files <- gimms::rearrangeFiles(dsn = "data", full.names = TRUE)
  gimms_raster <- raster::stack(gimms_files[1])
  ## reference extent
  library(rworldmap)
  data("countriesCoarse")
  spy_iran <- subset(countriesCoarse, ADMIN == "Iran")
  ## crop global gimms images
  # BUG FIX: the original referenced `gimms_rasters` (undefined); the stack
  # created above is named `gimms_raster`.
  rst_template <- raster::crop(gimms_raster[[1]], spy_iran)
  rst_template[] <- NA
  return(rst_template)
}
|
20ae555786a9c5dabe31fbdb9d064d4dba8a1ea3
|
dbfff49801233324ee40f1ff559303d9ad23421d
|
/SG-RNA-seq/stress_granule.R
|
47c30c13be5839079b435c2b9d57116b3bad62a5
|
[] |
no_license
|
JungnamChoLab/CodonOptimality
|
62008e68ebc14c6c4090da7544c840c5ea2e1d38
|
65ce2bafd87a383f07ba7dd8b50ca36182039802
|
refs/heads/master
| 2023-06-16T05:44:07.424611
| 2021-07-02T05:04:46
| 2021-07-02T05:04:46
| 274,046,609
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,280
|
r
|
stress_granule.R
|
# Use the raw read count got from featurecounts to do DEG analysis and got SG-enriched and depleted transcripts
g <-read.delim("stress_granule_raw_read_count")
# Columns 7-10 hold the two Total and two stress-granule (SG) samples.
colnames(g)[7:10] <- c("Total1","Total2","SG1","SG2")
gg <- g[,c(1,7:10)]
# NOTE(review): `t_type` is not defined in this script; it must come from the
# session or an earlier script — confirm before running standalone.
g <- merge(t_type,gg,by.x="V1",by.y="Geneid")
table(g$V2)
library("DESeq2")
# Count matrix: rows = transcripts (named by V1), columns = the 4 samples.
gg <- data.frame(g[,c(3:6)],row.names=g$V1)
countData <- as.matrix(gg)
colData <- data.frame("condition" = rep(c("Total","SG"),each=2))
rownames(colData) <- colnames(countData)
colData
all(rownames(colData) %in% colnames(countData))
dds <- DESeqDataSetFromMatrix(countData = countData, colData = colData, design = ~condition)
dds$condition
# Make "Total" the reference level so fold changes are SG vs Total.
dds$condition <- factor(dds$condition, levels = c("Total","SG"))
dds$condition
dds <- DESeq(dds)
res <- results(dds)
res
hist( res$pvalue, breaks=20, col="grey" )
plotDispEsts( dds , ylim = c(1e-6, 1e1))
summary(res)
vsd <- vst(dds, blind=FALSE)
rld <- rlog(dds)
#use ggmaplot draw the MA figure https://rpkgs.datanovia.com/ggpubr/reference/ggmaplot.html
install.packages("ggpubr")
library("ggpubr")
ggmaplot(res, fdr = 0.05, fc = 4, genenames = NULL,
detection_call = NULL, size = 0.4, font.label = NULL, label.rectangle = FALSE, palette = c("#B31B21", "#1465AC",
"darkgray"), top = 0, select.top.method = c("padj", "fc"),
main = NULL, xlab = "Log2 Mean expression",
ylab = "Log2 Fold change (SG/Total)", ggtheme = theme_classic()+theme(legend.position='none'))
#export img in eps format w*h 600*400
sum(res$padj < 0.05, na.rm=TRUE)
# Classify each transcript as Up / Down / No / "Not test" (padj NA).
re <- as.data.frame(res)
re$Significant <- "No"
re[is.na(re$padj),]$Significant <- "Not test"
#re[re$padj < 0.05 &abs(re$log2FoldChange)>=1,]$sign <-"yes"
re[re$padj <= 0.05 &re$log2FoldChange>=1 &!is.na(re$padj),]$Significant <-"Up"
re[re$padj <= 0.05 &re$log2FoldChange<=-1&!is.na(re$padj),]$Significant <-"Down"
# NOTE(review): "Not test" is not among the factor levels, so those rows
# become NA in `Significant` here — confirm this is intended.
re$Significant <- factor(re$Significant,levels = c("Up","Down","No"))
table(re[,c(7)])
# Volcano plot with fold-change and significance guide lines.
v <- ggplot(re,aes(log2FoldChange,-log10(padj)))
v + geom_point(aes(colour=Significant))+geom_vline(xintercept=c(-1,1),linetype=4,colour="grey")+geom_hline(yintercept=-log10(0.05),linetype=4,colour="grey")+xlim(-17, 17)
write.table(re,"allDEGs.txt",sep="\t",quote=F)
|
740f5af505460c584aee73561a6cca3c8b2d8083
|
a5a1dfa861d42495ea1013c42f8edd460ca89561
|
/hcasmc_specific_eqtl/plot_hcasmc_specific_eqtls_pval.R
|
ad02e3cbcc4713ae1254c174988576a41ac69c64
|
[] |
no_license
|
chanibravo/hcasmc_eqtl
|
4cca8abab75d63196d005294bf42d291128fac24
|
0c4f25b5a336349d8e590f2ac357ce0519e16032
|
refs/heads/master
| 2021-09-20T05:52:29.831159
| 2018-08-05T05:23:01
| 2018-08-05T05:23:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 346
|
r
|
plot_hcasmc_specific_eqtls_pval.R
|
# Plot the p-value distribution of HCASMC-specific eQTLs as a histogram PDF.
library(data.table)
library(dplyr)
library(dtplyr)
library(cowplot)
# Tissue-specificity results; columns renamed to SNP id, p-value, effect size,
# standard error and tissue.
eqtl=fread('../processed_data/eqtl_and_atacseq/specificity.mean.sorted.10000.eql.txt')
setnames(eqtl,c('sid','pval','beta','se','tissue'))
pdf('../figures/hcasmc_specific_eqtl/qsiTop1e5.pval.pdf')
# NOTE(review): if this file is source()d, the ggplot object below is not
# auto-printed and the PDF would be empty; wrap in print() — confirm how the
# script is run.
ggplot(eqtl[tissue=='HCASMC',],aes(-log10(pval)))+geom_histogram()
dev.off()
|
60ad4d1dfaeef9e47e7d035a71fbcd1d2076c38a
|
928f72ab4f9d8fd643c3c010ee732ea1c195a3a2
|
/tests/testthat.R
|
7c72829fbb30d444baba56f2168a68809d39de3f
|
[
"MIT"
] |
permissive
|
vegawidget/ggvega
|
362a3f5df2edeb2177f9dbdd68c21a605359e185
|
e9a98a3db7d894b5378b6d13f32b435af693a4ed
|
refs/heads/master
| 2023-01-04T13:49:40.936592
| 2021-10-22T23:05:09
| 2021-10-22T23:05:09
| 186,412,239
| 48
| 3
|
NOASSERTION
| 2023-01-04T07:47:28
| 2019-05-13T12:08:11
|
HTML
|
UTF-8
|
R
| false
| false
| 56
|
r
|
testthat.R
|
# Standard testthat runner: load the package under test and execute every test
# file in tests/testthat/.
library(testthat)
library(ggvega)
test_check("ggvega")
|
2a7c70752e83ca3e04900dadd0df796fcec181c2
|
7b13f708f1b834a158b5637750f3577d3d5ac7d8
|
/geo_2016_repeatable.R
|
d5095ad2d17f36f3d177d84e370b2ff377348253
|
[] |
no_license
|
cal65/Geography-of-Cal
|
f98712ec4add52ea3b3d31926857b7f677987392
|
8b2c62f2853d71d35d7c6ee76d6752ac03e065b4
|
refs/heads/master
| 2021-01-09T21:45:16.508728
| 2017-02-20T05:54:50
| 2017-02-20T05:54:50
| 52,335,968
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,684
|
r
|
geo_2016_repeatable.R
|
# Travel-map script: load trip data and build a world basemap.
# NOTE(review): hard-coded setwd() makes this machine-specific.
setwd('/Users/christopherlee/Documents/CAL/Real_Life/Geography/')
library(ggplot2)
library(geosphere)
library(ggmap)
library(RColorBrewer)
library(sp)
library(plyr)
G_merged<-read.csv('G_merged.csv')
G_merged$Date.Begin <- as.Date(G_merged$Date.Begin)
G_merged$End.Date <- as.Date(G_merged$End.Date)
# World basemap with land in green on a dark blue ocean.
mapWorld<-borders("world", colour="darkseagreen1", fill="darkseagreen1")
map<- ggplot() + mapWorld + theme(panel.background = element_rect(fill = 'dodgerblue4', colour = 'dodgerblue4'))
map + geom_point(data=G_merged, aes(x=Lon, y=Lat, size=sqrt(Nights), color=Date.Begin), alpha=0.5)
# NOTE(review): this scale is a standalone expression — it is never added to a
# plot with `+`, so it has no effect as written.
scale_color_gradient(low='pink', high='blue')
pal<-brewer.pal(9, 'Set1')
arcmap <- map + geom_point(data=G_merged, aes(x=Lon, y=Lat, size=sqrt(Nights), color=Country), alpha=0.5)
# NOTE(review): `G_coords` and `lines` are only defined further down the file;
# this loop fails if the script is run top-to-bottom — confirm intended order.
for(i in 1:(nrow(G_coords)-1)){
  arcmap<-arcmap + geom_path(data=lines[[i]], aes(x=lon, y=lat), size=.4, color='light blue', alpha=0.6)
}
# Recenter the map on longitude 180 so the Pacific is in the middle.
center<-180
#base<-map('world', resolution=0, fill=F, xlim=c(-150, 170), ylim=c(-15,65))
worldmap <- map_data ("world", resolution=0, xlim=c(-150, 170), ylim=c(-15,85))
#worldmap <- map_data(base)
worldmap$long.recenter <- ifelse(worldmap$long < center - 180 , worldmap$long + 360, worldmap$long)
### Function to regroup split lines and polygons
# Takes a dataframe, the name of its longitude column and of a unique group
# column; returns the dataframe with an added column named group.regroup.
RegroupElements <- function(df, longcol, idcol){
  lon <- df[, longcol]
  regroup <- rep(1, length(lon))
  # A longitude spread above 300 degrees within one group means the element
  # was split at the map edge; tag the moved part separately.
  if (diff(range(lon)) > 300) {
    moved <- lon > mean(range(lon))  # the mean separates the extreme values
    regroup[moved] <- 2
  }
  # Attach the group id so the new grouping variable is unique in the dataset.
  df$group.regroup <- paste(df[, idcol], regroup, sep=".")
  df
}
### Function to close regrouped polygons
# If the first and last longitude differ, append a copy of the first row so
# the polygon ring is closed, then renumber the order column from 1.
ClosePolygons <- function(df, longcol, ordercol){
  first_lon <- df[1, longcol]
  last_lon <- df[nrow(df), longcol]
  if (first_lon != last_lon) {
    df <- rbind(df, df[1, ])
  }
  # Reassign the order variable for the (possibly extended) polygon.
  df[, ordercol] <- seq_len(nrow(df))
  df
}
# now regroup: split polygon groups whose longitudes wrap around the map edge
worldmap.rg <- ddply(worldmap, .(group), RegroupElements, "long.recenter", "group")
# close polys using the new grouping variable
worldmap.cp <- ddply(worldmap.rg, .(group.regroup), ClosePolygons, "long.recenter", "order")
#############################################################################
# Plot worldmap using data from worldmap.cp
worldmap <- ggplot(aes(x = long.recenter, y = lat), data = worldmap.cp) +
  geom_polygon(aes(group = group.regroup), fill="darkseagreen1", colour = "darkseagreen1") +
  scale_y_continuous(limits = c(-60, 85)) +
  scale_x_continuous(limits = c(0, 360), expand = c(0, 0)) +
  coord_equal() + theme_bw() +
  theme(legend.position = "none",
    panel.grid.major = element_blank(),
    panel.grid.minor = element_blank(),
    axis.title.x = element_blank(),
    axis.title.y = element_blank(),
    #axis.text.x = element_blank(),
    axis.text.y = element_blank(),
    axis.ticks = element_blank(),
    panel.border = element_rect(colour = "black"),
    panel.background = element_rect(fill = 'dodgerblue4', colour = 'dodgerblue4'))
# Recenter the travel coordinates onto the 0-360 longitude frame used above.
G_merged$Lon.recenter <- ifelse(G_merged$Lon < center - 180, G_merged$Lon + 360, G_merged$Lon)
G_coords <- G_merged[!is.na(G_merged$Lon), ]
# Great-circle segments between consecutive stops (columns 8:9 are Lon/Lat),
# recentered onto the same 0-360 frame. ifelse() replaces the original inner
# point-by-point loop; seq_len() is safe when there are fewer than two stops.
lines <- vector('list', length = nrow(G_coords) - 1)
for (i in seq_len(nrow(G_coords) - 1)) {
  lines[[i]] <- as.data.frame(gcIntermediate(G_coords[i, c(8:9)], G_coords[i + 1, c(8:9)]))
  lines[[i]]$lon <- ifelse(lines[[i]]$lon < center - 180, lines[[i]]$lon + 360, lines[[i]]$lon)
}
sweetmap <- worldmap + geom_point(data=G_merged, aes(x=Lon.recenter, y=Lat, size=sqrt(Nights), color=Country), alpha=0.5)
sweetermap <- qmap(c(180,0), zoom=2) + geom_point(data=G_merged, aes(x=Lon.recenter, y=Lat, size=sqrt(Nights), color=Country), alpha=0.5)
for (i in seq_len(nrow(G_coords) - 1)) {
  sweetmap <- sweetmap + geom_path(data=lines[[i]], aes(x=lon, y=lat), size=.4, color='light blue', alpha=0.6)
  sweetermap <- sweetermap + geom_path(data=lines[[i]], aes(x=lon, y=lat), size=.4, color='dark red', alpha=0.6)
}
# BUG FIX: the original sweetmap line ended with a dangling `+` immediately
# followed by the pal assignment, which is a runtime error; pal must also be
# defined before it is used by scale_color_manual() below.
pal <- c('grey', brewer.pal(9, 'Set1'))
sweetmap + xlim(0, 320) + ylim(-15, 85) + ggtitle('2016 Travels')
sweetermap + scale_color_manual(values=pal, guide=FALSE) + scale_size_continuous(range(0, 2.5), guide=FALSE) + ylim(-25, 65) + ggtitle('2016 Travels')
|
29031792f881c0a42c51375b958f78130656f57b
|
45ab1e397b5fc69ba84c8f5dfb66c09b79bca4c6
|
/Course_II/R/pract/pract3/task4.r
|
3d85d9f1b7aad9cf3e8c017d8d751367e012e985
|
[
"WTFPL"
] |
permissive
|
GeorgiyDemo/FA
|
926016727afa1ce0ee49e6ca9c9a3c60c755b35f
|
9575c43fa01c261ea1ed573df9b5686b5a6f4211
|
refs/heads/master
| 2023-06-28T00:35:43.166167
| 2023-06-16T14:45:00
| 2023-06-16T14:45:00
| 203,040,913
| 46
| 65
|
WTFPL
| 2022-04-09T21:16:39
| 2019-08-18T18:19:32
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 829
|
r
|
task4.r
|
"
Для задания No 4 из Лабораторной работы No 2 написать программы
в которых Пользователь с клавиатуры вводит значения двух переменных разных типов,
которые затем сравниваются между собой. Использовать функции readline(), print(
и функции преобразования типов.
"
# Read two numbers (an integer and a double), report their storage types, and
# print which one is larger.
{
  a1 <- as.integer(readline("Введите число №1 -> "))
  a2 <- as.double(readline("Введите число №2 -> "))
  print(paste("Тип числа №1:", typeof(a1)))
  print(paste("Тип числа №2:", typeof(a2)))
  # BUG FIX: the original tested a1 < a2 but then reported number 1 as the
  # larger one; the comparison now matches the messages, and the equal case is
  # reported explicitly instead of falling through to "number 2 is larger".
  if (a1 > a2) {
    print("Число №1 больше")
  } else if (a1 == a2) {
    print("Числа равны")
  } else {
    print("Число №2 больше")
  }
}
|
a0985077f546058bbe2b47e921141584583a106a
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/gWidgets2/examples/gframe.Rd.R
|
a06963b2070bf5b5d944b690525403de34610a15
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 489
|
r
|
gframe.Rd.R
|
library(gWidgets2)
### Name: gframe
### Title: Constructor for framed box container with label
### Aliases: .gframe gframe
### ** Examples
## Not run:
##D w <- gwindow("gformlayout", visible=FALSE)
##D f <- gframe("frame", horizontal=FALSE, container=w)
##D glabel("Lorem ipsum dolor sit amet, \nconsectetur adipiscing elit.", container=f)
##D gbutton("change name", container=f, handler=function(h,...) {
##D names(f) <- "new name"
##D })
##D visible(w) <- TRUE
## End(Not run)
|
56283c793ae2b1e85890eccf7a4c20bbcc90d6f7
|
80d01fcfe17acdb9953a0910a726e3e70a9d67e7
|
/man/geocode_place.Rd
|
b24ac704728b4235f58d5f89d2fa2f82d32cbfa8
|
[] |
no_license
|
bpb824/transportr
|
d5cb96afd08cc17e6cc078e25fb721852ddb8db1
|
5e9b12aa1f3a8ef539792bbec149c06a8b68b62f
|
refs/heads/master
| 2020-04-06T06:17:34.084717
| 2019-01-29T18:28:47
| 2019-01-29T18:28:47
| 43,518,670
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 550
|
rd
|
geocode_place.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/googleMapUtils.R
\name{geocode_place}
\alias{geocode_place}
\title{Geocode a place name using Google Places API}
\usage{
geocode_place(placeString, key, output = "loc")
}
\arguments{
\item{placeString}{A string describing the place you'd like to geocode}
\item{key}{Google API key used to authenticate the Places API request}
\item{output}{The output can either be a simple location ('loc') or a list with all response results ('all')}
}
\value{
Location or list of API results
}
\description{
Geocode a place name using Google Places API
}
|
1b91b43dc61139abf20f229ac52aa75469e46ae1
|
639cd1de25056e67de6677e0e54698de0a0d5f5c
|
/man/ATM.Rd
|
4265e8bb1bb42be536a10573650a8d43c2cc4178
|
[] |
no_license
|
kengustafson/czerlinski1999
|
9d3e11238506633a5f4602078d16d07d57b0465f
|
640dcf5206f088739431d6db32d45c98c360a6da
|
refs/heads/main
| 2023-03-17T01:51:01.524455
| 2020-11-22T18:18:40
| 2020-11-22T18:18:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,846
|
rd
|
ATM.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/psychology_datasets.R
\docType{data}
\name{ATM}
\alias{ATM}
\title{Attractiveness of Men.}
\format{
A data frame with 32 rows and 7 variables:
\describe{
\item{case_number}{A numeric column with unique number id for each male celebrity.}
\item{object}{A character column with the last name of the male celebrity being rated.}
\item{criterion}{A numeric column--likely contains average attractiveness rating; units unclear.}
\item{recognition}{A numberic column--content unclear.}
\item{cue1}{A numeric column--likely contains average likeability, portion recognized, or American indicator; discretization method unclear.}
\item{cue2}{A numeric column--likely contains average likeability, portion recognized, or American indicator; discretization method unclear.}
\item{cue3}{A numeric column--likely contains average likeability, portion recognized, or American indicator; discretization method unclear.}
}
}
\source{
This data was pull from the Adaptive Toolbox Library on 07-Sept-2020.
It was uploaded to ATL on 2017-11-28, 8:00 by \email{admin@dotwebresearch.net}
This data description is taken from the summary on the ATL website.
The dataset: \url{http://www.dotwebresearch.net/AdaptiveToolboxOnline/#/data/2255}
Data citation: Henss, R. (1996). The attractiveness of prominent people. Unpublished manuscript, University of Saarbrücken, Saarbrücken, Germany.
}
\usage{
ATM
}
\description{
Predict average (inter-subject) attractiveness ratings based on the subjects average likeability ratings of each person, the percent of subjects who recognized each name (subjects saw only the name, no photos), and whether the person was American.
(Based on data from a study by Henss, 1996, using 115 male and 131 female Germans, aged 17-66 years.)
}
\keyword{datasets}
|
30cb44345af80c442fb5b91a7f8cd66ed5e9e672
|
543f60c0dd71a6eb227c0d8176b38d7e3554b00e
|
/Hyundai.R
|
428c2c2f07745c7958d0e91bb30b11d063ef2edc
|
[] |
no_license
|
ChandanNaik24/Data-Science-
|
b9e60e31f8b33211708e1f69691b1bdfabfd3714
|
f6dcf43d57a37f81bb28b54eb1ebd508943ff941
|
refs/heads/master
| 2023-04-16T10:12:37.658918
| 2021-04-15T06:01:35
| 2021-04-15T06:01:35
| 287,471,833
| 0
| 0
| null | null | null | null |
WINDOWS-1252
|
R
| false
| false
| 2,352
|
r
|
Hyundai.R
|
##### Installing the packages #####
# NOTE(review): install.packages() calls are normally run once interactively,
# not on every execution of the script.
install.packages("caTools")
install.packages("ggplot2")
install.packages("glmnet")
##### Loading libraries #####
library("caTools")
library("ggplot2")
library("glmnet")
# BUG FIX: a stray `l` token followed the library calls and would raise
# "object 'l' not found"; it has been removed.
##### Loading dataset #####
Hyundai <- read.csv("F:/Huyndai.csv")
View(Hyundai)
summary(Hyundai)
##### Exploratory Data Analysis #####
##### First 10 rows #####
head(Hyundai,10)
head(Hyundai$price, 10)
head(Hyundai$transmission,10)
##### Last 10 rows #####
tail(Hyundai,10)
tail(Hyundai$engineSize,10)
# NOTE(review): `tax.Â..` is a mis-encoded column name (likely "tax(£)" read
# with the wrong encoding); it is kept as-is because it matches the data frame.
tail(Hyundai$tax.Â..,10)
##### Plot #####
plot(Hyundai$price, main = "Price", col = "blue")
plot(Hyundai$mileage, main = "Milegae", col = "red")
plot(Hyundai$price, Hyundai$mileage, main = "Price vs Mileage",
     xlab = "Mileage", ylab = "Price", col = "purple")
plot(Hyundai$mpg, Hyundai$mileage, main = "MPG vs Milegae",
     xlab = "MPG", ylab = "Mileage", col = "darkgreen")
##### Checking outliers #####
boxplot(Hyundai$price)
boxplot(Hyundai$mileage)
boxplot(Hyundai$tax.Â..)
##### Bar plot #####
barplot(Hyundai$price, main = "Price")
barplot(Hyundai$mileage, main = "mileage")
barplot(Hyundai$mpg, main = "MPG")
##### Histogram Plot #####
hist(Hyundai$price, main = "Price", col = "red")
hist(Hyundai$mileage, main = "Mileage", col = "green")
hist(Hyundai$tax.Â.., main = "Tax", col = "purple")
##### Model Building #####
# sample.split returns a logical vector marking ~70% of rows for training.
split <- sample.split(Hyundai, SplitRatio = 0.7)
split
##### Creating Train & Test Dataset #####
train <- subset(Hyundai, split == TRUE)
# BUG FIX: the original test-set line had unbalanced parentheses and nested a
# second training-set assignment inside subset(); the test set is the
# complement of the training rows.
test <- subset(Hyundai, split == FALSE)
##### Linear Regression #####
HyunLinear1 <- lm(mileage~price, data = Hyundai)
summary(HyunLinear1)
HyunLinear2 <- lm(mpg~mileage, data = Hyundai)
summary(HyunLinear2)
HyunLinear3 <- lm(year~model, data = Hyundai)
summary(HyunLinear3)
Hyunlinear4 <- lm(price~model, data = Hyundai)
summary(Hyunlinear4)
##### Multiple Regression #####
HyunMult1 <- lm(price~.,data = Hyundai)
summary(HyunMult1)
HyunMult2 <- lm(mileage~., data = Hyundai)
summary(HyunMult2)
HyunMult3 <- lm(year~., data = Hyundai)
summary(HyunMult3)
|
207a9320f427b784687aa75bf700c24c0f80a84b
|
f2a982ef2ad5d0a1086830a59f2700bc7e0c668a
|
/man/guess_separator.Rd
|
76c1fc20c1cbd1d8ac2c68cb18ef44b3bb5b36eb
|
[] |
no_license
|
jimsforks/cleanser
|
6f87363fefd5c0223c17d349ffa19f8d5ff1956c
|
1597f2bfcf58a0084c2810fea236e38a51385e43
|
refs/heads/master
| 2022-03-16T23:49:13.342589
| 2019-09-27T07:43:25
| 2019-09-27T07:43:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 707
|
rd
|
guess_separator.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/guess_separator.R
\name{guess_separator}
\alias{guess_separator}
\title{guess txt file separator}
\usage{
guess_separator(file, separator = c(",", ";", " ", "\\t"),
n_max = 1000)
}
\arguments{
\item{file}{path to csv or txt file}
\item{separator}{vector of possible separator to test}
\item{n_max}{number of row to parse}
}
\value{
sorted vector of possible separator
}
\description{
guess txt file separator
}
\examples{
file <- system.file("dataset","demo.csv",package = "cleanser")
guess_separator(file)
file2 <- system.file("dataset","demo2.csv",package = "cleanser")
guess_separator(file2)
}
|
4d9292a35c5f4bc7b82976527cba03e9dad847c9
|
b7808577564924e90aac8ad0bd800b11a078901f
|
/tests/testthat.R
|
3373517587b7bf6ef8d3c19868152b5812d6257d
|
[] |
no_license
|
congca/einr
|
c94ad5376a9df0054e87cb4f054237600f95373f
|
7e670e9432445d067eae3a3ba526276797c8573b
|
refs/heads/master
| 2021-09-22T17:00:10.187069
| 2018-09-12T10:09:18
| 2018-09-12T10:09:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 52
|
r
|
testthat.R
|
# Standard testthat entry point: running this file (e.g. during R CMD check)
# executes every test file under tests/testthat/ for the "einr" package.
library(testthat)
library(einr)
test_check("einr")
|
571df06d7a839dcf271f2e3da4d3d10ebc3d7c26
|
49ff0bc7c07087584b907d08e68d398e7293d910
|
/mbg/mbg_core_code/mbg_central/LBDCore/R/read_inla_prior_matern.R
|
48b5b8e2aec63da2a3b2d795260b7b34e4cf846d
|
[] |
no_license
|
The-Oxford-GBD-group/typhi_paratyphi_modelling_code
|
db7963836c9ce9cec3ca8da3a4645c4203bf1352
|
4219ee6b1fb122c9706078e03dd1831f24bdaa04
|
refs/heads/master
| 2023-07-30T07:05:28.802523
| 2021-09-27T12:11:17
| 2021-09-27T12:11:17
| 297,317,048
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,396
|
r
|
read_inla_prior_matern.R
|
#' @title Read INLA Matern GP priors for TMB
#'
#' @description Parse a prior specification string from the config and turn
#'   it into a TMB-readable Matern GP prior, together with the SPDE object
#'   it belongs to.
#'
#' @param prior_string character, character vec of length 1 specifying priors
#' @param mesh_s mesh object passed through to \code{build_spde_prior}
#'
#' @return List containing (1) spde object and (2) list specifying a TMB prior,
#'   containing three elements:
#'   - type: Is the prior pc or nonpc (i.e. normal)
#'   - par1: Vector of length 2 for the first parameter. In the nonpc case,
#'   corresponds to mean and precision for logtau. In the pc case,
#'   corresponds to range0 and prange.
#'   - par2: Vector of length 2 for the second parameter. In the nonpc case,
#'   corresponds to mean and precision for logkappa. In the pc case,
#'   corresponds to sigma0 and psigma.
#' @export
read_inla_prior_matern <- function(prior_string, mesh_s) {
  # The config stores the prior as R code, e.g. "list(type = ..., ...)",
  # so it is evaluated here to recover the specification list.
  spec <- eval(parse(text = prior_string[1]))

  built <- build_spde_prior(spec, mesh_s, st_gp_int_zero = FALSE)
  spde <- built$spde

  if (built$spde_prior$type == "nonpc") {
    # Normal priors on (logtau, logkappa): mean and precision for each,
    # pulled from INLA's internal parameterisation of the SPDE object.
    theta_mu <- spde$param.inla$theta.mu
    theta_q <- spde$param.inla$theta.Q
    first_par <- c(theta_mu[1], theta_q[1, 1])
    second_par <- c(theta_mu[2], theta_q[2, 2])
  } else {
    # PC priors: (range0, prange) and (sigma0, psigma).
    first_par <- built$spde_prior$prior$range
    second_par <- built$spde_prior$prior$sigma
  }

  list(
    spde = spde,
    prior = list(
      type = spec$type,
      par1 = first_par,
      par2 = second_par
    )
  )
}
|
e18bd505269b5872425c54b439609162a26daa93
|
da8dae69e597072bc616936d1d72a96f65e4efa0
|
/code/currentversion/tools/nictools/R/ComputeDistance2Coast.R
|
887fc3609d5b30cdd8608103788725f96ba94a2a
|
[] |
no_license
|
UCL/provis
|
71e82c383cd9414840e57c2a2867826d6b4ee3e6
|
86a287c7bc705d4aeffb9bbcf96747e97e6d688b
|
refs/heads/master
| 2020-08-01T04:08:20.198284
| 2019-11-08T12:09:43
| 2019-11-08T12:09:43
| 210,310,151
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 527
|
r
|
ComputeDistance2Coast.R
|
#' Compute distance to coast
#'
#' Loads a coastline shapefile layer and computes, for each input point, the
#' distance to the nearest point on the coast (via \code{geosphere::dist2Line}).
#'
#' @param location vector of locations (longitude,latitude)
#' @param mapdir directory containing shape file with england coastline
#' @param coastfile name of the shapefile layer to load; defaults to
#'   "englandcoast", matching the original hard-coded behaviour
#' @return distance distance to coast and coordinates of nearest point
#' @export
#' @examples
#' dist <- ComputeDistance2Coast(location,mapdir)
ComputeDistance2Coast <- function(location, mapdir, coastfile = "englandcoast") {
  # Load the coastline layer (rgdal::readOGR).
  coast <- readOGR(mapdir, layer = coastfile)
  # dist2Line returns the distance plus the nearest point's coordinates.
  distance <- dist2Line(location, coast)
  return(distance)
}
|
6856f591b0e43efb65db86c3b9bfbf28bb2e39f0
|
b7d6ad61d15f2b0dbb81a59ef43ef86c1cac68ff
|
/usage_cachematrix.R
|
920fa41d2dc67bb717314ba66d1e8090f8de6d99
|
[] |
no_license
|
maro243/ProgrammingAssignment2
|
e663fa26f2c642bd79da25daf73af819d4d34e8c
|
0da340798afc27aaf3ecac09e142a5e287c37aa2
|
refs/heads/master
| 2021-01-18T12:06:19.584144
| 2014-08-24T11:43:40
| 2014-08-24T11:43:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 367
|
r
|
usage_cachematrix.R
|
# Note to reviewer: example usage of the cached-matrix functions. The source()
# call below is absolute because a purely relative path was not workable here.
source('ass2/ProgrammingAssignment2/cachematrix.R')

# A small invertible 2x2 matrix to exercise the cache.
mat <- matrix(c(4, 3, 3, 2), nrow = 2, ncol = 2)
cached_mat <- makeCacheMatrix(mat)

# First call computes the inverse and stores it in the cache.
cacheSolve(cached_mat)
# Second call should return the cached inverse without recomputing.
cacheSolve(cached_mat)
|
11ac11579f69cad31f58a082a39c7dcf59bff0a9
|
9de75837e81cccb6dcb43f59a2002b774f5753b6
|
/LDAcode.R
|
05320e1b2df2f490d2a9e07269c9d2026689f586
|
[] |
no_license
|
blackaceatzworg/IssueDefinitions
|
9383b3e4d3af197259171aa9e4350680c040c652
|
39f4798b30e685109a502fa7c98b3bfa72d27590
|
refs/heads/master
| 2021-01-21T23:37:51.357641
| 2015-02-17T02:04:33
| 2015-02-17T02:04:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,824
|
r
|
LDAcode.R
|
# DETERMINE ISSUE DIMENSIONS
# Install and attach the text-mining stack used below.
for (pkg in c("tm", "SnowballC", "topicmodels")) {
  install.packages(pkg, dependencies = TRUE, repos = 'http://cran.us.r-project.org')
  library(pkg, character.only = TRUE)
}

######## Load the witness statements as a corpus ########
statements <- Corpus(DirSource("Witness Statements Complete txt"))
summary(statements)

#### Pre-processing: lowercase, strip numbers/punctuation, drop stopwords ####
statements <- tm_map(statements, content_transformer(tolower))
statements <- tm_map(statements, removeNumbers)
statements <- tm_map(statements, removePunctuation)

# Standard English stopwords, then domain-specific filler terms.
standard_stops <- stopwords("english")
statements <- tm_map(statements, removeWords, standard_stops)
domain_stops <- c("nuclear","waste","wastes","spent","fuel",
                  "chairman","committee","thank","testimony","question","questions",
                  "record","available","statement","look","dont","talk","congress",
                  "people","believe","will","one","can","get","think","like","say","now","just","make","want","get","know","problem","point","much", "time","way","state")
statements <- tm_map(statements, removeWords, domain_stops)
statements <- tm_map(statements, stemDocument)

#### Build the document-term matrix and drop very sparse terms ####
dtm <- DocumentTermMatrix(statements)
dim(dtm)
dtm
dtm <- removeSparseTerms(dtm, .99)
dtm
dim(dtm)

##############################################################
#### LDA with 7 topics; won't run                           ##
## SEED <- 12345                                            ##
## k <- 7                                                   ##
## lda7 <- LDA(dtm, k, method="Gibbs",                      ##
##             control=list(seed=SEED))                     ##
## Export results to .csv                                   ##
## state.dat <- cbind(topics(lda7),posterior(lda7)$topics)  ##
## write.csv(state.dat, "nw.statements.complete.csv")       ##
##############################################################
|
3a81298a16a40b7f35fa83925cffb1da69f92439
|
f3d4908b3f33681f9c485a7d6f0219ff396608a2
|
/WGBSQC/WGBS_QC_ANOVA_ASD.R
|
c983b14158ee0251ac4aef630292842834fb20a4
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
aciernia/CerebralCortex2019
|
77b8192604e5e65c7024a043180c4842d7177dc1
|
ce1a6df53cb8259218a690505c1ce0ac4786b5f2
|
refs/heads/master
| 2020-05-22T14:47:11.848948
| 2019-05-30T22:56:56
| 2019-05-30T22:56:56
| 186,393,457
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,104
|
r
|
WGBS_QC_ANOVA_ASD.R
|
#9/5/18
##############################################################################
# MultiQC output from Bismark (alignment stats, methylation extraction, etc.):
# read the QC tables in and run repeated-measures ANOVAs on them.
# Annie Vogel Ciernia
##############################################################################
library(ggplot2)
library(lsmeans)
library(nlme)
library(dplyr)
library(tidyr)
library(xlsx)
options("scipen" = 100, "digits" = 4)  # prevent exponents in printed output

##############################################################################
# ASD samples
##############################################################################
# NOTE(review): the original path strings contained "\ " escapes (e.g.
# "Box\ Sync"), which is an unrecognized escape and a parse error in R;
# the backslashes have been removed (spaces need no escaping inside quotes).
path <- "/Users/annieciernia/Box Sync/LaSalle Lab/Experiments/humanASD_WGBSDMRs/WGBS_QC/ASD"
setwd(path)

##############################################################################
# Total % methylation from Bismark
##############################################################################
PerMe <- read.delim("multiqc_bismark_methextract.txt", header = TRUE)

# Calculate percent CH methylation: combine CHH and CHG read counts.
PerMe$meth_ch_reads <- PerMe$meth_chg + PerMe$meth_chh
PerMe$unmeth_ch_reads <- PerMe$unmeth_chg + PerMe$unmeth_chh
# mCH/CH
PerMe$percent_chh_meth <- (PerMe$meth_ch_reads / (PerMe$unmeth_ch_reads + PerMe$meth_ch_reads)) * 100
# mCG/CG
PerMe$percentmeth_cpg <- (PerMe$meth_cpg / (PerMe$meth_cpg + PerMe$unmeth_cpg)) * 100

# Add in group info: read the subject metadata and join on filename.
sampleinfo <- read.csv("/Users/annieciernia/Box Sync/LaSalle Lab/Experiments/humanASD_WGBSDMRs/AllSamples.csv", header = TRUE)
PerMe$Sample <- gsub("_filtered", "", PerMe$Sample)
PerMemerge <- merge(PerMe, sampleinfo, by.x = "Sample", by.y = "filename", all.x = TRUE)
PerMemerge$Group <- factor(PerMemerge$Group)
PerMemerge$Sex <- factor(PerMemerge$Sex)
PerMemerge$Age <- as.numeric(PerMemerge$Age..yrs.)
# Column subset kept for the final sample-info export (positional indices;
# assumes the merged column order is stable -- TODO confirm).
DFout1 <- PerMemerge[, c(1, 6, 4, 9, 13, 10, 11, 12)]
##############################################################################
# Read alignment info from Bismark (deduplication stats + general stats),
# join it with the subject metadata, and keep the QC columns of interest.
##############################################################################
DedupReads <- read.table("multiqc_bismark_dedup.txt",header=T)
Coverage <- read.delim("multiqc_general_stats.txt",header=T)
DF <- merge(DedupReads,Coverage,by="Sample")
# Strip the "_filtered" suffix so sample IDs match the metadata filenames.
DF$Sample <- gsub("_filtered","",DF$Sample)
DFMemerge <- merge(DF,sampleinfo,by.x="Sample",by.y="filename",all.x=T)
DFMemerge$Group <- factor(DFMemerge$Group)
DFMemerge$Sex <- factor(DFMemerge$Sex)
DFMemerge$Age <- as.numeric(DFMemerge$Age..yrs.)
# Keep only the QC metrics used downstream.
DFtest <- DFMemerge %>% dplyr::select(Sample,Group,Sex,Age,aligned_reads,Bismark_mqc.generalstats.bismark.percent_aligned,
                                      dedup_reads,dup_reads_percent,Bismark_mqc.generalstats.bismark.total_c,
                                      Bismark_mqc.generalstats.bismark.C_coverage)
# Shorten the long MultiQC column names.
colnames(DFtest)[colnames(DFtest)=="Bismark_mqc.generalstats.bismark.percent_aligned"] <- c("percent_aligned")
colnames(DFtest)[colnames(DFtest)=="Bismark_mqc.generalstats.bismark.C_coverage"] <- c("Coverage")
colnames(DFtest)[colnames(DFtest)=="Bismark_mqc.generalstats.bismark.total_c"] <- c("TotalCs")
colnames(DFtest)
# Master table: global methylation metrics joined with alignment QC metrics.
DFmaster <- merge(PerMemerge,DFtest,by="Sample")
##############################################################################
# ANOVA: does global methylation differ by diagnosis group, controlling for
# sex, age, and sequencing coverage? Type III (marginal) sums of squares.
##############################################################################
# Model with sex, Age and coverage as covariates; the ".x" suffixes come from
# the PerMemerge/DFtest merge above.
tmp <- lme(percentmeth_cpg ~ Group.x+Sex.x+Age.x+Coverage, ~1|Sample, data = DFmaster)
# Model includes a random intercept per subject (Sample).
anova <- anova(tmp, type = "marginal") #marginal gives Type 3 SS for ANOVA
# NOTE(review): the local "anova" data frame shadows stats::anova; this works
# because R resolves names used in call position to functions, but it is
# fragile -- consider renaming.
anova$factors <- rownames(anova)
# Drop the intercept row; keep only the effect rows.
anova <- anova[!grepl('(Intercept)', anova$factors),]
anova$measure <- c("Percent mCG/CG")
anova
# #posthoc tests for group at each timepoint (disabled)
# refgrid <- ref.grid(tmp)
# tmp2 <- lsmeans(refgrid, ~Group|Sex)
# tmp3 <- summary(pairs(tmp2, adjust = "none"))
# tmp3 <- as.data.frame(tmp3)
# #adjust p value for all comparisons:
# tmp3$p.adjust <- p.adjust(tmp3$p.value,method = "BH") #BH adjusted p values
# tmp3$measure <- c("Percent mCG/CG")
# tmp3
#
# Same model for global mCH/CH.
tmp2 <- lme(percent_chh_meth ~ Group.x+Sex.x+Age.x+Coverage, ~1|Sample, data = DFmaster)
# Model includes a random intercept per subject (Sample).
anova2 <- anova(tmp2, type = "marginal") #marginal gives Type 3 SS for ANOVA
anova2$factors <- rownames(anova2)
anova2 <- anova2[!grepl('(Intercept)', anova2$factors),]
anova2$measure <- c("Percent mCH/CH")
anova2
# #posthoc tests for group at each timepoint (disabled)
# refgrid <- ref.grid(tmp2)
# PH2 <- lsmeans(refgrid, ~Group|Sex)
# PH2 <- summary(pairs(PH2, adjust = "none"))
# PH2 <- as.data.frame(PH2)
# #adjust p value for all comparisons:
# PH2$p.adjust <- p.adjust(PH2$p.value,method = "BH") #BH adjusted p values
# PH2$measure <- c("Percent mCH/CH")
# PH2
# Stack both ANOVA tables and export to Excel.
ANOVAout <- rbind(anova,anova2)
#BHout <- rbind(tmp3,PH2)
#write.xlsx(BHout, file = "ASD_GlobalPercentMethylationPostHocs.xlsx")
write.xlsx(ANOVAout, file = "ASD_RM-ANOVA_PercentMethylation.xlsx")
###############################################################################################
# Graphs: boxplots of global mCG/CG and mCH/CH per diagnosis group,
# with per-sample points colored by sex.
###############################################################################################
library(ggplot2)
library(cowplot)
# Reshape to long format: one row per (Sample, methylation type).
# Columns 12 and 11 are percentmeth_cpg and percent_chh_meth by position
# (assumes the PerMemerge column order is stable -- TODO confirm).
PerMemerge2 <- PerMemerge%>%
  group_by(Sample,Group,Sex) %>% gather(Type,PercentMeth,c(12,11))
# Human-readable facet labels and group names.
PerMemerge2$Type <- gsub("percent_chh_meth","Percent mCH/CH",PerMemerge2$Type)
PerMemerge2$Type <- gsub("percentmeth_cpg","Percent mCG/CG",PerMemerge2$Type)
PerMemerge2$Group <- gsub("Control_ASD","Control",PerMemerge2$Group)
PerMemerge2$Group <- as.factor(PerMemerge2$Group)
PerMemerge2$Group <- relevel(PerMemerge2$Group,ref="Control")
pdf("ASD_GLobaMethylation.pdf", height = 3, width =6) # open the PDF device for the boxplot figure
cbPalette <- c("#0072B2","#D55E00")  # colorblind-safe blue/orange
ggplot(PerMemerge2, aes(x=Group, y=PercentMeth),group=Sex) +
  facet_wrap(~Type, scales = "free")+
  # Boxplot whiskers drawn at the 5th/95th percentiles instead of the default.
  stat_summary(geom = "boxplot",
               fun.data = function(x) setNames(quantile(x, c(0.05, 0.25, 0.5, 0.75, 0.95)), c("ymin", "lower", "middle", "upper", "ymax")),
               position = "dodge", aes(fill=Group))+
  geom_point(position = position_dodge(width = 0.90),aes(group=Group,color=Sex)) +
  scale_fill_manual(values = cbPalette) +
  theme_cowplot(font_size = 15)+
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        strip.background = element_blank(),
        panel.border = element_rect(colour = "black"))+
  scale_x_discrete(name = "Diagnosis") + #,limits = order2)+
  scale_y_continuous(name = "Global Percent Methylation")+
  ggtitle("Global Percent Methylation")
dev.off()
##############################################################################
# Read alignment info from Bismark.
# NOTE(review): this section is a verbatim repeat of the earlier
# "Read alignment info" block; it rebuilds DFtest identically and could be
# removed once confirmed redundant.
##############################################################################
DedupReads <- read.table("multiqc_bismark_dedup.txt",header=T)
Coverage <- read.delim("multiqc_general_stats.txt",header=T)
DF <- merge(DedupReads,Coverage,by="Sample")
DF$Sample <- gsub("_filtered","",DF$Sample)
DFMemerge <- merge(DF,sampleinfo,by.x="Sample",by.y="filename",all.x=T)
DFMemerge$Group <- factor(DFMemerge$Group)
DFMemerge$Sex <- factor(DFMemerge$Sex)
DFMemerge$Age <- as.numeric(DFMemerge$Age..yrs.)
DFtest <- DFMemerge %>% dplyr::select(Sample,Group,Sex,Age,aligned_reads,Bismark_mqc.generalstats.bismark.percent_aligned,
                                      dedup_reads,dup_reads_percent,Bismark_mqc.generalstats.bismark.total_c,
                                      Bismark_mqc.generalstats.bismark.C_coverage)
colnames(DFtest)[colnames(DFtest)=="Bismark_mqc.generalstats.bismark.percent_aligned"] <- c("percent_aligned")
colnames(DFtest)[colnames(DFtest)=="Bismark_mqc.generalstats.bismark.C_coverage"] <- c("Coverage")
colnames(DFtest)[colnames(DFtest)=="Bismark_mqc.generalstats.bismark.total_c"] <- c("TotalCs")
colnames(DFtest)
##############################################################################
# One repeated-measures ANOVA (Type III / marginal SS) per alignment QC
# metric, modelling Group + Sex with a random intercept per Sample.
# NOTE(review): the original percent_aligned model was a syntax error
# ("~ Group+Sex+," had a trailing '+'); all six metrics now go through the
# same fixed-effects structure, Group + Sex, via the helper below.
##############################################################################
run_qc_anova <- function(metric, label, data) {
  # Build e.g. aligned_reads ~ Group + Sex from the metric's column name.
  fit <- lme(reformulate(c("Group", "Sex"), response = metric),
             random = ~ 1 | Sample, data = data)
  out <- anova(fit, type = "marginal")  # marginal gives Type 3 SS for ANOVA
  out$factors <- rownames(out)
  out <- out[!grepl("(Intercept)", out$factors), ]  # drop the intercept row
  out$measure <- label
  out
}

anova <- run_qc_anova("aligned_reads", "Bismark Aligned Reads", DFtest)
anova
anova1 <- run_qc_anova("percent_aligned", "Percent Aligned Reads", DFtest)
anova1
anova2 <- run_qc_anova("dedup_reads", "DeDuplicated Reads", DFtest)
anova2
anova3 <- run_qc_anova("dup_reads_percent", "Percent Duplicated Reads", DFtest)
anova3
anova4 <- run_qc_anova("Coverage", "Coverage", DFtest)
anova4
anova5 <- run_qc_anova("TotalCs", "Total C's Analyzed", DFtest)
anova5

# Stack all metric ANOVAs, tag the cohort, and export to Excel.
# NOTE(review): the original setwd path contained the "\ " escape (a parse
# error in R); the backslashes have been removed.
ANOVAout <- rbind(anova, anova1, anova2, anova3, anova4, anova5)
ANOVAout$cohort <- c("ASD")
setwd("/Users/annieciernia/Box Sync/LaSalle Lab/Experiments/humanASD_WGBSDMRs/WGBS_QC/ASD")
write.xlsx(ANOVAout, file = "ASD_WGBSQC_ANOVA.xlsx")

# Per-sample export: metadata joined with QC metrics and methylation columns.
mergeDF <- merge(sampleinfo, DFtest, by.y = "Sample", by.x = "filename", all.y = TRUE)
mergeDF2 <- merge(mergeDF, DFout1, by.x = "filename", by.y = "Sample")
write.xlsx(mergeDF2, file = "ASD_WGBS_sampleinfo.xlsx")
##############################################################################
# Demographic checks for the ASD cohort: do PMI and Age differ by diagnosis
# group / sex, and does PMI correlate with global mCG/CG methylation?
##############################################################################
ASDinfo <- sampleinfo %>% filter(cohort == 1)  # ASD cohort only
ASDinfo$PMI <- as.numeric(ASDinfo$PMI)
DFtest2 <- merge(ASDinfo, DFtest, by.x = "filename", by.y = "Sample")

# Type III (marginal) ANOVA with a random intercept per subject; the ".x"
# suffixes on Group/Sex come from the merge above.
demo_anova <- function(fml, label) {
  fit <- lme(fml, random = ~ 1 | filename, data = DFtest2)
  out <- anova(fit, type = "marginal")  # marginal gives Type 3 SS for ANOVA
  out$factors <- rownames(out)
  out <- out[!grepl("(Intercept)", out$factors), ]  # drop the intercept row
  out$measure <- label
  out
}

anova5 <- demo_anova(PMI ~ Group.x + Sex.x, "PMI")
anova5
write.xlsx(anova5, "ASD_PMI_ANOVA.xlsx")

# Age: recompute from the raw Age..yrs. column before modelling.
DFtest2$Age <- as.numeric(DFtest2$Age..yrs.)
anova5 <- demo_anova(Age ~ Group.x + Sex.x, "Age")
anova5
write.xlsx(anova5, "ASD_Age_ANOVA.xlsx")

# Correlation between global mCG/CG methylation and PMI (ASD cohort only).
DFmaster <- merge(PerMemerge, DFtest, by = "Sample")
DFmasterASD <- DFmaster %>% filter(cohort == 1)
# NOTE(review): the original referenced DFmasterASD$percent_cpg_meth, a column
# never created anywhere in this script (it would be NULL); the global mCG/CG
# column is named percentmeth_cpg.
corrMEPMI <- cor.test(DFmasterASD$PMI, DFmasterASD$percentmeth_cpg, method = "pearson")
coroutput <- data.frame(comparison = c("Pearson Correlation mCG/CG and PMI"),
                        r = corrMEPMI$estimate,
                        pvalue = corrMEPMI$p.value)
write.xlsx(coroutput, "PearsonCorl_PMI_mCG.xlsx")
|
ce940c13094e364725e9c8d945512abae8e88522
|
d546952d79f8fbbf08942ffcc4fe08450760fae4
|
/changebasis.R
|
5490c832da71e6fb55e2d4335c57c9c380e00101
|
[] |
no_license
|
georgercarder/New-York-City-Taxi-Trip-Duration
|
7c555b6e18d2b80bd37ec870397cab5bf49adec7
|
ee753e51d016cb2e1a391c4359dde7595eef606e
|
refs/heads/master
| 2021-01-20T22:05:16.884847
| 2017-08-29T19:33:44
| 2017-08-29T19:33:44
| 101,797,939
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 875
|
r
|
changebasis.R
|
#!/bin/Rscript
# Re-express pickup and dropoff coordinates in a shifted, rotated basis.
# New origin: E 14th St and 5th Ave; axes rotated by pi/5 radians.
# Comment in the original says basis units are 0.1 mile -- TODO confirm.

# Shift the longitude/latitude origin to E 14th and 5th Ave.
orgnshft.x <- function(x1) (x1 + 73.99363875389099)
orgnshft.y <- function(x2) (x2 - 40.736031073752805)

# Rotation transformation by p radians.
p <- pi / 5
cp <- cos(p)
sp <- sin(p)
# Kept for compatibility: rotate a single point (A, B); returns c(x', y').
rotate <- function(A, B) c((cp * A - sp * B), (sp * A + cp * B))

# NOTE(review): the original filled the eight output vectors one row at a
# time in a while loop; every operation is elementwise, so the whole
# transform is vectorized here (identical results, O(n) without R-level loop).
# Origin-shifted ("ob") coordinates:
pu.x.ob <- orgnshft.x(train.s$pickup_x)
pu.y.ob <- orgnshft.y(train.s$pickup_y)
drop.x.ob <- orgnshft.x(train.s$dropoff_x)
drop.y.ob <- orgnshft.y(train.s$dropoff_y)

# Rotated ("tb") coordinates:
pu.x.tb <- cp * pu.x.ob - sp * pu.y.ob
pu.y.tb <- sp * pu.x.ob + cp * pu.y.ob
drop.x.tb <- cp * drop.x.ob - sp * drop.y.ob
drop.y.tb <- sp * drop.x.ob + cp * drop.y.ob
|
fde05cc999627b23c0bdd4b3922ef585767e5456
|
d5626554407c0515919864c622a2841f662bac54
|
/man/get_segmentation.Rd
|
db350f499be3dbf8ad4456f8b261be80ea6bfc68
|
[
"MIT"
] |
permissive
|
barefootbiology/heyexr
|
45aa456a0b5740a6e926359fe1d20cbceaa4257c
|
1f41e9120f7eae02b337d2f5b071f8a4a887d27f
|
refs/heads/main
| 2022-08-13T02:50:24.102333
| 2022-07-08T21:32:31
| 2022-07-08T21:32:31
| 75,117,687
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 404
|
rd
|
get_segmentation.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_segmentation.R
\name{get_segmentation}
\alias{get_segmentation}
\title{Get the segmentation data from an OCT object}
\usage{
get_segmentation(oct)
}
\arguments{
\item{oct}{OCT list object}
}
\value{
a tbl_df of the segmentation data
}
\description{
Retrieve the automated Heidelberg segmentation data from an OCT object
}
|
1a9a0f292b4df5ea2c23009710fc39effa394a2c
|
5dcacfc095c4eb6afcec840ee8a55e7794e277f9
|
/R/00_load-packages.R
|
b1b92b4d09690531a7106d3eb83d94f6c59db0cb
|
[] |
no_license
|
skdunnigan/chla-gtm-temp-interference
|
15ddf0ce7348e127346e0214757d52d7c956ff9c
|
a7dcccec56f9b9f23c8457229f1523ad9fbf99b9
|
refs/heads/main
| 2023-07-02T18:41:54.345749
| 2021-08-03T17:44:50
| 2021-08-03T17:44:50
| 375,429,392
| 0
| 0
| null | 2021-08-03T17:44:51
| 2021-06-09T16:59:05
|
R
|
UTF-8
|
R
| false
| false
| 572
|
r
|
00_load-packages.R
|
# 00 install-packages --------------------------------------------------------
# run this code chunk if you need to install the packages into your R console
# packages <- c('tidyverse', 'ggpubr', 'here', 'janitor', 'readxl') # all packages
#
# install.packages(packages) # install all packages
#
# rm(packages) # remove packages object from environment
# NOTE(review): the install list above omits broom, kableExtra, and patchwork,
# which are loaded below -- add them before running the install chunk.
# 01 load-packages -----------------------------------------------------------
library(tidyverse)
library(ggpubr)
library(here)
library(janitor)
library(readxl)
library(broom)
library(kableExtra)
library(patchwork)
|
f0009b7035ee746893e3e03f47340a9cf04e9995
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/TDMR/demo/demo02sonar.r
|
0f128caa81d972b7a2ed94783855f9a63ac01ea7
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 519
|
r
|
demo02sonar.r
|
#*# This demo shows a level-2 example (SPOT tuning on task SONAR)
## load package and set working directory (dir with .apd, .conf and main_*.r file):
path <- paste(find.package("TDMR"), "demo02sonar",sep="/");
#path <- paste("../inst", "demo02sonar",sep="/");
# TDMR control list: the task script to run and the tuning configuration file.
tdm=list(mainFile="main_sonar.r"
,runList="sonar_01.conf"
);
# "auto" lets SPOT pick the tuning step automatically.
spotStep = "auto";
# Source the task definition, then kick off the big tuning loop.
source(paste(path,tdm$mainFile,sep="/"));
source(paste(path,"start_bigLoop.r",sep="/"),chdir=TRUE); # change dir to 'path' while sourcing
|
79505310c46386f0d1ba2e491a506d38d5ee4918
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/paws/R/rekognition_operations.R
|
8e0e03518be502617b7ede005550185494c8df58
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| false
| 302,781
|
r
|
rekognition_operations.R
|
# This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common get_config new_operation new_request send_request
#' @include rekognition_service.R
NULL
#' Associates one or more faces with an existing UserID
#'
#' @description
#' Associates one or more faces with an existing UserID. Takes an array of
#' `FaceIds`. Each `FaceId` that are present in the `FaceIds` list is
#' associated with the provided UserID. The maximum number of total
#' `FaceIds` per UserID is 100.
#'
#' The `UserMatchThreshold` parameter specifies the minimum user match
#' confidence required for the face to be associated with a UserID that has
#' at least one `FaceID` already associated. This ensures that the
#' `FaceIds` are associated with the right UserID. The value ranges from
#' 0-100 and default value is 75.
#'
#' If successful, an array of `AssociatedFace` objects containing the
#' associated `FaceIds` is returned. If a given face is already associated
#' with the given `UserID`, it will be ignored and will not be returned in
#' the response. If a given face is already associated to a different
#' `UserID`, isn't found in the collection, doesn’t meet the
#' `UserMatchThreshold`, or there are already 100 faces associated with the
#' `UserID`, it will be returned as part of an array of
#' `UnsuccessfulFaceAssociations.`
#'
#' The `UserStatus` reflects the status of an operation which updates a
#' UserID representation with a list of given faces. The `UserStatus` can
#' be:
#'
#' - ACTIVE - All associations or disassociations of FaceID(s) for a
#' UserID are complete.
#'
#' - CREATED - A UserID has been created, but has no FaceID(s) associated
#' with it.
#'
#' - UPDATING - A UserID is being updated and there are current
#' associations or disassociations of FaceID(s) taking place.
#'
#' @usage
#' rekognition_associate_faces(CollectionId, UserId, FaceIds,
#' UserMatchThreshold, ClientRequestToken)
#'
#' @param CollectionId [required] The ID of an existing collection containing the UserID.
#' @param UserId [required] The ID for the existing UserID.
#' @param FaceIds [required] An array of FaceIDs to associate with the UserID.
#' @param UserMatchThreshold An optional value specifying the minimum confidence in the UserID match
#' to return. The default value is 75.
#' @param ClientRequestToken Idempotent token used to identify the request to
#' [`associate_faces`][rekognition_associate_faces]. If you use the same
#' token with multiple [`associate_faces`][rekognition_associate_faces]
#' requests, the same response is returned. Use ClientRequestToken to
#' prevent the same request from being processed more than once.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' AssociatedFaces = list(
#' list(
#' FaceId = "string"
#' )
#' ),
#' UnsuccessfulFaceAssociations = list(
#' list(
#' FaceId = "string",
#' UserId = "string",
#' Confidence = 123.0,
#' Reasons = list(
#' "FACE_NOT_FOUND"|"ASSOCIATED_TO_A_DIFFERENT_USER"|"LOW_MATCH_CONFIDENCE"
#' )
#' )
#' ),
#' UserStatus = "ACTIVE"|"UPDATING"|"CREATING"|"CREATED"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$associate_faces(
#' CollectionId = "string",
#' UserId = "string",
#' FaceIds = list(
#' "string"
#' ),
#' UserMatchThreshold = 123.0,
#' ClientRequestToken = "string"
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This operation associates one or more faces with an existing UserID.
#' svc$associate_faces(
#' ClientRequestToken = "550e8400-e29b-41d4-a716-446655440002",
#' CollectionId = "MyCollection",
#' FaceIds = list(
#' "f5817d37-94f6-4335-bfee-6cf79a3d806e",
#' "851cb847-dccc-4fea-9309-9f4805967855",
#' "35ebbb41-7f67-4263-908d-dd0ecba05ab9"
#' ),
#' UserId = "DemoUser",
#' UserMatchThreshold = 70L
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname rekognition_associate_faces
#'
#' @aliases rekognition_associate_faces
rekognition_associate_faces <- function(CollectionId, UserId, FaceIds, UserMatchThreshold = NULL, ClientRequestToken = NULL) {
  # Describe the AssociateFaces HTTP operation (Rekognition posts every call to "/").
  operation <- new_operation(
    name = "AssociateFaces",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal caller arguments into the request shape; the response shape is
  # filled in by the service layer after the call completes.
  req_shape <- .rekognition$associate_faces_input(
    CollectionId = CollectionId,
    UserId = UserId,
    FaceIds = FaceIds,
    UserMatchThreshold = UserMatchThreshold,
    ClientRequestToken = ClientRequestToken
  )
  resp_shape <- .rekognition$associate_faces_output()
  # Build a configured service client, then construct and execute the request.
  client <- .rekognition$service(get_config())
  send_request(new_request(client, operation, req_shape, resp_shape))
}
.rekognition$operations$associate_faces <- rekognition_associate_faces
#' Compares a face in the source input image with each of the 100 largest
#' faces detected in the target input image
#'
#' @description
#' Compares a face in the *source* input image with each of the 100 largest
#' faces detected in the *target* input image.
#'
#' If the source image contains multiple faces, the service detects the
#' largest face and compares it with each face detected in the target
#' image.
#'
#' CompareFaces uses machine learning algorithms, which are probabilistic.
#' A false negative is an incorrect prediction that a face in the target
#' image has a low similarity confidence score when compared to the face in
#' the source image. To reduce the probability of false negatives, we
#' recommend that you compare the target image against multiple source
#' images. If you plan to use [`compare_faces`][rekognition_compare_faces]
#' to make a decision that impacts an individual's rights, privacy, or
#' access to services, we recommend that you pass the result to a human for
#' review and further validation before taking action.
#'
#' You pass the input and target images either as base64-encoded image
#' bytes or as references to images in an Amazon S3 bucket. If you use the
#' AWS CLI to call Amazon Rekognition operations, passing image bytes isn't
#' supported. The image must be formatted as a PNG or JPEG file.
#'
#' In response, the operation returns an array of face matches ordered by
#' similarity score in descending order. For each face match, the response
#' provides a bounding box of the face, facial landmarks, pose details
#' (pitch, roll, and yaw), quality (brightness and sharpness), and
#' confidence value (indicating the level of confidence that the bounding
#' box contains a face). The response also provides a similarity score,
#' which indicates how closely the faces match.
#'
#' By default, only faces with a similarity score of greater than or equal
#' to 80% are returned in the response. You can change this value by
#' specifying the `SimilarityThreshold` parameter.
#'
#' [`compare_faces`][rekognition_compare_faces] also returns an array of
#' faces that don't match the source image. For each face, it returns a
#' bounding box, confidence value, landmarks, pose details, and quality.
#' The response also returns information about the face in the source
#' image, including the bounding box of the face and confidence value.
#'
#' The `QualityFilter` input parameter allows you to filter out detected
#' faces that don’t meet a required quality bar. The quality bar is based
#' on a variety of common use cases. Use `QualityFilter` to set the quality
#' bar by specifying `LOW`, `MEDIUM`, or `HIGH`. If you do not want to
#' filter detected faces, specify `NONE`. The default value is `NONE`.
#'
#' If the image doesn't contain Exif metadata,
#' [`compare_faces`][rekognition_compare_faces] returns orientation
#' information for the source and target images. Use these values to
#' display the images with the correct image orientation.
#'
#' If no faces are detected in the source or target images,
#' [`compare_faces`][rekognition_compare_faces] returns an
#' `InvalidParameterException` error.
#'
#' This is a stateless API operation. That is, data returned by this
#' operation doesn't persist.
#'
#' For an example, see Comparing Faces in Images in the Amazon Rekognition
#' Developer Guide.
#'
#' This operation requires permissions to perform the
#' `rekognition:CompareFaces` action.
#'
#' @usage
#' rekognition_compare_faces(SourceImage, TargetImage, SimilarityThreshold,
#' QualityFilter)
#'
#' @param SourceImage [required] The input image as base64-encoded bytes or an S3 object. If you use the
#' AWS CLI to call Amazon Rekognition operations, passing base64-encoded
#' image bytes is not supported.
#'
#' If you are using an AWS SDK to call Amazon Rekognition, you might not
#' need to base64-encode image bytes passed using the `Bytes` field. For
#' more information, see Images in the Amazon Rekognition developer guide.
#' @param TargetImage [required] The target image as base64-encoded bytes or an S3 object. If you use the
#' AWS CLI to call Amazon Rekognition operations, passing base64-encoded
#' image bytes is not supported.
#'
#' If you are using an AWS SDK to call Amazon Rekognition, you might not
#' need to base64-encode image bytes passed using the `Bytes` field. For
#' more information, see Images in the Amazon Rekognition developer guide.
#' @param SimilarityThreshold The minimum level of confidence in the face matches that a match must
#' meet to be included in the `FaceMatches` array.
#' @param QualityFilter A filter that specifies a quality bar for how much filtering is done to
#' identify faces. Filtered faces aren't compared. If you specify `AUTO`,
#' Amazon Rekognition chooses the quality bar. If you specify `LOW`,
#' `MEDIUM`, or `HIGH`, filtering removes all faces that don’t meet the
#' chosen quality bar. The quality bar is based on a variety of common use
#' cases. Low-quality detections can occur for a number of reasons. Some
#' examples are an object that's misidentified as a face, a face that's too
#' blurry, or a face with a pose that's too extreme to use. If you specify
#' `NONE`, no filtering is performed. The default value is `NONE`.
#'
#' To use quality filtering, the collection you are using must be
#' associated with version 3 of the face model or higher.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' SourceImageFace = list(
#' BoundingBox = list(
#' Width = 123.0,
#' Height = 123.0,
#' Left = 123.0,
#' Top = 123.0
#' ),
#' Confidence = 123.0
#' ),
#' FaceMatches = list(
#' list(
#' Similarity = 123.0,
#' Face = list(
#' BoundingBox = list(
#' Width = 123.0,
#' Height = 123.0,
#' Left = 123.0,
#' Top = 123.0
#' ),
#' Confidence = 123.0,
#' Landmarks = list(
#' list(
#' Type = "eyeLeft"|"eyeRight"|"nose"|"mouthLeft"|"mouthRight"|"leftEyeBrowLeft"|"leftEyeBrowRight"|"leftEyeBrowUp"|"rightEyeBrowLeft"|"rightEyeBrowRight"|"rightEyeBrowUp"|"leftEyeLeft"|"leftEyeRight"|"leftEyeUp"|"leftEyeDown"|"rightEyeLeft"|"rightEyeRight"|"rightEyeUp"|"rightEyeDown"|"noseLeft"|"noseRight"|"mouthUp"|"mouthDown"|"leftPupil"|"rightPupil"|"upperJawlineLeft"|"midJawlineLeft"|"chinBottom"|"midJawlineRight"|"upperJawlineRight",
#' X = 123.0,
#' Y = 123.0
#' )
#' ),
#' Pose = list(
#' Roll = 123.0,
#' Yaw = 123.0,
#' Pitch = 123.0
#' ),
#' Quality = list(
#' Brightness = 123.0,
#' Sharpness = 123.0
#' ),
#' Emotions = list(
#' list(
#' Type = "HAPPY"|"SAD"|"ANGRY"|"CONFUSED"|"DISGUSTED"|"SURPRISED"|"CALM"|"UNKNOWN"|"FEAR",
#' Confidence = 123.0
#' )
#' ),
#' Smile = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' )
#' )
#' )
#' ),
#' UnmatchedFaces = list(
#' list(
#' BoundingBox = list(
#' Width = 123.0,
#' Height = 123.0,
#' Left = 123.0,
#' Top = 123.0
#' ),
#' Confidence = 123.0,
#' Landmarks = list(
#' list(
#' Type = "eyeLeft"|"eyeRight"|"nose"|"mouthLeft"|"mouthRight"|"leftEyeBrowLeft"|"leftEyeBrowRight"|"leftEyeBrowUp"|"rightEyeBrowLeft"|"rightEyeBrowRight"|"rightEyeBrowUp"|"leftEyeLeft"|"leftEyeRight"|"leftEyeUp"|"leftEyeDown"|"rightEyeLeft"|"rightEyeRight"|"rightEyeUp"|"rightEyeDown"|"noseLeft"|"noseRight"|"mouthUp"|"mouthDown"|"leftPupil"|"rightPupil"|"upperJawlineLeft"|"midJawlineLeft"|"chinBottom"|"midJawlineRight"|"upperJawlineRight",
#' X = 123.0,
#' Y = 123.0
#' )
#' ),
#' Pose = list(
#' Roll = 123.0,
#' Yaw = 123.0,
#' Pitch = 123.0
#' ),
#' Quality = list(
#' Brightness = 123.0,
#' Sharpness = 123.0
#' ),
#' Emotions = list(
#' list(
#' Type = "HAPPY"|"SAD"|"ANGRY"|"CONFUSED"|"DISGUSTED"|"SURPRISED"|"CALM"|"UNKNOWN"|"FEAR",
#' Confidence = 123.0
#' )
#' ),
#' Smile = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' )
#' )
#' ),
#' SourceImageOrientationCorrection = "ROTATE_0"|"ROTATE_90"|"ROTATE_180"|"ROTATE_270",
#' TargetImageOrientationCorrection = "ROTATE_0"|"ROTATE_90"|"ROTATE_180"|"ROTATE_270"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$compare_faces(
#' SourceImage = list(
#' Bytes = raw,
#' S3Object = list(
#' Bucket = "string",
#' Name = "string",
#' Version = "string"
#' )
#' ),
#' TargetImage = list(
#' Bytes = raw,
#' S3Object = list(
#' Bucket = "string",
#' Name = "string",
#' Version = "string"
#' )
#' ),
#' SimilarityThreshold = 123.0,
#' QualityFilter = "NONE"|"AUTO"|"LOW"|"MEDIUM"|"HIGH"
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This operation compares the largest face detected in the source image
#' # with each face detected in the target image.
#' svc$compare_faces(
#' SimilarityThreshold = 90L,
#' SourceImage = list(
#' S3Object = list(
#' Bucket = "mybucket",
#' Name = "mysourceimage"
#' )
#' ),
#' TargetImage = list(
#' S3Object = list(
#' Bucket = "mybucket",
#' Name = "mytargetimage"
#' )
#' )
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname rekognition_compare_faces
#'
#' @aliases rekognition_compare_faces
rekognition_compare_faces <- function(SourceImage, TargetImage, SimilarityThreshold = NULL, QualityFilter = NULL) {
  # Describe the CompareFaces HTTP operation (Rekognition posts every call to "/").
  operation <- new_operation(
    name = "CompareFaces",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal caller arguments into the request shape; the response shape is
  # filled in by the service layer after the call completes.
  req_shape <- .rekognition$compare_faces_input(
    SourceImage = SourceImage,
    TargetImage = TargetImage,
    SimilarityThreshold = SimilarityThreshold,
    QualityFilter = QualityFilter
  )
  resp_shape <- .rekognition$compare_faces_output()
  # Build a configured service client, then construct and execute the request.
  client <- .rekognition$service(get_config())
  send_request(new_request(client, operation, req_shape, resp_shape))
}
.rekognition$operations$compare_faces <- rekognition_compare_faces
#' Copies a version of an Amazon Rekognition Custom Labels model from a
#' source project to a destination project
#'
#' @description
#' Copies a version of an Amazon Rekognition Custom Labels model from a
#' source project to a destination project. The source and destination
#' projects can be in different AWS accounts but must be in the same AWS
#' Region. You can't copy a model to another AWS service.
#'
#' To copy a model version to a different AWS account, you need to create a
#' resource-based policy known as a *project policy*. You attach the
#' project policy to the source project by calling
#' [`put_project_policy`][rekognition_put_project_policy]. The project
#' policy gives permission to copy the model version from a trusting AWS
#' account to a trusted account.
#'
#' For more information creating and attaching a project policy, see
#' Attaching a project policy (SDK) in the *Amazon Rekognition Custom
#' Labels Developer Guide*.
#'
#' If you are copying a model version to a project in the same AWS account,
#' you don't need to create a project policy.
#'
#' To copy a model, the destination project, source project, and source
#' model version must already exist.
#'
#' Copying a model version takes a while to complete. To get the current
#' status, call
#' [`describe_project_versions`][rekognition_describe_project_versions] and
#' check the value of `Status` in the ProjectVersionDescription object. The
#' copy operation has finished when the value of `Status` is
#' `COPYING_COMPLETED`.
#'
#' This operation requires permissions to perform the
#' `rekognition:CopyProjectVersion` action.
#'
#' @usage
#' rekognition_copy_project_version(SourceProjectArn,
#' SourceProjectVersionArn, DestinationProjectArn, VersionName,
#' OutputConfig, Tags, KmsKeyId)
#'
#' @param SourceProjectArn [required] The ARN of the source project in the trusting AWS account.
#' @param SourceProjectVersionArn [required] The ARN of the model version in the source project that you want to copy
#' to a destination project.
#' @param DestinationProjectArn [required] The ARN of the project in the trusted AWS account that you want to copy
#' the model version to.
#' @param VersionName [required] A name for the version of the model that's copied to the destination
#' project.
#' @param OutputConfig [required] The S3 bucket and folder location where the training output for the
#' source model version is placed.
#' @param Tags The key-value tags to assign to the model version.
#' @param KmsKeyId The identifier for your AWS Key Management Service key (AWS KMS key).
#' You can supply the Amazon Resource Name (ARN) of your KMS key, the ID of
#' your KMS key, an alias for your KMS key, or an alias ARN. The key is
#' used to encrypt training results and manifest files written to the
#' output Amazon S3 bucket (`OutputConfig`).
#'
#' If you choose to use your own KMS key, you need the following
#' permissions on the KMS key.
#'
#' - kms:CreateGrant
#'
#' - kms:DescribeKey
#'
#' - kms:GenerateDataKey
#'
#' - kms:Decrypt
#'
#' If you don't specify a value for `KmsKeyId`, images copied into the
#' service are encrypted using a key that AWS owns and manages.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' ProjectVersionArn = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$copy_project_version(
#' SourceProjectArn = "string",
#' SourceProjectVersionArn = "string",
#' DestinationProjectArn = "string",
#' VersionName = "string",
#' OutputConfig = list(
#' S3Bucket = "string",
#' S3KeyPrefix = "string"
#' ),
#' Tags = list(
#' "string"
#' ),
#' KmsKeyId = "string"
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This operation copies a version of an Amazon Rekognition Custom Labels
#' # model from a source project to a destination project.
#' svc$copy_project_version(
#' DestinationProjectArn = "arn:aws:rekognition:us-east-1:555555555555:proje...",
#' KmsKeyId = "arn:1234abcd-12ab-34cd-56ef-1234567890ab",
#' OutputConfig = list(
#' S3Bucket = "bucket-name",
#' S3KeyPrefix = "path_to_folder"
#' ),
#' SourceProjectArn = "arn:aws:rekognition:us-east-1:111122223333:project/So...",
#' SourceProjectVersionArn = "arn:aws:rekognition:us-east-1:111122223333:pro...",
#' Tags = list(
#' key1 = "val1"
#' ),
#' VersionName = "DestinationVersionName_cross_account"
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname rekognition_copy_project_version
#'
#' @aliases rekognition_copy_project_version
rekognition_copy_project_version <- function(SourceProjectArn, SourceProjectVersionArn, DestinationProjectArn, VersionName, OutputConfig, Tags = NULL, KmsKeyId = NULL) {
  # Describe the CopyProjectVersion HTTP operation (Rekognition posts every call to "/").
  operation <- new_operation(
    name = "CopyProjectVersion",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal caller arguments into the request shape; the response shape is
  # filled in by the service layer after the call completes.
  req_shape <- .rekognition$copy_project_version_input(
    SourceProjectArn = SourceProjectArn,
    SourceProjectVersionArn = SourceProjectVersionArn,
    DestinationProjectArn = DestinationProjectArn,
    VersionName = VersionName,
    OutputConfig = OutputConfig,
    Tags = Tags,
    KmsKeyId = KmsKeyId
  )
  resp_shape <- .rekognition$copy_project_version_output()
  # Build a configured service client, then construct and execute the request.
  client <- .rekognition$service(get_config())
  send_request(new_request(client, operation, req_shape, resp_shape))
}
.rekognition$operations$copy_project_version <- rekognition_copy_project_version
#' Creates a collection in an AWS Region
#'
#' @description
#' Creates a collection in an AWS Region. You can add faces to the
#' collection using the [`index_faces`][rekognition_index_faces] operation.
#'
#' For example, you might create collections, one for each of your
#' application users. A user can then index faces using the
#' [`index_faces`][rekognition_index_faces] operation and persist results
#' in a specific collection. Then, a user can search the collection for
#' faces in the user-specific container.
#'
#' When you create a collection, it is associated with the latest version
#' of the face model version.
#'
#' Collection names are case-sensitive.
#'
#' This operation requires permissions to perform the
#' `rekognition:CreateCollection` action. If you want to tag your
#' collection, you also require permission to perform the
#' `rekognition:TagResource` operation.
#'
#' @usage
#' rekognition_create_collection(CollectionId, Tags)
#'
#' @param CollectionId [required] ID for the collection that you are creating.
#' @param Tags A set of tags (key-value pairs) that you want to attach to the
#' collection.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' StatusCode = 123,
#' CollectionArn = "string",
#' FaceModelVersion = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$create_collection(
#' CollectionId = "string",
#' Tags = list(
#' "string"
#' )
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This operation creates a Rekognition collection for storing image data.
#' svc$create_collection(
#' CollectionId = "myphotos"
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname rekognition_create_collection
#'
#' @aliases rekognition_create_collection
rekognition_create_collection <- function(CollectionId, Tags = NULL) {
  # Describe the CreateCollection HTTP operation (Rekognition posts every call to "/").
  operation <- new_operation(
    name = "CreateCollection",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal caller arguments into the request shape; the response shape is
  # filled in by the service layer after the call completes.
  req_shape <- .rekognition$create_collection_input(
    CollectionId = CollectionId,
    Tags = Tags
  )
  resp_shape <- .rekognition$create_collection_output()
  # Build a configured service client, then construct and execute the request.
  client <- .rekognition$service(get_config())
  send_request(new_request(client, operation, req_shape, resp_shape))
}
.rekognition$operations$create_collection <- rekognition_create_collection
#' Creates a new Amazon Rekognition Custom Labels dataset
#'
#' @description
#' Creates a new Amazon Rekognition Custom Labels dataset. You can create a
#' dataset by using an Amazon Sagemaker format manifest file or by copying
#' an existing Amazon Rekognition Custom Labels dataset.
#'
#' To create a training dataset for a project, specify `train` for the
#' value of `DatasetType`. To create the test dataset for a project,
#' specify `test` for the value of `DatasetType`.
#'
#' The response from [`create_dataset`][rekognition_create_dataset] is the
#' Amazon Resource Name (ARN) for the dataset. Creating a dataset takes a
#' while to complete. Use
#' [`describe_dataset`][rekognition_describe_dataset] to check the current
#' status. The dataset created successfully if the value of `Status` is
#' `CREATE_COMPLETE`.
#'
#' To check if any non-terminal errors occurred, call
#' [`list_dataset_entries`][rekognition_list_dataset_entries] and check for
#' the presence of `errors` lists in the JSON Lines.
#'
#' Dataset creation fails if a terminal error occurs (`Status` =
#' `CREATE_FAILED`). Currently, you can't access the terminal error
#' information.
#'
#' For more information, see Creating dataset in the *Amazon Rekognition
#' Custom Labels Developer Guide*.
#'
#' This operation requires permissions to perform the
#' `rekognition:CreateDataset` action. If you want to copy an existing
#' dataset, you also require permission to perform the
#' `rekognition:ListDatasetEntries` action.
#'
#' @usage
#' rekognition_create_dataset(DatasetSource, DatasetType, ProjectArn)
#'
#' @param DatasetSource The source files for the dataset. You can specify the ARN of an existing
#' dataset or specify the Amazon S3 bucket location of an Amazon Sagemaker
#' format manifest file. If you don't specify `datasetSource`, an empty
#' dataset is created. To add labeled images to the dataset, You can use
#' the console or call
#' [`update_dataset_entries`][rekognition_update_dataset_entries].
#' @param DatasetType [required] The type of the dataset. Specify `train` to create a training dataset.
#' Specify `test` to create a test dataset.
#' @param ProjectArn [required] The ARN of the Amazon Rekognition Custom Labels project to which you
#' want to assign the dataset.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' DatasetArn = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$create_dataset(
#' DatasetSource = list(
#' GroundTruthManifest = list(
#' S3Object = list(
#' Bucket = "string",
#' Name = "string",
#' Version = "string"
#' )
#' ),
#' DatasetArn = "string"
#' ),
#' DatasetType = "TRAIN"|"TEST",
#' ProjectArn = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_create_dataset
#'
#' @aliases rekognition_create_dataset
rekognition_create_dataset <- function(DatasetSource = NULL, DatasetType, ProjectArn) {
  # Describe the CreateDataset HTTP operation (Rekognition posts every call to "/").
  operation <- new_operation(
    name = "CreateDataset",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal caller arguments into the request shape; the response shape is
  # filled in by the service layer after the call completes.
  req_shape <- .rekognition$create_dataset_input(
    DatasetSource = DatasetSource,
    DatasetType = DatasetType,
    ProjectArn = ProjectArn
  )
  resp_shape <- .rekognition$create_dataset_output()
  # Build a configured service client, then construct and execute the request.
  client <- .rekognition$service(get_config())
  send_request(new_request(client, operation, req_shape, resp_shape))
}
.rekognition$operations$create_dataset <- rekognition_create_dataset
#' This API operation initiates a Face Liveness session
#'
#' @description
#' This API operation initiates a Face Liveness session. It returns a
#' `SessionId`, which you can use to start streaming Face Liveness video
#' and get the results for a Face Liveness session. You can use the
#' `OutputConfig` option in the Settings parameter to provide an Amazon S3
#' bucket location. The Amazon S3 bucket stores reference images and audit
#' images. You can use `AuditImagesLimit` to limit the number of audit
#' images returned. This number is between 0 and 4. By default, it is set
#' to 0. The limit is best effort and based on the duration of the
#' selfie-video.
#'
#' @usage
#' rekognition_create_face_liveness_session(KmsKeyId, Settings,
#' ClientRequestToken)
#'
#' @param KmsKeyId The identifier for your AWS Key Management Service key (AWS KMS key).
#' Used to encrypt audit images and reference images.
#' @param Settings A session settings object. It contains settings for the operation to be
#' performed. For Face Liveness, it accepts `OutputConfig` and
#' `AuditImagesLimit`.
#' @param ClientRequestToken Idempotent token is used to recognize the Face Liveness request. If the
#' same token is used with multiple
#' [`create_face_liveness_session`][rekognition_create_face_liveness_session]
#' requests, the same session is returned. This token is employed to avoid
#' unintentionally creating the same session multiple times.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' SessionId = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$create_face_liveness_session(
#' KmsKeyId = "string",
#' Settings = list(
#' OutputConfig = list(
#' S3Bucket = "string",
#' S3KeyPrefix = "string"
#' ),
#' AuditImagesLimit = 123
#' ),
#' ClientRequestToken = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_create_face_liveness_session
#'
#' @aliases rekognition_create_face_liveness_session
rekognition_create_face_liveness_session <- function(KmsKeyId = NULL, Settings = NULL, ClientRequestToken = NULL) {
  # Describe the CreateFaceLivenessSession HTTP operation (Rekognition posts every call to "/").
  operation <- new_operation(
    name = "CreateFaceLivenessSession",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal caller arguments into the request shape; the response shape is
  # filled in by the service layer after the call completes.
  req_shape <- .rekognition$create_face_liveness_session_input(
    KmsKeyId = KmsKeyId,
    Settings = Settings,
    ClientRequestToken = ClientRequestToken
  )
  resp_shape <- .rekognition$create_face_liveness_session_output()
  # Build a configured service client, then construct and execute the request.
  client <- .rekognition$service(get_config())
  send_request(new_request(client, operation, req_shape, resp_shape))
}
.rekognition$operations$create_face_liveness_session <- rekognition_create_face_liveness_session
#' Creates a new Amazon Rekognition Custom Labels project
#'
#' @description
#' Creates a new Amazon Rekognition Custom Labels project. A project is a
#' group of resources (datasets, model versions) that you use to create and
#' manage Amazon Rekognition Custom Labels models.
#'
#' This operation requires permissions to perform the
#' `rekognition:CreateProject` action.
#'
#' @usage
#' rekognition_create_project(ProjectName)
#'
#' @param ProjectName [required] The name of the project to create.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' ProjectArn = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$create_project(
#' ProjectName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_create_project
#'
#' @aliases rekognition_create_project
rekognition_create_project <- function(ProjectName) {
  # Describe the CreateProject HTTP operation (Rekognition posts every call to "/").
  operation <- new_operation(
    name = "CreateProject",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller argument into the request shape; the response shape is
  # filled in by the service layer after the call completes.
  req_shape <- .rekognition$create_project_input(ProjectName = ProjectName)
  resp_shape <- .rekognition$create_project_output()
  # Build a configured service client, then construct and execute the request.
  client <- .rekognition$service(get_config())
  send_request(new_request(client, operation, req_shape, resp_shape))
}
.rekognition$operations$create_project <- rekognition_create_project
#' Creates a new version of a model and begins training
#'
#' @description
#' Creates a new version of a model and begins training. Models are managed
#' as part of an Amazon Rekognition Custom Labels project. The response
#' from [`create_project_version`][rekognition_create_project_version] is
#' an Amazon Resource Name (ARN) for the version of the model.
#'
#' Training uses the training and test datasets associated with the
#' project. For more information, see Creating training and test dataset in
#' the *Amazon Rekognition Custom Labels Developer Guide*.
#'
#' You can train a model in a project that doesn't have associated datasets
#' by specifying manifest files in the `TrainingData` and `TestingData`
#' fields.
#'
#' If you open the console after training a model with manifest files,
#' Amazon Rekognition Custom Labels creates the datasets for you using the
#' most recent manifest files. You can no longer train a model version for
#' the project by specifying manifest files.
#'
#' Instead of training with a project without associated datasets, we
#' recommend that you use the manifest files to create training and test
#' datasets for the project.
#'
#' Training takes a while to complete. You can get the current status by
#' calling
#' [`describe_project_versions`][rekognition_describe_project_versions].
#' Training completed successfully if the value of the `Status` field is
#' `TRAINING_COMPLETED`.
#'
#' If training fails, see Debugging a failed model training in the *Amazon
#' Rekognition Custom Labels* developer guide.
#'
#' Once training has successfully completed, call
#' [`describe_project_versions`][rekognition_describe_project_versions] to
#' get the training results and evaluate the model. For more information,
#' see Improving a trained Amazon Rekognition Custom Labels model in the
#' *Amazon Rekognition Custom Labels* developers guide.
#'
#' After evaluating the model, you start the model by calling
#' [`start_project_version`][rekognition_start_project_version].
#'
#' This operation requires permissions to perform the
#' `rekognition:CreateProjectVersion` action.
#'
#' @usage
#' rekognition_create_project_version(ProjectArn, VersionName,
#' OutputConfig, TrainingData, TestingData, Tags, KmsKeyId)
#'
#' @param ProjectArn [required] The ARN of the Amazon Rekognition Custom Labels project that manages the
#' model that you want to train.
#' @param VersionName [required] A name for the version of the model. This value must be unique.
#' @param OutputConfig [required] The Amazon S3 bucket location to store the results of training. The S3
#' bucket can be in any AWS account as long as the caller has
#' `s3:PutObject` permissions on the S3 bucket.
#' @param TrainingData Specifies an external manifest that the services uses to train the
#' model. If you specify `TrainingData` you must also specify
#' `TestingData`. The project must not have any associated datasets.
#' @param TestingData Specifies an external manifest that the service uses to test the model.
#' If you specify `TestingData` you must also specify `TrainingData`. The
#' project must not have any associated datasets.
#' @param Tags A set of tags (key-value pairs) that you want to attach to the model.
#' @param KmsKeyId The identifier for your AWS Key Management Service key (AWS KMS key).
#' You can supply the Amazon Resource Name (ARN) of your KMS key, the ID of
#' your KMS key, an alias for your KMS key, or an alias ARN. The key is
#' used to encrypt training and test images copied into the service for
#' model training. Your source images are unaffected. The key is also used
#' to encrypt training results and manifest files written to the output
#' Amazon S3 bucket (`OutputConfig`).
#'
#' If you choose to use your own KMS key, you need the following
#' permissions on the KMS key.
#'
#' - kms:CreateGrant
#'
#' - kms:DescribeKey
#'
#' - kms:GenerateDataKey
#'
#' - kms:Decrypt
#'
#' If you don't specify a value for `KmsKeyId`, images copied into the
#' service are encrypted using a key that AWS owns and manages.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' ProjectVersionArn = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$create_project_version(
#' ProjectArn = "string",
#' VersionName = "string",
#' OutputConfig = list(
#' S3Bucket = "string",
#' S3KeyPrefix = "string"
#' ),
#' TrainingData = list(
#' Assets = list(
#' list(
#' GroundTruthManifest = list(
#' S3Object = list(
#' Bucket = "string",
#' Name = "string",
#' Version = "string"
#' )
#' )
#' )
#' )
#' ),
#' TestingData = list(
#' Assets = list(
#' list(
#' GroundTruthManifest = list(
#' S3Object = list(
#' Bucket = "string",
#' Name = "string",
#' Version = "string"
#' )
#' )
#' )
#' ),
#' AutoCreate = TRUE|FALSE
#' ),
#' Tags = list(
#' "string"
#' ),
#' KmsKeyId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_create_project_version
#'
#' @aliases rekognition_create_project_version
rekognition_create_project_version <- function(ProjectArn, VersionName, OutputConfig, TrainingData = NULL, TestingData = NULL, Tags = NULL, KmsKeyId = NULL) {
  # Describe the CreateProjectVersion wire operation for the request builder.
  operation <- new_operation(
    name = "CreateProjectVersion",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize caller arguments and prepare the response unmarshaller.
  op_input <- .rekognition$create_project_version_input(ProjectArn = ProjectArn, VersionName = VersionName, OutputConfig = OutputConfig, TrainingData = TrainingData, TestingData = TestingData, Tags = Tags, KmsKeyId = KmsKeyId)
  op_output <- .rekognition$create_project_version_output()
  client <- .rekognition$service(get_config())
  # Build the signed request and return the parsed service response.
  send_request(new_request(client, operation, op_input, op_output))
}
.rekognition$operations$create_project_version <- rekognition_create_project_version
#' Creates an Amazon Rekognition stream processor that you can use to
#' detect and recognize faces or to detect labels in a streaming video
#'
#' @description
#' Creates an Amazon Rekognition stream processor that you can use to
#' detect and recognize faces or to detect labels in a streaming video.
#'
#' Amazon Rekognition Video is a consumer of live video from Amazon Kinesis
#' Video Streams. There are two different settings for stream processors in
#' Amazon Rekognition: detecting faces and detecting labels.
#'
#' - If you are creating a stream processor for detecting faces, you
#' provide as input a Kinesis video stream (`Input`) and a Kinesis data
#' stream (`Output`) stream for receiving the output. You must use the
#' `FaceSearch` option in `Settings`, specifying the collection that
#' contains the faces you want to recognize. After you have finished
#' analyzing a streaming video, use
#' [`stop_stream_processor`][rekognition_stop_stream_processor] to stop
#' processing.
#'
#' - If you are creating a stream processor to detect labels, you provide
#' as input a Kinesis video stream (`Input`), Amazon S3 bucket
#' information (`Output`), and an Amazon SNS topic ARN
#' (`NotificationChannel`). You can also provide a KMS key ID to
#' encrypt the data sent to your Amazon S3 bucket. You specify what you
#' want to detect by using the `ConnectedHome` option in settings, and
#' selecting one of the following: `PERSON`, `PET`, `PACKAGE`, `ALL`
#' You can also specify where in the frame you want Amazon Rekognition
#' to monitor with `RegionsOfInterest`. When you run the
#' [`start_stream_processor`][rekognition_start_stream_processor]
#' operation on a label detection stream processor, you input start and
#' stop information to determine the length of the processing time.
#'
#' Use `Name` to assign an identifier for the stream processor. You use
#' `Name` to manage the stream processor. For example, you can start
#' processing the source video by calling
#' [`start_stream_processor`][rekognition_start_stream_processor] with the
#' `Name` field.
#'
#' This operation requires permissions to perform the
#' `rekognition:CreateStreamProcessor` action. If you want to tag your
#' stream processor, you also require permission to perform the
#' `rekognition:TagResource` operation.
#'
#' @usage
#' rekognition_create_stream_processor(Input, Output, Name, Settings,
#' RoleArn, Tags, NotificationChannel, KmsKeyId, RegionsOfInterest,
#' DataSharingPreference)
#'
#' @param Input [required] Kinesis video stream that provides the source streaming video. If
#' you are using the AWS CLI, the parameter name is `StreamProcessorInput`.
#' This is required for both face search and label detection stream
#' processors.
#' @param Output [required] Kinesis data stream or Amazon S3 bucket location to which Amazon
#' Rekognition Video puts the analysis results. If you are using the AWS
#' CLI, the parameter name is `StreamProcessorOutput`. This must be a
#' S3Destination of an Amazon S3 bucket that you own for a label detection
#' stream processor or a Kinesis data stream ARN for a face search stream
#' processor.
#' @param Name [required] An identifier you assign to the stream processor. You can use `Name` to
#' manage the stream processor. For example, you can get the current status
#' of the stream processor by calling
#' [`describe_stream_processor`][rekognition_describe_stream_processor].
#' `Name` is idempotent. This is required for both face search and label
#' detection stream processors.
#' @param Settings [required] Input parameters used in a streaming video analyzed by a stream
#' processor. You can use `FaceSearch` to recognize faces in a streaming
#' video, or you can use `ConnectedHome` to detect labels.
#' @param RoleArn [required] The Amazon Resource Number (ARN) of the IAM role that allows access to
#' the stream processor. The IAM role provides Rekognition read permissions
#' for a Kinesis stream. It also provides write permissions to an Amazon S3
#' bucket and Amazon Simple Notification Service topic for a label
#' detection stream processor. This is required for both face search and
#' label detection stream processors.
#' @param Tags A set of tags (key-value pairs) that you want to attach to the stream
#' processor.
#' @param NotificationChannel
#' @param KmsKeyId The identifier for your AWS Key Management Service key (AWS KMS key).
#' This is an optional parameter for label detection stream processors and
#' should not be used to create a face search stream processor. You can
#' supply the Amazon Resource Name (ARN) of your KMS key, the ID of your
#' KMS key, an alias for your KMS key, or an alias ARN. The key is used to
#' encrypt results and data published to your Amazon S3 bucket, which
#' includes image frames and hero images. Your source images are
#' unaffected.
#' @param RegionsOfInterest Specifies locations in the frames where Amazon Rekognition checks for
#' objects or people. You can specify up to 10 regions of interest, and
#' each region has either a polygon or a bounding box. This is an optional
#' parameter for label detection stream processors and should not be used
#' to create a face search stream processor.
#' @param DataSharingPreference Shows whether you are sharing data with Rekognition to improve model
#' performance. You can choose this option at the account level or on a
#' per-stream basis. Note that if you opt out at the account level this
#' setting is ignored on individual streams.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' StreamProcessorArn = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$create_stream_processor(
#' Input = list(
#' KinesisVideoStream = list(
#' Arn = "string"
#' )
#' ),
#' Output = list(
#' KinesisDataStream = list(
#' Arn = "string"
#' ),
#' S3Destination = list(
#' Bucket = "string",
#' KeyPrefix = "string"
#' )
#' ),
#' Name = "string",
#' Settings = list(
#' FaceSearch = list(
#' CollectionId = "string",
#' FaceMatchThreshold = 123.0
#' ),
#' ConnectedHome = list(
#' Labels = list(
#' "string"
#' ),
#' MinConfidence = 123.0
#' )
#' ),
#' RoleArn = "string",
#' Tags = list(
#' "string"
#' ),
#' NotificationChannel = list(
#' SNSTopicArn = "string"
#' ),
#' KmsKeyId = "string",
#' RegionsOfInterest = list(
#' list(
#' BoundingBox = list(
#' Width = 123.0,
#' Height = 123.0,
#' Left = 123.0,
#' Top = 123.0
#' ),
#' Polygon = list(
#' list(
#' X = 123.0,
#' Y = 123.0
#' )
#' )
#' )
#' ),
#' DataSharingPreference = list(
#' OptIn = TRUE|FALSE
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_create_stream_processor
#'
#' @aliases rekognition_create_stream_processor
rekognition_create_stream_processor <- function(Input, Output, Name, Settings, RoleArn, Tags = NULL, NotificationChannel = NULL, KmsKeyId = NULL, RegionsOfInterest = NULL, DataSharingPreference = NULL) {
  # Describe the CreateStreamProcessor wire operation for the request builder.
  operation <- new_operation(
    name = "CreateStreamProcessor",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize caller arguments and prepare the response unmarshaller.
  op_input <- .rekognition$create_stream_processor_input(Input = Input, Output = Output, Name = Name, Settings = Settings, RoleArn = RoleArn, Tags = Tags, NotificationChannel = NotificationChannel, KmsKeyId = KmsKeyId, RegionsOfInterest = RegionsOfInterest, DataSharingPreference = DataSharingPreference)
  op_output <- .rekognition$create_stream_processor_output()
  client <- .rekognition$service(get_config())
  # Build the signed request and return the parsed service response.
  send_request(new_request(client, operation, op_input, op_output))
}
.rekognition$operations$create_stream_processor <- rekognition_create_stream_processor
#' Creates a new User within a collection specified by CollectionId
#'
#' @description
#' Creates a new User within a collection specified by `CollectionId`.
#' Takes `UserId` as a parameter, which is a user provided ID which should
#' be unique within the collection. The provided `UserId` will alias the
#' system generated UUID to make the `UserId` more user friendly.
#'
#' Uses a `ClientToken`, an idempotency token that ensures a call to
#' [`create_user`][rekognition_create_user] completes only once. If the
#' value is not supplied, the AWS SDK generates an idempotency token for
#' the requests. This prevents retries after a network error results from
#' making multiple [`create_user`][rekognition_create_user] calls.
#'
#' @usage
#' rekognition_create_user(CollectionId, UserId, ClientRequestToken)
#'
#' @param CollectionId [required] The ID of an existing collection to which the new UserID needs to be
#' created.
#' @param UserId [required] ID for the UserID to be created. This ID needs to be unique within the
#' collection.
#' @param ClientRequestToken Idempotent token used to identify the request to
#' [`create_user`][rekognition_create_user]. If you use the same token with
#' multiple [`create_user`][rekognition_create_user] requests, the same
#' response is returned. Use ClientRequestToken to prevent the same request
#' from being processed more than once.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$create_user(
#' CollectionId = "string",
#' UserId = "string",
#' ClientRequestToken = "string"
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # Creates a new User within a collection specified by CollectionId.
#' svc$create_user(
#' CollectionId = "MyCollection",
#' UserId = "DemoUser"
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname rekognition_create_user
#'
#' @aliases rekognition_create_user
rekognition_create_user <- function(CollectionId, UserId, ClientRequestToken = NULL) {
  # Describe the CreateUser wire operation for the request builder.
  operation <- new_operation(
    name = "CreateUser",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize caller arguments and prepare the response unmarshaller.
  op_input <- .rekognition$create_user_input(CollectionId = CollectionId, UserId = UserId, ClientRequestToken = ClientRequestToken)
  op_output <- .rekognition$create_user_output()
  client <- .rekognition$service(get_config())
  # Build the signed request and return the parsed service response.
  send_request(new_request(client, operation, op_input, op_output))
}
.rekognition$operations$create_user <- rekognition_create_user
#' Deletes the specified collection
#'
#' @description
#' Deletes the specified collection. Note that this operation removes all
#' faces in the collection. For an example, see [Deleting a
#' collection](https://docs.aws.amazon.com/rekognition/latest/dg/delete-collection-procedure.html).
#'
#' This operation requires permissions to perform the
#' `rekognition:DeleteCollection` action.
#'
#' @usage
#' rekognition_delete_collection(CollectionId)
#'
#' @param CollectionId [required] ID of the collection to delete.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' StatusCode = 123
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$delete_collection(
#' CollectionId = "string"
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This operation deletes a Rekognition collection.
#' svc$delete_collection(
#' CollectionId = "myphotos"
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname rekognition_delete_collection
#'
#' @aliases rekognition_delete_collection
rekognition_delete_collection <- function(CollectionId) {
  # Describe the DeleteCollection wire operation for the request builder.
  operation <- new_operation(
    name = "DeleteCollection",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize caller arguments and prepare the response unmarshaller.
  op_input <- .rekognition$delete_collection_input(CollectionId = CollectionId)
  op_output <- .rekognition$delete_collection_output()
  client <- .rekognition$service(get_config())
  # Build the signed request and return the parsed service response.
  send_request(new_request(client, operation, op_input, op_output))
}
.rekognition$operations$delete_collection <- rekognition_delete_collection
#' Deletes an existing Amazon Rekognition Custom Labels dataset
#'
#' @description
#' Deletes an existing Amazon Rekognition Custom Labels dataset. Deleting a
#' dataset might take a while. Use
#' [`describe_dataset`][rekognition_describe_dataset] to check the current
#' status. The dataset is still deleting if the value of `Status` is
#' `DELETE_IN_PROGRESS`. If you try to access the dataset after it is
#' deleted, you get a `ResourceNotFoundException` exception.
#'
#' You can't delete a dataset while it is creating (`Status` =
#' `CREATE_IN_PROGRESS`) or if the dataset is updating (`Status` =
#' `UPDATE_IN_PROGRESS`).
#'
#' This operation requires permissions to perform the
#' `rekognition:DeleteDataset` action.
#'
#' @usage
#' rekognition_delete_dataset(DatasetArn)
#'
#' @param DatasetArn [required] The ARN of the Amazon Rekognition Custom Labels dataset that you want to
#' delete.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$delete_dataset(
#' DatasetArn = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_delete_dataset
#'
#' @aliases rekognition_delete_dataset
rekognition_delete_dataset <- function(DatasetArn) {
  # Describe the DeleteDataset wire operation for the request builder.
  operation <- new_operation(
    name = "DeleteDataset",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize caller arguments and prepare the response unmarshaller.
  op_input <- .rekognition$delete_dataset_input(DatasetArn = DatasetArn)
  op_output <- .rekognition$delete_dataset_output()
  client <- .rekognition$service(get_config())
  # Build the signed request and return the parsed service response.
  send_request(new_request(client, operation, op_input, op_output))
}
.rekognition$operations$delete_dataset <- rekognition_delete_dataset
#' Deletes faces from a collection
#'
#' @description
#' Deletes faces from a collection. You specify a collection ID and an
#' array of face IDs to remove from the collection.
#'
#' This operation requires permissions to perform the
#' `rekognition:DeleteFaces` action.
#'
#' @usage
#' rekognition_delete_faces(CollectionId, FaceIds)
#'
#' @param CollectionId [required] Collection from which to remove the specific faces.
#' @param FaceIds [required] An array of face IDs to delete.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' DeletedFaces = list(
#' "string"
#' ),
#' UnsuccessfulFaceDeletions = list(
#' list(
#' FaceId = "string",
#' UserId = "string",
#' Reasons = list(
#' "ASSOCIATED_TO_AN_EXISTING_USER"|"FACE_NOT_FOUND"
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$delete_faces(
#' CollectionId = "string",
#' FaceIds = list(
#' "string"
#' )
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This operation deletes one or more faces from a Rekognition collection.
#' svc$delete_faces(
#' CollectionId = "myphotos",
#' FaceIds = list(
#' "ff43d742-0c13-5d16-a3e8-03d3f58e980b"
#' )
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname rekognition_delete_faces
#'
#' @aliases rekognition_delete_faces
rekognition_delete_faces <- function(CollectionId, FaceIds) {
  # Describe the DeleteFaces wire operation for the request builder.
  operation <- new_operation(
    name = "DeleteFaces",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize caller arguments and prepare the response unmarshaller.
  op_input <- .rekognition$delete_faces_input(CollectionId = CollectionId, FaceIds = FaceIds)
  op_output <- .rekognition$delete_faces_output()
  client <- .rekognition$service(get_config())
  # Build the signed request and return the parsed service response.
  send_request(new_request(client, operation, op_input, op_output))
}
.rekognition$operations$delete_faces <- rekognition_delete_faces
#' Deletes an Amazon Rekognition Custom Labels project
#'
#' @description
#' Deletes an Amazon Rekognition Custom Labels project. To delete a project
#' you must first delete all models associated with the project. To delete
#' a model, see
#' [`delete_project_version`][rekognition_delete_project_version].
#'
#' [`delete_project`][rekognition_delete_project] is an asynchronous
#' operation. To check if the project is deleted, call
#' [`describe_projects`][rekognition_describe_projects]. The project is
#' deleted when the project no longer appears in the response. Be aware
#' that deleting a given project will also delete any `ProjectPolicies`
#' associated with that project.
#'
#' This operation requires permissions to perform the
#' `rekognition:DeleteProject` action.
#'
#' @usage
#' rekognition_delete_project(ProjectArn)
#'
#' @param ProjectArn [required] The Amazon Resource Name (ARN) of the project that you want to delete.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Status = "CREATING"|"CREATED"|"DELETING"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$delete_project(
#' ProjectArn = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_delete_project
#'
#' @aliases rekognition_delete_project
rekognition_delete_project <- function(ProjectArn) {
  # Describe the DeleteProject wire operation for the request builder.
  operation <- new_operation(
    name = "DeleteProject",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize caller arguments and prepare the response unmarshaller.
  op_input <- .rekognition$delete_project_input(ProjectArn = ProjectArn)
  op_output <- .rekognition$delete_project_output()
  client <- .rekognition$service(get_config())
  # Build the signed request and return the parsed service response.
  send_request(new_request(client, operation, op_input, op_output))
}
.rekognition$operations$delete_project <- rekognition_delete_project
#' Deletes an existing project policy
#'
#' @description
#' Deletes an existing project policy.
#'
#' To get a list of project policies attached to a project, call
#' [`list_project_policies`][rekognition_list_project_policies]. To attach
#' a project policy to a project, call
#' [`put_project_policy`][rekognition_put_project_policy].
#'
#' This operation requires permissions to perform the
#' `rekognition:DeleteProjectPolicy` action.
#'
#' @usage
#' rekognition_delete_project_policy(ProjectArn, PolicyName,
#' PolicyRevisionId)
#'
#' @param ProjectArn [required] The Amazon Resource Name (ARN) of the project that the project policy
#' you want to delete is attached to.
#' @param PolicyName [required] The name of the policy that you want to delete.
#' @param PolicyRevisionId The ID of the project policy revision that you want to delete.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$delete_project_policy(
#' ProjectArn = "string",
#' PolicyName = "string",
#' PolicyRevisionId = "string"
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This operation deletes a revision of an existing project policy from an
#' # Amazon Rekognition Custom Labels project.
#' svc$delete_project_policy(
#' PolicyName = "testPolicy1",
#' PolicyRevisionId = "3b274c25e9203a56a99e00e3ff205fbc",
#' ProjectArn = "arn:aws:rekognition:us-east-1:111122223333:project/SourceProject/1656557123456"
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname rekognition_delete_project_policy
#'
#' @aliases rekognition_delete_project_policy
rekognition_delete_project_policy <- function(ProjectArn, PolicyName, PolicyRevisionId = NULL) {
  # Describe the DeleteProjectPolicy wire operation for the request builder.
  operation <- new_operation(
    name = "DeleteProjectPolicy",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize caller arguments and prepare the response unmarshaller.
  op_input <- .rekognition$delete_project_policy_input(ProjectArn = ProjectArn, PolicyName = PolicyName, PolicyRevisionId = PolicyRevisionId)
  op_output <- .rekognition$delete_project_policy_output()
  client <- .rekognition$service(get_config())
  # Build the signed request and return the parsed service response.
  send_request(new_request(client, operation, op_input, op_output))
}
.rekognition$operations$delete_project_policy <- rekognition_delete_project_policy
#' Deletes an Amazon Rekognition Custom Labels model
#'
#' @description
#' Deletes an Amazon Rekognition Custom Labels model.
#'
#' You can't delete a model if it is running or if it is training. To check
#' the status of a model, use the `Status` field returned from
#' [`describe_project_versions`][rekognition_describe_project_versions]. To
#' stop a running model call
#' [`stop_project_version`][rekognition_stop_project_version]. If the model
#' is training, wait until it finishes.
#'
#' This operation requires permissions to perform the
#' `rekognition:DeleteProjectVersion` action.
#'
#' @usage
#' rekognition_delete_project_version(ProjectVersionArn)
#'
#' @param ProjectVersionArn [required] The Amazon Resource Name (ARN) of the model version that you want to
#' delete.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Status = "TRAINING_IN_PROGRESS"|"TRAINING_COMPLETED"|"TRAINING_FAILED"|"STARTING"|"RUNNING"|"FAILED"|"STOPPING"|"STOPPED"|"DELETING"|"COPYING_IN_PROGRESS"|"COPYING_COMPLETED"|"COPYING_FAILED"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$delete_project_version(
#' ProjectVersionArn = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_delete_project_version
#'
#' @aliases rekognition_delete_project_version
rekognition_delete_project_version <- function(ProjectVersionArn) {
  # Describe the DeleteProjectVersion wire operation for the request builder.
  operation <- new_operation(
    name = "DeleteProjectVersion",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize caller arguments and prepare the response unmarshaller.
  op_input <- .rekognition$delete_project_version_input(ProjectVersionArn = ProjectVersionArn)
  op_output <- .rekognition$delete_project_version_output()
  client <- .rekognition$service(get_config())
  # Build the signed request and return the parsed service response.
  send_request(new_request(client, operation, op_input, op_output))
}
.rekognition$operations$delete_project_version <- rekognition_delete_project_version
#' Deletes the stream processor identified by Name
#'
#' @description
#' Deletes the stream processor identified by `Name`. You assign the value
#' for `Name` when you create the stream processor with
#' [`create_stream_processor`][rekognition_create_stream_processor]. You
#' might not be able to use the same name for a stream processor for a few
#' seconds after calling
#' [`delete_stream_processor`][rekognition_delete_stream_processor].
#'
#' @usage
#' rekognition_delete_stream_processor(Name)
#'
#' @param Name [required] The name of the stream processor you want to delete.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$delete_stream_processor(
#' Name = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_delete_stream_processor
#'
#' @aliases rekognition_delete_stream_processor
rekognition_delete_stream_processor <- function(Name) {
  # Describe the DeleteStreamProcessor wire operation for the request builder.
  operation <- new_operation(
    name = "DeleteStreamProcessor",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize caller arguments and prepare the response unmarshaller.
  op_input <- .rekognition$delete_stream_processor_input(Name = Name)
  op_output <- .rekognition$delete_stream_processor_output()
  client <- .rekognition$service(get_config())
  # Build the signed request and return the parsed service response.
  send_request(new_request(client, operation, op_input, op_output))
}
.rekognition$operations$delete_stream_processor <- rekognition_delete_stream_processor
#' Deletes the specified UserID within the collection
#'
#' @description
#' Deletes the specified UserID within the collection. Faces that are
#' associated with the UserID are disassociated from the UserID before
#' deleting the specified UserID. If the specified `Collection` or `UserID`
#' is already deleted or not found, a `ResourceNotFoundException` will be
#' thrown. If the action is successful with a 200 response, an empty HTTP
#' body is returned.
#'
#' @usage
#' rekognition_delete_user(CollectionId, UserId, ClientRequestToken)
#'
#' @param CollectionId [required] The ID of an existing collection from which the UserID needs to be
#' deleted.
#' @param UserId [required] ID for the UserID to be deleted.
#' @param ClientRequestToken Idempotent token used to identify the request to
#' [`delete_user`][rekognition_delete_user]. If you use the same token with
#' multiple [`delete_user`][rekognition_delete_user] requests, the same
#' response is returned. Use ClientRequestToken to prevent the same request
#' from being processed more than once.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$delete_user(
#' CollectionId = "string",
#' UserId = "string",
#' ClientRequestToken = "string"
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # Deletes the specified UserID within the collection.
#' svc$delete_user(
#' ClientRequestToken = "550e8400-e29b-41d4-a716-446655440001",
#' CollectionId = "MyCollection",
#' UserId = "DemoUser"
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname rekognition_delete_user
#'
#' @aliases rekognition_delete_user
rekognition_delete_user <- function(CollectionId, UserId, ClientRequestToken = NULL) {
  # Describe the DeleteUser wire operation for the request builder.
  operation <- new_operation(
    name = "DeleteUser",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize caller arguments and prepare the response unmarshaller.
  op_input <- .rekognition$delete_user_input(CollectionId = CollectionId, UserId = UserId, ClientRequestToken = ClientRequestToken)
  op_output <- .rekognition$delete_user_output()
  client <- .rekognition$service(get_config())
  # Build the signed request and return the parsed service response.
  send_request(new_request(client, operation, op_input, op_output))
}
.rekognition$operations$delete_user <- rekognition_delete_user
#' Describes the specified collection
#'
#' @description
#' Describes the specified collection. You can use
#' [`describe_collection`][rekognition_describe_collection] to get
#' information, such as the number of faces indexed into a collection and
#' the version of the model used by the collection for face detection.
#'
#' For more information, see Describing a Collection in the Amazon
#' Rekognition Developer Guide.
#'
#' @usage
#' rekognition_describe_collection(CollectionId)
#'
#' @param CollectionId [required] The ID of the collection to describe.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' FaceCount = 123,
#' FaceModelVersion = "string",
#' CollectionARN = "string",
#' CreationTimestamp = as.POSIXct(
#' "2015-01-01"
#' ),
#' UserCount = 123
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_collection(
#' CollectionId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_describe_collection
#'
#' @aliases rekognition_describe_collection
rekognition_describe_collection <- function(CollectionId) {
  # Describe the DescribeCollection wire operation for the request builder.
  operation <- new_operation(
    name = "DescribeCollection",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize caller arguments and prepare the response unmarshaller.
  op_input <- .rekognition$describe_collection_input(CollectionId = CollectionId)
  op_output <- .rekognition$describe_collection_output()
  client <- .rekognition$service(get_config())
  # Build the signed request and return the parsed service response.
  send_request(new_request(client, operation, op_input, op_output))
}
.rekognition$operations$describe_collection <- rekognition_describe_collection
#' Describes an Amazon Rekognition Custom Labels dataset
#'
#' @description
#' Describes an Amazon Rekognition Custom Labels dataset. You can get
#' information such as the current status of a dataset and statistics about
#' the images and labels in a dataset.
#'
#' This operation requires permissions to perform the
#' `rekognition:DescribeDataset` action.
#'
#' @usage
#' rekognition_describe_dataset(DatasetArn)
#'
#' @param DatasetArn [required] The Amazon Resource Name (ARN) of the dataset that you want to describe.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' DatasetDescription = list(
#' CreationTimestamp = as.POSIXct(
#' "2015-01-01"
#' ),
#' LastUpdatedTimestamp = as.POSIXct(
#' "2015-01-01"
#' ),
#' Status = "CREATE_IN_PROGRESS"|"CREATE_COMPLETE"|"CREATE_FAILED"|"UPDATE_IN_PROGRESS"|"UPDATE_COMPLETE"|"UPDATE_FAILED"|"DELETE_IN_PROGRESS",
#' StatusMessage = "string",
#' StatusMessageCode = "SUCCESS"|"SERVICE_ERROR"|"CLIENT_ERROR",
#' DatasetStats = list(
#' LabeledEntries = 123,
#' TotalEntries = 123,
#' TotalLabels = 123,
#' ErrorEntries = 123
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_dataset(
#' DatasetArn = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_describe_dataset
#'
#' @aliases rekognition_describe_dataset
rekognition_describe_dataset <- function(DatasetArn) {
  # Describe the DescribeDataset wire operation for the request builder.
  operation <- new_operation(
    name = "DescribeDataset",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize caller arguments and prepare the response unmarshaller.
  op_input <- .rekognition$describe_dataset_input(DatasetArn = DatasetArn)
  op_output <- .rekognition$describe_dataset_output()
  client <- .rekognition$service(get_config())
  # Build the signed request and return the parsed service response.
  send_request(new_request(client, operation, op_input, op_output))
}
.rekognition$operations$describe_dataset <- rekognition_describe_dataset
#' Lists and describes the versions of a model in an Amazon Rekognition
#' Custom Labels project
#'
#' @description
#' Lists and describes the versions of a model in an Amazon Rekognition
#' Custom Labels project. You can specify up to 10 model versions in
#' `ProjectVersionArns`. If you don't specify a value, descriptions for all
#' model versions in the project are returned.
#'
#' This operation requires permissions to perform the
#' `rekognition:DescribeProjectVersions` action.
#'
#' @usage
#' rekognition_describe_project_versions(ProjectArn, VersionNames,
#' NextToken, MaxResults)
#'
#' @param ProjectArn [required] The Amazon Resource Name (ARN) of the project that contains the models
#' you want to describe.
#' @param VersionNames A list of model version names that you want to describe. You can add up
#' to 10 model version names to the list. If you don't specify a value, all
#' model descriptions are returned. A version name is part of a model
#' (ProjectVersion) ARN. For example, `my-model.2020-01-21T09.10.15` is the
#' version name in the following ARN.
#' `arn:aws:rekognition:us-east-1:123456789012:project/getting-started/version/my-model.2020-01-21T09.10.15/1234567890123`.
#' @param NextToken If the previous response was incomplete (because there is more results
#' to retrieve), Amazon Rekognition Custom Labels returns a pagination
#' token in the response. You can use this pagination token to retrieve the
#' next set of results.
#' @param MaxResults The maximum number of results to return per paginated call. The largest
#' value you can specify is 100. If you specify a value greater than 100, a
#' ValidationException error occurs. The default value is 100.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' ProjectVersionDescriptions = list(
#' list(
#' ProjectVersionArn = "string",
#' CreationTimestamp = as.POSIXct(
#' "2015-01-01"
#' ),
#' MinInferenceUnits = 123,
#' Status = "TRAINING_IN_PROGRESS"|"TRAINING_COMPLETED"|"TRAINING_FAILED"|"STARTING"|"RUNNING"|"FAILED"|"STOPPING"|"STOPPED"|"DELETING"|"COPYING_IN_PROGRESS"|"COPYING_COMPLETED"|"COPYING_FAILED",
#' StatusMessage = "string",
#' BillableTrainingTimeInSeconds = 123,
#' TrainingEndTimestamp = as.POSIXct(
#' "2015-01-01"
#' ),
#' OutputConfig = list(
#' S3Bucket = "string",
#' S3KeyPrefix = "string"
#' ),
#' TrainingDataResult = list(
#' Input = list(
#' Assets = list(
#' list(
#' GroundTruthManifest = list(
#' S3Object = list(
#' Bucket = "string",
#' Name = "string",
#' Version = "string"
#' )
#' )
#' )
#' )
#' ),
#' Output = list(
#' Assets = list(
#' list(
#' GroundTruthManifest = list(
#' S3Object = list(
#' Bucket = "string",
#' Name = "string",
#' Version = "string"
#' )
#' )
#' )
#' )
#' ),
#' Validation = list(
#' Assets = list(
#' list(
#' GroundTruthManifest = list(
#' S3Object = list(
#' Bucket = "string",
#' Name = "string",
#' Version = "string"
#' )
#' )
#' )
#' )
#' )
#' ),
#' TestingDataResult = list(
#' Input = list(
#' Assets = list(
#' list(
#' GroundTruthManifest = list(
#' S3Object = list(
#' Bucket = "string",
#' Name = "string",
#' Version = "string"
#' )
#' )
#' )
#' ),
#' AutoCreate = TRUE|FALSE
#' ),
#' Output = list(
#' Assets = list(
#' list(
#' GroundTruthManifest = list(
#' S3Object = list(
#' Bucket = "string",
#' Name = "string",
#' Version = "string"
#' )
#' )
#' )
#' ),
#' AutoCreate = TRUE|FALSE
#' ),
#' Validation = list(
#' Assets = list(
#' list(
#' GroundTruthManifest = list(
#' S3Object = list(
#' Bucket = "string",
#' Name = "string",
#' Version = "string"
#' )
#' )
#' )
#' )
#' )
#' ),
#' EvaluationResult = list(
#' F1Score = 123.0,
#' Summary = list(
#' S3Object = list(
#' Bucket = "string",
#' Name = "string",
#' Version = "string"
#' )
#' )
#' ),
#' ManifestSummary = list(
#' S3Object = list(
#' Bucket = "string",
#' Name = "string",
#' Version = "string"
#' )
#' ),
#' KmsKeyId = "string",
#' MaxInferenceUnits = 123,
#' SourceProjectVersionArn = "string"
#' )
#' ),
#' NextToken = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_project_versions(
#' ProjectArn = "string",
#' VersionNames = list(
#' "string"
#' ),
#' NextToken = "string",
#' MaxResults = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_describe_project_versions
#'
#' @aliases rekognition_describe_project_versions
rekognition_describe_project_versions <- function(ProjectArn, VersionNames = NULL, NextToken = NULL, MaxResults = NULL) {
  # List and describe model versions in a project; results are paginated
  # via NextToken/MaxResults over ProjectVersionDescriptions.
  operation <- new_operation(
    name = "DescribeProjectVersions",
    http_method = "POST",
    http_path = "/",
    paginator = list(input_token = "NextToken", limit_key = "MaxResults", output_token = "NextToken", result_key = "ProjectVersionDescriptions")
  )
  # Marshal caller arguments into the operation's input shape.
  op_input <- .rekognition$describe_project_versions_input(
    ProjectArn = ProjectArn,
    VersionNames = VersionNames,
    NextToken = NextToken,
    MaxResults = MaxResults
  )
  op_output <- .rekognition$describe_project_versions_output()
  service <- .rekognition$service(get_config())
  # Build and dispatch the request; the parsed response is the return value.
  send_request(new_request(service, operation, op_input, op_output))
}
.rekognition$operations$describe_project_versions <- rekognition_describe_project_versions
#' Gets information about your Amazon Rekognition Custom Labels projects
#'
#' @description
#' Gets information about your Amazon Rekognition Custom Labels projects.
#'
#' This operation requires permissions to perform the
#' `rekognition:DescribeProjects` action.
#'
#' @usage
#' rekognition_describe_projects(NextToken, MaxResults, ProjectNames)
#'
#' @param NextToken If the previous response was incomplete (because there is more results
#' to retrieve), Amazon Rekognition Custom Labels returns a pagination
#' token in the response. You can use this pagination token to retrieve the
#' next set of results.
#' @param MaxResults The maximum number of results to return per paginated call. The largest
#' value you can specify is 100. If you specify a value greater than 100, a
#' ValidationException error occurs. The default value is 100.
#' @param ProjectNames A list of the projects that you want Amazon Rekognition Custom Labels to
#' describe. If you don't specify a value, the response includes
#' descriptions for all the projects in your AWS account.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' ProjectDescriptions = list(
#' list(
#' ProjectArn = "string",
#' CreationTimestamp = as.POSIXct(
#' "2015-01-01"
#' ),
#' Status = "CREATING"|"CREATED"|"DELETING",
#' Datasets = list(
#' list(
#' CreationTimestamp = as.POSIXct(
#' "2015-01-01"
#' ),
#' DatasetType = "TRAIN"|"TEST",
#' DatasetArn = "string",
#' Status = "CREATE_IN_PROGRESS"|"CREATE_COMPLETE"|"CREATE_FAILED"|"UPDATE_IN_PROGRESS"|"UPDATE_COMPLETE"|"UPDATE_FAILED"|"DELETE_IN_PROGRESS",
#' StatusMessage = "string",
#' StatusMessageCode = "SUCCESS"|"SERVICE_ERROR"|"CLIENT_ERROR"
#' )
#' )
#' )
#' ),
#' NextToken = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_projects(
#' NextToken = "string",
#' MaxResults = 123,
#' ProjectNames = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_describe_projects
#'
#' @aliases rekognition_describe_projects
rekognition_describe_projects <- function(NextToken = NULL, MaxResults = NULL, ProjectNames = NULL) {
  # Describe Rekognition Custom Labels projects in the account; paginated
  # via NextToken/MaxResults over ProjectDescriptions.
  operation <- new_operation(
    name = "DescribeProjects",
    http_method = "POST",
    http_path = "/",
    paginator = list(input_token = "NextToken", limit_key = "MaxResults", output_token = "NextToken", result_key = "ProjectDescriptions")
  )
  # Marshal caller arguments into the operation's input shape.
  op_input <- .rekognition$describe_projects_input(
    NextToken = NextToken,
    MaxResults = MaxResults,
    ProjectNames = ProjectNames
  )
  op_output <- .rekognition$describe_projects_output()
  service <- .rekognition$service(get_config())
  # Build and dispatch the request; the parsed response is the return value.
  send_request(new_request(service, operation, op_input, op_output))
}
.rekognition$operations$describe_projects <- rekognition_describe_projects
#' Provides information about a stream processor created by
#' CreateStreamProcessor
#'
#' @description
#' Provides information about a stream processor created by
#' [`create_stream_processor`][rekognition_create_stream_processor]. You
#' can get information about the input and output streams, the input
#' parameters for the face recognition being performed, and the current
#' status of the stream processor.
#'
#' @usage
#' rekognition_describe_stream_processor(Name)
#'
#' @param Name [required] Name of the stream processor for which you want information.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Name = "string",
#' StreamProcessorArn = "string",
#' Status = "STOPPED"|"STARTING"|"RUNNING"|"FAILED"|"STOPPING"|"UPDATING",
#' StatusMessage = "string",
#' CreationTimestamp = as.POSIXct(
#' "2015-01-01"
#' ),
#' LastUpdateTimestamp = as.POSIXct(
#' "2015-01-01"
#' ),
#' Input = list(
#' KinesisVideoStream = list(
#' Arn = "string"
#' )
#' ),
#' Output = list(
#' KinesisDataStream = list(
#' Arn = "string"
#' ),
#' S3Destination = list(
#' Bucket = "string",
#' KeyPrefix = "string"
#' )
#' ),
#' RoleArn = "string",
#' Settings = list(
#' FaceSearch = list(
#' CollectionId = "string",
#' FaceMatchThreshold = 123.0
#' ),
#' ConnectedHome = list(
#' Labels = list(
#' "string"
#' ),
#' MinConfidence = 123.0
#' )
#' ),
#' NotificationChannel = list(
#' SNSTopicArn = "string"
#' ),
#' KmsKeyId = "string",
#' RegionsOfInterest = list(
#' list(
#' BoundingBox = list(
#' Width = 123.0,
#' Height = 123.0,
#' Left = 123.0,
#' Top = 123.0
#' ),
#' Polygon = list(
#' list(
#' X = 123.0,
#' Y = 123.0
#' )
#' )
#' )
#' ),
#' DataSharingPreference = list(
#' OptIn = TRUE|FALSE
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_stream_processor(
#' Name = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_describe_stream_processor
#'
#' @aliases rekognition_describe_stream_processor
rekognition_describe_stream_processor <- function(Name) {
  # Fetch configuration and status for a named stream processor.
  # Non-paginated POST operation against the service root path.
  operation <- new_operation(
    name = "DescribeStreamProcessor",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the stream processor name into the operation's input shape.
  op_input <- .rekognition$describe_stream_processor_input(Name = Name)
  op_output <- .rekognition$describe_stream_processor_output()
  service <- .rekognition$service(get_config())
  # Build and dispatch the request; the parsed response is the return value.
  send_request(new_request(service, operation, op_input, op_output))
}
.rekognition$operations$describe_stream_processor <- rekognition_describe_stream_processor
#' Detects custom labels in a supplied image by using an Amazon Rekognition
#' Custom Labels model
#'
#' @description
#' Detects custom labels in a supplied image by using an Amazon Rekognition
#' Custom Labels model.
#'
#' You specify which version of a model version to use by using the
#' `ProjectVersionArn` input parameter.
#'
#' You pass the input image as base64-encoded image bytes or as a reference
#' to an image in an Amazon S3 bucket. If you use the AWS CLI to call
#' Amazon Rekognition operations, passing image bytes is not supported. The
#' image must be either a PNG or JPEG formatted file.
#'
#' For each object that the model version detects on an image, the API
#' returns a (`CustomLabel`) object in an array (`CustomLabels`). Each
#' `CustomLabel` object provides the label name (`Name`), the level of
#' confidence that the image contains the object (`Confidence`), and object
#' location information, if it exists, for the label on the image
#' (`Geometry`).
#'
#' To filter labels that are returned, specify a value for `MinConfidence`.
#' `DetectCustomLabelsLabels` only returns labels with a confidence that's
#' higher than the specified value. The value of `MinConfidence` maps to
#' the assumed threshold values created during training. For more
#' information, see *Assumed threshold* in the Amazon Rekognition Custom
#' Labels Developer Guide. Amazon Rekognition Custom Labels metrics
#' expresses an assumed threshold as a floating point value between 0-1.
#' The range of `MinConfidence` normalizes the threshold value to a
#' percentage value (0-100). Confidence responses from
#' [`detect_custom_labels`][rekognition_detect_custom_labels] are also
#' returned as a percentage. You can use `MinConfidence` to change the
#' precision and recall or your model. For more information, see *Analyzing
#' an image* in the Amazon Rekognition Custom Labels Developer Guide.
#'
#' If you don't specify a value for `MinConfidence`,
#' [`detect_custom_labels`][rekognition_detect_custom_labels] returns
#' labels based on the assumed threshold of each label.
#'
#' This is a stateless API operation. That is, the operation does not
#' persist any data.
#'
#' This operation requires permissions to perform the
#' `rekognition:DetectCustomLabels` action.
#'
#' For more information, see *Analyzing an image* in the Amazon Rekognition
#' Custom Labels Developer Guide.
#'
#' @usage
#' rekognition_detect_custom_labels(ProjectVersionArn, Image, MaxResults,
#' MinConfidence)
#'
#' @param ProjectVersionArn [required] The ARN of the model version that you want to use.
#' @param Image [required]
#' @param MaxResults Maximum number of results you want the service to return in the
#' response. The service returns the specified number of highest confidence
#' labels ranked from highest confidence to lowest.
#' @param MinConfidence Specifies the minimum confidence level for the labels to return.
#' [`detect_custom_labels`][rekognition_detect_custom_labels] doesn't
#' return any labels with a confidence value that's lower than this
#' specified value. If you specify a value of 0,
#' [`detect_custom_labels`][rekognition_detect_custom_labels] returns all
#' labels, regardless of the assumed threshold applied to each label. If
#' you don't specify a value for `MinConfidence`,
#' [`detect_custom_labels`][rekognition_detect_custom_labels] returns
#' labels based on the assumed threshold of each label.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' CustomLabels = list(
#' list(
#' Name = "string",
#' Confidence = 123.0,
#' Geometry = list(
#' BoundingBox = list(
#' Width = 123.0,
#' Height = 123.0,
#' Left = 123.0,
#' Top = 123.0
#' ),
#' Polygon = list(
#' list(
#' X = 123.0,
#' Y = 123.0
#' )
#' )
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$detect_custom_labels(
#' ProjectVersionArn = "string",
#' Image = list(
#' Bytes = raw,
#' S3Object = list(
#' Bucket = "string",
#' Name = "string",
#' Version = "string"
#' )
#' ),
#' MaxResults = 123,
#' MinConfidence = 123.0
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_detect_custom_labels
#'
#' @aliases rekognition_detect_custom_labels
rekognition_detect_custom_labels <- function(ProjectVersionArn, Image, MaxResults = NULL, MinConfidence = NULL) {
  # Run a trained Custom Labels model version against a supplied image.
  # Non-paginated POST operation against the service root path.
  operation <- new_operation(
    name = "DetectCustomLabels",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal caller arguments into the operation's input shape.
  op_input <- .rekognition$detect_custom_labels_input(
    ProjectVersionArn = ProjectVersionArn,
    Image = Image,
    MaxResults = MaxResults,
    MinConfidence = MinConfidence
  )
  op_output <- .rekognition$detect_custom_labels_output()
  service <- .rekognition$service(get_config())
  # Build and dispatch the request; the parsed response is the return value.
  send_request(new_request(service, operation, op_input, op_output))
}
.rekognition$operations$detect_custom_labels <- rekognition_detect_custom_labels
#' Detects faces within an image that is provided as input
#'
#' @description
#' Detects faces within an image that is provided as input.
#'
#' [`detect_faces`][rekognition_detect_faces] detects the 100 largest faces
#' in the image. For each face detected, the operation returns face
#' details. These details include a bounding box of the face, a confidence
#' value (that the bounding box contains a face), and a fixed set of
#' attributes such as facial landmarks (for example, coordinates of eye and
#' mouth), pose, presence of facial occlusion, and so on.
#'
#' The face-detection algorithm is most effective on frontal faces. For
#' non-frontal or obscured faces, the algorithm might not detect the faces
#' or might detect faces with lower confidence.
#'
#' You pass the input image either as base64-encoded image bytes or as a
#' reference to an image in an Amazon S3 bucket. If you use the AWS CLI to
#' call Amazon Rekognition operations, passing image bytes is not
#' supported. The image must be either a PNG or JPEG formatted file.
#'
#' This is a stateless API operation. That is, the operation does not
#' persist any data.
#'
#' This operation requires permissions to perform the
#' `rekognition:DetectFaces` action.
#'
#' @usage
#' rekognition_detect_faces(Image, Attributes)
#'
#' @param Image [required] The input image as base64-encoded bytes or an S3 object. If you use the
#' AWS CLI to call Amazon Rekognition operations, passing base64-encoded
#' image bytes is not supported.
#'
#' If you are using an AWS SDK to call Amazon Rekognition, you might not
#' need to base64-encode image bytes passed using the `Bytes` field. For
#' more information, see Images in the Amazon Rekognition developer guide.
#' @param Attributes An array of facial attributes you want to be returned. A `DEFAULT`
#' subset of facial attributes - `BoundingBox`, `Confidence`, `Pose`,
#' `Quality`, and `Landmarks` - will always be returned. You can request
#' for specific facial attributes (in addition to the default list) - by
#' using \[`"DEFAULT", "FACE_OCCLUDED"`\] or just \[`"FACE_OCCLUDED"`\].
#' You can request all facial attributes by using \[`"ALL"`\].
#' Requesting more attributes may increase response time.
#'
#' If you provide both, `["ALL", "DEFAULT"]`, the service uses a logical
#' "AND" operator to determine which attributes to return (in this case,
#' all attributes).
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' FaceDetails = list(
#' list(
#' BoundingBox = list(
#' Width = 123.0,
#' Height = 123.0,
#' Left = 123.0,
#' Top = 123.0
#' ),
#' AgeRange = list(
#' Low = 123,
#' High = 123
#' ),
#' Smile = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' Eyeglasses = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' Sunglasses = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' Gender = list(
#' Value = "Male"|"Female",
#' Confidence = 123.0
#' ),
#' Beard = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' Mustache = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' EyesOpen = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' MouthOpen = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' Emotions = list(
#' list(
#' Type = "HAPPY"|"SAD"|"ANGRY"|"CONFUSED"|"DISGUSTED"|"SURPRISED"|"CALM"|"UNKNOWN"|"FEAR",
#' Confidence = 123.0
#' )
#' ),
#' Landmarks = list(
#' list(
#' Type = "eyeLeft"|"eyeRight"|"nose"|"mouthLeft"|"mouthRight"|"leftEyeBrowLeft"|"leftEyeBrowRight"|"leftEyeBrowUp"|"rightEyeBrowLeft"|"rightEyeBrowRight"|"rightEyeBrowUp"|"leftEyeLeft"|"leftEyeRight"|"leftEyeUp"|"leftEyeDown"|"rightEyeLeft"|"rightEyeRight"|"rightEyeUp"|"rightEyeDown"|"noseLeft"|"noseRight"|"mouthUp"|"mouthDown"|"leftPupil"|"rightPupil"|"upperJawlineLeft"|"midJawlineLeft"|"chinBottom"|"midJawlineRight"|"upperJawlineRight",
#' X = 123.0,
#' Y = 123.0
#' )
#' ),
#' Pose = list(
#' Roll = 123.0,
#' Yaw = 123.0,
#' Pitch = 123.0
#' ),
#' Quality = list(
#' Brightness = 123.0,
#' Sharpness = 123.0
#' ),
#' Confidence = 123.0,
#' FaceOccluded = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' EyeDirection = list(
#' Yaw = 123.0,
#' Pitch = 123.0,
#' Confidence = 123.0
#' )
#' )
#' ),
#' OrientationCorrection = "ROTATE_0"|"ROTATE_90"|"ROTATE_180"|"ROTATE_270"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$detect_faces(
#' Image = list(
#' Bytes = raw,
#' S3Object = list(
#' Bucket = "string",
#' Name = "string",
#' Version = "string"
#' )
#' ),
#' Attributes = list(
#' "DEFAULT"|"ALL"|"AGE_RANGE"|"BEARD"|"EMOTIONS"|"EYE_DIRECTION"|"EYEGLASSES"|"EYES_OPEN"|"GENDER"|"MOUTH_OPEN"|"MUSTACHE"|"FACE_OCCLUDED"|"SMILE"|"SUNGLASSES"
#' )
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This operation detects faces in an image stored in an AWS S3 bucket.
#' svc$detect_faces(
#' Image = list(
#' S3Object = list(
#' Bucket = "mybucket",
#' Name = "myphoto"
#' )
#' )
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname rekognition_detect_faces
#'
#' @aliases rekognition_detect_faces
rekognition_detect_faces <- function(Image, Attributes = NULL) {
  # Detect faces (up to the service's per-image limit) in a supplied image.
  # Non-paginated POST operation against the service root path.
  operation <- new_operation(
    name = "DetectFaces",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the image and requested facial attributes into the input shape.
  op_input <- .rekognition$detect_faces_input(Image = Image, Attributes = Attributes)
  op_output <- .rekognition$detect_faces_output()
  service <- .rekognition$service(get_config())
  # Build and dispatch the request; the parsed response is the return value.
  send_request(new_request(service, operation, op_input, op_output))
}
.rekognition$operations$detect_faces <- rekognition_detect_faces
#' Detects instances of real-world entities within an image (JPEG or PNG)
#' provided as input
#'
#' @description
#' Detects instances of real-world entities within an image (JPEG or PNG)
#' provided as input. This includes objects like flower, tree, and table;
#' events like wedding, graduation, and birthday party; and concepts like
#' landscape, evening, and nature.
#'
#' For an example, see Analyzing images stored in an Amazon S3 bucket in
#' the Amazon Rekognition Developer Guide.
#'
#' You pass the input image as base64-encoded image bytes or as a reference
#' to an image in an Amazon S3 bucket. If you use the AWS CLI to call
#' Amazon Rekognition operations, passing image bytes is not supported. The
#' image must be either a PNG or JPEG formatted file.
#'
#' **Optional Parameters**
#'
#' You can specify one or both of the `GENERAL_LABELS` and
#' `IMAGE_PROPERTIES` feature types when calling the DetectLabels API.
#' Including `GENERAL_LABELS` will ensure the response includes the labels
#' detected in the input image, while including `IMAGE_PROPERTIES` will
#' ensure the response includes information about the image quality and
#' color.
#'
#' When using `GENERAL_LABELS` and/or `IMAGE_PROPERTIES` you can provide
#' filtering criteria to the Settings parameter. You can filter with sets
#' of individual labels or with label categories. You can specify inclusive
#' filters, exclusive filters, or a combination of inclusive and exclusive
#' filters. For more information on filtering see [Detecting Labels in an
#' Image](https://docs.aws.amazon.com/rekognition/latest/dg/labels-detect-labels-image.html).
#'
#' You can specify `MinConfidence` to control the confidence threshold for
#' the labels returned. The default is 55%. You can also add the
#' `MaxLabels` parameter to limit the number of labels returned. The
#' default and upper limit is 1000 labels.
#'
#' **Response Elements**
#'
#' For each object, scene, and concept the API returns one or more labels.
#' The API returns the following types of information about labels:
#'
#' - Name - The name of the detected label.
#'
#' - Confidence - The level of confidence in the label assigned to a
#' detected object.
#'
#' - Parents - The ancestor labels for a detected label. DetectLabels
#' returns a hierarchical taxonomy of detected labels. For example, a
#' detected car might be assigned the label car. The label car has two
#' parent labels: Vehicle (its parent) and Transportation (its
#' grandparent). The response includes the all ancestors for a label,
#' where every ancestor is a unique label. In the previous example,
#' Car, Vehicle, and Transportation are returned as unique labels in
#' the response.
#'
#' - Aliases - Possible Aliases for the label.
#'
#' - Categories - The label categories that the detected label belongs
#' to.
#'
#' - BoundingBox — Bounding boxes are described for all instances of
#' detected common object labels, returned in an array of Instance
#' objects. An Instance object contains a BoundingBox object,
#' describing the location of the label on the input image. It also
#' includes the confidence for the accuracy of the detected bounding
#' box.
#'
#' The API returns the following information regarding the image, as part
#' of the ImageProperties structure:
#'
#' - Quality - Information about the Sharpness, Brightness, and Contrast
#' of the input image, scored between 0 to 100. Image quality is
#' returned for the entire image, as well as the background and the
#' foreground.
#'
#' - Dominant Color - An array of the dominant colors in the image.
#'
#' - Foreground - Information about the sharpness, brightness, and
#' dominant colors of the input image’s foreground.
#'
#' - Background - Information about the sharpness, brightness, and
#' dominant colors of the input image’s background.
#'
#' The list of returned labels will include at least one label for every
#' detected object, along with information about that label. In the
#' following example, suppose the input image has a lighthouse, the sea,
#' and a rock. The response includes all three labels, one for each object,
#' as well as the confidence in the label:
#'
#' `{Name: lighthouse, Confidence: 98.4629}`
#'
#' `{Name: rock,Confidence: 79.2097}`
#'
#' `{Name: sea,Confidence: 75.061}`
#'
#' The list of labels can include multiple labels for the same object. For
#' example, if the input image shows a flower (for example, a tulip), the
#' operation might return the following three labels.
#'
#' `{Name: flower,Confidence: 99.0562}`
#'
#' `{Name: plant,Confidence: 99.0562}`
#'
#' `{Name: tulip,Confidence: 99.0562}`
#'
#' In this example, the detection algorithm more precisely identifies the
#' flower as a tulip.
#'
#' If the object detected is a person, the operation doesn't provide the
#' same facial details that the [`detect_faces`][rekognition_detect_faces]
#' operation provides.
#'
#' This is a stateless API operation. That is, the operation does not
#' persist any data.
#'
#' This operation requires permissions to perform the
#' `rekognition:DetectLabels` action.
#'
#' @usage
#' rekognition_detect_labels(Image, MaxLabels, MinConfidence, Features,
#' Settings)
#'
#' @param Image [required] The input image as base64-encoded bytes or an S3 object. If you use the
#' AWS CLI to call Amazon Rekognition operations, passing image bytes is
#' not supported. Images stored in an S3 Bucket do not need to be
#' base64-encoded.
#'
#' If you are using an AWS SDK to call Amazon Rekognition, you might not
#' need to base64-encode image bytes passed using the `Bytes` field. For
#' more information, see Images in the Amazon Rekognition developer guide.
#' @param MaxLabels Maximum number of labels you want the service to return in the response.
#' The service returns the specified number of highest confidence labels.
#' @param MinConfidence Specifies the minimum confidence level for the labels to return. Amazon
#' Rekognition doesn't return any labels with confidence lower than this
#' specified value.
#'
#' If `MinConfidence` is not specified, the operation returns labels with a
#' confidence values greater than or equal to 55 percent.
#' @param Features A list of the types of analysis to perform. Specifying GENERAL_LABELS
#' uses the label detection feature, while specifying IMAGE_PROPERTIES
#' returns information regarding image color and quality. If no option is
#' specified GENERAL_LABELS is used by default.
#' @param Settings A list of the filters to be applied to returned detected labels and
#' image properties. Specified filters can be inclusive, exclusive, or a
#' combination of both. Filters can be used for individual labels or label
#' categories. The exact label names or label categories must be supplied.
#' For a full list of labels and label categories, see [Detecting
#' labels](https://docs.aws.amazon.com/rekognition/latest/dg/labels.html).
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Labels = list(
#' list(
#' Name = "string",
#' Confidence = 123.0,
#' Instances = list(
#' list(
#' BoundingBox = list(
#' Width = 123.0,
#' Height = 123.0,
#' Left = 123.0,
#' Top = 123.0
#' ),
#' Confidence = 123.0,
#' DominantColors = list(
#' list(
#' Red = 123,
#' Blue = 123,
#' Green = 123,
#' HexCode = "string",
#' CSSColor = "string",
#' SimplifiedColor = "string",
#' PixelPercent = 123.0
#' )
#' )
#' )
#' ),
#' Parents = list(
#' list(
#' Name = "string"
#' )
#' ),
#' Aliases = list(
#' list(
#' Name = "string"
#' )
#' ),
#' Categories = list(
#' list(
#' Name = "string"
#' )
#' )
#' )
#' ),
#' OrientationCorrection = "ROTATE_0"|"ROTATE_90"|"ROTATE_180"|"ROTATE_270",
#' LabelModelVersion = "string",
#' ImageProperties = list(
#' Quality = list(
#' Brightness = 123.0,
#' Sharpness = 123.0,
#' Contrast = 123.0
#' ),
#' DominantColors = list(
#' list(
#' Red = 123,
#' Blue = 123,
#' Green = 123,
#' HexCode = "string",
#' CSSColor = "string",
#' SimplifiedColor = "string",
#' PixelPercent = 123.0
#' )
#' ),
#' Foreground = list(
#' Quality = list(
#' Brightness = 123.0,
#' Sharpness = 123.0,
#' Contrast = 123.0
#' ),
#' DominantColors = list(
#' list(
#' Red = 123,
#' Blue = 123,
#' Green = 123,
#' HexCode = "string",
#' CSSColor = "string",
#' SimplifiedColor = "string",
#' PixelPercent = 123.0
#' )
#' )
#' ),
#' Background = list(
#' Quality = list(
#' Brightness = 123.0,
#' Sharpness = 123.0,
#' Contrast = 123.0
#' ),
#' DominantColors = list(
#' list(
#' Red = 123,
#' Blue = 123,
#' Green = 123,
#' HexCode = "string",
#' CSSColor = "string",
#' SimplifiedColor = "string",
#' PixelPercent = 123.0
#' )
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$detect_labels(
#' Image = list(
#' Bytes = raw,
#' S3Object = list(
#' Bucket = "string",
#' Name = "string",
#' Version = "string"
#' )
#' ),
#' MaxLabels = 123,
#' MinConfidence = 123.0,
#' Features = list(
#' "GENERAL_LABELS"|"IMAGE_PROPERTIES"
#' ),
#' Settings = list(
#' GeneralLabels = list(
#' LabelInclusionFilters = list(
#' "string"
#' ),
#' LabelExclusionFilters = list(
#' "string"
#' ),
#' LabelCategoryInclusionFilters = list(
#' "string"
#' ),
#' LabelCategoryExclusionFilters = list(
#' "string"
#' )
#' ),
#' ImageProperties = list(
#' MaxDominantColors = 123
#' )
#' )
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This operation detects labels in the supplied image
#' svc$detect_labels(
#' Image = list(
#' S3Object = list(
#' Bucket = "mybucket",
#' Name = "myphoto"
#' )
#' ),
#' MaxLabels = 123L,
#' MinConfidence = 70L
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname rekognition_detect_labels
#'
#' @aliases rekognition_detect_labels
rekognition_detect_labels <- function(Image, MaxLabels = NULL, MinConfidence = NULL, Features = NULL, Settings = NULL) {
  # Detect real-world entity labels (and optionally image properties)
  # in a supplied image. Non-paginated POST operation.
  operation <- new_operation(
    name = "DetectLabels",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal caller arguments into the operation's input shape.
  op_input <- .rekognition$detect_labels_input(
    Image = Image,
    MaxLabels = MaxLabels,
    MinConfidence = MinConfidence,
    Features = Features,
    Settings = Settings
  )
  op_output <- .rekognition$detect_labels_output()
  service <- .rekognition$service(get_config())
  # Build and dispatch the request; the parsed response is the return value.
  send_request(new_request(service, operation, op_input, op_output))
}
.rekognition$operations$detect_labels <- rekognition_detect_labels
#' Detects unsafe content in a specified JPEG or PNG format image
#'
#' @description
#' Detects unsafe content in a specified JPEG or PNG format image. Use
#' [`detect_moderation_labels`][rekognition_detect_moderation_labels] to
#' moderate images depending on your requirements. For example, you might
#' want to filter images that contain nudity, but not images containing
#' suggestive content.
#'
#' To filter images, use the labels returned by
#' [`detect_moderation_labels`][rekognition_detect_moderation_labels] to
#' determine which types of content are appropriate.
#'
#' For information about moderation labels, see Detecting Unsafe Content in
#' the Amazon Rekognition Developer Guide.
#'
#' You pass the input image either as base64-encoded image bytes or as a
#' reference to an image in an Amazon S3 bucket. If you use the AWS CLI to
#' call Amazon Rekognition operations, passing image bytes is not
#' supported. The image must be either a PNG or JPEG formatted file.
#'
#' @usage
#' rekognition_detect_moderation_labels(Image, MinConfidence,
#' HumanLoopConfig)
#'
#' @param Image [required] The input image as base64-encoded bytes or an S3 object. If you use the
#' AWS CLI to call Amazon Rekognition operations, passing base64-encoded
#' image bytes is not supported.
#'
#' If you are using an AWS SDK to call Amazon Rekognition, you might not
#' need to base64-encode image bytes passed using the `Bytes` field. For
#' more information, see Images in the Amazon Rekognition developer guide.
#' @param MinConfidence Specifies the minimum confidence level for the labels to return. Amazon
#' Rekognition doesn't return any labels with a confidence level lower than
#' this specified value.
#'
#' If you don't specify `MinConfidence`, the operation returns labels with
#' confidence values greater than or equal to 50 percent.
#' @param HumanLoopConfig Sets up the configuration for human evaluation, including the
#' FlowDefinition the image will be sent to.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' ModerationLabels = list(
#' list(
#' Confidence = 123.0,
#' Name = "string",
#' ParentName = "string"
#' )
#' ),
#' ModerationModelVersion = "string",
#' HumanLoopActivationOutput = list(
#' HumanLoopArn = "string",
#' HumanLoopActivationReasons = list(
#' "string"
#' ),
#' HumanLoopActivationConditionsEvaluationResults = "string"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$detect_moderation_labels(
#' Image = list(
#' Bytes = raw,
#' S3Object = list(
#' Bucket = "string",
#' Name = "string",
#' Version = "string"
#' )
#' ),
#' MinConfidence = 123.0,
#' HumanLoopConfig = list(
#' HumanLoopName = "string",
#' FlowDefinitionArn = "string",
#' DataAttributes = list(
#' ContentClassifiers = list(
#' "FreeOfPersonallyIdentifiableInformation"|"FreeOfAdultContent"
#' )
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_detect_moderation_labels
#'
#' @aliases rekognition_detect_moderation_labels
rekognition_detect_moderation_labels <- function(Image, MinConfidence = NULL, HumanLoopConfig = NULL) {
  # Describe the DetectModerationLabels API operation (no pagination).
  operation <- new_operation(
    name = "DetectModerationLabels",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller-supplied arguments into the request shape.
  op_input <- .rekognition$detect_moderation_labels_input(
    Image = Image,
    MinConfidence = MinConfidence,
    HumanLoopConfig = HumanLoopConfig
  )
  op_output <- .rekognition$detect_moderation_labels_output()
  # Build a service client from the active configuration, then send.
  client <- .rekognition$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.rekognition$operations$detect_moderation_labels <- rekognition_detect_moderation_labels
#' Detects Personal Protective Equipment (PPE) worn by people detected in
#' an image
#'
#' @description
#' Detects Personal Protective Equipment (PPE) worn by people detected in
#' an image. Amazon Rekognition can detect the following types of PPE.
#'
#' - Face cover
#'
#' - Hand cover
#'
#' - Head cover
#'
#' You pass the input image as base64-encoded image bytes or as a reference
#' to an image in an Amazon S3 bucket. The image must be either a PNG or
#' JPG formatted file.
#'
#' [`detect_protective_equipment`][rekognition_detect_protective_equipment]
#' detects PPE worn by up to 15 persons detected in an image.
#'
#' For each person detected in the image the API returns an array of body
#' parts (face, head, left-hand, right-hand). For each body part, an array
#' of detected items of PPE is returned, including an indicator of whether
#' or not the PPE covers the body part. The API returns the confidence it
#' has in each detection (person, PPE, body part and body part coverage).
#' It also returns a bounding box (BoundingBox) for each detected person
#' and each detected item of PPE.
#'
#' You can optionally request a summary of detected PPE items with the
#' `SummarizationAttributes` input parameter. The summary provides the
#' following information.
#'
#' - The persons detected as wearing all of the types of PPE that you
#' specify.
#'
#' - The persons detected as not wearing all of the types PPE that you
#' specify.
#'
#' - The persons detected where PPE adornment could not be determined.
#'
#' This is a stateless API operation. That is, the operation does not
#' persist any data.
#'
#' This operation requires permissions to perform the
#' `rekognition:DetectProtectiveEquipment` action.
#'
#' @usage
#' rekognition_detect_protective_equipment(Image, SummarizationAttributes)
#'
#' @param Image [required] The image in which you want to detect PPE on detected persons. The image
#' can be passed as image bytes or you can reference an image stored in an
#' Amazon S3 bucket.
#' @param SummarizationAttributes An array of PPE types that you want to summarize.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' ProtectiveEquipmentModelVersion = "string",
#' Persons = list(
#' list(
#' BodyParts = list(
#' list(
#' Name = "FACE"|"HEAD"|"LEFT_HAND"|"RIGHT_HAND",
#' Confidence = 123.0,
#' EquipmentDetections = list(
#' list(
#' BoundingBox = list(
#' Width = 123.0,
#' Height = 123.0,
#' Left = 123.0,
#' Top = 123.0
#' ),
#' Confidence = 123.0,
#' Type = "FACE_COVER"|"HAND_COVER"|"HEAD_COVER",
#' CoversBodyPart = list(
#' Confidence = 123.0,
#' Value = TRUE|FALSE
#' )
#' )
#' )
#' )
#' ),
#' BoundingBox = list(
#' Width = 123.0,
#' Height = 123.0,
#' Left = 123.0,
#' Top = 123.0
#' ),
#' Confidence = 123.0,
#' Id = 123
#' )
#' ),
#' Summary = list(
#' PersonsWithRequiredEquipment = list(
#' 123
#' ),
#' PersonsWithoutRequiredEquipment = list(
#' 123
#' ),
#' PersonsIndeterminate = list(
#' 123
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$detect_protective_equipment(
#' Image = list(
#' Bytes = raw,
#' S3Object = list(
#' Bucket = "string",
#' Name = "string",
#' Version = "string"
#' )
#' ),
#' SummarizationAttributes = list(
#' MinConfidence = 123.0,
#' RequiredEquipmentTypes = list(
#' "FACE_COVER"|"HAND_COVER"|"HEAD_COVER"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_detect_protective_equipment
#'
#' @aliases rekognition_detect_protective_equipment
rekognition_detect_protective_equipment <- function(Image, SummarizationAttributes = NULL) {
  # Describe the DetectProtectiveEquipment API operation (no pagination).
  operation <- new_operation(
    name = "DetectProtectiveEquipment",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller-supplied arguments into the request shape.
  op_input <- .rekognition$detect_protective_equipment_input(
    Image = Image,
    SummarizationAttributes = SummarizationAttributes
  )
  op_output <- .rekognition$detect_protective_equipment_output()
  # Build a service client from the active configuration, then send.
  client <- .rekognition$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.rekognition$operations$detect_protective_equipment <- rekognition_detect_protective_equipment
#' Detects text in the input image and converts it into machine-readable
#' text
#'
#' @description
#' Detects text in the input image and converts it into machine-readable
#' text.
#'
#' Pass the input image as base64-encoded image bytes or as a reference to
#' an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon
#' Rekognition operations, you must pass it as a reference to an image in
#' an Amazon S3 bucket. For the AWS CLI, passing image bytes is not
#' supported. The image must be either a .png or .jpeg formatted file.
#'
#' The [`detect_text`][rekognition_detect_text] operation returns text in
#' an array of TextDetection elements, `TextDetections`. Each
#' `TextDetection` element provides information about a single word or line
#' of text that was detected in the image.
#'
#' A word is one or more script characters that are not separated by
#' spaces. [`detect_text`][rekognition_detect_text] can detect up to 100
#' words in an image.
#'
#' A line is a string of equally spaced words. A line isn't necessarily a
#' complete sentence. For example, a driver's license number is detected as
#' a line. A line ends when there is no aligned text after it. Also, a line
#' ends when there is a large gap between words, relative to the length of
#' the words. This means, depending on the gap between words, Amazon
#' Rekognition may detect multiple lines in text aligned in the same
#' direction. Periods don't represent the end of a line. If a sentence
#' spans multiple lines, the [`detect_text`][rekognition_detect_text]
#' operation returns multiple lines.
#'
#' To determine whether a `TextDetection` element is a line of text or a
#' word, use the `TextDetection` object `Type` field.
#'
#' To be detected, text must be within +/- 90 degrees orientation of the
#' horizontal axis.
#'
#' For more information, see Detecting text in the Amazon Rekognition
#' Developer Guide.
#'
#' @usage
#' rekognition_detect_text(Image, Filters)
#'
#' @param Image [required] The input image as base64-encoded bytes or an Amazon S3 object. If you
#' use the AWS CLI to call Amazon Rekognition operations, you can't pass
#' image bytes.
#'
#' If you are using an AWS SDK to call Amazon Rekognition, you might not
#' need to base64-encode image bytes passed using the `Bytes` field. For
#' more information, see Images in the Amazon Rekognition developer guide.
#' @param Filters Optional parameters that let you set the criteria that the text must
#' meet to be included in your response.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' TextDetections = list(
#' list(
#' DetectedText = "string",
#' Type = "LINE"|"WORD",
#' Id = 123,
#' ParentId = 123,
#' Confidence = 123.0,
#' Geometry = list(
#' BoundingBox = list(
#' Width = 123.0,
#' Height = 123.0,
#' Left = 123.0,
#' Top = 123.0
#' ),
#' Polygon = list(
#' list(
#' X = 123.0,
#' Y = 123.0
#' )
#' )
#' )
#' )
#' ),
#' TextModelVersion = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$detect_text(
#' Image = list(
#' Bytes = raw,
#' S3Object = list(
#' Bucket = "string",
#' Name = "string",
#' Version = "string"
#' )
#' ),
#' Filters = list(
#' WordFilter = list(
#' MinConfidence = 123.0,
#' MinBoundingBoxHeight = 123.0,
#' MinBoundingBoxWidth = 123.0
#' ),
#' RegionsOfInterest = list(
#' list(
#' BoundingBox = list(
#' Width = 123.0,
#' Height = 123.0,
#' Left = 123.0,
#' Top = 123.0
#' ),
#' Polygon = list(
#' list(
#' X = 123.0,
#' Y = 123.0
#' )
#' )
#' )
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_detect_text
#'
#' @aliases rekognition_detect_text
rekognition_detect_text <- function(Image, Filters = NULL) {
  # Describe the DetectText API operation (no pagination).
  operation <- new_operation(
    name = "DetectText",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller-supplied arguments into the request shape.
  op_input <- .rekognition$detect_text_input(Image = Image, Filters = Filters)
  op_output <- .rekognition$detect_text_output()
  # Build a service client from the active configuration, then send.
  client <- .rekognition$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.rekognition$operations$detect_text <- rekognition_detect_text
#' Removes the association between a Face supplied in an array of FaceIds
#' and the User
#'
#' @description
#' Removes the association between a `Face` supplied in an array of
#' `FaceIds` and the User. If the User is not present already, then a
#' `ResourceNotFound` exception is thrown. If successful, an array of faces
#' that are disassociated from the User is returned. If a given face is
#' already disassociated from the given UserID, it will be ignored and not
#' be returned in the response. If a given face is already associated with
#' a different User or not found in the collection it will be returned as
#' part of `UnsuccessfulDisassociations`. You can remove 1 - 100 face IDs
#' from a user at one time.
#'
#' @usage
#' rekognition_disassociate_faces(CollectionId, UserId, ClientRequestToken,
#' FaceIds)
#'
#' @param CollectionId [required] The ID of an existing collection containing the UserID.
#' @param UserId [required] ID for the existing UserID.
#' @param ClientRequestToken Idempotent token used to identify the request to
#' [`disassociate_faces`][rekognition_disassociate_faces]. If you use the
#' same token with multiple
#' [`disassociate_faces`][rekognition_disassociate_faces] requests, the
#' same response is returned. Use ClientRequestToken to prevent the same
#' request from being processed more than once.
#' @param FaceIds [required] An array of face IDs to disassociate from the UserID.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' DisassociatedFaces = list(
#' list(
#' FaceId = "string"
#' )
#' ),
#' UnsuccessfulFaceDisassociations = list(
#' list(
#' FaceId = "string",
#' UserId = "string",
#' Reasons = list(
#' "FACE_NOT_FOUND"|"ASSOCIATED_TO_A_DIFFERENT_USER"
#' )
#' )
#' ),
#' UserStatus = "ACTIVE"|"UPDATING"|"CREATING"|"CREATED"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$disassociate_faces(
#' CollectionId = "string",
#' UserId = "string",
#' ClientRequestToken = "string",
#' FaceIds = list(
#' "string"
#' )
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # Removes the association between a Face supplied in an array of FaceIds
#' # and the User.
#' svc$disassociate_faces(
#' ClientRequestToken = "550e8400-e29b-41d4-a716-446655440003",
#' CollectionId = "MyCollection",
#' FaceIds = list(
#' "f5817d37-94f6-4335-bfee-6cf79a3d806e",
#' "c92265d4-5f9c-43af-a58e-12be0ce02bc3"
#' ),
#' UserId = "DemoUser"
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname rekognition_disassociate_faces
#'
#' @aliases rekognition_disassociate_faces
rekognition_disassociate_faces <- function(CollectionId, UserId, ClientRequestToken = NULL, FaceIds) {
  # Describe the DisassociateFaces API operation (no pagination).
  operation <- new_operation(
    name = "DisassociateFaces",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller-supplied arguments into the request shape.
  op_input <- .rekognition$disassociate_faces_input(
    CollectionId = CollectionId,
    UserId = UserId,
    ClientRequestToken = ClientRequestToken,
    FaceIds = FaceIds
  )
  op_output <- .rekognition$disassociate_faces_output()
  # Build a service client from the active configuration, then send.
  client <- .rekognition$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.rekognition$operations$disassociate_faces <- rekognition_disassociate_faces
#' Distributes the entries (images) in a training dataset across the
#' training dataset and the test dataset for a project
#'
#' @description
#' Distributes the entries (images) in a training dataset across the
#' training dataset and the test dataset for a project.
#' [`distribute_dataset_entries`][rekognition_distribute_dataset_entries]
#' moves 20% of the training dataset images to the test dataset. An entry
#' is a JSON Line that describes an image.
#'
#' You supply the Amazon Resource Names (ARN) of a project's training
#' dataset and test dataset. The training dataset must contain the images
#' that you want to split. The test dataset must be empty. The datasets
#' must belong to the same project. To create training and test datasets
#' for a project, call [`create_dataset`][rekognition_create_dataset].
#'
#' Distributing a dataset takes a while to complete. To check the status
#' call [`describe_dataset`][rekognition_describe_dataset]. The operation
#' is complete when the `Status` field for the training dataset and the
#' test dataset is `UPDATE_COMPLETE`. If the dataset split fails, the value
#' of `Status` is `UPDATE_FAILED`.
#'
#' This operation requires permissions to perform the
#' `rekognition:DistributeDatasetEntries` action.
#'
#' @usage
#' rekognition_distribute_dataset_entries(Datasets)
#'
#' @param Datasets [required] The ARNS for the training dataset and test dataset that you want to use.
#' The datasets must belong to the same project. The test dataset must be
#' empty.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$distribute_dataset_entries(
#' Datasets = list(
#' list(
#' Arn = "string"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_distribute_dataset_entries
#'
#' @aliases rekognition_distribute_dataset_entries
rekognition_distribute_dataset_entries <- function(Datasets) {
  # Describe the DistributeDatasetEntries API operation (no pagination).
  operation <- new_operation(
    name = "DistributeDatasetEntries",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller-supplied arguments into the request shape.
  op_input <- .rekognition$distribute_dataset_entries_input(Datasets = Datasets)
  op_output <- .rekognition$distribute_dataset_entries_output()
  # Build a service client from the active configuration, then send.
  client <- .rekognition$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.rekognition$operations$distribute_dataset_entries <- rekognition_distribute_dataset_entries
#' Gets the name and additional information about a celebrity based on
#' their Amazon Rekognition ID
#'
#' @description
#' Gets the name and additional information about a celebrity based on
#' their Amazon Rekognition ID. The additional information is returned as
#' an array of URLs. If there is no additional information about the
#' celebrity, this list is empty.
#'
#' For more information, see Getting information about a celebrity in the
#' Amazon Rekognition Developer Guide.
#'
#' This operation requires permissions to perform the
#' `rekognition:GetCelebrityInfo` action.
#'
#' @usage
#' rekognition_get_celebrity_info(Id)
#'
#' @param Id [required] The ID for the celebrity. You get the celebrity ID from a call to the
#' [`recognize_celebrities`][rekognition_recognize_celebrities] operation,
#' which recognizes celebrities in an image.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Urls = list(
#' "string"
#' ),
#' Name = "string",
#' KnownGender = list(
#' Type = "Male"|"Female"|"Nonbinary"|"Unlisted"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$get_celebrity_info(
#' Id = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_get_celebrity_info
#'
#' @aliases rekognition_get_celebrity_info
rekognition_get_celebrity_info <- function(Id) {
  # Describe the GetCelebrityInfo API operation (no pagination).
  operation <- new_operation(
    name = "GetCelebrityInfo",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller-supplied arguments into the request shape.
  op_input <- .rekognition$get_celebrity_info_input(Id = Id)
  op_output <- .rekognition$get_celebrity_info_output()
  # Build a service client from the active configuration, then send.
  client <- .rekognition$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.rekognition$operations$get_celebrity_info <- rekognition_get_celebrity_info
#' Gets the celebrity recognition results for a Amazon Rekognition Video
#' analysis started by StartCelebrityRecognition
#'
#' @description
#' Gets the celebrity recognition results for a Amazon Rekognition Video
#' analysis started by
#' [`start_celebrity_recognition`][rekognition_start_celebrity_recognition].
#'
#' Celebrity recognition in a video is an asynchronous operation. Analysis
#' is started by a call to
#' [`start_celebrity_recognition`][rekognition_start_celebrity_recognition]
#' which returns a job identifier (`JobId`).
#'
#' When the celebrity recognition operation finishes, Amazon Rekognition
#' Video publishes a completion status to the Amazon Simple Notification
#' Service topic registered in the initial call to
#' [`start_celebrity_recognition`][rekognition_start_celebrity_recognition].
#' To get the results of the celebrity recognition analysis, first check
#' that the status value published to the Amazon SNS topic is `SUCCEEDED`.
#' If so, call `GetCelebrityDetection` and pass the job identifier
#' (`JobId`) from the initial call to `StartCelebrityDetection`.
#'
#' For more information, see Working With Stored Videos in the Amazon
#' Rekognition Developer Guide.
#'
#' [`get_celebrity_recognition`][rekognition_get_celebrity_recognition]
#' returns detected celebrities and the time(s) they are detected in an
#' array (`Celebrities`) of CelebrityRecognition objects. Each
#' `CelebrityRecognition` contains information about the celebrity in a
#' CelebrityDetail object and the time, `Timestamp`, the celebrity was
#' detected. This CelebrityDetail object stores information about the
#' detected celebrity's face attributes, a face bounding box, known gender,
#' the celebrity's name, and a confidence estimate.
#'
#' [`get_celebrity_recognition`][rekognition_get_celebrity_recognition]
#' only returns the default facial attributes (`BoundingBox`, `Confidence`,
#' `Landmarks`, `Pose`, and `Quality`). The `BoundingBox` field only
#' applies to the detected face instance. The other facial attributes
#' listed in the `Face` object of the following response syntax are not
#' returned. For more information, see FaceDetail in the Amazon Rekognition
#' Developer Guide.
#'
#' By default, the `Celebrities` array is sorted by time (milliseconds from
#' the start of the video). You can also sort the array by celebrity by
#' specifying the value `ID` in the `SortBy` input parameter.
#'
#' The `CelebrityDetail` object includes the celebrity identifier and
#' additional information URLs. If you don't store the additional
#' information URLs, you can get them later by calling
#' [`get_celebrity_info`][rekognition_get_celebrity_info] with the
#' celebrity identifier.
#'
#' No information is returned for faces not recognized as celebrities.
#'
#' Use MaxResults parameter to limit the number of labels returned. If
#' there are more results than specified in `MaxResults`, the value of
#' `NextToken` in the operation response contains a pagination token for
#' getting the next set of results. To get the next page of results, call
#' `GetCelebrityDetection` and populate the `NextToken` request parameter
#' with the token value returned from the previous call to
#' [`get_celebrity_recognition`][rekognition_get_celebrity_recognition].
#'
#' @usage
#' rekognition_get_celebrity_recognition(JobId, MaxResults, NextToken,
#' SortBy)
#'
#' @param JobId [required] Job identifier for the required celebrity recognition analysis. You can
#' get the job identifier from a call to
#' [`start_celebrity_recognition`][rekognition_start_celebrity_recognition].
#' @param MaxResults Maximum number of results to return per paginated call. The largest
#' value you can specify is 1000. If you specify a value greater than 1000,
#' a maximum of 1000 results is returned. The default value is 1000.
#' @param NextToken If the previous response was incomplete (because there are more
#' recognized celebrities to retrieve), Amazon Rekognition Video returns a
#' pagination token in the response. You can use this pagination token to
#' retrieve the next set of celebrities.
#' @param SortBy Sort to use for celebrities returned in `Celebrities` field. Specify
#' `ID` to sort by the celebrity identifier, specify `TIMESTAMP` to sort by
#' the time the celebrity was recognized.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' JobStatus = "IN_PROGRESS"|"SUCCEEDED"|"FAILED",
#' StatusMessage = "string",
#' VideoMetadata = list(
#' Codec = "string",
#' DurationMillis = 123,
#' Format = "string",
#' FrameRate = 123.0,
#' FrameHeight = 123,
#' FrameWidth = 123,
#' ColorRange = "FULL"|"LIMITED"
#' ),
#' NextToken = "string",
#' Celebrities = list(
#' list(
#' Timestamp = 123,
#' Celebrity = list(
#' Urls = list(
#' "string"
#' ),
#' Name = "string",
#' Id = "string",
#' Confidence = 123.0,
#' BoundingBox = list(
#' Width = 123.0,
#' Height = 123.0,
#' Left = 123.0,
#' Top = 123.0
#' ),
#' Face = list(
#' BoundingBox = list(
#' Width = 123.0,
#' Height = 123.0,
#' Left = 123.0,
#' Top = 123.0
#' ),
#' AgeRange = list(
#' Low = 123,
#' High = 123
#' ),
#' Smile = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' Eyeglasses = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' Sunglasses = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' Gender = list(
#' Value = "Male"|"Female",
#' Confidence = 123.0
#' ),
#' Beard = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' Mustache = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' EyesOpen = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' MouthOpen = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' Emotions = list(
#' list(
#' Type = "HAPPY"|"SAD"|"ANGRY"|"CONFUSED"|"DISGUSTED"|"SURPRISED"|"CALM"|"UNKNOWN"|"FEAR",
#' Confidence = 123.0
#' )
#' ),
#' Landmarks = list(
#' list(
#' Type = "eyeLeft"|"eyeRight"|"nose"|"mouthLeft"|"mouthRight"|"leftEyeBrowLeft"|"leftEyeBrowRight"|"leftEyeBrowUp"|"rightEyeBrowLeft"|"rightEyeBrowRight"|"rightEyeBrowUp"|"leftEyeLeft"|"leftEyeRight"|"leftEyeUp"|"leftEyeDown"|"rightEyeLeft"|"rightEyeRight"|"rightEyeUp"|"rightEyeDown"|"noseLeft"|"noseRight"|"mouthUp"|"mouthDown"|"leftPupil"|"rightPupil"|"upperJawlineLeft"|"midJawlineLeft"|"chinBottom"|"midJawlineRight"|"upperJawlineRight",
#' X = 123.0,
#' Y = 123.0
#' )
#' ),
#' Pose = list(
#' Roll = 123.0,
#' Yaw = 123.0,
#' Pitch = 123.0
#' ),
#' Quality = list(
#' Brightness = 123.0,
#' Sharpness = 123.0
#' ),
#' Confidence = 123.0,
#' FaceOccluded = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' EyeDirection = list(
#' Yaw = 123.0,
#' Pitch = 123.0,
#' Confidence = 123.0
#' )
#' ),
#' KnownGender = list(
#' Type = "Male"|"Female"|"Nonbinary"|"Unlisted"
#' )
#' )
#' )
#' ),
#' JobId = "string",
#' Video = list(
#' S3Object = list(
#' Bucket = "string",
#' Name = "string",
#' Version = "string"
#' )
#' ),
#' JobTag = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$get_celebrity_recognition(
#' JobId = "string",
#' MaxResults = 123,
#' NextToken = "string",
#' SortBy = "ID"|"TIMESTAMP"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_get_celebrity_recognition
#'
#' @aliases rekognition_get_celebrity_recognition
rekognition_get_celebrity_recognition <- function(JobId, MaxResults = NULL, NextToken = NULL, SortBy = NULL) {
  # Describe the GetCelebrityRecognition API operation; results are
  # paginated via NextToken with page size controlled by MaxResults.
  operation <- new_operation(
    name = "GetCelebrityRecognition",
    http_method = "POST",
    http_path = "/",
    paginator = list(input_token = "NextToken", limit_key = "MaxResults", output_token = "NextToken")
  )
  # Marshal the caller-supplied arguments into the request shape.
  op_input <- .rekognition$get_celebrity_recognition_input(
    JobId = JobId,
    MaxResults = MaxResults,
    NextToken = NextToken,
    SortBy = SortBy
  )
  op_output <- .rekognition$get_celebrity_recognition_output()
  # Build a service client from the active configuration, then send.
  client <- .rekognition$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.rekognition$operations$get_celebrity_recognition <- rekognition_get_celebrity_recognition
#' Gets the inappropriate, unwanted, or offensive content analysis results
#' for a Amazon Rekognition Video analysis started by
#' StartContentModeration
#'
#' @description
#' Gets the inappropriate, unwanted, or offensive content analysis results
#' for a Amazon Rekognition Video analysis started by
#' [`start_content_moderation`][rekognition_start_content_moderation]. For
#' a list of moderation labels in Amazon Rekognition, see [Using the image
#' and video moderation
#' APIs](https://docs.aws.amazon.com/rekognition/latest/dg/moderation.html#moderation-api).
#'
#' Amazon Rekognition Video inappropriate or offensive content detection in
#' a stored video is an asynchronous operation. You start analysis by
#' calling
#' [`start_content_moderation`][rekognition_start_content_moderation] which
#' returns a job identifier (`JobId`). When analysis finishes, Amazon
#' Rekognition Video publishes a completion status to the Amazon Simple
#' Notification Service topic registered in the initial call to
#' [`start_content_moderation`][rekognition_start_content_moderation]. To
#' get the results of the content analysis, first check that the status
#' value published to the Amazon SNS topic is `SUCCEEDED`. If so, call
#' [`get_content_moderation`][rekognition_get_content_moderation] and pass
#' the job identifier (`JobId`) from the initial call to
#' [`start_content_moderation`][rekognition_start_content_moderation].
#'
#' For more information, see Working with Stored Videos in the Amazon
#' Rekognition Developer Guide.
#'
#' [`get_content_moderation`][rekognition_get_content_moderation] returns
#' detected inappropriate, unwanted, or offensive content moderation
#' labels, and the time they are detected, in an array, `ModerationLabels`,
#' of ContentModerationDetection objects.
#'
#' By default, the moderated labels are returned sorted by time, in
#' milliseconds from the start of the video. You can also sort them by
#' moderated label by specifying `NAME` for the `SortBy` input parameter.
#'
#' Since video analysis can return a large number of results, use the
#' `MaxResults` parameter to limit the number of labels returned in a
#' single call to
#' [`get_content_moderation`][rekognition_get_content_moderation]. If there
#' are more results than specified in `MaxResults`, the value of
#' `NextToken` in the operation response contains a pagination token for
#' getting the next set of results. To get the next page of results, call
#' [`get_content_moderation`][rekognition_get_content_moderation] and
#' populate the `NextToken` request parameter with the value of `NextToken`
#' returned from the previous call to
#' [`get_content_moderation`][rekognition_get_content_moderation].
#'
#' For more information, see moderating content in the Amazon Rekognition
#' Developer Guide.
#'
#' @usage
#' rekognition_get_content_moderation(JobId, MaxResults, NextToken, SortBy,
#' AggregateBy)
#'
#' @param JobId [required] The identifier for the inappropriate, unwanted, or offensive content
#' moderation job. Use `JobId` to identify the job in a subsequent call to
#' [`get_content_moderation`][rekognition_get_content_moderation].
#' @param MaxResults Maximum number of results to return per paginated call. The largest
#' value you can specify is 1000. If you specify a value greater than 1000,
#' a maximum of 1000 results is returned. The default value is 1000.
#' @param NextToken If the previous response was incomplete (because there is more data to
#' retrieve), Amazon Rekognition returns a pagination token in the
#' response. You can use this pagination token to retrieve the next set of
#' content moderation labels.
#' @param SortBy Sort to use for elements in the `ModerationLabelDetections` array. Use
#' `TIMESTAMP` to sort array elements by the time labels are detected. Use
#' `NAME` to alphabetically group elements for a label together. Within
#' each label group, the array element are sorted by detection confidence.
#' The default sort is by `TIMESTAMP`.
#' @param AggregateBy Defines how to aggregate results of the StartContentModeration request.
#' Default aggregation option is TIMESTAMPS. SEGMENTS mode aggregates
#' moderation labels over time.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' JobStatus = "IN_PROGRESS"|"SUCCEEDED"|"FAILED",
#' StatusMessage = "string",
#' VideoMetadata = list(
#' Codec = "string",
#' DurationMillis = 123,
#' Format = "string",
#' FrameRate = 123.0,
#' FrameHeight = 123,
#' FrameWidth = 123,
#' ColorRange = "FULL"|"LIMITED"
#' ),
#' ModerationLabels = list(
#' list(
#' Timestamp = 123,
#' ModerationLabel = list(
#' Confidence = 123.0,
#' Name = "string",
#' ParentName = "string"
#' ),
#' StartTimestampMillis = 123,
#' EndTimestampMillis = 123,
#' DurationMillis = 123
#' )
#' ),
#' NextToken = "string",
#' ModerationModelVersion = "string",
#' JobId = "string",
#' Video = list(
#' S3Object = list(
#' Bucket = "string",
#' Name = "string",
#' Version = "string"
#' )
#' ),
#' JobTag = "string",
#' GetRequestMetadata = list(
#' SortBy = "NAME"|"TIMESTAMP",
#' AggregateBy = "TIMESTAMPS"|"SEGMENTS"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$get_content_moderation(
#' JobId = "string",
#' MaxResults = 123,
#' NextToken = "string",
#' SortBy = "NAME"|"TIMESTAMP",
#' AggregateBy = "TIMESTAMPS"|"SEGMENTS"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_get_content_moderation
#'
#' @aliases rekognition_get_content_moderation
rekognition_get_content_moderation <- function(JobId, MaxResults = NULL, NextToken = NULL, SortBy = NULL, AggregateBy = NULL) {
  # Describe the GetContentModeration API operation; results are
  # paginated via NextToken with page size controlled by MaxResults.
  operation <- new_operation(
    name = "GetContentModeration",
    http_method = "POST",
    http_path = "/",
    paginator = list(input_token = "NextToken", limit_key = "MaxResults", output_token = "NextToken")
  )
  # Marshal the caller-supplied arguments into the request shape.
  op_input <- .rekognition$get_content_moderation_input(
    JobId = JobId,
    MaxResults = MaxResults,
    NextToken = NextToken,
    SortBy = SortBy,
    AggregateBy = AggregateBy
  )
  op_output <- .rekognition$get_content_moderation_output()
  # Build a service client from the active configuration, then send.
  client <- .rekognition$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.rekognition$operations$get_content_moderation <- rekognition_get_content_moderation
#' Gets face detection results for a Amazon Rekognition Video analysis
#' started by StartFaceDetection
#'
#' @description
#' Gets face detection results for a Amazon Rekognition Video analysis
#' started by [`start_face_detection`][rekognition_start_face_detection].
#'
#' Face detection with Amazon Rekognition Video is an asynchronous
#' operation. You start face detection by calling
#' [`start_face_detection`][rekognition_start_face_detection] which returns
#' a job identifier (`JobId`). When the face detection operation finishes,
#' Amazon Rekognition Video publishes a completion status to the Amazon
#' Simple Notification Service topic registered in the initial call to
#' [`start_face_detection`][rekognition_start_face_detection]. To get the
#' results of the face detection operation, first check that the status
#' value published to the Amazon SNS topic is `SUCCEEDED`. If so, call
#' [`get_face_detection`][rekognition_get_face_detection] and pass the job
#' identifier (`JobId`) from the initial call to
#' [`start_face_detection`][rekognition_start_face_detection].
#'
#' [`get_face_detection`][rekognition_get_face_detection] returns an array
#' of detected faces (`Faces`) sorted by the time the faces were detected.
#'
#' Use MaxResults parameter to limit the number of labels returned. If
#' there are more results than specified in `MaxResults`, the value of
#' `NextToken` in the operation response contains a pagination token for
#' getting the next set of results. To get the next page of results, call
#' [`get_face_detection`][rekognition_get_face_detection] and populate the
#' `NextToken` request parameter with the token value returned from the
#' previous call to [`get_face_detection`][rekognition_get_face_detection].
#'
#' @usage
#' rekognition_get_face_detection(JobId, MaxResults, NextToken)
#'
#' @param JobId [required] Unique identifier for the face detection job. The `JobId` is returned
#' from [`start_face_detection`][rekognition_start_face_detection].
#' @param MaxResults Maximum number of results to return per paginated call. The largest
#' value you can specify is 1000. If you specify a value greater than 1000,
#' a maximum of 1000 results is returned. The default value is 1000.
#' @param NextToken If the previous response was incomplete (because there are more faces to
#' retrieve), Amazon Rekognition Video returns a pagination token in the
#' response. You can use this pagination token to retrieve the next set of
#' faces.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' JobStatus = "IN_PROGRESS"|"SUCCEEDED"|"FAILED",
#' StatusMessage = "string",
#' VideoMetadata = list(
#' Codec = "string",
#' DurationMillis = 123,
#' Format = "string",
#' FrameRate = 123.0,
#' FrameHeight = 123,
#' FrameWidth = 123,
#' ColorRange = "FULL"|"LIMITED"
#' ),
#' NextToken = "string",
#' Faces = list(
#' list(
#' Timestamp = 123,
#' Face = list(
#' BoundingBox = list(
#' Width = 123.0,
#' Height = 123.0,
#' Left = 123.0,
#' Top = 123.0
#' ),
#' AgeRange = list(
#' Low = 123,
#' High = 123
#' ),
#' Smile = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' Eyeglasses = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' Sunglasses = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' Gender = list(
#' Value = "Male"|"Female",
#' Confidence = 123.0
#' ),
#' Beard = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' Mustache = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' EyesOpen = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' MouthOpen = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' Emotions = list(
#' list(
#' Type = "HAPPY"|"SAD"|"ANGRY"|"CONFUSED"|"DISGUSTED"|"SURPRISED"|"CALM"|"UNKNOWN"|"FEAR",
#' Confidence = 123.0
#' )
#' ),
#' Landmarks = list(
#' list(
#' Type = "eyeLeft"|"eyeRight"|"nose"|"mouthLeft"|"mouthRight"|"leftEyeBrowLeft"|"leftEyeBrowRight"|"leftEyeBrowUp"|"rightEyeBrowLeft"|"rightEyeBrowRight"|"rightEyeBrowUp"|"leftEyeLeft"|"leftEyeRight"|"leftEyeUp"|"leftEyeDown"|"rightEyeLeft"|"rightEyeRight"|"rightEyeUp"|"rightEyeDown"|"noseLeft"|"noseRight"|"mouthUp"|"mouthDown"|"leftPupil"|"rightPupil"|"upperJawlineLeft"|"midJawlineLeft"|"chinBottom"|"midJawlineRight"|"upperJawlineRight",
#' X = 123.0,
#' Y = 123.0
#' )
#' ),
#' Pose = list(
#' Roll = 123.0,
#' Yaw = 123.0,
#' Pitch = 123.0
#' ),
#' Quality = list(
#' Brightness = 123.0,
#' Sharpness = 123.0
#' ),
#' Confidence = 123.0,
#' FaceOccluded = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' EyeDirection = list(
#' Yaw = 123.0,
#' Pitch = 123.0,
#' Confidence = 123.0
#' )
#' )
#' )
#' ),
#' JobId = "string",
#' Video = list(
#' S3Object = list(
#' Bucket = "string",
#' Name = "string",
#' Version = "string"
#' )
#' ),
#' JobTag = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$get_face_detection(
#' JobId = "string",
#' MaxResults = 123,
#' NextToken = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_get_face_detection
#'
#' @aliases rekognition_get_face_detection
rekognition_get_face_detection <- function(JobId, MaxResults = NULL, NextToken = NULL) {
  # Operation descriptor for GetFaceDetection, with pagination metadata.
  operation <- new_operation(
    name = "GetFaceDetection",
    http_method = "POST",
    http_path = "/",
    paginator = list(input_token = "NextToken", limit_key = "MaxResults", output_token = "NextToken")
  )
  # Serialize the arguments into the request structure the service expects.
  req_body <- .rekognition$get_face_detection_input(
    JobId = JobId, MaxResults = MaxResults, NextToken = NextToken
  )
  resp_shape <- .rekognition$get_face_detection_output()
  # Create a client from the active configuration and issue the call.
  client <- .rekognition$service(get_config())
  send_request(new_request(client, operation, req_body, resp_shape))
}
.rekognition$operations$get_face_detection <- rekognition_get_face_detection
#' Retrieves the results of a specific Face Liveness session
#'
#' @description
#' Retrieves the results of a specific Face Liveness session. It requires
#' the `sessionId` as input, which was created using
#' [`create_face_liveness_session`][rekognition_create_face_liveness_session].
#' Returns the corresponding Face Liveness confidence score, a reference
#' image that includes a face bounding box, and audit images that also
#' contain face bounding boxes. The Face Liveness confidence score ranges
#' from 0 to 100. The reference image can optionally be returned.
#'
#' @usage
#' rekognition_get_face_liveness_session_results(SessionId)
#'
#' @param SessionId [required] A unique 128-bit UUID. This is used to uniquely identify the session and
#' also acts as an idempotency token for all operations associated with the
#' session.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' SessionId = "string",
#' Status = "CREATED"|"IN_PROGRESS"|"SUCCEEDED"|"FAILED"|"EXPIRED",
#' Confidence = 123.0,
#' ReferenceImage = list(
#' Bytes = raw,
#' S3Object = list(
#' Bucket = "string",
#' Name = "string",
#' Version = "string"
#' ),
#' BoundingBox = list(
#' Width = 123.0,
#' Height = 123.0,
#' Left = 123.0,
#' Top = 123.0
#' )
#' ),
#' AuditImages = list(
#' list(
#' Bytes = raw,
#' S3Object = list(
#' Bucket = "string",
#' Name = "string",
#' Version = "string"
#' ),
#' BoundingBox = list(
#' Width = 123.0,
#' Height = 123.0,
#' Left = 123.0,
#' Top = 123.0
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$get_face_liveness_session_results(
#' SessionId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_get_face_liveness_session_results
#'
#' @aliases rekognition_get_face_liveness_session_results
rekognition_get_face_liveness_session_results <- function(SessionId) {
  # Operation descriptor; GetFaceLivenessSessionResults is not paginated.
  operation <- new_operation(
    name = "GetFaceLivenessSessionResults",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Only the session identifier travels in the request body.
  req_body <- .rekognition$get_face_liveness_session_results_input(SessionId = SessionId)
  resp_shape <- .rekognition$get_face_liveness_session_results_output()
  # Build a client from the current configuration and return the response.
  client <- .rekognition$service(get_config())
  send_request(new_request(client, operation, req_body, resp_shape))
}
.rekognition$operations$get_face_liveness_session_results <- rekognition_get_face_liveness_session_results
#' Gets the face search results for Amazon Rekognition Video face search
#' started by StartFaceSearch
#'
#' @description
#' Gets the face search results for Amazon Rekognition Video face search
#' started by [`start_face_search`][rekognition_start_face_search]. The
#' search returns faces in a collection that match the faces of persons
#' detected in a video. It also includes the time(s) that faces are matched
#' in the video.
#'
#' Face search in a video is an asynchronous operation. You start face
#' search by calling to
#' [`start_face_search`][rekognition_start_face_search] which returns a job
#' identifier (`JobId`). When the search operation finishes, Amazon
#' Rekognition Video publishes a completion status to the Amazon Simple
#' Notification Service topic registered in the initial call to
#' [`start_face_search`][rekognition_start_face_search]. To get the search
#' results, first check that the status value published to the Amazon SNS
#' topic is `SUCCEEDED`. If so, call
#' [`get_face_search`][rekognition_get_face_search] and pass the job
#' identifier (`JobId`) from the initial call to
#' [`start_face_search`][rekognition_start_face_search].
#'
#' For more information, see Searching Faces in a Collection in the Amazon
#' Rekognition Developer Guide.
#'
#' The search results are returned in an array, `Persons`, of PersonMatch
#' objects. Each `PersonMatch` element contains details about the matching
#' faces in the input collection, person information (facial attributes,
#' bounding boxes, and person identifer) for the matched person, and the
#' time the person was matched in the video.
#'
#' [`get_face_search`][rekognition_get_face_search] only returns the
#' default facial attributes (`BoundingBox`, `Confidence`, `Landmarks`,
#' `Pose`, and `Quality`). The other facial attributes listed in the `Face`
#' object of the following response syntax are not returned. For more
#' information, see FaceDetail in the Amazon Rekognition Developer Guide.
#'
#' By default, the `Persons` array is sorted by the time, in milliseconds
#' from the start of the video, persons are matched. You can also sort by
#' persons by specifying `INDEX` for the `SORTBY` input parameter.
#'
#' @usage
#' rekognition_get_face_search(JobId, MaxResults, NextToken, SortBy)
#'
#' @param JobId [required] The job identifier for the search request. You get the job identifier
#' from an initial call to
#' [`start_face_search`][rekognition_start_face_search].
#' @param MaxResults Maximum number of results to return per paginated call. The largest
#' value you can specify is 1000. If you specify a value greater than 1000,
#' a maximum of 1000 results is returned. The default value is 1000.
#' @param NextToken If the previous response was incomplete (because there is more search
#' results to retrieve), Amazon Rekognition Video returns a pagination
#' token in the response. You can use this pagination token to retrieve the
#' next set of search results.
#' @param SortBy Sort to use for grouping faces in the response. Use `TIMESTAMP` to group
#' faces by the time that they are recognized. Use `INDEX` to sort by
#' recognized faces.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' JobStatus = "IN_PROGRESS"|"SUCCEEDED"|"FAILED",
#' StatusMessage = "string",
#' NextToken = "string",
#' VideoMetadata = list(
#' Codec = "string",
#' DurationMillis = 123,
#' Format = "string",
#' FrameRate = 123.0,
#' FrameHeight = 123,
#' FrameWidth = 123,
#' ColorRange = "FULL"|"LIMITED"
#' ),
#' Persons = list(
#' list(
#' Timestamp = 123,
#' Person = list(
#' Index = 123,
#' BoundingBox = list(
#' Width = 123.0,
#' Height = 123.0,
#' Left = 123.0,
#' Top = 123.0
#' ),
#' Face = list(
#' BoundingBox = list(
#' Width = 123.0,
#' Height = 123.0,
#' Left = 123.0,
#' Top = 123.0
#' ),
#' AgeRange = list(
#' Low = 123,
#' High = 123
#' ),
#' Smile = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' Eyeglasses = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' Sunglasses = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' Gender = list(
#' Value = "Male"|"Female",
#' Confidence = 123.0
#' ),
#' Beard = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' Mustache = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' EyesOpen = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' MouthOpen = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' Emotions = list(
#' list(
#' Type = "HAPPY"|"SAD"|"ANGRY"|"CONFUSED"|"DISGUSTED"|"SURPRISED"|"CALM"|"UNKNOWN"|"FEAR",
#' Confidence = 123.0
#' )
#' ),
#' Landmarks = list(
#' list(
#' Type = "eyeLeft"|"eyeRight"|"nose"|"mouthLeft"|"mouthRight"|"leftEyeBrowLeft"|"leftEyeBrowRight"|"leftEyeBrowUp"|"rightEyeBrowLeft"|"rightEyeBrowRight"|"rightEyeBrowUp"|"leftEyeLeft"|"leftEyeRight"|"leftEyeUp"|"leftEyeDown"|"rightEyeLeft"|"rightEyeRight"|"rightEyeUp"|"rightEyeDown"|"noseLeft"|"noseRight"|"mouthUp"|"mouthDown"|"leftPupil"|"rightPupil"|"upperJawlineLeft"|"midJawlineLeft"|"chinBottom"|"midJawlineRight"|"upperJawlineRight",
#' X = 123.0,
#' Y = 123.0
#' )
#' ),
#' Pose = list(
#' Roll = 123.0,
#' Yaw = 123.0,
#' Pitch = 123.0
#' ),
#' Quality = list(
#' Brightness = 123.0,
#' Sharpness = 123.0
#' ),
#' Confidence = 123.0,
#' FaceOccluded = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' EyeDirection = list(
#' Yaw = 123.0,
#' Pitch = 123.0,
#' Confidence = 123.0
#' )
#' )
#' ),
#' FaceMatches = list(
#' list(
#' Similarity = 123.0,
#' Face = list(
#' FaceId = "string",
#' BoundingBox = list(
#' Width = 123.0,
#' Height = 123.0,
#' Left = 123.0,
#' Top = 123.0
#' ),
#' ImageId = "string",
#' ExternalImageId = "string",
#' Confidence = 123.0,
#' IndexFacesModelVersion = "string",
#' UserId = "string"
#' )
#' )
#' )
#' )
#' ),
#' JobId = "string",
#' Video = list(
#' S3Object = list(
#' Bucket = "string",
#' Name = "string",
#' Version = "string"
#' )
#' ),
#' JobTag = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$get_face_search(
#' JobId = "string",
#' MaxResults = 123,
#' NextToken = "string",
#' SortBy = "INDEX"|"TIMESTAMP"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_get_face_search
#'
#' @aliases rekognition_get_face_search
rekognition_get_face_search <- function(JobId, MaxResults = NULL, NextToken = NULL, SortBy = NULL) {
  # Operation descriptor for GetFaceSearch, with pagination metadata.
  operation <- new_operation(
    name = "GetFaceSearch",
    http_method = "POST",
    http_path = "/",
    paginator = list(input_token = "NextToken", limit_key = "MaxResults", output_token = "NextToken")
  )
  # Marshal the caller-supplied arguments into the request shape.
  req_body <- .rekognition$get_face_search_input(
    JobId = JobId, MaxResults = MaxResults, NextToken = NextToken, SortBy = SortBy
  )
  resp_shape <- .rekognition$get_face_search_output()
  # Create a client from the active configuration, send, and return.
  client <- .rekognition$service(get_config())
  send_request(new_request(client, operation, req_body, resp_shape))
}
.rekognition$operations$get_face_search <- rekognition_get_face_search
#' Gets the label detection results of a Amazon Rekognition Video analysis
#' started by StartLabelDetection
#'
#' @description
#' Gets the label detection results of a Amazon Rekognition Video analysis
#' started by [`start_label_detection`][rekognition_start_label_detection].
#'
#' The label detection operation is started by a call to
#' [`start_label_detection`][rekognition_start_label_detection] which
#' returns a job identifier (`JobId`). When the label detection operation
#' finishes, Amazon Rekognition publishes a completion status to the Amazon
#' Simple Notification Service topic registered in the initial call to
#' `StartlabelDetection`.
#'
#' To get the results of the label detection operation, first check that
#' the status value published to the Amazon SNS topic is `SUCCEEDED`. If
#' so, call [`get_label_detection`][rekognition_get_label_detection] and
#' pass the job identifier (`JobId`) from the initial call to
#' [`start_label_detection`][rekognition_start_label_detection].
#'
#' [`get_label_detection`][rekognition_get_label_detection] returns an
#' array of detected labels (`Labels`) sorted by the time the labels were
#' detected. You can also sort by the label name by specifying `NAME` for
#' the `SortBy` input parameter. If there is no `NAME` specified, the
#' default sort is by timestamp.
#'
#' You can select how results are aggregated by using the `AggregateBy`
#' input parameter. The default aggregation method is `TIMESTAMPS`. You can
#' also aggregate by `SEGMENTS`, which aggregates all instances of labels
#' detected in a given segment.
#'
#' The returned Labels array may include the following attributes:
#'
#' - Name - The name of the detected label.
#'
#' - Confidence - The level of confidence in the label assigned to a
#' detected object.
#'
#' - Parents - The ancestor labels for a detected label.
#' GetLabelDetection returns a hierarchical taxonomy of detected
#' labels. For example, a detected car might be assigned the label car.
#' The label car has two parent labels: Vehicle (its parent) and
#' Transportation (its grandparent). The response includes the all
#' ancestors for a label, where every ancestor is a unique label. In
#' the previous example, Car, Vehicle, and Transportation are returned
#' as unique labels in the response.
#'
#' - Aliases - Possible Aliases for the label.
#'
#' - Categories - The label categories that the detected label belongs
#' to.
#'
#' - BoundingBox — Bounding boxes are described for all instances of
#' detected common object labels, returned in an array of Instance
#' objects. An Instance object contains a BoundingBox object,
#' describing the location of the label on the input image. It also
#' includes the confidence for the accuracy of the detected bounding
#' box.
#'
#' - Timestamp - Time, in milliseconds from the start of the video, that
#' the label was detected. For aggregation by `SEGMENTS`, the
#' `StartTimestampMillis`, `EndTimestampMillis`, and `DurationMillis`
#' structures are what define a segment. Although the “Timestamp”
#' structure is still returned with each label, its value is set to be
#' the same as `StartTimestampMillis`.
#'
#' Timestamp and Bounding box information are returned for detected
#' Instances, only if aggregation is done by `TIMESTAMPS`. If aggregating
#' by `SEGMENTS`, information about detected instances isn’t returned.
#'
#' The version of the label model used for the detection is also returned.
#'
#' **Note `DominantColors` isn't returned for `Instances`, although it is
#' shown as part of the response in the sample seen below.**
#'
#' Use `MaxResults` parameter to limit the number of labels returned. If
#' there are more results than specified in `MaxResults`, the value of
#' `NextToken` in the operation response contains a pagination token for
#' getting the next set of results. To get the next page of results, call
#' `GetlabelDetection` and populate the `NextToken` request parameter with
#' the token value returned from the previous call to
#' [`get_label_detection`][rekognition_get_label_detection].
#'
#' @usage
#' rekognition_get_label_detection(JobId, MaxResults, NextToken, SortBy,
#' AggregateBy)
#'
#' @param JobId [required] Job identifier for the label detection operation for which you want
#' results returned. You get the job identifier from an initial call to
#' `StartlabelDetection`.
#' @param MaxResults Maximum number of results to return per paginated call. The largest
#' value you can specify is 1000. If you specify a value greater than 1000,
#' a maximum of 1000 results is returned. The default value is 1000.
#' @param NextToken If the previous response was incomplete (because there are more labels
#' to retrieve), Amazon Rekognition Video returns a pagination token in the
#' response. You can use this pagination token to retrieve the next set of
#' labels.
#' @param SortBy Sort to use for elements in the `Labels` array. Use `TIMESTAMP` to sort
#' array elements by the time labels are detected. Use `NAME` to
#' alphabetically group elements for a label together. Within each label
#' group, the array element are sorted by detection confidence. The default
#' sort is by `TIMESTAMP`.
#' @param AggregateBy Defines how to aggregate the returned results. Results can be aggregated
#' by timestamps or segments.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' JobStatus = "IN_PROGRESS"|"SUCCEEDED"|"FAILED",
#' StatusMessage = "string",
#' VideoMetadata = list(
#' Codec = "string",
#' DurationMillis = 123,
#' Format = "string",
#' FrameRate = 123.0,
#' FrameHeight = 123,
#' FrameWidth = 123,
#' ColorRange = "FULL"|"LIMITED"
#' ),
#' NextToken = "string",
#' Labels = list(
#' list(
#' Timestamp = 123,
#' Label = list(
#' Name = "string",
#' Confidence = 123.0,
#' Instances = list(
#' list(
#' BoundingBox = list(
#' Width = 123.0,
#' Height = 123.0,
#' Left = 123.0,
#' Top = 123.0
#' ),
#' Confidence = 123.0,
#' DominantColors = list(
#' list(
#' Red = 123,
#' Blue = 123,
#' Green = 123,
#' HexCode = "string",
#' CSSColor = "string",
#' SimplifiedColor = "string",
#' PixelPercent = 123.0
#' )
#' )
#' )
#' ),
#' Parents = list(
#' list(
#' Name = "string"
#' )
#' ),
#' Aliases = list(
#' list(
#' Name = "string"
#' )
#' ),
#' Categories = list(
#' list(
#' Name = "string"
#' )
#' )
#' ),
#' StartTimestampMillis = 123,
#' EndTimestampMillis = 123,
#' DurationMillis = 123
#' )
#' ),
#' LabelModelVersion = "string",
#' JobId = "string",
#' Video = list(
#' S3Object = list(
#' Bucket = "string",
#' Name = "string",
#' Version = "string"
#' )
#' ),
#' JobTag = "string",
#' GetRequestMetadata = list(
#' SortBy = "NAME"|"TIMESTAMP",
#' AggregateBy = "TIMESTAMPS"|"SEGMENTS"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$get_label_detection(
#' JobId = "string",
#' MaxResults = 123,
#' NextToken = "string",
#' SortBy = "NAME"|"TIMESTAMP",
#' AggregateBy = "TIMESTAMPS"|"SEGMENTS"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_get_label_detection
#'
#' @aliases rekognition_get_label_detection
rekognition_get_label_detection <- function(JobId, MaxResults = NULL, NextToken = NULL, SortBy = NULL, AggregateBy = NULL) {
  # Operation descriptor for GetLabelDetection, with pagination metadata.
  operation <- new_operation(
    name = "GetLabelDetection",
    http_method = "POST",
    http_path = "/",
    paginator = list(input_token = "NextToken", limit_key = "MaxResults", output_token = "NextToken")
  )
  # Serialize the arguments into the request structure the service expects.
  req_body <- .rekognition$get_label_detection_input(
    JobId = JobId, MaxResults = MaxResults, NextToken = NextToken,
    SortBy = SortBy, AggregateBy = AggregateBy
  )
  resp_shape <- .rekognition$get_label_detection_output()
  # Build a client from the current configuration and issue the call.
  client <- .rekognition$service(get_config())
  send_request(new_request(client, operation, req_body, resp_shape))
}
.rekognition$operations$get_label_detection <- rekognition_get_label_detection
#' Gets the path tracking results of a Amazon Rekognition Video analysis
#' started by StartPersonTracking
#'
#' @description
#' Gets the path tracking results of a Amazon Rekognition Video analysis
#' started by [`start_person_tracking`][rekognition_start_person_tracking].
#'
#' The person path tracking operation is started by a call to
#' [`start_person_tracking`][rekognition_start_person_tracking] which
#' returns a job identifier (`JobId`). When the operation finishes, Amazon
#' Rekognition Video publishes a completion status to the Amazon Simple
#' Notification Service topic registered in the initial call to
#' [`start_person_tracking`][rekognition_start_person_tracking].
#'
#' To get the results of the person path tracking operation, first check
#' that the status value published to the Amazon SNS topic is `SUCCEEDED`.
#' If so, call [`get_person_tracking`][rekognition_get_person_tracking] and
#' pass the job identifier (`JobId`) from the initial call to
#' [`start_person_tracking`][rekognition_start_person_tracking].
#'
#' [`get_person_tracking`][rekognition_get_person_tracking] returns an
#' array, `Persons`, of tracked persons and the time(s) their paths were
#' tracked in the video.
#'
#' [`get_person_tracking`][rekognition_get_person_tracking] only returns
#' the default facial attributes (`BoundingBox`, `Confidence`, `Landmarks`,
#' `Pose`, and `Quality`). The other facial attributes listed in the `Face`
#' object of the following response syntax are not returned.
#'
#' For more information, see FaceDetail in the Amazon Rekognition Developer
#' Guide.
#'
#' By default, the array is sorted by the time(s) a person's path is
#' tracked in the video. You can sort by tracked persons by specifying
#' `INDEX` for the `SortBy` input parameter.
#'
#' Use the `MaxResults` parameter to limit the number of items returned. If
#' there are more results than specified in `MaxResults`, the value of
#' `NextToken` in the operation response contains a pagination token for
#' getting the next set of results. To get the next page of results, call
#' [`get_person_tracking`][rekognition_get_person_tracking] and populate
#' the `NextToken` request parameter with the token value returned from the
#' previous call to
#' [`get_person_tracking`][rekognition_get_person_tracking].
#'
#' @usage
#' rekognition_get_person_tracking(JobId, MaxResults, NextToken, SortBy)
#'
#' @param JobId [required] The identifier for a job that tracks persons in a video. You get the
#' `JobId` from a call to
#' [`start_person_tracking`][rekognition_start_person_tracking].
#' @param MaxResults Maximum number of results to return per paginated call. The largest
#' value you can specify is 1000. If you specify a value greater than 1000,
#' a maximum of 1000 results is returned. The default value is 1000.
#' @param NextToken If the previous response was incomplete (because there are more persons
#' to retrieve), Amazon Rekognition Video returns a pagination token in the
#' response. You can use this pagination token to retrieve the next set of
#' persons.
#' @param SortBy Sort to use for elements in the `Persons` array. Use `TIMESTAMP` to sort
#' array elements by the time persons are detected. Use `INDEX` to sort by
#' the tracked persons. If you sort by `INDEX`, the array elements for each
#' person are sorted by detection confidence. The default sort is by
#' `TIMESTAMP`.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' JobStatus = "IN_PROGRESS"|"SUCCEEDED"|"FAILED",
#' StatusMessage = "string",
#' VideoMetadata = list(
#' Codec = "string",
#' DurationMillis = 123,
#' Format = "string",
#' FrameRate = 123.0,
#' FrameHeight = 123,
#' FrameWidth = 123,
#' ColorRange = "FULL"|"LIMITED"
#' ),
#' NextToken = "string",
#' Persons = list(
#' list(
#' Timestamp = 123,
#' Person = list(
#' Index = 123,
#' BoundingBox = list(
#' Width = 123.0,
#' Height = 123.0,
#' Left = 123.0,
#' Top = 123.0
#' ),
#' Face = list(
#' BoundingBox = list(
#' Width = 123.0,
#' Height = 123.0,
#' Left = 123.0,
#' Top = 123.0
#' ),
#' AgeRange = list(
#' Low = 123,
#' High = 123
#' ),
#' Smile = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' Eyeglasses = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' Sunglasses = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' Gender = list(
#' Value = "Male"|"Female",
#' Confidence = 123.0
#' ),
#' Beard = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' Mustache = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' EyesOpen = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' MouthOpen = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' Emotions = list(
#' list(
#' Type = "HAPPY"|"SAD"|"ANGRY"|"CONFUSED"|"DISGUSTED"|"SURPRISED"|"CALM"|"UNKNOWN"|"FEAR",
#' Confidence = 123.0
#' )
#' ),
#' Landmarks = list(
#' list(
#' Type = "eyeLeft"|"eyeRight"|"nose"|"mouthLeft"|"mouthRight"|"leftEyeBrowLeft"|"leftEyeBrowRight"|"leftEyeBrowUp"|"rightEyeBrowLeft"|"rightEyeBrowRight"|"rightEyeBrowUp"|"leftEyeLeft"|"leftEyeRight"|"leftEyeUp"|"leftEyeDown"|"rightEyeLeft"|"rightEyeRight"|"rightEyeUp"|"rightEyeDown"|"noseLeft"|"noseRight"|"mouthUp"|"mouthDown"|"leftPupil"|"rightPupil"|"upperJawlineLeft"|"midJawlineLeft"|"chinBottom"|"midJawlineRight"|"upperJawlineRight",
#' X = 123.0,
#' Y = 123.0
#' )
#' ),
#' Pose = list(
#' Roll = 123.0,
#' Yaw = 123.0,
#' Pitch = 123.0
#' ),
#' Quality = list(
#' Brightness = 123.0,
#' Sharpness = 123.0
#' ),
#' Confidence = 123.0,
#' FaceOccluded = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' EyeDirection = list(
#' Yaw = 123.0,
#' Pitch = 123.0,
#' Confidence = 123.0
#' )
#' )
#' )
#' )
#' ),
#' JobId = "string",
#' Video = list(
#' S3Object = list(
#' Bucket = "string",
#' Name = "string",
#' Version = "string"
#' )
#' ),
#' JobTag = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$get_person_tracking(
#' JobId = "string",
#' MaxResults = 123,
#' NextToken = "string",
#' SortBy = "INDEX"|"TIMESTAMP"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_get_person_tracking
#'
#' @aliases rekognition_get_person_tracking
rekognition_get_person_tracking <- function(JobId, MaxResults = NULL, NextToken = NULL, SortBy = NULL) {
  # Operation descriptor for GetPersonTracking, with pagination metadata.
  operation <- new_operation(
    name = "GetPersonTracking",
    http_method = "POST",
    http_path = "/",
    paginator = list(input_token = "NextToken", limit_key = "MaxResults", output_token = "NextToken")
  )
  # Marshal the caller-supplied arguments into the request shape.
  req_body <- .rekognition$get_person_tracking_input(
    JobId = JobId, MaxResults = MaxResults, NextToken = NextToken, SortBy = SortBy
  )
  resp_shape <- .rekognition$get_person_tracking_output()
  # Create a client from the active configuration, send, and return.
  client <- .rekognition$service(get_config())
  send_request(new_request(client, operation, req_body, resp_shape))
}
.rekognition$operations$get_person_tracking <- rekognition_get_person_tracking
#' Gets the segment detection results of a Amazon Rekognition Video
#' analysis started by StartSegmentDetection
#'
#' @description
#' Gets the segment detection results of a Amazon Rekognition Video
#' analysis started by
#' [`start_segment_detection`][rekognition_start_segment_detection].
#'
#' Segment detection with Amazon Rekognition Video is an asynchronous
#' operation. You start segment detection by calling
#' [`start_segment_detection`][rekognition_start_segment_detection] which
#' returns a job identifier (`JobId`). When the segment detection operation
#' finishes, Amazon Rekognition publishes a completion status to the Amazon
#' Simple Notification Service topic registered in the initial call to
#' [`start_segment_detection`][rekognition_start_segment_detection]. To get
#' the results of the segment detection operation, first check that the
#' status value published to the Amazon SNS topic is `SUCCEEDED`. if so,
#' call [`get_segment_detection`][rekognition_get_segment_detection] and
#' pass the job identifier (`JobId`) from the initial call of
#' [`start_segment_detection`][rekognition_start_segment_detection].
#'
#' [`get_segment_detection`][rekognition_get_segment_detection] returns
#' detected segments in an array (`Segments`) of SegmentDetection objects.
#' `Segments` is sorted by the segment types specified in the
#' `SegmentTypes` input parameter of
#' [`start_segment_detection`][rekognition_start_segment_detection]. Each
#' element of the array includes the detected segment, the percentage
#' confidence in the accuracy of the detected segment, the type of the
#' segment, and the frame in which the segment was detected.
#'
#' Use `SelectedSegmentTypes` to find out the type of segment detection
#' requested in the call to
#' [`start_segment_detection`][rekognition_start_segment_detection].
#'
#' Use the `MaxResults` parameter to limit the number of segment detections
#' returned. If there are more results than specified in `MaxResults`, the
#' value of `NextToken` in the operation response contains a pagination
#' token for getting the next set of results. To get the next page of
#' results, call
#' [`get_segment_detection`][rekognition_get_segment_detection] and
#' populate the `NextToken` request parameter with the token value returned
#' from the previous call to
#' [`get_segment_detection`][rekognition_get_segment_detection].
#'
#' For more information, see Detecting video segments in stored video in
#' the Amazon Rekognition Developer Guide.
#'
#' @usage
#' rekognition_get_segment_detection(JobId, MaxResults, NextToken)
#'
#' @param JobId [required] Job identifier for the segment detection operation for which you
#' want results returned. You get the job identifier from an initial call to
#' [`start_segment_detection`][rekognition_start_segment_detection].
#' @param MaxResults Maximum number of results to return per paginated call. The largest
#' value you can specify is 1000.
#' @param NextToken If the response is truncated, Amazon Rekognition Video returns this
#' token that you can use in the subsequent request to retrieve the next
#' set of text.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' JobStatus = "IN_PROGRESS"|"SUCCEEDED"|"FAILED",
#' StatusMessage = "string",
#' VideoMetadata = list(
#' list(
#' Codec = "string",
#' DurationMillis = 123,
#' Format = "string",
#' FrameRate = 123.0,
#' FrameHeight = 123,
#' FrameWidth = 123,
#' ColorRange = "FULL"|"LIMITED"
#' )
#' ),
#' AudioMetadata = list(
#' list(
#' Codec = "string",
#' DurationMillis = 123,
#' SampleRate = 123,
#' NumberOfChannels = 123
#' )
#' ),
#' NextToken = "string",
#' Segments = list(
#' list(
#' Type = "TECHNICAL_CUE"|"SHOT",
#' StartTimestampMillis = 123,
#' EndTimestampMillis = 123,
#' DurationMillis = 123,
#' StartTimecodeSMPTE = "string",
#' EndTimecodeSMPTE = "string",
#' DurationSMPTE = "string",
#' TechnicalCueSegment = list(
#' Type = "ColorBars"|"EndCredits"|"BlackFrames"|"OpeningCredits"|"StudioLogo"|"Slate"|"Content",
#' Confidence = 123.0
#' ),
#' ShotSegment = list(
#' Index = 123,
#' Confidence = 123.0
#' ),
#' StartFrameNumber = 123,
#' EndFrameNumber = 123,
#' DurationFrames = 123
#' )
#' ),
#' SelectedSegmentTypes = list(
#' list(
#' Type = "TECHNICAL_CUE"|"SHOT",
#' ModelVersion = "string"
#' )
#' ),
#' JobId = "string",
#' Video = list(
#' S3Object = list(
#' Bucket = "string",
#' Name = "string",
#' Version = "string"
#' )
#' ),
#' JobTag = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$get_segment_detection(
#' JobId = "string",
#' MaxResults = 123,
#' NextToken = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_get_segment_detection
#'
#' @aliases rekognition_get_segment_detection
rekognition_get_segment_detection <- function(JobId, MaxResults = NULL, NextToken = NULL) {
  # Operation descriptor: a paginated POST to the service root, with
  # NextToken/MaxResults driving the pagination protocol.
  operation <- new_operation(
    name = "GetSegmentDetection",
    http_method = "POST",
    http_path = "/",
    paginator = list(input_token = "NextToken", limit_key = "MaxResults", output_token = "NextToken")
  )
  # Marshal the caller-supplied arguments and declare the expected
  # response shape for unmarshalling.
  op_input <- .rekognition$get_segment_detection_input(JobId = JobId, MaxResults = MaxResults, NextToken = NextToken)
  op_output <- .rekognition$get_segment_detection_output()
  # Build a service client from the active configuration and dispatch.
  client <- .rekognition$service(get_config())
  req <- new_request(client, operation, op_input, op_output)
  send_request(req)
}
.rekognition$operations$get_segment_detection <- rekognition_get_segment_detection
#' Gets the text detection results of a Amazon Rekognition Video analysis
#' started by StartTextDetection
#'
#' @description
#' Gets the text detection results of a Amazon Rekognition Video analysis
#' started by [`start_text_detection`][rekognition_start_text_detection].
#'
#' Text detection with Amazon Rekognition Video is an asynchronous
#' operation. You start text detection by calling
#' [`start_text_detection`][rekognition_start_text_detection] which returns
#' a job identifier (`JobId`). When the text detection operation finishes,
#' Amazon Rekognition publishes a completion status to the Amazon Simple
#' Notification Service topic registered in the initial call to
#' [`start_text_detection`][rekognition_start_text_detection]. To get the
#' results of the text detection operation, first check that the status
#' value published to the Amazon SNS topic is `SUCCEEDED`. If so, call
#' [`get_text_detection`][rekognition_get_text_detection] and pass the job
#' identifier (`JobId`) from the initial call of
#' [`start_text_detection`][rekognition_start_text_detection].
#'
#' [`get_text_detection`][rekognition_get_text_detection] returns an array
#' of detected text (`TextDetections`) sorted by the time the text was
#' detected, up to 50 words per frame of video.
#'
#' Each element of the array includes the detected text, the percentage
#' confidence in the accuracy of the detected text, the time the text was
#' detected, bounding box information for where the text was located, and
#' unique identifiers for words and their lines.
#'
#' Use MaxResults parameter to limit the number of text detections
#' returned. If there are more results than specified in `MaxResults`, the
#' value of `NextToken` in the operation response contains a pagination
#' token for getting the next set of results. To get the next page of
#' results, call [`get_text_detection`][rekognition_get_text_detection] and
#' populate the `NextToken` request parameter with the token value returned
#' from the previous call to
#' [`get_text_detection`][rekognition_get_text_detection].
#'
#' @usage
#' rekognition_get_text_detection(JobId, MaxResults, NextToken)
#'
#' @param JobId [required] Job identifier for the text detection operation for which you want
#' results returned. You get the job identifier from an initial call to
#' [`start_text_detection`][rekognition_start_text_detection].
#' @param MaxResults Maximum number of results to return per paginated call. The largest
#' value you can specify is 1000.
#' @param NextToken If the previous response was incomplete (because there are more labels
#' to retrieve), Amazon Rekognition Video returns a pagination token in the
#' response. You can use this pagination token to retrieve the next set of
#' text.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' JobStatus = "IN_PROGRESS"|"SUCCEEDED"|"FAILED",
#' StatusMessage = "string",
#' VideoMetadata = list(
#' Codec = "string",
#' DurationMillis = 123,
#' Format = "string",
#' FrameRate = 123.0,
#' FrameHeight = 123,
#' FrameWidth = 123,
#' ColorRange = "FULL"|"LIMITED"
#' ),
#' TextDetections = list(
#' list(
#' Timestamp = 123,
#' TextDetection = list(
#' DetectedText = "string",
#' Type = "LINE"|"WORD",
#' Id = 123,
#' ParentId = 123,
#' Confidence = 123.0,
#' Geometry = list(
#' BoundingBox = list(
#' Width = 123.0,
#' Height = 123.0,
#' Left = 123.0,
#' Top = 123.0
#' ),
#' Polygon = list(
#' list(
#' X = 123.0,
#' Y = 123.0
#' )
#' )
#' )
#' )
#' )
#' ),
#' NextToken = "string",
#' TextModelVersion = "string",
#' JobId = "string",
#' Video = list(
#' S3Object = list(
#' Bucket = "string",
#' Name = "string",
#' Version = "string"
#' )
#' ),
#' JobTag = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$get_text_detection(
#' JobId = "string",
#' MaxResults = 123,
#' NextToken = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_get_text_detection
#'
#' @aliases rekognition_get_text_detection
rekognition_get_text_detection <- function(JobId, MaxResults = NULL, NextToken = NULL) {
  # Describe the GetTextDetection call: a POST to "/" that paginates on
  # NextToken, capped per page by MaxResults.
  operation <- new_operation(
    name = "GetTextDetection",
    http_method = "POST",
    http_path = "/",
    paginator = list(input_token = "NextToken", limit_key = "MaxResults", output_token = "NextToken")
  )
  # Serialize the request parameters and the response structure definition.
  params <- .rekognition$get_text_detection_input(JobId = JobId, MaxResults = MaxResults, NextToken = NextToken)
  shape <- .rekognition$get_text_detection_output()
  # Create a client under the current configuration, then send the request.
  cfg <- get_config()
  client <- .rekognition$service(cfg)
  send_request(new_request(client, operation, params, shape))
}
.rekognition$operations$get_text_detection <- rekognition_get_text_detection
#' Detects faces in the input image and adds them to the specified
#' collection
#'
#' @description
#' Detects faces in the input image and adds them to the specified
#' collection.
#'
#' Amazon Rekognition doesn't save the actual faces that are detected.
#' Instead, the underlying detection algorithm first detects the faces in
#' the input image. For each face, the algorithm extracts facial features
#' into a feature vector, and stores it in the backend database. Amazon
#' Rekognition uses feature vectors when it performs face match and search
#' operations using the [`search_faces`][rekognition_search_faces] and
#' [`search_faces_by_image`][rekognition_search_faces_by_image] operations.
#'
#' For more information, see Adding faces to a collection in the Amazon
#' Rekognition Developer Guide.
#'
#' To get the number of faces in a collection, call
#' [`describe_collection`][rekognition_describe_collection].
#'
#' If you're using version 1.0 of the face detection model,
#' [`index_faces`][rekognition_index_faces] indexes the 15 largest faces in
#' the input image. Later versions of the face detection model index the
#' 100 largest faces in the input image.
#'
#' If you're using version 4 or later of the face model, image orientation
#' information is not returned in the `OrientationCorrection` field.
#'
#' To determine which version of the model you're using, call
#' [`describe_collection`][rekognition_describe_collection] and supply the
#' collection ID. You can also get the model version from the value of
#' `FaceModelVersion` in the response from
#' [`index_faces`][rekognition_index_faces]
#'
#' For more information, see Model Versioning in the Amazon Rekognition
#' Developer Guide.
#'
#' If you provide the optional `ExternalImageId` for the input image you
#' provided, Amazon Rekognition associates this ID with all faces that it
#' detects. When you call the [`list_faces`][rekognition_list_faces]
#' operation, the response returns the external ID. You can use this
#' external image ID to create a client-side index to associate the faces
#' with each image. You can then use the index to find all faces in an
#' image.
#'
#' You can specify the maximum number of faces to index with the `MaxFaces`
#' input parameter. This is useful when you want to index the largest faces
#' in an image and don't want to index smaller faces, such as those
#' belonging to people standing in the background.
#'
#' The `QualityFilter` input parameter allows you to filter out detected
#' faces that don’t meet a required quality bar. The quality bar is based
#' on a variety of common use cases. By default,
#' [`index_faces`][rekognition_index_faces] chooses the quality bar that's
#' used to filter faces. You can also explicitly choose the quality bar.
#' Use `QualityFilter`, to set the quality bar by specifying `LOW`,
#' `MEDIUM`, or `HIGH`. If you do not want to filter detected faces,
#' specify `NONE`.
#'
#' To use quality filtering, you need a collection associated with version
#' 3 of the face model or higher. To get the version of the face model
#' associated with a collection, call
#' [`describe_collection`][rekognition_describe_collection].
#'
#' Information about faces detected in an image, but not indexed, is
#' returned in an array of UnindexedFace objects, `UnindexedFaces`. Faces
#' aren't indexed for reasons such as:
#'
#' - The number of faces detected exceeds the value of the `MaxFaces`
#' request parameter.
#'
#' - The face is too small compared to the image dimensions.
#'
#' - The face is too blurry.
#'
#' - The image is too dark.
#'
#' - The face has an extreme pose.
#'
#' - The face doesn’t have enough detail to be suitable for face search.
#'
#' In response, the [`index_faces`][rekognition_index_faces] operation
#' returns an array of metadata for all detected faces, `FaceRecords`. This
#' includes:
#'
#' - The bounding box, `BoundingBox`, of the detected face.
#'
#' - A confidence value, `Confidence`, which indicates the confidence
#' that the bounding box contains a face.
#'
#' - A face ID, `FaceId`, assigned by the service for each face that's
#' detected and stored.
#'
#' - An image ID, `ImageId`, assigned by the service for the input image.
#'
#' If you request `ALL` or specific facial attributes (e.g.,
#' `FACE_OCCLUDED`) by using the detectionAttributes parameter, Amazon
#' Rekognition returns detailed facial attributes, such as facial landmarks
#' (for example, location of eye and mouth), facial occlusion, and other
#' facial attributes.
#'
#' If you provide the same image, specify the same collection, and use the
#' same external ID in the [`index_faces`][rekognition_index_faces]
#' operation, Amazon Rekognition doesn't save duplicate face metadata.
#'
#' The input image is passed either as base64-encoded image bytes, or as a
#' reference to an image in an Amazon S3 bucket. If you use the AWS CLI to
#' call Amazon Rekognition operations, passing image bytes isn't supported.
#' The image must be formatted as a PNG or JPEG file.
#'
#' This operation requires permissions to perform the
#' `rekognition:IndexFaces` action.
#'
#' @usage
#' rekognition_index_faces(CollectionId, Image, ExternalImageId,
#' DetectionAttributes, MaxFaces, QualityFilter)
#'
#' @param CollectionId [required] The ID of an existing collection to which you want to add the faces that
#' are detected in the input images.
#' @param Image [required] The input image as base64-encoded bytes or an S3 object. If you use the
#' AWS CLI to call Amazon Rekognition operations, passing base64-encoded
#' image bytes isn't supported.
#'
#' If you are using an AWS SDK to call Amazon Rekognition, you might not
#' need to base64-encode image bytes passed using the `Bytes` field. For
#' more information, see Images in the Amazon Rekognition developer guide.
#' @param ExternalImageId The ID you want to assign to all the faces detected in the image.
#' @param DetectionAttributes An array of facial attributes you want to be returned. A `DEFAULT`
#' subset of facial attributes - `BoundingBox`, `Confidence`, `Pose`,
#' `Quality`, and `Landmarks` - will always be returned. You can request
#' for specific facial attributes (in addition to the default list) - by
#' using `["DEFAULT", "FACE_OCCLUDED"]` or just `["FACE_OCCLUDED"]`. You
#' can request for all facial attributes by using `["ALL"]`. Requesting
#' more attributes may increase response time.
#'
#' If you provide both, `["ALL", "DEFAULT"]`, the service uses a logical
#' AND operator to determine which attributes to return (in this case, all
#' attributes).
#' @param MaxFaces The maximum number of faces to index. The value of `MaxFaces` must be
#' greater than or equal to 1. [`index_faces`][rekognition_index_faces]
#' returns no more than 100 detected faces in an image, even if you specify
#' a larger value for `MaxFaces`.
#'
#' If [`index_faces`][rekognition_index_faces] detects more faces than the
#' value of `MaxFaces`, the faces with the lowest quality are filtered out
#' first. If there are still more faces than the value of `MaxFaces`, the
#' faces with the smallest bounding boxes are filtered out (up to the
#' number that's needed to satisfy the value of `MaxFaces`). Information
#' about the unindexed faces is available in the `UnindexedFaces` array.
#'
#' The faces that are returned by [`index_faces`][rekognition_index_faces]
#' are sorted by the largest face bounding box size to the smallest size,
#' in descending order.
#'
#' `MaxFaces` can be used with a collection associated with any version of
#' the face model.
#' @param QualityFilter A filter that specifies a quality bar for how much filtering is done to
#' identify faces. Filtered faces aren't indexed. If you specify `AUTO`,
#' Amazon Rekognition chooses the quality bar. If you specify `LOW`,
#' `MEDIUM`, or `HIGH`, filtering removes all faces that don’t meet the
#' chosen quality bar. The default value is `AUTO`. The quality bar is
#' based on a variety of common use cases. Low-quality detections can occur
#' for a number of reasons. Some examples are an object that's
#' misidentified as a face, a face that's too blurry, or a face with a pose
#' that's too extreme to use. If you specify `NONE`, no filtering is
#' performed.
#'
#' To use quality filtering, the collection you are using must be
#' associated with version 3 of the face model or higher.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' FaceRecords = list(
#' list(
#' Face = list(
#' FaceId = "string",
#' BoundingBox = list(
#' Width = 123.0,
#' Height = 123.0,
#' Left = 123.0,
#' Top = 123.0
#' ),
#' ImageId = "string",
#' ExternalImageId = "string",
#' Confidence = 123.0,
#' IndexFacesModelVersion = "string",
#' UserId = "string"
#' ),
#' FaceDetail = list(
#' BoundingBox = list(
#' Width = 123.0,
#' Height = 123.0,
#' Left = 123.0,
#' Top = 123.0
#' ),
#' AgeRange = list(
#' Low = 123,
#' High = 123
#' ),
#' Smile = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' Eyeglasses = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' Sunglasses = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' Gender = list(
#' Value = "Male"|"Female",
#' Confidence = 123.0
#' ),
#' Beard = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' Mustache = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' EyesOpen = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' MouthOpen = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' Emotions = list(
#' list(
#' Type = "HAPPY"|"SAD"|"ANGRY"|"CONFUSED"|"DISGUSTED"|"SURPRISED"|"CALM"|"UNKNOWN"|"FEAR",
#' Confidence = 123.0
#' )
#' ),
#' Landmarks = list(
#' list(
#' Type = "eyeLeft"|"eyeRight"|"nose"|"mouthLeft"|"mouthRight"|"leftEyeBrowLeft"|"leftEyeBrowRight"|"leftEyeBrowUp"|"rightEyeBrowLeft"|"rightEyeBrowRight"|"rightEyeBrowUp"|"leftEyeLeft"|"leftEyeRight"|"leftEyeUp"|"leftEyeDown"|"rightEyeLeft"|"rightEyeRight"|"rightEyeUp"|"rightEyeDown"|"noseLeft"|"noseRight"|"mouthUp"|"mouthDown"|"leftPupil"|"rightPupil"|"upperJawlineLeft"|"midJawlineLeft"|"chinBottom"|"midJawlineRight"|"upperJawlineRight",
#' X = 123.0,
#' Y = 123.0
#' )
#' ),
#' Pose = list(
#' Roll = 123.0,
#' Yaw = 123.0,
#' Pitch = 123.0
#' ),
#' Quality = list(
#' Brightness = 123.0,
#' Sharpness = 123.0
#' ),
#' Confidence = 123.0,
#' FaceOccluded = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' EyeDirection = list(
#' Yaw = 123.0,
#' Pitch = 123.0,
#' Confidence = 123.0
#' )
#' )
#' )
#' ),
#' OrientationCorrection = "ROTATE_0"|"ROTATE_90"|"ROTATE_180"|"ROTATE_270",
#' FaceModelVersion = "string",
#' UnindexedFaces = list(
#' list(
#' Reasons = list(
#' "EXCEEDS_MAX_FACES"|"EXTREME_POSE"|"LOW_BRIGHTNESS"|"LOW_SHARPNESS"|"LOW_CONFIDENCE"|"SMALL_BOUNDING_BOX"|"LOW_FACE_QUALITY"
#' ),
#' FaceDetail = list(
#' BoundingBox = list(
#' Width = 123.0,
#' Height = 123.0,
#' Left = 123.0,
#' Top = 123.0
#' ),
#' AgeRange = list(
#' Low = 123,
#' High = 123
#' ),
#' Smile = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' Eyeglasses = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' Sunglasses = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' Gender = list(
#' Value = "Male"|"Female",
#' Confidence = 123.0
#' ),
#' Beard = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' Mustache = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' EyesOpen = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' MouthOpen = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' Emotions = list(
#' list(
#' Type = "HAPPY"|"SAD"|"ANGRY"|"CONFUSED"|"DISGUSTED"|"SURPRISED"|"CALM"|"UNKNOWN"|"FEAR",
#' Confidence = 123.0
#' )
#' ),
#' Landmarks = list(
#' list(
#' Type = "eyeLeft"|"eyeRight"|"nose"|"mouthLeft"|"mouthRight"|"leftEyeBrowLeft"|"leftEyeBrowRight"|"leftEyeBrowUp"|"rightEyeBrowLeft"|"rightEyeBrowRight"|"rightEyeBrowUp"|"leftEyeLeft"|"leftEyeRight"|"leftEyeUp"|"leftEyeDown"|"rightEyeLeft"|"rightEyeRight"|"rightEyeUp"|"rightEyeDown"|"noseLeft"|"noseRight"|"mouthUp"|"mouthDown"|"leftPupil"|"rightPupil"|"upperJawlineLeft"|"midJawlineLeft"|"chinBottom"|"midJawlineRight"|"upperJawlineRight",
#' X = 123.0,
#' Y = 123.0
#' )
#' ),
#' Pose = list(
#' Roll = 123.0,
#' Yaw = 123.0,
#' Pitch = 123.0
#' ),
#' Quality = list(
#' Brightness = 123.0,
#' Sharpness = 123.0
#' ),
#' Confidence = 123.0,
#' FaceOccluded = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' EyeDirection = list(
#' Yaw = 123.0,
#' Pitch = 123.0,
#' Confidence = 123.0
#' )
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$index_faces(
#' CollectionId = "string",
#' Image = list(
#' Bytes = raw,
#' S3Object = list(
#' Bucket = "string",
#' Name = "string",
#' Version = "string"
#' )
#' ),
#' ExternalImageId = "string",
#' DetectionAttributes = list(
#' "DEFAULT"|"ALL"|"AGE_RANGE"|"BEARD"|"EMOTIONS"|"EYE_DIRECTION"|"EYEGLASSES"|"EYES_OPEN"|"GENDER"|"MOUTH_OPEN"|"MUSTACHE"|"FACE_OCCLUDED"|"SMILE"|"SUNGLASSES"
#' ),
#' MaxFaces = 123,
#' QualityFilter = "NONE"|"AUTO"|"LOW"|"MEDIUM"|"HIGH"
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This operation detects faces in an image and adds them to the specified
#' # Rekognition collection.
#' svc$index_faces(
#' CollectionId = "myphotos",
#' DetectionAttributes = list(),
#' ExternalImageId = "myphotoid",
#' Image = list(
#' S3Object = list(
#' Bucket = "mybucket",
#' Name = "myphoto"
#' )
#' )
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname rekognition_index_faces
#'
#' @aliases rekognition_index_faces
rekognition_index_faces <- function(CollectionId, Image, ExternalImageId = NULL, DetectionAttributes = NULL, MaxFaces = NULL, QualityFilter = NULL) {
  # IndexFaces is a plain (non-paginated) POST operation.
  operation <- new_operation(
    name = "IndexFaces",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the inputs and declare the output shape used to decode the reply.
  op_input <- .rekognition$index_faces_input(CollectionId = CollectionId, Image = Image, ExternalImageId = ExternalImageId, DetectionAttributes = DetectionAttributes, MaxFaces = MaxFaces, QualityFilter = QualityFilter)
  op_output <- .rekognition$index_faces_output()
  # Instantiate the Rekognition client from the active config and dispatch.
  client <- .rekognition$service(get_config())
  req <- new_request(client, operation, op_input, op_output)
  send_request(req)
}
.rekognition$operations$index_faces <- rekognition_index_faces
#' Returns list of collection IDs in your account
#'
#' @description
#' Returns list of collection IDs in your account. If the result is
#' truncated, the response also provides a `NextToken` that you can use in
#' the subsequent request to fetch the next set of collection IDs.
#'
#' For an example, see Listing collections in the Amazon Rekognition
#' Developer Guide.
#'
#' This operation requires permissions to perform the
#' `rekognition:ListCollections` action.
#'
#' @usage
#' rekognition_list_collections(NextToken, MaxResults)
#'
#' @param NextToken Pagination token from the previous response.
#' @param MaxResults Maximum number of collection IDs to return.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' CollectionIds = list(
#' "string"
#' ),
#' NextToken = "string",
#' FaceModelVersions = list(
#' "string"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$list_collections(
#' NextToken = "string",
#' MaxResults = 123
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This operation returns a list of Rekognition collections.
#' svc$list_collections()
#' }
#'
#' @keywords internal
#'
#' @rdname rekognition_list_collections
#'
#' @aliases rekognition_list_collections
rekognition_list_collections <- function(NextToken = NULL, MaxResults = NULL) {
  # ListCollections paginates on NextToken and accumulates CollectionIds
  # as the paginated result key.
  operation <- new_operation(
    name = "ListCollections",
    http_method = "POST",
    http_path = "/",
    paginator = list(input_token = "NextToken", limit_key = "MaxResults", output_token = "NextToken", result_key = "CollectionIds")
  )
  # Encode the request arguments and the expected response structure.
  params <- .rekognition$list_collections_input(NextToken = NextToken, MaxResults = MaxResults)
  shape <- .rekognition$list_collections_output()
  # Build the client from the active configuration and send.
  cfg <- get_config()
  client <- .rekognition$service(cfg)
  send_request(new_request(client, operation, params, shape))
}
.rekognition$operations$list_collections <- rekognition_list_collections
#' Lists the entries (images) within a dataset
#'
#' @description
#' Lists the entries (images) within a dataset. An entry is a JSON Line
#' that contains the information for a single image, including the image
#' location, assigned labels, and object location bounding boxes. For more
#' information, see [Creating a manifest
#' file](https://docs.aws.amazon.com/rekognition/latest/customlabels-dg/).
#'
#' JSON Lines in the response include information about non-terminal errors
#' found in the dataset. Non terminal errors are reported in `errors` lists
#' within each JSON Line. The same information is reported in the training
#' and testing validation result manifests that Amazon Rekognition Custom
#' Labels creates during model training.
#'
#' You can filter the response in variety of ways, such as choosing which
#' labels to return and returning JSON Lines created after a specific date.
#'
#' This operation requires permissions to perform the
#' `rekognition:ListDatasetEntries` action.
#'
#' @usage
#' rekognition_list_dataset_entries(DatasetArn, ContainsLabels, Labeled,
#' SourceRefContains, HasErrors, NextToken, MaxResults)
#'
#' @param DatasetArn [required] The Amazon Resource Name (ARN) for the dataset that you want to use.
#' @param ContainsLabels Specifies a label filter for the response. The response includes an
#' entry only if one or more of the labels in `ContainsLabels` exist in the
#' entry.
#' @param Labeled Specify `true` to get only the JSON Lines where the image is labeled.
#' Specify `false` to get only the JSON Lines where the image isn't
#' labeled. If you don't specify `Labeled`,
#' [`list_dataset_entries`][rekognition_list_dataset_entries] returns JSON
#' Lines for labeled and unlabeled images.
#' @param SourceRefContains If specified, [`list_dataset_entries`][rekognition_list_dataset_entries]
#' only returns JSON Lines where the value of `SourceRefContains` is part
#' of the `source-ref` field. The `source-ref` field contains the Amazon S3
#' location of the image. You can use `SourceRefContains` for tasks such as
#' getting the JSON Line for a single image, or getting JSON Lines for all
#' images within a specific folder.
#' @param HasErrors Specifies an error filter for the response. Specify `True` to only
#' include entries that have errors.
#' @param NextToken If the previous response was incomplete (because there are more results
#' to retrieve), Amazon Rekognition Custom Labels returns a pagination
#' token in the response. You can use this pagination token to retrieve the
#' next set of results.
#' @param MaxResults The maximum number of results to return per paginated call. The largest
#' value you can specify is 100. If you specify a value greater than 100, a
#' ValidationException error occurs. The default value is 100.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' DatasetEntries = list(
#' "string"
#' ),
#' NextToken = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$list_dataset_entries(
#' DatasetArn = "string",
#' ContainsLabels = list(
#' "string"
#' ),
#' Labeled = TRUE|FALSE,
#' SourceRefContains = "string",
#' HasErrors = TRUE|FALSE,
#' NextToken = "string",
#' MaxResults = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_list_dataset_entries
#'
#' @aliases rekognition_list_dataset_entries
rekognition_list_dataset_entries <- function(DatasetArn, ContainsLabels = NULL, Labeled = NULL, SourceRefContains = NULL, HasErrors = NULL, NextToken = NULL, MaxResults = NULL) {
  # ListDatasetEntries paginates on NextToken; DatasetEntries is the
  # result key accumulated across pages.
  operation <- new_operation(
    name = "ListDatasetEntries",
    http_method = "POST",
    http_path = "/",
    paginator = list(input_token = "NextToken", limit_key = "MaxResults", output_token = "NextToken", result_key = "DatasetEntries")
  )
  # Marshal all filter arguments into the request payload and declare the
  # response shape.
  op_input <- .rekognition$list_dataset_entries_input(DatasetArn = DatasetArn, ContainsLabels = ContainsLabels, Labeled = Labeled, SourceRefContains = SourceRefContains, HasErrors = HasErrors, NextToken = NextToken, MaxResults = MaxResults)
  op_output <- .rekognition$list_dataset_entries_output()
  # Construct a client from the current configuration and dispatch.
  client <- .rekognition$service(get_config())
  req <- new_request(client, operation, op_input, op_output)
  send_request(req)
}
.rekognition$operations$list_dataset_entries <- rekognition_list_dataset_entries
#' Lists the labels in a dataset
#'
#' @description
#' Lists the labels in a dataset. Amazon Rekognition Custom Labels uses
#' labels to describe images. For more information, see [Labeling
#' images](https://docs.aws.amazon.com/rekognition/latest/customlabels-dg/md-labeling-images.html).
#'
#' Lists the labels in a dataset. Amazon Rekognition Custom Labels uses
#' labels to describe images. For more information, see Labeling images in
#' the *Amazon Rekognition Custom Labels Developer Guide*.
#'
#' @usage
#' rekognition_list_dataset_labels(DatasetArn, NextToken, MaxResults)
#'
#' @param DatasetArn [required] The Amazon Resource Name (ARN) of the dataset that you want to use.
#' @param NextToken If the previous response was incomplete (because there are more results
#' to retrieve), Amazon Rekognition Custom Labels returns a pagination
#' token in the response. You can use this pagination token to retrieve the
#' next set of results.
#' @param MaxResults The maximum number of results to return per paginated call. The largest
#' value you can specify is 100. If you specify a value greater than 100, a
#' ValidationException error occurs. The default value is 100.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' DatasetLabelDescriptions = list(
#' list(
#' LabelName = "string",
#' LabelStats = list(
#' EntryCount = 123,
#' BoundingBoxCount = 123
#' )
#' )
#' ),
#' NextToken = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$list_dataset_labels(
#' DatasetArn = "string",
#' NextToken = "string",
#' MaxResults = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_list_dataset_labels
#'
#' @aliases rekognition_list_dataset_labels
rekognition_list_dataset_labels <- function(DatasetArn, NextToken = NULL, MaxResults = NULL) {
  # ListDatasetLabels paginates on NextToken, collecting
  # DatasetLabelDescriptions across pages.
  operation <- new_operation(
    name = "ListDatasetLabels",
    http_method = "POST",
    http_path = "/",
    paginator = list(input_token = "NextToken", limit_key = "MaxResults", output_token = "NextToken", result_key = "DatasetLabelDescriptions")
  )
  # Serialize request parameters and the response structure definition.
  params <- .rekognition$list_dataset_labels_input(DatasetArn = DatasetArn, NextToken = NextToken, MaxResults = MaxResults)
  shape <- .rekognition$list_dataset_labels_output()
  # Build the service client with the active configuration, then send.
  cfg <- get_config()
  client <- .rekognition$service(cfg)
  send_request(new_request(client, operation, params, shape))
}
.rekognition$operations$list_dataset_labels <- rekognition_list_dataset_labels
#' Returns metadata for faces in the specified collection
#'
#' @description
#' Returns metadata for faces in the specified collection. This metadata
#' includes information such as the bounding box coordinates, the
#' confidence (that the bounding box contains a face), and face ID. For an
#' example, see Listing Faces in a Collection in the Amazon Rekognition
#' Developer Guide.
#'
#' This operation requires permissions to perform the
#' `rekognition:ListFaces` action.
#'
#' @usage
#' rekognition_list_faces(CollectionId, NextToken, MaxResults, UserId,
#' FaceIds)
#'
#' @param CollectionId [required] ID of the collection from which to list the faces.
#' @param NextToken If the previous response was incomplete (because there is more data to
#' retrieve), Amazon Rekognition returns a pagination token in the
#' response. You can use this pagination token to retrieve the next set of
#' faces.
#' @param MaxResults Maximum number of faces to return.
#' @param UserId An array of user IDs to match when listing faces in a collection.
#' @param FaceIds An array of face IDs to match when listing faces in a collection.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Faces = list(
#' list(
#' FaceId = "string",
#' BoundingBox = list(
#' Width = 123.0,
#' Height = 123.0,
#' Left = 123.0,
#' Top = 123.0
#' ),
#' ImageId = "string",
#' ExternalImageId = "string",
#' Confidence = 123.0,
#' IndexFacesModelVersion = "string",
#' UserId = "string"
#' )
#' ),
#' NextToken = "string",
#' FaceModelVersion = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$list_faces(
#' CollectionId = "string",
#' NextToken = "string",
#' MaxResults = 123,
#' UserId = "string",
#' FaceIds = list(
#' "string"
#' )
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This operation lists the faces in a Rekognition collection.
#' svc$list_faces(
#' CollectionId = "myphotos",
#' MaxResults = 20L
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname rekognition_list_faces
#'
#' @aliases rekognition_list_faces
# Invoke the Rekognition ListFaces API: build the operation descriptor,
# marshal the caller's arguments into the request shape, and send it
# through the configured service client. Returns the parsed response.
rekognition_list_faces <- function(CollectionId, NextToken = NULL, MaxResults = NULL, UserId = NULL, FaceIds = NULL) {
  # Operation metadata, including the pagination token/limit key mapping.
  operation <- new_operation(
    name = "ListFaces",
    http_method = "POST",
    http_path = "/",
    paginator = list(input_token = "NextToken", limit_key = "MaxResults", output_token = "NextToken", result_key = "Faces")
  )
  # Marshal arguments into the API's expected input structure.
  req_input <- .rekognition$list_faces_input(
    CollectionId = CollectionId,
    NextToken = NextToken,
    MaxResults = MaxResults,
    UserId = UserId,
    FaceIds = FaceIds
  )
  resp_shape <- .rekognition$list_faces_output()
  client <- .rekognition$service(get_config())
  send_request(new_request(client, operation, req_input, resp_shape))
}
.rekognition$operations$list_faces <- rekognition_list_faces
#' Gets a list of the project policies attached to a project
#'
#' @description
#' Gets a list of the project policies attached to a project.
#'
#' To attach a project policy to a project, call
#' [`put_project_policy`][rekognition_put_project_policy]. To remove a
#' project policy from a project, call
#' [`delete_project_policy`][rekognition_delete_project_policy].
#'
#' This operation requires permissions to perform the
#' `rekognition:ListProjectPolicies` action.
#'
#' @usage
#' rekognition_list_project_policies(ProjectArn, NextToken, MaxResults)
#'
#' @param ProjectArn [required] The ARN of the project for which you want to list the project policies.
#' @param NextToken If the previous response was incomplete (because there is more results
#' to retrieve), Amazon Rekognition Custom Labels returns a pagination
#' token in the response. You can use this pagination token to retrieve the
#' next set of results.
#' @param MaxResults The maximum number of results to return per paginated call. The largest
#' value you can specify is 5. If you specify a value greater than 5, a
#' ValidationException error occurs. The default value is 5.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' ProjectPolicies = list(
#' list(
#' ProjectArn = "string",
#' PolicyName = "string",
#' PolicyRevisionId = "string",
#' PolicyDocument = "string",
#' CreationTimestamp = as.POSIXct(
#' "2015-01-01"
#' ),
#' LastUpdatedTimestamp = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' ),
#' NextToken = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$list_project_policies(
#' ProjectArn = "string",
#' NextToken = "string",
#' MaxResults = 123
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This operation lists the project policies that are attached to an Amazon
#' # Rekognition Custom Labels project.
#' svc$list_project_policies(
#' MaxResults = 5L,
#' NextToken = "",
#' ProjectArn = "arn:aws:rekognition:us-east-1:111122223333:project/my-sdk-p..."
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname rekognition_list_project_policies
#'
#' @aliases rekognition_list_project_policies
# Invoke the Rekognition ListProjectPolicies API: build the operation
# descriptor, marshal the caller's arguments, and dispatch the request
# through the configured service client. Returns the parsed response.
rekognition_list_project_policies <- function(ProjectArn, NextToken = NULL, MaxResults = NULL) {
  # Operation metadata, including the pagination token/limit key mapping.
  operation <- new_operation(
    name = "ListProjectPolicies",
    http_method = "POST",
    http_path = "/",
    paginator = list(input_token = "NextToken", limit_key = "MaxResults", output_token = "NextToken", result_key = "ProjectPolicies")
  )
  # Marshal arguments into the API's expected input structure.
  req_input <- .rekognition$list_project_policies_input(
    ProjectArn = ProjectArn,
    NextToken = NextToken,
    MaxResults = MaxResults
  )
  resp_shape <- .rekognition$list_project_policies_output()
  client <- .rekognition$service(get_config())
  send_request(new_request(client, operation, req_input, resp_shape))
}
.rekognition$operations$list_project_policies <- rekognition_list_project_policies
#' Gets a list of stream processors that you have created with
#' CreateStreamProcessor
#'
#' @description
#' Gets a list of stream processors that you have created with
#' [`create_stream_processor`][rekognition_create_stream_processor].
#'
#' @usage
#' rekognition_list_stream_processors(NextToken, MaxResults)
#'
#' @param NextToken If the previous response was incomplete (because there are more stream
#' processors to retrieve), Amazon Rekognition Video returns a pagination
#' token in the response. You can use this pagination token to retrieve the
#' next set of stream processors.
#' @param MaxResults Maximum number of stream processors you want Amazon Rekognition Video to
#' return in the response. The default is 1000.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' NextToken = "string",
#' StreamProcessors = list(
#' list(
#' Name = "string",
#' Status = "STOPPED"|"STARTING"|"RUNNING"|"FAILED"|"STOPPING"|"UPDATING"
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$list_stream_processors(
#' NextToken = "string",
#' MaxResults = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_list_stream_processors
#'
#' @aliases rekognition_list_stream_processors
# Invoke the Rekognition ListStreamProcessors API: build the operation
# descriptor, marshal the caller's arguments, and dispatch the request
# through the configured service client. Returns the parsed response.
rekognition_list_stream_processors <- function(NextToken = NULL, MaxResults = NULL) {
  # Operation metadata; note this paginator declares no result_key.
  operation <- new_operation(
    name = "ListStreamProcessors",
    http_method = "POST",
    http_path = "/",
    paginator = list(input_token = "NextToken", limit_key = "MaxResults", output_token = "NextToken")
  )
  # Marshal arguments into the API's expected input structure.
  req_input <- .rekognition$list_stream_processors_input(
    NextToken = NextToken,
    MaxResults = MaxResults
  )
  resp_shape <- .rekognition$list_stream_processors_output()
  client <- .rekognition$service(get_config())
  send_request(new_request(client, operation, req_input, resp_shape))
}
.rekognition$operations$list_stream_processors <- rekognition_list_stream_processors
#' Returns a list of tags in an Amazon Rekognition collection, stream
#' processor, or Custom Labels model
#'
#' @description
#' Returns a list of tags in an Amazon Rekognition collection, stream
#' processor, or Custom Labels model.
#'
#' This operation requires permissions to perform the
#' `rekognition:ListTagsForResource` action.
#'
#' @usage
#' rekognition_list_tags_for_resource(ResourceArn)
#'
#' @param ResourceArn [required] Amazon Resource Name (ARN) of the model, collection, or stream processor
#' that contains the tags that you want a list of.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Tags = list(
#' "string"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$list_tags_for_resource(
#' ResourceArn = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_list_tags_for_resource
#'
#' @aliases rekognition_list_tags_for_resource
# Invoke the Rekognition ListTagsForResource API: build the operation
# descriptor, marshal the resource ARN into the request shape, and send
# it through the configured service client. Returns the parsed response.
rekognition_list_tags_for_resource <- function(ResourceArn) {
  # Non-paginated operation: paginator metadata is an empty list.
  operation <- new_operation(
    name = "ListTagsForResource",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  req_input <- .rekognition$list_tags_for_resource_input(ResourceArn = ResourceArn)
  resp_shape <- .rekognition$list_tags_for_resource_output()
  client <- .rekognition$service(get_config())
  send_request(new_request(client, operation, req_input, resp_shape))
}
.rekognition$operations$list_tags_for_resource <- rekognition_list_tags_for_resource
#' Returns metadata of the User such as UserID in the specified collection
#'
#' @description
#' Returns metadata of the User such as `UserID` in the specified
#' collection. Anonymous User (to reserve faces without any identity) is
#' not returned as part of this request. The results are sorted by system
#' generated primary key ID. If the response is truncated, `NextToken` is
#' returned in the response that can be used in the subsequent request to
#' retrieve the next set of identities.
#'
#' @usage
#' rekognition_list_users(CollectionId, MaxResults, NextToken)
#'
#' @param CollectionId [required] The ID of an existing collection.
#' @param MaxResults Maximum number of UsersID to return.
#' @param NextToken Pagination token to receive the next set of UsersID.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Users = list(
#' list(
#' UserId = "string",
#' UserStatus = "ACTIVE"|"UPDATING"|"CREATING"|"CREATED"
#' )
#' ),
#' NextToken = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$list_users(
#' CollectionId = "string",
#' MaxResults = 123,
#' NextToken = "string"
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # Returns metadata of the User such as UserID in the specified collection.
#' svc$list_users(
#' CollectionId = "MyCollection"
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname rekognition_list_users
#'
#' @aliases rekognition_list_users
# Invoke the Rekognition ListUsers API: build the operation descriptor,
# marshal the caller's arguments, and dispatch the request through the
# configured service client. Returns the parsed response.
rekognition_list_users <- function(CollectionId, MaxResults = NULL, NextToken = NULL) {
  # Operation metadata, including the pagination token/limit key mapping.
  operation <- new_operation(
    name = "ListUsers",
    http_method = "POST",
    http_path = "/",
    paginator = list(input_token = "NextToken", limit_key = "MaxResults", output_token = "NextToken", result_key = "Users")
  )
  # Marshal arguments into the API's expected input structure.
  req_input <- .rekognition$list_users_input(
    CollectionId = CollectionId,
    MaxResults = MaxResults,
    NextToken = NextToken
  )
  resp_shape <- .rekognition$list_users_output()
  client <- .rekognition$service(get_config())
  send_request(new_request(client, operation, req_input, resp_shape))
}
.rekognition$operations$list_users <- rekognition_list_users
#' Attaches a project policy to a Amazon Rekognition Custom Labels project
#' in a trusting AWS account
#'
#' @description
#' Attaches a project policy to a Amazon Rekognition Custom Labels project
#' in a trusting AWS account. A project policy specifies that a trusted AWS
#' account can copy a model version from a trusting AWS account to a
#' project in the trusted AWS account. To copy a model version you use the
#' [`copy_project_version`][rekognition_copy_project_version] operation.
#'
#' For more information about the format of a project policy document, see
#' Attaching a project policy (SDK) in the *Amazon Rekognition Custom
#' Labels Developer Guide*.
#'
#' The response from [`put_project_policy`][rekognition_put_project_policy]
#' is a revision ID for the project policy. You can attach multiple project
#' policies to a project. You can also update an existing project policy by
#' specifying the policy revision ID of the existing policy.
#'
#' To remove a project policy from a project, call
#' [`delete_project_policy`][rekognition_delete_project_policy]. To get a
#' list of project policies attached to a project, call
#' [`list_project_policies`][rekognition_list_project_policies].
#'
#' You copy a model version by calling
#' [`copy_project_version`][rekognition_copy_project_version].
#'
#' This operation requires permissions to perform the
#' `rekognition:PutProjectPolicy` action.
#'
#' @usage
#' rekognition_put_project_policy(ProjectArn, PolicyName, PolicyRevisionId,
#' PolicyDocument)
#'
#' @param ProjectArn [required] The Amazon Resource Name (ARN) of the project that the project policy is
#' attached to.
#' @param PolicyName [required] A name for the policy.
#' @param PolicyRevisionId The revision ID for the Project Policy. Each time you modify a policy,
#' Amazon Rekognition Custom Labels generates and assigns a new
#' `PolicyRevisionId` and then deletes the previous version of the policy.
#' @param PolicyDocument [required] A resource policy to add to the model. The policy is a JSON structure
#' that contains one or more statements that define the policy. The policy
#' must follow the IAM syntax. For more information about the contents of a
#' JSON policy document, see [IAM JSON policy
#' reference](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html).
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' PolicyRevisionId = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$put_project_policy(
#' ProjectArn = "string",
#' PolicyName = "string",
#' PolicyRevisionId = "string",
#' PolicyDocument = "string"
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This operation attaches a project policy to a Amazon Rekognition Custom
#' # Labels project in a trusting AWS account.
#' svc$put_project_policy(
#' PolicyDocument = "'\{"Version":"2012-10-17","Statement":[\{"Effect":"ALLOW"...",
#' PolicyName = "SamplePolicy",
#' PolicyRevisionId = "0123456789abcdef",
#' ProjectArn = "arn:aws:rekognition:us-east-1:111122223333:project/my-sdk-p..."
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname rekognition_put_project_policy
#'
#' @aliases rekognition_put_project_policy
# Invoke the Rekognition PutProjectPolicy API: build the operation
# descriptor, marshal the caller's arguments, and dispatch the request
# through the configured service client. Returns the parsed response.
rekognition_put_project_policy <- function(ProjectArn, PolicyName, PolicyRevisionId = NULL, PolicyDocument) {
  # Non-paginated operation: paginator metadata is an empty list.
  operation <- new_operation(
    name = "PutProjectPolicy",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal arguments into the API's expected input structure.
  req_input <- .rekognition$put_project_policy_input(
    ProjectArn = ProjectArn,
    PolicyName = PolicyName,
    PolicyRevisionId = PolicyRevisionId,
    PolicyDocument = PolicyDocument
  )
  resp_shape <- .rekognition$put_project_policy_output()
  client <- .rekognition$service(get_config())
  send_request(new_request(client, operation, req_input, resp_shape))
}
.rekognition$operations$put_project_policy <- rekognition_put_project_policy
#' Returns an array of celebrities recognized in the input image
#'
#' @description
#' Returns an array of celebrities recognized in the input image. For more
#' information, see Recognizing celebrities in the Amazon Rekognition
#' Developer Guide.
#'
#' [`recognize_celebrities`][rekognition_recognize_celebrities] returns the
#' 64 largest faces in the image. It lists the recognized celebrities in
#' the `CelebrityFaces` array and any unrecognized faces in the
#' `UnrecognizedFaces` array.
#' [`recognize_celebrities`][rekognition_recognize_celebrities] doesn't
#' return celebrities whose faces aren't among the largest 64 faces in the
#' image.
#'
#' For each celebrity recognized,
#' [`recognize_celebrities`][rekognition_recognize_celebrities] returns a
#' `Celebrity` object. The `Celebrity` object contains the celebrity name,
#' ID, URL links to additional information, match confidence, and a
#' `ComparedFace` object that you can use to locate the celebrity's face on
#' the image.
#'
#' Amazon Rekognition doesn't retain information about which images a
#' celebrity has been recognized in. Your application must store this
#' information and use the `Celebrity` ID property as a unique identifier
#' for the celebrity. If you don't store the celebrity name or additional
#' information URLs returned by
#' [`recognize_celebrities`][rekognition_recognize_celebrities], you will
#' need the ID to identify the celebrity in a call to the
#' [`get_celebrity_info`][rekognition_get_celebrity_info] operation.
#'
#' You pass the input image either as base64-encoded image bytes or as a
#' reference to an image in an Amazon S3 bucket. If you use the AWS CLI to
#' call Amazon Rekognition operations, passing image bytes is not
#' supported. The image must be either a PNG or JPEG formatted file.
#'
#' For an example, see Recognizing celebrities in an image in the Amazon
#' Rekognition Developer Guide.
#'
#' This operation requires permissions to perform the
#' `rekognition:RecognizeCelebrities` operation.
#'
#' @usage
#' rekognition_recognize_celebrities(Image)
#'
#' @param Image [required] The input image as base64-encoded bytes or an S3 object. If you use the
#' AWS CLI to call Amazon Rekognition operations, passing base64-encoded
#' image bytes is not supported.
#'
#' If you are using an AWS SDK to call Amazon Rekognition, you might not
#' need to base64-encode image bytes passed using the `Bytes` field. For
#' more information, see Images in the Amazon Rekognition developer guide.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' CelebrityFaces = list(
#' list(
#' Urls = list(
#' "string"
#' ),
#' Name = "string",
#' Id = "string",
#' Face = list(
#' BoundingBox = list(
#' Width = 123.0,
#' Height = 123.0,
#' Left = 123.0,
#' Top = 123.0
#' ),
#' Confidence = 123.0,
#' Landmarks = list(
#' list(
#' Type = "eyeLeft"|"eyeRight"|"nose"|"mouthLeft"|"mouthRight"|"leftEyeBrowLeft"|"leftEyeBrowRight"|"leftEyeBrowUp"|"rightEyeBrowLeft"|"rightEyeBrowRight"|"rightEyeBrowUp"|"leftEyeLeft"|"leftEyeRight"|"leftEyeUp"|"leftEyeDown"|"rightEyeLeft"|"rightEyeRight"|"rightEyeUp"|"rightEyeDown"|"noseLeft"|"noseRight"|"mouthUp"|"mouthDown"|"leftPupil"|"rightPupil"|"upperJawlineLeft"|"midJawlineLeft"|"chinBottom"|"midJawlineRight"|"upperJawlineRight",
#' X = 123.0,
#' Y = 123.0
#' )
#' ),
#' Pose = list(
#' Roll = 123.0,
#' Yaw = 123.0,
#' Pitch = 123.0
#' ),
#' Quality = list(
#' Brightness = 123.0,
#' Sharpness = 123.0
#' ),
#' Emotions = list(
#' list(
#' Type = "HAPPY"|"SAD"|"ANGRY"|"CONFUSED"|"DISGUSTED"|"SURPRISED"|"CALM"|"UNKNOWN"|"FEAR",
#' Confidence = 123.0
#' )
#' ),
#' Smile = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' )
#' ),
#' MatchConfidence = 123.0,
#' KnownGender = list(
#' Type = "Male"|"Female"|"Nonbinary"|"Unlisted"
#' )
#' )
#' ),
#' UnrecognizedFaces = list(
#' list(
#' BoundingBox = list(
#' Width = 123.0,
#' Height = 123.0,
#' Left = 123.0,
#' Top = 123.0
#' ),
#' Confidence = 123.0,
#' Landmarks = list(
#' list(
#' Type = "eyeLeft"|"eyeRight"|"nose"|"mouthLeft"|"mouthRight"|"leftEyeBrowLeft"|"leftEyeBrowRight"|"leftEyeBrowUp"|"rightEyeBrowLeft"|"rightEyeBrowRight"|"rightEyeBrowUp"|"leftEyeLeft"|"leftEyeRight"|"leftEyeUp"|"leftEyeDown"|"rightEyeLeft"|"rightEyeRight"|"rightEyeUp"|"rightEyeDown"|"noseLeft"|"noseRight"|"mouthUp"|"mouthDown"|"leftPupil"|"rightPupil"|"upperJawlineLeft"|"midJawlineLeft"|"chinBottom"|"midJawlineRight"|"upperJawlineRight",
#' X = 123.0,
#' Y = 123.0
#' )
#' ),
#' Pose = list(
#' Roll = 123.0,
#' Yaw = 123.0,
#' Pitch = 123.0
#' ),
#' Quality = list(
#' Brightness = 123.0,
#' Sharpness = 123.0
#' ),
#' Emotions = list(
#' list(
#' Type = "HAPPY"|"SAD"|"ANGRY"|"CONFUSED"|"DISGUSTED"|"SURPRISED"|"CALM"|"UNKNOWN"|"FEAR",
#' Confidence = 123.0
#' )
#' ),
#' Smile = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' )
#' )
#' ),
#' OrientationCorrection = "ROTATE_0"|"ROTATE_90"|"ROTATE_180"|"ROTATE_270"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$recognize_celebrities(
#' Image = list(
#' Bytes = raw,
#' S3Object = list(
#' Bucket = "string",
#' Name = "string",
#' Version = "string"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_recognize_celebrities
#'
#' @aliases rekognition_recognize_celebrities
# Invoke the Rekognition RecognizeCelebrities API: build the operation
# descriptor, marshal the image argument into the request shape, and send
# it through the configured service client. Returns the parsed response.
rekognition_recognize_celebrities <- function(Image) {
  # Non-paginated operation: paginator metadata is an empty list.
  operation <- new_operation(
    name = "RecognizeCelebrities",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  req_input <- .rekognition$recognize_celebrities_input(Image = Image)
  resp_shape <- .rekognition$recognize_celebrities_output()
  client <- .rekognition$service(get_config())
  send_request(new_request(client, operation, req_input, resp_shape))
}
.rekognition$operations$recognize_celebrities <- rekognition_recognize_celebrities
#' For a given input face ID, searches for matching faces in the collection
#' the face belongs to
#'
#' @description
#' For a given input face ID, searches for matching faces in the collection
#' the face belongs to. You get a face ID when you add a face to the
#' collection using the [`index_faces`][rekognition_index_faces] operation.
#' The operation compares the features of the input face with faces in the
#' specified collection.
#'
#' You can also search faces without indexing faces by using the
#' [`search_faces_by_image`][rekognition_search_faces_by_image] operation.
#'
#' The operation response returns an array of faces that match, ordered by
#' similarity score with the highest similarity first. More specifically,
#' it is an array of metadata for each face match that is found. Along with
#' the metadata, the response also includes a `confidence` value for each
#' face match, indicating the confidence that the specific face matches the
#' input face.
#'
#' For an example, see Searching for a face using its face ID in the Amazon
#' Rekognition Developer Guide.
#'
#' This operation requires permissions to perform the
#' `rekognition:SearchFaces` action.
#'
#' @usage
#' rekognition_search_faces(CollectionId, FaceId, MaxFaces,
#' FaceMatchThreshold)
#'
#' @param CollectionId [required] ID of the collection the face belongs to.
#' @param FaceId [required] ID of a face to find matches for in the collection.
#' @param MaxFaces Maximum number of faces to return. The operation returns the maximum
#' number of faces with the highest confidence in the match.
#' @param FaceMatchThreshold Optional value specifying the minimum confidence in the face match to
#' return. For example, don't return any matches where confidence in
#' matches is less than 70%. The default value is 80%.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' SearchedFaceId = "string",
#' FaceMatches = list(
#' list(
#' Similarity = 123.0,
#' Face = list(
#' FaceId = "string",
#' BoundingBox = list(
#' Width = 123.0,
#' Height = 123.0,
#' Left = 123.0,
#' Top = 123.0
#' ),
#' ImageId = "string",
#' ExternalImageId = "string",
#' Confidence = 123.0,
#' IndexFacesModelVersion = "string",
#' UserId = "string"
#' )
#' )
#' ),
#' FaceModelVersion = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$search_faces(
#' CollectionId = "string",
#' FaceId = "string",
#' MaxFaces = 123,
#' FaceMatchThreshold = 123.0
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This operation searches for matching faces in the collection the
#' # supplied face belongs to.
#' svc$search_faces(
#' CollectionId = "myphotos",
#' FaceId = "70008e50-75e4-55d0-8e80-363fb73b3a14",
#' FaceMatchThreshold = 90L,
#' MaxFaces = 10L
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname rekognition_search_faces
#'
#' @aliases rekognition_search_faces
# Invoke the Rekognition SearchFaces API: build the operation descriptor,
# marshal the caller's arguments, and dispatch the request through the
# configured service client. Returns the parsed response.
rekognition_search_faces <- function(CollectionId, FaceId, MaxFaces = NULL, FaceMatchThreshold = NULL) {
  # Non-paginated operation: paginator metadata is an empty list.
  operation <- new_operation(
    name = "SearchFaces",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal arguments into the API's expected input structure.
  req_input <- .rekognition$search_faces_input(
    CollectionId = CollectionId,
    FaceId = FaceId,
    MaxFaces = MaxFaces,
    FaceMatchThreshold = FaceMatchThreshold
  )
  resp_shape <- .rekognition$search_faces_output()
  client <- .rekognition$service(get_config())
  send_request(new_request(client, operation, req_input, resp_shape))
}
.rekognition$operations$search_faces <- rekognition_search_faces
#' For a given input image, first detects the largest face in the image,
#' and then searches the specified collection for matching faces
#'
#' @description
#' For a given input image, first detects the largest face in the image,
#' and then searches the specified collection for matching faces. The
#' operation compares the features of the input face with faces in the
#' specified collection.
#'
#' To search for all faces in an input image, you might first call the
#' [`index_faces`][rekognition_index_faces] operation, and then use the
#' face IDs returned in subsequent calls to the
#' [`search_faces`][rekognition_search_faces] operation.
#'
#' You can also call the [`detect_faces`][rekognition_detect_faces]
#' operation and use the bounding boxes in the response to make face crops,
#' which then you can pass in to the
#' [`search_faces_by_image`][rekognition_search_faces_by_image] operation.
#'
#' You pass the input image either as base64-encoded image bytes or as a
#' reference to an image in an Amazon S3 bucket. If you use the AWS CLI to
#' call Amazon Rekognition operations, passing image bytes is not
#' supported. The image must be either a PNG or JPEG formatted file.
#'
#' The response returns an array of faces that match, ordered by similarity
#' score with the highest similarity first. More specifically, it is an
#' array of metadata for each face match found. Along with the metadata,
#' the response also includes a `similarity` indicating how similar the
#' face is to the input face. In the response, the operation also returns
#' the bounding box (and a confidence level that the bounding box contains
#' a face) of the face that Amazon Rekognition used for the input image.
#'
#' If no faces are detected in the input image,
#' [`search_faces_by_image`][rekognition_search_faces_by_image] returns an
#' `InvalidParameterException` error.
#'
#' For an example, Searching for a Face Using an Image in the Amazon
#' Rekognition Developer Guide.
#'
#' The `QualityFilter` input parameter allows you to filter out detected
#' faces that don’t meet a required quality bar. The quality bar is based
#' on a variety of common use cases. Use `QualityFilter` to set the quality
#' bar for filtering by specifying `LOW`, `MEDIUM`, or `HIGH`. If you do
#' not want to filter detected faces, specify `NONE`. The default value is
#' `NONE`.
#'
#' To use quality filtering, you need a collection associated with version
#' 3 of the face model or higher. To get the version of the face model
#' associated with a collection, call
#' [`describe_collection`][rekognition_describe_collection].
#'
#' This operation requires permissions to perform the
#' `rekognition:SearchFacesByImage` action.
#'
#' @usage
#' rekognition_search_faces_by_image(CollectionId, Image, MaxFaces,
#' FaceMatchThreshold, QualityFilter)
#'
#' @param CollectionId [required] ID of the collection to search.
#' @param Image [required] The input image as base64-encoded bytes or an S3 object. If you use the
#' AWS CLI to call Amazon Rekognition operations, passing base64-encoded
#' image bytes is not supported.
#'
#' If you are using an AWS SDK to call Amazon Rekognition, you might not
#' need to base64-encode image bytes passed using the `Bytes` field. For
#' more information, see Images in the Amazon Rekognition developer guide.
#' @param MaxFaces Maximum number of faces to return. The operation returns the maximum
#' number of faces with the highest confidence in the match.
#' @param FaceMatchThreshold (Optional) Specifies the minimum confidence in the face match to return.
#' For example, don't return any matches where confidence in matches is
#' less than 70%. The default value is 80%.
#' @param QualityFilter A filter that specifies a quality bar for how much filtering is done to
#' identify faces. Filtered faces aren't searched for in the collection. If
#' you specify `AUTO`, Amazon Rekognition chooses the quality bar. If you
#' specify `LOW`, `MEDIUM`, or `HIGH`, filtering removes all faces that
#' don’t meet the chosen quality bar. The quality bar is based on a variety
#' of common use cases. Low-quality detections can occur for a number of
#' reasons. Some examples are an object that's misidentified as a face, a
#' face that's too blurry, or a face with a pose that's too extreme to use.
#' If you specify `NONE`, no filtering is performed. The default value is
#' `NONE`.
#'
#' To use quality filtering, the collection you are using must be
#' associated with version 3 of the face model or higher.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' SearchedFaceBoundingBox = list(
#' Width = 123.0,
#' Height = 123.0,
#' Left = 123.0,
#' Top = 123.0
#' ),
#' SearchedFaceConfidence = 123.0,
#' FaceMatches = list(
#' list(
#' Similarity = 123.0,
#' Face = list(
#' FaceId = "string",
#' BoundingBox = list(
#' Width = 123.0,
#' Height = 123.0,
#' Left = 123.0,
#' Top = 123.0
#' ),
#' ImageId = "string",
#' ExternalImageId = "string",
#' Confidence = 123.0,
#' IndexFacesModelVersion = "string",
#' UserId = "string"
#' )
#' )
#' ),
#' FaceModelVersion = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$search_faces_by_image(
#' CollectionId = "string",
#' Image = list(
#' Bytes = raw,
#' S3Object = list(
#' Bucket = "string",
#' Name = "string",
#' Version = "string"
#' )
#' ),
#' MaxFaces = 123,
#' FaceMatchThreshold = 123.0,
#' QualityFilter = "NONE"|"AUTO"|"LOW"|"MEDIUM"|"HIGH"
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This operation searches for faces in a Rekognition collection that match
#' # the largest face in an S3 bucket stored image.
#' svc$search_faces_by_image(
#' CollectionId = "myphotos",
#' FaceMatchThreshold = 95L,
#' Image = list(
#' S3Object = list(
#' Bucket = "mybucket",
#' Name = "myphoto"
#' )
#' ),
#' MaxFaces = 5L
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname rekognition_search_faces_by_image
#'
#' @aliases rekognition_search_faces_by_image
# Invoke the Rekognition SearchFacesByImage API: build the operation
# descriptor, marshal the caller's arguments, and dispatch the request
# through the configured service client. Returns the parsed response.
rekognition_search_faces_by_image <- function(CollectionId, Image, MaxFaces = NULL, FaceMatchThreshold = NULL, QualityFilter = NULL) {
  # Non-paginated operation: paginator metadata is an empty list.
  operation <- new_operation(
    name = "SearchFacesByImage",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal arguments into the API's expected input structure.
  req_input <- .rekognition$search_faces_by_image_input(
    CollectionId = CollectionId,
    Image = Image,
    MaxFaces = MaxFaces,
    FaceMatchThreshold = FaceMatchThreshold,
    QualityFilter = QualityFilter
  )
  resp_shape <- .rekognition$search_faces_by_image_output()
  client <- .rekognition$service(get_config())
  send_request(new_request(client, operation, req_input, resp_shape))
}
.rekognition$operations$search_faces_by_image <- rekognition_search_faces_by_image
#' Searches for UserIDs within a collection based on a FaceId or UserId
#'
#' @description
#' Searches for UserIDs within a collection based on a `FaceId` or
#' `UserId`. This API can be used to find the closest UserID (with a
#' highest similarity) to associate a face. The request must be provided
#' with either `FaceId` or `UserId`. The operation returns an array of
#' UserID that match the `FaceId` or `UserId`, ordered by similarity score
#' with the highest similarity first.
#'
#' @usage
#' rekognition_search_users(CollectionId, UserId, FaceId,
#' UserMatchThreshold, MaxUsers)
#'
#' @param CollectionId [required] The ID of an existing collection containing the UserID, used with a
#' UserId or FaceId. If a FaceId is provided, UserId isn’t required to be
#' present in the Collection.
#' @param UserId ID for the existing User.
#' @param FaceId ID for the existing face.
#' @param UserMatchThreshold Optional value that specifies the minimum confidence in the matched
#' UserID to return. Default value of 80.
#' @param MaxUsers Maximum number of identities to return.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' UserMatches = list(
#' list(
#' Similarity = 123.0,
#' User = list(
#' UserId = "string",
#' UserStatus = "ACTIVE"|"UPDATING"|"CREATING"|"CREATED"
#' )
#' )
#' ),
#' FaceModelVersion = "string",
#' SearchedFace = list(
#' FaceId = "string"
#' ),
#' SearchedUser = list(
#' UserId = "string"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$search_users(
#' CollectionId = "string",
#' UserId = "string",
#' FaceId = "string",
#' UserMatchThreshold = 123.0,
#' MaxUsers = 123
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # Searches for UserIDs within a collection based on a FaceId or UserId.
#' svc$search_users(
#' CollectionId = "MyCollection",
#' MaxUsers = 2L,
#' UserId = "DemoUser",
#' UserMatchThreshold = 70L
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname rekognition_search_users
#'
#' @aliases rekognition_search_users
# Invoke the Rekognition SearchUsers API: build the operation descriptor,
# marshal the caller's arguments, and dispatch the request through the
# configured service client. Returns the parsed response.
rekognition_search_users <- function(CollectionId, UserId = NULL, FaceId = NULL, UserMatchThreshold = NULL, MaxUsers = NULL) {
  # Non-paginated operation: paginator metadata is an empty list.
  operation <- new_operation(
    name = "SearchUsers",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal arguments into the API's expected input structure.
  req_input <- .rekognition$search_users_input(
    CollectionId = CollectionId,
    UserId = UserId,
    FaceId = FaceId,
    UserMatchThreshold = UserMatchThreshold,
    MaxUsers = MaxUsers
  )
  resp_shape <- .rekognition$search_users_output()
  client <- .rekognition$service(get_config())
  send_request(new_request(client, operation, req_input, resp_shape))
}
.rekognition$operations$search_users <- rekognition_search_users
#' Searches for UserIDs using a supplied image
#'
#' @description
#' Searches for UserIDs using a supplied image. It first detects the
#' largest face in the image, and then searches a specified collection for
#' matching UserIDs.
#'
#' The operation returns an array of UserIDs that match the face in the
#' supplied image, ordered by similarity score with the highest similarity
#' first. It also returns a bounding box for the face found in the input
#' image.
#'
#' Information about faces detected in the supplied image, but not used for
#' the search, is returned in an array of `UnsearchedFace` objects. If no
#' valid face is detected in the image, the response will contain an empty
#' `UserMatches` list and no `SearchedFace` object.
#'
#' @usage
#' rekognition_search_users_by_image(CollectionId, Image,
#' UserMatchThreshold, MaxUsers, QualityFilter)
#'
#' @param CollectionId [required] The ID of an existing collection containing the UserID.
#' @param Image [required]
#' @param UserMatchThreshold Specifies the minimum confidence in the UserID match to return. Default
#' value is 80.
#' @param MaxUsers Maximum number of UserIDs to return.
#' @param QualityFilter A filter that specifies a quality bar for how much filtering is done to
#' identify faces. Filtered faces aren't searched for in the collection.
#' The default value is NONE.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' UserMatches = list(
#' list(
#' Similarity = 123.0,
#' User = list(
#' UserId = "string",
#' UserStatus = "ACTIVE"|"UPDATING"|"CREATING"|"CREATED"
#' )
#' )
#' ),
#' FaceModelVersion = "string",
#' SearchedFace = list(
#' FaceDetail = list(
#' BoundingBox = list(
#' Width = 123.0,
#' Height = 123.0,
#' Left = 123.0,
#' Top = 123.0
#' ),
#' AgeRange = list(
#' Low = 123,
#' High = 123
#' ),
#' Smile = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' Eyeglasses = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' Sunglasses = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' Gender = list(
#' Value = "Male"|"Female",
#' Confidence = 123.0
#' ),
#' Beard = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' Mustache = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' EyesOpen = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' MouthOpen = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' Emotions = list(
#' list(
#' Type = "HAPPY"|"SAD"|"ANGRY"|"CONFUSED"|"DISGUSTED"|"SURPRISED"|"CALM"|"UNKNOWN"|"FEAR",
#' Confidence = 123.0
#' )
#' ),
#' Landmarks = list(
#' list(
#' Type = "eyeLeft"|"eyeRight"|"nose"|"mouthLeft"|"mouthRight"|"leftEyeBrowLeft"|"leftEyeBrowRight"|"leftEyeBrowUp"|"rightEyeBrowLeft"|"rightEyeBrowRight"|"rightEyeBrowUp"|"leftEyeLeft"|"leftEyeRight"|"leftEyeUp"|"leftEyeDown"|"rightEyeLeft"|"rightEyeRight"|"rightEyeUp"|"rightEyeDown"|"noseLeft"|"noseRight"|"mouthUp"|"mouthDown"|"leftPupil"|"rightPupil"|"upperJawlineLeft"|"midJawlineLeft"|"chinBottom"|"midJawlineRight"|"upperJawlineRight",
#' X = 123.0,
#' Y = 123.0
#' )
#' ),
#' Pose = list(
#' Roll = 123.0,
#' Yaw = 123.0,
#' Pitch = 123.0
#' ),
#' Quality = list(
#' Brightness = 123.0,
#' Sharpness = 123.0
#' ),
#' Confidence = 123.0,
#' FaceOccluded = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' EyeDirection = list(
#' Yaw = 123.0,
#' Pitch = 123.0,
#' Confidence = 123.0
#' )
#' )
#' ),
#' UnsearchedFaces = list(
#' list(
#' FaceDetails = list(
#' BoundingBox = list(
#' Width = 123.0,
#' Height = 123.0,
#' Left = 123.0,
#' Top = 123.0
#' ),
#' AgeRange = list(
#' Low = 123,
#' High = 123
#' ),
#' Smile = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' Eyeglasses = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' Sunglasses = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' Gender = list(
#' Value = "Male"|"Female",
#' Confidence = 123.0
#' ),
#' Beard = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' Mustache = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' EyesOpen = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' MouthOpen = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' Emotions = list(
#' list(
#' Type = "HAPPY"|"SAD"|"ANGRY"|"CONFUSED"|"DISGUSTED"|"SURPRISED"|"CALM"|"UNKNOWN"|"FEAR",
#' Confidence = 123.0
#' )
#' ),
#' Landmarks = list(
#' list(
#' Type = "eyeLeft"|"eyeRight"|"nose"|"mouthLeft"|"mouthRight"|"leftEyeBrowLeft"|"leftEyeBrowRight"|"leftEyeBrowUp"|"rightEyeBrowLeft"|"rightEyeBrowRight"|"rightEyeBrowUp"|"leftEyeLeft"|"leftEyeRight"|"leftEyeUp"|"leftEyeDown"|"rightEyeLeft"|"rightEyeRight"|"rightEyeUp"|"rightEyeDown"|"noseLeft"|"noseRight"|"mouthUp"|"mouthDown"|"leftPupil"|"rightPupil"|"upperJawlineLeft"|"midJawlineLeft"|"chinBottom"|"midJawlineRight"|"upperJawlineRight",
#' X = 123.0,
#' Y = 123.0
#' )
#' ),
#' Pose = list(
#' Roll = 123.0,
#' Yaw = 123.0,
#' Pitch = 123.0
#' ),
#' Quality = list(
#' Brightness = 123.0,
#' Sharpness = 123.0
#' ),
#' Confidence = 123.0,
#' FaceOccluded = list(
#' Value = TRUE|FALSE,
#' Confidence = 123.0
#' ),
#' EyeDirection = list(
#' Yaw = 123.0,
#' Pitch = 123.0,
#' Confidence = 123.0
#' )
#' ),
#' Reasons = list(
#' "FACE_NOT_LARGEST"|"EXCEEDS_MAX_FACES"|"EXTREME_POSE"|"LOW_BRIGHTNESS"|"LOW_SHARPNESS"|"LOW_CONFIDENCE"|"SMALL_BOUNDING_BOX"|"LOW_FACE_QUALITY"
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$search_users_by_image(
#' CollectionId = "string",
#' Image = list(
#' Bytes = raw,
#' S3Object = list(
#' Bucket = "string",
#' Name = "string",
#' Version = "string"
#' )
#' ),
#' UserMatchThreshold = 123.0,
#' MaxUsers = 123,
#' QualityFilter = "NONE"|"AUTO"|"LOW"|"MEDIUM"|"HIGH"
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # Searches for UserIDs using a supplied image.
#' svc$search_users_by_image(
#' CollectionId = "MyCollection",
#' Image = list(
#' S3Object = list(
#' Bucket = "bucket",
#' Name = "input.jpg"
#' )
#' ),
#' MaxUsers = 2L,
#' QualityFilter = "MEDIUM",
#' UserMatchThreshold = 70L
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname rekognition_search_users_by_image
#'
#' @aliases rekognition_search_users_by_image
rekognition_search_users_by_image <- function(CollectionId, Image, UserMatchThreshold = NULL, MaxUsers = NULL, QualityFilter = NULL) {
  # Describe the SearchUsersByImage API operation (non-paginated POST to "/").
  operation <- new_operation(
    name = "SearchUsersByImage",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize caller arguments into the request shape and prepare the
  # expected response shape.
  request_body <- .rekognition$search_users_by_image_input(
    CollectionId = CollectionId,
    Image = Image,
    UserMatchThreshold = UserMatchThreshold,
    MaxUsers = MaxUsers,
    QualityFilter = QualityFilter
  )
  response_shape <- .rekognition$search_users_by_image_output()
  # Build the service client from the current configuration, then send the
  # request and return the parsed response.
  client <- .rekognition$service(get_config())
  send_request(new_request(client, operation, request_body, response_shape))
}
.rekognition$operations$search_users_by_image <- rekognition_search_users_by_image
#' Starts asynchronous recognition of celebrities in a stored video
#'
#' @description
#' Starts asynchronous recognition of celebrities in a stored video.
#'
#' Amazon Rekognition Video can detect celebrities in a video must be
#' stored in an Amazon S3 bucket. Use Video to specify the bucket name and
#' the filename of the video.
#' [`start_celebrity_recognition`][rekognition_start_celebrity_recognition]
#' returns a job identifier (`JobId`) which you use to get the results of
#' the analysis. When celebrity recognition analysis is finished, Amazon
#' Rekognition Video publishes a completion status to the Amazon Simple
#' Notification Service topic that you specify in `NotificationChannel`. To
#' get the results of the celebrity recognition analysis, first check that
#' the status value published to the Amazon SNS topic is `SUCCEEDED`. If
#' so, call
#' [`get_celebrity_recognition`][rekognition_get_celebrity_recognition] and
#' pass the job identifier (`JobId`) from the initial call to
#' [`start_celebrity_recognition`][rekognition_start_celebrity_recognition].
#'
#' For more information, see Recognizing celebrities in the Amazon
#' Rekognition Developer Guide.
#'
#' @usage
#' rekognition_start_celebrity_recognition(Video, ClientRequestToken,
#' NotificationChannel, JobTag)
#'
#' @param Video [required] The video in which you want to recognize celebrities. The video must be
#' stored in an Amazon S3 bucket.
#' @param ClientRequestToken Idempotent token used to identify the start request. If you use the same
#' token with multiple
#' [`start_celebrity_recognition`][rekognition_start_celebrity_recognition]
#' requests, the same `JobId` is returned. Use `ClientRequestToken` to
#' prevent the same job from being accidentally started more than once.
#' @param NotificationChannel The Amazon SNS topic ARN that you want Amazon Rekognition Video to
#' publish the completion status of the celebrity recognition analysis to.
#' The Amazon SNS topic must have a topic name that begins with
#' *AmazonRekognition* if you are using the AmazonRekognitionServiceRole
#' permissions policy.
#' @param JobTag An identifier you specify that's returned in the completion notification
#' that's published to your Amazon Simple Notification Service topic. For
#' example, you can use `JobTag` to group related jobs and identify them in
#' the completion notification.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' JobId = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$start_celebrity_recognition(
#' Video = list(
#' S3Object = list(
#' Bucket = "string",
#' Name = "string",
#' Version = "string"
#' )
#' ),
#' ClientRequestToken = "string",
#' NotificationChannel = list(
#' SNSTopicArn = "string",
#' RoleArn = "string"
#' ),
#' JobTag = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_start_celebrity_recognition
#'
#' @aliases rekognition_start_celebrity_recognition
rekognition_start_celebrity_recognition <- function(Video, ClientRequestToken = NULL, NotificationChannel = NULL, JobTag = NULL) {
  # Describe the StartCelebrityRecognition API operation (non-paginated POST
  # to "/").
  operation <- new_operation(
    name = "StartCelebrityRecognition",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize caller arguments into the request shape and prepare the
  # expected response shape.
  request_body <- .rekognition$start_celebrity_recognition_input(
    Video = Video,
    ClientRequestToken = ClientRequestToken,
    NotificationChannel = NotificationChannel,
    JobTag = JobTag
  )
  response_shape <- .rekognition$start_celebrity_recognition_output()
  # Build the service client from the current configuration, then send the
  # request and return the parsed response.
  client <- .rekognition$service(get_config())
  send_request(new_request(client, operation, request_body, response_shape))
}
.rekognition$operations$start_celebrity_recognition <- rekognition_start_celebrity_recognition
#' Starts asynchronous detection of inappropriate, unwanted, or offensive
#' content in a stored video
#'
#' @description
#' Starts asynchronous detection of inappropriate, unwanted, or offensive
#' content in a stored video. For a list of moderation labels in Amazon
#' Rekognition, see [Using the image and video moderation
#' APIs](https://docs.aws.amazon.com/rekognition/latest/dg/moderation.html#moderation-api).
#'
#' Amazon Rekognition Video can moderate content in a video stored in an
#' Amazon S3 bucket. Use Video to specify the bucket name and the filename
#' of the video.
#' [`start_content_moderation`][rekognition_start_content_moderation]
#' returns a job identifier (`JobId`) which you use to get the results of
#' the analysis. When content analysis is finished, Amazon Rekognition
#' Video publishes a completion status to the Amazon Simple Notification
#' Service topic that you specify in `NotificationChannel`.
#'
#' To get the results of the content analysis, first check that the status
#' value published to the Amazon SNS topic is `SUCCEEDED`. If so, call
#' [`get_content_moderation`][rekognition_get_content_moderation] and pass
#' the job identifier (`JobId`) from the initial call to
#' [`start_content_moderation`][rekognition_start_content_moderation].
#'
#' For more information, see Moderating content in the Amazon Rekognition
#' Developer Guide.
#'
#' @usage
#' rekognition_start_content_moderation(Video, MinConfidence,
#' ClientRequestToken, NotificationChannel, JobTag)
#'
#' @param Video [required] The video in which you want to detect inappropriate, unwanted, or
#' offensive content. The video must be stored in an Amazon S3 bucket.
#' @param MinConfidence Specifies the minimum confidence that Amazon Rekognition must have in
#' order to return a moderated content label. Confidence represents how
#' certain Amazon Rekognition is that the moderated content is correctly
#' identified. 0 is the lowest confidence. 100 is the highest confidence.
#' Amazon Rekognition doesn't return any moderated content labels with a
#' confidence level lower than this specified value. If you don't specify
#' `MinConfidence`,
#' [`get_content_moderation`][rekognition_get_content_moderation] returns
#' labels with confidence values greater than or equal to 50 percent.
#' @param ClientRequestToken Idempotent token used to identify the start request. If you use the same
#' token with multiple
#' [`start_content_moderation`][rekognition_start_content_moderation]
#' requests, the same `JobId` is returned. Use `ClientRequestToken` to
#' prevent the same job from being accidentally started more than once.
#' @param NotificationChannel The Amazon SNS topic ARN that you want Amazon Rekognition Video to
#' publish the completion status of the content analysis to. The Amazon SNS
#' topic must have a topic name that begins with *AmazonRekognition* if you
#' are using the AmazonRekognitionServiceRole permissions policy to access
#' the topic.
#' @param JobTag An identifier you specify that's returned in the completion notification
#' that's published to your Amazon Simple Notification Service topic. For
#' example, you can use `JobTag` to group related jobs and identify them in
#' the completion notification.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' JobId = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$start_content_moderation(
#' Video = list(
#' S3Object = list(
#' Bucket = "string",
#' Name = "string",
#' Version = "string"
#' )
#' ),
#' MinConfidence = 123.0,
#' ClientRequestToken = "string",
#' NotificationChannel = list(
#' SNSTopicArn = "string",
#' RoleArn = "string"
#' ),
#' JobTag = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_start_content_moderation
#'
#' @aliases rekognition_start_content_moderation
rekognition_start_content_moderation <- function(Video, MinConfidence = NULL, ClientRequestToken = NULL, NotificationChannel = NULL, JobTag = NULL) {
  # Describe the StartContentModeration API operation (non-paginated POST
  # to "/").
  operation <- new_operation(
    name = "StartContentModeration",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize caller arguments into the request shape and prepare the
  # expected response shape.
  request_body <- .rekognition$start_content_moderation_input(
    Video = Video,
    MinConfidence = MinConfidence,
    ClientRequestToken = ClientRequestToken,
    NotificationChannel = NotificationChannel,
    JobTag = JobTag
  )
  response_shape <- .rekognition$start_content_moderation_output()
  # Build the service client from the current configuration, then send the
  # request and return the parsed response.
  client <- .rekognition$service(get_config())
  send_request(new_request(client, operation, request_body, response_shape))
}
.rekognition$operations$start_content_moderation <- rekognition_start_content_moderation
#' Starts asynchronous detection of faces in a stored video
#'
#' @description
#' Starts asynchronous detection of faces in a stored video.
#'
#' Amazon Rekognition Video can detect faces in a video stored in an Amazon
#' S3 bucket. Use Video to specify the bucket name and the filename of the
#' video. [`start_face_detection`][rekognition_start_face_detection]
#' returns a job identifier (`JobId`) that you use to get the results of
#' the operation. When face detection is finished, Amazon Rekognition Video
#' publishes a completion status to the Amazon Simple Notification Service
#' topic that you specify in `NotificationChannel`. To get the results of
#' the face detection operation, first check that the status value
#' published to the Amazon SNS topic is `SUCCEEDED`. If so, call
#' [`get_face_detection`][rekognition_get_face_detection] and pass the job
#' identifier (`JobId`) from the initial call to
#' [`start_face_detection`][rekognition_start_face_detection].
#'
#' For more information, see Detecting faces in a stored video in the
#' Amazon Rekognition Developer Guide.
#'
#' @usage
#' rekognition_start_face_detection(Video, ClientRequestToken,
#' NotificationChannel, FaceAttributes, JobTag)
#'
#' @param Video [required] The video in which you want to detect faces. The video must be stored in
#' an Amazon S3 bucket.
#' @param ClientRequestToken Idempotent token used to identify the start request. If you use the same
#' token with multiple
#' [`start_face_detection`][rekognition_start_face_detection] requests, the
#' same `JobId` is returned. Use `ClientRequestToken` to prevent the same
#' job from being accidentally started more than once.
#' @param NotificationChannel The ARN of the Amazon SNS topic to which you want Amazon Rekognition
#' Video to publish the completion status of the face detection operation.
#' The Amazon SNS topic must have a topic name that begins with
#' *AmazonRekognition* if you are using the AmazonRekognitionServiceRole
#' permissions policy.
#' @param FaceAttributes The face attributes you want returned.
#'
#' `DEFAULT` - The following subset of facial attributes are returned:
#' BoundingBox, Confidence, Pose, Quality and Landmarks.
#'
#' `ALL` - All facial attributes are returned.
#' @param JobTag An identifier you specify that's returned in the completion notification
#' that's published to your Amazon Simple Notification Service topic. For
#' example, you can use `JobTag` to group related jobs and identify them in
#' the completion notification.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' JobId = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$start_face_detection(
#' Video = list(
#' S3Object = list(
#' Bucket = "string",
#' Name = "string",
#' Version = "string"
#' )
#' ),
#' ClientRequestToken = "string",
#' NotificationChannel = list(
#' SNSTopicArn = "string",
#' RoleArn = "string"
#' ),
#' FaceAttributes = "DEFAULT"|"ALL",
#' JobTag = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_start_face_detection
#'
#' @aliases rekognition_start_face_detection
rekognition_start_face_detection <- function(Video, ClientRequestToken = NULL, NotificationChannel = NULL, FaceAttributes = NULL, JobTag = NULL) {
  # Describe the StartFaceDetection API operation (non-paginated POST to "/").
  operation <- new_operation(
    name = "StartFaceDetection",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize caller arguments into the request shape and prepare the
  # expected response shape.
  request_body <- .rekognition$start_face_detection_input(
    Video = Video,
    ClientRequestToken = ClientRequestToken,
    NotificationChannel = NotificationChannel,
    FaceAttributes = FaceAttributes,
    JobTag = JobTag
  )
  response_shape <- .rekognition$start_face_detection_output()
  # Build the service client from the current configuration, then send the
  # request and return the parsed response.
  client <- .rekognition$service(get_config())
  send_request(new_request(client, operation, request_body, response_shape))
}
.rekognition$operations$start_face_detection <- rekognition_start_face_detection
#' Starts the asynchronous search for faces in a collection that match the
#' faces of persons detected in a stored video
#'
#' @description
#' Starts the asynchronous search for faces in a collection that match the
#' faces of persons detected in a stored video.
#'
#' The video must be stored in an Amazon S3 bucket. Use Video to specify
#' the bucket name and the filename of the video.
#' [`start_face_search`][rekognition_start_face_search] returns a job
#' identifier (`JobId`) which you use to get the search results once the
#' search has completed. When searching is finished, Amazon Rekognition
#' Video publishes a completion status to the Amazon Simple Notification
#' Service topic that you specify in `NotificationChannel`. To get the
#' search results, first check that the status value published to the
#' Amazon SNS topic is `SUCCEEDED`. If so, call
#' [`get_face_search`][rekognition_get_face_search] and pass the job
#' identifier (`JobId`) from the initial call to
#' [`start_face_search`][rekognition_start_face_search]. For more
#' information, see [Searching stored videos for
#' faces](https://docs.aws.amazon.com/rekognition/latest/dg/procedure-person-search-videos.html).
#'
#' @usage
#' rekognition_start_face_search(Video, ClientRequestToken,
#' FaceMatchThreshold, CollectionId, NotificationChannel, JobTag)
#'
#' @param Video [required] The video you want to search. The video must be stored in an Amazon S3
#' bucket.
#' @param ClientRequestToken Idempotent token used to identify the start request. If you use the same
#' token with multiple [`start_face_search`][rekognition_start_face_search]
#' requests, the same `JobId` is returned. Use `ClientRequestToken` to
#' prevent the same job from being accidentally started more than once.
#' @param FaceMatchThreshold The minimum confidence in the person match to return. For example, don't
#' return any matches where confidence in matches is less than 70%. The
#' default value is 80%.
#' @param CollectionId [required] ID of the collection that contains the faces you want to search for.
#' @param NotificationChannel The ARN of the Amazon SNS topic to which you want Amazon Rekognition
#' Video to publish the completion status of the search. The Amazon SNS
#' topic must have a topic name that begins with *AmazonRekognition* if you
#' are using the AmazonRekognitionServiceRole permissions policy to access
#' the topic.
#' @param JobTag An identifier you specify that's returned in the completion notification
#' that's published to your Amazon Simple Notification Service topic. For
#' example, you can use `JobTag` to group related jobs and identify them in
#' the completion notification.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' JobId = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$start_face_search(
#' Video = list(
#' S3Object = list(
#' Bucket = "string",
#' Name = "string",
#' Version = "string"
#' )
#' ),
#' ClientRequestToken = "string",
#' FaceMatchThreshold = 123.0,
#' CollectionId = "string",
#' NotificationChannel = list(
#' SNSTopicArn = "string",
#' RoleArn = "string"
#' ),
#' JobTag = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_start_face_search
#'
#' @aliases rekognition_start_face_search
rekognition_start_face_search <- function(Video, ClientRequestToken = NULL, FaceMatchThreshold = NULL, CollectionId, NotificationChannel = NULL, JobTag = NULL) {
  # Describe the StartFaceSearch API operation (non-paginated POST to "/").
  # NOTE: the required CollectionId deliberately follows optional arguments
  # to mirror the generated API signature; keep the order unchanged.
  operation <- new_operation(
    name = "StartFaceSearch",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize caller arguments into the request shape and prepare the
  # expected response shape.
  request_body <- .rekognition$start_face_search_input(
    Video = Video,
    ClientRequestToken = ClientRequestToken,
    FaceMatchThreshold = FaceMatchThreshold,
    CollectionId = CollectionId,
    NotificationChannel = NotificationChannel,
    JobTag = JobTag
  )
  response_shape <- .rekognition$start_face_search_output()
  # Build the service client from the current configuration, then send the
  # request and return the parsed response.
  client <- .rekognition$service(get_config())
  send_request(new_request(client, operation, request_body, response_shape))
}
.rekognition$operations$start_face_search <- rekognition_start_face_search
#' Starts asynchronous detection of labels in a stored video
#'
#' @description
#' Starts asynchronous detection of labels in a stored video.
#'
#' Amazon Rekognition Video can detect labels in a video. Labels are
#' instances of real-world entities. This includes objects like flower,
#' tree, and table; events like wedding, graduation, and birthday party;
#' concepts like landscape, evening, and nature; and activities like a
#' person getting out of a car or a person skiing.
#'
#' The video must be stored in an Amazon S3 bucket. Use Video to specify
#' the bucket name and the filename of the video.
#' [`start_label_detection`][rekognition_start_label_detection] returns a
#' job identifier (`JobId`) which you use to get the results of the
#' operation. When label detection is finished, Amazon Rekognition Video
#' publishes a completion status to the Amazon Simple Notification Service
#' topic that you specify in `NotificationChannel`.
#'
#' To get the results of the label detection operation, first check that
#' the status value published to the Amazon SNS topic is `SUCCEEDED`. If
#' so, call [`get_label_detection`][rekognition_get_label_detection] and
#' pass the job identifier (`JobId`) from the initial call to
#' [`start_label_detection`][rekognition_start_label_detection].
#'
#' *Optional Parameters*
#'
#' [`start_label_detection`][rekognition_start_label_detection] has the
#' `GENERAL_LABELS` Feature applied by default. This feature allows you to
#' provide filtering criteria to the `Settings` parameter. You can filter
#' with sets of individual labels or with label categories. You can specify
#' inclusive filters, exclusive filters, or a combination of inclusive and
#' exclusive filters. For more information on filtering, see [Detecting
#' labels in a
#' video](https://docs.aws.amazon.com/rekognition/latest/dg/labels-detecting-labels-video.html).
#'
#' You can specify `MinConfidence` to control the confidence threshold for
#' the labels returned. The default is 50.
#'
#' @usage
#' rekognition_start_label_detection(Video, ClientRequestToken,
#' MinConfidence, NotificationChannel, JobTag, Features, Settings)
#'
#' @param Video [required] The video in which you want to detect labels. The video must be stored
#' in an Amazon S3 bucket.
#' @param ClientRequestToken Idempotent token used to identify the start request. If you use the same
#' token with multiple
#' [`start_label_detection`][rekognition_start_label_detection] requests,
#' the same `JobId` is returned. Use `ClientRequestToken` to prevent the
#' same job from being accidentally started more than once.
#' @param MinConfidence Specifies the minimum confidence that Amazon Rekognition Video must have
#' in order to return a detected label. Confidence represents how certain
#' Amazon Rekognition is that a label is correctly identified. 0 is the
#' lowest confidence. 100 is the highest confidence. Amazon Rekognition
#' Video doesn't return any labels with a confidence level lower than this
#' specified value.
#'
#' If you don't specify `MinConfidence`, the operation returns labels and
#' bounding boxes (if detected) with confidence values greater than or
#' equal to 50 percent.
#' @param NotificationChannel The Amazon SNS topic ARN you want Amazon Rekognition Video to publish
#' the completion status of the label detection operation to. The Amazon
#' SNS topic must have a topic name that begins with *AmazonRekognition* if
#' you are using the AmazonRekognitionServiceRole permissions policy.
#' @param JobTag An identifier you specify that's returned in the completion notification
#' that's published to your Amazon Simple Notification Service topic. For
#' example, you can use `JobTag` to group related jobs and identify them in
#' the completion notification.
#' @param Features The features to return after video analysis. You can specify that
#' GENERAL_LABELS are returned.
#' @param Settings The settings for a StartLabelDetection request. Contains the specified
#' parameters for the label detection request of an asynchronous label
#' analysis operation. Settings can include filters for GENERAL_LABELS.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' JobId = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$start_label_detection(
#' Video = list(
#' S3Object = list(
#' Bucket = "string",
#' Name = "string",
#' Version = "string"
#' )
#' ),
#' ClientRequestToken = "string",
#' MinConfidence = 123.0,
#' NotificationChannel = list(
#' SNSTopicArn = "string",
#' RoleArn = "string"
#' ),
#' JobTag = "string",
#' Features = list(
#' "GENERAL_LABELS"
#' ),
#' Settings = list(
#' GeneralLabels = list(
#' LabelInclusionFilters = list(
#' "string"
#' ),
#' LabelExclusionFilters = list(
#' "string"
#' ),
#' LabelCategoryInclusionFilters = list(
#' "string"
#' ),
#' LabelCategoryExclusionFilters = list(
#' "string"
#' )
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_start_label_detection
#'
#' @aliases rekognition_start_label_detection
rekognition_start_label_detection <- function(Video, ClientRequestToken = NULL, MinConfidence = NULL, NotificationChannel = NULL, JobTag = NULL, Features = NULL, Settings = NULL) {
  # Describe the StartLabelDetection API operation (non-paginated POST to "/").
  operation <- new_operation(
    name = "StartLabelDetection",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize caller arguments into the request shape and prepare the
  # expected response shape.
  request_body <- .rekognition$start_label_detection_input(
    Video = Video,
    ClientRequestToken = ClientRequestToken,
    MinConfidence = MinConfidence,
    NotificationChannel = NotificationChannel,
    JobTag = JobTag,
    Features = Features,
    Settings = Settings
  )
  response_shape <- .rekognition$start_label_detection_output()
  # Build the service client from the current configuration, then send the
  # request and return the parsed response.
  client <- .rekognition$service(get_config())
  send_request(new_request(client, operation, request_body, response_shape))
}
.rekognition$operations$start_label_detection <- rekognition_start_label_detection
#' Starts the asynchronous tracking of a person's path in a stored video
#'
#' @description
#' Starts the asynchronous tracking of a person's path in a stored video.
#'
#' Amazon Rekognition Video can track the path of people in a video stored
#' in an Amazon S3 bucket. Use Video to specify the bucket name and the
#' filename of the video.
#' [`start_person_tracking`][rekognition_start_person_tracking] returns a
#' job identifier (`JobId`) which you use to get the results of the
#' operation. When label detection is finished, Amazon Rekognition
#' publishes a completion status to the Amazon Simple Notification Service
#' topic that you specify in `NotificationChannel`.
#'
#' To get the results of the person detection operation, first check that
#' the status value published to the Amazon SNS topic is `SUCCEEDED`. If
#' so, call [`get_person_tracking`][rekognition_get_person_tracking] and
#' pass the job identifier (`JobId`) from the initial call to
#' [`start_person_tracking`][rekognition_start_person_tracking].
#'
#' @usage
#' rekognition_start_person_tracking(Video, ClientRequestToken,
#' NotificationChannel, JobTag)
#'
#' @param Video [required] The video in which you want to detect people. The video must be stored
#' in an Amazon S3 bucket.
#' @param ClientRequestToken Idempotent token used to identify the start request. If you use the same
#' token with multiple
#' [`start_person_tracking`][rekognition_start_person_tracking] requests,
#' the same `JobId` is returned. Use `ClientRequestToken` to prevent the
#' same job from being accidentally started more than once.
#' @param NotificationChannel The Amazon SNS topic ARN you want Amazon Rekognition Video to publish
#' the completion status of the people detection operation to. The Amazon
#' SNS topic must have a topic name that begins with *AmazonRekognition* if
#' you are using the AmazonRekognitionServiceRole permissions policy.
#' @param JobTag An identifier you specify that's returned in the completion notification
#' that's published to your Amazon Simple Notification Service topic. For
#' example, you can use `JobTag` to group related jobs and identify them in
#' the completion notification.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' JobId = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$start_person_tracking(
#' Video = list(
#' S3Object = list(
#' Bucket = "string",
#' Name = "string",
#' Version = "string"
#' )
#' ),
#' ClientRequestToken = "string",
#' NotificationChannel = list(
#' SNSTopicArn = "string",
#' RoleArn = "string"
#' ),
#' JobTag = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_start_person_tracking
#'
#' @aliases rekognition_start_person_tracking
rekognition_start_person_tracking <- function(Video, ClientRequestToken = NULL, NotificationChannel = NULL, JobTag = NULL) {
  # Describe the StartPersonTracking API operation (non-paginated POST to "/").
  operation <- new_operation(
    name = "StartPersonTracking",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize caller arguments into the request shape and prepare the
  # expected response shape.
  request_body <- .rekognition$start_person_tracking_input(
    Video = Video,
    ClientRequestToken = ClientRequestToken,
    NotificationChannel = NotificationChannel,
    JobTag = JobTag
  )
  response_shape <- .rekognition$start_person_tracking_output()
  # Build the service client from the current configuration, then send the
  # request and return the parsed response.
  client <- .rekognition$service(get_config())
  send_request(new_request(client, operation, request_body, response_shape))
}
.rekognition$operations$start_person_tracking <- rekognition_start_person_tracking
#' Starts the running of the version of a model
#'
#' @description
#' Starts the running of the version of a model. Starting a model takes a
#' while to complete. To check the current state of the model, use
#' [`describe_project_versions`][rekognition_describe_project_versions].
#'
#' Once the model is running, you can detect custom labels in new images by
#' calling [`detect_custom_labels`][rekognition_detect_custom_labels].
#'
#' You are charged for the amount of time that the model is running. To
#' stop a running model, call
#' [`stop_project_version`][rekognition_stop_project_version].
#'
#' For more information, see *Running a trained Amazon Rekognition Custom
#' Labels model* in the Amazon Rekognition Custom Labels Guide.
#'
#' This operation requires permissions to perform the
#' `rekognition:StartProjectVersion` action.
#'
#' @usage
#' rekognition_start_project_version(ProjectVersionArn, MinInferenceUnits,
#' MaxInferenceUnits)
#'
#' @param ProjectVersionArn [required] The Amazon Resource Name(ARN) of the model version that you want to
#' start.
#' @param MinInferenceUnits [required] The minimum number of inference units to use. A single inference unit
#' represents 1 hour of processing.
#'
#' For information about the number of transactions per second (TPS) that
#' an inference unit can support, see *Running a trained Amazon Rekognition
#' Custom Labels model* in the Amazon Rekognition Custom Labels Guide.
#'
#' Use a higher number to increase the TPS throughput of your model. You
#' are charged for the number of inference units that you use.
#' @param MaxInferenceUnits The maximum number of inference units to use for auto-scaling the model.
#' If you don't specify a value, Amazon Rekognition Custom Labels doesn't
#' auto-scale the model.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Status = "TRAINING_IN_PROGRESS"|"TRAINING_COMPLETED"|"TRAINING_FAILED"|"STARTING"|"RUNNING"|"FAILED"|"STOPPING"|"STOPPED"|"DELETING"|"COPYING_IN_PROGRESS"|"COPYING_COMPLETED"|"COPYING_FAILED"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$start_project_version(
#' ProjectVersionArn = "string",
#' MinInferenceUnits = 123,
#' MaxInferenceUnits = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_start_project_version
#'
#' @aliases rekognition_start_project_version
rekognition_start_project_version <- function(ProjectVersionArn, MinInferenceUnits, MaxInferenceUnits = NULL) {
  # Describe the StartProjectVersion API operation.
  operation <- new_operation(
    name = "StartProjectVersion",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller-supplied arguments and the expected response shape.
  op_input <- .rekognition$start_project_version_input(
    ProjectVersionArn = ProjectVersionArn,
    MinInferenceUnits = MinInferenceUnits,
    MaxInferenceUnits = MaxInferenceUnits
  )
  op_output <- .rekognition$start_project_version_output()
  # Build a client from the active configuration and dispatch the request.
  client <- .rekognition$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.rekognition$operations$start_project_version <- rekognition_start_project_version
#' Starts asynchronous detection of segment detection in a stored video
#'
#' @description
#' Starts asynchronous detection of segment detection in a stored video.
#'
#' Amazon Rekognition Video can detect segments in a video stored in an
#' Amazon S3 bucket. Use Video to specify the bucket name and the filename
#' of the video.
#' [`start_segment_detection`][rekognition_start_segment_detection] returns
#' a job identifier (`JobId`) which you use to get the results of the
#' operation. When segment detection is finished, Amazon Rekognition Video
#' publishes a completion status to the Amazon Simple Notification Service
#' topic that you specify in `NotificationChannel`.
#'
#' You can use the `Filters` (StartSegmentDetectionFilters) input parameter
#' to specify the minimum detection confidence returned in the response.
#' Within `Filters`, use `ShotFilter` (StartShotDetectionFilter) to filter
#' detected shots. Use `TechnicalCueFilter`
#' (StartTechnicalCueDetectionFilter) to filter technical cues.
#'
#' To get the results of the segment detection operation, first check that
#' the status value published to the Amazon SNS topic is `SUCCEEDED`. if
#' so, call [`get_segment_detection`][rekognition_get_segment_detection]
#' and pass the job identifier (`JobId`) from the initial call to
#' [`start_segment_detection`][rekognition_start_segment_detection].
#'
#' For more information, see Detecting video segments in stored video in
#' the Amazon Rekognition Developer Guide.
#'
#' @usage
#' rekognition_start_segment_detection(Video, ClientRequestToken,
#' NotificationChannel, JobTag, Filters, SegmentTypes)
#'
#' @param Video [required]
#' @param ClientRequestToken Idempotent token used to identify the start request. If you use the same
#' token with multiple
#' [`start_segment_detection`][rekognition_start_segment_detection]
#' requests, the same `JobId` is returned. Use `ClientRequestToken` to
#' prevent the same job from being accidently started more than once.
#' @param NotificationChannel The ARN of the Amazon SNS topic to which you want Amazon Rekognition
#' Video to publish the completion status of the segment detection
#' operation. Note that the Amazon SNS topic must have a topic name that
#' begins with *AmazonRekognition* if you are using the
#' AmazonRekognitionServiceRole permissions policy to access the topic.
#' @param JobTag An identifier you specify that's returned in the completion notification
#' that's published to your Amazon Simple Notification Service topic. For
#' example, you can use `JobTag` to group related jobs and identify them in
#' the completion notification.
#' @param Filters Filters for technical cue or shot detection.
#' @param SegmentTypes [required] An array of segment types to detect in the video. Valid values are
#' TECHNICAL_CUE and SHOT.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' JobId = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$start_segment_detection(
#' Video = list(
#' S3Object = list(
#' Bucket = "string",
#' Name = "string",
#' Version = "string"
#' )
#' ),
#' ClientRequestToken = "string",
#' NotificationChannel = list(
#' SNSTopicArn = "string",
#' RoleArn = "string"
#' ),
#' JobTag = "string",
#' Filters = list(
#' TechnicalCueFilter = list(
#' MinSegmentConfidence = 123.0,
#' BlackFrame = list(
#' MaxPixelThreshold = 123.0,
#' MinCoveragePercentage = 123.0
#' )
#' ),
#' ShotFilter = list(
#' MinSegmentConfidence = 123.0
#' )
#' ),
#' SegmentTypes = list(
#' "TECHNICAL_CUE"|"SHOT"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_start_segment_detection
#'
#' @aliases rekognition_start_segment_detection
rekognition_start_segment_detection <- function(Video, ClientRequestToken = NULL, NotificationChannel = NULL, JobTag = NULL, Filters = NULL, SegmentTypes) {
  # Describe the StartSegmentDetection API operation.
  operation <- new_operation(
    name = "StartSegmentDetection",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller-supplied arguments and the expected response shape.
  op_input <- .rekognition$start_segment_detection_input(
    Video = Video,
    ClientRequestToken = ClientRequestToken,
    NotificationChannel = NotificationChannel,
    JobTag = JobTag,
    Filters = Filters,
    SegmentTypes = SegmentTypes
  )
  op_output <- .rekognition$start_segment_detection_output()
  # Build a client from the active configuration and dispatch the request.
  client <- .rekognition$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.rekognition$operations$start_segment_detection <- rekognition_start_segment_detection
#' Starts processing a stream processor
#'
#' @description
#' Starts processing a stream processor. You create a stream processor by
#' calling
#' [`create_stream_processor`][rekognition_create_stream_processor]. To
#' tell [`start_stream_processor`][rekognition_start_stream_processor]
#' which stream processor to start, use the value of the `Name` field
#' specified in the call to
#' [`create_stream_processor`][rekognition_create_stream_processor].
#'
#' If you are using a label detection stream processor to detect labels,
#' you need to provide a `Start selector` and a `Stop selector` to
#' determine the length of the stream processing time.
#'
#' @usage
#' rekognition_start_stream_processor(Name, StartSelector, StopSelector)
#'
#' @param Name [required] The name of the stream processor to start processing.
#' @param StartSelector Specifies the starting point in the Kinesis stream to start processing.
#' You can use the producer timestamp or the fragment number. If you use
#' the producer timestamp, you must put the time in milliseconds. For more
#' information about fragment numbers, see
#' [Fragment](https://docs.aws.amazon.com/kinesisvideostreams/latest/dg/API_reader_Fragment.html).
#'
#' This is a required parameter for label detection stream processors and
#' should not be used to start a face search stream processor.
#' @param StopSelector Specifies when to stop processing the stream. You can specify a maximum
#' amount of time to process the video.
#'
#' This is a required parameter for label detection stream processors and
#' should not be used to start a face search stream processor.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' SessionId = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$start_stream_processor(
#' Name = "string",
#' StartSelector = list(
#' KVSStreamStartSelector = list(
#' ProducerTimestamp = 123,
#' FragmentNumber = "string"
#' )
#' ),
#' StopSelector = list(
#' MaxDurationInSeconds = 123
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_start_stream_processor
#'
#' @aliases rekognition_start_stream_processor
rekognition_start_stream_processor <- function(Name, StartSelector = NULL, StopSelector = NULL) {
  # Describe the StartStreamProcessor API operation.
  operation <- new_operation(
    name = "StartStreamProcessor",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller-supplied arguments and the expected response shape.
  op_input <- .rekognition$start_stream_processor_input(
    Name = Name,
    StartSelector = StartSelector,
    StopSelector = StopSelector
  )
  op_output <- .rekognition$start_stream_processor_output()
  # Build a client from the active configuration and dispatch the request.
  client <- .rekognition$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.rekognition$operations$start_stream_processor <- rekognition_start_stream_processor
#' Starts asynchronous detection of text in a stored video
#'
#' @description
#' Starts asynchronous detection of text in a stored video.
#'
#' Amazon Rekognition Video can detect text in a video stored in an Amazon
#' S3 bucket. Use Video to specify the bucket name and the filename of the
#' video. [`start_text_detection`][rekognition_start_text_detection]
#' returns a job identifier (`JobId`) which you use to get the results of
#' the operation. When text detection is finished, Amazon Rekognition Video
#' publishes a completion status to the Amazon Simple Notification Service
#' topic that you specify in `NotificationChannel`.
#'
#' To get the results of the text detection operation, first check that the
#' status value published to the Amazon SNS topic is `SUCCEEDED`. if so,
#' call [`get_text_detection`][rekognition_get_text_detection] and pass the
#' job identifier (`JobId`) from the initial call to
#' [`start_text_detection`][rekognition_start_text_detection].
#'
#' @usage
#' rekognition_start_text_detection(Video, ClientRequestToken,
#' NotificationChannel, JobTag, Filters)
#'
#' @param Video [required]
#' @param ClientRequestToken Idempotent token used to identify the start request. If you use the same
#' token with multiple
#' [`start_text_detection`][rekognition_start_text_detection] requests, the
#' same `JobId` is returned. Use `ClientRequestToken` to prevent the same
#' job from being accidentaly started more than once.
#' @param NotificationChannel
#' @param JobTag An identifier returned in the completion status published by your Amazon
#' Simple Notification Service topic. For example, you can use `JobTag` to
#' group related jobs and identify them in the completion notification.
#' @param Filters Optional parameters that let you set criteria the text must meet to be
#' included in your response.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' JobId = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$start_text_detection(
#' Video = list(
#' S3Object = list(
#' Bucket = "string",
#' Name = "string",
#' Version = "string"
#' )
#' ),
#' ClientRequestToken = "string",
#' NotificationChannel = list(
#' SNSTopicArn = "string",
#' RoleArn = "string"
#' ),
#' JobTag = "string",
#' Filters = list(
#' WordFilter = list(
#' MinConfidence = 123.0,
#' MinBoundingBoxHeight = 123.0,
#' MinBoundingBoxWidth = 123.0
#' ),
#' RegionsOfInterest = list(
#' list(
#' BoundingBox = list(
#' Width = 123.0,
#' Height = 123.0,
#' Left = 123.0,
#' Top = 123.0
#' ),
#' Polygon = list(
#' list(
#' X = 123.0,
#' Y = 123.0
#' )
#' )
#' )
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_start_text_detection
#'
#' @aliases rekognition_start_text_detection
rekognition_start_text_detection <- function(Video, ClientRequestToken = NULL, NotificationChannel = NULL, JobTag = NULL, Filters = NULL) {
  # Describe the StartTextDetection API operation.
  operation <- new_operation(
    name = "StartTextDetection",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller-supplied arguments and the expected response shape.
  op_input <- .rekognition$start_text_detection_input(
    Video = Video,
    ClientRequestToken = ClientRequestToken,
    NotificationChannel = NotificationChannel,
    JobTag = JobTag,
    Filters = Filters
  )
  op_output <- .rekognition$start_text_detection_output()
  # Build a client from the active configuration and dispatch the request.
  client <- .rekognition$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.rekognition$operations$start_text_detection <- rekognition_start_text_detection
#' Stops a running model
#'
#' @description
#' Stops a running model. The operation might take a while to complete. To
#' check the current status, call
#' [`describe_project_versions`][rekognition_describe_project_versions].
#'
#' This operation requires permissions to perform the
#' `rekognition:StopProjectVersion` action.
#'
#' @usage
#' rekognition_stop_project_version(ProjectVersionArn)
#'
#' @param ProjectVersionArn [required] The Amazon Resource Name (ARN) of the model version that you want to
#' delete.
#'
#' This operation requires permissions to perform the
#' `rekognition:StopProjectVersion` action.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Status = "TRAINING_IN_PROGRESS"|"TRAINING_COMPLETED"|"TRAINING_FAILED"|"STARTING"|"RUNNING"|"FAILED"|"STOPPING"|"STOPPED"|"DELETING"|"COPYING_IN_PROGRESS"|"COPYING_COMPLETED"|"COPYING_FAILED"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$stop_project_version(
#' ProjectVersionArn = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_stop_project_version
#'
#' @aliases rekognition_stop_project_version
rekognition_stop_project_version <- function(ProjectVersionArn) {
  # Describe the StopProjectVersion API operation.
  operation <- new_operation(
    name = "StopProjectVersion",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller-supplied arguments and the expected response shape.
  op_input <- .rekognition$stop_project_version_input(ProjectVersionArn = ProjectVersionArn)
  op_output <- .rekognition$stop_project_version_output()
  # Build a client from the active configuration and dispatch the request.
  client <- .rekognition$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.rekognition$operations$stop_project_version <- rekognition_stop_project_version
#' Stops a running stream processor that was created by
#' CreateStreamProcessor
#'
#' @description
#' Stops a running stream processor that was created by
#' [`create_stream_processor`][rekognition_create_stream_processor].
#'
#' @usage
#' rekognition_stop_stream_processor(Name)
#'
#' @param Name [required] The name of a stream processor created by
#' [`create_stream_processor`][rekognition_create_stream_processor].
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$stop_stream_processor(
#' Name = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_stop_stream_processor
#'
#' @aliases rekognition_stop_stream_processor
rekognition_stop_stream_processor <- function(Name) {
  # Describe the StopStreamProcessor API operation.
  operation <- new_operation(
    name = "StopStreamProcessor",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller-supplied arguments and the expected response shape.
  op_input <- .rekognition$stop_stream_processor_input(Name = Name)
  op_output <- .rekognition$stop_stream_processor_output()
  # Build a client from the active configuration and dispatch the request.
  client <- .rekognition$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.rekognition$operations$stop_stream_processor <- rekognition_stop_stream_processor
#' Adds one or more key-value tags to an Amazon Rekognition collection,
#' stream processor, or Custom Labels model
#'
#' @description
#' Adds one or more key-value tags to an Amazon Rekognition collection,
#' stream processor, or Custom Labels model. For more information, see
#' [Tagging AWS
#' Resources](https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html).
#'
#' This operation requires permissions to perform the
#' `rekognition:TagResource` action.
#'
#' @usage
#' rekognition_tag_resource(ResourceArn, Tags)
#'
#' @param ResourceArn [required] Amazon Resource Name (ARN) of the model, collection, or stream processor
#' that you want to assign the tags to.
#' @param Tags [required] The key-value tags to assign to the resource.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$tag_resource(
#' ResourceArn = "string",
#' Tags = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_tag_resource
#'
#' @aliases rekognition_tag_resource
rekognition_tag_resource <- function(ResourceArn, Tags) {
  # Describe the TagResource API operation.
  operation <- new_operation(
    name = "TagResource",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller-supplied arguments and the expected response shape.
  op_input <- .rekognition$tag_resource_input(ResourceArn = ResourceArn, Tags = Tags)
  op_output <- .rekognition$tag_resource_output()
  # Build a client from the active configuration and dispatch the request.
  client <- .rekognition$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.rekognition$operations$tag_resource <- rekognition_tag_resource
#' Removes one or more tags from an Amazon Rekognition collection, stream
#' processor, or Custom Labels model
#'
#' @description
#' Removes one or more tags from an Amazon Rekognition collection, stream
#' processor, or Custom Labels model.
#'
#' This operation requires permissions to perform the
#' `rekognition:UntagResource` action.
#'
#' @usage
#' rekognition_untag_resource(ResourceArn, TagKeys)
#'
#' @param ResourceArn [required] Amazon Resource Name (ARN) of the model, collection, or stream processor
#' that you want to remove the tags from.
#' @param TagKeys [required] A list of the tags that you want to remove.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$untag_resource(
#' ResourceArn = "string",
#' TagKeys = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_untag_resource
#'
#' @aliases rekognition_untag_resource
rekognition_untag_resource <- function(ResourceArn, TagKeys) {
  # Describe the UntagResource API operation.
  operation <- new_operation(
    name = "UntagResource",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller-supplied arguments and the expected response shape.
  op_input <- .rekognition$untag_resource_input(ResourceArn = ResourceArn, TagKeys = TagKeys)
  op_output <- .rekognition$untag_resource_output()
  # Build a client from the active configuration and dispatch the request.
  client <- .rekognition$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.rekognition$operations$untag_resource <- rekognition_untag_resource
#' Adds or updates one or more entries (images) in a dataset
#'
#' @description
#' Adds or updates one or more entries (images) in a dataset. An entry is a
#' JSON Line which contains the information for a single image, including
#' the image location, assigned labels, and object location bounding boxes.
#' For more information, see Image-Level labels in manifest files and
#' Object localization in manifest files in the *Amazon Rekognition Custom
#' Labels Developer Guide*.
#'
#' If the `source-ref` field in the JSON line references an existing image,
#' the existing image in the dataset is updated. If `source-ref` field
#' doesn't reference an existing image, the image is added as a new image
#' to the dataset.
#'
#' You specify the changes that you want to make in the `Changes` input
#' parameter. There isn't a limit to the number JSON Lines that you can
#' change, but the size of `Changes` must be less than 5MB.
#'
#' [`update_dataset_entries`][rekognition_update_dataset_entries] returns
#' immediatly, but the dataset update might take a while to complete. Use
#' [`describe_dataset`][rekognition_describe_dataset] to check the current
#' status. The dataset updated successfully if the value of `Status` is
#' `UPDATE_COMPLETE`.
#'
#' To check if any non-terminal errors occured, call
#' [`list_dataset_entries`][rekognition_list_dataset_entries] and check for
#' the presence of `errors` lists in the JSON Lines.
#'
#' Dataset update fails if a terminal error occurs (`Status` =
#' `UPDATE_FAILED`). Currently, you can't access the terminal error
#' information from the Amazon Rekognition Custom Labels SDK.
#'
#' This operation requires permissions to perform the
#' `rekognition:UpdateDatasetEntries` action.
#'
#' @usage
#' rekognition_update_dataset_entries(DatasetArn, Changes)
#'
#' @param DatasetArn [required] The Amazon Resource Name (ARN) of the dataset that you want to update.
#' @param Changes [required] The changes that you want to make to the dataset.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$update_dataset_entries(
#' DatasetArn = "string",
#' Changes = list(
#' GroundTruth = raw
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_update_dataset_entries
#'
#' @aliases rekognition_update_dataset_entries
rekognition_update_dataset_entries <- function(DatasetArn, Changes) {
  # Describe the UpdateDatasetEntries API operation.
  operation <- new_operation(
    name = "UpdateDatasetEntries",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller-supplied arguments and the expected response shape.
  op_input <- .rekognition$update_dataset_entries_input(DatasetArn = DatasetArn, Changes = Changes)
  op_output <- .rekognition$update_dataset_entries_output()
  # Build a client from the active configuration and dispatch the request.
  client <- .rekognition$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.rekognition$operations$update_dataset_entries <- rekognition_update_dataset_entries
#' Allows you to update a stream processor
#'
#' @description
#' Allows you to update a stream processor. You can change some settings
#' and regions of interest and delete certain parameters.
#'
#' @usage
#' rekognition_update_stream_processor(Name, SettingsForUpdate,
#' RegionsOfInterestForUpdate, DataSharingPreferenceForUpdate,
#' ParametersToDelete)
#'
#' @param Name [required] Name of the stream processor that you want to update.
#' @param SettingsForUpdate The stream processor settings that you want to update. Label detection
#' settings can be updated to detect different labels with a different
#' minimum confidence.
#' @param RegionsOfInterestForUpdate Specifies locations in the frames where Amazon Rekognition checks for
#' objects or people. This is an optional parameter for label detection
#' stream processors.
#' @param DataSharingPreferenceForUpdate Shows whether you are sharing data with Rekognition to improve model
#' performance. You can choose this option at the account level or on a
#' per-stream basis. Note that if you opt out at the account level this
#' setting is ignored on individual streams.
#' @param ParametersToDelete A list of parameters you want to delete from the stream processor.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$update_stream_processor(
#' Name = "string",
#' SettingsForUpdate = list(
#' ConnectedHomeForUpdate = list(
#' Labels = list(
#' "string"
#' ),
#' MinConfidence = 123.0
#' )
#' ),
#' RegionsOfInterestForUpdate = list(
#' list(
#' BoundingBox = list(
#' Width = 123.0,
#' Height = 123.0,
#' Left = 123.0,
#' Top = 123.0
#' ),
#' Polygon = list(
#' list(
#' X = 123.0,
#' Y = 123.0
#' )
#' )
#' )
#' ),
#' DataSharingPreferenceForUpdate = list(
#' OptIn = TRUE|FALSE
#' ),
#' ParametersToDelete = list(
#' "ConnectedHomeMinConfidence"|"RegionsOfInterest"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname rekognition_update_stream_processor
#'
#' @aliases rekognition_update_stream_processor
rekognition_update_stream_processor <- function(Name, SettingsForUpdate = NULL, RegionsOfInterestForUpdate = NULL, DataSharingPreferenceForUpdate = NULL, ParametersToDelete = NULL) {
  # Describe the UpdateStreamProcessor API operation.
  operation <- new_operation(
    name = "UpdateStreamProcessor",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller-supplied arguments and the expected response shape.
  op_input <- .rekognition$update_stream_processor_input(
    Name = Name,
    SettingsForUpdate = SettingsForUpdate,
    RegionsOfInterestForUpdate = RegionsOfInterestForUpdate,
    DataSharingPreferenceForUpdate = DataSharingPreferenceForUpdate,
    ParametersToDelete = ParametersToDelete
  )
  op_output <- .rekognition$update_stream_processor_output()
  # Build a client from the active configuration and dispatch the request.
  client <- .rekognition$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.rekognition$operations$update_stream_processor <- rekognition_update_stream_processor
|
0cad6090c1be4ec37a3225573d9e820aafbcaae8
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/phylosim/examples/omegaHist.CodonSequence.Rd.R
|
ea333bfa778cb4537c7f3ced18f5452b53335227
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 580
|
r
|
omegaHist.CodonSequence.Rd.R
|
# Example script extracted from the phylosim package documentation for
# omegaHist.CodonSequence: builds a codon sequence under a GY94 model and
# plots a histogram of its omega (dN/dS) values over a range of sites.
library(phylosim)
### Name: omegaHist.CodonSequence
### Title: Plot a histogram of omega values from a range
### Aliases: omegaHist.CodonSequence CodonSequence.omegaHist
###   omegaHist,CodonSequence-method

### ** Examples

# create a GY94 codon substitution process
p<-GY94()
# create a CodonSequence object of 20 codons,
# attach a process p to every site
s<-CodonSequence(length=20,processes=list(list(p)))
# set omega values through omegaVarM2.CodonSequence
# (M2 "selection" model: p0/p1 are site-class proportions -- presumably the
# remainder falls in the omega>1 class; confirm against phylosim docs)
omegaVarM2(s,p,p0=0.5,p1=0.2,omega=1.5)
# get a histogram of omega values from the site range 1:15
omegaHist(s,p,breaks=10,1:15)
|
4a8148e7adbea52ac13fecf83c5ec238df562cdf
|
15d417ccfacf20314589ed086c0b8daa2245cc1f
|
/libc/sys/unlink/man.r
|
f6b353a45d541ea51776d693032dd09777e0ec1c
|
[] |
no_license
|
paulohrpinheiro/tropix-libs
|
17e7faf0715b104fbf6f305074de76bd1cf08fc5
|
c41d33a6f95064ec6c0567a801048896be28c626
|
refs/heads/master
| 2021-12-03T19:21:09.621584
| 2014-11-06T17:47:26
| 2014-11-06T17:47:26
| null | 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 1,110
|
r
|
man.r
|
.bp
.he 'UNLINK (sys)'TROPIX: Manual de Referência'UNLINK (sys)'
.fo 'Atualizado em 23.08.95'Versão 3.0'Pag. %'
.b NOME
.in 5
.wo "unlink -"
remove uma entrada de um diretório
.br
.in
.sp
.b SINTAXE
.in 5
.(l
#include <sys/syscall.h>
int unlink (const char *path);
.)l
.in
.sp
.b DESCRIÇÃO
.in 5
A chamada ao sistema "unlink" remove uma referência a um arquivo,
ou seja, remove a entrada do diretório cujo nome é dado pela cadeia
de caracteres apontada por "path".
.sp
Quando todos as referências a um arquivo foram removidas,
e nenhum processo está com o arquivo aberto, o espaço ocupado
por ele é liberado e ele deixa de existir.
.sp
Se um ou mais processos
estiverem com o arquivo aberto quando a última referência
for removida, a sua remoção é adiada até que todos os processos
fechem o arquivo.
.in
.sp
.b
VALOR DE RETORNO
.r
.in 5
Em caso de sucesso, a chamada ao sistema devolve zero.
Em caso contrário, devolve -1 e indica em "errno" a causa do erro.
.in
.sp
.b
VEJA TAMBÉM
.r
.in 5
.wo "(cmd): "
rm
.br
.wo "(sys): "
close, link, open
.br
.in
.sp
.b ESTADO
.in 5
Efetivo.
|
b70f239ca7d2d35045479f0dfc92d8733a6272e7
|
6f5518ed43cfeb96dd87e9f112aba0058940d0b0
|
/Exercise Files/Stats with One Variable/03_04/Ex03_04.R
|
86071aee192d360bc0efe0cf3c14b0505a4e88e0
|
[] |
no_license
|
Lula27/R_StatisticsEssentialTraining
|
f0518f3cc6f9f9c2459ac2c6629e9bdb30ce7857
|
7238740374d5e6f115974c2f430009cb7d47cadd
|
refs/heads/master
| 2021-06-27T01:57:11.663965
| 2019-03-22T23:06:03
| 2019-03-22T23:06:03
| 105,818,590
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 952
|
r
|
Ex03_04.R
|
# R Statistics Essential Training
# Ex03_04
# Single mean: Hypothesis test and confidence interval

# Load data - `quakes` ships with base R (datasets package); open its help
# page and inspect the structure.
?quakes
str(quakes)

# See the first 5 rows of the data
quakes[1:5, ]

# Just look at the first 5 values of the magnitude variable
quakes$mag[1:5]

# Select magnitude for the entire data set - store in object called mag
mag <- quakes$mag
mag

# Select first five magnitudes only & notice the difference in t-test results
# (tiny n widens the confidence interval and shrinks the t statistic)
mag2 <- quakes$mag[1:5]

# Use t-test for one sample
# Default t-test (compares mean to 0)
# Ho: true mean = 0
# Ha: true mean != 0
?t.test()
t.test(mag)  # t-test for the entire sample of magnitudes
# Notice for this natural-science example that the t statistic is huge; for
# social sciences, expect much smaller values (around single digits)
t.test(mag2)  # t-test for the sample of first 5 observations

# Right one-sided t-test with true mean (mu) = 4 as Ho
t.test(mag, alternative = "greater", mu = 4)

# Clean up the workspace
# NOTE(review): rm(list = ls()) is fine in a tutorial but discouraged in
# reusable scripts -- it wipes the caller's global environment.
rm(list = ls())
|
4fb5f7b36b19d8c263a794f5a5e9b6b30f8fe1de
|
431719d48e8567140216bdfdcd27c76cc335a490
|
/man/AgaveCache.Rd
|
585be2a26fac24dfdafb8e83f745849230c827b2
|
[
"BSD-3-Clause"
] |
permissive
|
agaveplatform/r-sdk
|
4f32526da4889b4c6d72905e188ccdbb3452b840
|
b09f33d150103e7ef25945e742b8d0e8e9bb640d
|
refs/heads/master
| 2018-10-15T08:34:11.607171
| 2018-09-21T23:40:19
| 2018-09-21T23:40:19
| 118,783,778
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 649
|
rd
|
AgaveCache.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AgaveCache.r
\docType{data}
\name{AgaveCache}
\alias{AgaveCache}
\title{Agave Cache utility class}
\format{An object of class \code{R6ClassGenerator} of length 24.}
\usage{
AgaveCache
}
\description{
rAgave.AgaveCache
}
\details{
AgaveCache Class
Handles access to the authentication cache including reading,
writing from default system locations.
NOTE: This class is NOT generated by the swagger code generator program.
}
\section{Methods}{
\describe{
load
write
getClient
setClient
getToken
setToken
getTenant
setTenant
getProperty
}
}
\keyword{datasets}
|
d5a88c585658137b2d629195805da1d3061b02c5
|
335ae8eef3bef300794d25a2e3f70bf5ffd65b0a
|
/scripts/analise/dadosMar15/usuarios/kendallDistance.R
|
93d277f334295a3c5669c8d055b82f216a4c118e
|
[] |
no_license
|
davidcmm/campinaPulse
|
6ba006a13744f2488d650928129b9d2dcdae46aa
|
4142bc6e306ff2b95452b2db466a8a48c003555d
|
refs/heads/master
| 2021-01-23T19:44:10.166746
| 2019-09-28T12:56:06
| 2019-09-28T12:56:06
| 20,739,839
| 0
| 0
| null | 2019-10-22T04:56:53
| 2014-06-11T20:20:11
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 1,994
|
r
|
kendallDistance.R
|
# Functions to calculate the kendall tau distance of two rankings
mergeSort <- function(x){
  # Merge sort that also counts inversions (pairs of elements that appear
  # out of order in x).
  #
  # Args:
  #   x: vector of comparable items to sort
  # Returns:
  #   A list with:
  #     "inversions":   number of inversions found while sorting
  #     "sortedVector": x in ascending order
  #
  # Method adapted from http://goo.gl/LyDFRf
  if (length(x) == 1) {
    return(list(inversions = 0, sortedVector = x))
  }
  n <- length(x)
  n_left <- ceiling(n / 2)
  n_right <- n - n_left
  left <- mergeSort(x[1:n_left])
  right <- mergeSort(x[n_left + 1:n_right])
  count <- left$inversions + right$inversions
  a <- left$sortedVector
  b <- right$sortedVector
  i <- 1
  j <- 1
  # Merge the two sorted halves back into x, counting cross-half inversions.
  while (i + j <= n_left + n_right + 1) {
    take_from_left <- j > n_right || (i <= n_left && a[i] <= b[j])
    if (take_from_left) {
      x[i + j - 1] <- a[i]
      i <- i + 1
    } else {
      # Every element still waiting in the left half is greater than b[j],
      # so each one forms an inversion with it.
      count <- count + n_left + 1 - i
      x[i + j - 1] <- b[j]
      j <- j + 1
    }
  }
  list(inversions = count, sortedVector = x)
}
numberOfInversions <- function(x){
  # Counts the inversions in x (Kendall tau distance from sorted order)
  # by delegating to mergeSort().
  #
  # Args:
  #   x: vector whose inversions are counted
  # Returns:
  #   The number of inversions in x.
  mergeSort(x)$inversions
}
normalizedKendallTauDistance <- function(x,y){
  # Normalized Kendall tau distance between two rankings.
  #
  # Args:
  #   x: One of two vectors whose distance is to be calculated.
  #   y: The other vector. x and y must have the same length, greater than
  #      one, with no missing values.
  # Returns:
  #   The raw Kendall tau distance divided by the maximum possible number
  #   of discordant pairs, i.e. a value in [0, 1].
  #
  # Based on http://en.wikipedia.org/wiki/Kendall_tau_distance
  n_items <- length(x)
  # Relabel each item by its rank in x, ordered by y, then count how many
  # pairs appear out of order.
  discordant <- numberOfInversions(order(x)[rank(y)])
  max_discordant <- (n_items * (n_items - 1)) / 2
  discordant / max_discordant
}
# Script entry point: compares two ranking files given on the command line.
# Each file is expected to have two columns: an item id (V1) and a rank or
# score (V2).  NOTE(review): read.table() is called with defaults, so the
# files must be whitespace-separated with no header -- confirm with callers.
args <- commandArgs(trailingOnly = TRUE)
rank1 <- read.table(args[1])
rank2 <- read.table(args[2])
# Align both rankings on the shared item id before comparing them.
matriz <- merge(rank1, rank2, by="V1")
# Last expression: auto-prints the normalized distance when run via Rscript.
normalizedKendallTauDistance(matriz$V2.x, matriz$V2.y)
|
c7ead956e4f8e3061bfdd6b8cc4614182f790e83
|
1d14abe82ab2d1eaee21725835b3bde44b44009e
|
/R/wnd_gen.R
|
788a272d50db9ca27ac5c54c8185f2d71c0e6868
|
[] |
no_license
|
Dardare/prjct
|
e055f254f8fc18a7a0c421a89a0ee523d5c7372b
|
7903d977894b830e18fe9b4572d4fc99c5a48659
|
refs/heads/master
| 2021-01-18T23:29:45.754532
| 2016-06-16T20:54:23
| 2016-06-16T20:54:23
| 46,169,800
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 539
|
r
|
wnd_gen.R
|
windows_gen <- function(wndSize, fname) {
  # Collects target and non-target EEG windows across every recording
  # referenced by `fname`.
  #
  # Args:
  #   wndSize: window size, forwarded unchanged to wnd_create()
  #   fname:   input passed to separate_files(); presumably names the EEG /
  #            event file pairs -- TODO confirm against separate_files()
  # Returns:
  #   A two-element list: list(<target windows>, <nontarget windows>).
  sep_files <- separate_files(fname)
  list_eeg <- sep_files[[1]]
  list_events <- sep_files[[2]]
  target <- list()
  nontarget <- list()
  # Fix: the original `seq(from = 1, to = length(list_eeg), by = 1)` errors
  # with "wrong sign in 'by' argument" when list_eeg is empty; seq_along()
  # yields an empty sequence and the loop is simply skipped.
  for (i in seq_along(list_eeg)) {
    tmp <- wnd_create(list_eeg[i], list_events[i], wndSize)
    target <- c(target, tmp[[1]])
    nontarget <- c(nontarget, tmp[[2]])
  }
  list(target, nontarget)
}
|
d99fd8afb2b9006521bd7fc3822aa93e329deedf
|
9cdf25555df47da99a719168b24715b45a9630e9
|
/cachematrix.R
|
16f5f6f1e47b2417a1ff5e4cbbd726f4fd8d6026
|
[] |
no_license
|
markraphael/ProgrammingAssignment2
|
aecd8d82a4708a8ef2876ee91090955197050c43
|
bbe68431bfd28f567e568a37454c4948b01f61b0
|
refs/heads/master
| 2021-08-30T03:48:39.459783
| 2017-12-15T23:00:05
| 2017-12-15T23:00:05
| 114,416,692
| 0
| 0
| null | 2017-12-15T22:36:09
| 2017-12-15T22:36:08
| null |
UTF-8
|
R
| false
| false
| 751
|
r
|
cachematrix.R
|
## Creates a matrix object which caches its inverse.
## The returned value is a list of four closures sharing the matrix `x`
## and its cached inverse via the enclosing environment.
makeCacheMatrix <- function(x = matrix()) {
  cachedInverse <- NULL
  # Accessor for the wrapped matrix.
  get <- function() x
  # Replace the matrix; any previously cached inverse is now stale.
  set <- function(y) {
    x <<- y
    cachedInverse <<- NULL
  }
  # Read / write the cached inverse (NULL means "not computed yet").
  getInverse <- function() cachedInverse
  setInverse <- function(inverse) {
    cachedInverse <<- inverse
  }
  list(get = get, set = set,
       getInverse = getInverse,
       setInverse = setInverse)
}
## Retrieves the cached inverse of the matrix object passed in,
## computing (via solve) and caching it on the first call.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getInverse()
  if (is.null(cached)) {
    # Cache miss: invert the underlying matrix and remember the result.
    cached <- solve(x$get(), ...)
    x$setInverse(cached)
  }
  cached
}
|
10520d2a14168bd46e002a8cdf6c1569b2e8cfb7
|
14b48b4d294d2f5d113ba70821b012f97a609861
|
/ctg.r
|
d8dafb2f98212556f7ef2a9f2fec13a5900352de
|
[] |
no_license
|
Rutujakenjale/classification-
|
99b5c80cd711239036fce4eb6723976a28af8fd1
|
db98f8d453832d65d33f65c4d367f3f64b335bbe
|
refs/heads/master
| 2020-03-29T21:35:35.431166
| 2018-09-26T06:27:04
| 2018-09-26T06:27:04
| 150,375,641
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 788
|
r
|
ctg.r
|
# Classification of cardiotocography (CTG) data with conditional inference
# trees: read the data, fit two ctree models, report test-set accuracy.

# read file
data <- read.csv("F:/imarticus projects/dataset/data/datasets/CTG.csv")
View(data)
#install.packages("party")
library(party)
#install.packages("rpart")
library(rpart)

# The outcome NSP must be a factor for classification.
data$NSP <- factor(data$NSP)

### splitting data: ~90% train / ~10% test
sam_data <- sample(2, nrow(data), replace = TRUE, prob = c(.9, .1))
train <- data[sam_data == 1, ]
# BUG FIX: was 'Sam_data' (capital S) — an undefined object, because R is
# case-sensitive; the split indicator created above is 'sam_data'.
test <- data[sam_data == 2, ]

# model: conditional inference tree on three predictors
ctree1 <- ctree(NSP ~ LB + AC + FM, data = train)
plot(ctree1)
# Stricter split criteria (higher mincriterion, larger minsplit) prune the tree.
ctree2 <- ctree(NSP ~ LB + AC + FM, data = train,
                controls = ctree_control(mincriterion = .90, minsplit = 450))
plot(ctree2)

Pred_val <- predict(ctree2, test)
Pred_val
pred_actual_DF <- data.frame(Pred_val, test$NSP)
pred_actual_DF

predict_table <- table(Pred_val, test$NSP) ## confusion matrix
predict_table

# Accuracy: correct predictions (diagonal) over all predictions, in percent.
acc <- sum(diag(predict_table)) / sum(predict_table) * 100
acc
|
4c7024c6a178c243e501407b8b5d68e9f79bced9
|
9e72f2d88e396432a7bdf217c8408f8a1fff02e8
|
/181101_ggplot.R
|
bb13270894e09be048100049b3253a20bed49f9c
|
[] |
no_license
|
SeokHyeon-Hwang/R_data_analysis
|
271cdc33b601d0cc61788e4a0fc1e51795daccbd
|
61c4af51e1cac736a0290c6ac6c2dc2a927256f1
|
refs/heads/master
| 2021-07-11T23:02:26.650102
| 2019-03-05T04:52:33
| 2019-03-05T04:52:33
| 148,569,523
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 236
|
r
|
181101_ggplot.R
|
library(ggplot2)
library(ggmap)
library(MASS)

# Explore the Cars93 data set shipped with MASS.
data(Cars93)
str(Cars93)
summary(Cars93)

# Scatter plot of horsepower by manufacturer, built as a single ggplot chain.
p1 <- ggplot(data = Cars93, aes(x = Manufacturer, y = Horsepower)) +
  geom_point() +
  ggtitle('Plot of Manufacturer vs Horsepower')
p1
|
c0a3825a3253feacc847f8950062fecec6aa1b7f
|
4c2e0b1ea2520e3c36414687258525bce7df3e8a
|
/code/01_install_packages.R
|
f51184c58c1fb42a0ce38cd0b1da4e41dcba9890
|
[] |
no_license
|
czheluo/Teach-Bioinformatics-R-dataviz
|
d1c313f3f790373133e66ee2a37100cc8b0a7582
|
c58e75e7e3d18472616649bcff9aee64c640fdc7
|
refs/heads/master
| 2021-08-07T20:25:57.608170
| 2020-06-05T01:23:22
| 2020-06-05T01:23:22
| 186,523,260
| 7
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 884
|
r
|
01_install_packages.R
|
##########################################
# http://www.majorbio.com/
# Copyright (C) 2019 in Majorbio workshop
# contact: meng.luo@majorbio.com
##########################################
# Teaching script: the three ways to install R packages (CRAN, Bioconductor,
# GitHub) and the two ways to load them.
# NOTE: "name" below is a placeholder — substitute a real package name
# before running; as written these calls will fail.
# install R packages
install.packages("name")
if (!requireNamespace("BiocManager", quietly = TRUE)) {
  install.packages("BiocManager")
}
BiocManager::install("name")
devtools::install_github("name")
# load packages (library() errors if missing; require() returns FALSE)
library(name)
require(name)
# examples with real packages
install.packages('nVennR')
install.packages('VennDiagram')
if (!requireNamespace("BiocManager", quietly = TRUE))
  install.packages("BiocManager")
BiocManager::install("ComplexHeatmap")
devtools::install_github("jokergoo/ComplexHeatmap")
library(ComplexHeatmap)
library(nVennR)
library(VennDiagram)
require(ComplexHeatmap)
require(nVennR)
require(VennDiagram)
|
dad639da012008530ed5f20d856cdc98048f1b7d
|
f73b240bd75b6d72a8af59d6cb8dac6f9fc1eef1
|
/run_analysis.R
|
2a15233f19d2205e7421a4f4b15b87384f8d2f7a
|
[] |
no_license
|
whantos/Getting-and-Cleaning-Data-Course-Project
|
42b85bee5853eac8e9ab8601244b184a2c746068
|
ad82ea77eca035ee68338671c1183d3a8540e1b9
|
refs/heads/master
| 2021-01-10T02:15:26.257088
| 2016-03-22T10:26:19
| 2016-03-22T10:26:19
| 54,198,537
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,801
|
r
|
run_analysis.R
|
# Getting-and-Cleaning-Data course project: download the UCI HAR data set,
# merge train/test splits, keep mean/std measurements, label activities,
# and write per-subject/activity averages to summary_data.txt.
library(data.table)
library(httr)

# Download the raw data as a zip and extract it.
# (Fixed: '=' replaced by the idiomatic '<-' for assignment.)
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
file <- "data/projectfiles.zip"
if(!dir.exists("data")) { dir.create("data") }
if(!file.exists(file)) { download.file(url,file) }
# check if already extracted
#if(!dir.exists("data/UCI HAR Dataset")) {
print("Unzip Data ...")
unzip(file, exdir = "data")
print("Unzip complete")
#}
print("Raw Data is now available.")

# read the variable-labels
features <- fread("data/UCI HAR Dataset/features.txt")
# read the activity-labels
activity_labels <- fread("data/UCI HAR Dataset/activity_labels.txt")
# select only mean(), std() and meanFreq() measurement columns
colNumbersToKeep <- grep("-((mean(Freq)?)|(std))\\(\\)", features$V2, value = FALSE)
# read the measurements
trainXData <- fread("data/UCI HAR Dataset/train/X_train.txt"
                    , col.names = features$V2[colNumbersToKeep]
                    , select = colNumbersToKeep)
testXData <- fread("data/UCI HAR Dataset/test/X_test.txt"
                   , col.names = features$V2[colNumbersToKeep]
                   , select = colNumbersToKeep)
# read the activity codes
trainYData <- fread("data/UCI HAR Dataset/train/Y_train.txt"
                    , col.names = c("activity"))
testYData <- fread("data/UCI HAR Dataset/test/Y_test.txt"
                   , col.names = c("activity"))
# read the subject ids
trainSubjectData <- fread("data/UCI HAR Dataset/train/subject_train.txt"
                          , col.names = c("subject"))
testSubjectData <- fread("data/UCI HAR Dataset/test/subject_test.txt"
                         , col.names = c("subject"))

# merge the train and test data (row order: train first, then test)
yData <- rbind(trainYData,testYData)
xData <- rbind(trainXData,testXData)
subjectData <- rbind(trainSubjectData, testSubjectData)
# join subject, activity and measurements column-wise
mergedData <- cbind(subjectData, yData, xData)
# encode the activity column as a factor with descriptive labels
mergedData$activity <- factor( mergedData$activity
                               , levels = activity_labels$V1
                               , labels = activity_labels$V2)

# second data set: the average of each variable per activity and subject
summary_data <- mergedData[,lapply(.SD, mean), by=c("subject","activity")]
# rename the measurement columns to show the applied summary method
colnames(summary_data)[-c(1:2)] <- paste(colnames(summary_data)[-c(1:2)]
                                         ,"_mean",sep="")
# save the result as a space-separated text file
write.table(summary_data, "./summary_data.txt", row.names = FALSE)
print("summary_data.txt is now available.")
|
04bd60d3dcea4c3fadd8c6340acbed2c1e6f7629
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/mvprpb/examples/mvprpb-package.Rd.R
|
905b0c5e4bfd7b06cc3bfc4e2a7d1246e3ec543f
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 312
|
r
|
mvprpb-package.Rd.R
|
library(mvprpb)
### Name: mvprpb-package
### Title: mvprpb
### Aliases: mvprpb-package mvprpb
### ** Examples
# Orthant-probability example for an 8-dimensional normal distribution.
dim.p <- 8                               # dimension
mu <- c( rep(- 0.5 , dim.p -1) , 3 )     # mean vector
cov <- diag( dim.p ) * 0.5 + 0.5         # equicorrelated covariance (0.5 off-diagonal)
n.itr <- 800                             # number of iterations
integ.range <- 10                        # integration range
# NOTE(review): the call below is 'mvorpb' while the package is 'mvprpb' —
# presumably the exported function really is spelled this way; confirm
# against the package's reference manual.
res.val <- mvorpb( dim.p , mu , cov ,n.itr , integ.range )
print(res.val)
|
16590a371f019fbabed34baa38577571d3776ab5
|
a397bbd13ae390b0ad6b14f9162513e5d6a8e11b
|
/R/print.TextData.R
|
964db9156ec3bcccc2abb03afa3081acb765c562
|
[] |
no_license
|
cran/Xplortext
|
4de9793765cb6fb7f54d201b8193ad641a1c2c42
|
7ff29ca1559610aec4d1982e7b204b5046b7d331
|
refs/heads/master
| 2023-04-29T02:57:45.909957
| 2023-04-24T08:10:02
| 2023-04-24T08:10:02
| 92,412,290
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,923
|
r
|
print.TextData.R
|
#' Print method for TextData objects.
#'
#' Lists the result objects available in `x`; when `file` is given, also
#' redirects a full, `sep`-delimited report (summaries, word/segment
#' indexes, document-by-word/segment tables, contextual variables) to that
#' file via sink().
#'
#' @param x a TextData object.
#' @param file optional path; when non-NULL the detailed report is written there.
#' @param sep field separator used by write.table in the report (default ";").
#' @param ... unused; kept for S3 print-method compatibility.
#' @export
print.TextData <- function (x, file = NULL, sep = ";", ...)
{
  options(stringsAsFactors = FALSE)
  res.TextData <- x
  if (!inherits(res.TextData, "TextData"))
    stop("non convenient data")
  # Helper meant to close all open sinks.
  sink.reset <- function(){
    for(i in seq_len(sink.number())){sink()}}
  # NOTE(review): this evaluates the function object without calling it
  # (no parentheses) — presumably 'sink.reset()' was intended; confirm.
  sink.reset
  cat("*The results are available in the following objects:\n\n")
  # Build the name/description table; the row count depends on which
  # optional components (DocSeg, SourceTerm, FullDocTerm) are present.
  addindex <- 0
  if(is.null(x$DocSeg)) indexp <- 7 else indexp <- 9
  if(!is.null(x$SourceTerm)) addindex <- 1
  if(!is.null(x$FullDocTerm)) addindex <- addindex +1
  index <- indexp+addindex
  res <- array("", c(index, 2), list(1:index, c("name", "description")))
  res[1, ] <- c("$summGen", "General summary")
  res[2, ] <- c("$summDoc", "Documents summary")
  res[3, ] <- c("$indexW", "Index of words")
  res[4, ] <- c("$DocTerm", "Documents by Words table (*1)")
  res[5, ] <- c("$context", "Contextual variables")
  res[6, ] <- c("$info", "Information about selection of words")
  res[7, ] <- c("$remov.docs", "Removed empty documents")
  if(!is.null(x$DocSeg)){
    res[8, ] <- c("$indexS", "Index of segments")
    res[9, ] <- c("$DocSeg", "Documents by Segments table (*2)")}
  if(!is.null(x$SourceTerm)) {
    res[indexp+1, ] <- c("$SourceTerm", "Sequency Documents by Words table (*3)")
  }
  print(res[c(1:index),])
  # Hints about the sparse (compressed) matrix components.
  print(paste("(*1) $DocTerm has a compressed format"))
  print(paste("Use print(as.matrix(x$DocTerm))"))
  if(!is.null(x$DocSeg)) {
    print(paste("(*2) $DocSeg has a compressed format"))
    print(paste("Use print(as.matrix(x$DocSeg))"))
  }
  if(!is.null(x$SourceTerm)) {
    print(paste("(*3) $SourceTerm has a compressed format"))
    print(paste("Use print(as.matrix(x$SourceTerm))"))
  }
  if(!is.null(x$FullDocTerm)) {
    print(paste("(*4) $FullDocTerm has a compressed format"))
    print(paste("Use print(as.matrix(x$FullDocTerm))"))
  }
  # Detailed report, written to `file` through sink().
  if (!is.null(file)) {
    for(i in seq_len(sink.number())){sink(NULL)}
    sink(file)
    ndoc <- x$info$Ndoc[[1]]
    # var.agg == "" means documents were not aggregated by a variable.
    var.agg <- as.character(x$info$name.var.agg[[1]])
    cat("\nGeneral summary (summGen)\n")
    # NOTE(review): inside this branch `file` is never NULL, so the
    # if(is.null(file)) arm is dead code — confirm intent.
    if(is.null(file)) print(x$summGen) else {
      out1 <- t(c("","Before", "After",sep="\t"))
      write.table(out1, quote=FALSE, sep = sep,col.names=FALSE, row.names=FALSE)
      write.table(x$summGen, quote=FALSE, sep = sep, col.names=FALSE)}
    cat("\nDocuments summary (summDoc)\n")
    # Column headers differ between the aggregated and non-aggregated cases.
    if(var.agg=="") {
      name1 <- c("Occurrences", "DistinctWords", "PctLength", "Mean Length100")
      nameF <- c("DocName", name1, name1)
      name2 <- c("", rep("before",4), rep("after",4))
    } else {
      nameF <- c("DocName", "Occurrences", "DistinctWords", "NumberDocs","PctLength", "MeanLength100",
                 "Occurrences", "DistinctWords", "PctLength", "MeanLength100")
      name2 <- c("", rep("before",5), rep("after",4))
    }
    # Glue the two header rows on top of the transposed summary table.
    A1 <- data.frame(nameF,name2)
    colnames(A1) <- NULL
    A2 <- cbind(A1,t(x$summDoc))
    colnames(A2) <- NULL
    A3 <- data.frame(t(A2))
    colnames(A3) <- NULL
    rownames(A3) <- c(""," ",c(1:(nrow(A3)-2)))
    write.table(A3[1:(ndoc+2),], quote=FALSE, sep = sep)
    cat("\nIndex of words (indexW)\n")
    cat("\nIndex of the most frequent words\n")
    A1 <- cbind(format(as.data.frame(rownames(x$indexW)), justify = "l"), x$indexW)
    colnames(A1) <- c(format("Word",justify = "l"), "Frequency", "N.Documents")
    out1 <- t(c("","Word", "Frequency", "N.Documents",sep="\t"))
    write.table(out1, quote=FALSE, sep = sep, col.names=FALSE, row.names=FALSE)
    write.table(A1, quote=FALSE, sep = sep, col.names=FALSE)
    out1 <- t(c("","Word", "Frequency", "N.Documents",sep="\t"))
    Tabfq <- x$indexW[order(rownames(x$indexW)),]
    cat("\nIndex of the words in alphabetical order\n")
    write.table(Tabfq, quote=FALSE, sep = sep, col.names=FALSE)
    cat("\nDocuments by Words table (DocTerm)\n")
    out1 <- t(c("", colnames(x$DocTerm),sep="\t"))
    write.table(out1, quote=FALSE, sep = sep,col.names=FALSE, row.names=FALSE)
    write.table(as.matrix(x$DocTerm), quote=FALSE, sep = sep, col.names=FALSE, row.names=TRUE)
    # Repeated segments (only when segment detection was requested)
    if(!is.null(x$DocSeg)) {
      cat("\nNumber of repeated segments\n")
      cat(" ", ncol(x$DocSeg),"\n")
      cat("\nIndex of segments (indexS)\n")
      cat("\nSegments ordered by frequency (indexS$segOrderFreq)\n")
      A1 <- cbind(format(as.data.frame(rownames(x$indexS$segOrderFreq)), justify = "l"),
                  x$indexS$segOrderFreq)
      colnames(A1) <- c("Number", format("Segment",justify = "l"), "Frequency", "Long")
      out1 <- t(c("Number","Segment", "Frequency", "Long",sep="\t"))
      write.table(out1, quote=FALSE, sep = sep, col.names=FALSE, row.names=FALSE)
      write.table(x$indexS$segOrderFreq, quote=FALSE, sep = sep, col.names=FALSE)
      cat("\nSegments in alphabetical order (indexS$segOrderlist)\n")
      out1 <- t(c("Number","Segment", "Frequency", "Long",sep="\t"))
      write.table(out1, quote=FALSE, sep = sep, col.names=FALSE, row.names=FALSE)
      write.table(x$indexS$segOrderlist, quote=FALSE, sep = sep, col.names=FALSE)
      cat("\nDocuments by Repeated segments table (DocSeg)\n")
      out1 <- t(c("", colnames(x$DocSeg),sep="\t"))
      write.table(out1, quote=FALSE, sep = sep,col.names=FALSE, row.names=FALSE)
      write.table(as.matrix(x$DocSeg), quote=FALSE, sep = sep, col.names=FALSE, row.names=TRUE)
    }
    # Supplementary (contextual) variables
    if(var.agg=="") {
      if(!is.null(x$context$quali))
        if(length(x$context$quali)>0) {
          cat("\nSummary of contextual qualitative variables\n")
          print(summary(x$context$quali))}
      if(!is.null(x$context$quanti))
        if(ncol(x$context$quanti)>0){
          cat("\nSummary of contextual quantitative variables\n")
          print(summary(x$context$quanti))}
    } else {
      if(!is.null(x$context$quanti)){
        cat("\nSummary of contextual quantitative variables\n")
        # Append the per-document count (column 4 of summDoc) as "Ndocs".
        Tabfq <- cbind(x$context$quanti, x$summDoc[,4])
        sname <- colnames(x$context$quanti)
        colnames(Tabfq) <- c(sname,"Ndocs")
        out1 <- t(c("",colnames(Tabfq)))
        write.table(out1, quote=FALSE, sep = sep,col.names=FALSE, row.names=FALSE)
        write.table(Tabfq, quote=FALSE, sep = sep, col.names=FALSE, row.names=TRUE)
      }
      if(!is.null(x$context$quali$qualivar))
        if(ncol(x$context$quali$qualivar)>0){
          cat("\nSummary of contextual qualitative variables\n")
          out1 <- t(c("",colnames(x$context$quali$qualitable)))
          write.table(out1, quote=FALSE, sep = sep,col.names=FALSE, row.names=FALSE)
          write.table(x$context$quali$qualitable, quote=FALSE, sep = sep, col.names=FALSE, row.names=TRUE)
        }
    }
    if(!is.null(x$SourceTerm)) {
      cat("\nFirst 5 documents in the source documents by words table\n")
      cat("For print all documents to a file use write.table(as.matrix(x$SourceTerm),file=''...)\n")
      write.table(head(as.matrix(x$SourceTerm)))}
    cat("\nInformation about selected options in TextData\n")
    writeLines(unlist(lapply(x$info, paste, collapse=" ")))
    sink()
  }
  if (!is.null(file)) {
    print(paste("All the results are in the file", file))
  }
}
|
4f2fcc6710fe59c263895551b2331bac6c2712f4
|
288c8e62f30fedce6f423bb73cd0c41fce48a421
|
/R/utils.R
|
7b3e9f6d054367b99474fbd1379f3da4d484034b
|
[] |
no_license
|
EmilHvitfeldt/ggpage
|
7d6ea7a0be00e3c6eab5d5d255b5ebbc7982bc2e
|
83821d9dc1fa3639c6a94d4f5e3c9134697060fc
|
refs/heads/master
| 2022-07-13T02:32:44.315719
| 2019-06-13T23:41:07
| 2019-06-13T23:41:07
| 101,570,252
| 332
| 20
| null | 2018-07-30T16:45:03
| 2017-08-27T17:55:37
|
R
|
UTF-8
|
R
| false
| false
| 3,132
|
r
|
utils.R
|
#' Internal function for converting words to lines
#'
#' Extends the str_wrap() function from the stringr package to work with
#' longer strings by wrapping in chunks of `wot_number` words.
#'
#' @param words data.frame. Each row is a separate word, stored in a column
#'   named `text`.
#' @param wot_number Numeric. How many words go into each chunk before
#'   wrapping (guards str_wrap() against very long inputs).
#' @return Character vector; each element is a separate line.
#' @export
word_to_line <- function(words, wot_number = 1000) {
  words %>%
    # Chunk id: 0 for the first wot_number words, 1 for the next, etc.
    dplyr::mutate(split = floor((seq_len(NROW(words)) - 1L) / wot_number)) %>%
    split(.$split) %>%
    # Per chunk: join words into one string, wrap it, split back into lines.
    purrr::map(~ .x %>% dplyr::pull(.data$text) %>%
                 stringr::str_c(collapse = " ") %>%
                 stringr::str_wrap() %>%
                 stringr::str_split("\n *")) %>%
    unlist()
}
#' paragraph split
#'
#' Converts a word count into a vector of paragraph lengths summing to `n`.
#'
#' FUN must be a function that takes a number n and returns a vector of
#' natural numbers (candidate paragraph sizes).
#'
#' @param n Numeric. Number of words to distribute.
#' @param FUN Function generating candidate paragraph sizes.
#' @param ... Extra arguments passed on to FUN.
#' @return Numeric vector of paragraph sizes summing to `n`.
#' @export
para_index <- function(n, FUN, ...) {
  sizes <- FUN(n, ...)
  if(any(sizes < 0)) stop("FUN must return non-negative numbers.")
  # Keep just enough paragraphs to reach n words.
  keep <- sum(cumsum(sizes) < n) + 1
  result <- sizes[seq_len(keep)]
  # Shrink the final paragraph so the total is exactly n.
  result[keep] <- result[keep] - (sum(result) - n)
  result
}
#' Repeating of indexes
#'
#' Expands a vector of counts into a vector of indices: element i of `x`
#' contributes `x[i]` copies of the value `i`.
#'
#' @param x Numerical, vector of non-negative counts.
#' @return Numerical vector of length `sum(x)`.
#' @examples
#' break_help(c(1, 2, 3))
#' break_help(c(6, 8, 23, 50))
#' @export
break_help <- function(x) {
  # rep(..., times = x) is the vectorized base-R equivalent of mapping
  # rep(i, x[i]) over i and flattening. It also fixes the empty-input bug:
  # the previous purrr::map2(x, 1:length(x), ...) errored for length-0 x
  # because 1:length(x) yields c(1, 0).
  rep(seq_along(x), times = x)
}
#' Identify the edges of the paper of each page
#'
#' @param data data.frame created by ggpage_build.
#' @return data.frame with one row per page and the columns xmin, xmax,
#'   ymin, ymax describing that page's paper extent.
#' @examples
#' paper_shape(ggpage_build(tinderbox))
#' @export
paper_shape <- function(data) {
  # NOTE(review): xmin takes the max and xmax the min (likewise for y) —
  # presumably the word-box coordinates are oriented so that the paper edge
  # is the extreme opposite bound; confirm against ggpage_build() output.
  dplyr::group_by(data, .data$page) %>%
    dplyr::summarise(xmin = max(.data$xmin),
                     xmax = min(.data$xmax),
                     ymin = max(.data$ymin),
                     ymax = min(.data$ymax))
}
#' Add line number within pages
#'
#' Appends a `line` column that counts rows 1..k within each page.
#'
#' @param data data.frame with a `page` column.
#' @return data.frame with an added `line` column.
#' @export
page_liner <- function(data) {
  # Rows per page, in page order.
  rows_per_page <- data %>%
    dplyr::group_by(.data$page) %>%
    dplyr::tally() %>%
    dplyr::pull(.data$n)
  # sequence() restarts 1..k for every page, matching the row order of `data`.
  data %>%
    dplyr::mutate(line = sequence(rows_per_page))
}
#' Adjust lines
#'
#' Shifts the word boxes of one line horizontally according to the chosen
#' alignment.
#'
#' @param line data.frame of word boxes with `xmin` and `xmax` columns.
#' @param max_length numerical. Number of letters allowed on a line.
#' @param type Type of line alignment. Must be one of "left", "right" or "both".
#' @return data.frame with shifted `xmin`/`xmax`.
#' @export
line_align <- function(line, max_length, type) {
  occupied <- abs(max(line$xmin) - min(line$xmax))
  word_count <- NROW(line)
  shift <- 0
  # "both": spread the slack evenly between words (first word stays put).
  if (word_count > 1 && type == "both") {
    shift <- c(0, (max_length - occupied) / (word_count - 1) * seq_len(word_count - 1))
  }
  # "right": push the whole line to the right margin.
  if (type == "right") {
    shift <- max_length - occupied
  }
  # "left": no shift.
  if (type == "left") {
    shift <- 0
  }
  line$xmax <- line$xmax + shift
  line$xmin <- line$xmin + shift
  line
}
|
b3a796dc259b05233fd3e4d20a40ce3095300e7d
|
578ff90fab102b21f283e9e4b624a7fafee4cfdf
|
/inst/shiny/DiagnosticsExplorer/ui.R
|
c14045b43fdf283bade411e8238cd1d58eef5607
|
[
"Apache-2.0"
] |
permissive
|
gowthamrao/CohortDiagnostics
|
a97de0cd96b316498edad08944a6ed63e9f5effc
|
befe0eb0e488fa8a4d308fed861cb03c64b67294
|
refs/heads/main
| 2023-03-16T09:53:40.348161
| 2022-09-01T20:57:59
| 2022-09-01T20:57:59
| 241,672,665
| 0
| 0
| null | 2020-02-19T16:56:33
| 2020-02-19T16:56:33
| null |
UTF-8
|
R
| false
| false
| 46,742
|
r
|
ui.R
|
addInfo <- function(item, infoId) {
  # Small blue "i" badge that doubles as a shiny action button with id `infoId`.
  badge <- tags$small(
    class = "badge pull-right action-button",
    style = "padding: 1px 6px 2px 6px; background-color: steelblue;",
    type = "button",
    id = infoId,
    "i"
  )
  # Append the badge to the menu item's first child (its link element).
  item$children[[1]]$children <-
    append(item$children[[1]]$children, list(badge))
  return(item)
}
cohortReference <- function(outputId) {
  # Scrollable warning-styled box hosting the cohort-reference UI output.
  scrollArea <- tags$div(
    style = "max-height: 100px; overflow-y: auto",
    shiny::uiOutput(outputId = outputId)
  )
  shinydashboard::box(
    status = "warning",
    width = "100%",
    scrollArea
  )
}
cohortReferenceWithDatabaseId <- function(cohortOutputId, databaseOutputId) {
  # Two-column reference strip: cohorts (70% width) and database (30% width),
  # wrapped in a scrollable warning-styled box.
  cohortCell <- tags$td(
    width = "70%",
    tags$b("Cohorts :"),
    shiny::uiOutput(outputId = cohortOutputId)
  )
  databaseCell <- tags$td(
    style = "align: right !important;", width = "30%",
    tags$b("Database :"),
    shiny::uiOutput(outputId = databaseOutputId)
  )
  shinydashboard::box(
    status = "warning",
    width = "100%",
    tags$div(
      style = "max-height: 100px; overflow-y: auto",
      tags$table(
        width = "100%",
        tags$tr(cohortCell, databaseCell)
      )
    )
  )
}
# Vocabulary-version choices for the concept-set picker (one per database).
choicesFordatabaseOrVocabularySchema <- database$databaseIdWithVocabularyVersion
# Dashboard header: when annotation is enabled, show either a "Sign in"
# button or the signed-in user's name, toggled by the server-side
# output.postAnnotationEnabled flag.
if (enableAnnotation) {
  headerContent <- tags$li(
    shiny::conditionalPanel(
      "output.postAnnotationEnabled == false",
      shiny::actionButton(
        inputId = "annotationUserPopUp",
        label = "Sign in"
      )
    ),
    shiny::conditionalPanel(
      "output.postAnnotationEnabled == true",
      shiny::uiOutput(outputId = "userNameLabel", style = "color:white;font-weight:bold;padding-right:30px")
    ),
    class = "dropdown",
    style = "margin-top: 8px !important; margin-right : 5px !important"
  )
} else {
  # Annotation disabled: keep an empty placeholder so the header layout is stable.
  headerContent <- tags$li(
    class = "dropdown",
    style = "margin-top: 8px !important; margin-right : 5px !important"
  )
}
header <-
  shinydashboard::dashboardHeader(title = "Cohort Diagnostics", headerContent)
# Sidebar menu: one entry per diagnostic, shown only when the corresponding
# result object was loaded (checked with exists()), plus context-sensitive
# picker inputs whose visibility depends on the active tab (input.tabs).
sidebarMenu <-
  shinydashboard::sidebarMenu(
    id = "tabs",
    if (exists("cohort")) {
      shinydashboard::menuItem(text = "Cohort Definition", tabName = "cohortDefinition")
    },
    if (exists("includedSourceConcept")) {
      addInfo(
        item = shinydashboard::menuItem(text = "Concepts in Data Source", tabName = "conceptsInDataSource"),
        infoId = "conceptsInDataSourceInfo"
      )
    },
    if (exists("orphanConcept")) {
      addInfo(
        item = shinydashboard::menuItem(text = "Orphan Concepts", tabName = "orphanConcepts"),
        infoId = "orphanConceptsInfo"
      )
    },
    if (exists("cohortCount")) {
      addInfo(
        item = shinydashboard::menuItem(text = "Cohort Counts", tabName = "cohortCounts"),
        infoId = "cohortCountsInfo"
      )
    },
    if (exists("incidenceRate")) {
      addInfo(
        item = shinydashboard::menuItem(text = "Incidence Rate", tabName = "incidenceRate"),
        infoId = "incidenceRateInfo"
      )
    },
    if (exists("temporalCovariateValue")) {
      addInfo(
        item = shinydashboard::menuItem(text = "Time Distributions", tabName = "timeDistribution"),
        infoId = "timeDistributionInfo"
      )
    },
    if (exists("inclusionRuleStats")) {
      addInfo(
        item = shinydashboard::menuItem(text = "Inclusion Rule Statistics", tabName = "inclusionRuleStats"),
        infoId = "inclusionRuleStatsInfo"
      )
    },
    if (exists("indexEventBreakdown")) {
      addInfo(
        item = shinydashboard::menuItem(text = "Index Event Breakdown", tabName = "indexEventBreakdown"),
        infoId = "indexEventBreakdownInfo"
      )
    },
    if (exists("visitContext")) {
      addInfo(
        item = shinydashboard::menuItem(text = "Visit Context", tabName = "visitContext"),
        infoId = "visitContextInfo"
      )
    },
    if (exists("relationship")) {
      addInfo(
        shinydashboard::menuItem(text = "Cohort Overlap", tabName = "cohortOverlap"),
        infoId = "cohortOverlapInfo"
      )
    },
    if (exists("temporalCovariateValue")) {
      addInfo(
        shinydashboard::menuItem(text = "Cohort Characterization", tabName = "cohortCharacterization"),
        infoId = "cohortCharacterizationInfo"
      )
    },
    if (exists("temporalCovariateValue")) {
      addInfo(
        shinydashboard::menuItem(text = "Temporal Characterization", tabName = "temporalCharacterization"),
        infoId = "temporalCharacterizationInfo"
      )
    },
    if (exists("temporalCovariateValue")) {
      addInfo(
        item = shinydashboard::menuItem(text = "Compare Cohort Char.", tabName = "compareCohortCharacterization"),
        infoId = "compareCohortCharacterizationInfo"
      )
    },
    if (exists("temporalCovariateValue")) {
      addInfo(
        shinydashboard::menuItem(text = "Compare Temporal Char.", tabName = "compareTemporalCharacterization"),
        infoId = "compareTemporalCharacterizationInfo"
      )
    },
    shinydashboard::menuItem(text = "Meta data", tabName = "databaseInformation"),
    # Conditional dropdown boxes in the side bar ------------------------------------------------------
    # Single-database picker for tabs that work on one database at a time.
    shiny::conditionalPanel(
      condition = "input.tabs!='incidenceRate' &
      input.tabs != 'timeDistribution' &
      input.tabs != 'cohortCharacterization' &
      input.tabs != 'cohortCounts' &
      input.tabs != 'indexEventBreakdown' &
      input.tabs != 'cohortDefinition' &
      input.tabs != 'conceptsInDataSource' &
      input.tabs != 'orphanConcepts' &
      input.tabs != 'inclusionRuleStats' &
      input.tabs != 'visitContext' &
      input.tabs != 'cohortOverlap'",
      shinyWidgets::pickerInput(
        inputId = "database",
        label = "Database",
        choices = database$databaseId %>% unique(),
        selected = database$databaseId[1],
        multiple = FALSE,
        choicesOpt = list(style = rep_len("color: black;", 999)),
        options = shinyWidgets::pickerOptions(
          actionsBox = TRUE,
          liveSearch = TRUE,
          size = 10,
          liveSearchStyle = "contains",
          liveSearchPlaceholder = "Type here to search",
          virtualScroll = 50
        )
      )
    ),
    # Multi-database picker for the tabs that compare across databases.
    shiny::conditionalPanel(
      condition = "input.tabs=='incidenceRate' |
      input.tabs == 'timeDistribution' |
      input.tabs =='cohortCharacterization' |
      input.tabs == 'cohortCounts' |
      input.tabs == 'indexEventBreakdown' |
      input.tabs == 'conceptsInDataSource' |
      input.tabs == 'orphanConcepts' |
      input.tabs == 'inclusionRuleStats' |
      input.tabs == 'visitContext' |
      input.tabs == 'cohortOverlap'",
      shinyWidgets::pickerInput(
        inputId = "databases",
        label = "Database",
        choices = database$databaseId %>% unique(),
        selected = database$databaseId[1],
        multiple = TRUE,
        choicesOpt = list(style = rep_len("color: black;", 999)),
        options = shinyWidgets::pickerOptions(
          actionsBox = TRUE,
          liveSearch = TRUE,
          size = 10,
          liveSearchStyle = "contains",
          liveSearchPlaceholder = "Type here to search",
          virtualScroll = 50
        )
      )
    ),
    # Temporal-window picker; defaults to the primary temporal windows.
    if (exists("temporalCovariateValue")) {
      shiny::conditionalPanel(
        condition = "input.tabs=='temporalCharacterization' | input.tabs =='compareTemporalCharacterization'",
        shinyWidgets::pickerInput(
          inputId = "timeIdChoices",
          label = "Temporal Choice",
          choices = temporalCharacterizationTimeIdChoices$temporalChoices,
          multiple = TRUE,
          choicesOpt = list(style = rep_len("color: black;", 999)),
          selected = temporalCharacterizationTimeIdChoices %>%
            dplyr::filter(.data$primaryTimeId == 1) %>%
            dplyr::filter(.data$isTemporal == 1) %>%
            dplyr::arrange(.data$sequence) %>%
            dplyr::pull("temporalChoices"),
          options = shinyWidgets::pickerOptions(
            actionsBox = TRUE,
            liveSearch = TRUE,
            size = 10,
            liveSearchStyle = "contains",
            liveSearchPlaceholder = "Type here to search",
            virtualScroll = 50
          )
        )
      )
    },
    # Single target-cohort picker (choices filled in by the server).
    shiny::conditionalPanel(
      condition = "input.tabs != 'databaseInformation' &
      input.tabs != 'cohortDefinition' &
      input.tabs != 'cohortCounts' &
      input.tabs != 'cohortOverlap'&
      input.tabs != 'incidenceRate' &
      input.tabs != 'timeDistribution'",
      shinyWidgets::pickerInput(
        inputId = "targetCohort",
        label = "Cohort",
        choices = c(""),
        multiple = FALSE,
        choicesOpt = list(style = rep_len("color: black;", 999)),
        options = shinyWidgets::pickerOptions(
          actionsBox = TRUE,
          liveSearch = TRUE,
          liveSearchStyle = "contains",
          size = 10,
          liveSearchPlaceholder = "Type here to search",
          virtualScroll = 50
        )
      )
    ),
    # Multi-cohort picker for the tabs that show several cohorts at once.
    shiny::conditionalPanel(
      condition = "input.tabs == 'cohortCounts' |
      input.tabs == 'cohortOverlap' |
      input.tabs == 'incidenceRate' |
      input.tabs == 'timeDistribution'",
      shinyWidgets::pickerInput(
        inputId = "cohorts",
        label = "Cohorts",
        choices = c(""),
        selected = c(""),
        multiple = TRUE,
        choicesOpt = list(style = rep_len("color: black;", 999)),
        options = shinyWidgets::pickerOptions(
          actionsBox = TRUE,
          liveSearch = TRUE,
          liveSearchStyle = "contains",
          size = 10,
          dropupAuto = TRUE,
          liveSearchPlaceholder = "Type here to search",
          virtualScroll = 50
        )
      )
    ),
    # Comparator-cohort picker for the compare tabs.
    shiny::conditionalPanel(
      condition = "input.tabs == 'compareCohortCharacterization'|
      input.tabs == 'compareTemporalCharacterization'",
      shinyWidgets::pickerInput(
        inputId = "comparatorCohort",
        label = "Comparator",
        choices = c(""),
        multiple = FALSE,
        choicesOpt = list(style = rep_len("color: black;", 999)),
        options = shinyWidgets::pickerOptions(
          actionsBox = TRUE,
          liveSearch = TRUE,
          liveSearchStyle = "contains",
          size = 10,
          dropupAuto = TRUE,
          liveSearchPlaceholder = "Type here to search",
          virtualScroll = 50
        )
      )
    ),
    # Concept-set picker for concept-centric tabs.
    shiny::conditionalPanel(
      condition = "input.tabs == 'cohortCharacterization' |
      input.tabs == 'compareCohortCharacterization' |
      input.tabs == 'temporalCharacterization' |
      input.tabs == 'compareTemporalCharacterization' |
      input.tabs == 'conceptsInDataSource' |
      input.tabs == 'orphanConcepts'",
      shinyWidgets::pickerInput(
        inputId = "conceptSetsSelected",
        label = "Concept sets",
        choices = c(""),
        selected = c(""),
        multiple = TRUE,
        choicesOpt = list(style = rep_len("color: black;", 999)),
        options = shinyWidgets::pickerOptions(
          actionsBox = TRUE,
          liveSearch = TRUE,
          size = 10,
          liveSearchStyle = "contains",
          liveSearchPlaceholder = "Type here to search",
          virtualScroll = 50
        )
      )
    )
  )
# Side bar code: wrap the menu built above; start expanded (collapsed = FALSE).
sidebar <-
  shinydashboard::dashboardSidebar(sidebarMenu,
    width = NULL,
    collapsed = FALSE
  )
# Body - items in tabs --------------------------------------------------
bodyTabItems <- shinydashboard::tabItems(
shinydashboard::tabItem(
tabName = "about",
if (exists("aboutText")) {
HTML(aboutText)
}
),
shinydashboard::tabItem(
tabName = "cohortDefinition",
shinydashboard::box(
width = NULL,
status = "primary",
htmltools::withTags(
table(
width = "100%",
tr(
td(
align = "left",
h4("Cohort Definition")
),
td(
align = "right",
shiny::downloadButton(
outputId = "exportAllCohortDetails",
label = "Export Cohorts Zip",
icon = shiny::icon("file-export"),
style = "margin-top: 5px; margin-bottom: 5px;"
)
)
)
)
),
shiny::column(
12,
shinycssloaders::withSpinner(reactable::reactableOutput(outputId = "cohortDefinitionTable"))
),
shiny::column(
12,
conditionalPanel(
"output.cohortDefinitionRowIsSelected == true",
shiny::tabsetPanel(
type = "tab",
shiny::tabPanel(
title = "Details",
shiny::htmlOutput("cohortDetailsText")
),
shiny::tabPanel(
title = "Cohort Count",
tags$br(),
htmltools::withTags(table(
width = "100%",
tr(
td(
align = "right",
)
)
)),
shinycssloaders::withSpinner(reactable::reactableOutput(outputId = "cohortDefinitionCohortCountTable"))
),
shiny::tabPanel(
title = "Cohort definition",
copyToClipboardButton(
toCopyId = "cohortDefinitionText",
style = "margin-top: 5px; margin-bottom: 5px;"
),
shinycssloaders::withSpinner(shiny::htmlOutput("cohortDefinitionText"))
),
shiny::tabPanel(
title = "Concept Sets",
shinycssloaders::withSpinner(reactable::reactableOutput(outputId = "conceptsetExpressionsInCohort")),
shiny::conditionalPanel(
condition = "output.cohortDefinitionConceptSetExpressionRowIsSelected == true",
tags$table(
tags$tr(
tags$td(
shiny::radioButtons(
inputId = "conceptSetsType",
label = "",
choices = c(
"Concept Set Expression",
"Resolved",
"Orphan concepts",
"Json"
),
selected = "Concept Set Expression",
inline = TRUE
)
),
tags$td(
shinyWidgets::pickerInput(
inputId = "databaseOrVocabularySchema",
label = "Vocabulary version choices:",
choices = choicesFordatabaseOrVocabularySchema,
multiple = FALSE,
width = 200,
inline = TRUE,
choicesOpt = list(style = rep_len("color: black;", 999)),
options = shinyWidgets::pickerOptions(
actionsBox = TRUE,
liveSearch = TRUE,
size = 10,
liveSearchStyle = "contains",
liveSearchPlaceholder = "Type here to search",
virtualScroll = 50
)
)
),
tags$td(shiny::htmlOutput("subjectCountInCohortConceptSet")),
tags$td(shiny::htmlOutput("recordCountInCohortConceptSet")),
tags$td(
shiny::conditionalPanel(
condition = "input.conceptSetsType == 'Resolved' ||
input.conceptSetsType == 'Orphan concepts'",
shiny::checkboxInput(
inputId = "withRecordCount",
label = "With Record Count",
value = TRUE
)
)
)
)
)
),
shiny::conditionalPanel(
condition = "output.cohortDefinitionConceptSetExpressionRowIsSelected == true &
input.conceptSetsType != 'Resolved' &
input.conceptSetsType != 'Json' &
input.conceptSetsType != 'Orphan concepts'",
htmltools::withTags(table(
width = "100%",
tr(
td(
align = "right",
)
)
)),
shinycssloaders::withSpinner(reactable::reactableOutput(outputId = "cohortDefinitionConceptSetDetailsTable"))
),
shiny::conditionalPanel(
condition = "input.conceptSetsType == 'Resolved'",
htmltools::withTags(table(
width = "100%",
tr(
td(
align = "right",
)
)
)),
shinycssloaders::withSpinner(reactable::reactableOutput(outputId = "cohortDefinitionResolvedConceptsTable"))
),
shiny::conditionalPanel(
condition = "output.cohortDefinitionResolvedRowIsSelected == true && input.conceptSetsType == 'Resolved'",
htmltools::withTags(table(
width = "100%",
tr(
td(
align = "right",
)
)
)),
shinydashboard::box(
title = "Mapped Concepts",
width = NULL,
shinycssloaders::withSpinner(reactable::reactableOutput(outputId = "cohortDefinitionResolvedTableSelectedConceptIdMappedConcepts"))
)
),
shiny::conditionalPanel(
condition = "input.conceptSetsType == 'Orphan concepts'",
htmltools::withTags(table(
width = "100%",
tr(
td(
align = "right",
)
)
)),
shinycssloaders::withSpinner(reactable::reactableOutput(outputId = "cohortDefinitionOrphanConceptTable"))
),
shiny::conditionalPanel(
condition = "input.conceptSetsType == 'Json'",
copyToClipboardButton(
toCopyId = "cohortConceptsetExpressionJson",
style = "margin-top: 5px; margin-bottom: 5px;"
),
shiny::verbatimTextOutput(outputId = "cohortConceptsetExpressionJson"),
tags$head(
tags$style("#cohortConceptsetExpressionJson { max-height:400px};")
)
)
),
shiny::tabPanel(
title = "JSON",
copyToClipboardButton("cohortDefinitionJson", style = "margin-top: 5px; margin-bottom: 5px;"),
shiny::verbatimTextOutput("cohortDefinitionJson"),
tags$head(tags$style(
"#cohortDefinitionJson { max-height:400px};"
))
),
shiny::tabPanel(
title = "SQL",
copyToClipboardButton("cohortDefinitionSql", style = "margin-top: 5px; margin-bottom: 5px;"),
shiny::verbatimTextOutput("cohortDefinitionSql"),
tags$head(tags$style(
"#cohortDefinitionSql { max-height:400px};"
))
)
)
)
),
)
),
shinydashboard::tabItem(
tabName = "cohortCounts",
cohortReference("cohortCountsSelectedCohorts"),
shinydashboard::box(
width = NULL,
title = NULL,
htmltools::withTags(
table(
width = "100%",
tr(
td(
shiny::radioButtons(
inputId = "cohortCountsTableColumnFilter",
label = "Display",
choices = c("Both", "Persons", "Records"),
selected = "Both",
inline = TRUE
)
),
td(
align = "right",
)
)
)
),
shinycssloaders::withSpinner(reactable::reactableOutput(outputId = "cohortCountsTable")),
shiny::conditionalPanel(
condition = "output.cohortCountRowIsSelected == true",
tags$br(),
shinycssloaders::withSpinner(reactable::reactableOutput("InclusionRuleStatForCohortSeletedTable", width = NULL))
),
if (showAnnotation) {
column(
12,
tags$br(),
annotationFunction("cohortCounts")
)
}
)
),
shinydashboard::tabItem(
tabName = "incidenceRate",
cohortReference("incidenceRateSelectedCohorts"),
shinydashboard::box(
title = "Incidence Rate",
width = NULL,
status = "primary",
htmltools::withTags(
table(
style = "width: 100%",
tr(
td(
valign = "bottom",
shiny::checkboxGroupInput(
inputId = "irStratification",
label = "Stratify by",
choices = c("Age", "Sex", "Calendar Year"),
selected = c("Age", "Sex", "Calendar Year"),
inline = TRUE
)
),
td(HTML(" ")),
td(
valign = "bottom",
style = "width:30% !important;margin-top:10px;",
shiny::conditionalPanel(
condition = "input.irYscaleFixed",
shiny::sliderInput(
inputId = "YscaleMinAndMax",
label = "Limit y-scale range to:",
min = c(0),
max = c(0),
value = c(0, 0),
dragRange = TRUE, width = 400,
step = 1,
sep = "",
)
)
),
td(HTML(" ")),
td(
valign = "bottom",
style = "text-align: right",
shiny::checkboxInput("irYscaleFixed", "Use same y-scale across databases")
)
)
)
),
htmltools::withTags(
table(
width = "100%",
tr(
td(
shiny::conditionalPanel(
condition = "input.irStratification.indexOf('Age') > -1",
shinyWidgets::pickerInput(
inputId = "incidenceRateAgeFilter",
label = "Filter By Age",
width = 400,
choices = c("All"),
selected = c("All"),
multiple = TRUE,
choicesOpt = list(style = rep_len("color: black;", 999)),
options = shinyWidgets::pickerOptions(
actionsBox = TRUE,
liveSearch = TRUE,
size = 10,
dropupAuto = TRUE,
liveSearchStyle = "contains",
liveSearchPlaceholder = "Type here to search",
virtualScroll = 50
)
)
)
),
td(
shiny::conditionalPanel(
condition = "input.irStratification.indexOf('Sex') > -1",
shinyWidgets::pickerInput(
inputId = "incidenceRateGenderFilter",
label = "Filter By Sex",
width = 200,
choices = c("All"),
selected = c("All"),
multiple = TRUE,
choicesOpt = list(style = rep_len("color: black;", 999)),
options = shinyWidgets::pickerOptions(
actionsBox = TRUE,
liveSearch = TRUE,
size = 10,
dropupAuto = TRUE,
liveSearchStyle = "contains",
liveSearchPlaceholder = "Type here to search",
virtualScroll = 50
)
)
)
),
td(
style = "width:30% !important",
shiny::conditionalPanel(
condition = "input.irStratification.indexOf('Calendar Year') > -1",
shiny::sliderInput(
inputId = "incidenceRateCalenderFilter",
label = "Filter By Calender Year",
min = c(0),
max = c(0),
value = c(0, 0),
dragRange = TRUE,
pre = "Year ",
step = 1,
sep = ""
)
)
),
td(
shiny::numericInput(
inputId = "minPersonYear",
label = "Minimum person years",
value = 1000,
min = 0
)
),
td(
shiny::numericInput(
inputId = "minSubjetCount",
label = "Minimum subject count",
value = NULL
)
),
td(
align = "right",
shiny::downloadButton(
"saveIncidenceRatePlot",
label = "",
icon = shiny::icon("download"),
style = "margin-top: 5px; margin-bottom: 5px;"
)
)
)
)
),
shiny::htmlOutput(outputId = "hoverInfoIr"),
ggiraph::ggiraphOutput(
outputId = "incidenceRatePlot",
width = "100%",
height = "100%"
)
)
),
shinydashboard::tabItem(
tabName = "timeDistribution",
cohortReference("timeDistributionSelectedCohorts"),
shinydashboard::box(
title = "Time Distributions",
width = NULL,
status = "primary",
shiny::radioButtons(
inputId = "timeDistributionType",
label = "",
choices = c("Table", "Plot"),
selected = "Plot",
inline = TRUE
),
shiny::conditionalPanel(
condition = "input.timeDistributionType=='Table'",
tags$table(
width = "100%",
tags$tr(tags$td(
align = "right",
))
),
shinycssloaders::withSpinner(reactable::reactableOutput(outputId = "timeDistributionTable"))
),
shiny::conditionalPanel(
condition = "input.timeDistributionType=='Plot'",
tags$br(),
ggiraph::ggiraphOutput("timeDistributionPlot", width = "100%", height = "100%")
),
if (showAnnotation) {
column(
12,
tags$br(),
annotationFunction("timeDistribution")
)
}
)
),
shinydashboard::tabItem(
tabName = "conceptsInDataSource",
cohortReference("conceptsInDataSourceSelectedCohort"),
shinydashboard::box(
title = "Concepts in Data Source",
width = NULL,
htmltools::withTags(
table(
width = "100%",
tr(
td(
shiny::radioButtons(
inputId = "includedType",
label = "",
choices = c("Source fields", "Standard fields"),
selected = "Standard fields",
inline = TRUE
)
),
td(
shiny::radioButtons(
inputId = "conceptsInDataSourceTableColumnFilter",
label = "",
choices = c("Both", "Persons", "Records"),
#
selected = "Persons",
inline = TRUE
)
),
td(
align = "right",
)
)
)
),
shinycssloaders::withSpinner(reactable::reactableOutput(outputId = "conceptsInDataSourceTable")),
if (showAnnotation) {
column(
12,
tags$br(),
annotationFunction("conceptsInDataSource")
)
}
)
),
shinydashboard::tabItem(
tabName = "orphanConcepts",
cohortReference("orphanConceptsSelectedCohort"),
shinydashboard::box(
title = NULL,
width = NULL,
htmltools::withTags(
table(
width = "100%",
tr(
td(
shiny::radioButtons(
inputId = "orphanConceptsType",
label = "Filters",
choices = c("All", "Standard Only", "Non Standard Only"),
selected = "All",
inline = TRUE
)
),
td(HTML(" ")),
td(
shiny::radioButtons(
inputId = "orphanConceptsColumFilterType",
label = "Display",
choices = c("All", "Persons", "Records"),
selected = "All",
inline = TRUE
)
),
td(
)
)
)
),
shinycssloaders::withSpinner(reactable::reactableOutput(outputId = "orphanConceptsTable")),
if (showAnnotation) {
column(
12,
tags$br(),
annotationFunction("orphanConcepts")
)
}
)
),
shinydashboard::tabItem(
tabName = "inclusionRuleStats",
cohortReference("inclusionRuleStatSelectedCohort"),
shinydashboard::box(
title = NULL,
width = NULL,
htmltools::withTags(
table(
width = "100%",
tr(
td(
align = "left",
shiny::radioButtons(
inputId = "inclusionRuleTableFilters",
label = "Inclusion Rule Events",
choices = c("All", "Meet", "Gain", "Remain", "Total"),
selected = "All",
inline = TRUE
)
),
td(
align = "right",
)
)
)
),
shinycssloaders::withSpinner(reactable::reactableOutput(outputId = "inclusionRuleTable")),
column(
12,
if (showAnnotation) {
column(
12,
tags$br(),
annotationFunction("inclusionRuleStats")
)
}
)
)
),
shinydashboard::tabItem(
tabName = "indexEventBreakdown",
cohortReference("indexEventBreakdownSelectedCohort"),
shinydashboard::box(
width = NULL,
title = NULL,
htmltools::withTags(
table(
width = "100%",
tr(
td(
shiny::radioButtons(
inputId = "indexEventBreakdownTableRadioButton",
label = "",
choices = c("All", "Standard concepts", "Non Standard Concepts"),
selected = "All",
inline = TRUE
)
),
td(HTML(" ")),
td(
shiny::radioButtons(
inputId = "indexEventBreakdownTableFilter",
label = "Display",
choices = c("Both", "Records", "Persons"),
selected = "Persons",
inline = TRUE
)
),
td(
shiny::checkboxInput(
inputId = "indexEventBreakDownShowAsPercent",
label = "Show as percent"
)
),
td(
align = "right",
)
)
)
),
shinycssloaders::withSpinner(reactable::reactableOutput(outputId = "breakdownTable")),
if (showAnnotation) {
column(
12,
tags$br(),
annotationFunction("indexEventBreakdown")
)
}
)
),
shinydashboard::tabItem(
tabName = "visitContext",
cohortReference("visitContextSelectedCohort"),
shinydashboard::box(
width = NULL,
title = NULL,
tags$table(
width = "100%",
tags$tr(
tags$td(
shiny::radioButtons(
inputId = "visitContextTableFilters",
label = "Display",
choices = c("All", "Before", "During", "Simultaneous", "After"),
selected = "All",
inline = TRUE
)
),
tags$td(
shiny::radioButtons(
inputId = "visitContextPersonOrRecords",
label = "Display",
choices = c("Persons", "Records"),
selected = "Persons",
inline = TRUE
)
),
tags$td(
align = "right",
)
)
),
shinycssloaders::withSpinner(reactable::reactableOutput(outputId = "visitContextTable")),
if (showAnnotation) {
column(
12,
tags$br(),
annotationFunction("visitContext")
)
}
)
),
shinydashboard::tabItem(
tabName = "cohortOverlap",
cohortReference("cohortOverlapSelectedCohort"),
shinydashboard::box(
title = "Cohort Overlap (Subjects)",
width = NULL,
status = "primary",
shiny::radioButtons(
inputId = "overlapPlotType",
label = "",
choices = c("Percentages", "Counts"),
selected = "Percentages",
inline = TRUE
),
ggiraph::ggiraphOutput("overlapPlot", width = "100%", height = "100%"),
if (showAnnotation) {
column(
12,
tags$br(),
annotationFunction("cohortOverlap")
)
}
)
),
shinydashboard::tabItem(
tabName = "cohortCharacterization",
cohortReference("characterizationSelectedCohort"),
shinydashboard::box(
width = NULL,
title = NULL,
tags$table(
tags$tr(
tags$td(
shiny::radioButtons(
inputId = "charType",
label = "",
choices = c("Pretty", "Raw"),
selected = "Pretty",
inline = TRUE
)
),
tags$td(
shiny::conditionalPanel(
condition = "input.charType == 'Raw'",
tags$table(tags$tr(
tags$td(
shiny::radioButtons(
inputId = "characterizationProportionOrContinuous",
label = "",
choices = c("All", "Proportion", "Continuous"),
selected = "Proportion",
inline = TRUE
)
)
))
)
)
),
tags$tr(
tags$td(
colspan = 2,
shiny::conditionalPanel(
condition = "input.charType == 'Raw'",
shiny::radioButtons(
inputId = "characterizationColumnFilters",
label = "Display",
choices = c("Mean and Standard Deviation", "Mean only"),
selected = "Mean only",
inline = TRUE
)
)
)
)
),
tags$table(
width = "100%",
tags$tr(
tags$td(
align = "right",
)
)
),
shinycssloaders::withSpinner(
reactable::reactableOutput(outputId = "characterizationTable")
),
if (showAnnotation) {
column(
12,
tags$br(),
annotationFunction("cohortCharacterization")
)
}
)
),
shinydashboard::tabItem(
tabName = "temporalCharacterization",
cohortReferenceWithDatabaseId("temporalCharacterizationSelectedCohort", "temporalCharacterizationSelectedDatabase"),
shinydashboard::box(
width = NULL,
title = NULL,
tags$table(tags$tr(
tags$td(
),
tags$td(
),
tags$td(
shiny::radioButtons(
inputId = "temporalProportionOrContinuous",
label = "",
choices = c("All", "Proportion", "Continuous"),
selected = "Proportion",
inline = TRUE
)
)
)),
tags$table(
width = "100%",
tags$tr(
tags$td(
align = "right",
)
)
),
shinycssloaders::withSpinner(reactable::reactableOutput("temporalCharacterizationTable")),
if (showAnnotation) {
column(
12,
tags$br(),
annotationFunction("temporalCharacterization")
)
}
)
),
shinydashboard::tabItem(
tabName = "compareCohortCharacterization",
cohortReferenceWithDatabaseId("cohortCharCompareSelectedCohort", "cohortCharCompareSelectedDatabase"),
shinydashboard::box(
width = NULL,
title = NULL,
tags$table(
tags$tr(
tags$td(
shiny::radioButtons(
inputId = "charCompareType",
label = "",
choices = c("Pretty table", "Raw table", "Plot"),
selected = "Plot",
inline = TRUE
),
),
tags$td(HTML(" ")),
tags$td(
shiny::conditionalPanel(
condition = "input.charCompareType == 'Raw table'",
shiny::radioButtons(
inputId = "compareCharacterizationColumnFilters",
label = "Display",
choices = c("Mean and Standard Deviation", "Mean only"),
selected = "Mean only",
inline = TRUE
)
)
)
)
),
shiny::conditionalPanel(
condition = "input.charCompareType == 'Raw table' | input.charCompareType=='Plot'",
tags$table(tags$tr(
tags$td(
shinyWidgets::pickerInput(
inputId = "compareCohortCharacterizationAnalysisNameFilter",
label = "Analysis name",
choices = c(""),
selected = c(""),
multiple = TRUE,
width = 200,
choicesOpt = list(style = rep_len("color: black;", 999)),
options = shinyWidgets::pickerOptions(
actionsBox = TRUE,
liveSearch = TRUE,
size = 10,
liveSearchStyle = "contains",
liveSearchPlaceholder = "Type here to search",
virtualScroll = 50
)
)
),
tags$td(
shinyWidgets::pickerInput(
inputId = "compareCohortcharacterizationDomainIdFilter",
label = "Domain name",
choices = c(""),
selected = c(""),
multiple = TRUE,
width = 200,
choicesOpt = list(style = rep_len("color: black;", 999)),
options = shinyWidgets::pickerOptions(
actionsBox = TRUE,
liveSearch = TRUE,
size = 10,
liveSearchStyle = "contains",
liveSearchPlaceholder = "Type here to search",
virtualScroll = 50
)
)
),
tags$td(
shiny::radioButtons(
inputId = "compareCharacterizationProportionOrContinuous",
label = "",
choices = c("All", "Proportion", "Continuous"),
selected = "Proportion",
inline = TRUE
)
)
))
),
shiny::conditionalPanel(
condition = "input.charCompareType=='Pretty table' | input.charCompareType=='Raw table'",
tags$table(
width = "100%",
tags$tr(
tags$td(
align = "right",
)
)
),
shinycssloaders::withSpinner(
reactable::reactableOutput("compareCohortCharacterizationTable")
)
),
shiny::conditionalPanel(
condition = "input.charCompareType=='Plot'",
shinydashboard::box(
title = "Compare Cohort Characterization",
width = NULL,
status = "primary",
shiny::htmlOutput("compareCohortCharacterizationSelectedCohort"),
shinycssloaders::withSpinner(
ggiraph::ggiraphOutput(
outputId = "compareCohortCharacterizationBalancePlot",
width = "100%",
height = "100%"
)
)
)
),
if (showAnnotation) {
column(
12,
tags$br(),
annotationFunction("compareCohortCharacterization")
)
}
)
),
shinydashboard::tabItem(
tabName = "compareTemporalCharacterization",
cohortReferenceWithDatabaseId(cohortOutputId = "temporalCharCompareSelectedCohort", databaseOutputId = "temporalCharCompareSelectedDatabase"),
shinydashboard::box(
width = NULL,
title = NULL,
tags$table(
tags$tr(
tags$td(
shiny::radioButtons(
inputId = "temporalCharacterizationType",
label = "",
choices = c("Raw table", "Plot"),
selected = "Plot",
inline = TRUE
)
),
tags$td(HTML(" ")),
tags$td(
shiny::conditionalPanel(
condition = "input.temporalCharacterizationType == 'Raw table'",
shiny::radioButtons(
inputId = "temporalCharacterizationTypeColumnFilter",
label = "Show in table:",
choices = c("Mean and Standard Deviation", "Mean only"),
selected = "Mean only",
inline = TRUE
)
)
)
)
),
shiny::conditionalPanel(
condition = "input.temporalCharacterizationType == 'Raw table' | input.temporalCharacterizationType=='Plot'",
tags$table(tags$tr(
tags$td(
shinyWidgets::pickerInput(
inputId = "temporalCompareAnalysisNameFilter",
label = "Analysis name",
choices = c(""),
selected = c(""),
multiple = TRUE,
width = 200,
choicesOpt = list(style = rep_len("color: black;", 999)),
options = shinyWidgets::pickerOptions(
actionsBox = TRUE,
liveSearch = TRUE,
size = 10,
liveSearchStyle = "contains",
liveSearchPlaceholder = "Type here to search",
virtualScroll = 50
)
)
),
tags$td(
shiny::radioButtons(
inputId = "temporalCompareCharacterizationProportionOrContinuous",
label = "Filter to:",
choices = c("All", "Proportion", "Continuous"),
selected = "Proportion",
inline = TRUE
)
)
))
),
shiny::conditionalPanel(
condition = "input.temporalCharacterizationType=='Pretty table' |
input.temporalCharacterizationType=='Raw table'",
tags$table(
width = "100%",
tags$tr(
tags$td(
align = "right",
)
)
),
shinycssloaders::withSpinner(
reactable::reactableOutput(outputId = "temporalCharacterizationCompareTable")
)
),
shiny::conditionalPanel(
condition = "input.temporalCharacterizationType=='Plot'",
shinydashboard::box(
title = "Compare Temporal Characterization",
width = NULL,
status = "primary",
shinycssloaders::withSpinner(
ggiraph::ggiraphOutput(
outputId = "temporalCharacterizationComparePlot",
width = "100%",
height = "100%"
)
)
)
),
if (showAnnotation) {
column(
12,
tags$br(),
annotationFunction("compareTemporalCharacterization")
)
}
)
),
shinydashboard::tabItem(
tabName = "databaseInformation",
shinydashboard::box(
width = NULL,
title = NULL,
shiny::tabsetPanel(
id = "metadataInformationTabsetPanel",
shiny::tabPanel(
title = "Data source",
value = "datasourceTabPanel",
tags$br(),
htmltools::withTags(table(
width = "100%",
tr(
td(
align = "right",
)
)
)),
tags$br(),
shinycssloaders::withSpinner(reactable::reactableOutput(outputId = "databaseInformationTable"))
),
shiny::tabPanel(
title = "Meta data information",
value = "metaDataInformationTabPanel",
tags$br(),
shinydashboard::box(
title = shiny::htmlOutput(outputId = "metadataInfoTitle"),
collapsible = TRUE,
width = NULL,
collapsed = FALSE,
shiny::htmlOutput(outputId = "metadataInfoDetailsText"),
shinydashboard::box(
title = NULL,
collapsible = TRUE,
width = NULL,
collapsed = FALSE,
shinycssloaders::withSpinner(reactable::reactableOutput(outputId = "packageDependencySnapShotTable"))
),
shinydashboard::box(
title = NULL,
collapsible = TRUE,
width = NULL,
collapsed = FALSE,
shiny::verbatimTextOutput(outputId = "argumentsAtDiagnosticsInitiationJson"),
tags$head(
tags$style("#argumentsAtDiagnosticsInitiationJson { max-height:400px};")
)
)
)
)
)
)
)
)
# body
body <- shinydashboard::dashboardBody(
bodyTabItems,
htmltools::withTags(
div(
style = "margin-left : 0px",
h6(appInformationText)
)
)
)
# main
shinydashboard::dashboardPage(
tags$head(tags$style(HTML(
"
th, td {
padding-right: 10px;
}
"
))),
header = header,
sidebar = sidebar,
body = body
)
|
0516796f98554663fc73bcd25b4550ae54b8ce7c
|
6c739524e36e6847b920574317b9393a4f417fc5
|
/man/normalized.ratio.index.Rd
|
327fac88e89629f8022bc1b9c809e1bc07fced6e
|
[] |
no_license
|
cran/hsdar
|
8f501bf3006508e86700049e0bec34c159eba802
|
3c5cd851e3dd181b361b23b494d05dbeb02c2018
|
refs/heads/master
| 2022-04-12T22:08:55.843226
| 2022-02-21T11:20:02
| 2022-02-21T11:20:02
| 31,416,910
| 14
| 10
| null | 2018-08-03T18:30:18
| 2015-02-27T11:44:37
|
Fortran
|
UTF-8
|
R
| false
| false
| 2,849
|
rd
|
normalized.ratio.index.Rd
|
\name{nri}
\alias{nri}
%\alias{print.nri}
%\alias{as.matrix.nri}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Normalised ratio index
}
\description{
Calculate normalised ratio index (nri) for a single given band combination or for all possible band combinations. Calculating nri is a frequently used method to standardize reflectance values and to find relationships between properties of the objects and their spectral data.
}
\usage{
nri(x, b1, b2, recursive = FALSE, bywavelength = TRUE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
List of class \code{Speclib} or of class \code{Nri} for print and as.matrix methods.
}
\item{b1}{
Band 1 given as band number or wavelength.
}
\item{b2}{
Band 2 given as band number or wavelength.
}
\item{recursive}{
If TRUE indices for all possible band combinations are calculated. If FALSE, only a single nri for the given bands in \code{b1} and \code{b2} is calculated.
}
\item{bywavelength}{
Flag to determine if b1 and b2 are band number (bywavelength = FALSE) or wavelength (bywavelength = TRUE) values.
}
% \item{named_matrix}{
%Flag if column names should be set to band names and row names to ids of spectra.
%}
}
\details{
Function for \code{nri} performs the following calculation:
\deqn{nri_{B1,~B2}=\frac{R_{B1}-R_{B2}}{R_{B1}+R_{B2}};}
with \eqn{R} being reflectance values at wavelength \eqn{B1} and \eqn{B2}, respectively.
If recursive = TRUE, all possible band combinations are calculated.
}
\value{
If recursive = FALSE, a data frame with index values is returned. Otherwise result is an object of class \code{\linkS4class{Nri}}. See \code{\link{glm.nri}} for applying a generalised linear model to an array of normalised ratio indices.
}
\references{
Sims, D.A.; Gamon, J.A. (2002). Relationships between leaf pigment content and spectral reflectance across a wide range of species, leaf structures and developmental stages. Remote Sensing of Environment: 81/2, 337 - 354.
Thenkabail, P.S.; Smith, R.B.; Pauw, E.D. (2000). Hyperspectral vegetation indices and their relationships with agricultural crop characteristics. Remote Sensing of Environment: 71/2, 158 - 182.
}
\author{
Lukas Lehnert
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{glm.nri}}, \code{\link{glm}}, \code{\linkS4class{Speclib}}, \code{\linkS4class{Nri}}
}
\examples{
data(spectral_data)
## Calculate NDVI
ndvi <- nri(spectral_data, b1=800, b2=680)
## Calculate all possible combinations for WorldView-2-8
spec_WV <- spectralResampling(spectral_data, "WorldView2-8",
response_function = FALSE)
nri_WV <- nri(spec_WV, recursive = TRUE)
nri_WV
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{multivariate}
|
2558022255f69e045f74d2604fbef62b4a483f20
|
19101bca18ae24b13cbb81490fff45d515e4927c
|
/HW4/tapply_dplyr.R
|
1fcd07f9c30ca5132cca7f0481ab8fb97a41a6ee
|
[] |
no_license
|
zcoeman/PLS900_BBIC
|
ce48fc80d6f74a8df6ca7453424c96068f5a5027
|
715071d3197bbc2d8f2215c279f748c22ab9d586
|
refs/heads/master
| 2021-05-05T09:00:38.610972
| 2018-03-22T21:30:53
| 2018-03-22T21:30:53
| 119,143,937
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 851
|
r
|
tapply_dplyr.R
|
load("/Users/nickbichay/Desktop/ /aPLS 900/Week 4/polity_dataframe.rda")
#### With tapply
f <- function(x) c(mean = mean(x, na.rm=TRUE), median=median(x, na.rm=TRUE), sd = sd(x, na.rm=TRUE))
bound <- do.call(rbind, sapply(polity[,c('democ','autoc','polity2','xconst')], function(x) tapply(x, polity$year, f)))
stats <- matrix(bound, nrow=length(unique(polity$year)), ncol=12,
dimnames=list( unique(polity$year),
c("democ_mean", "autoc_mean", "polity2_mean", "xconst_mean", "democ_median", "autoc_median", "polity2_median", "xconst_median", "democ_sd", "autoc_sd", "polity2_sd", "xconst_sd"))
)
#### With dplyr
library(dplyr)
stats2 <- polity[,c('democ','autoc','polity2','xconst')] %>% group_by(polity$year) %>% summarise_all(funs(mean, median, sd), na.rm=TRUE)
|
6145ca4bf927ae068a71c3c1653f43747acea279
|
01e473d07ba9e8353a22c39647d78c8eee272ec2
|
/data-raw/DATASET.R
|
46755317c68f3a9b524541fbbb1445609a0d9a18
|
[
"MIT"
] |
permissive
|
bailliem/pharmavisR
|
36bc8ca2c79a1ce361a57955aa1e64b6c50422dc
|
3d0a1bf63c05543b9757096dc1fce0f4d9850dbe
|
refs/heads/master
| 2023-07-20T21:56:18.811707
| 2022-08-19T06:01:46
| 2022-08-19T06:01:46
| 212,809,031
| 1
| 0
| null | 2019-10-04T12:23:17
| 2019-10-04T12:23:17
| null |
UTF-8
|
R
| false
| false
| 1,355
|
r
|
DATASET.R
|
## code to prepare `DATASET` dataset goes here
usethis::use_data("DATASET")
# Breast survival data
library(dplyr)
library(RTCGA)
library(RTCGA.clinical)
brca_cohort <- survivalTCGA(
BRCA.clinical,
extract.cols = c(
"admin.disease_code",
"patient.breast_carcinoma_estrogen_receptor_status",
"patient.breast_carcinoma_progesterone_receptor_status",
"patient.clinical_cqcf.tumor_type",
"patient.drugs.drug.therapy_types.therapy_type",
"patient.age_at_initial_pathologic_diagnosis"
)
) %>%
dplyr::rename(
tumor_type = patient.clinical_cqcf.tumor_type,
therapy = patient.drugs.drug.therapy_types.therapy_type,
er_status = patient.breast_carcinoma_estrogen_receptor_status,
progesterone_status = patient.breast_carcinoma_progesterone_receptor_status,
dx_age = patient.age_at_initial_pathologic_diagnosis,
followup_time = times
) %>%
dplyr::mutate(
er_status = factor(er_status),
progesterone_status = factor(progesterone_status),
dx_age = as.numeric(dx_age),
dx_age_group = factor(
case_when(
dx_age < 30 ~ "< 30y",
dx_age >= 30 &
dx_age <= 50 ~ "30-50y",
dx_age > 50 &
dx_age <= 70 ~ "51-70y",
dx_age > 70 ~ "> 70y"
),
levels = c("< 30y", "30-50y", "51-70y", "> 70y")
)
)
usethis::use_data(brca_cohort)
|
d0842d2314fc56728e6dbb2538296c2ce1f32264
|
7f72ac13d08fa64bfd8ac00f44784fef6060fec3
|
/RGtk2/man/gtkIconSourceCopy.Rd
|
06104ea1cf60ff965a0e5d4ce3d1318a3afcbaac
|
[] |
no_license
|
lawremi/RGtk2
|
d2412ccedf2d2bc12888618b42486f7e9cceee43
|
eb315232f75c3bed73bae9584510018293ba6b83
|
refs/heads/master
| 2023-03-05T01:13:14.484107
| 2023-02-25T15:19:06
| 2023-02-25T15:20:41
| 2,554,865
| 14
| 9
| null | 2023-02-06T21:28:56
| 2011-10-11T11:50:22
|
R
|
UTF-8
|
R
| false
| false
| 402
|
rd
|
gtkIconSourceCopy.Rd
|
\alias{gtkIconSourceCopy}
\name{gtkIconSourceCopy}
\title{gtkIconSourceCopy}
\description{Creates a copy of \code{object}; mostly useful for language bindings.}
\usage{gtkIconSourceCopy(object)}
\arguments{\item{\verb{object}}{a \code{\link{GtkIconSource}}}}
\value{[\code{\link{GtkIconSource}}] a new \code{\link{GtkIconSource}}}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
7dd429279c29f6b2c1ad64fd5ebdfabbdaa4eff9
|
5cdfe09b136d8c56160b7048733d26d391e770b7
|
/Scripts/drafts/master_credito.R
|
d29f68f17240ea105c955cb2e20158c264a069f5
|
[] |
no_license
|
DanielRZapataS/Recommendation_System_Retail_Banking
|
a7265b731127c60f9233138f492989649f2be3ce
|
1de13ca704dfa80ba8e4e374ade481d7ba33ecb9
|
refs/heads/master
| 2020-11-25T21:28:36.779429
| 2019-12-18T15:09:49
| 2019-12-18T15:09:49
| 228,851,338
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 654
|
r
|
master_credito.R
|
#' Crear master de cada modelo
# Segun el modelo se toman unas u otras variables
# variables de la base que le sirven al modelo de tarjetas
# aa_vlr_ing_bru_mes
# aa_vlr_egreso_mes
# age
# antiguedad
# aa_vlr_activos
# aa_vlr_pasivos
# aa_estrato
var_interest <- c("aa_vlr_ing_bru_mes", "aa_vlr_egreso_mes",
"aa_vlr_activos",
"bb_tipo_doc_pn", "aa_nit",
"birthdate", "sex", "nivel_educativo", "mar_status",
"aa_tipo_vivienda", "aa_estrato", "aa_cod_ciiu",
"hire_dt", "bb_seg_comercial", "aa_cod_ocupacion",
"cod_ciud_dir_ppal", "aa_vlr_activos",
"aa_vlr_ing_bru_mes", "aa_vlr_egreso_mes",
"aa_vlr_pasivos", "aa_vlr_ventas", "aa_declara_renta")
|
b5e8575d2e124c29ce9a989c28c431c7b0599530
|
e1cbbf8791b0ac6d40f6d5b397785560105441d9
|
/man/quapdq3.Rd
|
a9e2061137b19b5b7910b516483dcfefd9348473
|
[] |
no_license
|
wasquith/lmomco
|
96a783dc88b67017a315e51da3326dfc8af0c831
|
8d7cc8497702536f162d7114a4b0a4ad88f72048
|
refs/heads/master
| 2023-09-02T07:48:53.169644
| 2023-08-30T02:40:09
| 2023-08-30T02:40:09
| 108,880,810
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,584
|
rd
|
quapdq3.Rd
|
\encoding{utf8}
\name{quapdq3}
\alias{quapdq3}
\title{Quantile Function of the Polynomial Density-Quantile3 Distribution}
\description{
This function computes the quantiles of the Polynomial Density-Quantile3 distribution (PDQ3) given parameters (\eqn{\xi}, \eqn{\alpha}, and \eqn{\kappa}) computed by \code{\link{parpdq3}}. The quantile function is
\deqn{x(F) = \xi + \alpha \biggl[\log\biggl(\frac{F}{1-F}\biggr) + \kappa \log\bigg(\frac{[1-\kappa(2F-1)]^2}{4F(1-F)}\biggr)\biggr]\mbox{,}}
where \eqn{x(F)} is the quantile for nonexceedance probability \eqn{F},
\eqn{\xi} is a location parameter, \eqn{\alpha} is a scale parameter,
and \eqn{\kappa} is a shape parameter. The range of the distribution is \eqn{-\infty < x < \infty}. This formulation of logistic distribution generalization is unique in the literature.
}
\usage{
quapdq3(f, para, paracheck=TRUE)
}
\arguments{
\item{f}{Nonexceedance probability (\eqn{0 \le F \le 1}).}
\item{para}{The parameters from \code{\link{parpdq3}} or \code{\link{vec2par}}.}
\item{paracheck}{A logical controlling whether the parameters are checked for validity. Overriding of this check might be extremely important and needed for use of the quantile function in the context of TL-moments with nonzero trimming.}
}
\value{
Quantile value for nonexceedance probability \eqn{F}.
}
\details{
The PDQ3 was proposed by Hosking (2007) with the core justification of maximizing entropy and that \dQuote{maximizing entropy subject to a set of constraints can be regarded as deriving a distribution that is consistent with the information specified in the constraints while making minimal assumptions about the form of the distribution other than those embodied in the constraints.} The PDQ3 is that family constrained to the \eqn{\lambda_1}, \eqn{\lambda_2}, and \eqn{\tau_3} values of the L-moments. (See also the Polynomial Density-Quantile4 function for constraint on \eqn{\lambda_1}, \eqn{\lambda_2}, and \eqn{\tau_4} values of the L-moments, \code{\link{quapdq4}}.)
The PDQ3 has maximum entropy conditional on having specified values for the L-moments of \eqn{\lambda_1}, \eqn{\lambda_2}, and \eqn{\lambda_3 = \tau_3\lambda_2}. The tails of the PDQ3 are exponentially decreasing and the distribution could be useful in distributional analysis with data showing similar tail characteristics. The attainable L-kurtosis range is \eqn{\tau_4 = (5\tau_3/\kappa) - 1}.
}
\references{
Hosking, J.R.M., 2007, Distributions with maximum entropy subject to constraints on their L-moments or expected order statistics: Journal of Statistical Planning and Inference, v. 137, no. 9, pp. 2870--2891, \doi{10.1016/j.jspi.2006.10.010}.
}
\author{W.H. Asquith}
\seealso{\code{\link{cdfpdq3}}, \code{\link{pdfpdq3}}, \code{\link{lmompdq3}}, \code{\link{parpdq3}}}
\examples{
lmr <- lmoms(c(123, 34, 4, 654, 37, 78))
quapdq3(0.5, parpdq3(lmr)) # [1] 51.22802
\dontrun{
FF <- seq(0.002475, 1 - 0.002475, by=0.001)
para <- list(para=c(0.6933, 1.5495, 0.5488), type="pdq3")
plot(log(FF/(1-FF)), quapdq3(FF, para), type="l", col=grey(0.8), lwd=4,
xlab="Logistic variate, log(f/(1-f))", ylab="Quantile, Q(f)")
lines(log(FF/(1-FF)), log(qf(FF, df1=7, df2=1)), lty=2)
legend("topleft", c("log F(7,1) distribution with same L-moments",
"PDQ3 distribution with same L-moments as the log F(7,1)"),
lwd=c(1, 4), lty=c(2, 1), col=c(1, grey(0.8)), cex=0.8)
mtext("Mimic Hosking (2007, fig. 2 [right])") # }
}
\keyword{distribution}
\keyword{quantile function}
\keyword{Distribution: Polynomial Density-Quantile3}
|
209b531d72bb47a837f6898ed2a28f5b88c498c6
|
306758ad07d8287d078bd3b1c18d55aaab086539
|
/ProgrammingAssignment2.R
|
c7c7811c64e44b8a535ed5901364b37f1d7af05f
|
[] |
no_license
|
JLPherigo/ProgrammingAssignment2
|
9176b0ecad3c9ee6f4c9275996778dfb0f46ffab
|
f71626bdde1e53af45776393a2ca6a9f71ac41af
|
refs/heads/master
| 2021-09-01T04:23:29.498905
| 2017-12-24T19:19:47
| 2017-12-24T19:19:47
| 115,277,268
| 0
| 0
| null | 2017-12-24T17:13:08
| 2017-12-24T17:13:08
| null |
UTF-8
|
R
| false
| false
| 1,079
|
r
|
ProgrammingAssignment2.R
|
## These two functions are in partial fulfillment of the
## R Programming Programming Assignment 2: Lexical Scoping.
## These two functions will cache the inverse of a matrix.
## makeCacheMatrix creates a special "matrix" object that can cache its
## inverse.  It returns a list of four accessor closures sharing one
## enclosing environment:
##   setmatrix(y)      replace the stored matrix and clear the cached inverse
##   getmatrix()       return the stored matrix
##   setinverse(inv)   store a computed inverse in the cache
##   getinverse()      return the cached inverse, or NULL if none cached
makeCacheMatrix <- function(x = matrix()) {
  invmatrix <- NULL
  setmatrix <- function(y) {
    x <<- y
    invmatrix <<- NULL  # a new matrix invalidates any cached inverse
  }
  getmatrix <- function() x
  ## BUG FIX: the original used `<-`, which only created a local binding
  ## inside the setter's own call frame, so the cache was never actually
  ## written.  `<<-` assigns into the enclosing environment shared by all
  ## four accessors, which is what makes the caching work.
  setinverse <- function(inverse) invmatrix <<- inverse
  getinverse <- function() invmatrix
  list(setmatrix = setmatrix, getmatrix = getmatrix,
       setinverse = setinverse, getinverse = getinverse)
}
## cacheSolve returns the inverse of the special matrix object 'x'
## (built by makeCacheMatrix).  The inverse is computed with solve()
## only on the first call; subsequent calls serve the cached copy.
## Extra arguments in ... are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (is.null(cached)) {
    ## Cache miss: compute, store, and return the inverse.
    cached <- solve(x$getmatrix(), ...)
    x$setinverse(cached)
  } else {
    message("getting cached invertible matrix")
  }
  cached
}
|
5ce5559da1a5504543c7370229d02cb7f018ddd7
|
02a204c4ff6a767037e4559d376311eeed50430c
|
/dm1/lecture/08/centroids.r
|
e3acc847df70572d226cc26576df02362b62b2ec
|
[
"MIT"
] |
permissive
|
codeAligned/rw
|
6bdecd74388cfc51357b1c48eddc0eb9cbb9042a
|
7fb6cf2e0da3fe48a108f391e8d3efb9044e7a03
|
refs/heads/master
| 2020-06-18T13:40:04.749230
| 2019-06-14T22:48:08
| 2019-06-14T22:48:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,377
|
r
|
centroids.r
|
## Lecture demo: cluster centroids under cosine similarity vs. Euclidean
## distance.  Seven 2-D points form two visually obvious clusters:
## {a,b,c} in the upper left and {q,r,s,t} in the lower right.
x<-c(1,1,2,6,6,7,7)
y<-c(9,10,9.5,1,2,1.5,3)
plot(y~x)
m<-matrix(data=cbind(x,y),nrow=7,ncol=2)
colnames(m)<-c("x","y")
rownames(m)<-c("a","b","c","q","r","s","t")
m
## lsa::cosine() treats each COLUMN as a vector, so transpose first.
library("lsa")
tm <- t(m)
tm
simMat<-cosine(tm)
simMat
##what's the centroid of a,b,c?
## For cosine similarity only the direction matters, so the centroid is
## taken as the plain component-wise SUM (no division by cluster size);
## scaling a vector does not change its angle.
c1.x<-1+1+2
c1.y<-9+10+9.5
##what's the centroid of q,r,s,t
c2.x<-6+6+7+7
c2.y<-1+2+1.5+3
c1<-cbind(c1.x,c1.y)
c2<-cbind(c2.x,c2.y)
## Append the two centroid vectors as columns 8 ("c1") and 9 ("c2").
m2<-cbind(tm,t(c1),t(c2))
colnames(m2)[8]<-"c1"
colnames(m2)[9]<-"c2"
m2
cosine(m2)
plot(t(m2),col=c("blue","blue","blue","blue","blue","blue","blue","red","red"))
##Notice the centroid vectors do indeed have angles that fall in between their respective
##clusters. But they are somehow unsatisfying to look at.
##This doesn't matter in higher dimensions because we can't visualize them anyway
##However, if it bothers you we can always turn all of our vectors into unit vectors
##without affecting the angles
m2
m3<-m2
## Euclidean length (L2 norm) of each of the 9 column vectors.
norm1<-sqrt(m3[1,]*m3[1,] + m3[2,]*m3[2,])
norm1
## Divide every column by its norm, turning each into a unit vector.
for (i in 1:2){
for (j in 1:9){
m3[i,j]<-m3[i,j]/norm1[j]
}
}
m3
plot(t(m3),col=c("blue","blue","blue","blue","blue","blue","blue","red","red"))
cosine(m3)
cosine(m2)
## Do we get the same similarity matrices using normalized and un-normalized vectors?
## probably, but let's see
cosine(m3)-cosine(m2)
## 10^(-17) is 0 in my book, but still it's unsatisfying
round(cosine(m3)-cosine(m2))
##Bottom line:
##
##When computing cosine similarities we can compute the centroid of a cluster by adding
##all of the vectors in the cluster together.
##
##You might also see in papers and books that all of the vectors first get normalized
##to unit vectors and the centroids get normalized to unit vectors.
##
##Don't let this confuse you. It's a nicety and some people do it and some don't
##Let's look at Euclidean centroids
##
plot(m)
m
##What's the centroid of a,b,c?
##Standard formula is sum of x components divided by number of x components and
##sum of y components divided by number of y components
c1.x <- (1+1+2)/3
c1.y <- (9+10+9.5)/3
c2.x <- (6+6+7+7)/4
c2.y <- (1+2+1.5+3)/4
c1<-cbind(c1.x,c1.y)
c2<-cbind(c2.x,c2.y)
## Append the Euclidean (mean) centroids the same way as before.
m4<-cbind(tm,t(c1),t(c2))
colnames(m4)[8]<-"c1"
colnames(m4)[9]<-"c2"
plot(t(m4),col=c("blue","blue","blue","blue","blue","blue","blue","red","red"))
|
fd37545a20683cbcf6ae55d6a1af9d2b815b2d70
|
1e36998839f250b75e991bdb27e0d7ec474608e5
|
/PracticalML/project/project.R
|
08c080d6c8f225a2c7f178aacf29f0eac66a3d7c
|
[
"MIT"
] |
permissive
|
NatalieTan/R
|
7f81e5e8ebbe9ced2db305b78453374d62ce3d62
|
57e493e90250b9667f014a1e30377b044df3aa2d
|
refs/heads/master
| 2021-01-24T05:06:00.061935
| 2015-09-11T14:26:18
| 2015-09-11T14:26:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 882
|
r
|
project.R
|
## Practical Machine Learning course project: fit a PCA-preprocessed kNN
## model to predict the 'classe' outcome of the pml-training dataset.
library(caret)
set.seed(5152)
# pml.testing <- read.csv("PracticalML/project/pml-testing.csv")
# sapply(pml.testing, class)
pml.training <- read.csv("PracticalML/project/pml-training.csv")
# sapply(pml.training, class)
## 65/35 stratified split on the outcome variable 'classe'.
inTrain <- createDataPartition(y = pml.training$classe,
p = 0.65,
list = FALSE)
training <- pml.training[inTrain,]
testing <- pml.training[-inTrain,]
## Keep only complete rows; 'te2' retains all columns (to supply the
## outcome later), 'te' is reduced to numeric columns for PCA.
te <- training[complete.cases(training),]
te2 <- training[complete.cases(training),]
te <- te[sapply(te,is.numeric)]
## PCA retaining components that explain 90% of the variance.
prePro <- preProcess(te, method = "pca", thresh = 0.9, na.remove = TRUE)
trainPC <- predict(prePro, te)
modelfit <- train(te2$classe ~ ., data = trainPC, method = "knn", prePro="pca")
# NOTE(review): the two lines below look copied from a different exercise
# (the Alzheimer's "IL_*"/diagnosis example) — this script's outcome is
# 'classe' (see createDataPartition above), and nothing here shows
# pml-training.csv containing 'IL' columns or a 'diagnosis' column.
# Confirm, and mirror the training preprocessing for the test set instead.
testPC <- predict(prePro, testing[, grep("IL|diagnosis", names(testing))][2:13])
confusionMatrix(testing$diagnosis,predict(modelfit,testPC))
|
79e0d9dd3d4645b2d0c4faad7192a07af53ba54f
|
96dd0f70cfcb97754853ae9279b858133891682c
|
/man/mvgls.dfa.Rd
|
abca0f83d030e4f9e9c2823a8a193d16f95b89a5
|
[] |
no_license
|
JClavel/mvMORPH
|
27e18d6172eefb28e527fde88671275f80afca07
|
e75c68a0fece428e5e98d8f9ae7281569b7159c8
|
refs/heads/master
| 2023-07-10T21:12:01.839493
| 2023-06-30T14:37:11
| 2023-06-30T14:37:11
| 36,449,296
| 17
| 8
| null | 2022-06-22T14:40:37
| 2015-05-28T15:50:01
|
R
|
UTF-8
|
R
| false
| false
| 2,902
|
rd
|
mvgls.dfa.Rd
|
\name{mvgls.dfa}
\alias{mvgls.dfa}
\title{
Discriminant Function Analysis (DFA) - also called Linear Discriminant Analysis (LDA) or Canonical Variate Analysis (CVA) - based on multivariate GLS (or OLS) model fit
}
\description{
Performs a discriminant analysis (DFA) on a regularized variance-covariance matrix obtained using either the \code{mvgls} or \code{mvols} function.
}
\usage{
mvgls.dfa(object, ...)
}
\arguments{
\item{object}{
A model fit obtained by the \code{mvgls} or the \code{mvols} function.
}
\item{...}{
Options to be passed through. (e.g., \code{term="the term corresponding to the factor of interest"}, \code{type="I"} for the type of decomposition of the hypothesis matrix (see also manova.gls) , etc.)
}
}
\value{
a list with the following components
\item{coeffs}{a matrix containing the raw discriminants}
\item{coeffs.std}{a matrix containing the standardized discriminants}
\item{scores}{a matrix containing the discriminant scores [residuals X coeffs]}
\item{residuals}{the centered [with GLS or OLS] response variables}
\item{H}{the hypothesis (or between group model matrix)}
\item{E}{the error (or residual model matrix)}
\item{rank}{the rank of \eqn{HE^{-1}}}
\item{pct}{the percentage of the discriminant functions}
}
\details{
\code{mvgls.dfa} allows computing a discriminant analysis based on GLS (or OLS) estimates from a regression model (see \code{mvgls} and \code{mvols}). Discriminant functions can be used for dimensionality reduction, to follow up a MANOVA analysis to describe group separation, or for group prediction.
}
\note{
Still in development, may not handle special designs. }
\references{
Clavel, J., Aristide, L., Morlon, H., 2019. A Penalized Likelihood framework for high-dimensional phylogenetic comparative methods and an application to new-world monkeys brain evolution. Systematic Biology 68(1): 93-116.
Clavel, J., Morlon, H., 2020. Reliable phylogenetic regressions for multivariate comparative data: illustration with the MANOVA and application to the effect of diet on mandible morphology in Phyllostomid bats. Systematic Biology 69(5): 927-943.
}
\author{J. Clavel}
\seealso{
\code{\link{mvgls}},
\code{\link{mvols}},
\code{\link{manova.gls}},
\code{\link{mvgls.pca}},
\code{\link{predict.mvgls.dfa}},
}
\examples{
\donttest{
library(mvMORPH)
n=64
p=4
tree <- pbtree(n=n)
sigma <- crossprod(matrix(runif(p*p),p,p))
resid <- mvSIM(tree, model="BM1", param=list(sigma=sigma))
Y <- rep(c(0,1.5), each=n/2) + resid
grp <- as.factor(rep(c("gp1","gp2"),each=n/2))
names(grp) = rownames(Y)
data <- list(Y=Y, grp=grp)
mod <- mvgls(Y~grp, data=data, tree=tree, model="BM")
# fda
da1 <- mvgls.dfa(mod)
plot(da1)
}
}
\keyword{ LDA }
\keyword{ CVA }
\keyword{ DFA }
\keyword{ Discriminant }
\keyword{ Regularization }
\keyword{ Penalized likelihood }
\keyword{ High dimensions }% __ONLY ONE__ keyword per line
|
2921e831fa73e8154dc4e8d01628107d7f22285b
|
9cc15201bab2a24a4e5a7a9cff49a647d6f97db1
|
/man/wabl.Rd
|
1074f8c2ad85aadb551dee3b7dbe8c2c12e8b0bc
|
[] |
no_license
|
cran/FuzzySTs
|
486e3bee5be628fcbf95a1e201ed702d74b794d7
|
c7c45543baf531f37c64f9082b13aea5532480ea
|
refs/heads/master
| 2023-01-19T15:42:56.661744
| 2020-11-23T12:50:03
| 2020-11-23T12:50:03
| 278,227,869
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,052
|
rd
|
wabl.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Distances_17102018.R
\name{wabl}
\alias{wabl}
\title{Calculates a distance by the d_wabl between fuzzy numbers}
\usage{
wabl(X, Y, i = 1, j = 1, theta = 1/3, breakpoints = 100)
}
\arguments{
\item{X}{a fuzzy number.}
\item{Y}{a fuzzy number.}
\item{i}{parameter of the density function of the Beta distribution, fixed by default to i = 1.}
\item{j}{parameter of the density function of the Beta distribution, fixed by default to j = 1.}
\item{theta}{a numerical value between 0 and 1, representing a weighting parameter. By default, theta is fixed to 1/3 referring to the Lebesgue space. This measure is used in the calculations of the following distances: d_Bertoluzza, d_mid/spr and d_phi-wabl/ldev/rdev.}
\item{breakpoints}{a positive arbitrary integer representing the number of breaks chosen to build the numerical alpha-cuts. It is fixed to 100 by default.}
}
\value{
A numerical value.
}
\description{
Calculates a distance by the d_wabl between fuzzy numbers
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.